# Owner(s): ["module: complex"]
import torch
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.common_dtype import complex_types
devices = (torch.device('cpu'), torch.device('cuda:0'))
class TestComplexTensor(TestCase):
@dtypes(*complex_types())
def test_to_list(self, device, dtype):
# test that the complex float tensor has expected values and
# there's no garbage value in the resultant list
self.assertEqual(torch.zeros((2, 2), device=device, dtype=dtype).tolist(), [[0j, 0j], [0j, 0j]])
@dtypes(torch.float32, torch.float64)
def test_dtype_inference(self, device, dtype):
# issue: https://github.com/pytorch/pytorch/issues/36834
default_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
x = torch.tensor([3., 3. + 5.j], device=device)
torch.set_default_dtype(default_dtype)
self.assertEqual(x.dtype, torch.cdouble if dtype == torch.float64 else torch.cfloat)
instantiate_device_type_tests(TestComplexTensor, globals())
if __name__ == '__main__':
run_tests()
# repo: pytorch-master | file: test/test_complex.py
# Owner(s): ["module: unknown"]
from collections.abc import Sequence
from functools import partial
import warnings
import unittest
import itertools
import torch
import contextlib
from collections import defaultdict
from importlib import import_module
from torch.utils._pytree import tree_map
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_and_complex_types_and,
all_types_and_complex_and,
)
from torch.testing._internal.common_utils import (
TestCase,
is_iterable_of_tensors,
run_tests,
IS_SANDCASTLE,
clone_input_helper,
IS_CI,
suppress_warnings,
noncontiguous_like,
TEST_WITH_ASAN,
TEST_WITH_UBSAN,
IS_WINDOWS,
IS_FBCODE,
first_sample,
parametrize,
skipIfSlowGradcheckEnv,
)
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
ReductionPythonRefInfo,
SpectralFuncInfo,
ops_and_refs,
python_ref_db,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
ops,
onlyCUDA,
onlyCPU,
onlyNativeDeviceTypes,
OpDTypes,
skipCUDAIfRocm,
skipMeta,
)
from torch._subclasses.fake_tensor import (
FakeTensor,
FakeTensorMode,
)
from torch.utils._python_dispatch import enable_torch_dispatch_mode
import torch._prims as prims
from torch._prims.context import TorchRefsMode
from torch.testing._internal import opinfo
from torch.testing._internal import composite_compliance
from torch.utils._pytree import tree_flatten
from torch.utils._python_dispatch import TorchDispatchMode
# TODO: fixme https://github.com/pytorch/pytorch/issues/68972
torch.set_default_dtype(torch.float32)
# variant testing is only done with torch.float and torch.cfloat to avoid
# excessive test times and maximize signal to noise ratio
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
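# Hedged illustration (not part of the suite): a decorator built with `partial(ops, ...)`
# like `_variant_ops` above is applied to an OpInfo-parametrized test roughly as in the
# sketch below; the test name and body here are placeholders, not real tests.
#
#     @_variant_ops(op_db)
#     def test_example_variants(self, device, dtype, op):
#         sample = first_sample(self, op.sample_inputs(device, dtype))
#         op(sample.input, *sample.args, **sample.kwargs)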
# Get all the operators that have a ref (an ndarray-accepting reference) in their OpInfo entry (testing infra),
# except for elementwise unary operators (separately implemented in test/test_unary_ufuncs.py),
# elementwise binary operators (separately implemented in test/test_binary_ufuncs.py),
# reduction operations (separately implemented in test/test_reductions.py),
# and spectral functions (separately implemented, currently only for 1D, in test/test_spectral_ops.py)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
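# Hedged note: `op.ref`, when set, is an ndarray-accepting reference implementation
# (typically a NumPy callable), e.g. a hypothetical entry could look roughly like
# `OpInfo('foo', ref=np.foo, ...)`; the real entries live in
# torch.testing._internal.common_methods_invocations.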
_ops_and_refs = op_db + python_ref_db
# Tests that apply to all operators and aren't related to any particular
# system
@skipIfSlowGradcheckEnv
class TestCommon(TestCase):
exact_dtype = True
# Verifies, on teardown, that no OpInfo is still using dynamic dtypes in CI
@classmethod
def tearDownClass(cls):
super().tearDownClass()
if IS_CI:
err_msg = (
"The operator(s) below is(are) using dynamic_dtypes in the OpInfo entries."
"This is OK for testing, but be sure to set the dtypes manually before landing your PR!"
)
# Assure no opinfo entry has dynamic_dtypes
filtered_ops = list(filter(opinfo.utils.is_dynamic_dtype_set, op_db))
for op in filtered_ops:
fmt_str = opinfo.utils.str_format_dynamic_dtype(op)
err_msg += "\n" + fmt_str
assert len(filtered_ops) == 0, err_msg
# Validates that each OpInfo works correctly on different CUDA devices
@onlyCUDA
@deviceCountAtLeast(2)
@ops(op_db, allowed_dtypes=(torch.float32, torch.long))
def test_multiple_devices(self, devices, dtype, op):
for cuda_device_str in devices:
cuda_device = torch.device(cuda_device_str)
# NOTE: only tests on first sample
samples = op.sample_inputs(cuda_device, dtype)
sample = first_sample(self, samples)
result = op(sample.input, *sample.args, **sample.kwargs)
if isinstance(result, torch.Tensor):
self.assertTrue(result.device == cuda_device)
elif is_iterable_of_tensors(result):
self.assertTrue(all(map(lambda t: t.device == cuda_device, result)))
else:
self.skipTest(
"Skipped! Only supports single tensor or iterable of tensor outputs."
)
# Tests that the function and its (ndarray-accepting) reference produce the same
# values on the tensors from sample_inputs func for the corresponding op.
# This test runs in double and complex double precision because
# NumPy does computation internally using double precision for many functions
# resulting in possible equality check failures.
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@onlyNativeDeviceTypes
@suppress_warnings
@ops(_ref_test_ops, allowed_dtypes=(torch.float64, torch.long, torch.complex128))
def test_numpy_ref(self, device, dtype, op):
try:
# Sets the default dtype to NumPy's default dtype of double
cur_default = torch.get_default_dtype()
torch.set_default_dtype(torch.double)
for sample_input in op.reference_inputs(device, dtype):
self.compare_with_reference(
op, op.ref, sample_input, exact_dtype=(dtype is not torch.long)
)
finally:
torch.set_default_dtype(cur_default)
# Tests that experimental Python References can propagate shape, dtype,
# and device metadata properly.
# See https://github.com/pytorch/pytorch/issues/78050 for a discussion of stride propagation.
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@onlyNativeDeviceTypes
@ops(python_ref_db)
def test_python_ref_meta(self, device, dtype, op):
mode = torch._prims.get_prim_fake_mode()
def _to_tensormeta(x):
if isinstance(x, torch.Tensor):
out = FakeTensor.from_tensor(x, mode)
return out
return x
# TODO: iterate over requires_grad true/false
for sample in op.reference_inputs(device, dtype, requires_grad=False):
result = op(sample.input, *sample.args, **sample.kwargs)
meta_sample = sample.transform(_to_tensormeta)
try:
with enable_torch_dispatch_mode(mode):
meta_result = op(meta_sample.input, *meta_sample.args, **meta_sample.kwargs)
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
continue
if isinstance(result, torch.Tensor):
self.assertTrue(isinstance(meta_result, FakeTensor))
prims.utils.compare_tensor_meta(result, meta_result)
elif isinstance(result, Sequence):
for a, b in zip(result, meta_result):
if isinstance(a, torch.Tensor) or isinstance(b, torch.Tensor):
self.assertTrue(isinstance(b, FakeTensor))
prims.utils.compare_tensor_meta(a, b)
def _ref_test_helper(self, ctx, device, dtype, op, skip_zero_numel=False, skip_zero_dim=False):
# NOTE: this test works by comparing the reference result with the torch result
ex = None
for sample in op.reference_inputs(device, dtype, requires_grad=False):
if isinstance(sample.input, torch.Tensor) and sample.input.numel() == 0 and skip_zero_numel:
continue
if isinstance(sample.input, torch.Tensor) and sample.input.ndim == 0 and skip_zero_dim:
continue
with ctx():
ref_result = op(sample.input, *sample.args, **sample.kwargs)
torch_result = op.torch_opinfo(sample.input, *sample.args, **sample.kwargs)
for a, b in zip(tree_flatten(ref_result)[0], tree_flatten(torch_result)[0]):
if isinstance(a, torch.Tensor) or isinstance(b, torch.Tensor):
prims.utils.compare_tensor_meta(a, b)
if getattr(op, 'validate_view_consistency', True):
self.assertEqual(a._is_view(), b._is_view())
# Computes the dtype in which the more precise computation would occur
precise_dtype = torch.bool
if prims.utils.is_integer_dtype(dtype):
# Note: bool and integer dtypes do not have more
# precise dtypes -- they simply must be close
precise_dtype = dtype
if prims.utils.is_float_dtype(dtype):
precise_dtype = torch.double
if prims.utils.is_complex_dtype(dtype):
precise_dtype = torch.cdouble
# Checks if the results are close
try:
self.assertEqual(
ref_result,
torch_result,
exact_stride=False,
exact_device=True,
exact_layout=True,
exact_is_coalesced=True,
)
except AssertionError as e:
# Raises the error if the comparison in the more precise dtype
# wouldn't be any different
if dtype is precise_dtype:
raise e
ex = e
# Goes to next sample if these results are close
if not ex:
continue
# If the results are not close, checks that the
# reference is more accurate than the torch op
def _make_precise(x):
if isinstance(x, torch.dtype):
return precise_dtype
if isinstance(x, torch.Tensor) and x.dtype is dtype:
return x.to(precise_dtype)
return x
precise_sample = sample.transform(_make_precise)
precise_result = op.torch_opinfo(precise_sample.input, *precise_sample.args, **precise_sample.kwargs)
def _distance(a, b):
# Special-cases boolean comparisons
if prims.utils.is_boolean_dtype(a.dtype):
assert b.dtype is torch.bool
return (a ^ b).sum()
same = (a == b)
if prims.utils.is_float_dtype(a.dtype) or prims.utils.is_complex_dtype(a.dtype):
same = torch.logical_or(same, torch.logical_and(torch.isnan(a), torch.isnan(b)))
actual_error = torch.where(same, 0, torch.abs(a - b)).sum()
return actual_error
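# Hedged worked example of the metric above: for a = tensor([1., 2., nan]) and
# b = tensor([1., 2.5, nan]), the equal and NaN positions contribute 0 and the
# mismatch contributes |2. - 2.5|, so _distance(a, b) == 0.5.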
ref_distance = 0
for a, b in zip(tree_flatten(ref_result)[0], tree_flatten(precise_result)[0]):
ref_distance = ref_distance + _distance(a, b)
torch_distance = 0
for a, b in zip(tree_flatten(torch_result)[0], tree_flatten(precise_result)[0]):
torch_distance = torch_distance + _distance(a, b)
# TODO: consider adding some tolerance to this comparison
msg = f"Reference result was farther ({ref_distance}) from the precise " \
f"computation than the torch result was ({torch_distance})!"
self.assertTrue(ref_distance <= torch_distance, msg=msg)
# Reports numerical accuracy discrepancies
if ex is not None:
msg = "Test passed because the reference was more accurate than the torch operator."
warnings.warn(msg)
# Tests that experimental Python References perform the same computation
# as the operators they reference, when operator calls in the torch
# namespace are remapped to the refs namespace (torch.foo becomes refs.foo).
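# Hedged sketch of the remapping described above (illustrative only, relying on the
# behavior stated in the comment, not re-verified here):
#
#     with TorchRefsMode(strict=True):
#         y = torch.add(a, b)   # routed to torch._refs.add while the mode is active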
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@onlyNativeDeviceTypes
@ops(python_ref_db)
def test_python_ref(self, device, dtype, op):
# In this test, primTorch refs call into the refs namespace
# For example, a ref with torch.foo in it will call refs.foo instead
# Direct calls to refs and prims are not affected
self._ref_test_helper(lambda: TorchRefsMode(strict=True), device, dtype, op)
# Tests that experimental Python References perform the same computation
# as the operators they reference, when operator calls in the torch
# namespace are preserved (torch.foo remains torch.foo).
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@onlyNativeDeviceTypes
@ops(python_ref_db)
def test_python_ref_torch_fallback(self, device, dtype, op):
# In this test, refs call into the torch namespace (after the initial invocation)
# For example, a ref with torch.foo in it will call torch.foo instead of refs.foo
# Direct calls to refs and prims are not translated
self._ref_test_helper(contextlib.nullcontext, device, dtype, op)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@onlyCUDA
@skipCUDAIfRocm
@ops(python_ref_db)
@parametrize('executor', ['aten', 'nvfuser'])
def test_python_ref_executor(self, device, dtype, op, executor):
# TODO: Not all dtypes are supported with nvfuser
from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map
if executor == "nvfuser" and dtype not in _torch_dtype_to_nvfuser_dtype_map:
raise unittest.SkipTest(f"nvfuser doesn't support dtype {dtype}")
# nvFuser tests are rather slow so we only run int32 and float32 types
if executor == "nvfuser" and dtype not in [torch.int32, torch.float32]:
raise unittest.SkipTest("skipped for speed")
if executor == "nvfuser" and not op.supports_nvfuser:
raise unittest.SkipTest(f"{op.name} doesn't support nvfuser")
# nvFuser doesn't support reduction operations on 0-dim tensors yet
skip_zero_dim = False
if executor == "nvfuser" and isinstance(op, ReductionPythonRefInfo):
skip_zero_dim = True
# skip zero-dim tensors for some composites of reduction operations
normalization_ops = ["_refs.softmax", "_refs.logsumexp", "_refs.log_softmax"]
if executor == "nvfuser" and op.name in normalization_ops:
skip_zero_dim = True
from torch._prims.executor import make_traced
from copy import copy
op = copy(op)
executor = "strictly_nvfuser" if executor == "nvfuser" else executor
op.op = partial(make_traced(op.op), executor=executor)
self._ref_test_helper(
contextlib.nullcontext,
device,
dtype,
op,
skip_zero_numel=("nvfuser" in executor), # nvfuser doesn't support zero-sized tensors
skip_zero_dim=skip_zero_dim,
)
@skipMeta
@onlyNativeDeviceTypes
@ops([op for op in op_db if op.error_inputs_func is not None], dtypes=OpDTypes.none)
def test_errors(self, device, op):
error_inputs = op.error_inputs(device)
for ei in error_inputs:
si = ei.sample_input
with self.assertRaisesRegex(ei.error_type, ei.error_regex):
op(si.input, *si.args, **si.kwargs)
@skipMeta
@onlyNativeDeviceTypes
@ops([op for op in python_ref_db if op.error_inputs_func is not None], dtypes=OpDTypes.none)
def test_python_ref_errors(self, device, op):
mode = torch._prims.get_prim_fake_mode()
def _to_tensormeta(x):
if isinstance(x, torch.Tensor):
return FakeTensor.from_tensor(x, mode)
return x
error_inputs = op.error_inputs(device)
for ei in error_inputs:
si = ei.sample_input
meta_sample = si.transform(_to_tensormeta)
# TODO: match strings
with self.assertRaisesRegex(ei.error_type, ""):
op(meta_sample.input, *meta_sample.args, **meta_sample.kwargs)
# Tests that the function produces the same result when called with
# noncontiguous tensors.
# TODO: get working with Windows by addressing failing operators
# TODO: get working with ASAN by addressing failing operators
@unittest.skipIf(IS_WINDOWS, "Skipped under Windows")
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@onlyNativeDeviceTypes
@suppress_warnings
@ops(op_db, allowed_dtypes=(torch.float32, torch.long, torch.complex64))
def test_noncontiguous_samples(self, device, dtype, op):
test_grad = dtype in op.supported_backward_dtypes(torch.device(device).type)
sample_inputs = op.sample_inputs(device, dtype, requires_grad=test_grad)
for sample_input in sample_inputs:
t_inp, t_args, t_kwargs = (
sample_input.input,
sample_input.args,
sample_input.kwargs,
)
noncontig_sample = sample_input.noncontiguous()
n_inp, n_args, n_kwargs = (
noncontig_sample.input,
noncontig_sample.args,
noncontig_sample.kwargs,
)
# Verifies that sample input tensors have no grad or history
sample_tensor = t_inp if isinstance(t_inp, torch.Tensor) else t_inp[0]
assert sample_tensor.grad is None
assert sample_tensor.grad_fn is None
# validates forward
expected = op(t_inp, *t_args, **t_kwargs)
actual = op(n_inp, *n_args, **n_kwargs)
self.assertEqual(actual, expected)
# Validate backward
# Short-circuits if the op doesn't support grad in this device x dtype
if not test_grad:
continue
expected = sample_input.output_process_fn_grad(expected)
actual = sample_input.output_process_fn_grad(actual)
if isinstance(expected, torch.Tensor):
grad_for_expected = torch.randn_like(expected)
grad_for_actual = noncontiguous_like(grad_for_expected)
elif isinstance(expected, Sequence):
# Filter output elements that do not require grad
expected = [
t
for t in expected
if isinstance(t, torch.Tensor) and t.requires_grad
]
actual = [
n for n in actual if isinstance(n, torch.Tensor) and n.requires_grad
]
grad_for_expected = [torch.randn_like(t) for t in expected]
grad_for_actual = [noncontiguous_like(n) for n in grad_for_expected]
else:
# Nothing to do if it returns a scalar or things like that
continue
# Concatenate inputs into a tuple
t_inputs = (
(t_inp,) + t_args
if isinstance(t_inp, torch.Tensor)
else tuple(t_inp) + t_args
)
n_inputs = (
(n_inp,) + n_args
if isinstance(n_inp, torch.Tensor)
else tuple(n_inp) + n_args
)
# Filter the elements that are tensors requiring grad
t_input_tensors = [
t for t in t_inputs if isinstance(t, torch.Tensor) and t.requires_grad
]
n_input_tensors = [
n for n in n_inputs if isinstance(n, torch.Tensor) and n.requires_grad
]
self.assertEqual(len(t_input_tensors), len(n_input_tensors))
# Some functions may not use all the inputs to generate gradients. One of the
# few examples of this "odd" behaviour is F.hinge_embedding_loss
t_grads = torch.autograd.grad(
expected, t_input_tensors, grad_for_expected, allow_unused=True
)
n_grads = torch.autograd.grad(
actual, n_input_tensors, grad_for_actual, allow_unused=True
)
msg = "Got different gradients for contiguous / non-contiguous inputs wrt input {}."
for i, (t, n) in enumerate(zip(t_grads, n_grads)):
self.assertEqual(t, n, msg=msg.format(i))
# Separates one case from the following test_out because many ops don't yet
# implement the warning for an incorrectly sized out parameter properly
# Case tested here:
# - out= with the correct dtype and device, but the wrong shape
@ops(_ops_and_refs, dtypes=OpDTypes.none)
def test_out_warning(self, device, op):
# Prefers running in float32 but falls back to the first listed supported dtype
supported_dtypes = op.supported_dtypes(self.device_type)
if len(supported_dtypes) == 0:
self.skipTest("Skipped! Op has not supported dtypes on this device.")
dtype = (
torch.float32
if torch.float32 in supported_dtypes
else list(supported_dtypes)[0]
)
samples = op.sample_inputs(device, dtype)
for sample in samples:
# calls it normally to get the expected result
expected = op(sample.input, *sample.args, **sample.kwargs)
op_out = partial(op, sample.input, *sample.args, **sample.kwargs)
# Short-circuits if output is not a single tensor or an
# iterable of tensors
if not isinstance(expected, torch.Tensor) and not is_iterable_of_tensors(
expected, include_empty=True
):
self.skipTest(
"Skipped! Only supports single tensor or iterable of tensor outputs."
)
# Validates the op doesn't support out if it claims not to
if not op.supports_out:
with self.assertRaises(Exception):
assert op_out(out=expected) != NotImplemented
return
# A wrapper around map that works with single tensors and always
# instantiates the map. Used below to apply transforms to
# single tensor and iterable tensor outputs.
def _apply_out_transform(fn, out):
if isinstance(out, torch.Tensor):
return fn(out)
# assumes (see above) that out is an iterable of tensors
return tuple(map(fn, out))
# Extracts strides from a tensor or iterable of tensors into a tuple
def _extract_strides(out):
if isinstance(out, torch.Tensor):
return (out.stride(),)
# assumes (see above) that out is an iterable of tensors
return tuple(map(lambda t: t.stride(), out))
# Extracts data pointers from a tensor or iterable of tensors into a tuple
# NOTE: only extracts on the CPU and CUDA device types since some
# device types don't have storage
def _extract_data_ptrs(out):
if self.device_type != "cpu" and self.device_type != "cuda":
return ()
if isinstance(out, torch.Tensor):
return (out.data_ptr(),)
# assumes (see above) that out is an iterable of tensors
return tuple(map(lambda t: t.data_ptr(), out))
@suppress_warnings
def _compare_out(transform, *, compare_strides_and_data_ptrs=True):
out = _apply_out_transform(transform, expected)
original_strides = _extract_strides(out)
original_ptrs = _extract_data_ptrs(out)
op_out(out=out)
final_strides = _extract_strides(out)
final_ptrs = _extract_data_ptrs(out)
self.assertEqual(expected, out)
if compare_strides_and_data_ptrs:
stride_msg = "Strides are not the same! Original strides were {0} and strides are now {1}".format(
original_strides, final_strides
)
self.assertEqual(original_strides, final_strides, msg=stride_msg)
self.assertEqual(original_ptrs, final_ptrs)
# Case Zero: out= with the correct dtype and device, but the wrong shape
# Expected behavior: if nonempty, resize with a warning.
def _case_zero_transform(t):
wrong_shape = list(t.shape)
if len(wrong_shape) == 0:
# Handles scalar tensor case (empty list)
wrong_shape = [2]
else:
wrong_shape[-1] = wrong_shape[-1] + 1
return make_tensor(wrong_shape, dtype=t.dtype, device=t.device)
# Verifies the out values are correct
_compare_out(_case_zero_transform, compare_strides_and_data_ptrs=False)
# Additionally validates that the appropriate warning is thrown if a nonempty
# tensor is resized.
def _any_nonempty(out):
if isinstance(out, torch.Tensor):
return out.numel() > 0
return any(x.numel() > 0 for x in out)
out = _apply_out_transform(_case_zero_transform, expected)
msg_fail = "Resized a non-empty tensor but did not warn about it."
if _any_nonempty(out):
with self.assertWarnsRegex(
UserWarning, "An output with one or more elements", msg=msg_fail
):
op_out(out=out)
# Validates ops implement the correct out= behavior
# See https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
# for a description of the correct behavior
# Validates the following cases:
# - Case 0: out has the correct shape, dtype, and device but is full of extremal values
# - Case 1: out has the correct shape, dtype, and device but is noncontiguous
# - Case 2: out has the correct dtype and device, but is zero elements
# - Case 3: out has the correct shape and dtype, but is on a different device type
# - Case 4: out has the correct shape and device, but a dtype that cannot
# "safely" cast to
#
# Case 3 and 4 are slightly different when the op is a factory function:
# - if device, dtype are NOT passed, any combination of dtype/device should be OK for out
# - if device, dtype are passed, device and dtype should match
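# Hedged illustration of two of the cases above (illustrative values only):
#
#     a, b = torch.randn(3), torch.randn(3)
#     out = torch.empty(0)            # Case 2: zero-element out= is resized silently
#     torch.add(a, b, out=out)
#     out_long = torch.empty(3, dtype=torch.long)
#     torch.add(a, b, out=out_long)   # Case 4: float result into long out= -> RuntimeError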
@ops(_ops_and_refs, dtypes=OpDTypes.any_one)
def test_out(self, device, dtype, op):
# Prefers running in float32 but falls back to the first listed supported dtype
samples = op.sample_inputs(device, dtype)
for sample in samples:
# calls it normally to get the expected result
expected = op(sample.input, *sample.args, **sample.kwargs)
op_out = partial(op, sample.input, *sample.args, **sample.kwargs)
# Short-circuits if output is not a single tensor or an
# iterable of tensors
if not isinstance(expected, torch.Tensor) and not is_iterable_of_tensors(
expected, include_empty=True
):
self.skipTest(
"Skipped! Only supports single tensor or iterable of tensor outputs."
)
# Validates the op doesn't support out if it claims not to
if not op.supports_out:
with self.assertRaises(Exception):
assert op_out(out=expected) != NotImplemented
return
# A wrapper around map that works with single tensors and always
# instantiates the map. Used below to apply transforms to
# single tensor and iterable tensor outputs.
def _apply_out_transform(fn, out):
if isinstance(out, torch.Tensor):
return fn(out)
# assumes (see above) that out is an iterable of tensors
return tuple(map(fn, out))
# Extracts strides from a tensor or iterable of tensors into a tuple
def _extract_strides(out):
if isinstance(out, torch.Tensor):
return (out.stride(),)
# assumes (see above) that out is an iterable of tensors
return tuple(map(lambda t: t.stride(), out))
# Extracts data pointers from a tensor or iterable of tensors into a tuple
# NOTE: only extracts on the CPU and CUDA device types since some
# device types don't have storage
def _extract_data_ptrs(out):
if self.device_type != "cpu" and self.device_type != "cuda":
return ()
if isinstance(out, torch.Tensor):
return (out.data_ptr(),)
# assumes (see above) that out is an iterable of tensors
return tuple(map(lambda t: t.data_ptr(), out))
def _compare_out(transform, *, compare_strides_and_data_ptrs=True):
out = _apply_out_transform(transform, expected)
original_strides = _extract_strides(out)
original_ptrs = _extract_data_ptrs(out)
op_out(out=out)
final_strides = _extract_strides(out)
final_ptrs = _extract_data_ptrs(out)
self.assertEqual(expected, out)
if compare_strides_and_data_ptrs:
stride_msg = "Strides are not the same! Original strides were {0} and strides are now {1}".format(
original_strides, final_strides
)
self.assertEqual(original_strides, final_strides, msg=stride_msg)
self.assertEqual(original_ptrs, final_ptrs)
# Case 0: out= with the correct shape, dtype, and device
# but NaN values for floating point and complex tensors, and
# maximum values for integer tensors.
# Expected behavior: out= values have no effect on the computation.
def _case_zero_transform(t):
try:
info = torch.iinfo(t.dtype)
return torch.full_like(t, info.max)
except TypeError as te:
# for non-integer types fills with NaN
return torch.full_like(t, float("nan"))
_compare_out(_case_zero_transform)
# Case 1: out= with the correct shape, dtype, and device,
# but noncontiguous.
# Expected behavior: strides are respected and `out` storage is not changed.
def _case_one_transform(t):
return make_tensor(
t.shape, dtype=t.dtype, device=t.device, noncontiguous=True
)
_compare_out(_case_one_transform)
# Case 2: out= with the correct dtype and device, but has no elements.
# Expected behavior: resize without warning.
def _case_two_transform(t):
return make_tensor((0,), dtype=t.dtype, device=t.device)
_compare_out(_case_two_transform, compare_strides_and_data_ptrs=False)
# Also validates that no warning is thrown when this out is resized
out = _apply_out_transform(_case_two_transform, expected)
with warnings.catch_warnings(record=True) as caught:
warnings.simplefilter("always")
op_out(out=out)
# Verifies no warning is a resize warning
for w in caught:
if "An output with one or more elements" in str(w.message):
self.fail(
"Resizing an out= argument with no elements threw a resize warning!"
)
# Case 3: out= with correct shape and dtype, but wrong device.
wrong_device = None
if torch.device(device).type != "cpu":
wrong_device = "cpu"
elif torch.cuda.is_available():
wrong_device = "cuda"
factory_fn_msg = (
"\n\nNOTE: If your op is a factory function (i.e., it accepts TensorOptions) you should mark its "
"OpInfo with `is_factory_function=True`."
)
if wrong_device is not None:
def _case_three_transform(t):
return make_tensor(t.shape, dtype=t.dtype, device=wrong_device)
out = _apply_out_transform(_case_three_transform, expected)
if op.is_factory_function and sample.kwargs.get("device", None) is None:
op_out(out=out)
else:
msg_fail = (
f"Expected RuntimeError when calling with input.device={device} and out.device={wrong_device}."
) + factory_fn_msg
with self.assertRaises(RuntimeError, msg=msg_fail):
op_out(out=out)
# Case 4: out= with correct shape and device, but a dtype
# that output cannot be "safely" cast to (long).
# Expected behavior: error.
# NOTE: this case is filtered by dtype since some ops produce
# bool tensors, for example, which can be safely cast to any
# dtype. It is applied when a single tensor output has a floating point or
# complex dtype, or when an op returns multiple tensors and at least one of
# them has a floating point or complex dtype.
_dtypes = floating_and_complex_types_and(torch.float16, torch.bfloat16)
if (
isinstance(expected, torch.Tensor)
and expected.dtype in _dtypes
or (
not isinstance(expected, torch.Tensor)
and any(t.dtype in _dtypes for t in expected)
)
):
def _case_four_transform(t):
return make_tensor(t.shape, dtype=torch.long, device=t.device)
out = _apply_out_transform(_case_four_transform, expected)
msg_fail = "Expected RuntimeError when doing an unsafe cast!"
msg_fail = (
msg_fail
if not isinstance(expected, torch.Tensor)
else (
"Expected RuntimeError when doing an unsafe cast from a result of dtype "
f"{expected.dtype} into an out= with dtype torch.long"
)
) + factory_fn_msg
if op.is_factory_function and sample.kwargs.get("dtype", None) is None:
op_out(out=out)
else:
with self.assertRaises(RuntimeError, msg=msg_fail):
op_out(out=out)
# Tests that the forward and backward passes of operations produce the
# same values for the cross-product of op variants (method, inplace)
# against eager's gold standard op function variant
@_variant_ops(op_db)
def test_variant_consistency_eager(self, device, dtype, op):
# Acquires variants (method variant, inplace variant, operator variant, inplace_operator variant, aliases)
method = op.method_variant
inplace = op.inplace_variant
operator = op.operator_variant
inplace_operator = op.inplace_operator_variant
# list of all inplace ops: inplace variant + alias inplace variants if they exist
inplace_ops = [inplace, inplace_operator]
variants = [method, inplace, operator, inplace_operator]
operators = [operator, inplace_operator]
for a_op in op.aliases:
variants.append(a_op.op)
variants.append(a_op.method_variant)
variants.append(a_op.inplace_variant)
inplace_ops.append(a_op.inplace_variant)
inplace_variants = tuple(filter(None, inplace_ops))
variants = tuple(filter(None, variants))
operators = tuple(filter(None, operators))
_requires_grad = dtype in op.supported_backward_dtypes(
torch.device(device).type
)
include_conjugated_inputs = op.test_conjugated_samples and dtype.is_complex
samples = op.sample_inputs(
device,
dtype,
requires_grad=_requires_grad,
include_conjugated_inputs=include_conjugated_inputs,
)
samples = list(samples)
def _test_consistency_helper(samples, variants):
for sample in samples:
# TODO: Check grad for all Tensors requiring grad if sample.input is TensorList
tensor = (
sample.input
if isinstance(sample.input, torch.Tensor)
else sample.input[0]
)
# Computes function forward and backward values
tensor.grad = None
expected_forward = op(sample.input, *sample.args, **sample.kwargs)
expected_grad = None
output_process_fn_grad = (
sample.output_process_fn_grad
if sample.output_process_fn_grad
else lambda x: x
)
# Skips inplace variants if the output dtype is not the same as
# the input dtype
skip_inplace = False
if (
isinstance(expected_forward, torch.Tensor)
and expected_forward.dtype is not tensor.dtype
):
skip_inplace = True
# TODO: backward consistency only supported for single tensor outputs
# TODO: backward consistency only checked on sample.input, not all
# tensor inputs
# TODO: update to handle checking grads of all tensor inputs as
# derived from each tensor output
if isinstance(
expected_forward, torch.Tensor
) and dtype in op.supported_backward_dtypes(torch.device(device).type):
output_process_fn_grad(expected_forward).sum().backward()
expected_grad = tensor.grad
# Test eager consistency
for variant in variants:
# Skips inplace ops
if variant in inplace_ops and skip_inplace:
continue
# Compares variant's forward
# Note: copies the to-be-modified input when testing the inplace variant
tensor.grad = None
cloned = (
clone_input_helper(sample.input)
if variant in inplace_ops
else sample.input
)
if variant in inplace_ops and sample.broadcasts_input:
with self.assertRaises(
RuntimeError,
msg=(
"inplace variant either incorrectly allowed "
"resizing or you have marked the sample {}"
" incorrectly with `broadcasts_self=True".format(
sample.summary()
)
),
):
variant_forward = variant(
cloned, *sample.args, **sample.kwargs
)
continue
if variant in operators and sample.kwargs:
# skip samples with kwargs for operator variants
continue
variant_forward = variant(cloned, *sample.args, **sample.kwargs)
self.assertEqual(expected_forward, variant_forward)
# Compares variant's backward
if expected_grad is not None and (
variant not in inplace_ops or op.supports_inplace_autograd
):
output_process_fn_grad(variant_forward).sum().backward()
self.assertEqual(expected_grad, tensor.grad)
_test_consistency_helper(samples, variants)
def _test_inplace_preserve_storage(samples, variants):
for sample in samples:
# Skips inplace variants if the output dtype is not the same as
# the input dtype
expected_forward = op(sample.input, *sample.args, **sample.kwargs)
tensor = (
sample.input
if isinstance(sample.input, torch.Tensor)
else sample.input[0]
)
skip_inplace = False
if (
isinstance(expected_forward, torch.Tensor)
and expected_forward.dtype is not tensor.dtype
):
skip_inplace = True
if skip_inplace:
return
for variant in variants:
cloned = (
clone_input_helper(sample.input)
if variant in inplace_ops
else sample.input
)
inp_tensor = (
cloned if isinstance(cloned, torch.Tensor) else cloned[0]
)
data_ptr = inp_tensor.data_ptr()
if variant in operators and sample.kwargs:
# skip samples with kwargs for operator variants
continue
variant_forward = variant(cloned, *sample.args, **sample.kwargs)
# TODO Support non-tensor outputs if they exist for inplace ops
if isinstance(variant_forward, torch.Tensor):
self.assertEqual(
data_ptr, variant_forward.data_ptr(), atol=0, rtol=0
)
else:
self.assertTrue(
False,
"Non-tensor outputs for inplace ops are not supported",
)
if len(inplace_ops) > 0:
inplace_samples = list(
filter(lambda sample: not sample.broadcasts_input, samples)
)
_test_inplace_preserve_storage(inplace_samples, inplace_variants)
# Reference testing for operations in complex32 against complex64.
# NOTE: We test against complex64 as NumPy doesn't have a complex32 equivalent dtype.
@ops(op_db, allowed_dtypes=(torch.complex32,))
def test_complex_half_reference_testing(self, device, dtype, op):
if not op.supports_dtype(torch.complex32, device):
self.skipTest("Does not support complex32")
for sample in op.sample_inputs(device, dtype):
actual = op(sample.input, *sample.args, **sample.kwargs)
# sample.transform applies the lambda to torch.Tensor and torch.dtype.
# However, we only want to apply it to Tensors with dtype `torch.complex32`.
transformed_sample = sample.transform(lambda x: x.to(torch.complex64) if isinstance(
x, torch.Tensor) and x.dtype is torch.complex32 else x)
expected = op(
transformed_sample.input,
*transformed_sample.args,
**transformed_sample.kwargs,
)
# Since the range of chalf is much smaller than that of cfloat,
# we get `inf`s easily (e.g. with `pow`, `exp`),
# so we cast `cfloat` back to `chalf`.
expected = tree_map(lambda x: x.to(torch.complex32) if isinstance(
x, torch.Tensor) and x.dtype is torch.complex64 else x, expected)
# `exact_dtype` is False because for ops like real, imag
# we get different dtypes for `actual` and `expected`
# `chalf` input -> `half` output
# `cfloat` input -> `float` output
self.assertEqual(actual, expected, exact_dtype=False)
@ops(op_db, allowed_dtypes=(torch.bool,))
@unittest.skipIf(TEST_WITH_UBSAN, "Test uses undefined behavior")
def test_non_standard_bool_values(self, device, dtype, op):
# Test boolean values other than 0x00 and 0x01 (gh-54789)
def convert_boolean_tensors(x):
if not isinstance(x, torch.Tensor) or x.dtype != torch.bool:
return x
# Map False -> 0 and True -> a random value in [2, 255)
true_vals = torch.randint(2, 255, x.shape, dtype=torch.uint8, device=x.device)
false_vals = torch.zeros((), dtype=torch.uint8, device=x.device)
x_int = torch.where(x, true_vals, false_vals)
ret = x_int.view(torch.bool)
self.assertEqual(ret, x)
return ret
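# Hedged illustration of the reinterpretation above: viewing the bytes of
# tensor([0, 2, 255], dtype=torch.uint8) as torch.bool yields "non-standard" True
# values backed by the bytes 2 and 255; the assertEqual above relies on such values
# still comparing equal to tensor([False, True, True]).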
for sample in op.sample_inputs(device, dtype):
expect = op(sample.input, *sample.args, **sample.kwargs)
transformed = sample.transform(convert_boolean_tensors)
actual = op(transformed.input, *transformed.args, **transformed.kwargs)
self.assertEqual(expect, actual)
# Validates that each OpInfo specifies its forward and backward dtypes
# correctly for CPU and CUDA devices
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipMeta
@onlyNativeDeviceTypes
@ops(ops_and_refs, dtypes=OpDTypes.none)
def test_dtypes(self, device, op):
# Check complex32 support only if the op claims to support it.
# TODO: Once the complex32 support is better, we should add check for complex32 unconditionally.
device_type = torch.device(device).type
include_complex32 = (
(torch.complex32,)
if op.supports_dtype(torch.complex32, device_type)
else ()
)
# dtypes to try to backward in
allowed_backward_dtypes = floating_and_complex_types_and(
*((torch.half, torch.bfloat16) + include_complex32)
)
# lists for (un)supported dtypes
supported_dtypes = set()
unsupported_dtypes = set()
supported_backward_dtypes = set()
unsupported_backward_dtypes = set()
def unsupported(dtype):
unsupported_dtypes.add(dtype)
if dtype in allowed_backward_dtypes:
unsupported_backward_dtypes.add(dtype)
for dtype in all_types_and_complex_and(
*((torch.half, torch.bfloat16, torch.bool) + include_complex32)
):
# tries to acquire samples - failure indicates lack of support
requires_grad = dtype in allowed_backward_dtypes
try:
samples = tuple(
op.sample_inputs(device, dtype, requires_grad=requires_grad)
)
except Exception as e:
unsupported(dtype)
continue
for sample in samples:
# tries to call operator with the sample - failure indicates
# lack of support
try:
result = op(sample.input, *sample.args, **sample.kwargs)
supported_dtypes.add(dtype)
except Exception as e:
# NOTE: some ops will fail in forward if their inputs
# require grad but they don't support computing the gradient
# in that type! This is a bug in the op!
unsupported(dtype)
continue
# Checks for backward support in the same dtype, if the input has
# one or more tensors requiring grad
def _tensor_requires_grad(x):
if isinstance(x, dict):
for k, v in x.items():
if _tensor_requires_grad(v):
return True
if isinstance(x, (list, tuple)):
for a in x:
if _tensor_requires_grad(a):
return True
if isinstance(x, torch.Tensor) and x.requires_grad:
return True
return False
requires_grad = _tensor_requires_grad(sample.input) \
or _tensor_requires_grad(sample.args) or _tensor_requires_grad(sample.kwargs)
if not requires_grad:
continue
try:
result = sample.output_process_fn_grad(result)
if isinstance(result, torch.Tensor):
backward_tensor = result
elif isinstance(result, Sequence) and isinstance(
result[0], torch.Tensor
):
backward_tensor = result[0]
else:
continue
# Note: this grad may not have the same dtype as dtype
# For functions like complex (float -> complex) or abs
# (complex -> float) the grad tensor will have a
# different dtype than the input.
# For simplicity, this is still modeled as these ops
# supporting grad in the input dtype.
grad = torch.randn_like(backward_tensor)
backward_tensor.backward(grad)
supported_backward_dtypes.add(dtype)
except Exception as e:
unsupported_backward_dtypes.add(dtype)
# Checks that dtypes are listed correctly and generates an informative
# error message
supported_forward = supported_dtypes - unsupported_dtypes
partially_supported_forward = supported_dtypes & unsupported_dtypes
unsupported_forward = unsupported_dtypes - supported_dtypes
supported_backward = supported_backward_dtypes - unsupported_backward_dtypes
partially_supported_backward = (
supported_backward_dtypes & unsupported_backward_dtypes
)
unsupported_backward = unsupported_backward_dtypes - supported_backward_dtypes
device_type = torch.device(device).type
claimed_forward = set(op.supported_dtypes(device_type))
supported_but_unclaimed_forward = supported_forward - claimed_forward
claimed_but_unsupported_forward = claimed_forward & unsupported_forward
claimed_backward = set(op.supported_backward_dtypes(device_type))
supported_but_unclaimed_backward = supported_backward - claimed_backward
claimed_but_unsupported_backward = claimed_backward & unsupported_backward
# Partially supporting a dtype is not an error, but we print a warning
if (len(partially_supported_forward) + len(partially_supported_backward)) > 0:
msg = "Some dtypes for {0} on device type {1} are only partially supported!\n".format(
op.name, device_type
)
if len(partially_supported_forward) > 0:
msg = (
msg
+ "The following dtypes only worked on some samples during forward: {0}.\n".format(
partially_supported_forward
)
)
if len(partially_supported_backward) > 0:
msg = (
msg
+ "The following dtypes only worked on some samples during backward: {0}.\n".format(
partially_supported_backward
)
)
print(msg)
if (
len(supported_but_unclaimed_forward)
+ len(claimed_but_unsupported_forward)
+ len(supported_but_unclaimed_backward)
+ len(claimed_but_unsupported_backward)
) == 0:
return
# Reference operators often support additional dtypes, and that's OK
if op in python_ref_db:
if (
len(claimed_but_unsupported_forward)
+ len(claimed_but_unsupported_backward)
) == 0:
return
# Generates error msg
msg = "The supported dtypes for {0} on device type {1} are incorrect!\n".format(
op.name, device_type
)
if len(supported_but_unclaimed_forward) > 0:
msg = (
msg
+ "The following dtypes worked in forward but are not listed by the OpInfo: {0}.\n".format(
supported_but_unclaimed_forward
)
)
if len(supported_but_unclaimed_backward) > 0:
msg = (
msg
+ "The following dtypes worked in backward but are not listed by the OpInfo: {0}.\n".format(
supported_but_unclaimed_backward
)
)
if len(claimed_but_unsupported_forward) > 0:
msg = (
msg
+ "The following dtypes did not work in forward but are listed by the OpInfo: {0}.\n".format(
claimed_but_unsupported_forward
)
)
if len(claimed_but_unsupported_backward) > 0:
msg = (
msg
+ "The following dtypes did not work in backward but are listed by the OpInfo: {0}.\n".format(
claimed_but_unsupported_backward
)
)
self.fail(msg)
class TestCompositeCompliance(TestCase):
# Checks if the operator (if it is composite) is written to support most
# backends and Tensor subclasses. See "CompositeImplicitAutograd Compliance"
# in aten/src/ATen/native/README.md for more details
@unittest.skipIf(
IS_FBCODE or IS_SANDCASTLE, "__torch_dispatch__ does not work in fbcode"
)
@ops(op_db, allowed_dtypes=(torch.float,))
def test_operator(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
composite_compliance.check_with_mode(op, args, kwargs, self.assertEqual)
composite_compliance.check_all_permutations(op, args, kwargs, self.assertEqual)
@unittest.skipIf(
IS_FBCODE or IS_SANDCASTLE, "__torch_dispatch__ does not work in fbcode"
)
@ops([op for op in op_db if op.supports_autograd], allowed_dtypes=(torch.float,))
def test_backward(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
# We pass assertEqual so that decorators like `toleranceOverride`
# actually work (otherwise they silently do nothing!)
composite_compliance.check_backward_formula(
op.get_op(), args, kwargs,
sample.output_process_fn_grad,
op.gradcheck_wrapper, self.assertEqual)
@unittest.skipIf(
IS_FBCODE or IS_SANDCASTLE, "__torch_dispatch__ does not work in fbcode"
)
@ops(op_db, allowed_dtypes=(torch.float,))
def test_forward_ad(self, device, dtype, op):
if torch.float not in op.supported_backward_dtypes(device):
raise unittest.SkipTest("Does not support autograd")
if not op.supports_forward_ad:
raise unittest.SkipTest("Does not support forward_ad")
samples = op.sample_inputs(device, dtype, requires_grad=True)
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
# We pass assertEqual so that decorators like `toleranceOverride`
# actually work (otherwise they silently do nothing!)
composite_compliance.check_forward_ad_formula(
op.get_op(), args, kwargs, op.gradcheck_wrapper, self.assertEqual)
@skipIfSlowGradcheckEnv
class TestMathBits(TestCase):
# Tests that
# 1. The operator's output for physically conjugated/negated tensors and conjugate/negative view tensors
# produces the same value
# 2. The gradients are the same in both cases mentioned in (1)
# 3. If the operator's inplace variant is supported, tests that the inplace operation
# produces the correct value when called on a conjugate/negative view tensor and that the output
# has its conj/neg bit set to true
# This test only runs for C -> R and C -> C functions
# TODO: add tests for `R->C` functions
# Note: This test runs for functions that take both tensors and tensorlists as input.
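# Hedged illustration of the two representations being compared (conjugation case):
#
#     t = torch.randn(3, dtype=torch.cfloat)
#     phys = torch.conj_physical(t)   # materializes the conjugated values
#     view = torch.conj(t)            # lazy conjugate view; torch.is_conj(view) is True
#
# The op under test must produce the same values (and gradients) for both.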
def _test_math_view(
self,
device,
dtype,
op,
samples,
math_op_physical,
math_op_view,
is_bit_set,
out_type,
):
inplace_variant = op.inplace_variant
# helper function to clone and conjugate/negate the input if it's a tensor;
# otherwise clone the sequence and conjugate/negate its first element.
# If a requires_grad argument is provided, the tensor being conjugated/negated will
# have its requires_grad set to that value.
def clone_and_perform_view(input, **kwargs):
if isinstance(input, torch.Tensor):
requires_grad = kwargs.get("requires_grad", input.requires_grad)
with torch.no_grad():
# Ensure view represents the original sample input
input = math_op_physical(input)
# Note: .conj() is not called under no_grad mode since it's not allowed to modify a
# view created in no_grad mode. Here it's ok to do so, so as a workaround we call conj
# before resetting the requires_grad field for input
input = math_op_view(input)
assert input.is_leaf
return input.requires_grad_(requires_grad)
if isinstance(input, Sequence):
out = list(map(clone_input_helper, input))
out[0] = clone_and_perform_view(out[0])
return tuple(out)
for sample in samples:
tensor = (
sample.input
if isinstance(sample.input, torch.Tensor)
else sample.input[0]
)
cloned1 = clone_and_perform_view(sample.input)
# Computes the function's forward value with a physically conjugated/negated tensor and
# with a conj/neg view tensor, and verifies that the outputs in both cases are equal.
expected_forward = op(sample.input, *sample.args, **sample.kwargs)
forward_with_mathview = op(cloned1, *sample.args, **sample.kwargs)
self.assertEqual(expected_forward, forward_with_mathview)
# If the op has an inplace variant, and the input doesn't require broadcasting
# and has the same dtype as output, verify that the inplace operation on a conjugated/negated
# input produces correct output, and the output tensor has the conj/neg bit set to True
if inplace_variant is not None and not sample.broadcasts_input:
cloned2 = clone_and_perform_view(tensor, requires_grad=False)
if (
isinstance(expected_forward, torch.Tensor)
and expected_forward.dtype is tensor.dtype
):
inplace_forward = inplace_variant(
cloned2, *sample.args, **sample.kwargs
)
self.assertTrue(is_bit_set(inplace_forward))
self.assertEqual(inplace_forward, expected_forward)
# TODO: backward consistency only supported for single tensor outputs
# TODO: backward consistency only checked on sample.input, not all
# tensor inputs
# TODO: update to handle checking grads of all tensor inputs as
# derived from each tensor output
if (
isinstance(expected_forward, torch.Tensor)
and expected_forward.requires_grad
):
output_process_fn_grad = sample.output_process_fn_grad or (lambda x: x)
expected_forward = output_process_fn_grad(expected_forward)
forward_with_mathview = output_process_fn_grad(forward_with_mathview)
tensor = (
sample.input
if isinstance(sample.input, torch.Tensor)
else sample.input[0]
)
expected_forward.sum().backward(retain_graph=True)
forward_with_mathview.sum().backward(retain_graph=True)
if tensor.grad is not None:
cloned1_tensor = (
cloned1 if isinstance(cloned1, torch.Tensor) else cloned1[0]
)
self.assertEqual(tensor.grad, cloned1_tensor.grad)
tensor.grad, cloned1_tensor.grad = None, None
# a repeat of the above test if output is not complex valued
if out_type(expected_forward):
grad = torch.randn_like(expected_forward)
expected_forward.backward(grad)
forward_with_mathview.backward(
math_op_view(math_op_physical(grad))
)
self.assertEqual(tensor.grad, cloned1_tensor.grad)
@ops(ops_and_refs, allowed_dtypes=(torch.cfloat,))
def test_conj_view(self, device, dtype, op):
if not op.test_conjugated_samples:
self.skipTest("Operation doesn't support conjugated inputs.")
math_op_physical = torch.conj_physical
math_op_view = torch.conj
_requires_grad = torch.cfloat in op.supported_backward_dtypes(
torch.device(device).type
)
is_bit_set = torch.is_conj
samples = op.sample_inputs(device, dtype, requires_grad=_requires_grad)
self._test_math_view(
device,
dtype,
op,
samples,
math_op_physical,
math_op_view,
is_bit_set,
torch.is_complex,
)
@ops(ops_and_refs, allowed_dtypes=(torch.double,))
def test_neg_view(self, device, dtype, op):
if not op.test_neg_view:
self.skipTest("Operation not tested with tensors with negative bit.")
math_op_physical = torch.neg
math_op_view = torch._neg_view
is_bit_set = torch.is_neg
samples = op.sample_inputs(device, dtype, requires_grad=op.supports_autograd)
self._test_math_view(
device,
dtype,
op,
samples,
math_op_physical,
math_op_view,
is_bit_set,
lambda x: True,
)
@ops(ops_and_refs, allowed_dtypes=(torch.cdouble,))
def test_neg_conj_view(self, device, dtype, op):
if not op.test_neg_view:
self.skipTest("Operation not tested with tensors with negative bit.")
if not op.test_conjugated_samples:
self.skipTest("Operation doesn't support conjugated inputs.")
def math_op_physical(x):
return -x.conj_physical()
def math_op_view(x):
return torch._neg_view(x).conj()
def is_bit_set(x):
return torch.is_neg(x) and torch.is_conj(x)
_requires_grad = dtype in op.supported_backward_dtypes(
torch.device(device).type
)
samples = op.sample_inputs(device, dtype, requires_grad=_requires_grad)
# Only test one sample
samples = itertools.islice(samples, 1)
self._test_math_view(
device,
dtype,
op,
samples,
math_op_physical,
math_op_view,
is_bit_set,
torch.is_complex,
)
# input strides and size may have been altered due to the result of an inplace op
def check_inplace_view(func, input, rs, input_size, input_strides):
if func is None:
return
# TODO: extend this test to cover ops with multiple outputs and ops like native_batch_norm.out
# which may mutate inputs other than the first.
if isinstance(rs, torch.Tensor) and rs is input:
unequal_size = rs.size() != input_size
unequal_strides = rs.stride() != input_strides
# resize_ should probably have inplace_view tag. Not adding the tag since it
# breaks some codegen logic
if (unequal_size or unequal_strides):
if isinstance(func, torch._ops.OpOverloadPacket):
func = func.default
# Reference: https://github.com/pytorch/pytorch/issues/78759
if func is not torch.ops.aten.resize_.default:
# TODO: use self.assertIn when we have separate tests for each tag
assert torch.Tag.inplace_view in func.tags
# A mode that, when enabled, runs correctness checks to ensure
# that operators have the expected tags based on their input and
# output tensor properties
@skipIfSlowGradcheckEnv
class TestTagsMode(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if isinstance(args[0], torch.Tensor):
old_size = args[0].size()
old_stride = args[0].stride()
rs = func(*args, **kwargs)
check_inplace_view(func, args[0], rs, old_size, old_stride)
else:
rs = func(*args, **kwargs)
return rs
# Test to verify the correctness for tags in `tags.yaml`, also available for access through `torch.Tags`
@skipIfSlowGradcheckEnv
class TestTags(TestCase):
@onlyCPU
@ops(ops_and_refs, dtypes=OpDTypes.any_one)
def test_tags(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample in samples:
# TODO: Test tags for ops that return a list of tensors
input = sample.input
if isinstance(input, torch.Tensor):
old_size = input.size()
old_stride = input.stride()
with TestTagsMode():
rs = op(input, *sample.args, **sample.kwargs)
# TODO: add test for aliases: https://github.com/pytorch/pytorch/issues/78761
aten_name = op.aten_name if op.aten_name is not None else op.name
opoverloadpacket = getattr(torch.ops.aten, aten_name, None)
check_inplace_view(opoverloadpacket, input, rs, old_size, old_stride)
@skipIfSlowGradcheckEnv
class TestRefsOpsInfo(TestCase):
import_paths = ["_refs", "_refs.special", "_refs.nn.functional", "_refs.fft"]
module_alls = [(path, import_module(f"torch.{path}").__all__) for path in import_paths]
ref_ops_names = tuple(itertools.chain.from_iterable(
[f"{path}.{op}" for op in module_all] for path, module_all in module_alls))
ref_db_names = set(ref_op.name for ref_op in python_ref_db)
# TODO: References that do not have an entry in python_ref_db
skip_ref_ops = {
'_refs.bitwise_right_shift',
'_refs.copy_to',
'_refs.empty_strided',
'_refs.equal',
'_refs.full',
'_refs.full_like',
'_refs.item',
'_refs.ones',
'_refs.ones_like',
'_refs.std_var',
'_refs.swap_axes',
'_refs.uniform',
'_refs.scalar_tensor',
'_refs.trunc_divide',
'_refs.zeros',
'_refs.zeros_like'
}
not_in_decomp_table = {
# duplicated in _decomp and _refs
'_refs.nn.functional.elu',
'_refs.nn.functional.mse_loss',
'_refs.transpose',
'_refs.var',
'_refs.rsub',
# these are not aten ops?
'_refs.broadcast_shapes',
'_refs.broadcast_tensors',
'_refs.nn.functional.tanhshrink',
'_refs.swap_axes',
# CompositeImplicitAutograd
'_refs.allclose',
'_refs.atleast_1d',
'_refs.atleast_2d',
'_refs.atleast_3d',
'_refs.broadcast_to',
'_refs.chunk',
'_refs.column_stack',
'_refs.contiguous',
'_refs.dsplit',
'_refs.dstack',
'_refs.fill',
'_refs.flatten',
'_refs.fliplr',
'_refs.flipud',
'_refs.float_power',
'_refs.hsplit',
'_refs.hstack',
'_refs.isclose',
'_refs.isfinite',
'_refs.narrow',
'_refs.positive',
'_refs.ravel',
'_refs.reshape',
'_refs.square',
'_refs.tensor_split',
'_refs.true_divide',
'_refs.trunc_divide',
'_refs.vsplit',
'_refs.vstack',
'_refs.linalg.matrix_norm',
'_refs.linalg.norm',
'_refs.linalg.svd',
'_refs.linalg.svdvals',
'_refs.unflatten',
# CompositeExplicitAutograd,
'_refs.unbind',
# ref implementation missing kwargs
'_refs.empty', # missing "pin_memory"
'_refs.empty_like', # missing "layout"
'_refs.full', # missing "layout"
'_refs.full_like', # missing "layout"
'_refs.ones', # missing "layout"
'_refs.ones_like', # missing "layout"
'_refs.round', # missing "decimals"
'_refs.scalar_tensor', # missing "layout"
'_refs.zeros', # missing "layout"
'_refs.zeros_like', # missing "layout"
# other
'_refs.as_strided', # _prims._as_strided_meta: "reduce() of empty sequence with no initial value"
'_refs.copy_to', # torch._C._jit_get_operation: No such operator aten::copy_to
'_refs.clone', # test_meta.py: view size is not compatible with input tensor's size and stride
'_refs.equal', # 'bool' object has no attribute 'dtype'
'_refs.conj', # Calls _prims.conj
'_refs.real',
'_refs.imag',
}
@parametrize("op", ref_ops_names)
def test_refs_are_in_python_ref_db(self, op):
if op in self.skip_ref_ops:
raise unittest.SkipTest(f"{op} does not have an entry in python_ref_db")
self.assertIn(op, self.ref_db_names)
@parametrize("op", ref_ops_names)
def test_refs_are_in_decomp_table(self, op):
path = op.split('.')
module_path = '.'.join(path[:-1])
op_name = path[-1]
op_impl = getattr(import_module(f"torch.{module_path}"), op_name)
if op in self.not_in_decomp_table:
self.assertFalse(op_impl in torch._decomp.decomposition_table.values(),
f"Unexpectedly found {op} in torch._decomp.decomposition_table.values()")
else:
self.assertTrue(op_impl in torch._decomp.decomposition_table.values(),
f"Did not find {op} in torch._decomp.decomposition_table.values()")
fake_skips = (
"aminmax", # failing input
"cholesky", # Could not run 'aten::cholesky' with arguments from the 'Meta' backend
"cholesky_inverse", # Could not run 'aten::cholesky' with arguments from the 'Meta' backend
"cov", # aweights cannot be negtaive
"istft", # window overlap add min: 0
"linalg.eigvals", # The tensor has a non-zero number of elements, but its data is not allocated yet
"linalg.eigvalsh", # aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.matrix_power", # Could not run 'aten::eye.m_out' with arguments from the 'Meta' backend
# "linalg.pinv", # Could not run 'aten::pinv.out' with arguments from the 'Meta' backen
"linalg.matrix_rank.hermitian", # Could not run 'aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.pinv.hermitian", # tensor.mH is only supported on matrices or batches of matrices. Got 1-D tensor
"linalg.solve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta' backend
"linalg.tensorsolve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta'
"lu_solve", # MALLOC ERROR: debug
"multinomial", # Could not run 'aten::multinomial' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_1", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_3", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_5", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"nanmean", # logical_not() got an unexpected keyword argument 'out'
"quantile", # quantile() q values must be in the range [0, 1]
"nanquantile", # quantile() q values must be in the range [0, 1]
"nn.functional.ctc_loss", # The tensor has a non-zero number of elements, but its data is not allocated yet
"nn.functional.embedding_bag", # sometimes errors
"nn.functional.nll_loss", # sometimes errors
"nn.functional.max_pool1d", # The tensor has a non-zero number of elements
"to_sparse", # Could not run 'aten::to_sparse' with arguments from the 'Meta' backend
"tensor_split", # The tensor has a non-zero number of elements, but its data is not allocated yet
"repeat_interleave", # cannot repeat_interleave a meta tensor without output_size
"segment_reduce.lengths", # Could not run 'aten::segment_reduce' with arguments from the 'Meta' backend.
"sparse.sampled.addmm", # sparsity not supported
    # Cannot infer the total number of classes from meta; no way at present to throw DynamicOutputShapeException
"nn.functional.one_hot",
)
fake_autocast_device_skips = defaultdict(dict)
# TODO: investigate/fix
fake_autocast_device_skips["cpu"] = set(
("linalg.pinv",)
)
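# Ops whose output shape depends on input *values*, so a fake (meta) tensor cannot infer it;
# the fake tensor mode is expected to raise DynamicOutputShapeException for these.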
dynamic_output_op_tests = (
"argwhere",
"bincount",
"combinations",
"linalg.lstsq",
"masked_select",
"nonzero",
"unique_consecutive",
"unique",
"linalg.lstsq.grad_oriented",
)
# some inputs invoke dynamic output shape operators, some do not
sometimes_dynamic_output_op_test = (
"__getitem__",
"index_select",
)
aliasing_failures = (
"histogramdd",
"nn.functional.pixel_shuffle",
"nn.functional.pixel_unshuffle",
)
fake_striding_skips = (
"fft.fft2",
"fft.fft",
"fft.fftn",
"fft.hfft2",
"fft.hfft",
"fft.hfftn",
"fft.ifft2",
"fft.ifft",
"fft.ifftn",
"fft.ihfft2",
"fft.ihfft",
"fft.ihfftn",
"fft.irfft2",
"fft.irfft",
"fft.irfftn",
"fft.rfft2",
"fft.rfft",
"fft.rfftn",
"svd",
"linalg.svd",
"nn.functional.conv_transpose2d",
)
@skipIfSlowGradcheckEnv
class TestFakeTensorNonErroring(TestCase):
def _test_fake_helper(self, device, dtype, op, context):
name = op.name
if op.variant_test_name:
name += "." + op.variant_test_name
if name in fake_skips or "sparse" in name or "jiterator" in name:
self.skipTest("Skip failing test")
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample in samples:
try:
mode = FakeTensorMode(inner=None)
def map_to_fake(e):
if isinstance(e, torch.Tensor):
return mode.from_tensor(e)
else:
return e
input = tree_map(map_to_fake, sample.input)
args = tree_map(map_to_fake, sample.args)
kwargs = tree_map(map_to_fake, sample.kwargs)
try:
with context():
res = op(sample.input, *sample.args, **sample.kwargs)
except Exception as e:
continue
with context():
with enable_torch_dispatch_mode(mode):
res_fake = op(input, *args, **kwargs)
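                # Helper: returns True if any tensor on one side shares storage with a tensor on
                # the other side (i.e. the op's outputs alias its inputs). The check is symmetric,
                # so below it is called with (inputs, outputs) as well as (outputs, inputs).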
def outputs_alias_inputs(outputs, inputs):
input_storages = set()
for out in tree_flatten(outputs)[0]:
if isinstance(out, torch.Tensor):
input_storages.add(out.storage()._cdata)
for inp in tree_flatten(inputs)[0]:
if isinstance(inp, torch.Tensor) and inp.storage()._cdata in input_storages:
return True
return False
for fake_out, real_out in zip(
tree_flatten(res_fake)[0], tree_flatten(res)[0]
):
if not isinstance(fake_out, torch.Tensor):
self.assertTrue(not isinstance(real_out, torch.Tensor))
continue
self.assertTrue(isinstance(fake_out, FakeTensor))
# if you see a shape exception here, you may need to add
# a `dynamic_output_shape` tag to an operator
check_strides = name not in fake_striding_skips
# if there is a striding failure here as a result of adding a primtorch ref,
# feel free to add the op to `fake_striding_skips` but please tag
# @eellison on the pr.
# see: https://github.com/pytorch/pytorch/issues/78050
prims.utils.compare_tensor_meta(fake_out, real_out, check_strides)
if name not in aliasing_failures:
fake_aliasing = outputs_alias_inputs((input, args, kwargs), res_fake)
                    real_aliasing = outputs_alias_inputs((sample.input, sample.args, sample.kwargs), res)
self.assertEqual(fake_aliasing, real_aliasing)
self.assertTrue(name not in dynamic_output_op_tests)
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
pass
except torch._subclasses.fake_tensor.DynamicOutputShapeException:
self.assertTrue(name in dynamic_output_op_tests or name in sometimes_dynamic_output_op_test)
@ops(op_db, dtypes=OpDTypes.any_one)
def test_fake(self, device, dtype, op):
self._test_fake_helper(device, dtype, op, contextlib.nullcontext)
@ops(op_db, dtypes=OpDTypes.any_one)
def test_fake_autocast(self, device, dtype, op):
if op.name in fake_autocast_device_skips[device]:
self.skipTest("Skip failing test")
context = torch.cuda.amp.autocast if device == "cuda" else torch.cpu.amp.autocast
self._test_fake_helper(device, dtype, op, context)
instantiate_device_type_tests(TestCommon, globals())
instantiate_device_type_tests(TestCompositeCompliance, globals())
instantiate_device_type_tests(TestMathBits, globals())
instantiate_device_type_tests(TestRefsOpsInfo, globals(), only_for="cpu")
instantiate_device_type_tests(TestFakeTensorNonErroring, globals())
instantiate_device_type_tests(TestTags, globals())
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/test_ops.py
|
# Owner(s): ["module: fx"]
import math
import numbers
import operator
import pickle
import sys
import tempfile
import unittest
from typing import Callable, Dict, Union, List, Optional
from types import BuiltinFunctionType
import torch
import torch.fx.experimental.optimization as optimization
from torch.fx._symbolic_trace import symbolic_trace
from torch.fx.experimental import merge_matmul
from torch.fx.experimental.accelerator_partitioner import Partitioner
from torch.fx.experimental.normalize import NormalizeOperators, NormalizeArgs
from torch.fx.passes import graph_manipulation
from torch.fx.passes.param_fetch import lift_lowering_attrs_to_nodes
from torch.fx.experimental.partitioner_utils import (
NodeLatency,
get_partition_to_latency_mapping,
get_latency_of_partitioned_graph,
Device,
PartitionerConfig,
PartitionMode,
)
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.experimental.schema_type_annotation import AnnotateTypesWithSchema
import torch.fx.experimental.meta_tracer
from torch.fx.graph_module import GraphModule
from torch.fx.node import Node
from torch.fx.operator_schemas import (
_torchscript_type_to_python_type,
normalize_function,
normalize_module,
type_matches,
create_type_hint,
)
from torch.fx.passes.shape_prop import ShapeProp
from torch.fx.passes.split_module import split_module
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_nn import module_tests, new_module_tests
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
try:
import torchvision.models
from torchvision.models import resnet18
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
skipIfNoMkldnn = unittest.skipIf(
not (torch.backends.mkldnn.enabled and torch.backends.mkldnn.is_available()),
"no MKLDNN",
)
def symbolic_trace_with_rewrite(root: Union[torch.nn.Module, Callable]) -> GraphModule:
return GraphModule(
root if isinstance(root, torch.nn.Module) else torch.nn.Module(),
RewritingTracer().trace(root),
)
class TestFXExperimental(JitTestCase):
def test_find_single_partition(self):
class TestModule(torch.nn.Module):
def forward(self, a, b):
return a + b
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(1)
b = torch.rand(1)
graph_manipulation.get_size_of_all_nodes(traced, [a, b])
partitioner = Partitioner()
devices = [
Device("dev_0", 125, 0),
Device("dev_1", 150, 1),
Device("dev_2", 125, 2),
]
partitioner_config = PartitionerConfig(devices)
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
dag = ret.dag
self.assertEqual(traced(a, b), module_with_submodules(a, b))
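        # The whole graph forms a single partition, which is expected to land on dev_1
        # (presumably chosen as the device with the most memory).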
assert dag.nodes[0].logical_device_ids == [1]
def test_lack_of_devices(self):
class TestModule(torch.nn.Module):
def forward(self, a, b):
return a + b
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(4)
b = torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a, b])
partitioner = Partitioner()
devices = [Device("dev_0", 4, 0), Device("dev_1", 4, 1)]
partitioner_config = PartitionerConfig(devices, PartitionMode.size_based)
catch_runtime_error = False
try:
ret = partitioner.partition_graph(traced, m, partitioner_config)
except RuntimeError:
catch_runtime_error = True
assert catch_runtime_error
def test_large_node_error(self):
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(4, 4)
def forward(self, a):
linear = self.linear(a)
add = linear + a
return add
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a])
partitioner = Partitioner()
devices = [
Device("dev_0", 40, 0),
Device("dev_1", 40, 0),
Device("dev_2", 40, 0),
Device("dev_3", 40, 0),
Device("dev_4", 40, 0),
]
partitioner_config = PartitionerConfig(devices, PartitionMode.size_based)
catch_runtime_error = False
try:
ret = partitioner.partition_graph(traced, m, partitioner_config)
except RuntimeError:
catch_runtime_error = True
assert catch_runtime_error
def test_partition_node_manipulation(self):
class TestModule(torch.nn.Module):
def forward(self, a, b):
add_1 = a + b
add_2 = add_1 + torch.rand(4)
add_3 = add_2 + torch.rand(4)
return add_3
m = TestModule()
traced = symbolic_trace(m)
a, b = torch.rand(4), torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a, b])
partitioner = Partitioner()
devices = [Device("dev_0", 1000, 0)]
partitioner_config = PartitionerConfig(devices)
ret = partitioner.partition_graph(traced, m, partitioner_config)
partition = partitioner.partitions[0]
assert partition.used_mem_bytes == 112
# Select add_2 node to remove
selected_node = None
for node in partition.nodes:
if node.name == "add_2":
selected_node = node
partition.remove_node(selected_node)
assert partition.used_mem_bytes == 80
def test_size_based_partition(self):
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(4, 4)
self.c = torch.rand(4)
def forward(self, a, b):
add_1 = a + b
linear = self.linear(add_1)
add_2 = linear + self.c
return add_2
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(4)
b = torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a, b])
partitioner = Partitioner()
devices = [
Device("dev_0", 125, 0),
Device("dev_1", 125, 1),
Device("dev_2", 125, 2),
]
partitioner_config = PartitionerConfig(devices, PartitionMode.size_based)
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
dag = ret.dag
self.assertEqual(traced(a, b), module_with_submodules(a, b))
for i, node in enumerate(dag.nodes):
assert node.logical_device_ids == [i]
def test_partition_device_mapping(self):
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(4, 4)
def forward(self, a):
b = torch.rand(4)
add_1 = a + b
linear_1 = self.linear(add_1)
add_2 = torch.rand(4) + a
add_3 = add_2 + linear_1
return add_3
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a])
partitioner = Partitioner()
devices = [Device("dev_0", 120, 0), Device("dev_1", 160, 1)]
partitioner_config = PartitionerConfig(devices, PartitionMode.size_based)
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
dag = ret.dag
self.assertEqual(traced(a), module_with_submodules(a))
for i, node in enumerate(dag.nodes):
if i == 1:
assert node.logical_device_ids == [1]
else:
assert node.logical_device_ids == [0]
def test_sparse_nn_partition(self):
class MyRecommendationModule(torch.nn.Module):
def create_mlp(self, num_of_layers: int, input_size: int, output_size: int):
layers = torch.nn.ModuleList()
for _ in range(num_of_layers):
ll = torch.nn.Linear(input_size, output_size)
layers.append(ll)
layers.append(torch.nn.ReLU())
return layers
def __init__(self):
super(MyRecommendationModule, self).__init__()
layers = self.create_mlp(4, 4, 4)
self.bottom_layers = torch.nn.Sequential(*layers)
layers = self.create_mlp(3, 24, 24)
self.top_layers = torch.nn.Sequential(*layers)
self.embedding_layers = torch.nn.ModuleList()
el = torch.nn.EmbeddingBag(500000, 4, mode="sum", sparse=True)
self.embedding_layers.append(el)
for i in range(3):
el = torch.nn.EmbeddingBag(1000000, 4, mode="sum", sparse=True)
self.embedding_layers.append(el)
el = torch.nn.EmbeddingBag(500000, 4, mode="sum", sparse=True)
self.embedding_layers.append(el)
def forward(self, a, b, offset):
x = self.bottom_layers(a)
y = []
c = []
for i in range(len(self.embedding_layers)):
temp = torch.randint(10, (8,))
c.append(temp + b)
for i in range(len(self.embedding_layers)):
if i % 2 == 0:
y.append(self.embedding_layers[i](c[i], offset))
else:
y.append(
self.embedding_layers[i](torch.randint(10, (8,)), offset)
)
z = torch.cat([x] + y, dim=1)
p = self.top_layers(z)
return p
m = MyRecommendationModule()
a = torch.rand(2, 4)
b = torch.randint(10, (8,))
offset = torch.randint(1, (2,))
traced = symbolic_trace(m)
graph_manipulation.get_size_of_all_nodes(traced, [a, b, offset])
devices = [
Device("dev_0", 33000000, 0),
Device("dev_1", 33000000, 1),
Device("dev_2", 33000000, 2),
]
partitioner_config = PartitionerConfig(devices, PartitionMode.sparse_nn)
partitioner = Partitioner()
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
dag = ret.dag
self.assertEqual(traced(a, b, offset), module_with_submodules(a, b, offset))
assert len(module_with_submodules.graph.nodes) == 24
def test_partition_latency(self):
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.linear = torch.nn.Linear(4, 4)
def forward(self, a):
add_1 = a + torch.rand(4)
add_2 = add_1 + torch.rand(4)
linear_1 = self.linear(add_1)
add_3 = add_2 + linear_1
add_4 = add_2 + add_3
return add_4
def get_node_to_latency_mapping(fx_module: GraphModule):
"""Given a fx module, generate node latency for each node
based on the size of each node
"""
node_to_latency_mapping: Dict[Node, NodeLatency] = {}
for node in fx_module.graph.nodes:
if node.op not in {"output", "placeholder", "get_attr"}:
if node.size_bytes.total_size == node.size_bytes.output_size:
node_to_latency_mapping[node] = NodeLatency(
node.size_bytes.total_size, 2.0 * node.size_bytes.total_size
)
else:
node_to_latency_mapping[node] = NodeLatency(
node.size_bytes.total_size, node.size_bytes.output_size
)
return node_to_latency_mapping
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a])
node_to_latency_mapping = get_node_to_latency_mapping(traced)
devices = [Device("dev_0", 200, 0), Device("dev_1", 200, 1)]
partitioner = Partitioner()
partitioner_config = PartitionerConfig(devices)
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
self.assertEqual(traced(a), module_with_submodules(a))
partitions = partitioner.partitions
partition_to_latency_mapping = get_partition_to_latency_mapping(
partitions, node_to_latency_mapping
)
for p in partition_to_latency_mapping:
if p.partition_id == 0:
assert partition_to_latency_mapping[p] == (128.0, 80.0, 160.0)
else:
assert partition_to_latency_mapping[p] == (16.0, 32.0, 32.0)
transfer_rate_bytes_per_sec = 2
critical_path_latency_sec = get_latency_of_partitioned_graph(
partitions, partition_to_latency_mapping, transfer_rate_bytes_per_sec
)
assert critical_path_latency_sec == 208.0
def test_cost_aware_partition(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(4, 4)
def forward(self, a):
add_1 = a + torch.rand(4)
add_2 = add_1 + torch.rand(4)
linear_1 = self.linear(add_1)
add_3 = add_2 + torch.rand(4)
add_4 = add_2 + linear_1
add_5 = add_3 + add_4
return add_5
def get_node_to_latency_mapping(fx_module: GraphModule):
node_to_latency_mapping: Dict[Node, NodeLatency] = {}
for node in fx_module.graph.nodes:
if node.op not in {"output", "placeholder", "get_attr"}:
if node.size_bytes.total_size == node.size_bytes.output_size:
node_to_latency_mapping[node] = NodeLatency(
node.size_bytes.total_size, 1
)
else:
node_to_latency_mapping[node] = NodeLatency(
node.size_bytes.total_size, node.size_bytes.output_size
)
return node_to_latency_mapping
m = MyModule()
traced = symbolic_trace(m)
a = torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a])
devices = [
Device("dev_0", 125, 0),
Device("dev_1", 125, 1),
Device("dev_2", 125, 2),
Device("dev_3", 125, 3),
]
node_to_latency_mapping = get_node_to_latency_mapping(traced)
partitioner_config = PartitionerConfig(
devices,
mode=PartitionMode.cost_aware,
transfer_rate_bytes_per_sec=2,
node_to_latency_mapping=node_to_latency_mapping,
)
partitioner = Partitioner()
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
dag = ret.dag
self.assertEqual(traced(a), module_with_submodules(a))
partitions = partitioner.partitions
partition_to_latency_mapping = get_partition_to_latency_mapping(
partitions, node_to_latency_mapping
)
critical_path_latency_sec = get_latency_of_partitioned_graph(
partitions,
partition_to_latency_mapping,
partitioner_config.transfer_rate_bytes_per_sec,
)
assert critical_path_latency_sec == 160.0
def test_aot_based_partition(self):
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.b = torch.rand(4)
self.c = torch.rand(4)
def forward(self, a):
add_1 = a + self.b
add_2 = self.c + add_1
return add_2
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(4)
node_to_partition_id = {}
partition_to_logical_devices = {}
count = 0
graph_manipulation.get_size_of_all_nodes(traced, [a])
for node in traced.graph.nodes:
if node.op not in {"placeholder", "get_attr", "output"}:
node_to_partition_id[node] = count
partition_to_logical_devices[count] = [0]
count += 1
devices = [Device("dev_0", 200, 0)]
partitioner_config = PartitionerConfig(
devices=devices,
mode=PartitionMode.aot_based,
node_to_partition_mapping=node_to_partition_id,
partition_to_logical_device_mapping=partition_to_logical_devices,
)
partitioner = Partitioner()
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
dag = ret.dag
self.assertEqual(module_with_submodules(a), traced(a))
for node in dag.nodes:
assert node.size_bytes == 48
assert node.logical_device_ids == [0]
def test_replace_target_nodes_with(self):
class testModule(torch.nn.Module):
def forward(self, a, b):
return a + b
m = testModule()
traced = symbolic_trace(m)
input1 = torch.randn(1)
input2 = torch.randn(1)
assert (input1 + input2) == traced(input1, input2)
graph_manipulation.replace_target_nodes_with(
fx_module=traced,
old_op="call_function",
old_target=operator.add,
new_op="call_function",
new_target=operator.mul,
)
assert (input1 * input2) == traced(input1, input2)
def test_saturate_host(self):
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.linear = torch.nn.Linear(4, 4)
def forward(self, a):
add_1 = a + torch.rand(4)
add_2 = add_1 + torch.rand(4)
linear_1 = self.linear(add_1)
add_3 = add_2 + linear_1
add_4 = add_2 + add_3
return add_4
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a])
devices = [
Device("dev_0", 200, 0),
Device("dev_1", 200, 1),
Device("dev_2", 100, 2),
Device("dev_3", 100, 3),
Device("dev_4", 200, 4),
Device("dev_5", 100, 5),
]
partitioner = Partitioner()
# Without host saturation, the model will be split into two partitions.
# dev_0 holds partition 0 of 192 bytes and dev_1 holds partition 1 of 48 bytes.
partitioner_config = PartitionerConfig(devices, saturate_host=True)
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
self.assertEqual(traced(a), module_with_submodules(a))
partitions = partitioner.partitions
self.assertEqual(len(partitions), 2)
# With host saturation, partition 1 will be replicated to dev_4, and partition 2
# will be replicated to dev_2.
self.assertEqual(partitions[0].logical_device_ids, [0, 4])
self.assertEqual(partitions[1].logical_device_ids, [1, 2])
@skipIfNoTorchVision
def test_conv_bn_fusion(self):
rn18 = resnet18().eval()
traced = symbolic_trace(rn18)
fused = optimization.fuse(traced)
self.assertTrue(
all(not isinstance(m, torch.nn.BatchNorm2d) for m in fused.modules())
)
N, C, H, W = 20, 3, 224, 224
inp = torch.randn(N, C, H, W)
self.assertEqual(fused(inp), rn18(inp))
def test_conv_bn_fusion_not_running_state(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(32, 64, 3, stride=2)
self.bn = torch.nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
model = M().eval()
traced = symbolic_trace(model)
fused = optimization.fuse(traced)
inp = torch.randn([1, 32, 50, 50])
        # bn is not folded into conv because it does not track running stats
self.assertTrue(
any(isinstance(m, torch.nn.BatchNorm2d) for m in fused.modules())
)
self.assertEqual(fused(inp), model(inp))
def test_call_to_assert_no_msg(self):
class M(torch.nn.Module):
def forward(self, a, b):
assert a == b
return a + b
m = M()
traced = symbolic_trace_with_rewrite(m)
# Make sure the graph is well-formed
traced.graph.lint()
# Check the IR to make sure there's a call_function node with target == "Assert"
self.assertTrue(
any(
node.op == "call_function" and node.target == torch._assert
for node in traced.graph.nodes
)
)
# Ensure that the assert throws when it's supposed to and doesn't throw when it's not supposed to
traced(3, 3)
with self.assertRaisesRegex(AssertionError, ""):
traced(3, 5)
# Confirm that the output is correct
self.assertEqual(traced(3, 3), m(3, 3))
def test_meta_tracer(self):
class MetaTracerTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.Embedding(num_embeddings=42, embedding_dim=16)
self.layernorm = torch.nn.LayerNorm(16)
def forward(self, x):
emb = self.emb(x)
emb = emb + torch.arange(emb.shape[-1], dtype=torch.float, device=emb.device)
lol = self.layernorm(emb)
return torch.relu(lol) if lol.shape[0] < 30 else torch.sigmoid(lol)
mttm = MetaTracerTestModule()
for BS in [15, 35]:
x = torch.zeros(BS, dtype=torch.long).random_(42)
meta_args = {'x' : x.to(device='meta')}
gm = torch.fx.experimental.meta_tracer.symbolic_trace(mttm, meta_args=meta_args)
torch.testing.assert_close(gm(x), mttm(x))
# Test serialization/deserialization
with tempfile.TemporaryDirectory() as tmp_dir:
with open(f'{tmp_dir}/meta_module.pkl', 'wb') as f:
pickle.dump(gm, f)
with open(f'{tmp_dir}/meta_module.pkl', 'rb') as f:
loaded = pickle.load(f)
torch.testing.assert_close(loaded(x), mttm(x))
def test_call_to_assert_with_msg(self):
class M(torch.nn.Module):
def forward(self, a, b):
assert a == b, "test message"
return a + b
m = M()
traced = symbolic_trace_with_rewrite(m)
# Make sure the graph is well-formed
traced.graph.lint()
# Check the IR to make sure there's a call_function node with target == "Assert"
self.assertTrue(
any(
node.op == "call_function" and node.target == torch._assert
for node in traced.graph.nodes
)
)
# Ensure that the assert throws when it's supposed to and doesn't throw when it's not supposed to
traced(3, 3)
with self.assertRaisesRegex(AssertionError, "test message"):
traced(3, 5)
# Confirm that the output is correct
self.assertEqual(traced(3, 3), m(3, 3))
def test_call_to_assert_with_empty_msg(self):
class M(torch.nn.Module):
def forward(self, a, b):
assert a == b, ""
return a + b
m = M()
traced = symbolic_trace_with_rewrite(m)
# Make sure the graph is well-formed
traced.graph.lint()
# Check the IR to make sure there's a call_function node with target == "Assert"
self.assertTrue(
any(
node.op == "call_function" and node.target == torch._assert
for node in traced.graph.nodes
)
)
# Ensure that the assert throws when it's supposed to and doesn't throw when it's not supposed to
traced(3, 3)
with self.assertRaisesRegex(AssertionError, ""):
traced(3, 5)
# Confirm that the output is correct
self.assertEqual(traced(3, 3), m(3, 3))
def test_call_to_assert_with_multiline_message(self):
class M(torch.nn.Module):
def forward(self, a, b):
error_msg = """
An error message with
terrible spacing
"""
assert a == b, error_msg
return a + b
m = M()
traced = symbolic_trace_with_rewrite(m)
# Make sure the graph is well-formed
traced.graph.lint()
# Check the IR to make sure there's a call_function node with target == "Assert"
self.assertTrue(
any(
node.op == "call_function" and node.target == torch._assert
for node in traced.graph.nodes
)
)
# Ensure that the assert throws when it's supposed to and doesn't throw when it's not supposed to
error_msg = """
An error message with
terrible spacing
"""
traced(3, 3)
with self.assertRaisesRegex(AssertionError, error_msg):
traced(3, 5)
# Confirm that the output is correct
self.assertEqual(traced(3, 3), m(3, 3))
def test_subgraph_creation(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x, y):
z = self.linear(x + self.param).clamp(min=0.0, max=1.0)
w = self.linear(y).clamp(min=0.0, max=1.0)
return z + w
# symbolically trace model
my_module = MyModule()
my_module_traced = symbolic_trace(my_module)
# random mod partitioning
partition_counter = 0
NPARTITIONS = 3
# Add some random meta info to make sure it is kept around.
for node in my_module_traced.graph.nodes:
if node.op != "output":
node.meta["test_meta_info"] = True
def mod_partition(node: Node):
nonlocal partition_counter
partition = partition_counter % NPARTITIONS
partition_counter = (partition_counter + 1) % NPARTITIONS
return partition
# split module in module with submodules
module_with_submodules = split_module(
my_module_traced, my_module, mod_partition
)
# Check that test_meta_info was still on all nodes.
submodules = dict(module_with_submodules.named_modules())
for node in module_with_submodules.graph.nodes:
if node.op == "call_module":
submod = submodules[node.target]
self.assertTrue(isinstance(submod, torch.fx.GraphModule))
for submod_node in submod.graph.nodes:
if submod_node.op != "output":
stored_op = submod_node.meta.get("test_meta_info")
self.assertTrue(stored_op is not None and stored_op)
x = torch.rand(3, 4)
y = torch.rand(3, 4)
orig_out = my_module_traced(x, y)
submodules_out = module_with_submodules(x, y)
self.assertEqual(orig_out, submodules_out)
def test_split_module_kwargs_expansion(self):
class ModuleWithKwargsExpansion(torch.nn.Module):
def forward(self, x, **kwargs):
return x + kwargs['foo']
mod = ModuleWithKwargsExpansion()
traced = torch.fx.symbolic_trace(mod)
seen_getitem = False
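        # Everything up to and including the first getitem node stays in partition 0; all
        # later nodes go to partition 1, so the split must forward the expanded kwargs.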
def split_callback(n):
nonlocal seen_getitem
split_idx = int(seen_getitem)
if n.target == operator.getitem:
seen_getitem = True
return split_idx
split = split_module(traced, mod, split_callback)
x = torch.randn(5, 3)
foo = torch.randn(5, 3)
torch.testing.assert_allclose(split(x, foo=foo), traced(x, foo=foo))
@skipIfNoTorchVision
def test_subgraph_trivial_resnet(self):
        # Smoke test that trivially splitting resnet into 1 partition works.
        # There was previously an issue that caused submodule names to be aliased.
m = resnet18()
traced = symbolic_trace(m)
a = torch.rand(64, 3, 7, 7)
module_with_submodules = split_module(traced, m, lambda node: 0)
module_with_submodules(a)
def test_split_module_default_arg(self):
class ModelToTrace(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(512, 512)
def forward(self, x, targets=None):
x = self.lin(x)
if targets is not None:
x = x + targets
return x
mtt = ModelToTrace()
traced = torch.fx.symbolic_trace(mtt, concrete_args={'targets': None})
split = split_module(traced, mtt, lambda node: 0)
x = torch.randn(50, 512)
torch.testing.assert_allclose(split(x), traced(x))
def test_normalize_binary_operators(self):
ops_to_test = {
torch.add,
torch.mul,
torch.sub,
torch.div,
torch.floor_divide,
torch.remainder,
torch.eq,
torch.ne,
torch.lt,
torch.le,
torch.gt,
torch.ge,
}
# Test Tensor/Tensor callsite
for op in ops_to_test:
class WrapperMod(torch.nn.Module):
def forward(self, x, y):
return op(x, y)
traced = symbolic_trace(WrapperMod())
normalized = NormalizeOperators(traced).transform()
x, y = torch.randn(3, 4), torch.randn(3, 4)
torch.testing.assert_close(traced(x, y), normalized(x, y))
self.assertFalse(
any(n.target in ops_to_test for n in normalized.graph.nodes)
)
# Test Tensor/scalar callsite
for op in ops_to_test:
class WrapperMod(torch.nn.Module):
def forward(self, x):
return op(x, 42)
traced = symbolic_trace(WrapperMod())
normalized = NormalizeOperators(traced).transform()
x = torch.randn(3, 4)
torch.testing.assert_close(traced(x), normalized(x))
self.assertFalse(
any(n.target in ops_to_test for n in normalized.graph.nodes)
)
@skipIfNoTorchVision
def test_normalize_args(self):
m = resnet18()
class FunctionalTracer(torch.fx.Tracer):
def is_leaf_module(
self, m: torch.nn.Module, module_qualified_name: str
) -> bool:
# `leaves` contains the set of standard `nn.Modules` that are not
# currently symbolically traceable. Ideally this set would be empty
leaves = set([torch.nn.BatchNorm2d])
return type(m) in leaves
traced = torch.fx.GraphModule(m, FunctionalTracer().trace(m))
input = torch.randn(5, 3, 224, 224)
ref_outs = traced(input)
ShapeProp(traced).propagate(input)
traced = NormalizeArgs(traced).transform()
modules = dict(traced.named_modules())
for node in traced.graph.nodes:
if node.op == "call_function" and node.target != operator.add:
self.assertEqual(len(node.args), 0)
elif node.op == "call_module":
submod_class = modules[node.target].__class__
nn_class = getattr(torch.nn, submod_class.__name__)
if submod_class == nn_class:
self.assertEqual(len(node.args), 0)
traced(input)
self.assertEqual(traced(input), ref_outs)
def test_normalize_modules_exhaustive(self):
"""
Exhaustively test `Node.normalized_arguments` on all standard
torch.nn Module classes
"""
for test_params in module_tests + new_module_tests:
if "constructor" not in test_params:
constructor = getattr(torch.nn, test_params["module_name"])
else:
constructor = test_params["constructor"]
if "constructor_args" not in test_params:
args = ()
else:
args = test_params["constructor_args"]
mod = constructor(*args)
# Skip modules that are not standard `torch.nn`
# instances, including functionals. (functionals
# are tested in test_normalize_args)
if mod.__class__.__name__ not in dir(torch.nn):
continue
if "input_fn" not in test_params:
inputs = torch.randn(test_params["input_size"])
else:
inputs = test_params["input_fn"]()
if not isinstance(inputs, (tuple, list)):
inputs = (inputs,)
params = ", ".join(f"v{i}" for i in range(len(inputs)))
# Generate a class to wrap this standard `nn.Module` instance
test_classname = f"Test{mod.__class__.__name__}"
test_mod_code = f"""
class {test_classname}(torch.nn.Module):
def __init__(self, mod):
super().__init__()
self.mod = mod
def forward(self, {params}):
return self.mod({params})
"""
gbls = {"torch": torch}
exec(test_mod_code, gbls)
test_instance = gbls[test_classname](mod)
traced = symbolic_trace(test_instance)
# Use `Node.normalized_arguments` to get a new set of arguments
# to feed to the Module. Then, rewrite the node to only take
# in those arguments as kwargs
modules = dict(traced.named_modules())
for node in traced.graph.nodes:
if node.op == "call_module":
submod_class = modules[node.target].__class__
nn_class = getattr(torch.nn, submod_class.__name__)
if submod_class == nn_class:
normalized_args = node.normalized_arguments(traced)
normalized_args2 = normalize_module(
traced, node.target, node.args, node.kwargs
)
assert normalized_args == normalized_args2
assert normalized_args
node.args = normalized_args.args
node.kwargs = normalized_args.kwargs
traced.recompile()
            # These Modules use randomness in their forward, so comparing outputs
            # is not a valid correctness check. Skip that check for these.
stochastic_modules = {"FractionalMaxPool2d", "FractionalMaxPool3d", "RReLU"}
if mod.__class__.__name__ not in stochastic_modules:
self.assertEqual(traced(*inputs), mod(*inputs))
traced = NormalizeArgs(symbolic_trace(test_instance)).transform()
modules = dict(traced.named_modules())
for node in traced.graph.nodes:
if node.op == "call_module":
submod_class = modules[node.target].__class__
nn_class = getattr(torch.nn, submod_class.__name__)
if submod_class == nn_class:
self.assertEqual(len(node.args), 0)
def test_normalize_args_preserve_meta(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, a):
return torch.add(a, 3)
m = MyModule()
traced = symbolic_trace(m)
for node in traced.graph.nodes:
if node.op == "call_function" and node.target == torch.add:
node.meta["my_key"] = 7
break
else:
self.fail("Didn't find call_function torch.add")
input = torch.randn(2, 3)
ShapeProp(traced).propagate(input)
traced = NormalizeArgs(traced).transform()
for node in traced.graph.nodes:
if node.op == "call_function" and node.target == torch.add:
self.assertTrue("my_key" in node.meta)
self.assertEqual(node.meta["my_key"], 7)
break
else:
self.fail("Didn't find call_function torch.add")
@skipIfNoTorchVision
def test_annotate_returns_with_schema(self):
m = resnet18()
traced_modules = symbolic_trace(m)
traced_modules_annotated = AnnotateTypesWithSchema(traced_modules).transform()
for node in traced_modules_annotated.graph.nodes:
if node.type is None:
check = (node.op, node.target)
self.assertIn(
check,
{
("placeholder", "x"),
("call_module", "maxpool"),
("call_function", operator.add),
("call_function", torch.flatten),
("output", "output"),
}
)
# Smoke test torchscript compilation since now we're emitting type annotations
torch.jit.script(traced_modules_annotated)
class FunctionalTracer(torch.fx.Tracer):
def is_leaf_module(
self, m: torch.nn.Module, module_qualified_name: str
) -> bool:
# `leaves` contains the set of standard `nn.Modules` that are not
# currently symbolically traceable. Ideally this set would be empty
leaves = set([torch.nn.BatchNorm2d])
return type(m) in leaves
traced_functionals = torch.fx.GraphModule(m, FunctionalTracer().trace(m))
traced_functionals_annotated = AnnotateTypesWithSchema(
traced_functionals
).transform()
for node in traced_functionals_annotated.graph.nodes:
if node.type is None:
check = (node.op, node.target)
excluded_nodes = {
("placeholder", "x"),
# Return type differs based on boolean dispatch :(
("call_function", torch.nn.functional.max_pool2d),
("output", "output"),
}
# AnnotateTypesWithSchema doesn't work with bound C++ functions
if not isinstance(node.target, BuiltinFunctionType):
self.assertIn(check, excluded_nodes)
# Smoke test torchscript compilation since now we're emitting type annotations
torch.jit.script(traced_functionals_annotated)
def test_subgraph_uniquename(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(4, 4)
def forward(self, a, b, c, d):
add_1 = a + b
add_2 = add_1 + c
linear_1 = self.linear(add_1)
add_3 = add_2 + d
add_4 = add_2 + linear_1
add_5 = add_3 + add_4
return add_5
a, b, c, d = torch.ones(4), torch.ones(4), torch.ones(4), torch.ones(4)
mm = MyModule()
traced = symbolic_trace(mm)
def split_cb(node: torch.fx.Node):
if node.name == "a" or node.name == "b" or node.name == "add":
return 0
else:
return 1
module_with_submodule = split_module(traced, mm, split_cb)
self.assertEqual(module_with_submodule(a, b, c, d), traced(a, b, c, d))
def test_split_qualname_mapping(self):
d_hid = 4
class ExampleCode(torch.nn.Module):
def __init__(self):
super().__init__()
self.mm_param = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.mm_param2 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.lin = torch.nn.Linear(d_hid, d_hid)
def forward(self, x):
x = torch.mm(x, self.mm_param)
x = torch.relu(x)
x = torch.mm(x, self.mm_param)
x = self.lin(x)
x = torch.relu(x)
x = torch.mm(x, self.mm_param2)
x = self.lin(x)
return x
my_module = ExampleCode()
my_module_traced = symbolic_trace(my_module)
part_idx = 0
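        # Start a new partition each time the 'lin' submodule is called, so 'lin'
        # ends up referenced from two different submodules in the split result.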
def split_callback(n : torch.fx.Node):
nonlocal part_idx
if (n.op, n.target) == ('call_module', 'lin'):
part_idx += 1
return part_idx
# split module in module with submodules
qualname_map : Dict[str, str] = {}
module_with_submodules = split_module(
my_module_traced, my_module, split_callback, qualname_map
)
expected_qualname_map = {
'submod_1.lin': 'lin', 'submod_2.lin': 'lin'
}
self.assertEqual(qualname_map, expected_qualname_map)
def test_traceable_function_with_nonstandard_name(self):
def foo(x):
return torch.relu(x)
traced = symbolic_trace_with_rewrite(foo)
def test_to_folder(self):
class Test(torch.nn.Module):
def __init__(self):
super(Test, self).__init__()
self.W = torch.nn.Parameter(torch.randn(2))
self.seq = torch.nn.Sequential(torch.nn.BatchNorm1d(2, 2))
self.linear = torch.nn.Linear(2, 2)
self.attr = torch.randn(2)
self.register_buffer("attr2", torch.randn(2))
self.register_buffer("attr3", torch.ones(2, dtype=torch.int32))
def forward(self, x):
return self.linear(self.seq(self.W + self.attr + self.attr2 + self.attr3 + x))
mod = symbolic_trace(Test())
module_name = "Foo"
import tempfile
from pathlib import Path
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_dir = Path(tmp_dir)
mod.to_folder(tmp_dir, module_name)
# Recipe taken from here:
# https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
import importlib.util
spec = importlib.util.spec_from_file_location(
module_name, tmp_dir / "__init__.py"
)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
t = torch.randn(2, 2)
self.assertEqual(module.Foo()(t), mod(t))
def test_fetch(self):
attrs_for_lowering: Dict[str, List[str]] = {
"torch.nn.modules.conv.Conv2d": [
"weight",
"bias",
"kernel_size",
"stride",
"padding",
"dilation",
"groups",
"padding_mode",
],
"torch.nn.modules.batchnorm.BatchNorm2d": [
"weight",
"bias",
"running_mean",
"running_var",
"eps",
],
}
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 2)
self.bn = torch.nn.BatchNorm2d(3)
def forward(self, a):
a = self.conv(a)
a += a
return self.bn(a)
mod = TestModule()
traced = symbolic_trace(mod)
lift_lowering_attrs_to_nodes(traced)
for node in traced.graph.nodes:
if node.op == "call_module":
assert hasattr(node, "attrs_for_lowering")
para_list = attrs_for_lowering[node.attrs_for_lowering["name"]]
                # node.attrs_for_lowering has an additional field for the class name
assert len(para_list) + 1 == len(node.attrs_for_lowering)
for p_name in para_list:
assert p_name in node.attrs_for_lowering
def test_merge_matmuls(self):
"""
A collection of test cases for torch.fx.experimental.merge_matmul,
a graph transformation that merges matrix multiplication operations.
"""
# Utility function for counting matmuls for test assertions.
def _count_matmuls(mod):
gm = torch.fx.symbolic_trace(mod)
num_matmuls = 0
for node in gm.graph.nodes:
if node.target == torch.matmul:
num_matmuls += 1
return num_matmuls
# Simple test case in which there are two matmuls of the same size to merge.
class SimpleMergeMatmulModule(torch.nn.Module):
def __init__(self, rhs):
super().__init__()
self.rhs = rhs
def forward(self, x, y):
a = torch.matmul(x, self.rhs)
b = torch.matmul(y, self.rhs)
return a + b
# Initialize inputs.
a = torch.randn(3, 3)
b = torch.randn(3, 3)
# Initialize RHS for matmuls.
rhs = torch.randn(3, 4)
# Construct SimpleMergeMatmulModule and call merge_matmul on it.
module = SimpleMergeMatmulModule(rhs)
opt_module = merge_matmul.merge_matmul(module)
# Numerical correctness check.
before = module(a, b)
after = opt_module(a, b)
        self.assertTrue(before.allclose(after))
# Basic graph structure check; original module should have 2 matmuls
# and optimized module should have 1.
self.assertEqual(_count_matmuls(module), 2)
self.assertEqual(_count_matmuls(opt_module), 1)
# Test case in which there are multiple matmuls of different sizes to merge.
class FiveMergeMatmulModule(torch.nn.Module):
def __init__(self, rhs):
super().__init__()
self.rhs = rhs
def forward(self, a, b, c, d, e):
s = torch.tensor([])
matmuls = []
# For some reason using a list comprehension or for-loop for this
# doesn't work.
matmuls.append(torch.matmul(a, self.rhs))
matmuls.append(torch.matmul(b, self.rhs))
matmuls.append(torch.matmul(c, self.rhs))
matmuls.append(torch.matmul(d, self.rhs))
matmuls.append(torch.matmul(e, self.rhs))
for m in matmuls:
s += torch.sum(m)
return s
# Initialize inputs.
inputs = [torch.randn(2 * i + 1, 5) for i in range(5)]
# Initialize RHS.
rhs = torch.randn(5, 4)
# Construct FiveMergeMatmulModule and call merge_matmul on it.
module = FiveMergeMatmulModule(rhs)
opt_module = merge_matmul.merge_matmul(module)
# Numerical correctness check.
before = module(*inputs)
after = opt_module(*inputs)
        self.assertTrue(before.allclose(after))
# Basic graph structure check; original module should have len(inputs) matmuls
# and optimized module should have 1.
self.assertEqual(_count_matmuls(module), len(inputs))
self.assertEqual(_count_matmuls(opt_module), 1)
# Simple test case in which two matmuls cannot be merged due to a data dependency between
# the LHS operands.
class UnmergeableMatmulModule(torch.nn.Module):
def __init__(self, rhs):
super().__init__()
self.rhs = rhs
def forward(self, x):
a = torch.matmul(x, self.rhs)
a_abs = torch.abs(a)
b = torch.matmul(a_abs.transpose(1, 0), self.rhs)
return b
# Initialize inputs.
a = torch.randn(3, 3)
# Initialize RHS for matmuls.
rhs = torch.randn(3, 4)
# Construct UnmergeableMatmulModule and call merge_matmul on it.
module = UnmergeableMatmulModule(rhs)
opt_module = merge_matmul.merge_matmul(module)
# Numerical correctness check.
before = module(a)
after = opt_module(a)
        self.assertTrue(before.allclose(after))
        # Basic graph structure check; the number of matrix multiplications should not have changed.
self.assertEqual(_count_matmuls(module), 2)
self.assertEqual(_count_matmuls(opt_module), 2)
def test_type_matches(self):
should_be_equal = [
(int, type(5)),
(numbers.Number, type(5)),
(numbers.Number, type(5.0)),
(int, type(torch.float)),
(Union[int, float], type(5)),
(Union[int, float], type(5.0)),
(List[int], type(5)),
(List[int], create_type_hint([int, int])),
(List[int], create_type_hint((int, int))),
(List[torch.Tensor], create_type_hint([torch.Tensor, torch.Tensor])),
(
List[torch.Tensor],
create_type_hint([torch.nn.Parameter, torch.nn.Parameter]),
),
(torch.Tensor, torch.nn.Parameter),
(List[torch.Tensor], create_type_hint([torch.nn.Parameter, torch.Tensor])),
(List[torch.Tensor], create_type_hint([torch.Tensor, torch.nn.Parameter])),
(List[torch.Tensor], create_type_hint((torch.Tensor, torch.Tensor))),
(
List[torch.Tensor],
create_type_hint((torch.nn.Parameter, torch.nn.Parameter)),
),
(torch.Tensor, torch.nn.Parameter),
(List[torch.Tensor], create_type_hint((torch.nn.Parameter, torch.Tensor))),
(List[torch.Tensor], create_type_hint((torch.Tensor, torch.nn.Parameter))),
(Optional[List[torch.Tensor]], List[torch.Tensor]),
(Optional[List[int]], List[int]),
]
for sig_type, arg_type in should_be_equal:
self.assertTrue(type_matches(sig_type, arg_type))
should_fail = [
(int, float),
(Union[int, float], str),
(List[torch.Tensor], List[int]),
]
for sig_type, arg_type in should_fail:
self.assertFalse(type_matches(sig_type, arg_type))
@skipIfNoMkldnn
def test_optimize_for_inference_cpu(self):
import torch.nn as nn
class Foo(nn.Module):
def __init__(self):
super().__init__()
layers = []
layers2 = []
for _ in range(10):
layers.append(nn.Conv2d(3, 3, 1))
layers.append(nn.BatchNorm2d(3))
layers.append(nn.ReLU())
layers2.append(nn.Conv2d(3, 3, 1))
layers2.append(nn.BatchNorm2d(3))
layers2.append(nn.ReLU())
self.model = nn.Sequential(*layers)
self.model2 = nn.Sequential(*layers2)
def forward(self, x):
return self.model(x) + self.model2(x)
        N, C, H, W = 1, 3, 224, 224
inp = torch.randn(N, C, H, W)
with torch.no_grad():
model = Foo().eval()
optimized_model = optimization.optimize_for_inference(model)
torch.testing.assert_close(model(inp), optimized_model(inp))
optimized_model2 = optimization.optimize_for_inference(
model, pass_config={"remove_dropout": False}
)
torch.testing.assert_close(model(inp), optimized_model2(inp))
@skipIfNoTorchVision
@skipIfNoMkldnn
def test_optimize_for_inference_cpu_torchvision(self):
models = [
torchvision.models.resnet18,
torchvision.models.resnet50,
torchvision.models.densenet121,
torchvision.models.shufflenet_v2_x1_0,
torchvision.models.vgg16,
torchvision.models.mobilenet_v2,
torchvision.models.mnasnet1_0,
torchvision.models.resnext50_32x4d,
]
with torch.no_grad():
for model_type in models:
model = model_type()
C, H, W, = (
3,
224,
224,
)
inp = torch.randn(3, C, H, W)
model(inp)
model.eval()
inp = torch.randn(1, C, H, W)
heuristic = optimization.gen_mkl_autotuner(inp, iters=0, warmup=0)
optimized_model = optimization.optimize_for_inference(model)
orig_out = model(inp)
new_out = optimized_model(inp)
torch.testing.assert_close(orig_out, new_out)
class TestNormalizeOperators(JitTestCase):
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_normalize_operator_exhaustive(self, device, dtype, op):
        # These ops currently don't trace in FX for various reasons (e.g. they take a list of tensors)
fx_fail = {"cat", "stack", "hstack", "vstack", "dstack", "linalg.multi_dot"}
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
unsupported_arg_type = False
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
arg_types = []
kwarg_types = {}
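            # Use TorchScript's type inference to recover a Python type hint for
            # non-tensor arguments (ints, floats, lists, etc.).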
def jit_infer_type(v):
inferred_arg_type = torch._C._jit_try_infer_type(v)
assert inferred_arg_type.success()
t = _torchscript_type_to_python_type(inferred_arg_type.type())
return t
for v in arg_values:
if isinstance(v, torch.Tensor):
arg_types.append(type(v))
else:
if isinstance(v, complex):
# Complex type not supported in FX
unsupported_arg_type = True
arg_types.append(jit_infer_type(v))
for k, v in kwarg_values.items():
if isinstance(v, torch.Tensor):
kwarg_types[k] = type(v)
else:
if isinstance(v, complex):
# Complex type not supported in FX
unsupported_arg_type = True
kwarg_types[k] = jit_infer_type(v)
if unsupported_arg_type:
continue
# Test normalize_function by itself
ref_out = op.op(*arg_values, **kwarg_values)
norm_args_and_kwargs = normalize_function(
op.op, arg_values, kwarg_values, arg_types, kwarg_types
)
if norm_args_and_kwargs is None:
raise RuntimeError(
"""
FX failed to normalize op - add the op to the op_skip list.
A common reason is if your OpInfo was implemented with a lambda
- otherwise, file an issue
"""
)
test_out = op.op(*norm_args_and_kwargs.args, **norm_args_and_kwargs.kwargs)
self.assertEqual(test_out, ref_out)
# Test normalized_arguments as part of FX
if op.name in fx_fail:
continue
param_names = []
param_values = []
fx_args = []
for idx, v in enumerate(arg_values):
if isinstance(v, torch.Tensor):
param_names.append(f"arg_{idx}")
param_values.append(v)
fx_args.append(param_names[-1])
else:
fx_args.append(f"{repr(v)}")
for k, v in kwarg_values.items():
if isinstance(v, torch.Tensor):
param_names.append(k)
param_values.append(v)
fx_args.append(f"{k} = {k}")
else:
fx_args.append(f"{k} = {repr(v)}")
code = f"""
class TestModule(torch.nn.Module):
def forward(self, {', '.join(param_names)}):
return torch.{op.name}({', '.join(fx_args)})
"""
g = {"torch": torch, "inf": math.inf}
exec(code, g)
TestModule = g["TestModule"]
m = TestModule()
traced = torch.fx.symbolic_trace(m)
ref_out = traced(*param_values)
for node in traced.graph.nodes:
if node.op == "call_function":
normalized_args = node.normalized_arguments(
traced, arg_types, kwarg_types
)
assert normalized_args
node.args = normalized_args.args
node.kwargs = normalized_args.kwargs
traced.recompile()
test_out = traced(*param_values)
self.assertEqual(test_out, ref_out)
def test_normalize_quantized_eb(self):
target = torch.ops.quantized.embedding_bag_byte_rowwise_offsets
args = (
torch.empty((2, 3), dtype=torch.uint8),
torch.empty((2,), dtype=torch.int64),
torch.empty((2,), dtype=torch.int64),
)
norm_args_and_kwargs = normalize_function(
target, args, normalize_to_only_use_kwargs=True
)
self.assertTrue(norm_args_and_kwargs is not None)
self.assertEqual(
set(norm_args_and_kwargs.kwargs.keys()),
{
"weight",
"indices",
"offsets",
"scale_grad_by_freq",
"mode",
"pruned_weights",
"per_sample_weights",
"compressed_indices_mapping",
"include_last_offset",
},
)
self.assertEqual(norm_args_and_kwargs.args, tuple())
def test_normalize_args_op_overload(self):
for target in [torch.ops.aten.resize_as_.default, torch.ops.aten.resize_as_]:
inp1 = torch.rand([1])
inp2 = torch.rand([4])
args, kwargs = normalize_function(target, (inp1,), {"the_template": inp2}, normalize_to_only_use_kwargs=True)
self.assertIs(kwargs["input"], inp1)
self.assertIs(kwargs["the_template"], inp2)
instantiate_device_type_tests(TestNormalizeOperators, globals())
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/test_fx_experimental.py
|
# Owner(s): ["module: tests"]
import torch
import numpy as np
import itertools
from itertools import product
import math
import random
from numbers import Number
import unittest
import warnings
import operator
from functools import partial
import torch.autograd.forward_ad as fwAD
from torch._six import inf, nan
from torch.testing._internal.common_utils import (
TestCase,
slowTest,
iter_indices,
TEST_WITH_ASAN,
run_tests,
gradcheck,
torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict,
TEST_SCIPY,
set_default_dtype,
)
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
dtypes,
dtypesIfCUDA,
dtypesIfCPU,
deviceCountAtLeast,
precisionOverride,
onlyNativeDeviceTypes,
skipIf,
ops,
OpDTypes,
skipMeta,
)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
all_types_and,
integral_types,
complex_types,
integral_types_and,
floating_types_and,
floating_and_complex_types,
get_all_math_dtypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs,
binary_ufuncs_and_refs,
generate_elementwise_binary_tensors,
generate_elementwise_binary_small_value_tensors,
generate_elementwise_binary_large_value_tensors,
generate_elementwise_binary_extremal_value_tensors,
generate_elementwise_binary_broadcasting_tensors,
generate_elementwise_binary_with_scalar_samples,
generate_elementwise_binary_with_scalar_and_type_promotion_samples,
)
if TEST_SCIPY:
import scipy.special
import scipy.integrate
# TODO: update to use opinfos consistently
class TestBinaryUfuncs(TestCase):
# Generic tests for elementwise binary (AKA binary universal (u) functions (funcs))
# TODO: below contiguous tensor results are compared with a variety of noncontiguous results.
# It would be interesting to have the lhs and rhs have different discontiguities.
# Helper for comparing torch tensors and NumPy arrays
# TODO: should this or assertEqual also validate that strides are equal?
def assertEqualHelper(
self, actual, expected, msg, *, dtype, exact_dtype=True, **kwargs
):
assert isinstance(actual, torch.Tensor)
# Some NumPy functions return scalars, not arrays
if isinstance(expected, Number):
self.assertEqual(actual.item(), expected, msg=msg, **kwargs)
elif isinstance(expected, np.ndarray):
# Handles exact dtype comparisons between arrays and tensors
if exact_dtype:
# Allows array dtype to be float32 when comparing with bfloat16 tensors
# since NumPy doesn't support the bfloat16 dtype
# Also ops like scipy.special.erf, scipy.special.erfc, etc, promote float16
# to float32
if expected.dtype == np.float32:
assert actual.dtype in (
torch.float16,
torch.bfloat16,
torch.float32,
)
else:
assert expected.dtype == torch_to_numpy_dtype_dict[actual.dtype]
self.assertEqual(
actual,
torch.from_numpy(expected).to(actual.dtype),
msg,
exact_device=False,
**kwargs,
)
else:
self.assertEqual(actual, expected, msg, exact_device=False, **kwargs)
# Tests that the function and its (array-accepting) reference produce the same
# values on given tensors
def _test_reference_numerics(self, dtype, op, gen, equal_nan=True):
def _helper_reference_numerics(
expected, actual, msg, exact_dtype, equal_nan=True
):
if not torch.can_cast(
numpy_to_torch_dtype_dict[expected.dtype.type], dtype
):
exact_dtype = False
if dtype is torch.bfloat16 and expected.dtype == np.float32:
# Ref: https://github.com/pytorch/pytorch/blob/master/torch/testing/_internal/common_utils.py#L1149
self.assertEqualHelper(
actual,
expected,
msg,
dtype=dtype,
exact_dtype=exact_dtype,
rtol=16e-3,
atol=1e-5,
)
else:
self.assertEqualHelper(
actual,
expected,
msg,
dtype=dtype,
equal_nan=equal_nan,
exact_dtype=exact_dtype,
)
for sample in gen:
# Each sample input acquired from the generator is just one lhs tensor
# and one rhs tensor
l = sample.input
r = sample.args[0]
numpy_sample = sample.numpy()
l_numpy = numpy_sample.input
r_numpy = numpy_sample.args[0]
actual = op(l, r)
expected = op.ref(l_numpy, r_numpy)
# Crafts a custom error message for smaller, printable tensors
def _numel(x):
if isinstance(x, torch.Tensor):
return x.numel()
# Assumes x is a scalar
return 1
if _numel(l) <= 100 and _numel(r) <= 100:
msg = (
"Failed to produce expected results! Input lhs tensor was"
" {0}, rhs tensor was {1}, torch result is {2}, and reference result is"
" {3}."
).format(l, r, actual, expected)
else:
msg = None
exact_dtype = True
if isinstance(actual, torch.Tensor):
_helper_reference_numerics(
expected, actual, msg, exact_dtype, equal_nan
)
else:
for x, y in zip(expected, actual):
# testing multi-outputs results
_helper_reference_numerics(x, y, msg, exact_dtype, equal_nan)
# The following tests only apply to elementwise binary operators with references
binary_ufuncs_with_references = list(
        filter(lambda op: op.ref is not None, binary_ufuncs)
)
@ops(binary_ufuncs_with_references)
def test_reference_numerics(self, device, dtype, op):
gen = generate_elementwise_binary_tensors(op, device=device, dtype=dtype)
self._test_reference_numerics(dtype, op, gen, equal_nan=True)
# runtime error: 128 is outside the range of representable values of type 'signed char'
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@ops(binary_ufuncs_with_references)
def test_reference_numerics_small_values(self, device, dtype, op):
if dtype is torch.bool:
self.skipTest("Doesn't support bool!")
gen = generate_elementwise_binary_small_value_tensors(
op, device=device, dtype=dtype
)
self._test_reference_numerics(dtype, op, gen, equal_nan=True)
# TODO: review if this skip is necessary
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@ops(
binary_ufuncs_with_references,
allowed_dtypes=(
torch.int16,
torch.int32,
torch.int64,
torch.float16,
torch.bfloat16,
torch.float32,
torch.float64,
torch.complex64,
torch.complex128,
),
)
def test_reference_numerics_large_values(self, device, dtype, op):
gen = generate_elementwise_binary_large_value_tensors(
op, device=device, dtype=dtype
)
self._test_reference_numerics(dtype, op, gen, equal_nan=True)
# TODO: review if this skip is necessary
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@ops(
binary_ufuncs_with_references,
allowed_dtypes=(
torch.float16,
torch.bfloat16,
torch.float32,
torch.float64,
torch.complex64,
torch.complex128,
),
)
def test_reference_numerics_extremal_values(self, device, dtype, op):
gen = generate_elementwise_binary_extremal_value_tensors(
op, device=device, dtype=dtype
)
self._test_reference_numerics(dtype, op, gen, equal_nan=True)
# tests broadcasting and noncontiguous broadcasting behavior
@ops(
binary_ufuncs_with_references,
allowed_dtypes=(
torch.long,
torch.float32,
),
)
def test_broadcasting(self, device, dtype, op):
gen = generate_elementwise_binary_broadcasting_tensors(
op, device=device, dtype=dtype
)
self._test_reference_numerics(dtype, op, gen, equal_nan=True)
@ops(
binary_ufuncs_with_references,
allowed_dtypes=(torch.long, torch.float32, torch.complex64),
)
def test_scalar_support(self, device, dtype, op):
gen = generate_elementwise_binary_with_scalar_samples(
op, device=device, dtype=dtype
)
self._test_reference_numerics(dtype, op, gen, equal_nan=True)
gen = generate_elementwise_binary_with_scalar_and_type_promotion_samples(
op, device=device, dtype=dtype
)
self._test_reference_numerics(dtype, op, gen, equal_nan=True)
@ops(binary_ufuncs)
def test_contig_vs_every_other(self, device, dtype, op):
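        # Applying the op to every-other-element strided views must match slicing
        # the result of the op on the contiguous tensors.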
lhs = make_tensor(
(1026,), device=device, dtype=dtype, **op.lhs_make_tensor_kwargs
)
rhs = make_tensor(
(1026,), device=device, dtype=dtype, **op.rhs_make_tensor_kwargs
)
lhs_non_contig = lhs[::2]
rhs_non_contig = rhs[::2]
self.assertTrue(lhs.is_contiguous())
self.assertTrue(rhs.is_contiguous())
self.assertFalse(lhs_non_contig.is_contiguous())
self.assertFalse(rhs_non_contig.is_contiguous())
expected = op(lhs, rhs)[::2]
actual = op(lhs_non_contig, rhs_non_contig)
self.assertEqual(expected, actual)
@ops(binary_ufuncs)
def test_contig_vs_transposed(self, device, dtype, op):
lhs = make_tensor(
(789, 357), device=device, dtype=dtype, **op.lhs_make_tensor_kwargs
)
rhs = make_tensor(
(789, 357), device=device, dtype=dtype, **op.rhs_make_tensor_kwargs
)
lhs_non_contig = lhs.T
rhs_non_contig = rhs.T
self.assertTrue(lhs.is_contiguous())
self.assertTrue(rhs.is_contiguous())
self.assertFalse(lhs_non_contig.is_contiguous())
self.assertFalse(rhs_non_contig.is_contiguous())
expected = op(lhs, rhs).T
actual = op(lhs_non_contig, rhs_non_contig)
self.assertEqual(expected, actual)
@ops(binary_ufuncs)
def test_non_contig(self, device, dtype, op):
shapes = ((5, 7), (1024,))
for shape in shapes:
lhs = make_tensor(
shape, dtype=dtype, device=device, **op.lhs_make_tensor_kwargs
)
rhs = make_tensor(
shape, dtype=dtype, device=device, **op.rhs_make_tensor_kwargs
)
lhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[
..., 0
]
lhs_non_contig.copy_(lhs)
rhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[
..., 0
]
rhs_non_contig.copy_(rhs)
self.assertTrue(lhs.is_contiguous())
self.assertTrue(rhs.is_contiguous())
self.assertFalse(lhs_non_contig.is_contiguous())
self.assertFalse(rhs_non_contig.is_contiguous())
expected = op(lhs, rhs)
actual = op(lhs_non_contig, rhs_non_contig)
self.assertEqual(expected, actual)
@ops(binary_ufuncs)
def test_non_contig_index(self, device, dtype, op):
shape = (2, 2, 1, 2)
lhs = make_tensor(
shape, dtype=dtype, device=device, **op.lhs_make_tensor_kwargs
)
rhs = make_tensor(
shape, dtype=dtype, device=device, **op.rhs_make_tensor_kwargs
)
lhs_non_contig = lhs[:, 1, ...]
lhs = lhs_non_contig.contiguous()
rhs_non_contig = rhs[:, 1, ...]
rhs = rhs_non_contig.contiguous()
self.assertTrue(lhs.is_contiguous())
self.assertTrue(rhs.is_contiguous())
self.assertFalse(lhs_non_contig.is_contiguous())
self.assertFalse(rhs_non_contig.is_contiguous())
expected = op(lhs, rhs)
actual = op(lhs_non_contig, rhs_non_contig)
self.assertEqual(expected, actual)
@ops(binary_ufuncs)
def test_non_contig_expand(self, device, dtype, op):
shapes = [(1, 3), (1, 7), (5, 7)]
for shape in shapes:
lhs = make_tensor(
shape, dtype=dtype, device=device, **op.lhs_make_tensor_kwargs
)
rhs = make_tensor(
shape, dtype=dtype, device=device, **op.rhs_make_tensor_kwargs
)
lhs_non_contig = lhs.clone().expand(3, -1, -1)
rhs_non_contig = rhs.clone().expand(3, -1, -1)
self.assertTrue(lhs.is_contiguous())
self.assertTrue(rhs.is_contiguous())
self.assertFalse(lhs_non_contig.is_contiguous())
self.assertFalse(rhs_non_contig.is_contiguous())
expected = op(lhs, rhs)
actual = op(lhs_non_contig, rhs_non_contig)
for i in range(3):
self.assertEqual(expected, actual[i])
@ops(binary_ufuncs)
def test_contig_size1(self, device, dtype, op):
shape = (5, 100)
lhs = make_tensor(
shape, dtype=dtype, device=device, **op.lhs_make_tensor_kwargs
)
rhs = make_tensor(
shape, dtype=dtype, device=device, **op.rhs_make_tensor_kwargs
)
lhs = lhs[:1, :50]
lhs_alt = torch.empty(lhs.size(), device=device, dtype=dtype)
lhs_alt.copy_(lhs)
rhs = rhs[:1, :50]
rhs_alt = torch.empty(rhs.size(), device=device, dtype=dtype)
rhs_alt.copy_(rhs)
self.assertTrue(lhs.is_contiguous())
self.assertTrue(rhs.is_contiguous())
self.assertTrue(lhs_alt.is_contiguous())
self.assertTrue(rhs_alt.is_contiguous())
expected = op(lhs, rhs)
actual = op(lhs_alt, rhs_alt)
self.assertEqual(expected, actual)
@ops(binary_ufuncs)
def test_contig_size1_large_dim(self, device, dtype, op):
shape = (5, 2, 3, 1, 4, 5, 3, 2, 1, 2, 3, 4)
lhs = make_tensor(
shape, dtype=dtype, device=device, **op.lhs_make_tensor_kwargs
)
rhs = make_tensor(
shape, dtype=dtype, device=device, **op.rhs_make_tensor_kwargs
)
lhs = lhs[:1, :, :, :, :, :, :, :, :, :, :, :]
lhs_alt = torch.empty(lhs.size(), device=device, dtype=dtype)
lhs_alt.copy_(lhs)
rhs = rhs[:1, :, :, :, :, :, :, :, :, :, :, :]
rhs_alt = torch.empty(rhs.size(), device=device, dtype=dtype)
rhs_alt.copy_(rhs)
self.assertTrue(lhs.is_contiguous())
self.assertTrue(rhs.is_contiguous())
self.assertTrue(lhs_alt.is_contiguous())
self.assertTrue(rhs_alt.is_contiguous())
expected = op(lhs, rhs)
actual = op(lhs_alt, rhs_alt)
self.assertEqual(expected, actual)
@ops(binary_ufuncs)
def test_batch_vs_slicing(self, device, dtype, op):
shape = (32, 512)
lhs = make_tensor(
shape, dtype=dtype, device=device, **op.lhs_make_tensor_kwargs
)
rhs = make_tensor(
shape, dtype=dtype, device=device, **op.rhs_make_tensor_kwargs
)
expected = op(lhs, rhs)
actual = []
for idx in range(32):
actual.append(op(lhs[idx], rhs[idx]))
actual = torch.stack(actual)
self.assertEqual(expected, actual)
# Tests that elementwise binary operators participate in type promotion properly
# NOTE: because the cross-product of all possible type promotion tests is huge, this
# just spot checks some handwritten cases.
# NOTE: It may be possible to refactor this test into something simpler
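    # For reference, the promotion rules spot-checked below are, roughly:
    #   int16 x int32 -> int32 and int32 x int64 -> int64 (wider integer wins),
    #   float32 x float64 -> float64, complex64 x complex128 -> complex128,
    #   int x float -> the float dtype, float x complex -> a complex dtype whose
    #   "value type" covers both operands. Comparison ops always return bool, and
    #   ops that promote ints to float use the default float dtype.
    # See torch.result_type for the authoritative behavior.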
@ops(binary_ufuncs_and_refs, dtypes=OpDTypes.none)
def test_type_promotion(self, device, op):
supported_dtypes = op.supported_dtypes(torch.device(device).type)
make_lhs = partial(
make_tensor, (5,), device=device, **op.lhs_make_tensor_kwargs
)
make_rhs = partial(
make_tensor, (5,), device=device, **op.rhs_make_tensor_kwargs
)
make_lhs_scalar_tensor = partial(
make_tensor, (), device='cpu', **op.lhs_make_tensor_kwargs
)
make_rhs_scalar_tensor = partial(
make_tensor, (), device='cpu', **op.rhs_make_tensor_kwargs
)
def _supported(dtypes):
            return all(x in supported_dtypes for x in dtypes)
# int x int type promotion
if _supported((torch.int16, torch.int32, torch.int64)):
lhs_i16 = make_lhs(dtype=torch.int16)
lhs_i32 = make_lhs(dtype=torch.int32)
lhs_i64 = make_lhs(dtype=torch.int64)
rhs_i16 = make_rhs(dtype=torch.int16)
rhs_i32 = make_rhs(dtype=torch.int32)
rhs_i64 = make_rhs(dtype=torch.int64)
if op.promotes_int_to_float:
default_dtype = torch.get_default_dtype()
self.assertEqual(op(lhs_i16, rhs_i32).dtype, default_dtype)
self.assertEqual(
op(lhs_i16, rhs_i32),
op(lhs_i16.to(default_dtype), rhs_i32.to(default_dtype)),
)
self.assertEqual(op(lhs_i32, rhs_i64).dtype, default_dtype)
self.assertEqual(
op(lhs_i32, rhs_i64),
op(lhs_i32.to(default_dtype), rhs_i64.to(default_dtype)),
)
elif op.always_returns_bool:
self.assertEqual(op(lhs_i16, rhs_i32).dtype, torch.bool)
self.assertEqual(op(lhs_i32, rhs_i64).dtype, torch.bool)
else: # standard type promotion
self.assertEqual(op(lhs_i16, rhs_i32).dtype, torch.int32)
self.assertEqual(
op(lhs_i16, rhs_i32), op(lhs_i16.to(torch.int32), rhs_i32)
)
self.assertEqual(op(lhs_i32, rhs_i64).dtype, torch.int64)
self.assertEqual(
op(lhs_i32, rhs_i64), op(lhs_i32.to(torch.int64), rhs_i64)
)
if op.supports_out:
if not op.promotes_int_to_float:
# Integers can be safely cast to other integer types
out = torch.empty_like(lhs_i64)
self.assertEqual(op(lhs_i16, rhs_i32, out=out).dtype, torch.int64)
self.assertEqual(op(lhs_i16, rhs_i32), out, exact_dtype=False)
out = torch.empty_like(lhs_i16)
self.assertEqual(op(lhs_i32, rhs_i64, out=out).dtype, torch.int16)
else:
# Float outs cannot be safely cast to integer types
with self.assertRaisesRegex(RuntimeError, "can't be cast"):
op(lhs_i16, rhs_i32, out=torch.empty_like(lhs_i64))
if not op.always_returns_bool:
# Neither integer nor float outs can be cast to bool
with self.assertRaisesRegex(RuntimeError, "can't be cast"):
op(
lhs_i16,
rhs_i32,
out=torch.empty_like(lhs_i64, dtype=torch.bool),
)
# All these output types can be cast to any float or complex type
out = torch.empty_like(lhs_i64, dtype=torch.float16)
self.assertEqual(op(lhs_i16, rhs_i32, out=out).dtype, torch.float16)
out = torch.empty_like(lhs_i64, dtype=torch.bfloat16)
self.assertEqual(op(lhs_i16, rhs_i32, out=out).dtype, torch.bfloat16)
out = torch.empty_like(lhs_i64, dtype=torch.float32)
self.assertEqual(op(lhs_i16, rhs_i32, out=out).dtype, torch.float32)
self.assertEqual(op(lhs_i16, rhs_i32), out, exact_dtype=False)
out = torch.empty_like(lhs_i64, dtype=torch.complex64)
self.assertEqual(op(lhs_i16, rhs_i32, out=out).dtype, torch.complex64)
self.assertEqual(op(lhs_i16, rhs_i32), out, exact_dtype=False)
# float x float type promotion
if _supported((torch.float32, torch.float64)):
lhs_f32 = make_lhs(dtype=torch.float32)
lhs_f64 = make_lhs(dtype=torch.float64)
rhs_f32 = make_rhs(dtype=torch.float32)
rhs_f64 = make_rhs(dtype=torch.float64)
if op.always_returns_bool:
self.assertEqual(op(lhs_f32, rhs_f64).dtype, torch.bool)
else: # normal float type promotion
self.assertEqual(op(lhs_f32, rhs_f64).dtype, torch.float64)
self.assertEqual(
op(lhs_f32, rhs_f64), op(lhs_f32.to(torch.float64), rhs_f64)
)
if op.supports_out:
# All these output types can be cast to any float or complex type
out = torch.empty_like(lhs_f64, dtype=torch.float16)
self.assertEqual(op(lhs_f32, rhs_f64, out=out).dtype, torch.float16)
out = torch.empty_like(lhs_f64, dtype=torch.bfloat16)
self.assertEqual(op(lhs_f32, rhs_f64, out=out).dtype, torch.bfloat16)
self.assertEqual(op(lhs_f32, rhs_f64), out, exact_dtype=False)
out = torch.empty_like(lhs_f64, dtype=torch.float32)
self.assertEqual(op(lhs_f32, rhs_f64, out=out).dtype, torch.float32)
self.assertEqual(op(lhs_f32, rhs_f64), out, exact_dtype=False)
out = torch.empty_like(lhs_f64, dtype=torch.complex64)
self.assertEqual(op(lhs_f32, rhs_f64, out=out).dtype, torch.complex64)
self.assertEqual(op(lhs_f32, rhs_f64), out, exact_dtype=False)
if not op.always_returns_bool:
# float outs can't be cast to an integer dtype
with self.assertRaisesRegex(RuntimeError, "can't be cast"):
op(
lhs_f32,
rhs_f64,
out=torch.empty_like(lhs_f64, dtype=torch.int64),
)
else:
# bool outs can be cast to an integer dtype
out = torch.empty_like(lhs_f64, dtype=torch.int64)
self.assertEqual(op(lhs_f32, rhs_f64, out=out).dtype, torch.int64)
self.assertEqual(op(lhs_f32, rhs_f64), out, exact_dtype=False)
# complex x complex type promotion
if _supported((torch.complex64, torch.complex128)):
lhs_c64 = make_lhs(dtype=torch.complex64)
lhs_c128 = make_lhs(dtype=torch.complex128)
rhs_c64 = make_rhs(dtype=torch.complex64)
rhs_c128 = make_rhs(dtype=torch.complex128)
if op.always_returns_bool:
                self.assertEqual(op(lhs_c64, rhs_c128).dtype, torch.bool)
else: # normal complex type promotion
self.assertEqual(op(lhs_c64, rhs_c128).dtype, torch.complex128)
self.assertEqual(
op(lhs_c64, rhs_c128), op(lhs_c64.to(torch.complex128), rhs_c128)
)
if op.supports_out:
                # Complex results can be safely cast to other complex out dtypes
out = torch.empty_like(lhs_c64, dtype=torch.complex64)
self.assertEqual(op(lhs_c64, rhs_c128, out=out).dtype, torch.complex64)
result = op(lhs_c64, rhs_c128)
self.assertEqual(result, out.to(result.dtype))
if not op.always_returns_bool:
# complex outs can't be cast to float types
with self.assertRaisesRegex(RuntimeError, "can't be cast"):
op(
lhs_c64,
rhs_c128,
out=torch.empty_like(lhs_c64, dtype=torch.float64),
)
# complex outs can't be cast to an integer dtype
with self.assertRaisesRegex(RuntimeError, "can't be cast"):
op(
lhs_c64,
rhs_c128,
out=torch.empty_like(lhs_c64, dtype=torch.int64),
)
else:
# bool outs can be cast to a float type
out = torch.empty_like(lhs_c64, dtype=torch.float64)
self.assertEqual(
op(lhs_c64, rhs_c128, out=out).dtype, torch.float64
)
self.assertEqual(op(lhs_c64, rhs_c128), out, exact_dtype=False)
# bool outs can be cast to an integer dtype
                    out = torch.empty_like(lhs_c64, dtype=torch.int64)
                    self.assertEqual(op(lhs_c64, rhs_c128, out=out).dtype, torch.int64)
                    self.assertEqual(op(lhs_c64, rhs_c128), out, exact_dtype=False)
# int x float type promotion
# Note: float type is the result dtype
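        # e.g. an int64 tensor combined with a float32 tensor produces float32
        # (the float operand's dtype), not the default float dtype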
if _supported((torch.long, torch.float32)):
lhs_i64 = make_lhs(dtype=torch.int64)
rhs_f32 = make_rhs(dtype=torch.float32)
result = op(lhs_i64, rhs_f32)
expected_dtype = torch.float32 if not op.always_returns_bool else torch.bool
self.assertEqual(result.dtype, expected_dtype)
# float x complex type promotion
# Note: complex type with highest "value type" is the result dtype
if _supported((torch.float64, torch.complex64)):
lhs_f64 = make_lhs(dtype=torch.float64)
rhs_c64 = make_rhs(dtype=torch.complex64)
result = op(lhs_f64, rhs_c64)
expected_dtype = (
torch.complex128 if not op.always_returns_bool else torch.bool
)
self.assertEqual(result.dtype, expected_dtype)
# int x float scalar type promotion
# Note: default float dtype is the result dtype
if _supported((torch.int64, torch.float32)) and op.supports_rhs_python_scalar:
lhs_i64 = make_lhs(dtype=torch.int64)
rhs_f_scalar = 1.0
result = op(lhs_i64, rhs_f_scalar)
expected_dtype = (
torch.get_default_dtype() if not op.always_returns_bool else torch.bool
)
self.assertEqual(result.dtype, expected_dtype)
# repeats with a scalar float tensor, which should set the dtype
rhs_f32_scalar_tensor = make_rhs_scalar_tensor(dtype=torch.float32)
result = op(lhs_i64, rhs_f32_scalar_tensor)
expected_dtype = torch.float32 if not op.always_returns_bool else torch.bool
self.assertEqual(result.dtype, expected_dtype)
# Additional test with double
if _supported((torch.float64,)):
rhs_f64_scalar_tensor = make_rhs_scalar_tensor(dtype=torch.float64)
result = op(lhs_i64, rhs_f64_scalar_tensor)
expected_dtype = (
torch.float64 if not op.always_returns_bool else torch.bool
)
self.assertEqual(result.dtype, expected_dtype)
# float x complex scalar type promotion
# Note: result dtype is complex with highest "value type" among all tensors
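        # e.g. a float64 tensor combined with a complex64 tensor promotes to
        # complex128, since complex128's value type (float64) covers both operands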
if (
_supported((torch.float32, torch.complex64))
and op.supports_rhs_python_scalar
):
lhs_f32 = make_lhs(dtype=torch.float32)
rhs_c_scalar = complex(1, 1)
result = op(lhs_f32, rhs_c_scalar)
expected_dtype = (
torch.complex64 if not op.always_returns_bool else torch.bool
)
self.assertEqual(result.dtype, expected_dtype)
# repeats with a scalar complex tensor
rhs_c64_scalar_tensor = make_rhs_scalar_tensor(dtype=torch.complex64)
result = op(lhs_f32, rhs_c64_scalar_tensor)
expected_dtype = (
torch.complex64 if not op.always_returns_bool else torch.bool
)
self.assertEqual(result.dtype, expected_dtype)
# Additional test with complexdouble
if _supported((torch.complex128,)):
rhs_c128_scalar_tensor = make_rhs_scalar_tensor(dtype=torch.complex128)
result = op(lhs_f32, rhs_c128_scalar_tensor)
# Value type of 1D+ Tensor (lhs_f32) takes priority over scalar tensor (rhs_c128).
expected_dtype = (
torch.complex64 if not op.always_returns_bool else torch.bool
)
self.assertEqual(result.dtype, expected_dtype)
# float x float scalar tensor
# Note: result dtype is the type of the float tensor
if _supported((torch.float32, torch.float64)) and op.supports_rhs_python_scalar:
lhs_f32 = make_lhs(dtype=torch.float32)
rhs_f64_scalar_tensor = make_rhs_scalar_tensor(dtype=torch.float64)
result = op(lhs_f32, rhs_f64_scalar_tensor)
expected_dtype = torch.float32 if not op.always_returns_bool else torch.bool
self.assertEqual(result.dtype, expected_dtype)
# complex x complex scalar tensor
# Note: result dtype is the type of the complex tensor
if (
_supported((torch.complex64, torch.complex128))
and op.supports_rhs_python_scalar
):
lhs_c64 = make_lhs(dtype=torch.complex64)
rhs_c128_scalar_tensor = make_rhs_scalar_tensor(dtype=torch.complex128)
result = op(lhs_c64, rhs_c128_scalar_tensor)
expected_dtype = (
torch.complex64 if not op.always_returns_bool else torch.bool
)
self.assertEqual(result.dtype, expected_dtype)
# scalar int x scalar float
# Note: result dtype is default float type
# TODO: FIXME: re-enable this, scalar x scalar type promotion is currently broken
# https://github.com/pytorch/pytorch/issues/76801
# if op.supports_two_python_scalars and _supported((torch.long, torch.float32)):
# lhs_i_scalar = 1
# rhs_f_scalar = 2.
# result = op(lhs_i_scalar, rhs_f_scalar)
# expected_dtype = torch.get_default_dtype() if not op.always_returns_bool else torch.bool
# self.assertEqual(result.dtype, expected_dtype)
# TODO: move to error input test
@ops(binary_ufuncs, allowed_dtypes=(torch.float32,))
def test_not_broadcastable(self, device, dtype, op):
for shape_lhs, shape_rhs in (
((2,), (3,)),
((3, 1), (2, 1)),
((1, 3, 2), (3,)),
((3, 1, 2), (2, 1, 2)),
):
lhs = make_tensor(
shape_lhs, device=device, dtype=dtype, **op.lhs_make_tensor_kwargs
)
rhs = make_tensor(
shape_rhs, device=device, dtype=dtype, **op.rhs_make_tensor_kwargs
)
try:
broadcasted_shape = op(lhs, rhs).shape
except RuntimeError:
continue
msg = (
f"On {device}, torch.{op.name} broadcasts inputs shapes {shape_lhs} and {shape_rhs} into "
f"{broadcasted_shape}, although they are not broadcastable."
)
raise AssertionError(msg)
def test_add_broadcast_empty(self, device):
# empty + empty
self.assertRaises(
RuntimeError,
lambda: torch.randn(5, 0, device=device) + torch.randn(0, 5, device=device),
)
self.assertEqual(
torch.randn(5, 0, device=device),
torch.randn(0, device=device) + torch.randn(5, 0, device=device),
)
self.assertEqual(
torch.randn(5, 0, 0, device=device),
torch.randn(0, device=device) + torch.randn(5, 0, 1, device=device),
)
# scalar + empty
self.assertEqual(
torch.randn(5, 0, 6, device=device),
torch.randn((), device=device) + torch.randn(5, 0, 6, device=device),
)
# non-empty, empty
self.assertEqual(
torch.randn(0, device=device),
torch.randn(0, device=device) + torch.randn(1, device=device),
)
self.assertEqual(
torch.randn(0, 7, 0, 6, 5, 0, 7, device=device),
torch.randn(0, 7, 0, 6, 5, 0, 1, device=device)
+ torch.randn(1, 1, 5, 1, 7, device=device),
)
self.assertRaises(
RuntimeError,
lambda: torch.randn(7, 0, device=device) + torch.randn(2, 1, device=device),
)
def test_addcmul_scalars_as_floats(self, device):
# zero-dim variables that don't require grad should bind to scalar arguments
x = torch.tensor(2.0)
y = torch.tensor(3.0, device=device)
# 3 + (3 * 3) * 2
self.assertEqual(y.addcmul(y, y, value=x), 21)
x = torch.tensor(2.0, requires_grad=True)
self.assertRaises(Exception, lambda: y.addcmul(y, y, value=x))
    # Tests that the binary operators and, or, and xor (as well as their reflected and inplace versions)
    # work properly (i.e. &, |, ^ and &=, |=, ^=)
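    # For reference, these Python operators dispatch to torch.bitwise_and/or/xor
    # (which behave as logical and/or/xor on bool tensors), and the results are
    # checked against NumPy below.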
@dtypes(*integral_types_and(torch.bool))
def test_bitwise_ops(self, device, dtype):
# Tensor x Tensor and Tensor x Scalar ops
ops = (
operator.and_,
operator.iand,
operator.or_,
operator.ior,
operator.xor,
operator.ixor,
)
inplace_ops = (operator.iand, operator.ior, operator.ixor)
shapes = ((5,), (15, 15), (500, 500))
for op, shape in itertools.product(ops, shapes):
# Tests tensor x tensor case
a = make_tensor(shape, device=device, dtype=dtype)
b = make_tensor(shape, device=device, dtype=dtype)
a_np = a.cpu().clone().numpy()
b_np = b.cpu().clone().numpy()
self.assertEqual(op(a, b), op(a_np, b_np))
# Tests tensor x scalar case
a = make_tensor(shape, device=device, dtype=dtype)
b_scalar = make_tensor((), device="cpu", dtype=dtype).item()
a_np = a.cpu().clone().numpy()
self.assertEqual(op(a, b_scalar), op(a_np, b_scalar))
# Tests scalar x tensor case
a_scalar = make_tensor((), device="cpu", dtype=dtype).item()
b = make_tensor(shape, device=device, dtype=dtype)
b_np = b.cpu().clone().numpy()
self.assertEqual(op(a_scalar, b), op(a_scalar, b_np))
            # For inplace ops, also verifies that the modified operand matches NumPy
if op in inplace_ops:
# Tests tensor x tensor case
a = make_tensor(shape, device=device, dtype=dtype)
b = make_tensor(shape, device=device, dtype=dtype)
a_np = a.cpu().clone().numpy()
b_np = b.cpu().clone().numpy()
op(a, b)
op(a_np, b_np)
self.assertEqual(a, a_np)
# Tests tensor x scalar case
a = make_tensor(shape, device=device, dtype=dtype)
b_scalar = make_tensor((), device="cpu", dtype=dtype).item()
a_np = a.cpu().clone().numpy()
op(a, b_scalar)
op(a_np, b_scalar)
self.assertEqual(a, a_np)
def test_inplace_division(self, device):
t = torch.rand(5, 5, device=device)
id_before = id(t)
t /= 2
id_after = id(t)
self.assertEqual(id_before, id_after)
@dtypes(*all_types_and(torch.half, torch.bfloat16))
def test_div_rounding_modes(self, device, dtype):
if dtype.is_floating_point:
low, high = -10.0, 10.0
else:
info = torch.iinfo(dtype)
low, high = info.min, info.max
a = make_tensor((100,), dtype=dtype, device=device, low=low, high=high)
b = make_tensor((100,), dtype=dtype, device=device, low=low, high=high)
# Avoid division by zero so we can test (a / b) * b == a
if dtype.is_floating_point:
eps = 0.1
b[(-eps < b) & (b < eps)] = eps
else:
b[b == 0] = 1
if not dtype.is_floating_point:
# floor(a / b) * b can be < a, so fixup slightly to avoid underflow
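            # e.g. for int32 with a == iinfo.min and b == 3, floor(a / b) * b would
            # be -2147483649, one below iinfo.min; adding b to negative values of a
            # helps keep the intermediate product representable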
a = torch.where(a < 0, a + b, a)
d_true = torch.divide(a, b, rounding_mode=None)
self.assertTrue(d_true.is_floating_point())
self.assertEqual(d_true * b, a.to(d_true.dtype))
d_floor = torch.divide(a, b, rounding_mode="floor")
if dtype not in (torch.bfloat16, torch.half):
self.assertEqual(d_floor * b + torch.remainder(a, b), a)
else:
self.assertEqual(
d_floor * b + torch.remainder(a.float(), b.float()),
a,
exact_dtype=False,
)
d_trunc = torch.divide(a, b, rounding_mode="trunc")
        rounding_unsupported = (
            (dtype == torch.half and device != "cuda")
            or (dtype == torch.bfloat16 and device != "cpu")
        )
d_ref = d_true.float() if rounding_unsupported else d_true
self.assertEqual(d_trunc, d_ref.trunc().to(dtype))
@dtypes(torch.bfloat16, torch.half, torch.float32, torch.float64)
def test_div_rounding_nonfinite(self, device, dtype):
# Compare division of special floating point values against NumPy
num = torch.tensor(
[1.0, -1.0, 0, 0.1, -0.1, np.pi, -np.pi, np.inf, -np.inf, np.nan],
dtype=dtype,
)
# Divide by zero is tested separately
denom = num[num != 0]
a, b = num[None, :].clone(), denom[:, None].clone()
# Compare bfloat16 against NumPy float
exact_dtype = dtype != torch.bfloat16
if exact_dtype:
an, bn = a.cpu().numpy(), b.cpu().numpy()
else:
an, bn = a.float().cpu().numpy(), b.float().cpu().numpy()
for mode, np_ref in ((None, np.true_divide), ("floor", np.floor_divide)):
with np.errstate(all="ignore"):
expect = np_ref(an, bn)
kwargs = dict(rounding_mode=mode) if mode is not None else {}
with set_default_dtype(torch.double):
actual = torch.divide(a, b, **kwargs)
self.assertEqual(
actual,
torch.from_numpy(expect),
exact_device=False,
exact_dtype=exact_dtype,
)
# Compare contiguous (likely vectorized) against non-contiguous (not vectorized)
a_noncontig = torch.empty([2 * i for i in a.shape], dtype=dtype, device=device)[
::2, ::2
]
a_noncontig[:] = a
b_noncontig = torch.empty([2 * i for i in b.shape], dtype=dtype, device=device)[
::2, ::2
]
b_noncontig[:] = b
for rounding_mode in (None, "trunc", "floor"):
expect = torch.divide(a_noncontig, b_noncontig, rounding_mode=rounding_mode)
actual = torch.divide(a, b, rounding_mode=rounding_mode)
self.assertEqual(actual, expect)
@dtypes(torch.bfloat16, torch.half, torch.float32, torch.float64)
def test_divide_by_zero_rounding(self, device, dtype):
a = torch.tensor(
[1.0, -1.0, 0, 0.1, -0.1, np.pi, -np.pi, np.inf, -np.inf, np.nan],
dtype=dtype,
)
exact_dtype = dtype != torch.bfloat16
if exact_dtype:
an = a.cpu().numpy()
else:
an = a.float().cpu().numpy()
zero = torch.zeros_like(a)
# NOTE: NumPy's floor_divide rounding changed in 1.20.0 to be consistent with divide
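        # Under IEEE semantics, x / 0 is +/-inf for nonzero x and nan for 0 / 0
        # (and for nan numerators), so both rounding modes are compared against
        # the same true-division reference here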
expect = np.divide(an, 0)
for rounding_mode in (None, "floor"):
# CPU scalar
actual = torch.divide(a, 0, rounding_mode=rounding_mode)
self.assertEqual(actual, expect, exact_dtype=exact_dtype)
# Device tensor
actual = torch.divide(a, zero, rounding_mode=rounding_mode)
self.assertEqual(actual, expect, exact_dtype=exact_dtype)
@dtypes(*all_types_and(torch.half))
def test_div_rounding_numpy(self, device, dtype):
info = torch.finfo(dtype) if dtype.is_floating_point else torch.iinfo(dtype)
low, high = info.min, info.max
# Compare division of random values against NumPy
a = make_tensor((4096,), dtype=dtype, device=device, low=low, high=high)
b = make_tensor((4096,), dtype=dtype, device=device, low=low, high=high)
# Avoid division by zero which raises for integers and, for floats,
# NumPy 1.20 changed floor_divide to follow IEEE rules for inf/nan
# after dividing by zero.
b[b == 0] = 1
# Compare bfloat16 against NumPy float
exact_dtype = dtype != torch.bfloat16
if exact_dtype:
an, bn = a.cpu().numpy(), b.cpu().numpy()
else:
an, bn = a.float().cpu().numpy(), b.float().cpu().numpy()
for mode, np_ref in (
(None, np.true_divide),
("floor", np.floor_divide),
("trunc", lambda a, b: np.trunc(np.true_divide(a, b)).astype(a.dtype)),
):
with np.errstate(all="ignore"):
expect = torch.from_numpy(np_ref(an, bn))
kwargs = dict(rounding_mode=mode) if mode is not None else {}
# Contiguous (likely vectorized)
with set_default_dtype(torch.double):
actual = torch.divide(a, b, **kwargs)
self.assertEqual(
actual, expect, exact_device=False, exact_dtype=exact_dtype
)
# Non-contiguous (not vectorized)
expect = expect[::2]
with set_default_dtype(torch.double):
actual = torch.divide(a[::2], b[::2], **kwargs)
self.assertEqual(
actual, expect, exact_device=False, exact_dtype=exact_dtype
)
    # Tests that in-place addition of a CUDA tensor to a CPU tensor
    # throws the correct error message
@onlyCUDA
def test_cross_device_inplace_error_msg(self, device):
a = torch.tensor(2.0)
b = torch.tensor(2.0, device=device)
with self.assertRaisesRegex(
RuntimeError, "Expected all tensors to be on the same device"
):
a += b
# TODO: refactor this test into a more generic one, it's parked here currently
@onlyNativeDeviceTypes
def test_out_resize_warning(self, device):
a = torch.tensor((1, 2, 3), device=device, dtype=torch.float32)
b = torch.tensor((4, 5, 6), device=device, dtype=torch.float32)
unary_inputs = (a,)
binary_inputs = (a, b)
unary_ops = (torch.ceil, torch.exp)
binary_ops = (torch.add, torch.sub)
for op in unary_ops + binary_ops:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
inputs = unary_inputs if op in unary_ops else binary_inputs
# No warnings
op(*inputs, out=torch.empty(3, device=device))
op(*inputs, out=torch.empty(0, device=device))
self.assertEqual(len(w), 0)
# Cases that throw warnings
op(*inputs, out=torch.empty(2, device=device))
self.assertEqual(len(w), 1)
# Verifies that the inplace dunders (like idiv) actually are in place
@expectedFailureMeta # UserWarning not triggered
@onlyNativeDeviceTypes
def test_inplace_dunders(self, device):
t = torch.randn((1,), device=device)
expected = t.data_ptr()
t += 1
t -= 1
t *= 1
t /= 1
t **= 1
t //= 1
t %= 1
self.assertEqual(expected, t.data_ptr())
def check_internal_mem_overlap(
self, inplace_op, num_inputs, dtype, device, expected_failure=False
):
if isinstance(inplace_op, str):
inplace_op = getattr(torch.Tensor, inplace_op)
input = torch.randn(1, dtype=dtype, device=device).expand(3, 3)
inputs = [input] + [torch.randn_like(input) for i in range(num_inputs - 1)]
if not expected_failure:
with self.assertRaisesRegex(RuntimeError, "single memory location"):
inplace_op(*inputs)
else:
with self.assertRaises(AssertionError):
with self.assertRaisesRegex(RuntimeError, "single memory location"):
inplace_op(*inputs)
def unary_check_input_output_mem_overlap(
self, data, sz, op, expected_failure=False
):
def _test(op, output, input):
output_exp = torch.empty_like(output)
op(input, out=output_exp)
self.assertEqual(op(input, out=output), output_exp, msg=op.__name__)
# output is identical to input:
_test(op, output=data[0:sz], input=data[0:sz])
# output and input are independent:
_test(op, output=data[0:sz], input=data[sz : 2 * sz])
# output partially overlaps with input:
if not expected_failure:
with self.assertRaisesRegex(RuntimeError, "unsupported operation"):
_test(op, data[0:sz], data[1 : sz + 1])
else:
with self.assertRaises(AssertionError):
with self.assertRaisesRegex(RuntimeError, "unsupported operation"):
_test(op, data[0:sz], data[1 : sz + 1])
def binary_check_input_output_mem_overlap(self, op, device, expected_failure=False):
sz = 3
data = torch.randn(2 * sz, device=device)
other = torch.randn(sz, device=device)
self.unary_check_input_output_mem_overlap(
data,
sz,
lambda input, out: op(other, input, out=out),
expected_failure=expected_failure,
)
self.unary_check_input_output_mem_overlap(
data,
sz,
lambda input, out: op(input, other, out=out),
expected_failure=expected_failure,
)
@dtypes(torch.double)
def test_binary_op_mem_overlap(self, device, dtype):
ops = [
("add", True, True, "cpu"),
("add", True, True, "cuda"),
("mul", True, True, "cpu"),
("mul", True, True, "cuda"),
("sub", True, True, "cpu"),
("sub", True, True, "cuda"),
("div", True, True, "cpu"),
("div", True, True, "cuda"),
("pow", True, True, "cpu"),
("pow", True, True, "cuda"),
("fmod", True, True, "cpu"),
("fmod", True, True, "cuda"),
("atan2", True, True, "cpu"),
("atan2", True, True, "cuda"),
("hypot", True, True, "cpu"),
("hypot", True, True, "cuda"),
("igamma", True, True, "cpu"),
("igamma", True, True, "cuda"),
("igammac", True, True, "cpu"),
("igammac", True, True, "cuda"),
("nextafter", True, True, "cpu"),
("nextafter", True, True, "cuda"),
("le", True, True, "cpu"),
("le", True, True, "cuda"),
("lt", True, True, "cpu"),
("lt", True, True, "cuda"),
("ge", True, True, "cpu"),
("ge", True, True, "cuda"),
("gt", True, True, "cpu"),
("gt", True, True, "cuda"),
("eq", True, True, "cpu"),
("eq", True, True, "cuda"),
("ne", True, True, "cpu"),
("ne", True, True, "cuda"),
("logical_and", True, True, "cpu"),
("logical_and", True, True, "cuda"),
("logical_or", True, True, "cpu"),
("logical_or", True, True, "cuda"),
("logical_xor", True, True, "cpu"),
("logical_xor", True, True, "cuda"),
]
for (
fn,
has_input_output_mem_overlap_check,
has_internal_mem_overlap_check,
dev,
) in ops:
if dev != device:
continue
out_op = getattr(torch, fn)
inplace_op = getattr(torch.Tensor, fn + "_")
self.check_internal_mem_overlap(
inplace_op,
2,
dtype,
device,
expected_failure=not has_internal_mem_overlap_check,
)
self.binary_check_input_output_mem_overlap(
out_op, device, expected_failure=not has_input_output_mem_overlap_check
)
def _do_pow_for_exponents(self, m1, exponents, pow_fn, atol):
for num in exponents:
if (
isinstance(num, int)
and num < 0
and not m1.is_floating_point()
and not m1.is_complex()
):
with self.assertRaisesRegex(
RuntimeError,
r"Integers to negative integer powers are not allowed\.",
):
torch.pow(m1[4], num)
else:
# base - tensor, exponent - number
# contiguous
res1 = torch.pow(m1[4], num)
res2 = res1.clone().zero_()
# `math.pow` has issues with complex exponentiation so we need to resort to normal `pow`.
for i in range(res2.size(0)):
res2[i] = pow_fn(m1[4][i], num)
rtol = 0 if atol is not None else None
self.assertEqual(res1, res2, atol=atol, rtol=rtol)
# non-contiguous
res1 = torch.pow(m1[:, 4], num)
res2 = res1.clone().zero_()
for i in range(res2.size(0)):
res2[i] = pow_fn(m1[i, 4], num)
self.assertEqual(res1, res2, atol=atol, rtol=rtol)
# scalar ** tensor to enforce correct handling of dtypes for __rpow__().
expected_dtype = torch.result_type(num, m1)
res1 = num ** m1[4]
res2 = (
torch.tensor(num, dtype=expected_dtype, device=m1.device) ** m1[4]
)
self.assertEqual(res1, res2)
self.assertEqual(res1.dtype, expected_dtype)
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_pow(self, device, dtype):
m1 = torch.empty(0, dtype=dtype, device=device)
if m1.is_floating_point() or m1.is_complex():
m1 = (
make_tensor((100, 100), low=0, high=1, dtype=dtype, device=device) + 0.5
)
else:
# math.pow will overflow and throw exceptions for large integers
range_high = 4 if dtype in (torch.int8, torch.uint8) else 10
m1 = make_tensor(
(100, 100), low=1, high=range_high, dtype=dtype, device=device
)
exponents = [-2.8, -2, -1, -0.5, 0, 0.5, 1, 2, 3, 4, 3.3]
complex_exponents = [
-2.5j,
-1.0j,
0j,
1.0j,
2.5j,
1.0 + 1.0j,
-1.0 - 1.5j,
3.3j,
]
if m1.is_complex():
self._do_pow_for_exponents(m1, exponents + complex_exponents, pow, 10e-4)
else:
self._do_pow_for_exponents(m1, exponents, math.pow, None)
will_raise_error = dtype is torch.half and torch.device(device).type == 'cpu'
if will_raise_error:
            # On CPU, a Half tensor with complex exponents leads to a computation
            # dtype of ComplexHalf, which this op does not yet support
with self.assertRaisesRegex(RuntimeError, "not implemented for 'ComplexHalf'"):
self._do_pow_for_exponents(m1, complex_exponents, pow, 10e-4)
else:
self._do_pow_for_exponents(m1, complex_exponents, pow, 10e-4)
# base - number, exponent - tensor
# contiguous
res1 = torch.pow(3, m1[4])
res2 = res1.clone().zero_()
for i in range(res2.size(0)):
res2[i] = pow(3, m1[4, i])
self.assertEqual(res1, res2)
# non-contiguous
res1 = torch.pow(3, m1[:, 4])
res2 = res1.clone().zero_()
for i in range(res2.size(0)):
res2[i] = pow(3, m1[i][4])
self.assertEqual(res1, res2)
# TODO: refactor all these tests using opinfos properly
def _test_pow(self, base, exponent, np_exponent=None):
if np_exponent is None:
np_exponent = exponent
def to_np(value):
if isinstance(value, torch.Tensor):
return value.cpu().numpy()
return value
try:
np_res = np.power(to_np(base), to_np(np_exponent))
expected = (
torch.from_numpy(np_res)
if isinstance(np_res, np.ndarray)
else torch.tensor(np_res, dtype=base.dtype)
)
except ValueError as e:
err_msg = "Integers to negative integer powers are not allowed."
self.assertEqual(str(e), err_msg)
out = torch.empty_like(base)
test_cases = [
lambda: base.pow(exponent),
lambda: base.pow_(exponent),
lambda: torch.pow(base, exponent),
lambda: torch.pow(base, exponent, out=out),
]
for test_case in test_cases:
self.assertRaisesRegex(RuntimeError, err_msg, test_case)
else:
if isinstance(base, torch.Tensor):
actual = base.pow(exponent)
self.assertEqual(actual, expected.to(actual))
actual = base.clone()
            # When base is a 0-dim cpu tensor and exp is a cuda tensor, we expect `pow` to work but `pow_` to fail, since
            # `pow` will try to create the output tensor on a cuda device, but `pow_` needs to use the cpu tensor as the output
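            # e.g. torch.pow(cpu_scalar_tensor, cuda_tensor) places the result on the
            # cuda device, while cpu_scalar_tensor.pow_(cuda_tensor) would have to write
            # into the cpu tensor and therefore raises the device-mismatch error below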
if (
isinstance(exponent, torch.Tensor)
and base.dim() == 0
and base.device.type == "cpu"
and exponent.device.type == "cuda"
):
regex = "Expected all tensors to be on the same device, but found at least two devices, cuda.* and cpu!"
self.assertRaisesRegex(RuntimeError, regex, base.pow_, exponent)
elif torch.can_cast(torch.result_type(base, exponent), base.dtype):
actual2 = actual.pow_(exponent)
self.assertEqual(actual, expected)
self.assertEqual(actual2, expected)
else:
self.assertRaisesRegex(
RuntimeError,
"Found dtype \\w+ but expected \\w+",
lambda: actual.pow_(exponent),
)
actual = torch.pow(base, exponent)
self.assertEqual(actual, expected.to(actual))
actual2 = torch.pow(base, exponent, out=actual)
self.assertEqual(actual, expected.to(actual))
self.assertEqual(actual2, expected.to(actual))
    # We can potentially merge this into OpInfo, but one blocker is that the
    # first input must be a scalar. It is not as simple as just wrapping this in
    # a lambda that switches the inputs, because we also want to test sample inputs
    # where the second input is a scalar. The wrapper would need some more logic.
def test_pow_scalar_base(self, device):
a = (
torch.arange(1, 13, dtype=torch.double, device=device)
.view(3, 4)
.requires_grad_()
)
gradcheck(lambda a: torch.pow(2, a), (a,))
    # Tests pow() for integral and floating-type tensors with integral and floating-type
    # exponents (tensor or scalar), respectively. Noncontiguous tensors are also tested.
def test_int_and_float_pow(self, device):
def _test_int_and_float_pow(dt, low, high, dev):
test_cases = (
((4, 4), 0, (4, 1)),
((3, 1), 4, (3, 1)),
((2,), 4, (1,)),
((1,), 2, ()),
((513, 513), 4, (513,)),
((5, 5, 5), 5, (5,)),
((), 2, ()),
)
for base_shape, exp_scalar, exp_shape in test_cases:
base_tensor = make_tensor(
base_shape, dtype=dt, device=dev, low=low, high=high
)
# int tensors don't take negative exponents
if dt in [
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
]:
exp_tensor = make_tensor(
exp_shape, dtype=dt, device=dev, low=0, high=high
)
else:
exp_tensor = make_tensor(
exp_shape, dtype=dt, device=dev, low=low, high=high
)
self._test_pow(base_tensor, exp_scalar)
self._test_pow(base_tensor, exp_tensor)
# test non-contiguous tensors as well
base_tensor = make_tensor(
base_shape,
dtype=dt,
device=dev,
low=low,
high=high,
noncontiguous=True,
)
if dt in [
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
]:
exp_tensor = make_tensor(
exp_shape,
dtype=dt,
device=dev,
low=0,
high=high,
noncontiguous=True,
)
else:
exp_tensor = make_tensor(
exp_shape,
dtype=dt,
device=dev,
low=low,
high=high,
noncontiguous=True,
)
self._test_pow(base_tensor, exp_scalar)
self._test_pow(base_tensor, exp_tensor)
_test_int_and_float_pow(torch.int8, -2, 2, device)
_test_int_and_float_pow(torch.uint8, 0, 3, device)
_test_int_and_float_pow(torch.int16, -5, 5, device)
_test_int_and_float_pow(torch.int64, -10, 10, device)
_test_int_and_float_pow(torch.int32, -10, 10, device)
_test_int_and_float_pow(torch.float16, 0.0, 5.0, device)
_test_int_and_float_pow(torch.float32, 0.0, 10.0, device)
_test_int_and_float_pow(torch.float64, 0.0, 10.0, device)
# pow's output would have some NaNs as well
_test_int_and_float_pow(torch.float32, -10.0, 10.0, device)
_test_int_and_float_pow(torch.float64, -10.0, 10.0, device)
    # Tests that a RuntimeError occurs when a base tensor cannot be resized
    # by pow's inplace variant due to PyTorch's broadcasting semantics.
def test_pow_inplace_resizing_exception(self, device):
test_cases = (
((), (3,)),
((2,), (2, 1)),
((2, 1), (2, 2)),
((2, 2), (2, 1, 1)),
)
test_inputs = list(
(
make_tensor(
base_size, dtype=torch.float64, device=device, high=10.0, low=0.0
),
make_tensor(
exp_size, dtype=torch.float64, device=device, high=10.0, low=0.0
),
)
for base_size, exp_size in test_cases
)
for base, exponent in test_inputs:
regex = "doesn't match the broadcast shape"
self.assertRaisesRegex(RuntimeError, regex, base.pow_, exponent)
def test_int_tensor_pow_neg_ints(self, device):
ints = [
torch.iinfo(torch.int32).min,
-3,
-2,
-1,
0,
1,
2,
3,
torch.iinfo(torch.int32).max,
]
neg_ints = [torch.iinfo(torch.int32).min, -3, -2, -1]
tensor = torch.tensor(ints, dtype=torch.int32, device=device)
for pow in neg_ints:
self._test_pow(tensor, pow)
def test_long_tensor_pow_floats(self, device):
ints = [0, 1, 23, 4567]
floats = [0.0, 1 / 3, 1 / 2, 1.0, 3 / 2, 2.0]
tensor = torch.tensor(ints, dtype=torch.int64, device=device)
for pow in floats:
self._test_pow(tensor, pow)
    @dtypes(torch.float32, torch.float64)
def test_float_scalar_pow_float_tensor(self, device, dtype):
floats = [2.0, -3 / 2, -1.0, -1 / 2, -1 / 3, 0.0, 1 / 3, 1 / 2, 1.0, 3 / 2, 2.0]
exponent_shapes = (
(1,),
(2, 2),
(2, 1),
(2, 2, 2),
)
tensors = list(
make_tensor(shape, dtype=dtype, device=device, low=0)
for shape in exponent_shapes
)
floats_tensor = torch.tensor(floats, dtype=dtype, device=device)
for base in floats:
self._test_pow(base, floats_tensor)
for tensor in tensors:
self._test_pow(base, tensor)
@onlyCUDA
def test_cuda_tensor_pow_scalar_tensor(self, device):
cuda_tensors = [
torch.randn((3, 3), device=device),
torch.tensor(3.0, device=device),
]
scalar_tensors = [
torch.tensor(5.0, device="cpu"),
torch.tensor(-3),
torch.tensor(1),
]
for base, exp in product(cuda_tensors, scalar_tensors):
self._test_pow(base, exp)
@onlyCUDA
def test_cpu_tensor_pow_cuda_scalar_tensor(self, device):
cuda_tensors = [
torch.tensor(5.0, device="cuda"),
torch.tensor(-3, device="cuda"),
]
for exp in cuda_tensors:
base = torch.randn((3, 3), device="cpu")
regex = "Expected all tensors to be on the same device, but found at least two devices, cuda.* and cpu!"
self.assertRaisesRegex(RuntimeError, regex, torch.pow, base, exp)
for exp in cuda_tensors:
# Binary ops with a cpu + cuda tensor are allowed if the cpu tensor has 0 dimension
base = torch.tensor(3.0, device="cpu")
self._test_pow(base, exp)
@onlyCUDA
@dtypes(torch.complex64, torch.complex128)
def test_pow_cuda_complex_extremal_failing(self, device, dtype):
t = torch.tensor(complex(-1.0, float("inf")), dtype=dtype, device=device)
with self.assertRaises(AssertionError):
cuda_out = t.pow(2)
cpu_out = t.cpu().pow(2)
self.assertEqual(cpu_out, cuda_out)
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half))
def test_complex_scalar_pow_tensor(self, device, dtype):
complexes = [0.5j, 1.0 + 1.0j, -1.5j, 2.2 - 1.6j, 1 + 0j]
first_exp = make_tensor((100,), dtype=dtype, device=device, low=-2, high=2)
second_exp = make_tensor(
(100,), dtype=dtype, device=device, low=-2, high=2, noncontiguous=True
)
first_exp[0] = first_exp[10] = first_exp[20] = 0
second_exp[0] = second_exp[10] = second_exp[20] = 0
for base in complexes:
            # On CPU, a Half tensor with a complex base leads to a computation
            # dtype of ComplexHalf, which this op does not yet support
            # NOTE: pow has a fast path when the base is 1 which does support
            # ComplexHalf
will_raise_error = torch.device(device).type == 'cpu' and \
dtype is torch.half and base != (1 + 0j)
if will_raise_error:
with self.assertRaisesRegex(RuntimeError, "not implemented for 'ComplexHalf'"):
self._test_pow(base, first_exp)
self._test_pow(base, second_exp)
else:
self._test_pow(base, first_exp)
self._test_pow(base, second_exp)
@onlyNativeDeviceTypes
@skipMeta
def test_pow_scalar_type_promotion(self, device):
# Test against a scalar and non-scalar input
inputs = [17, [17]]
for input in inputs:
# We expect the computation to be performed in uint8 (overflowing to 0), and then cast to int64
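            # e.g. for input 17, 2 ** 17 == 131072 == 512 * 256, so the uint8
            # computation wraps to 0 while the int64 computation yields 131072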
input_tensor_uint8 = torch.tensor(input, dtype=torch.uint8, device=device)
out_uint8_computation = torch.pow(
2,
input_tensor_uint8,
out=torch.tensor(0, dtype=torch.int64, device=device),
)
# Computation should run in int64, and not overflow
input_tensor_int64 = torch.tensor(input, dtype=torch.int64, device=device)
out_int64_computation = torch.pow(
2,
input_tensor_int64,
out=torch.tensor(0, dtype=torch.int64, device=device),
)
self.assertNotEqual(out_uint8_computation, out_int64_computation)
self.assertEqual(
out_uint8_computation.to(dtype=torch.uint8),
out_int64_computation.to(dtype=torch.uint8),
)
def test_tensor_pow_tensor(self, device):
def rotate(l, n):
return l[-n:] + l[:-n]
def test_tensor_pow_tensor(values, torch_type, numpy_type):
vals_tensor = torch.tensor(values, dtype=torch_type, device=device)
for i in range(len(values)):
pows = rotate(values, i)
pows_tensor = torch.tensor(pows, dtype=torch_type, device=device)
self._test_pow(vals_tensor, pows_tensor)
ints = [0, 1, 2, 3]
test_tensor_pow_tensor(ints, torch.uint8, np.uint8)
test_tensor_pow_tensor(ints, torch.int8, np.int8)
test_tensor_pow_tensor(ints, torch.int16, np.int16)
test_tensor_pow_tensor(ints, torch.int32, np.int32)
test_tensor_pow_tensor(ints, torch.int64, np.int64)
floats = [-3.0, -2.0, -1.0, -1 / 2, -1 / 3, 0.0, 1 / 3, 1 / 2, 1.0, 2.0, 3.0]
test_tensor_pow_tensor(floats, torch.float16, np.float16)
test_tensor_pow_tensor(floats, torch.float32, np.float32)
test_tensor_pow_tensor(floats, torch.float64, np.float64)
def test_logical_xor_with_nontrivial_alignment(self, device):
# test tensor that is not aligned to multiple of 16 bytes
size = 128
a = torch.randn(size, device=device) > 0
b = torch.randn(size, device=device) > 0
c = torch.randn(size, device=device) > 0
non_trivial_alignment = [1, 2, 4, 8, 15]
for i in non_trivial_alignment:
for j in non_trivial_alignment:
for k in non_trivial_alignment:
a_ = a[i : 100 + i]
b_ = b[j : 100 + j]
c_ = c[k : 100 + k]
torch.logical_xor(a_, b_, out=c_)
for x, y, z in zip(a_.tolist(), b_.tolist(), c_.tolist()):
self.assertEqual(x ^ y, z)
@dtypes(torch.float)
def test_add_with_tail(self, device, dtype):
# test tensor where there is a tail which is not a multiple
# of GPU warp size
for tail_size in [1, 63, 67, 130]:
size = 4096 + tail_size
a = torch.randn(size, device=device, dtype=dtype)
b = torch.randn(size, device=device, dtype=dtype)
c = a + b
for x, y, z in zip(a.tolist(), b.tolist(), c.tolist()):
self.assertEqual(x + y, z)
# Tests that CUDA tensors on different devices cannot be used in the same
# binary operation, and that CUDA "scalars" cannot be used in the same
# binary operation as non-scalar CPU tensors.
@deviceCountAtLeast(2)
@onlyCUDA
def test_cross_device_binary_ops(self, devices):
vals = (1.0, (2.0,))
cpu_tensor = torch.randn(2, 2)
def do_test(op, a, b):
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(a, b)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(b, a)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(a, cpu_tensor)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(cpu_tensor, a)
for op in (
operator.add,
torch.add,
operator.sub,
torch.sub,
operator.mul,
torch.mul,
operator.truediv,
torch.true_divide,
operator.floordiv,
torch.floor_divide,
):
for a, b in product(vals, vals):
a = torch.tensor(a, device=devices[0])
b = torch.tensor(b, device=devices[1])
do_test(op, a, b)
# This test ensures that a scalar Tensor can be safely used
# in a binary operation in conjunction with a Tensor on all
# available CUDA devices
@deviceCountAtLeast(2)
@onlyCUDA
def test_binary_op_scalar_device_unspecified(self, devices):
scalar_val = torch.tensor(1.0)
for default_device in devices:
with torch.cuda.device(default_device):
for device in devices:
device_obj = torch.device(device)
x = torch.rand(3, device=device)
y0 = x * scalar_val
self.assertEqual(y0.device, device_obj)
y1 = scalar_val * x
self.assertEqual(y1.device, device_obj)
self.assertEqual(y0, y1)
def test_div_and_floordiv_vs_python(self, device):
# Tests torch division ops which can handle both arguments being
# scalars.
def _scalar_helper(python_op, torch_op):
for a, b in product(range(-10, 10), range(-10, 10)):
for op in (lambda x: x * 0.5, lambda x: math.floor(x)):
a = op(a)
b = op(b)
# Skips zero divisors
if b == 0:
continue
expected = python_op(a, b)
for op in (operator.truediv, torch.true_divide):
actual_scalar = torch_op(a, b)
a_t = torch.tensor(a, device=device)
b_t = torch.tensor(b, device=device)
actual_tensor = torch_op(a_t, b_t)
actual_first_tensor = torch_op(a_t, b)
actual_second_tensor = torch_op(a, b_t)
self.assertEqual(actual_scalar, expected)
self.assertEqual(actual_tensor.item(), expected)
self.assertEqual(actual_first_tensor, actual_tensor)
self.assertEqual(actual_second_tensor, actual_tensor)
_scalar_helper(operator.truediv, operator.truediv)
_scalar_helper(operator.truediv, torch.true_divide)
_scalar_helper(lambda a, b: math.floor(a / b), operator.floordiv)
_scalar_helper(lambda a, b: math.floor(a / b), torch.floor_divide)
@onlyNativeDeviceTypes
def test_div_and_floordiv_script_vs_python(self, device):
# Creates jitted functions of two tensors
def _wrapped_div(a, b):
return a / b
def _wrapped_floordiv(a, b):
return a // b
scripted_div = torch.jit.script(_wrapped_div)
scripted_floordiv = torch.jit.script(_wrapped_floordiv)
for a, b in product(range(-10, 10), range(-10, 10)):
for op in (lambda x: x * 0.5, lambda x: math.floor(x)):
a = op(a)
b = op(b)
# Skips zero divisors
if b == 0:
continue
expected_div = a / b
expected_floordiv = math.floor(a / b)
a_t = torch.tensor(a, device=device)
b_t = torch.tensor(b, device=device)
self.assertEqual(scripted_div(a_t, b_t), expected_div)
self.assertEqual(scripted_floordiv(a_t, b_t), expected_floordiv)
# Creates jitted functions of one tensor
def _wrapped_div_scalar(a):
return a / 5
# NOTE: the JIT implements division as torch.reciprocal(a) * 5
def _wrapped_rdiv_scalar(a):
return 5 / a
def _wrapped_floordiv_scalar(a):
return a // 5
# NOTE: this fails if the input is not an integer tensor
# See https://github.com/pytorch/pytorch/issues/45199
def _wrapped_rfloordiv_scalar(a):
return 5 // a
scripted_div_scalar = torch.jit.script(_wrapped_div_scalar)
scripted_rdiv_scalar = torch.jit.script(_wrapped_rdiv_scalar)
scripted_floordiv_scalar = torch.jit.script(_wrapped_floordiv_scalar)
scripted_rfloordiv_scalar = torch.jit.script(_wrapped_rfloordiv_scalar)
for a in range(-10, 10):
for op in (lambda x: x * 0.5, lambda x: math.floor(x)):
a = op(a)
a_t = torch.tensor(a, device=device)
self.assertEqual(a / 5, scripted_div_scalar(a_t))
# Skips zero divisors
if a == 0:
continue
self.assertEqual(5 / a, scripted_rdiv_scalar(a_t))
# Handles Issue 45199 (see comment above)
if a_t.is_floating_point():
with self.assertRaises(RuntimeError):
scripted_rfloordiv_scalar(a_t)
else:
# This should emit a UserWarning, why doesn't it?
# See issue gh-52387
self.assertEqual(5 // a, scripted_rfloordiv_scalar(a_t))
@onlyNativeDeviceTypes
def test_idiv_and_ifloordiv_vs_python(self, device):
def _wrapped_idiv_tensor(a, b):
a /= b
return a
def _wrapped_idiv_scalar(a):
a /= 5
return a
def _wrapped_true_divide__tensor(a, b):
a.true_divide_(b)
return a
def _wrapped_true_divide__scalar(a):
a.true_divide_(5)
return a
def _wrapped_floor_divide__tensor(a, b):
a.floor_divide_(b)
return a
def _wrapped_floor_divide__scalar(a):
a.floor_divide_(5)
return a
# The following functions are unsupported by the JIT
def _wrapped_ifloordiv_tensor(a, b):
a //= b
return a
def _wrapped_ifloordiv_scalar(a):
a //= 5
return a
with self.assertRaises(torch.jit.frontend.NotSupportedError):
scripted_ifloordiv_tensor = torch.jit.script(_wrapped_ifloordiv_tensor)
with self.assertRaises(torch.jit.frontend.NotSupportedError):
scripted_ifloordiv_scalar = torch.jit.script(_wrapped_ifloordiv_scalar)
scripted_idiv_tensor = torch.jit.script(_wrapped_idiv_tensor)
scripted_idiv_scalar = torch.jit.script(_wrapped_idiv_scalar)
scripted_true_divide__tensor = torch.jit.script(_wrapped_true_divide__tensor)
scripted_true_divide__scalar = torch.jit.script(_wrapped_true_divide__scalar)
scripted_floor_divide__tensor = torch.jit.script(_wrapped_floor_divide__tensor)
scripted_floor_divide__scalar = torch.jit.script(_wrapped_floor_divide__scalar)
for a, b in product(range(-10, 10), range(-10, 10)):
for op in (lambda x: x * 0.5, lambda x: math.floor(x)):
a = op(a)
b = op(b)
# Skips zero divisors
if b == 0:
continue
expected_idiv = a / b
expected_ifloordiv = a // b
a_t = torch.tensor(a, device=device)
b_t = torch.tensor(b, device=device)
if a_t.is_floating_point():
tmp0 = a_t.clone()
tmp0 /= b
tmp1 = a_t.clone()
tmp1 /= b_t
self.assertEqual(tmp0.item(), expected_idiv)
self.assertEqual(tmp1.item(), expected_idiv)
self.assertEqual(
scripted_true_divide__tensor(a_t.clone(), b_t).item(),
expected_idiv,
)
self.assertEqual(
scripted_true_divide__scalar(a_t.clone()).item(), a / 5
)
else:
tmp = a_t.clone()
with self.assertRaises(RuntimeError):
tmp /= b
with self.assertRaises(RuntimeError):
tmp /= b_t
with self.assertRaises(RuntimeError):
scripted_true_divide__tensor(tmp, b_t)
with self.assertRaises(RuntimeError):
scripted_true_divide__scalar(tmp)
if not a_t.is_floating_point() and b_t.is_floating_point():
# Inplace modification fails because a float tensor is required
# if the divisor is a float tensor
a_t.clone().floor_divide_(b_t)
scripted_floor_divide__tensor(a_t.clone(), b_t)
tmp = a_t.clone()
tmp //= b_t
else:
# Inplace modification is OK when both or neither tensor is
# a float tensor
self.assertEqual(
a_t.clone().floor_divide_(b_t).item(), expected_ifloordiv
)
self.assertEqual(
scripted_floor_divide__tensor(a_t.clone(), b_t).item(),
expected_ifloordiv,
)
tmp = a_t.clone()
tmp //= b_t
self.assertEqual(tmp.item(), expected_ifloordiv)
self.assertEqual(
scripted_floor_divide__scalar(a_t), math.floor(a / 5)
)
# Tests binary op equivalence with Python builtin ops
# Also tests that reverse operations are equivalent to forward ops
# NOTE: division ops are tested separately above
def test_binary_ops_with_scalars(self, device):
for python_op, torch_op in (
(operator.add, torch.add),
(operator.sub, torch.sub),
(operator.mul, torch.mul),
(operator.truediv, torch.div),
):
for a, b in product(range(-10, 10), range(-10, 10)):
for op in (lambda x: x * 0.5, lambda x: math.floor(x)):
a = op(a)
b = op(b)
# Skips zero divisors
if b == 0 or a == 0:
continue
a_tensor = torch.tensor(a, device=device)
b_tensor = torch.tensor(b, device=device)
a_tensor_cpu = a_tensor.cpu()
b_tensor_cpu = b_tensor.cpu()
vals = (a, b, a_tensor, b_tensor, a_tensor_cpu, b_tensor_cpu)
for args in product(vals, vals):
first, second = args
first_scalar = (
first
if not isinstance(first, torch.Tensor)
else first.item()
)
second_scalar = (
second
if not isinstance(second, torch.Tensor)
else second.item()
)
expected = python_op(first_scalar, second_scalar)
self.assertEqual(expected, python_op(first, second))
self.assertEqual(expected, torch_op(first, second))
@dtypes(
*product(
all_types_and(torch.half, torch.bfloat16, torch.bool),
all_types_and(torch.half, torch.bfloat16, torch.bool),
)
)
def test_maximum_minimum_type_promotion(self, device, dtypes):
a = torch.tensor((0, 1), device=device, dtype=dtypes[0])
b = torch.tensor((1, 0), device=device, dtype=dtypes[1])
for op in (
torch.maximum,
torch.max,
torch.fmax,
torch.minimum,
torch.min,
torch.fmin,
):
result = op(a, b)
self.assertEqual(result.dtype, torch.result_type(a, b))
@dtypes(*integral_types_and(torch.bool))
def test_maximum_minimum_int_and_bool(self, device, dtype):
ops = (
(torch.maximum, torch.max, np.maximum),
(torch.minimum, torch.min, np.minimum),
(torch.fmax, None, np.fmax),
(torch.fmin, None, np.fmin),
)
rng = np.random.default_rng()
a_np = np.array(
rng.integers(-100, 100, size=10), dtype=torch_to_numpy_dtype_dict[dtype]
)
b_np = np.array(
rng.integers(-100, 100, size=10), dtype=torch_to_numpy_dtype_dict[dtype]
)
for torch_op, alias, numpy_op in ops:
a_tensor = torch.from_numpy(a_np).to(device=device, dtype=dtype)
b_tensor = torch.from_numpy(b_np).to(device=device, dtype=dtype)
tensor_result = torch_op(a_tensor, b_tensor)
out = torch.empty_like(a_tensor)
torch_op(a_tensor, b_tensor, out=out)
numpy_result = numpy_op(a_np, b_np)
if alias is not None:
alias_result = alias(a_tensor, b_tensor)
self.assertEqual(alias_result, tensor_result)
self.assertEqual(tensor_result, numpy_result)
self.assertEqual(out, numpy_result)
@precisionOverride({torch.bfloat16: 1e-2})
@dtypes(*(floating_types_and(torch.half, torch.bfloat16)))
def test_maximum_minimum_float(self, device, dtype):
ops = (
(torch.maximum, torch.max, np.maximum),
(torch.minimum, torch.min, np.minimum),
(torch.fmax, None, np.fmax),
(torch.fmin, None, np.fmin),
)
if dtype == torch.bfloat16:
a_np = np.random.randn(10).astype(np.float64)
b_np = np.random.randn(10).astype(np.float64)
else:
a_np = np.random.randn(10).astype(torch_to_numpy_dtype_dict[dtype])
b_np = np.random.randn(10).astype(torch_to_numpy_dtype_dict[dtype])
for torch_op, alias, numpy_op in ops:
numpy_result = numpy_op(a_np, b_np)
a_tensor = torch.from_numpy(a_np).to(device=device, dtype=dtype)
b_tensor = torch.from_numpy(b_np).to(device=device, dtype=dtype)
tensor_result = torch_op(a_tensor, b_tensor)
out = torch.empty_like(a_tensor)
torch_op(a_tensor, b_tensor, out=out)
if alias is not None:
alias_result = alias(a_tensor, b_tensor)
self.assertEqual(alias_result, tensor_result, exact_dtype=False)
self.assertEqual(tensor_result, numpy_result, exact_dtype=False)
self.assertEqual(out, numpy_result, exact_dtype=False)
@dtypes(*(floating_types_and(torch.half, torch.bfloat16)))
def test_maximum_minimum_float_nan_and_inf(self, device, dtype):
        # np.maximum and np.minimum compare input arrays element-wise.
        # If one of the elements being compared is a NaN, then that element is returned.
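        # torch.fmax/torch.fmin follow np.fmax/np.fmin instead: if exactly one of
        # the two compared elements is NaN, the non-NaN element is returned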
ops = (
(torch.maximum, torch.max, np.maximum),
(torch.minimum, torch.min, np.minimum),
(torch.fmax, None, np.fmax),
(torch.fmin, None, np.fmin),
)
a_vals = (
float("inf"),
-float("inf"),
float("nan"),
float("inf"),
float("nan"),
float("nan"),
1,
float("nan"),
)
b_vals = (
-float("inf"),
float("inf"),
float("inf"),
float("nan"),
float("nan"),
0,
float("nan"),
-5,
)
if dtype == torch.bfloat16:
a_np = np.array(a_vals, dtype=np.float64)
b_np = np.array(b_vals, dtype=np.float64)
else:
a_np = np.array(a_vals, dtype=torch_to_numpy_dtype_dict[dtype])
b_np = np.array(b_vals, dtype=torch_to_numpy_dtype_dict[dtype])
for torch_op, alias, numpy_op in ops:
numpy_result = numpy_op(a_np, b_np)
a_tensor = torch.from_numpy(a_np).to(device=device, dtype=dtype)
b_tensor = torch.from_numpy(b_np).to(device=device, dtype=dtype)
tensor_result = torch_op(a_tensor, b_tensor)
out = torch.empty_like(a_tensor)
torch_op(a_tensor, b_tensor, out=out)
if alias is not None:
alias_result = alias(a_tensor, b_tensor)
self.assertEqual(alias_result, tensor_result)
if dtype == torch.bfloat16:
self.assertEqual(tensor_result, numpy_result, exact_dtype=False)
self.assertEqual(out, numpy_result, exact_dtype=False)
else:
self.assertEqual(tensor_result, numpy_result)
self.assertEqual(out, numpy_result)
@dtypes(
*product(
complex_types(),
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
)
)
def test_maximum_minimum_complex(self, device, dtypes):
for torch_op in (
torch.maximum,
torch.minimum,
torch.max,
torch.min,
torch.fmax,
torch.fmin,
):
with self.assertRaisesRegex(RuntimeError, ".+not implemented for.+"):
torch_op(
torch.ones(1, device=device, dtype=dtypes[0]),
torch.ones(1, device=device, dtype=dtypes[1]),
)
with self.assertRaisesRegex(RuntimeError, ".+not implemented for.+"):
torch_op(
torch.ones(1, device=device, dtype=dtypes[1]),
torch.ones(1, device=device, dtype=dtypes[0]),
)
@onlyCUDA
def test_maximum_minimum_cross_device(self, device):
a = torch.tensor((1, 2, -1))
b = torch.tensor((3, 0, 4), device=device)
ops = (torch.maximum, torch.minimum)
for torch_op in ops:
with self.assertRaisesRegex(
RuntimeError, "Expected all tensors to be on the same device"
):
torch_op(a, b)
with self.assertRaisesRegex(
RuntimeError, "Expected all tensors to be on the same device"
):
torch_op(b, a)
# test cuda tensor and cpu scalar
ops = ((torch.maximum, np.maximum), (torch.minimum, np.minimum))
a_np = np.array(1)
b_np = np.array([3, 0, 4])
for torch_op, numpy_op in ops:
a_tensor = torch.from_numpy(a_np)
b_tensor = torch.from_numpy(b_np).to(device=device)
tensor_result_1 = torch_op(a_tensor, b_tensor)
numpy_result_1 = numpy_op(a_np, b_np)
tensor_result_2 = torch_op(b_tensor, a_tensor)
numpy_result_2 = numpy_op(b_np, a_np)
self.assertEqual(tensor_result_1, numpy_result_1)
self.assertEqual(tensor_result_2, numpy_result_2)
@dtypes(
*product(
floating_types_and(torch.half, torch.bfloat16),
floating_types_and(torch.half, torch.bfloat16),
)
)
def test_maximum_and_minimum_subgradient(self, device, dtypes):
def run_test(f, a, b, expected_a_grad, expected_b_grad):
a = torch.tensor(a, requires_grad=True, device=device, dtype=dtypes[0])
b = torch.tensor(b, requires_grad=True, device=device, dtype=dtypes[1])
z = f(a, b)
z.sum().backward()
self.assertEqual(a.grad, expected_a_grad)
self.assertEqual(b.grad, expected_b_grad)
run_test(
torch.maximum,
[0.0, 1.0, 2.0],
[1.0, 1.0, 1.0],
[0.0, 0.5, 1.0],
[1.0, 0.5, 0.0],
)
run_test(
torch.minimum,
[0.0, 1.0, 2.0],
[1.0, 1.0, 1.0],
[1.0, 0.5, 0.0],
[0.0, 0.5, 1.0],
)
def test_maximum_minimum_forward_ad_float32(self, device):
        # TODO: This should really be covered by OpInfo, but it isn't. The problem
        # is that our gradient tests only use float64, while float32 should be
        # tested as well.
x = torch.randn(3, device=device, dtype=torch.float32)
y = torch.randn(3, device=device, dtype=torch.float32)
tx = torch.randn(3, device=device, dtype=torch.float32)
ty = torch.randn(3, device=device, dtype=torch.float32)
with fwAD.dual_level():
x_dual = fwAD.make_dual(x, tx)
y_dual = fwAD.make_dual(y, ty)
result = torch.maximum(x_dual, y_dual)
_, result_tangent = fwAD.unpack_dual(result)
expected = torch.where(x > y, tx, ty)
self.assertEqual(result_tangent, expected)
with fwAD.dual_level():
x_dual = fwAD.make_dual(x, tx)
y_dual = fwAD.make_dual(y, ty)
result = torch.minimum(x_dual, y_dual)
_, result_tangent = fwAD.unpack_dual(result)
expected = torch.where(x < y, tx, ty)
self.assertEqual(result_tangent, expected)
# TODO: tests like this should be generic
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_mul_intertype_scalar(self, device, dtype):
x = torch.tensor(1.5, dtype=dtype, device=device)
y = torch.tensor(3, dtype=torch.int32, device=device)
self.assertEqual(x * y, 4.5)
self.assertEqual(y * x, 4.5)
with self.assertRaisesRegex(
RuntimeError, "can't be cast to the desired output type"
):
y *= x
x *= y
self.assertEqual(x, 4.5)
@onlyCPU
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_sub(self, device, dtype):
if dtype in integral_types():
            # Before Python 3.10, floats were implicitly converted to ints, but with a
            # DeprecationWarning: "an integer is required (got type float). Implicit
            # conversion to integers using __int__ is deprecated, and may be removed in
            # a future version of Python." Since Python 3.10, that attempt gives an error,
            # so integer values are used for integral dtypes here.
m1 = torch.tensor([2, 4], dtype=dtype, device=device)
m2 = torch.tensor([1, 2], dtype=dtype, device=device)
diff = torch.tensor([1, 2], dtype=dtype)
else:
m1 = torch.tensor([2.34, 4.44], dtype=dtype, device=device)
m2 = torch.tensor([1.23, 2.33], dtype=dtype, device=device)
diff = torch.tensor([1.11, 2.11], dtype=dtype)
if dtype == torch.bool:
self.assertRaises(RuntimeError, lambda: m1 - m2)
elif dtype == torch.bfloat16 or dtype == torch.half:
            # bfloat16 and half have lower precision, so they need a looser tolerance
self.assertEqual(m1 - m2, diff, atol=0.01, rtol=0)
else:
self.assertEqual(m1 - m2, diff)
# TODO: what is this test testing?
@onlyCPU
@dtypes(torch.float)
def test_csub(self, device, dtype):
# with a tensor
a = torch.randn(100, 90, dtype=dtype, device=device)
b = a.clone().normal_()
res_add = torch.add(a, b, alpha=-1)
res_csub = a.clone()
res_csub.sub_(b)
self.assertEqual(res_add, res_csub)
# with a scalar
a = torch.randn(100, 100, dtype=dtype, device=device)
scalar = 123.5
res_add = torch.add(a, -scalar)
res_csub = a.clone()
res_csub.sub_(scalar)
self.assertEqual(res_add, res_csub)
# TODO: reconcile with minimum/maximum tests
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_min_max_binary_op_nan(self, device, dtype):
a = torch.rand(1000, dtype=dtype, device=device)
b = torch.rand(1000, dtype=dtype, device=device)
# 0:250: a -- nan, b -- not nan
a[:250] = float("nan")
# 250:500: a -- not nan, b -- nan
b[250:500] = float("nan")
# 500:750: a and b both nan
a[500:750] = float("nan")
b[500:750] = float("nan")
# 750:1000: neither nan
ma = torch.max(a, b)
mi = torch.min(a, b)
for i in range(750):
self.assertTrue(
torch.isnan(ma[i]),
"max(a, b): {}, a: {}, b: {}".format(ma[i], a[i], b[i]),
)
self.assertTrue(
torch.isnan(mi[i]),
"min(a, b): {}, a: {}, b: {}".format(mi[i], a[i], b[i]),
)
for i in range(750, 1000):
self.assertFalse(
torch.isnan(ma[i]),
"max(a, b): {}, a: {}, b: {}".format(ma[i], a[i], b[i]),
)
self.assertFalse(
torch.isnan(mi[i]),
"min(a, b): {}, a: {}, b: {}".format(mi[i], a[i], b[i]),
)
@dtypes(
*product(
all_types_and(torch.half, torch.bfloat16, torch.bool),
all_types_and(torch.half, torch.bfloat16, torch.bool),
)
)
def test_copysign(self, device, dtypes):
def _test_copysign_numpy(a, b):
torch_result = torch.copysign(a, b)
if a.dtype == torch.bfloat16:
np_a = a.to(torch.float).cpu().numpy()
else:
np_a = a.cpu().numpy()
if b.dtype == torch.bfloat16:
np_b = b.to(torch.float).cpu().numpy()
else:
np_b = b.cpu().numpy()
expected = torch.from_numpy(np.copysign(np_a, np_b))
            # Work around type-promotion inconsistencies between PyTorch and NumPy.
            # This applies when either argument has an integral, bool, or bfloat16 dtype.
types = integral_types_and(torch.bool, torch.bfloat16)
if a.dtype in types or b.dtype in types:
promoted_type = torch.promote_types(torch_result.dtype, expected.dtype)
torch_result = torch_result.to(promoted_type)
expected = expected.to(promoted_type)
# Verify Value
self.assertEqual(torch_result, expected)
            # Verify Sign
            # Use a second copysign to verify the correctness of 0.0 and -0.0, since
            # self.assertEqual(0.0, -0.0) always passes. We use 1 as the magnitude to
            # compare the signs of the torch and numpy results elementwise.
            # Special case: NaN conversions between FP32 and FP16 are not bitwise
            # equivalent, so this assertion is skipped when either dtype is float16.
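            # Illustrative note (not asserted directly): copysign only inspects the sign
            # bit, so torch.copysign(torch.tensor(1.0), torch.tensor(-0.0)) yields -1.0
            # even though (0.0 == -0.0) is True, which is what makes this check meaningful.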
if a.dtype != torch.float16 and b.dtype != torch.float16:
self.assertEqual(
torch.copysign(torch.tensor(1.0), torch_result),
torch.copysign(torch.tensor(1.0), expected),
)
# Compare Result with NumPy
# Type promotion
a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)
_test_copysign_numpy(a, b)
# Broadcast
a = make_tensor((10, 1, 10), device=device, dtype=dtypes[0], low=-9, high=9)
b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)
_test_copysign_numpy(a, b)
a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
b = make_tensor((10, 1, 10), device=device, dtype=dtypes[1], low=-9, high=9)
_test_copysign_numpy(a, b)
# 0.0/-0.0/inf/-inf/nan
cases = [0.0, -0.0, float("inf"), float("-inf"), float("nan")]
        # torch.bfloat16 cannot represent '-nan'
        # torch.half cannot represent '-nan' on CUDA
types = [torch.float32, torch.float64]
if device == "cpu":
types.append(torch.float16)
if dtypes[0] in types:
b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)
for case in cases:
_test_copysign_numpy(
torch.tensor([case], device=device, dtype=dtypes[0]), b
)
if dtypes[1] in floating_types_and(torch.half, torch.bfloat16):
a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
for case in cases:
_test_copysign_numpy(
a, torch.tensor([case], device=device, dtype=dtypes[1])
)
@dtypes(
*product(
floating_types_and(torch.half, torch.bfloat16),
floating_types_and(torch.half, torch.bfloat16),
)
)
def test_copysign_subgradient(self, device, dtypes):
# Input is 0.0
x = torch.tensor(
[0.0, 0.0, 0.0], dtype=dtypes[0], device=device, requires_grad=True
)
y = torch.tensor(
[-1.0, 0.0, 1.0], dtype=dtypes[1], device=device, requires_grad=True
)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Input is -0.0
x = torch.tensor(
[-0.0, -0.0, -0.0], dtype=dtypes[0], device=device, requires_grad=True
)
y = torch.tensor(
[-1.0, 0.0, 1.0], dtype=dtypes[1], device=device, requires_grad=True
)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Other is 0.0
x = torch.tensor(
[-1.0, 0.0, 1.0], dtype=dtypes[0], device=device, requires_grad=True
)
y = torch.tensor(
[0.0, 0.0, 0.0], dtype=dtypes[1], device=device, requires_grad=True
)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [-1.0, 0.0, 1.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Other is -0.0
x = torch.tensor(
[-1.0, 0.0, 1.0], dtype=dtypes[0], device=device, requires_grad=True
)
y = torch.tensor(
[-0.0, -0.0, -0.0], dtype=dtypes[1], device=device, requires_grad=True
)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [1.0, 0.0, -1.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
@dtypes(torch.bfloat16, torch.float)
def test_div(self, device, dtype):
for op, method, inplace in (
(torch.div, torch.Tensor.div, torch.Tensor.div_),
(torch.true_divide, torch.Tensor.true_divide, torch.Tensor.true_divide_),
):
m1 = torch.randn(10, 10, dtype=torch.float, device=device).to(dtype=dtype)
res1 = m1.clone()
inplace(res1[:, 3], 2)
res2 = m1.clone()
for i in range(m1.size(0)):
res2[i, 3] = res2[i, 3] / 2
self.assertEqual(res1, res2)
if dtype == torch.bfloat16:
a1 = torch.tensor([4.2, 6.2], dtype=dtype, device=device)
a2 = torch.tensor([2.0, 2.0], dtype=dtype, device=device)
self.assertEqual(
op(a1, a2),
torch.tensor([2.1, 3.1], dtype=dtype, device=device),
atol=0.01,
rtol=0,
)
self.assertEqual(method(a1, a2), op(a1, a2))
@dtypes(torch.bfloat16, torch.float)
def test_true_divide_out(self, device, dtype):
a1 = torch.tensor([4.2, 6.2], dtype=dtype, device=device)
a2 = torch.tensor([2.0, 2.0], dtype=dtype, device=device)
res = torch.empty_like(a1)
self.assertEqual(
torch.true_divide(a1, a2, out=res),
torch.tensor([2.1, 3.1], dtype=dtype, device=device),
atol=0.01,
rtol=0,
)
@onlyCUDA
@dtypes(torch.half)
def test_divmul_scalar(self, device, dtype):
x = torch.tensor(100.0, device=device, dtype=dtype)
x_ref = x.float()
scale = 1e5
res = x.div(scale)
expected = x_ref.div(scale)
self.assertEqual(res, expected.to(dtype), atol=0.0, rtol=0.0)
x = torch.tensor(1e-5, device=device, dtype=dtype)
x_ref = x.float()
res = x.mul(scale)
expected = x_ref.mul(scale)
self.assertEqual(res, expected.to(dtype), atol=0.0, rtol=0.0)
res = scale * x
self.assertEqual(res, expected.to(dtype), atol=0.0, rtol=0.0)
@dtypesIfCUDA(
*set(get_all_math_dtypes("cuda")) - {torch.complex64, torch.complex128}
)
@dtypes(*set(get_all_math_dtypes("cpu")) - {torch.complex64, torch.complex128})
def test_floor_divide_tensor(self, device, dtype):
x = torch.randn(10, device=device).mul(30).to(dtype)
y = torch.arange(1, 11, dtype=dtype, device=device)
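        # Tensor // performs floor division (rounding toward negative infinity, like
        # Python's //), which the double-precision torch.floor reference below mirrors.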
z = x // y
z_alt = torch.floor(x.double() / y.double()).to(dtype)
self.assertEqual(z.dtype, x.dtype)
self.assertEqual(z, z_alt)
@dtypesIfCUDA(
*set(get_all_math_dtypes("cuda")) - {torch.complex64, torch.complex128}
)
@dtypes(*set(get_all_math_dtypes("cpu")) - {torch.complex64, torch.complex128})
def test_floor_divide_scalar(self, device, dtype):
x = torch.randn(100, device=device).mul(10).to(dtype)
z = x // 3
z_alt = torch.tensor(
[math.floor(v.item() / 3.0) for v in x], dtype=x.dtype, device=device
)
self.assertEqual(z.dtype, x.dtype)
self.assertEqual(z, z_alt)
@onlyCPU
@dtypes(*get_all_math_dtypes("cpu"))
def test_rdiv(self, device, dtype):
if dtype is torch.float16:
return
elif dtype.is_complex:
x = torch.rand(100, dtype=dtype, device=device).add(1).mul(4)
else:
x = torch.rand(100, device=device).add(1).mul(4).to(dtype)
y = 30 / x
z = torch.tensor([30 / v.item() for v in x], device=device)
self.assertEqual(y, z, exact_dtype=False)
@dtypes(*floating_types_and(torch.half))
def test_fmod_remainder_by_zero_float(self, device, dtype):
fn_list = (torch.fmod, torch.remainder)
for fn in fn_list:
            # check that floating-point fmod/remainder by zero is NaN on both CPU and GPU
x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
zero = torch.zeros_like(x)
self.assertTrue(torch.all(fn(x, 0.0).isnan()))
self.assertTrue(torch.all(fn(x, zero).isnan()))
@onlyNativeDeviceTypes # Check Issue https://github.com/pytorch/pytorch/issues/48130
@dtypes(*integral_types())
def test_fmod_remainder_by_zero_integral(self, device, dtype):
fn_list = (torch.fmod, torch.remainder)
for fn in fn_list:
            # check integral tensor fmod/remainder by zero
x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
zero = torch.zeros_like(x)
# RuntimeError on CPU
if self.device_type == "cpu":
with self.assertRaisesRegex(RuntimeError, "ZeroDivisionError"):
fn(x, zero)
elif torch.version.hip is not None:
# ROCm behavior: x % 0 is a no-op; x is returned
self.assertEqual(fn(x, zero), x)
else:
                # CUDA behavior: the value returned differs by dtype.
                # Because division by zero is undefined behavior, CUDA returns a pattern
                # of all 1s for an integral dividend (other than int64) divided by zero.
                # For int64, CUDA returns all 1s for a negative dividend and only the
                # lower 32 bits set (4294967295) for a positive dividend.
# uint8: 0xff -> 255
# int32: 0xffffffff -> -1
if dtype == torch.int64:
self.assertEqual(fn(x, zero) == 4294967295, x >= 0)
self.assertEqual(fn(x, zero) == -1, x < 0)
else:
value = 255 if dtype == torch.uint8 else -1
self.assertTrue(torch.all(fn(x, zero) == value))
@dtypes(*all_types_and(torch.half))
def test_fmod_remainder(self, device, dtype):
# Use numpy as reference
def _helper(x, mod, fns_list):
for fn, inplace_fn, ref_fn in fns_list:
np_x = x.cpu().numpy() if torch.is_tensor(x) else x
np_mod = mod.cpu().numpy() if torch.is_tensor(mod) else mod
exp = ref_fn(np_x, np_mod)
exp = torch.from_numpy(exp)
res = fn(x, mod)
self.assertEqual(res, exp, exact_dtype=False)
if torch.is_tensor(x):
# out
out = torch.empty(0, device=device, dtype=res.dtype)
fn(x, mod, out=out)
self.assertEqual(out, exp, exact_dtype=False)
self.assertEqual(out.size(), torch.Size([10, 10]))
# in-place (Type cast runtime error)
try:
inplace_fn(x, mod)
self.assertEqual(x, exp, exact_dtype=False)
except RuntimeError as e:
self.assertRegex(
str(e),
"result type (Half|Float|Double) "
"can't be cast to the desired output "
"type (Byte|Char|Short|Int|Long)",
)
x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
# mod with same dtype as x
mod = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
# Exclude 0
mod[mod == 0] = 1
# Mods: Integer, Float, Tensor, Non-contiguous Tensor
mods = [3, 2.3, mod, mod.t()]
# mod with floating-point dtype
if dtype in integral_types():
mod_float = make_tensor(
(10, 10), device=device, dtype=torch.float, low=-9, high=9
)
            # Exclude 0 from the floating-point mod as well
            mod_float[mod_float == 0] = 1
mods.append(mod_float)
for dividend, mod in product([x, x.t()], mods):
_helper(
dividend,
mod,
(
(torch.fmod, torch.Tensor.fmod_, np.fmod),
(torch.remainder, torch.Tensor.remainder_, np.remainder),
),
)
# Tests for torch.remainder(scalar, tensor)
for dividend, mod in product([5, 3.14], mods):
if torch.is_tensor(mod):
_helper(
dividend,
mod,
((torch.remainder, torch.Tensor.remainder_, np.remainder),),
)
@dtypes(torch.float, torch.double)
def test_remainder_fmod_large_dividend(self, device, dtype):
alarge = 1e9
pi = 3.14159265358979
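        # Small worked example of the properties asserted below (illustrative only):
        # with a = 7 and b = -3, torch.fmod(a, b) == 1 (sign of the dividend) while
        # torch.remainder(a, b) == -2 (sign of the divisor); the two differ by |b| == 3.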
for avalue in [alarge, -alarge]:
for bvalue in [pi, -pi]:
a = torch.tensor([avalue], dtype=dtype, device=device)
b = torch.tensor([bvalue], dtype=dtype, device=device)
c = torch.remainder(a, b)
d = torch.fmod(a, b)
self.assertTrue(
(b[0] > 0) == (c[0] > 0)
) # remainder has same sign as divisor
self.assertTrue(
(a[0] > 0) == (d[0] > 0)
) # fmod has same sign as dividend
self.assertTrue(
abs(c[0]) < abs(b[0])
) # remainder is within range of divisor
self.assertTrue(
abs(d[0]) < abs(b[0])
) # fmod is within range of divisor
if (a[0] > 0) == (b[0] > 0):
self.assertTrue(c[0] == d[0]) # remainder is same as fmod
else:
self.assertTrue(
abs(c[0] - d[0]) == abs(b[0])
) # differ by one divisor
@dtypesIfCPU(torch.bfloat16, torch.float32, torch.float64)
@dtypes(torch.float32, torch.float64)
def test_hypot(self, device, dtype):
inputs = [
(
torch.randn(10, device=device).to(dtype),
torch.randn(10, device=device).to(dtype),
),
(
torch.randn((3, 3, 3), device=device).to(dtype),
torch.randn((3, 3, 3), device=device).to(dtype),
),
(
torch.randn((10, 1), device=device).to(dtype),
torch.randn((10, 1), device=device).to(dtype).transpose(0, 1),
),
(
torch.randint(100, (10,), device=device, dtype=torch.long),
torch.randn(10, device=device).to(dtype),
),
]
for input in inputs:
actual = torch.hypot(input[0], input[1])
if dtype == torch.bfloat16:
expected = torch.sqrt(input[0] * input[0] + input[1] * input[1])
else:
expected = np.hypot(input[0].cpu().numpy(), input[1].cpu().numpy())
self.assertEqual(actual, expected, exact_dtype=False)
@onlyNativeDeviceTypes
@dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_gcd(self, device, dtype):
# Tests gcd(0, 0), gcd(0, a) cases
t1 = torch.tensor([0, 10, 0], dtype=dtype, device=device)
t2 = torch.tensor([0, 0, 10], dtype=dtype, device=device)
actual = torch.gcd(t1, t2)
expected = np.gcd([0, 10, 0], [0, 0, 10])
self.assertEqual(actual, expected, exact_dtype=False)
if dtype == torch.uint8:
# Test unsigned integers with potential sign issues (i.e., uint8 with value >= 128)
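            # (Rationale, stated as an assumption: 190 and 210 have the high bit set, so
            # they would flip sign if misread as int8; gcd must still treat them as the
            # unsigned values and return [190, 10].)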
a = torch.tensor([190, 210], device=device, dtype=dtype)
b = torch.tensor([190, 220], device=device, dtype=dtype)
actual = torch.gcd(a, b)
expected = torch.tensor([190, 10], device=device, dtype=dtype)
self.assertEqual(actual, expected)
else:
# Compares with NumPy
a = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
b = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
actual = torch.gcd(a, b)
expected = np.gcd(a.cpu().numpy(), b.cpu().numpy())
self.assertEqual(actual, expected)
@onlyNativeDeviceTypes
@dtypes(torch.int16, torch.int32, torch.int64)
def test_lcm(self, device, dtype):
# Tests lcm(0, 0), lcm(0, a) cases
t1 = torch.tensor([0, 10, 0], dtype=dtype, device=device)
t2 = torch.tensor([0, 0, 10], dtype=dtype, device=device)
actual = torch.lcm(t1, t2)
expected = np.lcm([0, 10, 0], [0, 0, 10])
self.assertEqual(actual, expected, exact_dtype=False)
# Compares with NumPy
a = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
b = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
actual = torch.lcm(a, b)
expected = np.lcm(a.cpu().numpy(), b.cpu().numpy())
self.assertEqual(actual, expected, exact_dtype=False)
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
def test_nextafter(self, device, dtype):
# Test special cases
t1 = torch.tensor([0, 0, 10], device=device, dtype=dtype)
t2 = torch.tensor([inf, -inf, 10], device=device, dtype=dtype)
actual = torch.nextafter(t1, t2)
expected = np.nextafter(t1.cpu().numpy(), t2.cpu().numpy())
self.assertEqual(actual, expected, atol=0, rtol=0)
actual = torch.nextafter(t2, t1)
expected = np.nextafter(t2.cpu().numpy(), t1.cpu().numpy())
self.assertEqual(actual, expected, atol=0, rtol=0)
t1 = torch.tensor([0, nan], device=device, dtype=dtype)
t2 = torch.tensor([nan, 0], device=device, dtype=dtype)
self.assertTrue(torch.nextafter(t1, t2).isnan().all())
a = torch.randn(100, device=device, dtype=dtype)
b = torch.randn(100, device=device, dtype=dtype)
actual = torch.nextafter(a, b)
expected = np.nextafter(a.cpu().numpy(), b.cpu().numpy())
self.assertEqual(actual, expected, atol=0, rtol=0)
@onlyNativeDeviceTypes
@dtypes(torch.bfloat16)
def test_nextafter_bfloat16(self, device, dtype):
nan = float("nan")
inf = float("inf")
cases = (
# (from, to, expected)
(0, 1, 9.183549615799121e-41),
(0, -1, -9.183549615799121e-41),
(1, -2, 0.99609375),
(1, 0, 0.99609375),
(1, 2, 1.0078125),
(-1, -2, -1.0078125),
(-1, 0, -0.99609375),
(2, -1, 1.9921875),
(2, 1, 1.9921875),
(20, 3000, 20.125),
(20, -3000, 19.875),
(3000, -20, 2992.0),
(-3000, 20, -2992.0),
(65536, 0, 65280.0),
(65536, inf, 66048.0),
(-65536, 0, -65280.0),
(-65536, -inf, -66048.0),
(nan, 0, nan),
(0, nan, nan),
(nan, nan, nan),
(nan, inf, nan),
(inf, nan, nan),
(inf, -inf, 3.3895313892515355e38),
(-inf, inf, -3.3895313892515355e38),
(inf, 0, 3.3895313892515355e38),
(0, inf, 9.183549615799121e-41),
(-inf, 0, -3.3895313892515355e38),
(0, -inf, -9.183549615799121e-41),
)
for from_v, to_v, expected in cases:
from_t = torch.tensor([from_v], device=device, dtype=dtype)
to_t = torch.tensor([to_v], device=device, dtype=dtype)
actual = torch.nextafter(from_t, to_t).item()
self.assertEqual(actual, expected, atol=0, rtol=0)
def _test_cop(self, torchfn, mathfn, dtype, device):
def reference_implementation(res2):
for i, j in iter_indices(sm1):
idx1d = i * sm1.size(0) + j
res2[i, j] = mathfn(sm1[i, j], sm2[idx1d])
return res2
# contiguous
m1 = torch.randn(10, 10, 10, dtype=dtype, device=device)
m2 = torch.randn(10, 10 * 10, dtype=dtype, device=device)
sm1 = m1[4]
sm2 = m2[4]
res1 = torchfn(sm1, sm2.view(10, 10))
res2 = reference_implementation(res1.clone())
self.assertEqual(res1, res2)
# non-contiguous
m1 = torch.randn(10, 10, 10, dtype=dtype, device=device)
m2 = torch.randn(10 * 10, 10 * 10, dtype=dtype, device=device)
sm1 = m1[:, 4]
sm2 = m2[:, 4]
# view as sm1.size()
sm2.set_(
sm2.storage(),
sm2.storage_offset(),
sm1.size(),
(sm2.stride()[0] * 10, sm2.stride()[0]),
)
res1 = torchfn(sm1, sm2)
# reference_implementation assumes 1-d sm2
sm2.set_(
sm2.storage(), sm2.storage_offset(), m2[:, 4].size(), m2[:, 4].stride()
)
res2 = reference_implementation(res1.clone())
self.assertEqual(res1, res2)
@onlyCPU
@dtypes(torch.float)
def test_cdiv(self, device, dtype):
self._test_cop(torch.div, lambda x, y: x / y, dtype, device)
@onlyCPU
@dtypes(torch.float)
def test_cremainder(self, device, dtype):
self._test_cop(torch.remainder, lambda x, y: x % y, dtype, device)
@onlyCPU
@dtypes(torch.float)
def test_cmul(self, device, dtype):
self._test_cop(torch.mul, lambda x, y: x * y, dtype, device)
@onlyCPU
@dtypes(torch.float)
def test_cpow(self, device, dtype):
self._test_cop(
torch.pow, lambda x, y: nan if x < 0 else math.pow(x, y), dtype, device
)
@onlyCPU
@dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_floor_divide_zero(self, device, dtype):
a = torch.tensor([0, 1], dtype=dtype, device=device)
b = torch.tensor([0, 1], dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "ZeroDivisionError"):
with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
a // b
@unittest.skipIf(TEST_WITH_ASAN, "Integer overflows are not allowed under ASAN")
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_muldiv_scalar(self, device, dtype):
x = make_tensor((10, 3), dtype=dtype, device=device, low=None, high=None)
s = make_tensor((1,), dtype=dtype, device="cpu", low=None, high=None).item()
y = torch.full_like(x, s)
self.assertEqual(x * s, x * y)
self.assertEqual(s * x, y * x)
self.assertEqual(x / s, x / y)
self.assertEqual(s / x, y / x)
    # TODO: update make_tensor to support adding extremal values and then remove this helper in favor of make_tensor
def _generate_input(self, shape, dtype, device, with_extremal):
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
else:
if dtype.is_floating_point or dtype.is_complex:
# work around torch.randn not being implemented for bfloat16
if dtype == torch.bfloat16:
x = torch.randn(*shape, device=device) * random.randint(30, 100)
x = x.to(torch.bfloat16)
else:
x = torch.randn(
*shape, dtype=dtype, device=device
) * random.randint(30, 100)
x[torch.randn(*shape) > 0.5] = 0
if with_extremal and dtype.is_floating_point:
# Use extremal values
x[torch.randn(*shape) > 0.5] = float("nan")
x[torch.randn(*shape) > 0.5] = float("inf")
x[torch.randn(*shape) > 0.5] = float("-inf")
elif with_extremal and dtype.is_complex:
x[torch.randn(*shape) > 0.5] = complex("nan")
x[torch.randn(*shape) > 0.5] = complex("inf")
x[torch.randn(*shape) > 0.5] = complex("-inf")
elif dtype == torch.bool:
x = torch.zeros(shape, dtype=dtype, device=device)
x[torch.randn(*shape) > 0.5] = True
else:
x = torch.randint(15, 100, shape, dtype=dtype, device=device)
return x
@dtypes(
*tuple(
itertools.combinations_with_replacement(
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool), 2
)
)
)
def test_comparison_ops_type_promotion_and_broadcasting(self, device, dtypes):
# issue #42660
# testing all combinations of broadcasting and type promotion
# with a range of dtypes and input shapes, and with extremal values
def compare_with_numpy_bin_op(torch_fn, np_fn, x, y, out=None):
# working around the fact that numpy doesn't support bfloat16
# by letting numpy treat them as float32's
x_np = x if x.dtype != torch.bfloat16 else x.to(torch.float32)
y_np = (
y.cpu().numpy()
if y.dtype != torch.bfloat16
else y.to(torch.float32).cpu().numpy()
)
self.compare_with_numpy(
lambda inp: torch_fn(inp, y, out=out) if out else torch_fn(inp, y),
lambda inp: np_fn(inp, y_np, out=out) if out else np_fn(inp, y_np),
x_np,
)
complex_op_denylist = [
torch.lt,
torch.le,
torch.gt,
torch.ge,
] # complex not supported
input_sizes = [(1,), (10,), (10, 1), (1, 10), (4, 10), (64, 10), (12, 3)]
op_pairs = [
(torch.lt, np.less),
(torch.le, np.less_equal),
(torch.gt, np.greater),
(torch.ge, np.greater_equal),
(torch.eq, np.equal),
(torch.ne, np.not_equal),
(torch.logical_and, np.logical_and),
(torch.logical_or, np.logical_or),
(torch.logical_xor, np.logical_xor),
]
for size1 in input_sizes:
size2 = (2,) + size1 # perform broadcasting
for with_extremal in [False, True]:
a = self._generate_input(size1, dtypes[0], device, with_extremal)
b = self._generate_input(size2, dtypes[1], device, with_extremal)
for torch_op, numpy_op in op_pairs:
if (
dtypes[0].is_complex or dtypes[1].is_complex
) and torch_op in complex_op_denylist:
continue
# functional version of op
compare_with_numpy_bin_op(torch_op, numpy_op, a, b)
# functional comparison ops always return bool tensors
self.assertEqual(torch_op(a, b).dtype, torch.bool)
# out version of op
out = torch.zeros(
1, dtype=torch.complex128
) # all casts to complex128 are safe
compare_with_numpy_bin_op(torch_op, numpy_op, a, b, out=out)
@onlyNativeDeviceTypes
@dtypes(torch.int8, torch.int16, torch.int32, torch.int64)
def test_signed_shift(self, device, dtype):
"Ensure that signed integer bit shifting works as expected."
a = torch.tensor([-10, 10], device=device, dtype=dtype) # [11...1110110, 1010]
expected_l = torch.tensor(
[-40, 40], device=device, dtype=dtype
) # [11...11011000, 101000]
self.assertEqual(a << 2, expected_l)
self.compare_with_numpy(lambda x: x << 2, lambda x: np.left_shift(x, 2), a)
expected_r = torch.tensor(
[-5, 5], device=device, dtype=dtype
) # [1111...111011, 101]
self.assertEqual(a >> 1, expected_r)
self.compare_with_numpy(lambda x: x >> 1, lambda x: np.right_shift(x, 1), a)
@onlyNativeDeviceTypes
@dtypes(
*list(
product(
all_types_and(torch.half, torch.bfloat16, torch.bool),
all_types_and(torch.half, torch.bfloat16, torch.bool),
)
)
)
def test_heaviside(self, device, dtypes):
input_dtype = dtypes[0]
values_dtype = dtypes[1]
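        # torch.heaviside(input, values) follows the same convention as np.heaviside,
        # which is used as the reference below: 0 where input < 0, 1 where input > 0,
        # and the corresponding element of `values` where input == 0 (hence the zeros
        # planted at indices 0, 3 and 7).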
rng = np.random.default_rng()
input = np.array(
rng.integers(-10, 10, size=10),
dtype=torch_to_numpy_dtype_dict[
input_dtype if (input_dtype != torch.bfloat16) else torch.float64
],
)
input[0] = input[3] = input[7] = 0
values = np.array(
rng.integers(-10, 10, size=10),
dtype=torch_to_numpy_dtype_dict[
values_dtype if (values_dtype != torch.bfloat16) else torch.float64
],
)
np_result = torch.from_numpy(np.heaviside(input, values)).to(
device=device, dtype=input_dtype
)
input = torch.from_numpy(input).to(device=device, dtype=input_dtype)
values = torch.from_numpy(values).to(device=device, dtype=values_dtype)
out = torch.empty_like(input)
if input_dtype == values_dtype:
torch_result = torch.heaviside(input, values)
self.assertEqual(np_result, torch_result)
torch_result = input.heaviside(values)
self.assertEqual(np_result, torch_result)
torch.heaviside(input, values, out=out)
self.assertEqual(np_result, out)
input.heaviside_(values)
self.assertEqual(np_result, input)
else:
with self.assertRaisesRegex(
RuntimeError,
"heaviside is not yet implemented for tensors with different dtypes.",
):
torch.heaviside(input, values)
with self.assertRaisesRegex(
RuntimeError,
"heaviside is not yet implemented for tensors with different dtypes.",
):
input.heaviside(values)
with self.assertRaisesRegex(
RuntimeError,
"heaviside is not yet implemented for tensors with different dtypes.",
):
torch.heaviside(input, values, out=out)
with self.assertRaisesRegex(
RuntimeError,
"heaviside is not yet implemented for tensors with different dtypes.",
):
input.heaviside_(values)
@onlyCUDA
def test_heaviside_cross_device(self, device):
x = torch.tensor([-9, 5, 0, 6, -2, 2], device=device)
y = torch.tensor(0)
result = torch.heaviside(x, y)
expect = torch.tensor([0, 1, 0, 1, 0, 1], device=device)
self.assertEqual(result, expect)
result = torch.heaviside(y, x)
expect = torch.tensor([-9, 5, 0, 6, -2, 2], device=device)
self.assertEqual(result, expect)
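        # The calls above work because a 0-dim CPU tensor is accepted as a scalar argument
        # alongside CUDA tensors; the reverse pairing below (a CPU tensor with a 0-dim CUDA
        # tensor) is expected to raise.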
x = torch.tensor([-9, 5, 0, 6, -2, 2])
y = torch.tensor(0, device=device)
with self.assertRaisesRegex(
RuntimeError, "Expected all tensors to be on the same device"
):
torch.heaviside(x, y)
with self.assertRaisesRegex(
RuntimeError, "Expected all tensors to be on the same device"
):
torch.heaviside(y, x)
@dtypes(*list(product(complex_types(), complex_types())))
def test_heaviside_complex(self, device, dtypes):
input_dtype = dtypes[0]
values_dtype = dtypes[1]
data = (complex(0, -6), complex(-1, 3), complex(1, 1))
input = torch.tensor(data, device=device, dtype=input_dtype)
values = torch.tensor(data, device=device, dtype=values_dtype)
out = torch.empty_like(input)
real = input.real
with self.assertRaisesRegex(
RuntimeError, "heaviside is not yet implemented for complex tensors."
):
torch.heaviside(input, real)
with self.assertRaisesRegex(
RuntimeError, "heaviside is not yet implemented for complex tensors."
):
real.heaviside(values)
with self.assertRaisesRegex(
RuntimeError, "heaviside is not yet implemented for complex tensors."
):
input.heaviside_(values)
with self.assertRaisesRegex(
RuntimeError, "heaviside is not yet implemented for complex tensors."
):
torch.heaviside(real, real, out=out)
def _test_logical(self, device, dtypes, op, a_, b_, expected_res_):
expected_res = torch.tensor(expected_res_, dtype=dtypes[0], device=device)
a = torch.tensor(a_, dtype=dtypes[0], device=device)
b = torch.tensor(b_, dtype=dtypes[1], device=device)
# new tensor
self.assertEqual(expected_res.bool(), getattr(a, op)(b))
# out
c = torch.empty(0, dtype=torch.bool, device=device)
getattr(torch, op)(a, b, out=c)
self.assertEqual(expected_res.bool(), c)
getattr(a, op + "_")(b)
self.assertEqual(expected_res, a)
@dtypes(
*product(
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
)
)
def test_logical_xor(self, device, dtypes):
self._test_logical(
device, dtypes, "logical_xor", [10, 0, 1, 0], [1, 0, 0, 10], [0, 0, 1, 1]
)
@dtypes(
*product(
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
)
)
def test_logical_and(self, device, dtypes):
self._test_logical(
device, dtypes, "logical_and", [10, 0, 1, 0], [1, 0, 0, 10], [1, 0, 0, 0]
)
@dtypes(
*product(
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
)
)
def test_logical_or(self, device, dtypes):
self._test_logical(
device, dtypes, "logical_or", [10, 0, 1, 0], [1, 0, 0, 10], [1, 0, 1, 1]
)
def test_remainder_overflow(self, device):
# Check Integer Overflows
x = torch.tensor(23500, dtype=torch.int64, device=device)
q = 392486996410368
self.assertEqual(x % q, x)
self.assertEqual(-x % q, q - x)
self.assertEqual(x % -q, x - q)
self.assertEqual(-x % -q, -x)
def test_rpow(self, device):
m = torch.randn(10, 10, device=device)
self.assertEqual(torch.pow(2, m), 2**m)
# test with scalar
m = torch.randn(1, device=device).squeeze()
assert m.dim() == 0, "m is intentionally a scalar"
self.assertEqual(torch.pow(2, m), 2**m)
@onlyCPU
def test_ldexp(self, device):
# random values
mantissas = torch.randn(64, device=device)
exponents = torch.randint(-31, 31, (64,), device=device, dtype=torch.int32)
# basic test
np_outcome = np.ldexp(mantissas.numpy(), exponents.numpy())
pt_outcome_1 = torch.ldexp(mantissas, exponents)
pt_outcome_2 = mantissas.ldexp(exponents)
self.assertEqual(np_outcome, pt_outcome_1)
self.assertEqual(np_outcome, pt_outcome_2)
mantissas.ldexp_(exponents)
self.assertEqual(np_outcome, mantissas)
# test bounds
mantissas = torch.tensor(
[float("inf"), float("-inf"), float("inf"), float("nan")], device=device
)
exponents = torch.randint(0, 31, (4,), device=device, dtype=torch.int32)
np_outcome = np.ldexp(mantissas.numpy(), exponents.numpy())
pt_outcome = torch.ldexp(mantissas, exponents)
self.assertEqual(np_outcome, pt_outcome)
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_lerp(self, device, dtype):
start_end_weight_shapes = [(), (5,), (5, 5)]
for shapes in product(
start_end_weight_shapes, start_end_weight_shapes, start_end_weight_shapes
):
start = torch.randn(shapes[0], device=device, dtype=dtype)
end = torch.randn(shapes[1], device=device, dtype=dtype)
# Tensor weights
weights = [
torch.randn(shapes[2], device=device, dtype=dtype),
random.random(),
]
if dtype.is_complex:
weights += [complex(0, 1), complex(0.4, 1.2)]
for weight in weights:
actual = torch.lerp(start, end, weight)
actual_method = start.lerp(end, weight)
self.assertEqual(actual, actual_method)
actual_out = torch.tensor(1.0, dtype=dtype, device=device)
torch.lerp(start, end, weight, out=actual_out)
self.assertEqual(actual, actual_out)
expected = start + weight * (end - start)
self.assertEqual(expected, actual)
@onlyCUDA
@dtypes(torch.half, torch.bfloat16)
def test_lerp_lowp(self, device, dtype):
ref_dtype = torch.float
xvals = (0.0, -30000.0)
yvals = (0.1, -20000.0)
xs = [torch.full((4,), xval, device=device, dtype=dtype) for xval in xvals]
ys = [torch.full((4,), yval, device=device, dtype=dtype) for yval in yvals]
weights = [70000, torch.full((4,), 8, device=device, dtype=dtype)]
for x, y, w in zip(xs, ys, weights):
xref = x.float()
yref = y.float()
wref = w.float() if isinstance(w, torch.Tensor) else w
actual = torch.lerp(x, y, w)
expected = torch.lerp(xref, yref, wref).to(dtype)
self.assertEqual(actual, expected, atol=0.0, rtol=0.0)
def _test_logaddexp(self, device, dtype, base2):
if base2:
ref_func = np.logaddexp2
our_func = torch.logaddexp2
else:
ref_func = np.logaddexp
our_func = torch.logaddexp
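        # Both functions compute log(exp(a) + exp(b)) (the base-2 analogue for logaddexp2)
        # in a numerically stable way; a naive evaluation would overflow for the large
        # inputs exercised below, so this doubles as a stability check.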
def _test_helper(a, b):
if dtype == torch.bfloat16:
ref = ref_func(a.cpu().float().numpy(), b.cpu().float().numpy())
v = our_func(a, b)
self.assertEqual(ref, v.float(), atol=0.01, rtol=0.01)
else:
ref = ref_func(a.cpu().numpy(), b.cpu().numpy())
v = our_func(a, b)
self.assertEqual(ref, v)
# simple test
a = torch.randn(64, 2, dtype=dtype, device=device) - 0.5
b = torch.randn(64, 2, dtype=dtype, device=device) - 0.5
_test_helper(a, b)
_test_helper(a[:3], b[:3])
# large value test for numerical stability
a *= 10000
b *= 10000
_test_helper(a, b)
_test_helper(a[:3], b[:3])
a = torch.tensor(
[float("inf"), float("-inf"), float("inf"), float("nan")],
dtype=dtype,
device=device,
)
b = torch.tensor(
[float("inf"), float("-inf"), float("-inf"), float("nan")],
dtype=dtype,
device=device,
)
_test_helper(a, b)
@dtypes(torch.float32, torch.float64, torch.bfloat16)
def test_logaddexp(self, device, dtype):
self._test_logaddexp(device, dtype, base2=False)
@dtypes(torch.float32, torch.float64, torch.bfloat16)
def test_logaddexp2(self, device, dtype):
self._test_logaddexp(device, dtype, base2=True)
def test_add(self, device):
dtypes = floating_and_complex_types()
for dtype in dtypes:
# [res] torch.add([res,] tensor1, tensor2)
m1 = torch.randn(100, 100, dtype=dtype, device=device)
v1 = torch.randn(100, dtype=dtype, device=device)
# contiguous
res1 = torch.add(m1[4], v1)
res2 = res1.clone().zero_()
for i in range(m1.size(1)):
res2[i] = m1[4, i] + v1[i]
self.assertEqual(res1, res2)
m1 = torch.randn(100, 100, device=device)
v1 = torch.randn(100, device=device)
# non-contiguous
res1 = torch.add(m1[:, 4], v1)
res2 = res1.clone().zero_()
for i in range(m1.size(0)):
res2[i] = m1[i, 4] + v1[i]
self.assertEqual(res1, res2)
# [res] torch.add([res,] tensor, value)
m1 = torch.randn(10, 10, device=device)
# contiguous
res1 = m1.clone()
res1[3].add_(2)
res2 = m1.clone()
for i in range(m1.size(1)):
res2[3, i] = res2[3, i] + 2
self.assertEqual(res1, res2)
# non-contiguous
m1 = torch.randn(10, 10, device=device)
res1 = m1.clone()
res1[:, 3].add_(2)
res2 = m1.clone()
for i in range(m1.size(0)):
res2[i, 3] = res2[i, 3] + 2
self.assertEqual(res1, res2)
# inter-type
m1 = torch.randn(10, 10, dtype=dtype, device=device)
self.assertEqual(m1 + 3, m1 + torch.tensor(3))
self.assertEqual(3 + m1, torch.tensor(3) + m1)
# contiguous + non-contiguous
m1 = torch.randn(10, 10, dtype=dtype, device=device)
m2 = torch.randn(10, 10, dtype=dtype, device=device).t()
res = m1 + m2
self.assertTrue(res.is_contiguous())
self.assertEqual(res, m1 + m2.contiguous())
# 1d + empty
m1 = torch.tensor([1.0], dtype=dtype, device=device)
m2 = torch.tensor([], dtype=dtype, device=device)
self.assertEqual(m1 + m2, [])
            # inter-type uint8
one = torch.tensor(1, dtype=torch.uint8, device=device)
self.assertEqual(torch.add(one, 1), 2)
self.assertEqual(torch.add(one, 1).dtype, torch.uint8)
# bool
m1 = torch.tensor(
[True, False, False, True, False, False], dtype=torch.bool, device=device
)
m2 = torch.tensor(
[True, True, False, False, False, True], dtype=torch.bool, device=device
)
expected = torch.tensor(
[True, True, False, True, False, True], dtype=torch.bool, device=device
)
self.assertEqual(m1 + m2, expected)
# fused multiply add
a = torch.zeros(2, 3, dtype=torch.bool, device=device)
res = torch.add(a, a, alpha=0)
expected = torch.zeros(2, 3, device=device).bool()
self.assertEqual(res, expected)
# bfloat16
m1 = torch.tensor([1.0, 2.0], dtype=torch.bfloat16)
m2 = torch.tensor([3.0, 4.0], dtype=torch.bfloat16)
self.assertEqual(m1 + m2, torch.tensor([4.0, 6.0], dtype=torch.bfloat16))
# different alpha types
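            # torch.add(input, other, alpha=a) computes input + a * other, so the expected
            # values below are m1 + alpha * m2 (e.g. with alpha=0.1: 2+3j + 0.1*(4+5j) == 2.4+3.5j).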
m1 = torch.tensor([2 + 3j, 4 + 5j], dtype=torch.complex64, device=device)
m2 = torch.tensor([4 + 5j, 2 + 3j], dtype=torch.complex64, device=device)
# add complex numbers with float alpha
res = torch.add(m1, m2, alpha=0.1)
expected = torch.tensor(
[2.4000 + 3.5000j, 4.2000 + 5.3000j], dtype=torch.complex64, device=device
)
self.assertEqual(res, expected)
# add complex numbers with complex alpha
res = torch.add(m1, m2, alpha=complex(0.1, 0.2))
expected = torch.tensor(
[1.4000 + 4.3000j, 3.6000 + 5.7000j], dtype=torch.complex64, device=device
)
self.assertEqual(res, expected)
# add complex numbers with integer alpha
res = torch.add(m1, m2, alpha=2)
expected = torch.tensor(
[10.0 + 13.0j, 8.0 + 11.0j], dtype=torch.complex64, device=device
)
self.assertEqual(res, expected)
# mismatched alpha
m1 = torch.tensor([1], dtype=torch.int8, device=device)
m2 = torch.tensor([2], dtype=torch.int8, device=device)
self.assertRaisesRegex(
RuntimeError,
r"Boolean alpha only supported for Boolean results\.",
lambda: torch.add(m1, m2, alpha=True),
)
self.assertRaisesRegex(
RuntimeError,
r"For integral input tensors, argument alpha must not be a floating point number\.",
lambda: torch.add(m1, m2, alpha=1.0),
)
# mismatched alpha, float / double tensor and complex alpha
msg = r"For non-complex input tensors, argument alpha must not be a complex number\."
m1 = torch.tensor([3.0, 4.0], device=device)
m2 = torch.tensor([4.0, 3.0], device=device)
self.assertRaisesRegex(
RuntimeError, msg, lambda: torch.add(m1, m2, alpha=complex(0.1, 0.2))
)
m1 = torch.tensor([3.0, 4.0], dtype=torch.double, device=device)
m2 = torch.tensor([4.0, 3.0], dtype=torch.double, device=device)
self.assertRaisesRegex(
RuntimeError, msg, lambda: torch.add(m1, m2, alpha=complex(0.1, 0.2))
)
# complex
m1 = torch.tensor((4.0000 + 4.0000j), dtype=torch.complex64)
m2 = torch.tensor(4.0, dtype=torch.float64)
self.assertRaisesRegex(
RuntimeError,
r"result type ComplexFloat can't be cast to the desired output type Double",
lambda: torch.add(m1, m1, out=m2),
)
@onlyCUDA
def test_addsub_half_tensor(self, device):
x = torch.tensor([60000.0], dtype=torch.half, device=device)
for op, y, alpha in (
(torch.add, torch.tensor([-60000.0], dtype=torch.half, device=device), 2),
(torch.sub, torch.tensor([60000.0], dtype=torch.half, device=device), 2),
(torch.add, -70000.0, 1),
(torch.sub, 70000.0, 1),
):
actual = op(x, y, alpha=alpha)
self.assertTrue(not (actual.isnan() or actual.isinf()))
def test_sub_typing(self, device):
m1 = torch.tensor(
[True, False, False, True, False, False], dtype=torch.bool, device=device
)
m2 = torch.tensor(
[True, True, False, False, False, True], dtype=torch.bool, device=device
)
self.assertRaisesRegex(
RuntimeError,
r"Subtraction, the `\-` operator, with two bool tensors is not supported. "
r"Use the `\^` or `logical_xor\(\)` operator instead.",
lambda: m1 - m2,
)
self.assertRaisesRegex(
RuntimeError,
r"Subtraction, the `\-` operator, with a bool tensor is not supported. "
r"If you are trying to invert a mask, use the `\~` or `logical_not\(\)` operator instead.",
lambda: 1 - m1,
)
self.assertRaisesRegex(
RuntimeError,
r"Subtraction, the `\-` operator, with a bool tensor is not supported. "
r"If you are trying to invert a mask, use the `\~` or `logical_not\(\)` operator instead.",
lambda: m2 - 1,
)
# mismatched alpha
m1 = torch.tensor([1], dtype=torch.int8, device=device)
m2 = torch.tensor([2], dtype=torch.int8, device=device)
self.assertRaisesRegex(
RuntimeError,
r"Boolean alpha only supported for Boolean results\.",
lambda: torch.sub(m1, m2, alpha=True),
)
self.assertRaisesRegex(
RuntimeError,
r"For integral input tensors, argument alpha must not be a floating point number\.",
lambda: torch.sub(m1, m2, alpha=1.0),
)
def test_mul(self, device):
m1 = torch.randn(10, 10, device=device)
res1 = m1.clone()
res1[:, 3].mul_(2)
res2 = m1.clone()
for i in range(res1.size(0)):
res2[i, 3] = res2[i, 3] * 2
self.assertEqual(res1, res2)
a1 = torch.tensor([True, False, False, True], dtype=torch.bool, device=device)
a2 = torch.tensor([True, False, True, False], dtype=torch.bool, device=device)
self.assertEqual(
a1 * a2,
torch.tensor([True, False, False, False], dtype=torch.bool, device=device),
)
if device == "cpu":
a1 = torch.tensor([0.1, 0.1], dtype=torch.bfloat16, device=device)
a2 = torch.tensor([1.1, 0.1], dtype=torch.bfloat16, device=device)
self.assertEqual(
a1 * a2,
torch.tensor([0.11, 0.01], dtype=torch.bfloat16, device=device),
atol=0.01,
rtol=0,
)
self.assertEqual(a1.mul(a2), a1 * a2)
def test_bool_tensor_comparison_ops(self, device):
a = torch.tensor(
[True, False, True, False, True, False], dtype=torch.bool, device=device
)
b = torch.tensor(
[True, False, True, True, True, True], dtype=torch.bool, device=device
)
self.assertEqual(
a == b, torch.tensor([1, 1, 1, 0, 1, 0], dtype=torch.bool, device=device)
)
self.assertEqual(
a != b, torch.tensor([0, 0, 0, 1, 0, 1], dtype=torch.bool, device=device)
)
self.assertEqual(
a < b, torch.tensor([0, 0, 0, 1, 0, 1], dtype=torch.bool, device=device)
)
self.assertEqual(
a > b, torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.bool, device=device)
)
self.assertEqual(
a >= b, torch.tensor([1, 1, 1, 0, 1, 0], dtype=torch.bool, device=device)
)
self.assertEqual(
a <= b, torch.tensor([1, 1, 1, 1, 1, 1], dtype=torch.bool, device=device)
)
self.assertEqual(
a > False, torch.tensor([1, 0, 1, 0, 1, 0], dtype=torch.bool, device=device)
)
self.assertEqual(
a == torch.tensor(True, dtype=torch.bool, device=device),
torch.tensor([1, 0, 1, 0, 1, 0], dtype=torch.bool, device=device),
)
self.assertEqual(
a == torch.tensor(0, dtype=torch.bool, device=device),
torch.tensor([0, 1, 0, 1, 0, 1], dtype=torch.bool, device=device),
)
self.assertFalse(a.equal(b))
@dtypes(*all_types_and(torch.half, torch.bfloat16, torch.bool))
def test_logical(self, device, dtype):
if dtype != torch.bool:
x = torch.tensor([1, 2, 3, 4], device=device, dtype=dtype)
b = torch.tensor([2], device=device, dtype=dtype)
self.assertEqual(x.lt(2), torch.tensor([True, False, False, False]))
self.assertEqual(x.le(2), torch.tensor([True, True, False, False]))
self.assertEqual(x.ge(2), torch.tensor([False, True, True, True]))
self.assertEqual(x.gt(2), torch.tensor([False, False, True, True]))
self.assertEqual(x.eq(2), torch.tensor([False, True, False, False]))
self.assertEqual(x.ne(2), torch.tensor([True, False, True, True]))
self.assertEqual(x.lt(b), torch.tensor([True, False, False, False]))
self.assertEqual(x.le(b), torch.tensor([True, True, False, False]))
self.assertEqual(x.ge(b), torch.tensor([False, True, True, True]))
self.assertEqual(x.gt(b), torch.tensor([False, False, True, True]))
self.assertEqual(x.eq(b), torch.tensor([False, True, False, False]))
self.assertEqual(x.ne(b), torch.tensor([True, False, True, True]))
else:
x = torch.tensor([True, False, True, False], device=device)
self.assertEqual(x.lt(True), torch.tensor([False, True, False, True]))
self.assertEqual(x.le(True), torch.tensor([True, True, True, True]))
self.assertEqual(x.ge(True), torch.tensor([True, False, True, False]))
self.assertEqual(x.gt(True), torch.tensor([False, False, False, False]))
self.assertEqual(x.eq(True), torch.tensor([True, False, True, False]))
self.assertEqual(x.ne(True), torch.tensor([False, True, False, True]))
def test_atan2(self, device):
def _test_atan2_with_size(size, device):
a = torch.rand(size=size, device=device, dtype=torch.double)
b = torch.rand(size=size, device=device, dtype=torch.double)
actual = a.atan2(b)
x = a.view(-1)
y = b.view(-1)
expected = torch.tensor(
[math.atan2(x[i].item(), y[i].item()) for i in range(x.numel())],
device=device,
dtype=torch.double,
)
self.assertEqual(expected, actual.view(-1), rtol=0, atol=0.02)
# bfloat16
a_bf16 = a.bfloat16()
b_bf16 = b.bfloat16()
actual_bf16 = a_bf16.atan2(b_bf16)
self.assertEqual(actual_bf16, actual.bfloat16())
self.assertEqual(expected, actual_bf16.view(-1), exact_dtype=False, rtol=0, atol=0.02)
_test_atan2_with_size((2, 2), device)
_test_atan2_with_size((3, 3), device)
_test_atan2_with_size((5, 5), device)
def test_atan2_edgecases(self, device):
def _test_atan2(x, y, expected, device, dtype):
expected_tensor = torch.tensor([expected], dtype=dtype, device=device)
x_tensor = torch.tensor([x], dtype=dtype, device=device)
y_tensor = torch.tensor([y], dtype=dtype, device=device)
actual = torch.atan2(y_tensor, x_tensor)
self.assertEqual(expected_tensor, actual, rtol=0, atol=0.02)
for dtype in [torch.float, torch.double]:
_test_atan2(0, 0, 0, device, dtype)
_test_atan2(0, 1, math.pi / 2, device, dtype)
_test_atan2(0, -1, math.pi / -2, device, dtype)
_test_atan2(-1, 0, math.pi, device, dtype)
_test_atan2(1, 0, 0, device, dtype)
_test_atan2(-1, -1, math.pi * -3 / 4, device, dtype)
_test_atan2(1, 1, math.pi / 4, device, dtype)
_test_atan2(1, -1, math.pi / -4, device, dtype)
_test_atan2(-1, 1, math.pi * 3 / 4, device, dtype)
def test_trapezoid(self, device):
def test_dx(sizes, dim, dx, device):
t = torch.randn(sizes, device=device)
actual = torch.trapezoid(t, dx=dx, dim=dim)
expected = np.trapz(t.cpu().numpy(), dx=dx, axis=dim)
self.assertEqual(expected.shape, actual.shape)
self.assertEqual(expected, actual, exact_dtype=False)
def test_x(sizes, dim, x, device):
t = torch.randn(sizes, device=device)
actual = torch.trapezoid(t, x=torch.tensor(x, device=device), dim=dim)
expected = np.trapz(t.cpu().numpy(), x=x, axis=dim)
self.assertEqual(expected.shape, actual.shape)
self.assertEqual(expected, actual.cpu(), exact_dtype=False)
test_dx((2, 3, 4), 1, 1, device)
test_dx((10, 2), 0, 0.1, device)
test_dx((1, 10), 0, 2.3, device)
test_dx((0, 2), 0, 1.0, device)
test_dx((0, 2), 1, 1.0, device)
test_x((2, 3, 4), 1, [1.0, 2.0, 3.0], device)
test_x(
(10, 2), 0, [2.0, 3.0, 4.0, 7.0, 11.0, 14.0, 22.0, 26.0, 26.1, 30.3], device
)
test_x((1, 10), 0, [1.0], device)
test_x((0, 2), 0, [], device)
test_x((0, 2), 1, [1.0, 2.0], device)
test_x((2, 3, 4), -1, [1.0, 2.0, 3.0, 4.0], device)
test_x((2, 3, 4), 0, [1.0, 2.0], device)
test_x((2, 3, 4), 1, [1.0, 2.0, 3.0], device)
test_x((2, 3, 4), 2, [1.0, 2.0, 3.0, 4.0], device)
test_x((2, 2, 4), -1, [[1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0]], device)
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
test_x((2, 3), 2, [], device)
test_dx((2, 3), 2, 1.0, device)
with self.assertRaisesRegex(
RuntimeError, "There must be one `x` value for each sample point"
):
test_x((2, 3), 1, [1.0, 2.0], device)
test_x((2, 3), 1, [1.0, 2.0, 3.0, 4.0], device)
@skipIf(not TEST_SCIPY, "Scipy required for the test.")
def test_cumulative_trapezoid(self, device):
import scipy.integrate
if hasattr(scipy.integrate, "cumulative_trapezoid"):
scipy_cumulative_trapezoid = scipy.integrate.cumulative_trapezoid
else: # Older version of SciPy uses a different name
scipy_cumulative_trapezoid = scipy.integrate.cumtrapz
def test_dx(sizes, dim, dx, device):
t = torch.randn(sizes, device=device)
y = t.cpu().numpy()
actual = torch.cumulative_trapezoid(t, dx=dx, dim=dim)
expected = scipy_cumulative_trapezoid(t.cpu().numpy(), dx=dx, axis=dim)
self.assertEqual(expected.shape, actual.shape)
self.assertEqual(expected, actual, exact_dtype=False, atol=1e-4, rtol=1e-4)
def test_x(sizes, dim, x, device):
t = torch.randn(sizes, device=device)
actual = torch.cumulative_trapezoid(
t, x=torch.tensor(x, device=device), dim=dim
)
expected = scipy_cumulative_trapezoid(t.cpu().numpy(), x=x, axis=dim)
self.assertEqual(expected.shape, actual.shape)
self.assertEqual(
expected, actual.cpu(), exact_dtype=False, atol=1e-4, rtol=1e-4
)
def test_empty_x(sizes, dim, x, device):
t = torch.randn(sizes, device=device)
actual = torch.cumulative_trapezoid(
t, x=torch.tensor(x, device=device), dim=dim
)
self.assertEqual(torch.empty(actual.shape), actual)
test_dx((2,), -1, 1, device)
test_dx((3, 3), -1, 1, device)
test_dx((4, 2), 0, 1, device)
test_dx((2, 3, 4), 1, 1, device)
test_dx((10, 2), 0, 0.1, device)
test_dx((1, 10), 0, 2.3, device)
test_dx((0, 2), 0, 1.0, device)
test_dx((0, 2), 1, 1.0, device)
test_dx((512, 512), 1, 1.0, device)
test_dx((100, 100, 100), 1, 1.0, device)
test_x((2,), -1, [100, 50], device)
test_x((4, 2), 0, [2, 3, 4, 5], device)
test_x((2, 3, 4), 1, [1.0, 2.0, 3.0], device)
test_x(
(10, 2), 0, [2.0, 3.0, 4.0, 7.0, 11.0, 14.0, 22.0, 26.0, 26.1, 30.3], device
)
test_x((1, 10), 0, [1.0], device)
test_x((0, 2), 1, [1, 2], device)
test_x((2, 3, 4), -1, [1.0, 2.0, 3.0, 4.0], device)
test_x((2, 3, 4), 0, [1.0, 2.0], device)
test_x((2, 3, 4), 1, [1.0, 2.0, 3.0], device)
test_x((2, 3, 4), 2, [1.0, 2.0, 3.0, 4.0], device)
test_empty_x(
(0, 2), 0, [], device
        ) # SciPy fails when x == []; our version returns an empty tensor
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
test_x((2, 3), 2, [], device)
test_dx((2, 3), 2, 1.0, device)
with self.assertRaisesRegex(
RuntimeError, "There must be one `x` value for each sample point"
):
test_x((2, 3), 1, [1.0, 2.0], device)
test_x((0, 2), 0, [1.0, 2.0], device)
test_x((2, 3), 1, [1.0, 2.0, 3.0, 4.0], device)
with self.assertRaisesRegex(
RuntimeError, "Currently, we only support dx as a real number"
):
test_dx((2, 2), -1, complex(1, 1), device)
with self.assertRaisesRegex(
TypeError, "received an invalid combination of arguments"
):
actual = torch.cumulative_trapezoid(
torch.randn((3, 3)), x=torch.randn((3, 3)), dx=3
)
@skipMeta
@dtypes(torch.double)
def test_pow_scalar_overloads_mem_overlap(self, device, dtype):
sz = 3
doubles = torch.randn(2 * sz, dtype=dtype, device=device)
self.check_internal_mem_overlap(lambda t: t.pow_(42), 1, dtype, device)
self.unary_check_input_output_mem_overlap(
doubles, sz, lambda input, out: torch.pow(input, 42, out=out)
)
self.unary_check_input_output_mem_overlap(
doubles, sz, lambda input, out: torch.pow(42, input, out=out)
)
@dtypes(
*list(
product(
all_types_and_complex_and(torch.half, torch.bfloat16),
all_types_and_complex_and(torch.half, torch.bfloat16),
)
)
)
def test_float_power(self, device, dtypes):
def to_np(value):
if isinstance(value, torch.Tensor) and value.dtype == torch.bfloat16:
return value.to(torch.float).cpu().numpy()
return value.cpu().numpy() if isinstance(value, torch.Tensor) else value
base_dtype = dtypes[0]
exp_dtype = dtypes[1]
out_dtype = (
torch.complex128
if base_dtype.is_complex or exp_dtype.is_complex
else torch.float64
)
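        # torch.float_power always computes in double precision (float64, or complex128
        # when either operand is complex), which is why the expected dtype above ignores
        # the possibly lower-precision input dtypes.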
base = make_tensor((30,), dtype=base_dtype, device=device, low=1, high=100)
        # Complex and real results do not agree between PyTorch and NumPy when computing negative and zero powers of 0
# Related: https://github.com/pytorch/pytorch/issues/48000
# base[0] = base[3] = base[7] = 0
exp = make_tensor((30,), dtype=exp_dtype, device=device, low=-2, high=2)
exp[0] = exp[4] = exp[6] = 0
expected = torch.from_numpy(np.float_power(to_np(base), to_np(exp)))
exponents = [-2.8, -2, -1, -0.5, 0.5, 1, 2]
complex_exponents = exponents + [
-2.5j,
-1.0j,
1.0j,
2.5j,
1.0 + 1.0j,
-1.0 - 1.5j,
3.3j,
]
for op in (
torch.float_power,
torch.Tensor.float_power,
torch.Tensor.float_power_,
):
# Case of Tensor x Tensor
if op is torch.Tensor.float_power_ and base_dtype != out_dtype:
with self.assertRaisesRegex(
RuntimeError, "operation's result requires dtype"
):
op(base.clone(), exp)
else:
result = op(base.clone(), exp)
self.assertEqual(expected, result)
if op is torch.float_power:
out = torch.empty_like(base).to(device=device, dtype=out_dtype)
op(base, exp, out=out)
self.assertEqual(expected, out)
# Case of Tensor x Scalar
for i in complex_exponents if exp_dtype.is_complex else exponents:
out_dtype_scalar_exp = (
torch.complex128
if base_dtype.is_complex or type(i) == complex
else torch.float64
)
expected_scalar_exp = torch.from_numpy(np.float_power(to_np(base), i))
if (
op is torch.Tensor.float_power_
and base_dtype != out_dtype_scalar_exp
):
with self.assertRaisesRegex(
RuntimeError, "operation's result requires dtype"
):
op(base.clone(), i)
else:
result = op(base.clone(), i)
self.assertEqual(expected_scalar_exp, result)
if op is torch.float_power:
out = torch.empty_like(base).to(
device=device, dtype=out_dtype_scalar_exp
)
op(base, i, out=out)
self.assertEqual(expected_scalar_exp, out)
# Case of Scalar x Tensor
for i in complex_exponents if base_dtype.is_complex else exponents:
out_dtype_scalar_base = (
torch.complex128
if exp_dtype.is_complex or type(i) == complex
else torch.float64
)
expected_scalar_base = torch.from_numpy(np.float_power(i, to_np(exp)))
result = torch.float_power(i, exp)
self.assertEqual(expected_scalar_base, result)
out = torch.empty_like(exp).to(device=device, dtype=out_dtype_scalar_base)
torch.float_power(i, exp, out=out)
self.assertEqual(expected_scalar_base, out)
def test_float_power_exceptions(self, device):
def _promo_helper(x, y):
for i in (x, y):
if type(i) == complex:
return torch.complex128
elif type(i) == torch.Tensor and i.is_complex():
return torch.complex128
return torch.double
test_cases = (
(torch.tensor([-2, -1, 0, 1, 2], device=device), -0.25),
(
torch.tensor([-1.0j, 0j, 1.0j, 1.0 + 1.0j, -1.0 - 1.5j], device=device),
2.0,
),
)
for base, exp in test_cases:
for out_dtype in (torch.long, torch.float, torch.double, torch.cdouble):
out = torch.empty(1, device=device, dtype=out_dtype)
required_dtype = _promo_helper(base, exp)
if out.dtype == required_dtype:
torch.float_power(base, exp, out=out)
else:
with self.assertRaisesRegex(
RuntimeError, "operation's result requires dtype"
):
torch.float_power(base, exp, out=out)
if base.dtype == required_dtype:
torch.Tensor.float_power_(base.clone(), exp)
else:
with self.assertRaisesRegex(
RuntimeError, "operation's result requires dtype"
):
torch.Tensor.float_power_(base.clone(), exp)
@skipIf(not TEST_SCIPY, "Scipy required for the test.")
@dtypes(
*product(
all_types_and(torch.half, torch.bool), all_types_and(torch.half, torch.bool)
)
)
def test_xlogy_xlog1py(self, device, dtypes):
x_dtype, y_dtype = dtypes
def out_variant_helper(torch_fn, x, y):
expected = torch_fn(x, y)
out = torch.empty_like(expected)
torch_fn(x, y, out=out)
self.assertEqual(expected, out)
def xlogy_inplace_variant_helper(x, y):
if x.dtype in integral_types_and(torch.bool):
with self.assertRaisesRegex(
RuntimeError, "can't be cast to the desired output type"
):
x.clone().xlogy_(y)
else:
expected = torch.empty_like(x)
torch.xlogy(x, y, out=expected)
inplace_out = x.clone().xlogy_(y)
self.assertEqual(expected, inplace_out)
def test_helper(torch_fn, reference_fn, inputs, scalar=None):
x, y, z = inputs
torch_fn_partial = partial(torch_fn, x)
reference_fn_partial = partial(reference_fn, x.cpu().numpy())
self.compare_with_numpy(
torch_fn_partial, reference_fn_partial, x, exact_dtype=False
)
self.compare_with_numpy(
torch_fn_partial, reference_fn_partial, y, exact_dtype=False
)
self.compare_with_numpy(
torch_fn_partial, reference_fn_partial, z, exact_dtype=False
)
val = scalar if scalar is not None else x
out_variant_helper(torch_fn, val, x)
out_variant_helper(torch_fn, val, y)
out_variant_helper(torch_fn, val, z)
# Tensor-Tensor Test (tensor of same and different shape)
x = make_tensor((3, 2, 4, 5), dtype=x_dtype, device=device, low=0.5, high=1000)
y = make_tensor((3, 2, 4, 5), dtype=y_dtype, device=device, low=0.5, high=1000)
z = make_tensor((4, 5), dtype=y_dtype, device=device, low=0.5, high=1000)
x_1p = make_tensor(
(3, 2, 4, 5), dtype=x_dtype, device=device, low=-0.5, high=1000
)
y_1p = make_tensor(
(3, 2, 4, 5), dtype=y_dtype, device=device, low=-0.5, high=1000
)
z_1p = make_tensor((4, 5), dtype=y_dtype, device=device, low=-0.5, high=1000)
xlogy_fns = torch.xlogy, scipy.special.xlogy
xlog1py_fns = torch.special.xlog1py, scipy.special.xlog1py
test_helper(*xlogy_fns, (x, y, z))
xlogy_inplace_variant_helper(x, x)
xlogy_inplace_variant_helper(x, y)
xlogy_inplace_variant_helper(x, z)
test_helper(*xlog1py_fns, (x_1p, y_1p, z_1p))
# Scalar-Tensor Test
test_helper(*xlogy_fns, (x, y, z), 3.14)
test_helper(*xlog1py_fns, (x_1p, y_1p, z_1p), 3.14)
# Special Values Tensor-Tensor
t = torch.tensor(
[-1.0, 0.0, 1.0, 2.0, float("inf"), -float("inf"), float("nan")],
device=device,
)
zeros = torch.zeros(7, dtype=y_dtype, device=device)
def test_zeros_special_helper(torch_fn, reference_fn, scalar=False):
zeros_t = 0 if scalar else zeros
zeros_np = 0 if scalar else zeros.cpu().numpy()
torch_fn_partial = partial(torch_fn, zeros_t)
reference_fn_partial = partial(reference_fn, zeros_np)
self.compare_with_numpy(
torch_fn_partial, reference_fn_partial, t, exact_dtype=False
)
out_variant_helper(torch_fn, zeros_t, t)
test_zeros_special_helper(*xlogy_fns)
xlogy_inplace_variant_helper(zeros, t)
test_zeros_special_helper(*xlog1py_fns)
# Special Values Scalar-Tensor
test_zeros_special_helper(*xlogy_fns, scalar=True)
test_zeros_special_helper(*xlog1py_fns, scalar=True)
def test_xlogy_xlog1py_scalar_type_promotion(self, device):
# Test that python numbers don't participate in type promotion at the same
# priority level as 0-dim tensors
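        # Illustrative example: a float32 tensor combined with the Python scalar 5.0
        # stays float32, whereas combining it with a 0-dim float64 tensor would
        # promote the result to float64.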
t = torch.randn((), dtype=torch.float32, device=device)
self.assertEqual(t.dtype, torch.xlogy(t, 5).dtype)
self.assertEqual(t.dtype, torch.xlogy(t, 5.0).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(t, 5).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(t, 5.0).dtype)
self.assertEqual(t.dtype, torch.xlogy(5, t).dtype)
self.assertEqual(t.dtype, torch.xlogy(5.0, t).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(5, t).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(5.0, t).dtype)
@skipIf(not TEST_SCIPY, "Scipy required for the test.")
def test_xlogy_xlog1py_bfloat16(self, device):
def _compare_helper(x, y, torch_fn, reference_fn):
x_np = x if isinstance(x, float) else x.cpu().to(torch.float).numpy()
y_np = y if isinstance(y, float) else y.cpu().to(torch.float).numpy()
expected = torch.from_numpy(reference_fn(x_np, y_np))
actual = torch_fn(x, y)
self.assertEqual(expected, actual, exact_dtype=False)
x_dtype, y_dtype = torch.bfloat16, torch.bfloat16
# Tensor-Tensor Test (tensor of same and different shape)
x = make_tensor((3, 2, 4, 5), dtype=x_dtype, device=device, low=0.5, high=1000)
y = make_tensor((3, 2, 4, 5), dtype=y_dtype, device=device, low=0.5, high=1000)
z = make_tensor((4, 5), dtype=y_dtype, device=device, low=0.5, high=1000)
x_1p = make_tensor(
(3, 2, 4, 5), dtype=x_dtype, device=device, low=-0.8, high=1000
)
y_1p = make_tensor(
(3, 2, 4, 5), dtype=y_dtype, device=device, low=-0.8, high=1000
)
z_1p = make_tensor((4, 5), dtype=y_dtype, device=device, low=-0.8, high=1000)
xlogy_fns = torch.xlogy, scipy.special.xlogy
xlog1py_fns = torch.special.xlog1py, scipy.special.xlog1py
_compare_helper(x, x, *xlogy_fns)
_compare_helper(x, y, *xlogy_fns)
_compare_helper(x, z, *xlogy_fns)
_compare_helper(x, 3.14, *xlogy_fns)
_compare_helper(y, 3.14, *xlogy_fns)
_compare_helper(z, 3.14, *xlogy_fns)
_compare_helper(x_1p, x_1p, *xlog1py_fns)
_compare_helper(x_1p, y_1p, *xlog1py_fns)
_compare_helper(x_1p, z_1p, *xlog1py_fns)
_compare_helper(x_1p, 3.14, *xlog1py_fns)
_compare_helper(y_1p, 3.14, *xlog1py_fns)
_compare_helper(z_1p, 3.14, *xlog1py_fns)
# Special Values Tensor-Tensor
t = torch.tensor(
[-1.0, 0.0, 1.0, 2.0, float("inf"), -float("inf"), float("nan")],
device=device,
)
zeros = torch.tensor(7, dtype=y_dtype, device=device)
_compare_helper(t, zeros, *xlogy_fns)
_compare_helper(t, 0.0, *xlogy_fns)
_compare_helper(t, zeros, *xlog1py_fns)
_compare_helper(t, 0.0, *xlog1py_fns)
@dtypes(*product(all_types_and(torch.bool), all_types_and(torch.bool)))
@skipIf(not TEST_SCIPY, "Scipy required for the test.")
@slowTest
def test_zeta(self, device, dtypes):
x_dtype, q_dtype = dtypes
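        # torch.special.zeta(x, q) computes the Hurwitz zeta function
        # sum_{k >= 0} 1 / (k + q) ** x, which scipy.special.zeta also implements,
        # so the two can be compared element-wise.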
def test_helper(x, q):
x_np = x if isinstance(x, float) else x.cpu().numpy()
q_np = q if isinstance(q, float) else q.cpu().numpy()
expected = torch.from_numpy(scipy.special.zeta(x_np, q_np))
actual = torch.special.zeta(x, q)
rtol, atol = None, None
if self.device_type == "cpu":
rtol, atol = 1e-6, 1e-6
self.assertEqual(expected, actual, rtol=rtol, atol=atol, exact_dtype=False)
# x tensor - q tensor same size
x = make_tensor((2, 3, 4), dtype=x_dtype, device=device)
q = make_tensor((2, 3, 4), dtype=q_dtype, device=device)
test_helper(x, q)
# x tensor - q tensor broadcast lhs
x = make_tensor((2, 1, 4), dtype=x_dtype, device=device)
q = make_tensor((2, 3, 4), dtype=q_dtype, device=device)
test_helper(x, q)
# x tensor - q tensor broadcast rhs
x = make_tensor((2, 3, 4), dtype=x_dtype, device=device)
q = make_tensor((2, 1, 4), dtype=q_dtype, device=device)
test_helper(x, q)
# x tensor - q tensor broadcast all
x = make_tensor((2, 3, 1), dtype=x_dtype, device=device)
q = make_tensor((2, 1, 4), dtype=q_dtype, device=device)
test_helper(x, q)
# x scalar - q tensor
for x in np.linspace(-5, 5, num=10).tolist():
if not q_dtype.is_floating_point:
q_dtype = torch.get_default_dtype()
q = make_tensor((2, 3, 4), dtype=q_dtype, device=device)
test_helper(x, q)
# x tensor - q scalar
for q in np.linspace(-5, 5, num=10).tolist():
if not x_dtype.is_floating_point:
x_dtype = torch.get_default_dtype()
x = make_tensor((2, 3, 4), dtype=x_dtype, device=device)
test_helper(x, q)
@onlyCUDA
@dtypes(
torch.chalf,
)
def test_mul_chalf_tensor_and_cpu_scalar(self, device, dtype):
# Tests that Tensor and CPU Scalar work for `mul` for chalf.
# Ideally, this should be covered by `test_complex_half_reference_testing`
# from test_ops.py by checking reference_samples from the OpInfo.
        # But currently that doesn't work, as sample generation requires support for
        # `index_select`, which is not implemented for `complex32` at the
        # time of writing this test.
# TODO: Remove this test once above issue is fixed.
# Ref: https://github.com/pytorch/pytorch/pull/76364
x = make_tensor((2, 2), device=device, dtype=dtype)
self.assertEqual(x * 2.5, x * torch.tensor(2.5, device=device, dtype=dtype))
tensor_binary_ops = [
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__eq__",
"__ne__",
"__add__",
"__radd__",
"__iadd__",
"__sub__",
"__rsub__",
"__isub__",
"__mul__",
"__rmul__",
"__imul__",
"__matmul__",
"__rmatmul__",
"__truediv__",
"__rtruediv__",
"__itruediv__",
"__floordiv__",
"__rfloordiv__",
"__ifloordiv__",
"__mod__",
"__rmod__",
"__imod__",
"__pow__",
"__rpow__",
"__ipow__",
"__lshift__",
"__rlshift__",
"__ilshift__",
"__rshift__",
"__rrshift__",
"__irshift__",
"__and__",
"__rand__",
"__iand__",
"__xor__",
"__rxor__",
"__ixor__",
"__or__",
"__ror__",
"__ior__",
# Unsupported operators
# '__imatmul__',
# '__divmod__', '__rdivmod__', '__idivmod__',
]
# Test that binary math operations return NotImplemented for unknown types.
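# Returning NotImplemented (rather than raising) lets the Python interpreter fall
# back to the reflected operation on the other operand, which is the standard
# protocol for binary dunder methods.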
def generate_not_implemented_tests(cls):
class UnknownType:
pass
# TODO: refactor to inline these
_types = [
torch.half,
torch.float,
torch.double,
torch.int8,
torch.short,
torch.int,
torch.long,
torch.uint8,
]
def create_test_func(op):
@dtypes(*_types)
def test(self, device, dtype):
# Generate the inputs
tensor = torch.empty((), device=device, dtype=dtype)
# Runs the tensor op on the device
result = getattr(tensor, op)(UnknownType())
self.assertEqual(result, NotImplemented)
return test
for op in tensor_binary_ops:
test_name = "test_{}_not_implemented".format(op)
assert not hasattr(cls, test_name), "{0} already in {1}".format(
test_name, cls.__name__
)
setattr(cls, test_name, create_test_func(op))
generate_not_implemented_tests(TestBinaryUfuncs)
instantiate_device_type_tests(TestBinaryUfuncs, globals())
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/test_binary_ufuncs.py
|
# Owner(s): ["oncall: profiler"]
import functools
import os
import re
import textwrap
import traceback
import unittest
import expecttest
import torch
from torch._C._autograd import _ExtraFields_PyCall, _ExtraFields_PyCCall
from torch.testing._internal.common_utils import (
TestCase, run_tests, IS_WINDOWS, TEST_WITH_CROSSREF, IS_ARM64)
# These functions can vary based on platform and build (e.g. with CUDA)
# and generally distract from, rather than add to, the test.
PRUNE_FUNCTIONS = {
"torch/profiler/profiler.py(...): start": True,
"torch/profiler/profiler.py(...): stop_trace": True,
"cudaStreamIsCapturing": False,
"cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags": False,
}
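# A value of True collapses the pruned subtree into a "..." placeholder in the
# rendered tree, while False drops the node and its children entirely (see
# `ProfilerTree.format` below).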
# ROCTracer is currently not producing events that the profiler can extract. We
# should bring it up to parity with the CUPTI Kineto / profiler integration, but in
# the meantime there is still utility in running the tests without checking that
# the values match the expected output.
# 1) We will still catch runtime errors and assert failures
# 2) We can diff the output to see how far we are from parity
#
# TODO: We also fail to capture events for Windows on some platforms.
ALLOW_CUDA_FAILURE = (torch.version.hip is not None) or IS_WINDOWS
class ProfilerTree:
@staticmethod
def test(f):
"""Mark unit test that will be using ProfilerTree to test traces.
This decorator serves two purposes. First, it provides a method name
that `format` can use to tell where the test runner (which is
environment specific) ends and the unit test begins. Second, it runs
the test with replicates and allows `assertTreesMatch` to adjust
based on which replicate is running.
"""
@functools.wraps(f)
def begin_unit_test_marker(self, replicates=3):
try:
for i in range(replicates):
self.tree_replicate = i
out = f(self)
if self.tree_replicate is None:
break
return out
finally:
delattr(self, "tree_replicate")
return begin_unit_test_marker
@classmethod
def format(cls, profiler, indent: int = 0):
def flatten(nodes, depth=0, out=None):
if out is None:
out = []
for node in nodes:
cls.validate_node(node)
name = cls.fmt_name(node.name())
add_ellipses = PRUNE_FUNCTIONS.get(name.strip(), None)
if add_ellipses is None:
out.append((depth, name))
flatten(node.children, depth + 1, out)
elif add_ellipses:
out.append((depth, "..."))
return out
flat_nodes = flatten(profiler.kineto_results.experimental_event_tree())
# Profiler inserts a `cudaDeviceSynchronize` at the end of profiling.
if flat_nodes and flat_nodes[-1][1] == "cudaDeviceSynchronize":
flat_nodes = flat_nodes[:-1]
min_depth = min([d + 1 for d, name in flat_nodes if "begin_unit_test_marker" in name] or [0])
return textwrap.indent(
"\n".join([f"{' ' * (d - min_depth)}{name.rstrip()}" for d, name in flat_nodes if d >= min_depth]),
" " * indent)
@staticmethod
def fmt_name(name: str) -> str:
# torch::autograd::Node relies on c10::demangle to generate names, and
# Windows demangles to include `struct` in the name.
if IS_WINDOWS:
name = name.replace('struct torch::autograd::AccumulateGrad', 'torch::autograd::AccumulateGrad')
match = re.match(r"(.*)\.py\(([0-9]+)\): (.*)$", name)
if match:
filename, _, fn = match.groups()
# This test can appear as `test/test_profiler_tree.py` depending on
# where it is run from.
if filename.endswith(os.path.splitext(__file__)[0]):
filename = os.path.split(os.path.splitext(__file__)[0])[1]
# We test against a string literal, so all paths have to look like POSIX paths.
filename = filename.replace(os.sep, "/")
# We don't want to have to update this test every time PyTorch changes.
# At some point we should test some line numbers, but for now it's
# too brittle.
lineno = "..."
return f"{filename}.py({lineno}): {fn}"
for kernel_pattern in (
"void at::native::elementwise_kernel",
"void at::native::reduce_kernel",
"void at::native::vectorized_elementwise_kernel",
"void at::native::unrolled_elementwise_kernel",
r"void [a-zA-Z0-9]+_kernel", # Nvidia kernels.
):
name = re.sub(
rf"{kernel_pattern}<.+>\(.+\)$",
f"{kernel_pattern.replace('[a-zA-Z0-9]+', '...')}<...>(...)",
name)
return re.sub(
"object at 0x[0-9a-fA-F]+>",
"object at 0xXXXXXXXXXXXX>",
name)
@classmethod
def validate_node(cls, node):
extra_fields = node.extra_fields
if isinstance(extra_fields, (_ExtraFields_PyCall, _ExtraFields_PyCCall)):
# Check that the lineage established by the profiler matches the
# caller recorded by the Python tracer.
parent = node.parent
while parent is not None:
if isinstance(parent.extra_fields, _ExtraFields_PyCall):
break
parent = parent.parent
def to_string(frame_state):
return f"{frame_state.file_name}(...): {frame_state.function_name}"
if parent:
parent_name = to_string(parent.extra_fields.callsite)
caller_name = to_string(extra_fields.caller)
assert parent_name == caller_name, f"{parent_name} vs. {caller_name}"
@unittest.skipIf(IS_ARM64, "Not working on ARM")
class TestProfilerTree(TestCase):
def assertTreesMatch(self, actual: str, expected: str, allow_failure: bool = False):
# Warning: Here be dragons
# Different platforms will have subtly different behavior for Python
# tracing. Observed differences include:
# 1) Windows symbolicates names differently from posix
# 2) The profile callback for c_call does not fire for Tensor.__pow__
# on certain platforms. This is not caused by the function tracer,
# but by cPython itself.
#
# The purpose of these unit tests is to ensure that the profiler is
        # doing reasonable things. When these platform-dependent variations occur,
        # simply coerce them into a platform-independent form. If you made a
        # change in the codebase which changes the trace produced, simply use
# EXPECTTEST_ACCEPT=1 to update the tests to reflect the new structure.
# expecttest will not show the diff view if `len(actual) < len(expected)`
if not expecttest.ACCEPT:
actual = actual.ljust(len(expected))
self.maxDiff = None
replicate = getattr(self, "tree_replicate", None)
self.assertIsNotNone(replicate, "Please annotate test with `@ProfilerTree.test`")
# The profiler should produce deterministic results and should return
# to a clean state after each run. As a result, only the first
# replicate is allowed to update `expected`. If subsequent runs do not
# match it is a bug in the profiler.
if replicate:
self.assertEqual(actual, expected)
else:
try:
self.assertExpectedInline(actual, expected, skip=1)
except AssertionError as e:
if allow_failure:
self.tree_replicate = None
msg = traceback.format_exception_only(type(e), e)[0]
print(msg.split("AssertionError:")[-1])
else:
raise
@ProfilerTree.test
def test_profiler_experimental_tree(self):
t1, t2 = torch.ones(1, requires_grad=True), torch.ones(1, requires_grad=True)
with torch.profiler.profile() as p:
z = torch.add(t1, t2)
y = torch.ones(1)
loss = (y - z) ** 2
loss.backward()
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
aten::add
aten::ones
aten::empty
aten::fill_
aten::sub
aten::pow
aten::result_type
aten::to
aten::ones_like
aten::empty_like
aten::empty_strided
aten::fill_
autograd::engine::evaluate_function: PowBackward0
PowBackward0
aten::pow
aten::result_type
aten::to
aten::copy_
aten::mul
aten::mul
aten::to
aten::_to_copy
aten::empty_strided
aten::copy_
aten::mul
autograd::engine::evaluate_function: SubBackward0
SubBackward0
aten::neg
autograd::engine::evaluate_function: AddBackward0
AddBackward0
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::new_empty_strided
aten::empty_strided
aten::copy_
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::detach
detach"""
)
@ProfilerTree.test
def test_profiler_experimental_tree_with_record_function(self):
with torch.profiler.profile() as p:
with torch.autograd.profiler.record_function("Top level Annotation"):
with torch.autograd.profiler.record_function("First Annotation"):
x = torch.ones((1,), requires_grad=True)
# Check that we correctly handle the case when a user
# annotation does not call `__exit__`.
_ = torch.autograd.profiler.record_function("Second Annotation").__enter__()
y = x + 1
with torch.autograd.profiler.record_function("Third Annotation"):
y.backward()
# NB: The `aten::zeros` before the record function annotations are due to
# `at::cpp_custom_type_hack`. When we switch to `torch::CustomClassHolder`
# they will disappear.
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
aten::zeros
aten::zeros
aten::empty
aten::zero_
Top level Annotation
aten::empty
aten::zeros
aten::zeros
aten::empty
aten::zero_
First Annotation
aten::empty
aten::ones
aten::empty
aten::fill_
aten::zeros
aten::zeros
aten::empty
aten::zero_
Second Annotation
aten::empty
aten::add
aten::to
aten::_to_copy
aten::empty_strided
aten::copy_
aten::zeros
aten::zeros
aten::empty
aten::zero_
Third Annotation
aten::empty
aten::ones_like
aten::empty_like
aten::empty_strided
aten::fill_
autograd::engine::evaluate_function: AddBackward0
AddBackward0
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::new_empty_strided
aten::empty_strided
aten::copy_"""
)
@ProfilerTree.test
def test_profiler_experimental_tree_with_memory(self):
t1, t2 = torch.ones(1, requires_grad=True), torch.ones(1, requires_grad=True)
with torch.profiler.profile(profile_memory=True) as p:
z = torch.add(t1, t2)
y = torch.ones(1)
loss = (y - z) ** 2
loss.backward()
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
aten::add
[memory]
aten::ones
aten::empty
[memory]
aten::fill_
aten::sub
[memory]
aten::pow
aten::result_type
aten::to
[memory]
aten::ones_like
aten::empty_like
aten::empty_strided
[memory]
aten::fill_
autograd::engine::evaluate_function: PowBackward0
PowBackward0
aten::pow
aten::result_type
aten::to
[memory]
aten::copy_
aten::mul
[memory]
aten::mul
aten::to
aten::_to_copy
aten::empty_strided
[memory]
aten::copy_
[memory]
[memory]
[memory]
aten::mul
[memory]
[memory]
[memory]
[memory]
autograd::engine::evaluate_function: SubBackward0
SubBackward0
aten::neg
[memory]
[memory]
autograd::engine::evaluate_function: AddBackward0
AddBackward0
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::new_empty_strided
aten::empty_strided
[memory]
aten::copy_
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::detach
detach
[memory]"""
)
@unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
@ProfilerTree.test
def test_profiler_experimental_tree_with_memory_and_stack(self):
t1, t2 = torch.ones(1, requires_grad=True), torch.ones(1, requires_grad=True)
with torch.profiler.profile(with_stack=True, profile_memory=True) as p:
z = torch.add(t1, t2)
y = torch.ones(1)
loss = torch.pow(y - z, 2)
loss.backward()
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
test_profiler_tree.py(...): test_profiler_experimental_tree_with_memory_and_stack
torch/profiler/profiler.py(...): __enter__
...
<built-in method add of type object at 0xXXXXXXXXXXXX>
aten::add
[memory]
<built-in method ones of type object at 0xXXXXXXXXXXXX>
aten::ones
aten::empty
[memory]
aten::fill_
aten::sub
[memory]
<built-in method pow of type object at 0xXXXXXXXXXXXX>
aten::pow
aten::result_type
aten::to
[memory]
torch/_tensor.py(...): backward
<built-in function _has_torch_function_unary>
torch/autograd/__init__.py(...): backward
<built-in function isinstance>
<built-in function isinstance>
<built-in function len>
torch/autograd/__init__.py(...): _tensor_or_tensors_to_tuple
torch/autograd/__init__.py(...): _make_grads
<built-in function isinstance>
<built-in method numel of Tensor object at 0xXXXXXXXXXXXX>
<built-in method ones_like of type object at 0xXXXXXXXXXXXX>
aten::ones_like
aten::empty_like
aten::empty_strided
[memory]
aten::fill_
<built-in method append of list object at 0xXXXXXXXXXXXX>
<built-in method run_backward of torch._C._EngineBase object at 0xXXXXXXXXXXXX>
autograd::engine::evaluate_function: PowBackward0
PowBackward0
aten::pow
aten::result_type
aten::to
[memory]
aten::copy_
aten::mul
[memory]
aten::mul
aten::to
aten::_to_copy
aten::empty_strided
[memory]
aten::copy_
[memory]
[memory]
[memory]
aten::mul
[memory]
[memory]
[memory]
[memory]
autograd::engine::evaluate_function: SubBackward0
SubBackward0
aten::neg
[memory]
[memory]
autograd::engine::evaluate_function: AddBackward0
AddBackward0
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::new_empty_strided
aten::empty_strided
[memory]
aten::copy_
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::detach
detach
[memory]
torch/profiler/profiler.py(...): __exit__
torch/profiler/profiler.py(...): stop
torch/profiler/profiler.py(...): _transit_action
<built-in method get of dict object at 0xXXXXXXXXXXXX>
enum.py(...): __hash__
<built-in function hash>
..."""
)
@unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
@ProfilerTree.test
def test_profiler_experimental_tree_with_stack_and_modules(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.layers = [
torch.nn.ReLU(),
torch.nn.Linear(1, 1),
torch.nn.ReLU(),
]
def forward(self, x: torch.Tensor) -> torch.Tensor:
for l in self.layers:
x = l(x)
return x
model = MyModule()
with torch.profiler.profile(with_stack=True) as p:
for _ in range(2):
model(torch.ones((1,)))
self.maxDiff = None
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
test_profiler_tree.py(...): test_profiler_experimental_tree_with_stack_and_modules
torch/profiler/profiler.py(...): __enter__
...
<built-in method ones of type object at 0xXXXXXXXXXXXX>
aten::ones
aten::empty
aten::fill_
nn.Module: MyModule_0
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
test_profiler_tree.py(...): forward
nn.Module: ReLU_0
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
torch/nn/modules/activation.py(...): forward
torch/nn/functional.py(...): relu
<built-in function _has_torch_function_unary>
<built-in method relu of type object at 0xXXXXXXXXXXXX>
aten::relu
aten::clamp_min
nn.Module: Linear_0
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
torch/nn/modules/linear.py(...): forward
torch/nn/modules/module.py(...): __getattr__
torch/nn/modules/module.py(...): __getattr__
<built-in function linear>
aten::linear
aten::t
aten::transpose
aten::as_strided
aten::matmul
aten::unsqueeze
aten::as_strided
aten::mm
aten::resolve_conj
aten::resolve_conj
aten::resolve_conj
aten::squeeze_
aten::as_strided_
aten::add_
nn.Module: ReLU_1
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
torch/nn/modules/activation.py(...): forward
torch/nn/functional.py(...): relu
<built-in function _has_torch_function_unary>
<built-in method relu of type object at 0xXXXXXXXXXXXX>
aten::relu
aten::clamp_min
<built-in method ones of type object at 0xXXXXXXXXXXXX>
aten::ones
aten::empty
aten::fill_
nn.Module: MyModule_0
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
test_profiler_tree.py(...): forward
nn.Module: ReLU_0
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
torch/nn/modules/activation.py(...): forward
torch/nn/functional.py(...): relu
<built-in function _has_torch_function_unary>
<built-in method relu of type object at 0xXXXXXXXXXXXX>
aten::relu
aten::clamp_min
nn.Module: Linear_0
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
torch/nn/modules/linear.py(...): forward
torch/nn/modules/module.py(...): __getattr__
torch/nn/modules/module.py(...): __getattr__
<built-in function linear>
aten::linear
aten::t
aten::transpose
aten::as_strided
aten::matmul
aten::unsqueeze
aten::as_strided
aten::mm
aten::resolve_conj
aten::resolve_conj
aten::resolve_conj
aten::squeeze_
aten::as_strided_
aten::add_
nn.Module: ReLU_1
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
torch/nn/modules/activation.py(...): forward
torch/nn/functional.py(...): relu
<built-in function _has_torch_function_unary>
<built-in method relu of type object at 0xXXXXXXXXXXXX>
aten::relu
aten::clamp_min
torch/profiler/profiler.py(...): __exit__
torch/profiler/profiler.py(...): stop
torch/profiler/profiler.py(...): _transit_action
<built-in method get of dict object at 0xXXXXXXXXXXXX>
enum.py(...): __hash__
<built-in function hash>
..."""
)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
@ProfilerTree.test
def test_profiler_experimental_tree_cuda(self):
with torch.profiler.profile(profile_memory=True) as p:
weight = torch.ones(1, device="cuda", requires_grad=True)
x = torch.ones(1, device="cuda")
y = torch.add(weight, x)
loss = torch.pow(y, 2)
loss.backward()
torch.optim.SGD([weight], lr=0.01, momentum=0.9).step()
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
aten::ones
aten::empty
[memory]
aten::fill_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
aten::ones
aten::empty
[memory]
aten::fill_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
aten::add
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
aten::pow
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
aten::result_type
aten::to
[memory]
aten::ones_like
aten::empty_like
aten::empty_strided
[memory]
aten::fill_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
autograd::engine::evaluate_function: PowBackward0
PowBackward0
aten::pow
aten::result_type
aten::to
[memory]
aten::copy_
cudaMemcpyAsync
Memcpy DtoD (Device -> Device)
aten::mul
[memory]
aten::mul
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
[memory]
aten::mul
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
[memory]
[memory]
autograd::engine::evaluate_function: AddBackward0
AddBackward0
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::detach
detach
[memory]
aten::zeros
aten::zeros
aten::empty
[memory]
aten::zero_
Optimizer.step#SGD.step
aten::empty
[memory]
[memory]
[memory]
aten::clone
aten::empty_strided
[memory]
aten::copy_
cudaMemcpyAsync
Memcpy DtoD (Device -> Device)
aten::detach
detach
aten::add_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]""", # noqa: B950
allow_failure=ALLOW_CUDA_FAILURE,
)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
@ProfilerTree.test
def test_profiler_experimental_tree_cuda_with_stream(self):
streams = [torch.cuda.Stream() for _ in range(3)]
results = []
with torch.profiler.profile(profile_memory=True) as p:
x = torch.ones((4, 4), device="cuda")
for stream in streams:
with torch.cuda.stream(stream):
results.append(torch.tanh(x) - x)
del results
for s in streams:
torch.cuda.current_stream().wait_stream(s)
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
aten::ones
aten::empty
[memory]
aten::fill_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
aten::tanh
cudaMalloc
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
aten::sub
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
[memory]
aten::tanh
cudaMalloc
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
aten::sub
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
[memory]
aten::tanh
cudaMalloc
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
aten::sub
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
[memory]""",
allow_failure=ALLOW_CUDA_FAILURE,
)
@unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
@ProfilerTree.test
def test_profiler_experimental_tree_cuda_detailed(self):
model = torch.nn.modules.Linear(1, 1, device="cuda")
model.train()
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
def step():
x = torch.ones((1, 1), device="cuda")
loss = model(x)
loss.backward()
opt.step()
# Warmup
for _ in range(3):
step()
with torch.profiler.profile(profile_memory=True, with_stack=True) as p:
step()
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
test_profiler_tree.py(...): test_profiler_experimental_tree_cuda_detailed
torch/profiler/profiler.py(...): __enter__
...
test_profiler_tree.py(...): step
<built-in method ones of type object at 0xXXXXXXXXXXXX>
aten::ones
aten::empty
[memory]
aten::fill_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
nn.Module: Linear_0
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
torch/nn/modules/linear.py(...): forward
torch/nn/modules/module.py(...): __getattr__
torch/nn/modules/module.py(...): __getattr__
<built-in function linear>
aten::linear
aten::t
aten::transpose
aten::as_strided
aten::addmm
cudaMemcpyAsync
Memcpy DtoD (Device -> Device)
cudaLaunchKernel
void ..._kernel<...>(...)
[memory]
aten::expand
aten::as_strided
torch/_tensor.py(...): backward
<built-in function _has_torch_function_unary>
torch/autograd/__init__.py(...): backward
<built-in function isinstance>
<built-in function isinstance>
<built-in function len>
torch/autograd/__init__.py(...): _tensor_or_tensors_to_tuple
torch/autograd/__init__.py(...): _make_grads
<built-in function isinstance>
<built-in method numel of Tensor object at 0xXXXXXXXXXXXX>
<built-in method ones_like of type object at 0xXXXXXXXXXXXX>
aten::ones_like
aten::empty_like
aten::empty_strided
[memory]
aten::fill_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
<built-in method append of list object at 0xXXXXXXXXXXXX>
<built-in method run_backward of torch._C._EngineBase object at 0xXXXXXXXXXXXX>
autograd::engine::evaluate_function: AddmmBackward0
AddmmBackward0
aten::t
aten::transpose
aten::as_strided
aten::mm
cudaLaunchKernel
void ..._kernel<...>(...)
[memory]
aten::t
aten::transpose
aten::as_strided
aten::sum
aten::sum
cudaLaunchKernel
void at::native::reduce_kernel<...>(...)
[memory]
aten::view
aten::view
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::add_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
autograd::engine::evaluate_function: TBackward0
TBackward0
aten::t
aten::transpose
aten::as_strided
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::add_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
[memory]
torch/optim/optimizer.py(...): wrapper
<built-in method format of str object at 0xXXXXXXXXXXXX>
torch/autograd/profiler.py(...): __init__
<built-in method zeros of type object at 0xXXXXXXXXXXXX>
aten::zeros
aten::zeros
aten::empty
[memory]
aten::zero_
torch/autograd/profiler.py(...): __enter__
torch/_ops.py(...): __call__
<built-in method _record_function_enter of PyCapsule object at 0xXXXXXXXXXXXX>
Optimizer.step#SGD.step
aten::empty
[memory]
[memory]
[memory]
torch/optim/optimizer.py(...): _use_grad
<built-in function is_grad_enabled>
torch/autograd/grad_mode.py(...): __init__
<built-in function is_grad_enabled>
<built-in function _set_grad_enabled>
torch/optim/sgd.py(...): step
<built-in method append of list object at 0xXXXXXXXXXXXX>
<built-in method append of list object at 0xXXXXXXXXXXXX>
torch/_tensor.py(...): __hash__
<built-in function id>
<built-in method append of list object at 0xXXXXXXXXXXXX>
<built-in method append of list object at 0xXXXXXXXXXXXX>
<built-in method append of list object at 0xXXXXXXXXXXXX>
torch/_tensor.py(...): __hash__
<built-in function id>
<built-in method append of list object at 0xXXXXXXXXXXXX>
torch/optim/sgd.py(...): sgd
torch/optim/sgd.py(...): _single_tensor_sgd
<built-in method mul_ of Tensor object at 0xXXXXXXXXXXXX>
[memory]
aten::mul_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
<built-in method add_ of Tensor object at 0xXXXXXXXXXXXX>
aten::add_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
<built-in method add_ of Tensor object at 0xXXXXXXXXXXXX>
aten::add_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
<built-in method mul_ of Tensor object at 0xXXXXXXXXXXXX>
[memory]
aten::mul_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
<built-in method add_ of Tensor object at 0xXXXXXXXXXXXX>
aten::add_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
<built-in method add_ of Tensor object at 0xXXXXXXXXXXXX>
aten::add_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
torch/_tensor.py(...): __hash__
<built-in function id>
torch/_tensor.py(...): __hash__
<built-in function id>
torch/autograd/grad_mode.py(...): __init__
<built-in function is_grad_enabled>
<built-in function _set_grad_enabled>
torch/autograd/profiler.py(...): __exit__
torch/_ops.py(...): __call__
<built-in method _record_function_exit of PyCapsule object at 0xXXXXXXXXXXXX>
[memory]
[memory]
torch/profiler/profiler.py(...): __exit__
torch/profiler/profiler.py(...): stop
torch/profiler/profiler.py(...): _transit_action
<built-in method get of dict object at 0xXXXXXXXXXXXX>
enum.py(...): __hash__
<built-in function hash>
...""", # noqa: B950
allow_failure=ALLOW_CUDA_FAILURE,
)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_profiler_tree.py
|
# Owner(s): ["oncall: jit"]
import sys
sys.argv.append("--jit_executor=simple")
from test_jit import * # noqa: F403
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_jit_simple.py
|
# Owner(s): ["module: autograd"]
import contextlib
import gc
import io
import math
import os
import random
import sys
import tempfile
import threading
import time
import unittest
import uuid
import warnings
import operator
import subprocess
from copy import deepcopy
from collections import OrderedDict
from itertools import product
from operator import mul
from functools import reduce, partial
import torch
from torch import nn
from torch._six import inf, nan
from torch.autograd.function import once_differentiable
from torch.autograd.profiler import (profile, record_function, emit_nvtx)
from torch.autograd.profiler_util import (_format_time, EventList, FunctionEvent, FunctionEventAvg)
from torch.utils.checkpoint import checkpoint
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoLapack, slowTest, IS_WINDOWS, IS_MACOS,
disable_gc, gradcheck, gradgradcheck, parametrize,
instantiate_parametrized_tests, skipIfMps, set_warn_always_context)
from torch.autograd import Variable, Function, detect_anomaly, kineto_available, _calculate_shape
from torch.autograd.function import InplaceFunction
import torch.autograd.forward_ad as fwAD
from torch.testing._internal.common_methods_invocations import mask_not_all_zeros
from torch.testing._internal.common_device_type import (instantiate_device_type_tests,
onlyCPU, onlyCUDA, dtypes, dtypesIfCUDA,
deviceCountAtLeast, skipMeta, dtypesIfMPS)
from torch.testing._internal.common_dtype import floating_types_and
from torch.utils._mode_utils import no_dispatch
import weakref
import pickle
def graph_desc(fn):
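    """Render the autograd graph rooted at `fn` as a nested string of grad_fn class names."""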
if fn is None:
return 'None'
result = type(fn).__name__ + '('
next_functions = fn.next_functions
for next_fn, _ in next_functions:
result += graph_desc(next_fn)
result += ', '
if next_functions:
result = result[:-2]
return result + ')'
class TestAutograd(TestCase):
def test_tensor_grad_warnings(self):
dummy = torch.empty(1)
with warnings.catch_warnings(record=True) as w:
# Accessing .grad on leaf
dummy.requires_grad_()
foo = dummy.grad
self.assertEqual(len(w), 0)
# Accessing .grad on non-leaf
dummy = dummy.clone()
foo = dummy.grad
self.assertEqual(len(w), 1)
# Accessing .grad on non-leaf that retains gradients
dummy.retain_grad()
foo = dummy.grad
self.assertEqual(len(w), 1)
def _function_test(self, cls):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
result = cls.apply(x, 2, y)
go = torch.ones((), requires_grad=True)
result.sum().backward(go, create_graph=True)
self.assertEqual(x.grad, y + torch.ones(5, 5))
self.assertEqual(y.grad, x + torch.ones(5, 5) * 2)
self.assertIsNotNone(x.grad.grad_fn)
self.assertIsNotNone(y.grad.grad_fn)
return x, y
def test_function(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_tensors
# NOTE: self is the test case here
self.assertIsInstance(var1, torch.Tensor)
self.assertIsInstance(var2, torch.Tensor)
self.assertIsInstance(grad_output, torch.Tensor)
return (grad_output + grad_output * var2, None,
grad_output * ctx.pyscalar + grad_output * var1)
x, y = self._function_test(MyFunction)
x_grad_desc = graph_desc(x.grad.grad_fn)
y_grad_desc = graph_desc(y.grad.grad_fn)
self.assertExpected(x_grad_desc, "x_grad_desc")
self.assertExpected(y_grad_desc, "y_grad_desc")
def test_once_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
self.assertFalse(torch.is_grad_enabled())
t1, t2 = ctx.saved_tensors
return (grad_output + grad_output * t2, None,
grad_output * ctx.pyscalar + grad_output * t1)
x, y = self._function_test(MyFunction)
self.assertEqual(graph_desc(x.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
self.assertEqual(graph_desc(y.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
def test_function_returns_input(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad * 2
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
with torch.no_grad():
v.grad.zero_()
MyFunction.apply(v.clone()).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
def test_function_returns_undefined_tensor(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad):
return None
# Test that undefined tensors returned from custom backward function
# are propagated as undefined and not tensor full of zeroes
x = torch.ones(1, requires_grad=True)
MyFunction.apply(x).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x ** 2).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x).sum().backward()
self.assertIsNone(x.grad)
self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0])
def test_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
self.assertEqual(grad, torch.zeros(1))
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_dont_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
ctx.set_materialize_grads(False)
return x
@staticmethod
def backward(ctx, grad):
self.assertIsNone(grad)
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_legacy_function_deprecation_exception(self):
# Trigger exception
class MyFunction(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
# Check exception occurs
with self.assertRaisesRegex(
RuntimeError,
'Legacy autograd function with non-static forward method is deprecated'):
MyFunction()(torch.randn(3, 4))
class SimulateBackwardError(Function):
@staticmethod
def forward(ctx, input):
return input.clone()
@staticmethod
@once_differentiable
def backward(ctx, input):
raise Exception("Simulate error on backward pass")
def test_custom_function_exception(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
tmp = (t1 + t2) * (t1 + t2)
t3 = TestAutograd.SimulateBackwardError.apply(tmp)
with self.assertRaisesRegex(Exception, "Simulate error on backward pass"):
t3.sum().backward()
def test_custom_function_non_tensor_inputs_outputs(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
# Save scale
ctx.scale = scale
ctx.save_for_backward(t1, t2, t3)
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *grads):
# Verify grads
self.assertEqual(7, len(grads))
self.assertIsNone(grads[0])
self.assertIsNone(grads[2])
self.assertIsNone(grads[3])
self.assertIsNone(grads[5])
scale = ctx.scale
var1, var2, var3 = ctx.saved_tensors
return (
grads[1] * scale + grads[4] * var2 * scale + grads[6],
grads[1] * var3 * scale + grads[4] * var1 * scale,
None,
grads[1] * var2 * scale + grads[4] * scale,
)
t1 = torch.rand(10, dtype=torch.double, requires_grad=True)
t2 = torch.rand(10, dtype=torch.double, requires_grad=True)
t3 = torch.rand(10, dtype=torch.double)
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
# Validate running backward.
torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
self.assertIsNone(t3.grad)
# Test gradcheck
def foo(t1, t2, t3):
res = MyFunction.apply(t1, t2, scale, t3)
return res[1], res[4], res[6]
gradcheck(foo, (t1, t2, t3))
def test_custom_function_no_tensors(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *args):
return (args[0], args[1], None, args[2])
t1 = random.random()
t2 = random.random()
t3 = random.random()
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
def test_invalid_gradients(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad_output):
return torch.randn(10, dtype=torch.float)
with self.assertRaisesRegex(RuntimeError, 'expected shape'):
input = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
MyFunction.apply(input).sum().backward()
def test_unrelated_inputs(self):
        # Test to ensure gradcheck and gradgradcheck run successfully even if there
        # is an unrelated (but differentiable) input
def my_function(x, y):
return x * x
x = torch.rand(10, dtype=torch.double, requires_grad=True)
y = torch.rand(10, dtype=torch.double, requires_grad=True)
gradcheck(my_function, (x, y))
gradgradcheck(my_function, (x, y))
def test_not_implemented_grad(self):
a = torch.rand(2, requires_grad=True)
# if grad for nextafter ends up being implemented, this should be changed
y = torch.nextafter(a, a).sum()
with self.assertRaisesRegex(
NotImplementedError,
'the derivative for .* is not implemented'):
y.backward()
def test_not_implemented_fwad(self):
x = torch.randn(3)
v = torch.rand(3)
with fwAD.dual_level():
dual_x = fwAD.make_dual(x, v)
err_msg = r"Trying to use forward AD with .* that does not support it"
hint_msg = "Running forward AD for an OP that does not implement it should raise a NotImplementedError"
with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
# if forward AD ends up being implemented for torch.igamma, choose a different op
torch.igamma(dual_x, dual_x)
def test_accumulate_grad(self):
grad_output = torch.ones(5, 5)
def compute_grad(create_graph):
x = torch.randn(5, 5, requires_grad=True)
y = x + 2
y.backward(grad_output, retain_graph=True)
x_grad = x.grad
x_grad_clone = x.grad.clone()
y.backward(grad_output, create_graph=create_graph)
return x_grad, x_grad_clone
# Accumulate in-place when create_graph is False
x_grad, x_grad_clone = compute_grad(create_graph=False)
self.assertEqual(x_grad, x_grad_clone * 2)
        # Accumulate out-of-place when create_graph is True; the previously saved
        # grad tensor keeps its old value, so it equals its clone.
x_grad, x_grad_clone = compute_grad(create_graph=True)
self.assertEqual(x_grad, x_grad_clone)
def test_accumulate_grad_tensor_reference(self):
def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph):
params = torch.tensor([1.5, 1.5]).requires_grad_()
params.grad = params_grad_tensor
grad_saved = params.grad
params.backward(backward_grad_tensor, create_graph=create_graph)
self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference)
for create_graph in (False, True):
# Accumulate dense gradient to sparse gradient will change the `params.grad` reference
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.tensor([1.5, 1.5]),
False, # never accumulates in-place
create_graph)
# Accumulate dense gradient to dense gradient will preserve the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.tensor([1.5, 1.5]),
torch.tensor([1.5, 1.5]),
not create_graph,
create_graph)
# Accumulate sparse gradient to sparse gradient will preserve the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
not create_graph,
create_graph)
def test_accumulate_grad_with_zero_numel_grad(self):
a = torch.rand(4, 0, requires_grad=True)
b = torch.rand(4, 1, requires_grad=True)
c = a + b
assert c.shape == (4, 0)
c.sum().backward()
self.assertEqual(b.grad, torch.zeros(4, 1))
self.assertEqual(a.grad, torch.zeros(4, 0))
def test_hessian_vector(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
with torch.no_grad():
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
grad_sum.backward(torch.ones(2, 2))
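        # With z = x**2 + y*x + y**2 we have grad_sum = 2 * (2x + y) + (x + 2y) = 5x + 4y,
        # so backpropagating ones through it adds 5 to x.grad and 4 to y.grad element-wise.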
x_hv = torch.ones(2, 2) * 5
y_hv = torch.ones(2, 2) * 4
self.assertEqual(x.grad, x_grad + x_hv)
self.assertEqual(y.grad, y_grad + y_hv)
def test_grad(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
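        # As in test_hessian_vector, grad_sum = 5x + 4y, hence the expected
        # Hessian-vector products below are 5 and 4 times a tensor of ones.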
x_hv = torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)],
inputs=[x], create_graph=True)
expected_x_hv = torch.ones(2, 2) * 5
expected_y_hv = torch.ones(2, 2) * 4
self.assertEqual(x_hv[0], expected_x_hv)
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
# Test that grad_outputs and outputs have the same shape
grad_out = torch.ones(2)
try:
torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[grad_out],
inputs=[x], create_graph=True)
            self.fail("expected a RuntimeError for mismatched grad_output shape")
except RuntimeError as error:
self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of "
+ str(grad_out.shape) + " and output[0] has a shape of "
+ str(grad_sum.shape) + ".")
def test_grad_nonleaf(self):
x_init = torch.randn(2, 2, requires_grad=True)
x = x_init
y = torch.randn(2, 2, requires_grad=True)
grad_output = torch.ones(2, 2)
def fn(x):
return x ** 2 + y * x + y ** 2
for _ in range(5):
grad_x, = torch.autograd.grad(
fn(x), x, grad_outputs=grad_output, create_graph=True)
grad_x_expected = 2 * x + y
self.assertIsNone(y.grad)
self.assertIsNone(x.grad)
self.assertEqual(grad_x, grad_x_expected)
x = x + 0.05 * grad_x
val_init = fn(x_init).sum()
val_final = fn(x).sum()
self.assertGreater(val_final, val_init)
x.backward(grad_output)
self.assertIsNotNone(y.grad)
self.assertIsNotNone(x_init.grad)
def test_grad_nonleaf_many_outputs(self):
# This checks an edge case for function callbacks
# We want to capture two grads of a function, but can only
# register a single callback.
x = torch.randn(4, 2, requires_grad=True)
a, b = x.chunk(2)
def hook(*grads):
hook_called[0] = True
hook_called = [False]
x.register_hook(hook)
go = torch.randn(2, 2)
grad_a, grad_b = torch.autograd.grad(
(a + 2 * b), [a, b], grad_outputs=go, create_graph=True)
self.assertEqual(grad_a, go)
self.assertEqual(grad_b, go * 2)
self.assertFalse(hook_called[0])
self.assertIsNone(x.grad)
def test_grad_nonleaf_register_hook(self):
# This checks an edge case for register_hook.
# We want to capture grad of a nonleaf tensor,
# but avoid segfault during backward of other nonleaf tensors
x = torch.randn(5, requires_grad=True)
x_list = x.unbind()
x0 = x_list[0]
hook_results = [None]
def hook(grad):
hook_results[0] = grad
x0.register_hook(hook)
x_list[0].backward()
self.assertEqual(hook_results[0], torch.tensor(1.))
expected_grad = torch.tensor([1., 0, 0, 0, 0])
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[0].grad)
for i in range(1, 5, 1):
x_list[i].backward()
self.assertEqual(hook_results[0], None)
expected_grad[i] = 1.0
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[i].grad)
def test_hook_with_no_name(self):
        # Create a hook that does not have a __name__ attribute
class MyHookClass:
def __call__(self, grad):
return grad.clone()
x = torch.randn(5, requires_grad=True).clone()
x.register_hook(MyHookClass())
x.sum().backward()
# Should run fine
def test_sharded_grad(self):
leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)]
intermediates = [l * i + l * l for i, l in enumerate(leaves)]
loss = sum(v * i for i, v in enumerate(intermediates)).sum()
# define a helper for dividing intermediates into groups
def group(l, group_size):
return (l[i:i + group_size] for i in range(0, len(l), group_size))
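        # For example, group([a, b, c, d, e], 2) yields [a, b], [c, d], [e].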
# Compute the d loss / d intermediates in chunks of shard_size
shard_size = 2
d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size)
for d_i in torch.autograd.grad(loss, intermediates_batch)]
# Compute rest of backward pass
torch.autograd.backward(intermediates, d_intermediates)
for i, l in enumerate(leaves):
self.assertEqual(l.grad, i * i * (1 + l))
def test_backward_badcalls(self):
x = torch.ones(1)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
x.backward()
def test_grad_badcalls(self):
x = torch.ones(1)
y = x ** 2
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(x, y)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(y, x)
x = torch.ones(1, requires_grad=True)
y = x ** 2
torch.autograd.grad(y, x) # this should succeed now
def test_grad_empty_inputs(self):
x = torch.tensor([1.0], requires_grad=True)
with self.assertRaisesRegex(ValueError, "grad requires non-empty inputs."):
torch.autograd.grad(2 * x, [], grad_outputs=torch.tensor([1.0]))
def test_grad_fn_badcalls(self):
error_regex = 'expected .* arguments, got .* instead'
x = torch.ones(1, requires_grad=True)
y = x ** 2
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn(x.detach(), x.detach()) # too many
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn() # too few
y.grad_fn(x.detach()) # this should succeed
def test_grad_unreachable(self):
x = torch.ones(1, requires_grad=True)
y = torch.ones(1, requires_grad=True)
# Make sure x and y have grad accumulators allocated
z = x * 2
w = y * 2
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_y)
# This is slightly different than the case above, because z doesn't even
# have a grad accumulator allocated.
z = torch.ones(1, requires_grad=True)
grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_z)
# allow_unused=False, but grads contains None inside, should throw
with self.assertRaisesRegex(RuntimeError,
"Set allow_unused=True"):
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False)
def test_grad_unreachable_discovery(self):
# Test that certain nodes are not erroneously executed when an input
# is unreachable. See #39784
class MyFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
self.fail("This node should not be executed!")
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
(gY,) = torch.autograd.grad(x, (y, ), allow_unused=True)
self.assertIsNone(gY)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
z = torch.randn(1, requires_grad=True)
(gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True)
self.assertIsNone(gY)
self.assertIsNotNone(gZ)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
torch.autograd.backward(x, inputs=(y, )) # allow_unused is implicitly True!
self.assertIsNone(y.grad)
def test_grad_batched_grad(self):
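        # With is_grads_batched=True, the leading dimension of each grad_output is
        # treated as a batch dimension and the backward pass is vmapped over it, so a
        # (3, 2, 2) batch of grad_outputs yields a (3, 2, 2) batch of gradients for
        # the (2, 2) input.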
x = torch.randn(2, 2, requires_grad=True)
out = x.clone() # Size([2, 2])
batched_grad = torch.arange(3).expand(2, 2, 3).transpose(0, 2) # Size([3, 2, 2])
grad, = torch.autograd.grad(out, (x,), (batched_grad,), is_grads_batched=True)
self.assertEqual(grad, torch.arange(3).expand(2, 2, 3).transpose(0, 2).to(dtype=grad.dtype))
# Detect shape mismatch
grad_out = torch.ones(2, 2)
with self.assertRaisesRegex(RuntimeError, "If `is_grads_batched=True`, we interpret the first"):
torch.autograd.grad(outputs=out, grad_outputs=(grad_out,), inputs=(x,), is_grads_batched=True)
# Scalar outputs
out = x.sum() # Size([])
batched_grad = torch.arange(3) # Size([3])
grad, = torch.autograd.grad(out, (x,), (batched_grad,), is_grads_batched=True)
self.assertEqual(grad, torch.arange(3).expand(2, 2, 3).transpose(0, 2).to(dtype=grad.dtype))
# We consider scalar and sized-1 to be a mismatch. This is consistent with current non-batched behavior.
grad_out = torch.ones(2).unsqueeze(1)
with self.assertRaisesRegex(RuntimeError, "If `is_grads_batched=True`, we interpret the first"):
torch.autograd.grad(outputs=out, grad_outputs=(grad_out,), inputs=(x,), is_grads_batched=True)
def test_hooks(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
y.requires_grad_(True)
counter = [0]
def bw_hook(inc, grad):
self.assertIsInstance(grad, torch.Tensor)
counter[0] += inc
z = x ** 2 + x * 2 + x * y + y
x.register_hook(lambda *args: bw_hook(0, *args))
test = z.register_hook(lambda *args: bw_hook(1, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 1)
test2 = z.register_hook(lambda *args: bw_hook(2, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 4)
test2.remove()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 5)
def bw_hook_modify(grad):
return grad.mul(2)
test.remove()
z.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(y.grad, (x + 1) * 2)
y.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5))
self.assertEqual(y.grad, (x + 1) * 4)
def _get_mul2(self, use_custom_function):
if use_custom_function:
class Mul2(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, gO):
return gO * 2
return Mul2.apply
else:
return lambda x: x * 2
def test_grad_fn_prehooks(self):
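# Prehooks registered on a grad_fn run before the node executes and receive the
# grad_outputs tuple (optionally returning replacements); posthooks run after
# the node and receive (grad_inputs, grad_outputs).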
for use_custom_function in (True, False):
mul2 = self._get_mul2(use_custom_function)
a = torch.tensor([1.], requires_grad=True)
b = mul2(a)
post_counter = [0]
pre_counter = [0]
def posthook(grad_input, grad_output):
self.assertEqual(pre_counter[0], 3)
self.assertTrue(torch.allclose(grad_output[0], torch.ones(1) * 8))
self.assertTrue(torch.allclose(grad_input[0], torch.ones(1) * 16))
post_counter[0] += 1
return grad_input
def prehook(grad_output):
pre_counter[0] += 1
return (grad_output[0] * 2,)
# register posthook x 2
b.grad_fn.register_hook(posthook)
b.grad_fn.register_hook(posthook)
# register the counting prehook x 3, plus a few prehooks that leave the grads unchanged
b.grad_fn.register_prehook(prehook)
b.grad_fn.register_prehook(lambda x: None)
b.grad_fn.register_prehook(prehook)
b.grad_fn.register_prehook(prehook)
b.grad_fn.register_prehook(lambda x: x)
b.grad_fn.register_prehook(lambda x: None)
b.sum().backward()
self.assertEqual(post_counter[0], 2)
self.assertEqual(pre_counter[0], 3)
# Return None
a = torch.rand(3, 3, requires_grad=True)
b = mul2(a)
def prehook(grad_output):
pre_counter[0] += 1
return None
b.grad_fn.register_prehook(prehook)
b.sum().backward()
self.assertEqual(pre_counter[0], 4)
self.assertTrue(torch.allclose(a.grad, torch.ones(3, 3) * 2))
def test_grad_fn_prehooks_multiple_outputs(self):
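# For a node with multiple outputs, the prehook receives one grad per forward
# output (None for outputs that carry no gradient) and may return a tuple of
# the same length to replace them.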
# Compute gradients without hooks
b = torch.rand(3, 3, requires_grad=True)
var, mean = torch.var_mean(b, dim=0)
(var + mean).sum().backward()
# Compute gradients with hooks
a = b.detach().requires_grad_()
counter = [0]
def prehook(grad_output):
gvar, gmean = grad_output
counter[0] += 1
return (gvar * 2, gmean * 2)
var, mean = torch.var_mean(a, dim=0)
mean.grad_fn.register_prehook(prehook)
(var + mean).sum().backward()
self.assertEqual(counter[0], 1)
# Compare
self.assertTrue(torch.allclose(a.grad, b.grad * 2))
# Test with custom Function
class DoubleMul2(Function):
@staticmethod
def forward(ctx, x, a, y):
ctx.a = a
return a * x * 2, a, a * y * 2
@staticmethod
def backward(ctx, g1, _a, g2):
return ctx.a * g1 * 2, None, ctx.a * g2 * 2
counter = [0]
def prehook(grad_output):
g1, ga, g2 = grad_output
self.assertIsNone(ga)
counter[0] += 1
return (g1 * 2, None, g2 * 2)
a = torch.randn(3, 3, requires_grad=True)
b = torch.randn(3, 3, requires_grad=True)
k = 3
c, _, d = DoubleMul2.apply(a, k, b)
c.grad_fn.register_prehook(prehook)
(c + d).sum().backward()
self.assertEqual(counter[0], 1)
self.assertTrue(torch.allclose(a.grad, torch.ones(1) * 4 * k))
self.assertTrue(torch.allclose(b.grad, torch.ones(1) * 4 * k))
def test_grad_fn_prehooks_remove_hooks(self):
for use_custom_function in (True, False):
mul2 = self._get_mul2(use_custom_function)
# Simply remove hooks
a = torch.rand(3, 3, requires_grad=True)
b = mul2(a)
counter = [0]
def prehook(grad_output):
counter[0] += 1
return None
handle = b.grad_fn.register_prehook(prehook)
b.grad_fn.register_prehook(prehook)
handle.remove()
b.sum().backward()
self.assertTrue(torch.allclose(a.grad, torch.ones(3, 3) * 2))
self.assertEqual(counter[0], 1)
# Remove hooks during backward
a = torch.rand(3, 3, requires_grad=True)
b = mul2(a)
counter = [0]
def prehook1(grad_output):
handle2.remove()
# Removing a hook that has already been removed is OK
handle3.remove()
return None
def prehook2(grad_output):
counter[0] += 1
return None
# Hooks registered first run first
b.grad_fn.register_prehook(prehook1)
handle2 = b.grad_fn.register_prehook(prehook2)
handle3 = b.grad_fn.register_prehook(prehook2)
handle3.remove()
b.sum().backward()
self.assertTrue(torch.allclose(a.grad, torch.ones(3, 3) * 2))
self.assertEqual(counter[0], 1)
def test_hooks_cpp(self):
# Tests hooks for autograd function implemented in C++
bn = torch.nn.BatchNorm1d(5, affine=False)
bn.double()
bn.eval()
counter = [0]
def bw_hook(grad):
counter[0] += 1
return grad * 2
x = torch.ones(5, 5, dtype=torch.double, requires_grad=True)
z = bn(x)
z.register_hook(bw_hook)
z.sum().backward()
self.assertEqual(counter[0], 1, msg='bw_hook not called')
self.assertEqual(x.grad, torch.ones(5, 5, dtype=torch.double) * 2, atol=1e-5, rtol=0)
def test_hook_none(self):
# WARNING: this is a test for autograd internals.
# You should never have to use such things in your code.
class NoneGradientFunction(Function):
@staticmethod
def forward(ctx, x, y):
assert ctx.needs_input_grad[0]
assert not ctx.needs_input_grad[1]
return x, y
@staticmethod
def backward(ctx, grad_x, grad_y):
return grad_x, None
was_called = [False]
def hook(grad):
self.assertIsNotNone(grad)
was_called[0] = True
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5)
rx, ry = NoneGradientFunction.apply(x, y)
rx.register_hook(hook)
ry.register_hook(hook)
sum(rx, ry).sum().backward()
self.assertTrue(was_called[0])
def test_retain_grad(self):
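# .grad is normally only populated for leaf tensors; retain_grad() asks
# autograd to also keep the gradient of this non-leaf during backward.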
input = torch.rand(1, 3, requires_grad=True)
h1 = input * 3
out = (h1 * h1).sum()
# It should be possible to call retain_grad() multiple times
h1.retain_grad()
h1.retain_grad()
# Gradient should be accumulated
out.backward(retain_graph=True)
self.assertEqual(h1 * 2, h1.grad)
out.backward(retain_graph=True)
self.assertEqual(h1 * 4, h1.grad)
with torch.no_grad():
input.grad.zero_()
# It should be a no-op for leaves
input.retain_grad()
input.retain_grad()
out.backward()
self.assertEqual(input * 18, input.grad)
# NB: See test/cpp/api/autograd.cpp for more tests on the interaction between
# retains_grad and hooks in cpp. There's no point testing in python because
# Python hooks use a completely different mechanism.
def test_retain_grad_inplace(self):
a = torch.tensor([1.], requires_grad=True).clone()
a.retain_grad()
a.mul_(2)
a.sum().backward()
self.assertEqual(a.grad, torch.tensor([1.]))
a = torch.tensor([1.], requires_grad=True).clone()
a.retain_grad()
# Calling in-place ops multiple times is OK. The real test here would be in
# cpp though: because the index here is always zero, having cpp hooks in
# addition would force us to properly update the index.
a.mul_(2)
a.mul_(2)
a.sum().backward()
self.assertEqual(a.grad, torch.tensor([1.]))
def test_retain_grad_inplace_over_view(self):
base = torch.tensor([1.], requires_grad=True).clone()
view = base[:]
view2 = base[:]
view.retain_grad()
view2.retain_grad()
view.mul_(2)
(view + view2).sum().backward()
# The old grad_fn (slice) wouldn't be part of the graph during backward,
# so if the retains_grad hook were not properly moved to the new grad_fn,
# the grad would still be None
self.assertEqual(view.grad, view2.grad)
self.assertEqual(view.grad, torch.tensor([1.]))
def test_retain_grad_cycle(self):
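# retain_grad() should not create a reference cycle that keeps the intermediate
# tensor alive: the weak ref must expire once run_test() returns, while
# backward through the retained graph still works.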
x = torch.ones(5, 5, requires_grad=True)
def run_test():
y = x * 2
y.retain_grad()
return y / 2, torch._C._WeakTensorRef(y)
z, ref = run_test()
self.assertTrue(ref.expired())
z.sum().backward()
def test_backward(self):
v = torch.randn(5, 5, requires_grad=True)
x = torch.randn(5, 5, requires_grad=True)
y = (torch.rand(5, 5) + 0.1).requires_grad_(True)
z = torch.randn(5, 5, requires_grad=True)
grad_output = torch.randn(5, 5)
v.backward(grad_output)
self.assertEqual(v.grad, grad_output)
a = x + (y * z) + 4 * z ** 2 * x / y
a.backward(grad_output)
x_grad = 4 * z.pow(2) / y + 1
y_grad = z - 4 * x * z.pow(2) / y.pow(2)
z_grad = 8 * x * z / y + y
self.assertEqual(x.grad, x_grad * grad_output)
self.assertEqual(y.grad, y_grad * grad_output)
self.assertEqual(z.grad, z_grad * grad_output)
def test_sparse_mm_backward(self):
size = (3, 3)
sparse = torch.sparse_coo_tensor(size, requires_grad=True)
dense = torch.randn(size, requires_grad=True)
with self.assertRaisesRegex(
RuntimeError,
"The backward pass for this operation requires the 'mat1' tensor to be strided,"):
z = dense.addmm(sparse, dense)
mm_test_cases = [
# a requires grad, a is sparse, b requires grad, b is sparse, error message
(False, True, True, False, None),
(False, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(False, True, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, True, False, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, True, "The backward pass for this operation requires the 'mat2'"),
]
for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases:
# We should only be testing cases with sparse inputs, and at least one
# input needs to require grad so we can call a backward pass
assert a_is_sparse or b_is_sparse
assert a_req_grad or b_req_grad
a = torch.randn(size, requires_grad=a_req_grad)
if a_is_sparse:
a = a.to_sparse()
b = torch.randn(size, requires_grad=b_req_grad)
if b_is_sparse:
b = b.to_sparse()
# If no error expected, check that sparse and dense cases match
if err_msg is None:
r = a.mm(b)
r.sum().backward()
a_grad = None if a.grad is None else a.grad.clone().detach()
b_grad = None if b.grad is None else b.grad.clone().detach()
# Redo with only dense tensors
a = (a.to_dense() if a.is_sparse else a).clone().detach()
a.requires_grad = a_req_grad
b = (b.to_dense() if b.is_sparse else b).clone().detach()
b.requires_grad = b_req_grad
r = a.mm(b)
r.sum().backward()
self.assertEqual(a_grad, a.grad)
self.assertEqual(b_grad, b.grad)
else:
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mm(b)
def test_multi_backward(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q = torch.randn(5, 5, requires_grad=True)
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
q2 = q * 2
z = x + y + q2
c = a * b + q2
grad_z = torch.randn(5, 5)
grad_c = torch.randn(5, 5)
torch.autograd.backward([z, c], [grad_z, grad_c])
self.assertEqual(x.grad, grad_z)
self.assertEqual(y.grad, grad_z)
self.assertEqual(a.grad, grad_c * b)
self.assertEqual(b.grad, grad_c * a)
self.assertEqual(q.grad, (grad_c + grad_z) * 2)
def test_multi_backward_no_grad(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=False)
z = x + y
q = y * 2
# NB: we currently raise an exception if any arguments to backwards
# have requires_grad=False and don't have a grad_fn. We may want to
# relax that check to a warning.
def call_backwards():
torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)])
self.assertRaises(RuntimeError, call_backwards)
def test_backward_with_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
def fn():
return x ** 2 + y * x + y ** 2
gradient = torch.ones(2, 2)
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
@torch.no_grad()
def reset_grad():
x.grad.zero_()
y.grad.zero_()
torch.autograd.backward(fn(), gradient, inputs=[x, y])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, y_grad_expected)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[x])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[y])
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=y)
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
self.assertRaisesRegex(RuntimeError, 'cannot be empty',
lambda: torch.autograd.backward(fn(), gradient, inputs=[]))
def test_backward_with_nonleaf_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
x_nonleaf = x * 1
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
z = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y, x_nonleaf])
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
x_non_leaf_expected = 2 * x_nonleaf + y
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(x_nonleaf.grad, x_non_leaf_expected)
# backward doesn't have an allow_unused flag, so when a variable is not part
# of the graph the behavior is as if allow_unused were True:
# the input's .grad will simply be None.
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[z])
self.assertIsNone(z.grad)
def test_dependent_backward(self):
x = torch.randn(10, requires_grad=True)
y = x ** 2
z = y ** 3
go_y = torch.randn(10)
go_z = torch.randn(10)
torch.autograd.backward([y, z], [go_y, go_z])
xd = x
self.assertEqual(x.grad, 2 * xd * go_y + 6 * xd.pow(5) * go_z)
def test_save_output_nr(self):
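# output_nr records which output of its grad_fn a tensor is; saving the tensor
# with ctx.save_for_backward should preserve it.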
x = torch.randn(10, requires_grad=True)
class MultiOutputFn(Function):
@staticmethod
def forward(ctx, x):
return x[:5], x[5:]
@staticmethod
def backward(ctx, *grad):
return torch.cat(grad)
a, b = MultiOutputFn.apply(x)
self.assertEqual(b.output_nr, 1)
class TestFn(Function):
@staticmethod
def forward(ctx, b):
ctx.save_for_backward(b)
return b * 2
@staticmethod
def backward(ctx, grad_b):
b, = ctx.saved_tensors
self.assertEqual(b.output_nr, 1)
TestFn.apply(b).sum().backward()
def test_free_deep_graph(self):
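# Freeing the graph should happen iteratively rather than through recursive
# destructor calls; otherwise deleting a ~150k-node chain would overflow the stack.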
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build a "chain" computation graph
for _ in range(depth):
y = y + y * 0.000001
# graph deletion occurs when the above locals go out of scope.
# In this case `del y` will trigger it but it's easier to leave
# it to Python to delete the locals.
# Should not stack overflow
scope()
def test_free_deep_graph_complicated(self):
def scope():
depth = 100000
randchoice = torch.randint(2, [depth, 2])
x = torch.randn(1, requires_grad=True)
y = x.clone()
# Hold the two previous values
prev_values = [None, None]
# Build a "chain with skip connections" graph
for _ in range(depth):
prev_tensors = [tensor for tensor in prev_values[:-1]
if tensor is not None]
prev_values.append(y)
prev_values.pop(0)
# Definitely pick one tensor to add
y += y * 0.000001
# Possibly add other tensors
nprev = len(prev_tensors)
if nprev == 2:
y += randchoice[depth].mul(torch.cat(prev_tensors)).sum()
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_free_deep_graph_pyfunction(self):
class MyOp(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
return grad_output, grad_output
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build deeply nested computation graph
for _ in range(depth):
y = MyOp.apply(y, y)
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_no_unnecessary_save(self):
# If we kept x in the derivative Function of x * ft we would
# get an error in the backward pass complaining that we've
# modified x, which was needed for gradient computation.
# Since we should elide unnecessary saves, this test should pass.
mu = torch.ones(1, requires_grad=True)
x = torch.empty(1)
loss = 0
for i in range(3):
x.detach_()
x.copy_(mu + i)
ft = torch.tensor([float(i)])
multiplied = x * ft
s = multiplied.sum()
loss += s
loss.backward()
def test_no_grad(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
with torch.no_grad():
w = x + y
@torch.no_grad()
def adder(x, y):
return x + y
z = adder(x, y)
self.assertFalse(w.requires_grad)
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
self.assertIsNone(w.grad_fn)
self.assertFalse(z.requires_grad)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
self.assertIsNone(z.grad_fn)
# test nested decorator and with-statement on no_grad
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
w = adder(x, y)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_generator_functions(self):
@torch.no_grad()
def gen_no_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), False)
yield i
with torch.enable_grad():
for _ in gen_no_grad():
self.assertEqual(torch.is_grad_enabled(), True)
@torch.enable_grad()
def gen_enable_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), True)
yield i
with torch.no_grad():
for _ in gen_enable_grad():
self.assertEqual(torch.is_grad_enabled(), False)
def test_set_grad_generator_functions_recursive(self):
# enable_grad_decorator_recursive and no_grad_decorator_recursive call each other
# recursively, to ensure that the decorators preserve the caller's setting
@torch.enable_grad()
def enable_grad_decorator_recursive(depth):
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_decorator_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
@torch.no_grad()
def no_grad_decorator_recursive(depth):
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_decorator_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
# enable_grad_context_manager_recursive and no_grad_context_manager_recursive call
# each other recursively, to ensure that the decorators preserve the caller's setting
def enable_grad_context_manager_recursive(depth):
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_context_manager_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
def no_grad_context_manager_recursive(depth):
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_context_manager_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertTrue(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertTrue(torch.is_grad_enabled())
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertFalse(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_coroutines(self):
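# When no_grad/enable_grad decorate a generator, the grad mode applies only
# while the generator body runs; the caller's setting is restored across every
# yield/send.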
@torch.no_grad()
def coro_no_grad(n=10):
self.assertFalse(torch.is_grad_enabled())
for i in range(n):
self.assertFalse(torch.is_grad_enabled())
r = yield i
self.assertFalse(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertFalse(torch.is_grad_enabled())
@torch.enable_grad()
def coro_enable_grad(n=10):
self.assertTrue(torch.is_grad_enabled())
for i in range(n):
self.assertTrue(torch.is_grad_enabled())
r = yield i
self.assertTrue(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertTrue(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
coro, r = coro_no_grad(), None
try:
while True:
self.assertTrue(torch.is_grad_enabled())
r = coro.send(r)
self.assertTrue(torch.is_grad_enabled())
except StopIteration:
pass
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
coro, r = coro_enable_grad(), None
try:
while True:
self.assertFalse(torch.is_grad_enabled())
r = coro.send(r)
self.assertFalse(torch.is_grad_enabled())
except StopIteration:
pass
def test_set_grad_coroutines_benign_exceptions(self):
class RecoverableException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertFalse(torch.is_grad_enabled())
has_raised = True
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertTrue(torch.is_grad_enabled())
has_raised = True
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
def test_set_grad_coroutines_critical_exceptions(self):
class UnrecoverableException(Exception):
pass
class SecondaryException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertFalse(torch.is_grad_enabled())
raise SecondaryException
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertTrue(torch.is_grad_enabled())
raise SecondaryException
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
def test_set_grad_coroutines_exit(self):
@torch.no_grad()
def coro_no_grad(state):
for i in range(10):
try:
self.assertFalse(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertFalse(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
@torch.enable_grad()
def coro_enable_grad(state):
for i in range(10):
try:
self.assertTrue(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertTrue(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
state = set()
with torch.enable_grad():
coro = coro_no_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
state = set()
with torch.no_grad():
coro = coro_enable_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
def test_no_grad_python_function(self):
"""Python Functions should respect grad mode."""
x = torch.ones(5, 5, requires_grad=True)
class MyOp(Function):
@staticmethod
def forward(self, x):
return x + 1
@staticmethod
def backward(self, dy):
return dy
with torch.no_grad():
y = MyOp.apply(x)
self.assertFalse(y.requires_grad)
def test_indexing(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
def compare(x, y, idx, indexed_tensor, indexed_var):
indexed_var_t = indexed_var.data
if not isinstance(indexed_tensor, torch.Tensor):
indexed_var_t = indexed_var_t[0]
self.assertEqual(indexed_tensor, indexed_var_t)
indexed_var.sum().backward()
expected_grad = torch.empty(x.size()).fill_(0)
expected_grad[idx] = 1
self.assertEqual(y.grad, expected_grad)
def check_index(x, y, idx):
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[idx]
indexed_var = y[idx]
compare(x, y, idx, indexed_tensor, indexed_var)
check_index(x, y, 1)
check_index(x, y, (1, 1))
check_index(x, y, slice(1, None))
check_index(x, y, slice(None, 2))
check_index(x, y, (slice(None, 2), 2))
check_index(x, y, (slice(1, 2), 2))
check_index(x, y, (1, slice(2, None)))
check_index(x, y, (slice(None, None), slice(2, None)))
check_index(x, y, torch.LongTensor([0, 2]))
check_index(x, y, torch.rand(4, 4).bernoulli().bool())
check_index(x, y, (Ellipsis, slice(2, None)))
check_index(x, y, ([0], [0]))
check_index(x, y, ([1, 2, 3], [0]))
check_index(x, y, ([1, 2], [2, 1]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([slice(None), [2, 3]]))
check_index(x, y, ([[2, 3], slice(None)]))
# advanced indexing, with fewer dims, or ellipsis
check_index(x, y, ([0]))
check_index(x, y, ([0], ))
x = torch.arange(1., 49).view(4, 3, 4)
y = Variable(x, requires_grad=True)
check_index(x, y, (slice(None), [0], [0]))
check_index(x, y, ([0], [0], slice(None)))
check_index(x, y, (slice(None), [0, 1, 2], [0]))
check_index(x, y, ([0, 1, 2], [0], slice(None)))
check_index(x, y, (slice(None), [1, 2], [2, 1]))
check_index(x, y, ([1, 2], [2, 1], slice(None)))
check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None)))
check_index(x, y, (slice(None), slice(None), [2, 1]))
check_index(x, y, (slice(None), [2, 1], slice(None)))
check_index(x, y, ([2, 1], slice(None), slice(None)))
# advanced indexing, with fewer dims, or ellipsis
check_index(x, y, ([0], ))
check_index(x, y, ([0], slice(None)))
check_index(x, y, ([0], Ellipsis))
check_index(x, y, ([1, 2], [0, 1]))
check_index(x, y, ([1, 2], [0, 1], Ellipsis))
check_index(x, y, (Ellipsis, [1, 2], [0, 1]))
# advanced indexing, with a tensor wrapped in a variable
z = torch.LongTensor([0, 1])
zv = Variable(z, requires_grad=False)
seq = [z, Ellipsis]
seqv = [zv, Ellipsis]
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[seq]
indexed_var = y[seqv]
compare(x, y, seq, indexed_tensor, indexed_var)
def test_indexing_duplicates(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx:
expected_grad[i] += 1
self.assertEqual(y.grad, expected_grad)
# with advanced indexing
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 3, 2, 1, 2], [0]]
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx[0]:
for j in idx[1]:
expected_grad[i][j] += 1
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
y[idx].sum().backward()
expected_grad = torch.tensor([[0., 2., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.]])
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 65).view(4, 4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 1], slice(None), slice(None)]
y[idx].sum().backward()
expected_grad = torch.empty(4, 4, 4).zero_()
expected_grad[1].fill_(3)
self.assertEqual(y.grad, expected_grad)
def test_index_backward_does_not_save_tensor(self):
# Example from https://github.com/pytorch/pytorch/issues/24853.
# if `index(tensor, indices)` saves `tensor` for backwards, then it will
# trigger a version check on `tensor` during the backward pass, which
# will cause the following code to error because `tensor` gets modified
# by the indexing line.
a = torch.tensor([1., 0, 0])
b = torch.zeros(3, requires_grad=True)
tensor = b + 0
tensor[a != 0] = tensor[a != 0]
tensor.backward(torch.zeros_like(tensor))
def test_volatile_deprecated(self):
v = torch.autograd.torch.randn(3, 3)
with warnings.catch_warnings(record=True) as w:
self.assertFalse(v.volatile)
self.assertIn('volatile', str(w[0].message))
def test_saved_variables_deprecated(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_variables
return (grad_output, grad_output)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
x = torch.randn((3, 3), requires_grad=True)
y = torch.randn((3, 3), requires_grad=True)
MyFunction.apply(x, y).sum().backward()
has_deprecated = map(lambda warn:
'deprecated' in str(warn) and
'saved_variables' in str(warn),
warns)
has_deprecated = reduce(lambda x, y: x or y, has_deprecated)
self.assertTrue(has_deprecated)
def test_requires_grad(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
z = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertFalse(a.requires_grad)
b = a + z
self.assertTrue(b.requires_grad)
def error():
raise RuntimeError
# Make sure backward isn't called on these
a._backward_hooks = OrderedDict()
x._backward_hooks = OrderedDict()
y._backward_hooks = OrderedDict()
a._backward_hooks['test'] = error
x._backward_hooks['test'] = error
y._backward_hooks['test'] = error
b.backward(torch.ones(5, 5))
def test_requires_grad_(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
self.assertIs(x, x.requires_grad_())
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_())
self.assertTrue(y.requires_grad)
self.assertIs(x, x.requires_grad_(True))
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_(True))
self.assertTrue(y.requires_grad)
z = x * y
self.assertRaises(RuntimeError, lambda: z.requires_grad_(False))
self.assertIs(z, z.requires_grad_())
self.assertTrue(z.requires_grad)
self.assertIs(z, z.requires_grad_(True))
self.assertTrue(z.requires_grad)
self.assertIs(x, x.requires_grad_(False))
self.assertFalse(x.requires_grad)
self.assertIs(y, y.requires_grad_(False))
self.assertFalse(y.requires_grad)
def test_requires_grad_inplace(self):
a = torch.randn(5, 5)
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
# non-leaf
a = torch.randn(5, 5) + 0
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
def test_no_requires_grad_inplace(self):
# basic case, should be able to modify inplace while requires_grad is False
a = torch.randn(2, 3)
a.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# same but with a view
a = torch.randn(2, 3)
b = a[:]
b.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# should fail if requires_grad = True when we modify inplace
a = torch.randn(2, 3)
b = a[:]
a.requires_grad = True
with self.assertRaises(RuntimeError):
a.add_(5)
with self.assertRaises(RuntimeError):
b.add_(5)
def test_attribute_deletion(self):
x = torch.randn((5, 5), requires_grad=True)
del x.grad
self.assertIsNone(x.grad)
with self.assertRaises(RuntimeError):
del x.data
with self.assertRaises(TypeError):
x.data = None
with self.assertRaises(RuntimeError):
del x.requires_grad
with self.assertRaises(RuntimeError):
del x._grad_fn
with self.assertRaises(RuntimeError):
del x._backward_hooks
def test_duplicate_backward_root(self):
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
x = a * b
grad_output = torch.randn_like(x)
torch.autograd.backward([x, x], [grad_output, grad_output])
self.assertEqual(a.grad, b * grad_output * 2)
self.assertEqual(b.grad, a * grad_output * 2)
def test_backward_no_grad(self):
a = torch.randn(5, 5, requires_grad=True)
b = a + 2
with self.assertRaises(RuntimeError):
torch.autograd.backward([b], [None])
def test_backward_twice_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double)))
def test_backward_twice_retained_graph_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b + 1
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_retained_graph_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_create_graph_warns(self):
with set_warn_always_context(True):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b * b
with warnings.catch_warnings(record=True) as ws:
c.backward(torch.ones_like(c), create_graph=True)
b.grad = None
self.assertTrue(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
# Should not warn for grad
with warnings.catch_warnings(record=True) as ws:
torch.autograd.grad(c, b, torch.ones_like(c), create_graph=True)
self.assertFalse(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
def test_next_functions(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertIsNotNone(a.grad_fn)
next_functions = a.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[0][1], 0)
self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[1][1], 0)
b = a + 5
next_functions = b.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIs(next_functions[0][0], a.grad_fn)
self.assertIs(next_functions[1][0], None)
def test_inplace(self):
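# Autograd tracks in-place ops with a per-tensor version counter (_version).
# If a tensor that was saved for backward is modified afterwards, backward
# raises; ops whose backward doesn't need the saved value keep working.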
x = torch.ones(5, 5, requires_grad=True)
y = Variable(torch.ones(5, 5) * 4, requires_grad=True)
z = x * y
q = z + y
w = z * y
z.add_(2)
# Add doesn't need its inputs to do backward, so it shouldn't raise
q.backward(torch.ones(5, 5), retain_graph=True)
# Mul saves both inputs in forward, so it should raise
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
z = x * y
q = z * y
r = z + y
w = z.add_(y)
# w is the last expression, so this should succeed
w.backward(torch.ones(5, 5), retain_graph=True)
# r doesn't use the modified value in backward, so it should succeed
r.backward(torch.ones(5, 5), retain_graph=True)
# q uses dirty z, so it should raise
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
with torch.no_grad():
x.grad.zero_()
m = x / 2
z = m + y / 8
q = z * y
r = z + y
prev_version = z._version
w = z.exp_()
self.assertNotEqual(z._version, prev_version)
r.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.ones(5, 5) / 2)
w.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.empty(5, 5).fill_((1 + math.e) / 2))
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
leaf = torch.ones(5, 5, requires_grad=True)
x = leaf.clone()
x.add_(10)
self.assertEqual(x, torch.ones(5, 5) * 11)
# x should be still usable
y = x + 2
y.backward(torch.ones(5, 5))
self.assertEqual(leaf.grad, torch.ones(5, 5))
z = x * y
x.add_(2)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
def test_mark_non_differentiable(self):
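# ctx.mark_non_differentiable declares an output (here a boolean mask) as
# non-differentiable: it is returned with requires_grad=False, so the rest of
# the graph can use it without autograd trying to differentiate through it.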
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input > 0
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return (grad_output * 0).to(torch.double)
x = torch.randn(5, 5, requires_grad=True)
mask = MyFunction.apply(x)
self.assertFalse(mask.requires_grad)
y = x.masked_fill(mask, 0)
y.sum().backward()
def test_mark_non_differentiable_mixed(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
a = input + 1
b = input + 2
ctx.mark_non_differentiable(a)
return a, b
@staticmethod
def backward(ctx, grad_a, grad_b):
self.assertTrue((grad_a == 0).all())
self.assertTrue((grad_b == 1).all())
return grad_b
x = torch.randn(5, 5, requires_grad=True)
a, b = MyFunction.apply(x)
self.assertFalse(a.requires_grad)
self.assertTrue(b.requires_grad)
b.sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5))
def test_mark_non_differentiable_none(self):
# This used to segfault because MyFunction would send back null
# gradients to MulBackward, which is implemented in C++. C++-implemented
# functions expect incoming grad_outputs to be non-null.
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input.clone()
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return None
x = torch.randn(5, 5, requires_grad=True)
r = MyFunction.apply(x * x)
(r * x).sum().backward()
def test_return_duplicate(self):
class DoubleDuplicate(Function):
@staticmethod
def forward(ctx, x):
output = x * 2
return output, output
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def fn(x):
a, b = DoubleDuplicate.apply(x)
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(fn, [x])
gradgradcheck(fn, [x])
def test_return_duplicate_inplace(self):
class DoubleInplace(Function):
@staticmethod
def forward(ctx, x):
x.mul_(2)
ctx.mark_dirty(x)
return x, x
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def inplace_fn(x):
a, b = DoubleInplace.apply(x.clone())
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(inplace_fn, [x])
gradgradcheck(inplace_fn, [x])
# Can't modify leaf variables in-place
self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x))
# Functions which modify views in-place must return only one output
self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x.clone()[0]))
def _test_setitem(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
y[index] = 2
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad = torch.ones(*size)
expected_grad[index] = 0
self.assertEqual(x.grad, expected_grad)
def _test_setitem_tensor(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
value = x.new(x[index].size()).fill_(7)
value.requires_grad = True
y[index] = value
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad_input = torch.ones(*size)
expected_grad_input[index] = 0
self.assertEqual(x.grad, expected_grad_input)
self.assertEqual(value.grad, torch.ones_like(value))
# case when x broadcasts to match y[1]
x = torch.randn(4, requires_grad=True)
y = torch.zeros(2, 3, 4)
y[1] = x
y.backward(torch.randn(2, 3, 4))
self.assertEqual(x.size(), x.grad.size())
def test_setitem(self):
self._test_setitem((5, 5), 1)
self._test_setitem((5,), 1)
self._test_setitem((1,), 0)
self._test_setitem((10,), [[0, 4, 2]])
self._test_setitem((5, 5), [[0, 4], [2, 2]])
self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5), 3)
self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
self._test_setitem_tensor((5,), 3)
self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1,
3]), requires_grad=False), [2, 4], slice(None)])
def test_setitem_mask(self):
mask = torch.BoolTensor(5, 5).bernoulli_()
self._test_setitem((5, 5), Variable(mask))
self._test_setitem((5,), Variable(mask[0]))
self._test_setitem((1,), Variable(mask[0, 0:1]))
self._test_setitem_tensor((5, 5), Variable(mask))
self._test_setitem_tensor((5,), Variable(mask[0]))
def test_select_sum(self):
# both select and sum return Scalars in ATen; ensure they work together.
x = torch.randn(10, dtype=torch.double, requires_grad=True)
def func(x):
return x.select(0, 1).sum()
gradcheck(func, [x])
gradgradcheck(func, [x])
def test_diagonal_expanded_v(self):
value = torch.rand([])
v_expanded = torch.tensor(value).expand(10)
a = torch.rand(10, 10, dtype=torch.double, requires_grad=True)
result, = torch.autograd.grad(a.diagonal(), a, v_expanded)
self.assertEqual(result, torch.eye(10, dtype=torch.double) * value)
def test_select_expanded_v(self):
v_expanded = torch.rand(10).expand(10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[0], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[0] = v_expanded
self.assertEqual(result, expected)
def test_slice_expanded_v(self):
v_expanded = torch.rand(10, 1).expand(2, 10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[3:5], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[3:5] = v_expanded
self.assertEqual(result, expected)
def test_unused_output(self):
x = torch.randn(10, 10, requires_grad=True)
outputs = x.chunk(5)
o = outputs[2]
o = o * 4 + 2
o.sum().backward()
expected_grad = torch.zeros(10, 10)
expected_grad[4:6] = 4
self.assertEqual(x.grad, expected_grad)
with torch.no_grad():
x.grad.zero_()
grad_output = torch.randn(2, 10)
outputs = x.chunk(5)
outputs[0].backward(grad_output)
expected_grad = torch.zeros(10, 10)
expected_grad[:2] = grad_output
self.assertEqual(x.grad, expected_grad)
# TODO: opinfo this or move to the sparse test suite
def _test_sparse_gather(self, size_x, size_ind, dim):
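# With sparse_grad=True, gather produces a sparse gradient for the input; it
# should match the dense gradient from the sparse_grad=False path once
# converted back with to_dense().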
x = torch.randn(size_x, requires_grad=True)
if len(size_ind) > 0 and len(size_x) > 0:
ind = torch.randint(x.size(dim), size_ind)
else:
ind = torch.zeros(size_ind, dtype=torch.int64)
out = torch.gather(x, dim, ind, sparse_grad=False)
grad = torch.rand_like(out)
out.backward(grad)
grad_dense = x.grad.clone()
x.grad = None
out = torch.gather(x, dim, ind, sparse_grad=True)
out.backward(grad)
self.assertEqual(grad_dense, x.grad.to_dense())
def test_sparse_gather_dim0(self):
self._test_sparse_gather((10, 10), (5, 10), 0)
def test_sparse_gather_dim1(self):
self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1)
def test_sparse_gather_dim_neg(self):
self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1)
def test_sparse_gather_ind_scalar(self):
self._test_sparse_gather((10,), (), 0)
def test_sparse_gather_x_scalar(self):
self._test_sparse_gather((), (2,), 0)
def test_sparse_gather_both_scalar(self):
self._test_sparse_gather((), (), 0)
def test_gc_in_destructor(self):
"""
Previously, if a Function destructor triggered a garbage collection,
the Variable's tp_dealloc handler would get called twice leading to a
segfault.
"""
class CollectOnDelete(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
def __del__(self):
gc.collect()
for _ in range(10):
CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()
def test_naughty_autograd_function_attribute_access(self):
class Id(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad_x):
return grad_x
with self.assertWarnsRegex(DeprecationWarning, "should not be instantiated"):
f = Id()
# After raising the warning, it should still return an instance
self.assertIsInstance(f, Id)
x = torch.zeros(1, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "non-static forward method is deprecated"):
f(x)
t = Id.apply(x)
self.assertEqual(t.grad_fn.name(), "IdBackward")
# THPFunction is the base class of both grad_fn and autograd functions,
# which means that a lot of accessors on them may segfault. Test that we
# properly error in this case.
t = torch.ones(1, requires_grad=True)
t._backward_hooks = dict()
with self.assertRaisesRegex(RuntimeError, "Attribute '_register_hook_dict' is invalid"):
f._register_hook_dict(t)
with self.assertRaisesRegex(RuntimeError, "Attribute 'register_hook' is invalid"):
f.register_hook(lambda x, y: None)
with self.assertRaisesRegex(RuntimeError, "Attribute 'next_functions' is invalid"):
f.next_functions
with self.assertRaisesRegex(RuntimeError, "Attribute 'name' is invalid"):
f.name()
with self.assertRaisesRegex(RuntimeError, "underlying PyNode has already been deallocated"):
f.metadata
@unittest.expectedFailure
def test_naughty_anomaly_access(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, g):
return g
x = torch.zeros(1, requires_grad=True)
y = MyFunction.apply(x)
y.backward()
y.grad_fn.metadata
g = y.grad_fn
del y
g.metadata # this currently fails, but shouldn't
def test_naughty_autograd_function_stashing_ctx(self):
saved_ctx = []
class Id(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_ctx.append(ctx)
return ctx.saved_tensors
p = torch.zeros(1, requires_grad=True)
loss = Id.apply(p)
loss.backward(retain_graph=True)
del loss
# At this point in time, it complains that the graph has been freed
# (which is indeed true, although a somewhat indirect way of stating the
# problem).
self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)
def test_custom_autograd_repeated_grad_grad(self):
# This test failed the equality check in PR #22983; it's an interesting
# and different test case worth enshrining. mult1 is not testing
# anything that interesting, but mult2 is the interesting case.
def mult1(x):
return x.prod(dim=-1).prod(dim=-1)
class Mult(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = mult1(x)
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return (grad_output * y)[:, None, None] / x
mult2 = Mult.apply
def check_gradgrad_repeated(x, y):
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1])
x = torch.ones(2, 4, 4).requires_grad_()
check_gradgrad_repeated(x, mult1(x))
check_gradgrad_repeated(x, mult2(x))
def test_custom_autograd_no_early_free(self):
# This test failed complaining that buffers had already been freed
# prior to #22983. Also pretty interesting test case.
class Double(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = x ** 2
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, _ = ctx.saved_tensors
return grad_output * 2 * x
# this is equivalent, but uses the output of .forward() in .backward()
class Double2(Double):
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return grad_output * 2 * y / x
double = Double.apply
double2 = Double2.apply
x = torch.tensor(2).double().requires_grad_()
self.assertTrue(gradcheck(double, x))
self.assertTrue(gradgradcheck(double, x))
self.assertTrue(gradcheck(double2, x))
self.assertTrue(gradgradcheck(double2, x))
y = double(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x)
y = double2(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x) # should not error!
def test_detach(self):
x = torch.randn(10, 10, requires_grad=True)
y = x + 2
y = y.detach()
z = y * 4 + 2
self.assertFalse(y.requires_grad)
self.assertFalse(z.requires_grad)
x = torch.randn(10, 10, requires_grad=True)
y = x * 2
y = y.detach()
self.assertFalse(y.requires_grad)
self.assertIsNone(y.grad_fn)
z = x + y
z.sum().backward()
# This is an incorrect gradient, but we assume that's what the user
# wanted. detach() is an advanced option.
self.assertEqual(x.grad, torch.ones(10, 10))
# in-place detach
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
a = x * 2
(y + a).sum().backward(retain_graph=True)
a.detach_()
self.assertFalse(a.requires_grad)
(y + a).sum().backward() # this won't backprop to x
self.assertEqual(x.grad, torch.ones(10, 10) * 2)
self.assertEqual(y.grad, torch.ones(10, 10) * 2)
# in-place detach on a view raises an exception
view = x.narrow(0, 1, 4)
self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())
def test_detach_base(self):
"detaching base does not detach view"
x = torch.randn(10, 10, requires_grad=True)
view = x.narrow(0, 1, 4)
x.detach_()
self.assertFalse(x.requires_grad)
self.assertTrue(view.requires_grad)
self.assertIsNotNone(view.grad_fn)
self.assertIs(view._base, x)
def test_detach_then_inplace_raises_in_autograd(self):
x = torch.randn([], requires_grad=True)
orig_x = x.detach().clone()
y = x ** 2 # saves x
z = x.detach()
z.zero_()
with self.assertRaisesRegex(RuntimeError, "has been modified by an inplace"):
y.backward()
def test_detach_disallows_metadata_change(self):
x = torch.randn([], requires_grad=True)
detached = x.detach()
with self.assertRaisesRegex(
RuntimeError, "not allowed on a Tensor created from .data or .detach()"):
detached.resize_(3, 3)
def _test_type_conversion_backward(self, t, ):
fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
fvar.double().sum().backward()
self.assertEqual(fvar.grad, torch.ones_like(fvar))
self.assertEqual(type(fvar.grad), type(fvar))
dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
dvar.float().sum().backward()
self.assertEqual(dvar.grad, torch.ones_like(dvar))
self.assertEqual(type(dvar.grad), type(dvar))
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.int(), torch.IntTensor)
if torch.cuda.is_available():
self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
if torch.cuda.device_count() >= 2:
x2 = x.float().cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
x2 = x.float().cuda()
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 0)
x2 = x2.cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
y = Variable(torch.randn(5).cuda(1), requires_grad=True)
y.cpu().sum().backward()
self.assertIs(y.grad.get_device(), 1)
self.assertIs(y.long().get_device(), 1)
for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
for y_var in (True, False):
y = torch.randint(5, (5, 5), dtype=t.dtype)
y = Variable(y) if y_var else y
self.assertIsInstance(x.type(t), t)
self.assertIsInstance(x.type_as(y), t)
# TODO: t.dtype should work
t_dtype = t().dtype
self.assertIsInstance(x.type(t_dtype), t)
self.assertIs(t_dtype, x.type(t_dtype).dtype)
self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
if torch.cuda.is_available():
for x_cuda in (True, False):
for y_cuda in (True, False):
x_c = x.cuda() if x_cuda else x
y_c = y.cuda() if y_cuda else y
_, y_type = y_c.type().rsplit('.', 1)
y_typestr = ('torch.cuda.' if y_cuda else 'torch.') + y_type
self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())
self._test_type_conversion_backward(lambda x: x)
if torch.cuda.is_available():
self._test_type_conversion_backward(lambda x: x.cuda())
if torch.cuda.device_count() >= 2:
# one of these has to be the non-default device
self._test_type_conversion_backward(lambda x: x.cuda(0))
self._test_type_conversion_backward(lambda x: x.cuda(1))
def test_isolated_node(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
b = torch.max(a, 1, True)[1].repeat(1, 5).double()
o = (b + a).sum()
o.backward()
def test_shape(self):
x = torch.randn(3, 4)
self.assertEqual(2, len(x.shape))
self.assertEqual(x.shape[0], 3)
self.assertEqual(x.shape[1], 4)
def test_numpy_requires_grad(self):
x = torch.randn(2, 2, requires_grad=True)
err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
x.numpy()
with torch.no_grad():
x.numpy()
x = torch.randn(2, 2)
x.numpy()
with torch.no_grad():
x.numpy()
def test_return_leaf(self):
class Identity(Function):
@staticmethod
def forward(ctx, a, b):
return a, a + b
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a + grad_b, grad_b
hook_called = [False]
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q, p = Identity.apply(x, y)
# Make sure hooks only receive grad from usage of q, not x.
def hook(grad):
hook_called[0] = True
self.assertEqual(grad, torch.ones(5, 5))
q.register_hook(hook)
(q + p + x).sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5) * 3)
self.assertEqual(y.grad, torch.ones(5, 5))
self.assertTrue(hook_called[0])
def test_return_leaf_inplace(self):
class Inplace(InplaceFunction):
@staticmethod
def forward(ctx, a, b):
ctx.mark_dirty(a)
return a.add_(b), b + 2
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a, grad_a + grad_b
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
q, p = Inplace.apply(x, y)
self.assertIs(q, x)
self.assertIs(q.grad_fn.__class__, Inplace._backward_cls)
self.assertTrue(q.requires_grad)
q.sum().backward()
self.assertEqual(y.grad, torch.ones(5, 5))
def test_leaf_assignment(self):
x = torch.randn(5, 5)
y = torch.randn(5, requires_grad=True)
z = torch.randn(5, requires_grad=True)
x[0] = y
x[1] = 2 * z
self.assertTrue(x.requires_grad)
self.assertIsNot(x.grad_fn, None)
x.sum().backward()
self.assertEqual(y.grad, torch.ones(5))
self.assertEqual(z.grad, torch.ones(5) * 2)
def test_no_grad_assignment(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5)
with torch.no_grad():
x[0] = y
self.assertTrue(x.requires_grad)
self.assertIsNone(x.grad_fn)
def test_no_grad_modifies_version(self):
x = torch.randn(5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
z = (x * y).sum()
with torch.no_grad():
x *= 2
self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
lambda: z.backward())
def test_no_grad_input(self):
class MyFunction(Function):
@staticmethod
def forward(self, x):
return x
@staticmethod
def backward(self, grad_output):
return grad_output
x = torch.randn(5, requires_grad=True)
with torch.no_grad():
y = MyFunction.apply(x)
self.assertTrue(x.requires_grad)
self.assertIsNone(y.grad_fn)
def test_backward_copy(self):
# This test checks the backward engine for a very subtle bug that appeared
# in one of the initial versions of autograd. Gradient tensors were
# simply stored in lists while the function waited for all its gradients
# to be computed. However, sometimes an output was used multiple times,
# so the gradients needed to be summed. The engine used to keep a need_copy
# set of tensors that would need a clone upon the next addition, and removed
# them from the set as soon as the clone was performed. However, this
# could lead to incorrect results if the same gradient tensor was
# buffered in three places in the graph:
# 1. When accumulating gradients in one of these places it was cloned
# and removed from need_copy set.
# 2. When accumulating in second place, it wasn't in the need_copy set,
# so the gradients were simply accumulated in-place (which already
# modified the grad in 3rd place)
# 3. When accumulating in the third place, it wasn't in the need_copy set
# as well, so the incoming gradient was summed in-place, yielding
# incorrect results in all functions, except the first one.
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5, requires_grad=True)
# Simulate that we're in the middle of the graph
a = x + 2
b = y + 2
c = x + 2
# This op will just return grad_output two times in backward
add1 = a + b
add2 = add1 + c
# Simulate a long branch, so grad_output will get buffered.
for _ in range(4):
a = a * 2
b = b * 2
c = c * 2
branch = a + b + c
out = add2 + branch
# expected gradients are:
# for x: 34 (16 from final a, 16 from final c, 2 from add2)
# for y: 17 (16 from final b, 1 from add2)
grad_output = torch.ones(5, 5)
out.backward(grad_output)
self.assertEqual(x.grad, torch.ones(5, 5) * 34)
self.assertEqual(y.grad, torch.ones(5, 5) * 17)
def test_save_none_for_backward(self):
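# save_for_backward accepts None entries; they come back as None from
# ctx.saved_tensors in the same positions.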
test_case = self
class MyFn(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(None, input, None)
return input * input
@staticmethod
def backward(ctx, grad_output):
n1, input, n2 = ctx.saved_tensors
test_case.assertIsNone(n1)
test_case.assertIsNone(n2)
return 2 * input * grad_output
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, 2 * x)
def test_too_many_grads(self):
class MyFn(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, torch.ones_like(x))
def test_pickle(self):
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=False)
def assert_strict_equal(var1, var2):
self.assertEqual(var1, var2)
self.assertEqual(var1.requires_grad, var2.requires_grad)
serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)]
for dump in serialized:
xc, yc = pickle.loads(dump)
assert_strict_equal(xc, x)
assert_strict_equal(yc, y)
def test_dep_nograd(self):
class F1(Function):
@staticmethod
def forward(ctx, input):
out = torch.randn(input.size())
ctx.mark_non_differentiable(out)
return input, out
@staticmethod
def backward(ctx, grad_output, ignored):
return grad_output
class F2(Function):
@staticmethod
def forward(ctx, input, ignored):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
x = torch.randn(5, requires_grad=True)
a, b = F1.apply(x)
b = b + 1 # separate F1 from F2 by another op
self.assertTrue(a.requires_grad)
self.assertFalse(b.requires_grad)
c = F2.apply(a, b)
c.backward(torch.ones(c.size()))
self.assertEqual(x.grad, torch.ones(x.size()))
def test_set_grad_enabled(self):
x = torch.tensor([1.], requires_grad=True)
with torch.set_grad_enabled(False):
y = x * 2
self.assertFalse(y.requires_grad)
with torch.set_grad_enabled(True):
y = x * 2
self.assertTrue(y.requires_grad)
with torch.set_grad_enabled(False):
torch.set_grad_enabled(True)
y = x * 2
self.assertTrue(y.requires_grad)
def test_simple_reentrant(self):
y_data = torch.randn(2, 2)
class Reenter(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x, requires_grad=True)
ctx.y = Variable(y_data, requires_grad=True)
ctx.output_var = ctx.x * ctx.y
return ctx.output_var.detach()
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
ctx.output_var.sum().backward()
return ctx.x.grad * grad_output
# Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
out = Reenter.apply(x)
out.sum().backward()
self.assertEqual(x.grad, y_data)
def test_reentrant_child_error(self):
# Parent graph.
a = torch.rand(3, 3, requires_grad=True)
c = a * a
# Reentrant child graph.
b = torch.rand(3, 3, requires_grad=True)
e = b * b
f = TestAutograd.SimulateBackwardError.apply(e)
reentrant_root = f.sum()
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will throw an error.
reentrant_root.backward()
return grad
d = ReentrantFunc.apply(c)
with self.assertRaisesRegex(Exception, 'Simulate error'):
d.sum().backward()
def test_var_mean_differentiable(self):
dim = [2, 4]
keepdim = False
input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
input2 = deepcopy(input1)
var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
var2 = input2.var(dim=dim, keepdim=keepdim)
mean2 = input2.mean(dim=dim, keepdim=keepdim)
grad = torch.randn(3, 4, 6, 3, requires_grad=True)
r1 = var1 * var1 * mean1 * mean1
r2 = var2 * var2 * mean2 * mean2
self.assertEqual(r1, r2, rtol=0.01, atol=0.0)
torch.autograd.backward(r1, grad)
torch.autograd.backward(r2, grad)
self.assertEqual(input1.grad, input2.grad, rtol=0.01, atol=0.0)
@skipIfNoLapack
def test_lobpcg(self):
def func(k, A, largest=True, B=None):
X_shape = list(A.shape)
X_shape[-1] = k
X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device)
if A.dim() > 2:
X = X.expand(X_shape)
D, U = torch.lobpcg(A=A, k=k, B=B, X=X, largest=largest)
# LOBPCG uses a random initial eigenspace approximation
# if parameter `X` is not provided.
# This may cause non-deterministic behavior
# when it comes to the sign of an eigenvector
# (note if v is an eigenvector, so is -v),
# hence we eliminate this non-determinism
# by making sure that each column of U
# gets multiplied by the sign of its max (in absolute value) element.
# Also, gradcheck changes the content of the input by +/- eps (default to 1e-06)
# to compute the numerical gradient which can also cause the signs to flip.
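# As a concrete (hypothetical) example of the sign fix below: for a column
# u = [0.3, -0.9, 0.1] the max-magnitude entry is -0.9, so u is multiplied
# by -1 to give [-0.3, 0.9, -0.1]; a run that produced -u would be mapped to
# the same canonical column, removing the sign ambiguity.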
_, idx = U.abs().max(-2, keepdim=True)
sign = U.gather(-2, idx).sign()
U = U * sign
return D, U
# TODO: review if this can be ported to OpInfos or moved to test_linalg.py
def run_symeig_test(k, sizes, largest=True):
A = torch.rand(*sizes).double()
A = (A @ A.mT) / 10
A.requires_grad_(True)
gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False)
# Custom gradient vectors for better stability due to some
# non-determinism in lobpcg's forward.
# Note this is not required if symeig is used in the forward instead (tested).
D_grad = torch.rand(*A.shape[:-2], k) / 100
U_grad = torch.rand(*A.shape[:-1], k) / 100
gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False)
# check whether A.grad is symmetric
A = A.detach().requires_grad_(True)
D, U = func(k, A, largest)
(D.sum() + U.sum()).backward()
self.assertEqual(A.grad, A.grad.mT)
for largest in [True, False]:
run_symeig_test(1, (6, 6), largest=largest)
run_symeig_test(1, (2, 6, 6), largest=largest)
run_symeig_test(1, (2, 2, 6, 6), largest=largest)
run_symeig_test(2, (6, 6), largest=largest)
run_symeig_test(2, (2, 6, 6), largest=largest)
run_symeig_test(2, (2, 2, 6, 6), largest=largest)
run_symeig_test(3, (9, 9), largest=largest)
run_symeig_test(3, (2, 9, 9), largest=largest)
run_symeig_test(3, (2, 2, 9, 9), largest=largest)
def test_variable_traverse(self):
def get_out_and_unrefed_cycle():
inp = torch.randn(10, requires_grad=True)
tmp = inp.view(10, 1)
out = tmp.view(10)
# Create a reference cycle that contains an
# intermediary Variable in the graph
my_list = []
my_list.append(tmp)
my_list.append(my_list)
return out
out = get_out_and_unrefed_cycle()
gc.collect()
# This will segfault if things have been erroneously released
out.backward(torch.randn(out.size()))
# TODO: review porting these to OpInfo tests
def test_pow_zero_tensor_gradient(self):
def run_test(input_size, exponent):
input = torch.zeros(*input_size, requires_grad=True)
input.pow(exponent).sum().backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), torch.zeros(10))
run_test((10, 10), torch.zeros(10, 10))
run_test((10,), 0)
def test_profiler(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
self.assertTrue(torch.autograd._profiler_enabled())
y = x * 2 + 4
self.assertFalse(torch.autograd._profiler_enabled())
names = ['aten::mul', 'aten::add']
found_indices = set()
for evt in p.function_events:
if evt.name in names:
found_indices.add(names.index(evt.name))
self.assertEqual(len(found_indices), len(names))
def test_profiler_seq_nr(self):
with profile(use_kineto=kineto_available()) as p:
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
z = x + y
s = z.sum()
s.backward()
print(p.key_averages().table(
sort_by="self_cpu_time_total", row_limit=-1))
# expecting aten::add, aten::sum to have the sequence numbers,
# expecting the corresponding backward nodes to have the same numbers
# as the forward ops
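# Sequence numbers are how the profiler ties a forward op to the backward
# node it created: the forward op records some seq_nr N, and the matching
# *Backward0 node (plus its evaluate_function wrapper) reports the same N,
# while implementation-detail ops such as aten::empty report -1. The checks
# below assert exactly this correspondence.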
autograd_ops = {
("aten::add", "Add"): [],
("aten::sum", "Sum"): [],
}
accumulate_ops = []
found_empty = False
for e in p.function_events:
for (fwd_name, bwd_name), ops in autograd_ops.items():
if e.name == fwd_name or (bwd_name in e.name and "Backward" in e.name):
ops.append(e)
if "AccumulateGrad" in e.name:
accumulate_ops.append(e)
# check that nested ops (e.g. empty) don't have
# a sequence number
if e.name == "aten::empty":
self.assertEqual(e.sequence_nr, -1)
found_empty = True
for (fwd_name, bwd_name), ops in autograd_ops.items():
self.assertEqual(len(ops), 3)
self.assertEqual(ops[0].name, fwd_name)
self.assertEqual(ops[1].name, f"autograd::engine::evaluate_function: {bwd_name}Backward0")
self.assertEqual(ops[2].name, f"{bwd_name}Backward0")
self.assertGreaterEqual(ops[0].sequence_nr, 0)
self.assertEqual(ops[1].sequence_nr, ops[0].sequence_nr)
self.assertEqual(ops[2].sequence_nr, ops[0].sequence_nr)
self.assertEqual(ops[0].fwd_thread, 0)
self.assertEqual(ops[1].fwd_thread, ops[0].thread)
self.assertEqual(ops[2].fwd_thread, ops[0].thread)
self.assertTrue(found_empty)
def test_profiler_unboxed_only(self):
x = torch.rand(3, 4)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
x.resize_([3, 2])
def test_profiler_propagation(self):
def foo(x):
with record_function("in_foo") as rf:
return x * 2
x = torch.rand(3, 4)
traced_foo = torch.jit.trace(foo, x)
def bar(x):
with record_function("in_bar") as rf:
# we expect that the profiler will be able
# to propagate across the fork
fut = torch.jit._fork(traced_foo, x)
y = torch.jit._wait(fut)
# note: continuation (and rf's end) can
# be executed in a different thread
with record_function("in_bar_after_wait") as rf2:
y = y * 2
return y
traced_bar = torch.jit.trace(bar, x)
with profile(use_kineto=kineto_available()) as p:
traced_bar(x)
found_foo = False
found_bar = False
found_bar_after_wait = False
for info in p.function_events:
if info.name == "in_foo":
self.assertFalse(found_foo)
found_foo = True
elif info.name == "in_bar":
self.assertFalse(found_bar)
found_bar = True
elif info.name == "in_bar_after_wait":
self.assertFalse(found_bar_after_wait)
found_bar_after_wait = True
self.assertTrue(found_foo)
self.assertTrue(found_bar)
self.assertTrue(found_bar_after_wait)
def test_record_function_callbacks(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
with record_function("foo"):
y = x * 2 + 4
function_events = p.function_events
foo_event = [event for event in function_events if "foo" in event.name][0]
self.assertEqual(foo_event.count, 1)
def test_record_function_new_signatures(self):
# Test the new _record_function ops work
# Note: Remove once record_function uses these directly
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
record = torch.ops.profiler._record_function_enter_new("bar", None)
try:
y = x * 2 + 4
finally:
torch.ops.profiler._record_function_exit(record)
function_events = p.function_events
foo_event = [event for event in function_events if "bar" in event.name][0]
self.assertEqual(foo_event.count, 1)
def test_profiler_aggregation_fake(self):
events = EventList()
id = [0]
def get_id():
id[0] = id[0] + 1
return id[0]
# [[thread_id, [(start, end, id), ....]], ...]
# Using list instead of a dict so order is guaranteed for any Python
# version
threads = [
[1, [(0, 1, get_id()), (1, 2, get_id())]],
[0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]],
]
for thread, ranges in threads:
for range in ranges:
assert(len(range) == 3)
events.append(
FunctionEvent(
id=range[2],
node_id=0,
name="",
thread=thread,
start_us=range[0],
end_us=range[1],
)
)
events._populate_cpu_children()
# Note that [1, 3] pushes out [0, 2] first. Then we record [1, 2]
# as a child of [1, 3]
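# Roughly why res looks like this: events get ids 1..5 in insertion order
# (ids 1-2 on thread 1, ids 3-5 on thread 0); on thread 0 the interval
# (1, 2) (id 4) is nested inside (1, 3) (id 5), so only the last event ends
# up with a non-empty cpu_children list, namely [4].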
res = [[], [], [], [], [4]]
def get_children_ids(event):
return [child.id for child in event.cpu_children]
assert([get_children_ids(event) for event in events] == res)
def test_profiler_aggregation_table(self):
"""
Test if the profiling result is aggregated for `str(prof)`
See: https://github.com/pytorch/pytorch/issues/37500
"""
x = torch.randn(1024)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
torch.einsum("i->", x)
prof_str = str(prof)
prof_table = prof.table()
self.assertEqual(prof_table, prof_str)
def test_profiler_function_event_avg(self):
avg = FunctionEventAvg()
avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, start_us=10, end_us=15))
avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, start_us=20, end_us=30))
avg.add(avg)
self.assertEqual(avg.key, "foo")
# aggregate stats
self.assertEqual(avg.count, 4)
self.assertEqual(avg.cpu_time_total, 30)
self.assertEqual(avg.self_cpu_time_total, 30)
self.assertEqual(avg.cuda_time_total, 0)
# average stats
self.assertEqual(avg.cpu_time, 7.5)
self.assertEqual(avg.cuda_time_total, 0)
def test_profiler_shapes(self):
print("")
layer1 = torch.nn.Linear(20, 30)
layer2 = torch.nn.Linear(30, 40)
input = torch.randn(128, 20)
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
layer2(layer1(input))
print(prof.function_events)
linear_expected_shapes = [
[[128, 20], [30, 20], [30]],
[[128, 30], [40, 30], [40]],
]
found_indices = set()
for event in prof.function_events:
if event.name == "aten::linear":
self.assertTrue(event.input_shapes in linear_expected_shapes)
found_indices.add(linear_expected_shapes.index(event.input_shapes))
self.assertEqual(len(found_indices), len(linear_expected_shapes))
def test_profiler_aggregation_lstm(self):
print("")
rnn = torch.nn.LSTM(10, 20, 2)
total_time_s = 0
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
for i in range(20):
input = torch.randn(5, 3, 10)
h = torch.randn(2, 3, 20)
c = torch.randn(2, 3, 20)
start = time.time()
rnn(input, (h, c))
end = time.time()
total_time_s += end - start
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, header="TEST"))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10))
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True))
total_time_us = total_time_s * 1000.0 * 1000.0 # make it us which is profiler default
print(
"Total time based on python measurements: ",
_format_time(total_time_us)
)
print(
"CPU time measurement python side overhead: {:.2f}%".format(
(total_time_us / prof.self_cpu_time_total - 1.0) * 100.0
)
)
if sys.platform != "win32":
with tempfile.NamedTemporaryFile() as trace_file:
prof.export_chrome_trace(trace_file.name)
def test_record_function(self):
x = torch.randn(10, 10)
def forward(x):
with record_function("outer"):
y = x * 2 + 4
with record_function("inner"):
y = y - 1
y = y / 1
forward(x)
with profile(use_kineto=kineto_available()) as p:
forward(x)
events = p.function_events
important_events = [
'outer',
'aten::mul',
'aten::add',
'inner',
'aten::sub',
'aten::div'
]
idx = 0
for info in events:
if info.name == important_events[idx]:
idx = idx + 1
if idx == len(important_events):
break
self.assertEqual(idx, len(important_events))
# We can also use record_function to decorate an arbitrary function
@record_function('my_func')
def f(x, y):
return x + y
with profile(use_kineto=kineto_available()) as p:
f(1, 2)
self.assertTrue('my_func' in str(p))
def test_record_function_multithreaded(self):
rf = record_function("outer")
rf.__enter__()
with record_function("inner"):
# test that exiting the record function after starting another one
# doesn't throw.
rf.__exit__(None, None, None)
with record_function("inner"):
rf.__enter__()
# test that exiting the record function after ending another one
# doesn't throw.
rf.__exit__(None, None, None)
def test_dir(self):
x = torch.randn(10, 10)
keys = dir(x)
self.assertIn('shape', keys)
# real and imag are only implemented for complex tensors.
y = torch.randn(10, 10, dtype=torch.cfloat)
imag_key = 'imag'
self.assertRaises(RuntimeError, lambda: hasattr(x, imag_key))
self.assertTrue(hasattr(y, imag_key))
keys.remove(imag_key)
for key in keys:
self.assertTrue(hasattr(x, key))
def test_inplace_on_view_saved_output(self):
# Test an in-place operation on a view in which the in-place op saves
# its output. Previously, this created a reference cycle.
dealloc = [0]
class IncrementOnDelete(object):
def __del__(self):
dealloc[0] += 1
def test():
root = torch.randn(3, 3, requires_grad=True)
copy = root.clone()
copy.grad_fn.register_hook(IncrementOnDelete())
view = copy.view(9)
torch.nn.functional.relu(view, inplace=True)
test()
self.assertEqual(dealloc[0], 1)
def test_inplace_on_view_leaf_errors(self):
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
x = torch.zeros(1, requires_grad=True)
y = x.view_as(x)
with self.assertRaisesRegex(RuntimeError,
"a view of a leaf Variable that "
"requires grad is being used in "
"an in-place operation."):
y.add_(1)
def test_inplace_on_view_backward(self):
# Issue #10532: Make sure that this does not raise RuntimeError.
net = nn.Sequential(
nn.InstanceNorm2d(2),
nn.ReLU(True)
)
x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True)
g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape) , create_graph=True)
torch.autograd.grad(g.sum(), [x])
self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]]))
# https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8
inputs = torch.ones((1, 3, 256, 256), requires_grad=True)
tmp1 = (inputs + 1).view_as(inputs)
tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True)
prob_interpolated = torch.sigmoid(tmp2)
gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs,
grad_outputs=torch.ones(prob_interpolated.size()),
create_graph=True, retain_graph=True)[0]
gradient_penalty = gradients.sum()
gradient_penalty.backward()
fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0]
self.assertEqual(fn.name(), "ThresholdBackwardBackward0")
def test_inplace_on_view_weak_grad_fn(self):
# Issue 23502: Test that b's grad_fn is preserved.
a = torch.arange(10.0, requires_grad=True)
b = a.narrow(0, 0, 2).clone().view(-1)
b.relu_()
c = b.clone()
del b
gc.collect()
s = c.sum()
s.backward()
self.assertEqual(s, torch.tensor(1.0))
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
a = torch.rand(10, requires_grad=True).narrow(0, 0, 10)
with self.assertRaises(RuntimeError):
b = a.relu_()
def test_out_variant_raises_when_inputs_require_grad(self):
a = torch.randn(2, 2, requires_grad=True)
b = torch.randn(2, 2, requires_grad=True)
x = torch.zeros_like(a)
# out=... functions don't support automatic differentiation currently
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# the inputs can require grad if we're in no_grad() mode
with torch.no_grad():
torch.mul(a, b, out=x)
self.assertEqual(x, a * b)
a = torch.randn(2, 2)
b = torch.randn(2, 2)
x = torch.zeros(2, 2, requires_grad=True)
# we should throw an exception if the output requires grad
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# TODO: see if this test can be OpInfo'd or moved to diagonal's test suite
def test_diagonal_derivative_requires_grad(self):
# test that the backward requires grad
# we do this because diagonal_backward uses inplace
# operations and gradgradcheck does not catch whether
# they work as expected (it will succeed even if
# the gradient has requires_grad == False)
a = torch.randn(5, 6, requires_grad=True)
b = torch.diagonal(a)**2
c = b.sum()
d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True)
self.assertTrue(d.requires_grad)
def test_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
gI = gO.clone().expand(size)
gI[0] = 0
gI[0] /= 0 # Generate a nan
if ctx.fail_0th:
return gI, None, None
else:
return None, gI, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
out.backward() # Should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out.backward()
self.assertIn('No forward pass information', str(w[0].message))
inp = torch.rand(size, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out = MyFunc.apply(inp, inp, False)
out.backward()
self.assertIn('MyFunc.apply', str(w[0].message))
def test_calculate_shape_util(self):
out = torch.randn(10, 5, requires_grad=True)
grad = torch.randn(5, 10, requires_grad=True)
out_shape, grad_shape = _calculate_shape(out, grad, False)
assert out_shape == torch.Size([10, 5])
assert grad_shape == torch.Size([5, 10])
out = torch.nested_tensor([
torch.randn(10, 5, requires_grad=True),
torch.randn(10, 5, requires_grad=True),
torch.randn(10, 5, requires_grad=True)]
)
grad = torch.nested_tensor([torch.randn(5, 10, requires_grad=True), torch.randn(5, 10, requires_grad=True)])
out_shape, grad_shape = _calculate_shape(out, grad, False)
assert torch.equal(out_shape, torch.tensor([[10, 5], [10, 5], [10, 5]]))
assert torch.equal(grad_shape, torch.tensor([[5, 10], [5, 10]]))
def test_nested_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, fail_0th):
ctx.fail_0th = fail_0th
ctx.save_for_backward(inp1)
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
inp, = ctx.saved_tensors
fail_0th = ctx.fail_0th
g = gO.clone().expand(size)
gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
return gI, None
class MyFunc2(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1 * 2.0 + inp2
@staticmethod
def backward(ctx, gO):
fail_0th = ctx.fail_0th
g1 = gO.clone()
g2 = gO.clone()
g1[0] = 0
g2[0] = 0
# generate a nan
if fail_0th:
g1[0] /= 0
else:
g2[0] /= 0
return g1, g2, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward() # should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
gsum.backward()
self.assertIn('No forward pass information', str(w[1].message))
inp = torch.rand(size, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."):
with detect_anomaly():
out = MyFunc.apply(inp, False)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward()
self.assertIn('MyFunc2.apply', str(w[1].message))
self.assertIn('MyFunc.apply', str(w[2].message))
def test_anomaly_grad_warnings(self):
# PyTorch won't throw warnings if there is an error
# but we'd want to at least see them in stderr
class StdErrDiverter:
def __enter__(self):
self.stderr_orig = sys.stderr
self.stderr_new = io.StringIO()
sys.stderr = self.stderr_new
return self
def __exit__(self, *args):
self.captured = self.stderr_new.getvalue()
sys.stderr = self.stderr_orig
# if the warnings don't throw, they will be handled as regular warnings
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 2)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', str(w[1].message))
# if the warning throws, it will be printed to sys.stderr
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
warnings.simplefilter("error")
with StdErrDiverter() as s:
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 1)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', s.captured)
def test_anomaly_assign_parent_cleanup(self):
# Test that python objects created are properly cleaned up when assign_parent is called
def get_ref():
# we use torch.exp here but any function that will construct a new node in its
# backward call in grad mode will work
x = torch.randn(2, 2, requires_grad=True)
t = x.exp()
# ExpBackward calls mul, creating the MulBackward node when create_graph=True.
# In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to
# MulBackward's anomaly metadata dict, creating the following reference chain:
#
# grad -> MulBackward -> PyObject -> ExpBackward
#
with detect_anomaly():
grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True)
# We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict
#
# (PyObject) -> ExpBackward -> dict -> *Foo*
# t ----^ WeakRef ---^
#
# We want to test that when grad goes out of scope at the end of this function, that PyObject is destroyed
# We can test this by checking that Foo is not kept alive once t is destroyed
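# The weakref pattern used below, in isolation (relying on CPython's
# reference counting for prompt collection):
#   obj = Foo(); r = weakref.ref(obj)
#   assert r() is obj        # alive while a strong reference exists
#   del obj
#   assert r() is None       # dead once the last strong reference is gone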
class Foo(object):
pass
my_obj = Foo()
meta_dict = t.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return t, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
def test_nested_anomaly_printstack_cleanup(self):
# Test if metadata dict PyObject is properly destroyed
def get_ref():
# This is similar to the construction in test_anomaly_assign_parent_cleanup:
#
# MyFuncBackward2 -> PyObject -> MyFuncBackward -> dict -> Foo
# out ---^ WeakRef ---^
#
# We want to check that Foo is still properly destroyed even when MyFunc2Backward's
# AnomalyMetadata calls printstack, which does some python object manipulation.
#
# You might be wondering why we still need test_anomaly_assign_parent_cleanup,
# since if the PyObject were not destroyed here, wouldn't this test detect that too?
# The answer is that a custom function's PyObject (THPFunction) actually only holds
# a weak reference to the c++ node!
class MyFunc(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
return MyFunc2.apply(x)
class MyFunc2(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, gO):
return gO + float("NaN")
inp = torch.rand(1, requires_grad=True)
out = MyFunc.apply(inp)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
ginp.backward()
class Foo(object):
pass
my_obj = Foo()
meta_dict = out.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return out, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
# TODO: update these tests to use the linalg module and move to test_linalg.py
@skipIfNoLapack
def test_eig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_eig_complex_eigenvalues(self):
A = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=True)
with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_symeig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.symeig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
def test_no_grad_copy(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
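# Why a copy is expected here (a rough explanation, not asserted via any
# public API): the grad returned from backward is an expanded tensor with
# stride-0 dimensions, so AccumulateGrad cannot adopt it directly as
# a.grad / b.grad and materializes a contiguous clone instead, which is why
# the data_ptr comparisons below must differ.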
NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for one of a,b
a.grad = b.grad = None
MyFunc.apply(a, b)[1][0].backward()
p_g = MyFunc.static_grad_ptr
p_a = a.grad.data_ptr()
p_b = b.grad.data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad, grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
# Create a sparse tensor with non-contiguous indices and values
# and return it as the grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for one of a,b
emb_matrix = MyFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = MyFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
# with non-contiguous indices and values, we should trigger a copy.
a.grad = b.grad = None
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = NonContGradFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
def test_gradcheck_single_input(self):
def check(fast_mode):
def f(inp):
return inp.mul(5)
gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_sparse_input(self):
def check(fast_mode):
def fn(sparse):
return torch.sparse.sum(sparse)
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=True,
check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=False,
check_batched_grad=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
@unittest.expectedFailure
def test_gradcheck_sparse_csr_input(self):
def check(fast_mode):
def fn(sparse_csr):
return torch.clone(sparse_csr).to_dense()
# Fails because gradcheck can't work with sparse csr inputs yet
gradcheck(fn, torch.rand(2, 2, dtype=torch.double).to_sparse_csr().requires_grad_(True), check_sparse_nnz=True,
check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
gradcheck(fn, torch.rand(2, 2, dtype=torch.double).to_sparse_csr().requires_grad_(True), check_sparse_nnz=False,
check_batched_grad=False, fast_mode=fast_mode)
# check(fast_mode=True) # Segmentation fault
check(fast_mode=False)
def test_gradcheck_nondeterministic(self):
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
def check(fast_mode):
inp = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False, fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_validates_inputs(self):
def check(fast_mode):
# when inputs are not dense, but check_sparse_nnz is false
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False,
fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False,
check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
# when none of the inputs require grad (always raises even if raise_exception=False)
x = torch.rand(10, requires_grad=False)
with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
# (warning) when inputs are not double precision
x = torch.ones(1, dtype=torch.float32, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1, fast_mode=fast_mode))
# when the layout is not mkldnn (aka has strides) and the input has a dimension with stride 0
# (always raises even if raise_exception=False)
x = torch.ones(1, dtype=torch.float64, requires_grad=True)
x = x.expand((2, 2))
with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_validates_input_mkldnn(self):
# forward mode testing is not allowed for mkldnn inputs
# Update tolerances below to make sure the gradients match even in single precision floats
# Use the warning assert to hide the float32 warning
x = torch.ones(1).to_mkldnn().requires_grad_()
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=False, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=True, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_test_outputs(self):
def check(fast_mode):
# when sparse outputs (always raise even if raise_exception=False)
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'):
gradcheck(lambda x: x, (x,), check_sparse_nnz=True, check_batched_grad=False, raise_exception=False,
fast_mode=fast_mode)
# when mkldnn outputs (always raise even if raise_exception=False)
root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'):
gradcheck(lambda x: x.to_mkldnn(), (root,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_no_differentiable_outputs(self):
def check(fast_mode):
# When none of the outputs are differentiable, but numerical gradient is not zero
x = torch.ones((1,), requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'):
gradcheck(lambda x: torch.tensor([x]), x)
self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False, fast_mode=fast_mode))
# succeed when no outputs at all
self.assertTrue(gradcheck(lambda x: (), (x,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_batched_grad(self):
def check(fast_mode):
x = torch.rand(10, dtype=torch.double, requires_grad=True).to_sparse()
# runtime error while computing batched grad (prints a big error)
with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True, fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True,
raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_backward_mul_by_grad_output(self):
# when grad_input is sparse and has incorrect sparse_dim/dense_dim
def check(fast_mode):
def fn(x):
def hook(grad):
if grad is not None:
return grad.to_dense().to_sparse(1)
return grad
y = x.clone()
y.register_hook(hook)
return y.to_dense()
x = torch.ones((2, 2), dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'):
gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (non-sparse case)
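# The hook below shifts the gradient by a constant, so the reported grad is
# no longer proportional to grad_output; gradcheck catches this (roughly) by
# backpropagating a zero grad_output and checking that the resulting grad is
# exactly zero, which fails for fn2.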
def fn2(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn2, (x,), atol=1e-1, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (sparse case)
def fn3(x):
y = x.clone().to_dense()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when layout of grad_input is not the same as input
class Test(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
return x.to_sparse()
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'):
gradcheck(Test.apply, (x,), check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_undefined_grad(self):
def check(fast_mode):
# when encounter runtime error while running backward
def fn(x):
def hook(x):
if x is None:
raise RuntimeError("x is undefined")
y = x.clone()
y.register_hook(hook)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"):
with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_jacobian_mismatch(self):
def check(fast_mode):
def fn(x): # R -> R, C -> C
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
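# The hook above adds 1e-2 to every backward gradient, so the analytical
# Jacobian that gradcheck reconstructs no longer agrees with the numerical
# (finite-difference) Jacobian, producing the mismatch errors asserted below.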
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False, fast_mode=False))
def fn2(x): # R -> C
y = torch.complex(x, x)
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn2, (x,), fast_mode=False)
self.assertFalse(gradcheck(fn2, (x,), raise_exception=False, fast_mode=False))
def fn3(x): # C -> R
y = torch.real(x)
y.register_hook(lambda x: x + 1e-2)
return y
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn3, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False, fast_mode=False))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_dense_and_sparse_inputs(self):
def check(fast_mode):
def fn(x, y):
return x * y.coalesce().to_dense()
a = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
b = torch.rand(2, 2, dtype=torch.double,).to_sparse().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_multiple_mkldnn_inputs(self):
def check(fast_mode):
def fn(x, y):
return x + y.to_dense()
a = torch.rand(10, requires_grad=True)
b = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
def fn2(x, y):
return x.to_dense() + y.to_dense()
c = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, c), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_output_shape_or_dtype_depend_on_values(self):
def check(fast_mode):
def fn(x):
if torch.all(x >= 1):
return torch.cat([x, x])
else:
return x
a = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(AssertionError, 'return outputs with the same shape when inputs are perturbed'):
self.assertTrue(gradcheck(fn, (a,), fast_mode=fast_mode))
def fn2(x):
if torch.all(x >= 1):
return x.to(torch.float32)
else:
return x
with self.assertRaisesRegex(AssertionError, 'return outputs with the same dtype when inputs are perturbed'):
self.assertTrue(gradcheck(fn2, (a,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_complex_non_complex_outputs(self):
def fn(x, y):
z = torch.complex(x, y)
return z, x + 1
a = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
self.assertTrue(gradcheck(fn, (a, b)))
def fn2(z):
return z, torch.real(z)
c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
self.assertTrue(gradcheck(fn2, (c)))
def test_gradcheck_get_numerical_jacobian(self):
# get_numerical_jacobian is deprecated and no longer used internally by gradcheck
from torch.autograd.gradcheck import get_numerical_jacobian
def fn(inputs):
# get_numerical_jacobian requires fn to take inputs as a tuple
# and returns the jacobian wrt the first output
x = inputs[0]
y = inputs[1]
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobian[1], 1 * torch.eye(4, dtype=torch.double))
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6, grad_out=2.0)
def test_gradcheck_get_analytical_jacobian(self):
from torch.autograd.gradcheck import get_analytical_jacobian
def fn(x, y):
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
outputs = fn(a, b)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a, b), outputs[0])
self.assertEqual(jacobians[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobians[1], 1 * torch.eye(4, dtype=torch.double))
self.assertTrue(reentrant)
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
outputs = NonDetFunc.apply(a, 1e-6)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a,), outputs)
self.assertFalse(reentrant)
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobians, _, _, _ = get_analytical_jacobian((a,), outputs, grad_out=2.0)
def test_gradcheck_custom_error(self):
from torch.autograd.gradcheck import GradcheckError
def check(fast_mode):
def fn(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(GradcheckError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
def fn2(x):
raise RuntimeError("Not a GradcheckError!")
# Checks that when raise_exception=False, non-GradcheckErrors are not caught by gradcheck
with self.assertRaisesRegex(RuntimeError, "Not a GradcheckError!"):
gradcheck(fn2, (x,), fast_mode=fast_mode, raise_exception=False)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_forward_ad(self):
def fn(x, y):
return x + y, y
def bad_fn(x, y):
# Hacky way to check if we're currently inside a forward ad level
is_running_forward_ad = fwAD._current_level >= 0
if is_running_forward_ad:
y_p, y_d = fwAD.unpack_dual(y)
y = fwAD.make_dual(y_p, y_d * 1.1)
return x + y, y
err_msg = "Jacobian computed with forward mode mismatch for output 0 with respect to input 1"
for fast_mode in [True, False]:
# Test for all inputs and outputs being real
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def basic_mul(x):
return torch.view_as_real(torch.resolve_conj(x * 1j))
gradcheck(basic_mul, x, check_forward_ad=True, fast_mode=fast_mode)
# Test for one input and one output being complex
x = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
# Test for all inputs and outputs being complex
y = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def test_gradcheck_forward_ad_runs_with_no_requires_grad(self):
# Currently requires_grad is used as an easy way for gradcheck to know
# which inputs of the function are meant to be differentiable
# This test checks that the inputs passed to the function do not have
# requires_grad=True even though they may have requires_grad=True when passed
# to gradcheck
class UserFn(Function):
@staticmethod
def forward(ctx, x, y):
if fwAD._current_level >= 0:
self.assertFalse(x.requires_grad)
self.assertFalse(y.requires_grad)
return x.clone(), y.clone()
@staticmethod
def jvp(ctx, x_t, y_t):
return x_t, y_t
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=True)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=False, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
def test_gradcheck_forward_ad_respects_requires_grad(self):
# Currently requires_grad is used as an easy way for gradcheck to know
# which inputs of the function are meant to be differentiable
jvp_count = [0]
class UserFn(Function):
@staticmethod
def forward(ctx, x, y):
return x.clone(), y.clone()
@staticmethod
def jvp(ctx, x_t, y_t):
jvp_count[0] += 1
return x_t, y_t
# NB: In slow gradcheck we need to loop numel times, so use numel = 1 to ensure
# that fast and slow modes have the same counts
x = torch.rand(1, dtype=torch.double, requires_grad=True)
y = torch.rand(1, dtype=torch.double, requires_grad=True)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=False, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
self.assertEqual(jvp_count[0], 2) # (2) once per input
jvp_count = [0]
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
self.assertEqual(jvp_count[0], 6) # (+4): (once with normal ZT (+1), once with efficient ZT (+1)) for each input (x2)
jvp_count = [0]
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
self.assertEqual(jvp_count[0], 12) # (+6): (compute batch of 2 with vmap (+1), with a loop (+2)) for each input (x2)
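# Summary of the bookkeeping above, per differentiable input: the plain
# forward-AD check costs 1 jvp call, check_undefined_grad adds 2 (a normal
# zero tangent plus an "efficient" zero tangent), and the batched forward
# check adds 3 (one vmapped batch of size 2 plus a loop over the 2 entries);
# with two differentiable inputs that gives the 2, 6 and 12 totals asserted.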
jvp_count = [0]
# Repeat the previous test except we mark one input with requires_grad=False
# NB: _test_undefined_forward_mode is only (+1) when the function has a single differentiable input, not (+2)!
# Otherwise, the other counts are halved.
x = torch.rand(1, dtype=torch.double, requires_grad=True)
y = torch.rand(1, dtype=torch.double, requires_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
self.assertEqual(jvp_count[0], 5) # 1 + 1 + 3
def test_gradcheck_check_forward_or_backward_only(self):
"""Depending on settings for check_forward_ad and check_backward_ad, the
correct codepaths should be reached (or not reached)
"""
fwd_fail_err_msg = "FAIL FWD"
bwd_fail_err_msg = "FAIL BWD"
class UserFn(Function):
@staticmethod
def forward(ctx, foo, fwd_bad, bwd_bad):
ctx.fwd_bad = fwd_bad
ctx.bwd_bad = bwd_bad
return foo * 2
@staticmethod
def vjp(ctx, gO):
if ctx.bwd_bad:
raise RuntimeError(bwd_fail_err_msg)
else:
return 2 * gO, None, None
@staticmethod
def jvp(ctx, gI, _1, _2):
if ctx.fwd_bad:
raise RuntimeError(fwd_fail_err_msg)
else:
return 2 * gI
for fast_mode in (True, False):
for check_forward_ad in (True, False):
for check_backward_ad in (True, False):
for fwd_bad in (True, False):
for bwd_bad in (True, False):
fwd_should_fail = fwd_bad and check_forward_ad
bwd_should_fail = bwd_bad and check_backward_ad
def run():
gradcheck(UserFn.apply, (x, fwd_bad, bwd_bad), check_forward_ad=check_forward_ad,
check_backward_ad=check_backward_ad, check_undefined_grad=check_backward_ad,
check_batched_grad=check_backward_ad, fast_mode=fast_mode)
x = torch.rand(2, dtype=torch.double, requires_grad=True)
if not check_forward_ad and not check_backward_ad:
with self.assertRaisesRegex(AssertionError, "Expected at least one of"):
run()
continue
if not fwd_should_fail and not bwd_should_fail:
run()
else:
# If both fail, backward AD failure "hides" forward AD failure
if fwd_should_fail:
fail_msg = fwd_fail_err_msg
if bwd_should_fail:
fail_msg = bwd_fail_err_msg
with self.assertRaisesRegex(RuntimeError, fail_msg):
run()
def test_gradcheck_forward_ad_batched_grad(self):
x = torch.rand(2, dtype=torch.double, requires_grad=True)
# multiple inputs and outputs with non-tensors inputs
def fn1(a: torch.Tensor, b: int):
return a.clone(), a + 1
gradcheck(fn1, (x, 1), check_forward_ad=True, check_backward_ad=False, check_batched_grad=False,
check_undefined_grad=False, check_batched_forward_grad=True)
# unrelated inputs: tangent for c is None
def fn2(a: torch.Tensor, c: torch.Tensor):
return a.clone()
gradcheck(fn2, (x, x.clone()), check_forward_ad=True, check_backward_ad=False, check_batched_grad=False,
check_undefined_grad=False, check_batched_forward_grad=True)
class Fn(Function):
@staticmethod
def forward(ctx, foo):
return foo * 2
@staticmethod
def vjp(ctx, gO):
return gO * 2
@staticmethod
def jvp(ctx, gI):
torch.randn_like(gI)
return gI * 2
msg = "vmap: We do not yet support calling random operations inside of vmap"
with self.assertRaisesRegex(RuntimeError, msg):
gradcheck(Fn.apply, (x,), check_forward_ad=True, check_batched_forward_grad=True)
def test_version_counter(self):
x = torch.randn(1, 2)
# In-place op bumps version
x_saved_version = x._version
x.add_(1).add_(1)
self.assertTrue(x._version > x_saved_version)
# Differentiable view shares version counter
xz = x[:]
self.assertTrue(x._version == xz._version)
xz.add_(1)
self.assertTrue(x._version == xz._version)
# `x.data = y` preserves version counter of `x`
x_saved_version = x._version
x.data = torch.randn(2, 3)
self.assertTrue(x._version == x_saved_version)
x.add_(1)
self.assertTrue(x._version > x_saved_version)
# Make sure `x` is still using the same version counter it shares with `xz`
self.assertTrue(x._version == xz._version)
# In-place op on `xz` also updates version of `x`,
# because they share the version counter
xz.add_(1)
self.assertTrue(x._version == xz._version)
def test_set_data_tensorimpl_type(self):
# Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl
# of type `SparseTensorImpl`.
x = torch.randn(1, 2)
x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1]))
with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
x.data = x_s
def test_set_data_preserve_pyobj(self):
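# Assigning to `.data` should not create a new Python object; id(b) stays the same.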
a = torch.randn(1, 2)
b = torch.randn(1, 2)
b_id_saved = id(b)
b.data = a
self.assertTrue(b_id_saved == id(b))
def test_set_data_self_requires_grad(self):
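# A leaf that requires grad can only take floating point or complex data via `.data`; integral data should raise.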
a = torch.tensor(1.0, requires_grad=True)
b = torch.tensor(2.0)
c = torch.tensor(3, dtype=torch.int64)
a.data = b
with self.assertRaisesRegex(RuntimeError, 'must be floating point or complex dtype'):
a.data = c
@unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows")
def test_thread_shutdown(self):
code = """import torch
from torch.autograd import Function
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
"""
s = TestCase.runWithPytorchAPIUsageStderr(code)
# The autograd engine creates worker threads only when GPU devices are present.
# So make sure that we do shutdown threads when we're testing cuda and make sure
# that there is no thread to shutdown when we're not using cuda.
if TEST_CUDA or torch.backends.mps.is_available():
self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
else:
self.assertNotRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
@unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941")
def test_deep_reentrant(self):
class DeepReentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
if ctx.x < 0:
return x
with torch.enable_grad():
DeepReentrant.apply(ctx.x).sum().backward()
return x
# Test stack overflow escape mechanism
v = torch.tensor(2000.0, requires_grad=True)
# This will cause stack overflow if reentrant calls are handled
# in the same thread recursively
DeepReentrant.apply(v).sum().backward()
# Test stack overflow escape mechanism multiple times
# to ensure reusing workers in the pool works fine
v2 = torch.tensor(200.0, requires_grad=True)
DeepReentrant.apply(v2).sum().backward()
def test_reentrant_priority(self):
order = []
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
order.append("MyFunction")
return x
class Reentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
order.append("Reentrant")
if ctx.x < 0:
return x
with torch.enable_grad():
Reentrant.apply(ctx.x).backward()
return x
a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
v = a * b
v.backward()
# The tasks for the Reentrant and MyFunction backward() will be added
# to the queue in the autograd engine at the same time. The backward
# for Reentrant will be executed first, which will then add other
# backward tasks to the queue. We want to ensure all the reentrant tasks
# are prioritized over the MyFunction backward task regardless of their
# sequence numbers
self.assertEqual(len(order), 11)
self.assertEqual(order.count("Reentrant"), 10)
self.assertEqual(order[-1], "MyFunction")
@slowTest
def test_checkpointing(self):
num_inp = 2000
nz_inp = 10
nz_out = 10
nz_bottleneck = 1000
# small proxy network for some complex reasoning we want to do per input
module = nn.Sequential(
nn.Linear(nz_inp, nz_bottleneck),
nn.ReLU(),
nn.Linear(nz_bottleneck, nz_inp)
)
feat_combined = []
for r in range(num_inp):
data_r = torch.empty(1, nz_inp)
data_r.uniform_()
data_r.requires_grad = True
feat_r = checkpoint(module, data_r)
feat_combined.append(feat_r)
# compute mean as a proxy for some joint reasoning
mean_combined = torch.stack(feat_combined).mean()
mean_combined.backward()
def _test_checkpointing_non_reentrant_autocast(self, device_type):
for enabled in [True, False]:
def foo(x, y, z):
# torch.mm is on autocast's list of ops that should run in
# the autocast precision
x = torch.mm(x, y)
y = torch.mm(x, z)
z = torch.mm(z, z)
expected_dtype = (
torch.float32 if not enabled else torch.bfloat16
)
self.assertEqual(expected_dtype, z.dtype)
return z
x = torch.randn(3, 3, requires_grad=True)
y = torch.randn(3, 3, requires_grad=True)
z = torch.randn(3, 3, requires_grad=True)
if device_type == 'cuda':
x = x.cuda()
y = y.cuda()
z = z.cuda()
with torch.autocast(enabled=enabled, device_type=device_type, dtype=torch.bfloat16):
loss = checkpoint(foo, x, y, z, use_reentrant=False)
loss = loss.sum()
# Without saving + recasting the autocast type, would raise error in autograd
# about mismatched dtypes.
loss.backward() # triggers recomputation to check it runs in bfloat
def test_checkpointing_non_reentrant_autocast_cpu(self):
"""
Test that autocast args such as the dtype are preserved during non-reentrant
checkpoint recomputation on CPU.
"""
self._test_checkpointing_non_reentrant_autocast(device_type='cpu')
@unittest.skipIf(
not torch.cuda.is_available() or not torch.cuda.is_bf16_supported(),
"Test requires CUDA bf16 support"
)
def test_checkpointing_non_reentrant_autocast_gpu(self):
"""
Test that autocast args/kwargs such as the dtype are preserved during
non-reentrant checkpoint recomputation on GPU.
"""
self._test_checkpointing_non_reentrant_autocast(device_type='cuda')
@unittest.skipIf(not torch.cuda.is_available(), "Test requires CUDA")
@slowTest
def test_checkpointing_without_reentrant_memory_savings(self):
class MyModel(nn.Module):
def __init__(self, n, use_checkpoint, use_reentrant):
super().__init__()
self.n = n
self.use_checkpoint = use_checkpoint
self.use_reentrant = use_reentrant
self.layers = nn.ModuleList()
for i in range(self.n):
layer = nn.Sequential(
nn.Linear(256, 256), nn.Linear(256, 256), nn.Linear(256, 256)
)
self.layers.append(layer)
# pre-allocate the grad so that increased memory usage is mainly
# due to activations.
for layer in self.layers:
for lin in layer:
lin.weight.grad = torch.ones_like(lin.weight)
lin.bias.grad = torch.ones_like(lin.bias)
def forward(self, x):
for i in range(self.n):
if not self.use_checkpoint:
x = self.layers[i](x)
else:
x = checkpoint(self.layers[i], x, use_reentrant=self.use_reentrant)
return x
model_no_checkpoint = MyModel(8, use_checkpoint=False, use_reentrant=False).cuda()
model_reentrant_checkpoint = MyModel(8, use_checkpoint=True, use_reentrant=True).cuda()
model_no_reentrant_checkpoint = MyModel(8, use_checkpoint=True, use_reentrant=False).cuda()
x = torch.randn(100, 256, requires_grad=True, device='cuda')
torch.cuda.reset_peak_memory_stats()
loss = model_no_checkpoint(x.clone()).sum()
loss.backward()
mem_no_checkpoint = torch.cuda.max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
loss = model_reentrant_checkpoint(x.clone()).sum()
loss.backward()
mem_reentrant_checkpoint = torch.cuda.max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
loss = model_no_reentrant_checkpoint(x.clone()).sum()
loss.backward()
mem_no_reentrant_checkpoint = torch.cuda.max_memory_allocated()
self.assertTrue(mem_reentrant_checkpoint < mem_no_checkpoint)
self.assertTrue(mem_no_reentrant_checkpoint < mem_no_checkpoint)
def test_checkpointing_without_reentrant_custom_function_works(self):
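# A custom Function inside a non-reentrant checkpoint may read ctx.saved_tensors more than once in backward.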
class MyFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y, z):
w = x * y * z
out = w + w
ctx.save_for_backward(x, y, z, w, out)
return out
@staticmethod
def backward(ctx, grad_out):
x, y, z, w, out = ctx.saved_tensors
# Accessing the saved Tensors a second time is fine
# as they get cleared only when the SavedVariable
# gets cleared, which happens after this function returns
x_2, y_2, z_2, w_2, out_2 = ctx.saved_tensors
return x, y, z
x = torch.tensor(1., requires_grad=True)
y = torch.tensor(2., requires_grad=True)
z = torch.tensor(3., requires_grad=True)
def foo(x, y, z):
x = x * y * z
y = y * y * z
z = z * z
out = MyFunc.apply(x, y, z)
return out
out = checkpoint(foo, x, y, z, use_reentrant=False)
out.sum().backward()
def test_access_saved_tensor_twice_without_recomputation_works(self):
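# With use_reentrant=False, saved tensors remain accessible until backward frees them, after which access raises.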
def foo(a):
b = a * a
c = a * b
d = torch.exp(a)
return d
a = torch.randn(5, requires_grad=True)
d = checkpoint(foo, a, use_reentrant=False)
# First access
d.grad_fn._saved_result
# Second access still works as the saved variable was not cleared
d.grad_fn._saved_result
# Backward clears the saved variable
d.sum().backward()
# Now it raises an error
with self.assertRaisesRegex(
RuntimeError,
"or directly access saved tensors after they have already been freed"
):
d.grad_fn._saved_result
@slowTest
@parametrize("input_requires_grad", [True, False])
def test_checkpointing_without_reentrant(self, input_requires_grad):
"""
Basic test for checkpoint without reentrant autograd.
"""
num_inp = 2000
nz_inp = 10
nz_out = 10
nz_bottleneck = 1000
# small proxy network for some complex reasoning we want to do per input
module = nn.Sequential(
nn.Linear(nz_inp, nz_bottleneck),
nn.ReLU(),
nn.Linear(nz_bottleneck, nz_inp)
)
# Module holder for testing activation checkpointing with no_reentrant
# supports kwargs.
class MyModule(nn.Module):
def __init__(self, mod):
super().__init__()
self.module = mod
def forward(self, data):
return self.module(data)
module = MyModule(mod=module)
# Run model with and without checkpointing and verify gradients are
# equivalent, regardless of if inputs require grads or not.
module_copy = deepcopy(module)
feat_combined = []
feat_combined_no_checkpoint = []
for r in range(num_inp):
data_r = torch.empty(1, nz_inp)
data_r.uniform_()
data_r.requires_grad = input_requires_grad
data_r_copy = data_r.clone()
feat_r = checkpoint(module, data=data_r, use_reentrant=False)
feat_combined.append(feat_r)
feat_r_no_checkpoint = module_copy(data_r)
feat_combined_no_checkpoint.append(feat_r_no_checkpoint)
# compute mean as a proxy for some joint reasoning
mean_combined = torch.stack(feat_combined).mean()
mean_combined.backward()
mean_combined_no_checkpoint = torch.stack(feat_combined_no_checkpoint).mean()
mean_combined_no_checkpoint.backward()
for checkpoint_param, param in zip(module.parameters(), module_copy.parameters()):
self.assertEqual(checkpoint_param.grad, param.grad)
def test_checkpoint_valid_reset_on_error(self):
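# After checkpoint + autograd.grad raises (they are incompatible), internal state should reset so later backward() calls work.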
a = torch.randn(2, 2, requires_grad=True)
with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"):
b = checkpoint(torch.exp, a).sum()
torch.autograd.grad(b, (a,))
c = checkpoint(torch.exp, a).sum()
c.backward()
@parametrize("use_reentrant", [True, False])
def test_checkpointing_without_reentrant_detached_tensor(self, use_reentrant):
class NoGradModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 2, bias=False)
self.lin2 = nn.Linear(2, 2, bias=False)
def forward(self, x):
with torch.no_grad():
return self.lin2(self.linear(x))
module = NoGradModule()
err_ctx = (
self.assertRaisesRegex(
RuntimeError,
"none of output has requires_grad=True"
)
if use_reentrant
else contextlib.suppress()
)
a = torch.randn(2, 2, requires_grad=True)
for _ in range(3):
with err_ctx:
# out does not require grad
out = checkpoint(module, a, use_reentrant=use_reentrant)
# Make loss require grad, otherwise we would run into
# "element 0 of tensors does not require grad and does not have a grad_fn"
out += a
out.sum().backward()
def test_checkpointing_without_reentrant_correct_grad(self):
"""
Verifies that correct gradients are calculated for checkpoint
without reentrant autograd, for both backward() and autograd.grad().
"""
a = torch.randn(2, 2, requires_grad=True)
b = torch.exp(a).sum()
b.backward()
b_grad = a.grad
a.grad = None
c = checkpoint(torch.exp, a, use_reentrant=False).sum()
c.backward()
c_grad = a.grad
a.grad = None
d = checkpoint(torch.exp, a, use_reentrant=False).sum()
d_grad, = torch.autograd.grad(d, (a,))
self.assertEqual(b_grad, c_grad)
self.assertEqual(b_grad, d_grad)
def test_checkpointing_without_reentrant_dataparallel(self):
"""
Verifies gradient correctness when checkpoint without reentrant autograd
is used in conjunction with DataParallel.
"""
class LinearModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 2, bias=False)
def forward(self, inp):
return self.linear(inp)
a = torch.randn(2, 2, requires_grad=True)
if torch.cuda.is_available():
a = a.cuda()
model = LinearModule()
if torch.cuda.is_available():
model = model.cuda()
b = deepcopy(model)(a).sum()
b.backward()
b_grad = a.grad
a.grad = None
module = torch.nn.DataParallel(deepcopy(model))
c = checkpoint(module, a, use_reentrant=False).sum()
c.backward()
c_grad = a.grad
self.assertEqual(b_grad, c_grad)
def test_checkpointing_without_reentrant_parameter_used_in_an_out(self):
"""
Ensures that gradient hooks are only called once per tensor.
"""
w = torch.randn(10, 10, requires_grad=True)
count = 0
def hook(grad):
nonlocal count
count += 1
w.register_hook(hook)
x = torch.rand(10, 10, requires_grad=True)
h = w * x # Using w outside the checkpoint
out = checkpoint(lambda x: w * x, h, use_reentrant=False) # Using w inside the checkpoint
out.sum().backward()
# should only call hook once
self.assertEqual(count, 1)
def test_checkpointing_without_reentrant_arbitrary_input_output(self):
"""
Ensures checkpointing without reentrant autograd works with functions
with arbitrary input/output structures.
"""
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(5, 5, bias=False)
def forward(self, dict_input):
tensor = dict_input["tensor"]
return {
"result": self.layer(tensor)
}
model_no_checkpoint = MyModel()
model_checkpoint_without_reentrant = deepcopy(model_no_checkpoint)
inp = {
"tensor": torch.randn(5, 5)
}
out_no_checkpoint = model_no_checkpoint(inp)["result"].sum()
out_checkpoint = checkpoint(
model_checkpoint_without_reentrant,
inp,
use_reentrant=False
)["result"].sum()
self.assertEqual(out_checkpoint, out_no_checkpoint)
out_no_checkpoint.backward()
out_checkpoint.backward()
for param, checkpoint_param in zip(model_no_checkpoint.parameters(), model_checkpoint_without_reentrant.parameters()):
self.assertEqual(param.grad, checkpoint_param.grad)
def test_callback_adds_callback(self):
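# A callback queued from within backward can itself queue another callback; both should run exactly once.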
called = [0]
def callback_final():
called[0] += 1
def callback_adds_callback():
called[0] += 1
Variable._execution_engine.queue_callback(callback_final)
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, grad):
Variable._execution_engine.queue_callback(callback_adds_callback)
return grad
a = torch.rand((3, 3), requires_grad=True)
b = MyFunc.apply(a)
b.sum().backward()
self.assertEqual(called[0], 2)
def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
counter = {}
counter["inner"] = 0
counter["outer"] = 0
def inc_inner_counter():
counter["inner"] += 1
def inc_outer_counter():
counter["outer"] += 1
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 1 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_inner_counter)
return input
class MyReentrantFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 0 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_outer_counter)
# Reentrant backward call.
tmp_inp = input.detach().requires_grad_()
with torch.enable_grad():
tmp_out = (MyFunc.apply(tmp_inp)).sum()
tmp_out.backward()
return input
t1 = torch.rand((3, 3), requires_grad=True)
t2 = MyReentrantFunc.apply(t1)
t3 = t2.sum()
torch.autograd.backward([t3])
return counter
def test_reentrant_with_callbacks_depth_0(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([0])
self.assertEqual(1, ret["outer"])
self.assertEqual(0, ret["inner"])
def test_reentrant_with_callbacks_depth_1(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([1])
self.assertEqual(0, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_callbacks_both_depths(self):
# Verify callback is called twice.
ret = self._test_reentrant_with_callbacks([0, 1])
self.assertEqual(1, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def add_gradient_penalty_to_grad(grad):
handle.remove()
old_param_grad = grad
param.grad = None
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
new_param = param.detach().requires_grad_()
out = ((g * 2) + new_param).sum()
out.backward()
res = g.grad + grad
param.grad = old_param_grad
return res
handle = param.register_hook(add_gradient_penalty_to_grad)
# Forward pass
tmp = (param * param)
loss = tmp.sum()
# Compute the gradients
loss.backward()
def test_reentrant_with_non_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def manual_increase_gradient(grad):
handle.remove()
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
out = ((g * 2) + 5).sum()
out.backward()
res = g.grad + grad
return res
# Forward pass
tmp = (param * param)
handle = tmp.register_hook(manual_increase_gradient)
loss = tmp.sum()
# Compute the gradients
loss.backward()
self.assertEqual(param.grad, 6 * param)
def test_grad_fn_attr_bindings(self):
# Check that the getter of each type returns what we want
# See `gen_autograd_functions.py` for how the getters are generated
#
# This test is only meant to check if the codegen'd bindings work
# Please help update this test if you update the names of any of the fields we check!
#
a = torch.ones(1, requires_grad=True)
b = torch.ones(1, requires_grad=True)
out = torch.stack([a, b], dim=0)
self.assertEqual(out.grad_fn._saved_tensors, (a, b)) # TensorList -> Tuple[Tensor]
self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_tensors[0], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_dim, 0) # int64_t -> int
self.assertIsInstance(out.grad_fn._saved_dim, int)
out.grad_fn._raw_saved_tensors[0].register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._raw_saved_tensors
self.assertEqual(out.grad_fn._saved_dim, 0)
a = torch.ones(2, 2, requires_grad=True)
indices = torch.tensor([0, 1])
out = a[:, indices]
self.assertEqual(out.grad_fn._saved_indices, (None, indices)) # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?]
self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_indices[1], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_self_sizes, a.shape) # IntArrayRef -> Tuple[int]
self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int)
out.grad_fn._raw_saved_indices[1].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
out.grad_fn._raw_saved_indices[0].register_hooks(lambda x: x, lambda x: x)
a = torch.ones(2, 2, requires_grad=True)
out = a * a
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after it has been freed"):
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.nn.functional.interpolate(a, 4, mode="linear")
self.assertEqual(out.grad_fn._saved_output_size, (4,)) # c10::optional<IntArrayRef> -> int[]?
self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
self.assertEqual(out.grad_fn._saved_align_corners, False) # bool -> bool
self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
if hasattr(out.grad_fn, '_saved_scale_factors'):
self.assertIsNone(out.grad_fn._saved_scale_factors) # c10::optional<ArrayRef<double>> -> float[]?
else:
self.assertIsNone(out.grad_fn._saved_scales) # c10::optional<ArrayRef<double>> -> float[]?
out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
self.assertIsNone(out.grad_fn._saved_output_size)
self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,))
self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float)
a = torch.ones(2, 2, requires_grad=True)
out = torch.pdist(a, p=1)
self.assertEqual(out.grad_fn._saved_p, 1.) # double -> float
self.assertIsInstance(out.grad_fn._saved_p, float)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.logit(a, 1.)
self.assertEqual(out.grad_fn._saved_eps, 1.) # c10:optional<double> -> float?
self.assertIsInstance(out.grad_fn._saved_eps, float)
out = torch.logit(a)
self.assertIsNone(out.grad_fn._saved_eps)
if torch._C.has_lapack:
a = torch.ones(1, 1, requires_grad=True)
q, r = torch.linalg.qr(a, mode="reduced")
self.assertEqual(q.grad_fn._saved_mode, "reduced") # std::string -> str
a = torch.tensor([1.], requires_grad=True)
out = torch.div(a, 2., rounding_mode="trunc")
self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc") # c10::optional<std::string> -> str?
out = torch.div(a, 2., rounding_mode=None)
self.assertIsNone(out.grad_fn._saved_rounding_mode) # c10::optional<std::string> -> str?
x = torch.zeros(5, requires_grad=True)
out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex double) -> complex
cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex float) -> complex
out = torch.threshold(x, threshold=1., value=1.)
self.assertIsInstance(out.grad_fn._saved_threshold, float) # Scalar(floating point) -> float
out = torch.threshold(x, threshold=1, value=1)
self.assertIsInstance(out.grad_fn._saved_threshold, int) # Scalar(integral) -> int
out = torch.threshold(x, threshold=False, value=False)
self.assertIsInstance(out.grad_fn._saved_threshold, bool) # Scalar(bool) -> bool
a = torch.ones(2, 2, requires_grad=True)
out = a.as_strided((3,), (1,), 1)
self.assertEqual(out.grad_fn._saved_storage_offset, 1) # c10:optional<int64_t> -> int?
self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
out = a.as_strided((3,), (1,))
self.assertIsNone(out.grad_fn._saved_storage_offset)
a = torch.ones(2, requires_grad=True)
out = torch.tanh(a)
self.assertEqual(out, out.grad_fn._saved_result) # saved variable when output
a = torch.randn(3, 5, requires_grad=True)
b = torch.tensor([1, 0, 4])
loss = nn.NLLLoss()
out = loss(a, b)
self.assertIsNone(out.grad_fn._saved_weight)
loss = nn.NLLLoss(weight=torch.ones((5,)))
out = loss(a, b)
self.assertEqual(out.grad_fn._saved_weight, torch.ones((5,))) # c10:optional<Tensor> -> Tensor?
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_weight
def test_cant_create_saved_tensors(self):
with self.assertRaisesRegex(RuntimeError, "Trying to create a SavedTensor object from Python is forbidden"):
torch.autograd.SavedTensor()
def test_custom_function_saved_tensors(self):
def getFn(save=True):
class MyFn(Function):
@staticmethod
def forward(ctx, x):
if save:
ctx.save_for_backward(x, None)
return x
@staticmethod
def backward(ctx, g):
return g
return MyFn
a = torch.randn(5, requires_grad=True)
y = getFn(True).apply(a)
self.assertEqual((a, None), y.grad_fn.saved_tensors)
saved = y.grad_fn._raw_saved_tensors
self.assertIsInstance(saved[0], torch._C._autograd.SavedTensor)
# We can't tell whether the underlying tensor is None without unpacking it
self.assertIsInstance(saved[1], torch._C._autograd.SavedTensor)
# We catch that error when the user calls register_hooks on it
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
saved[1].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
saved[0].register_hooks(lambda x: x)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
saved[0].register_hooks(1, 1)
saved[0].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "already been set"):
saved[0].register_hooks(lambda x: x, lambda x: x)
y.sum().backward()
# Using a reference to the SavedTensor object after the
# saved variables have been released can lead to undefined behavior
del saved
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn._raw_saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn.saved_tensors
y = getFn(False).apply(a)
self.assertEqual(y.grad_fn.saved_tensors, ())
self.assertEqual(y.grad_fn._raw_saved_tensors, ())
def test_autograd_views_codegen(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these test will be BC-breaking and should be done carefully.
# This test checks the behavior of two codegen functions (view_as and unbind)
# with respect to view tracking and inplace operation on the output.
def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
def maybe_check_raise(fn, should_raise):
self.assertTrue(should_raise is None or isinstance(should_raise, str))
if should_raise is not None:
with self.assertRaisesRegex(RuntimeError, should_raise):
fn()
else:
fn()
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.view_as(inp)
# Are they differentiable views?
self.assertTrue(out._is_view() == is_view)
# Are inplace allowed?
maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.unbind()
# Are they differentiable views?
self.assertTrue(out[0]._is_view() == is_view)
self.assertTrue(out[1]._is_view() == is_view)
# Are inplace allowed?
maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])
# should_raise contains None if it should not raise
# should_raise contains a string of the error if it should raise
# The 3 elements are for view_as, first output of unbind and second output of unbind
run_test(grad_mode=True, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
inp_change_err = "Output {} of UnbindBackward0 is a view and is being modified inplace."
run_test(grad_mode=True, requires_grad=True, is_view=True,
should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
run_test(grad_mode=False, requires_grad=True, is_view=True,
should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
run_test(grad_mode=False, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
def test_inplace_not_requires_grad(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp):
return inp.view_as(inp)
@staticmethod
def backward(ctx, grad):
return grad
# Original Tensor does not require grad
a = torch.rand(1, 2)
# Tensor being written does require grad
b = torch.rand(1, requires_grad=True)
# Take an invalid view on 'a' that should raise an error (warns during deprecation)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a += b
# Extra test for copy_ that is a manual implementation and could be easily
# forgotten when the codegen is updated (warns during deprecation)
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a.copy_(b)
# Functions that should throw must properly throw
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = a.unbind()[0]
with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
"multiple views."):
view_a.copy_(b)
# Sanity check that views that should work still work
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
a.select(1, 0).copy_(b)
def _do_test_autograd_simple_views_python(self, dtype):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these test will be BC-breaking and should be done carefully.
# This checks the autograd.Function behavior when we return one or multiple outputs
# while one of these is an input, a view of an input or of a temporary tensor.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
# This indicator is used to check if the argument `ga` contains non-zero values
ga_nz = [False]
class IdOneOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a
@staticmethod
def backward(ctx, ga):
bw_called[0] += 1
return ga, None, None
class IdTwoOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
if ga.eq(0).all():
ga_nz[0] = False
else:
ga_nz[0] = True
return ga + gab, gab, None
class ViewOfTemp(Function):
@staticmethod
def forward(ctx, a, make_view):
ctx.save_for_backward(a)
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
b = a.clone()
return b.select(0, 0)
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, 0).copy_(grad)
return res, None
fn_id_to_inplace_on_view_err_msg = {
"one_output": ("Output 0 of IdOneOutputBackward is a view and is being "
"modified inplace. This view was created inside a custom Function"),
"two_output": ("Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
" This view is the output of a function that returns multiple views."),
"view_of_temp": ("Output 0 of ViewOfTempBackward is a view and is being "
"modified inplace. This view was created inside a custom Function")
}
for fn_id in ["one_output", "two_output", "view_of_temp"]:
for inplace in [True, False]:
for make_view in [True, False]:
# Used for special casing the tests below
output_is_a_view = (make_view or fn_id == "view_of_temp")
def fn(a, b):
# never modify a, b in-place for gradcheck
a = a.clone()
b = b.clone()
if fn_id == "two_output":
tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
if inplace:
tmp1 += 3
tmp2 += 3
else:
tmp1 = tmp1 + 3
tmp2 = tmp2 + 3
tmp = tmp1 * tmp2
else:
if fn_id == "one_output":
tmp = IdOneOutput.apply(a, b, make_view)
else:
tmp = ViewOfTemp.apply(a + b, make_view)
if inplace:
tmp += 3
else:
tmp = tmp + 3
return tmp.sum()
a = torch.ones(2, dtype=dtype, requires_grad=True)
b = torch.ones(2, dtype=dtype, requires_grad=True)
err_msg = fn_id_to_inplace_on_view_err_msg[fn_id]
if not inplace or not output_is_a_view:
gradcheck(fn, (a, b), check_batched_grad=False)
# Was the custom backward called properly
bw_called[0] = 0
ga_nz[0] = True # For the case where the backward is called
if inplace and output_is_a_view:
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(a, b)
else:
fn(a, b).backward()
expected_called = 1
expected_ga_nz = True
if output_is_a_view and inplace:
expected_called = 0
self.assertTrue(bw_called[0] == expected_called)
self.assertTrue(ga_nz[0] == expected_ga_nz)
def test_autograd_simple_views_python(self):
self._do_test_autograd_simple_views_python(torch.double)
self._do_test_autograd_simple_views_python(torch.cdouble)
def test_autograd_inplace_views_creation_meta(self):
# Tests creation_meta properly handled for inplace views
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, x):
return x
view_custom = Func.apply
def run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2):
# This test checks the behavior of inplace-view functions when
# the views are created in grad mode or not
base = torch.rand(2, 3, requires_grad=requires_grad).clone()
# 1. Create a view with `grad_mode=grad_mode_view`
with torch.set_grad_enabled(grad_mode_view):
if fn_type == "multi_view":
inp = base.unbind()[0]
elif fn_type == "custom" :
inp = view_custom(base)
else:
inp = base.view_as(base)
# 2. Perform inplace view with `grad_mode=grad_mode_iview`
with torch.set_grad_enabled(grad_mode_iview):
if error1 is not None:
with self.assertRaisesRegex(RuntimeError, error1):
fn(inp)
return
else:
# If error is None, check that runs without error
fn(inp)
# 3. Do inplace on the (new) view
if error2 is not None:
with self.assertRaisesRegex(RuntimeError, error2):
inp.add_(1)
else:
# If error is None, check that runs without error
inp.add_(1)
no_grad_err = "A view was created in no_grad mode"
multi_view_err = "function that returns multiple views"
custom_err = "view was created inside a custom Function"
def run_tests(fn):
for fn_type in ("normal", "multi_view", "custom"):
for grad_mode_view in (True, False):
for grad_mode_iview in (True, False):
for requires_grad in (True, False):
error1 = None # expected error when we do inplace_view on original view
error2 = None # expected error when we do inplace on the resulting view
if requires_grad:
if not grad_mode_view and grad_mode_iview:
error1 = no_grad_err
if not grad_mode_view and not grad_mode_iview:
error2 = no_grad_err
if fn_type == "multi_view":
if grad_mode_view and grad_mode_iview:
error1 = multi_view_err
if grad_mode_view and not grad_mode_iview:
error2 = multi_view_err
if fn_type == "custom":
if grad_mode_view and grad_mode_iview:
error1 = custom_err
if grad_mode_view and not grad_mode_iview:
error2 = custom_err
run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2)
# This list was created by logging gen_inplace_or_view_type.py
# detach_ is excluded for this test because it cannot be applied to
# views and thus does not return a view
run_tests(lambda v: v.as_strided_((1, 0), (2, 2)))
run_tests(lambda v: v.transpose_(0, 0))
run_tests(lambda v: v.t_())
run_tests(lambda v: v.squeeze_(0))
run_tests(lambda v: v.unsqueeze_(0))
run_tests(lambda v: v.swapdims_(0, 0))
run_tests(lambda v: v.swapaxes_(0, 0))
# TODO This is not the correct behavior -
# See https://github.com/pytorch/pytorch/issues/49825#issuecomment-794466627
def test_autograd_inplace_views_cross_dtype(self):
# This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b = b.transpose(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
non_inplace_grad = a_orig.grad
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b.transpose_(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
inplace_grad = a_orig.grad
# TODO: this is a bug!
# once this is fixed, it should have the transpose removed:
# self.assertEqual(non_inplace_grad, inplace_grad)
self.assertEqual(non_inplace_grad.T, inplace_grad)
def test_autograd_multiple_views_python(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these test will be BC-breaking and should be done carefully.
# This checks that multiple views in the forward are properly traced and how they
# behave with respect to inplace operations.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
class ComplexView(Function):
@staticmethod
def forward(ctx, a, idx):
res = a.narrow(0, idx, 1)
res = a.select(0, idx)
ctx.save_for_backward(a)
ctx.idx = idx
return res
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, ctx.idx).copy_(grad)
return res, None
a = torch.ones(2, requires_grad=True)
idx = 1
bw_called[0] = 0
out = ComplexView.apply(a.clone(), idx)
out.sum().backward()
self.assertTrue(bw_called[0] == 1)
out = ComplexView.apply(a.clone(), idx)
with self.assertRaisesRegex(RuntimeError,
"Output 0 of ComplexViewBackward is a view and is being modified inplace"):
out += 1
def test_autograd_python_custom_function_inplace(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these test will be BC-breaking and should be done carefully.
# This test checks custom autograd.Function that perform inplace operations
bw_called = [0]
# I) Single output
class MyAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
# No extra inplace
c = MyAdder.apply(a.clone(), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c = MyAdder.apply(a.clone(), b)
c += 2
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
bw_called[0] = 0
c = MyAdder.apply(a.clone().view_as(a), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# Should not give non-inputs to mark_dirty
class MyAdderBad(Function):
@staticmethod
def forward(ctx, a, b):
c = 3 * a
c.add_(b)
ctx.mark_dirty(c)
return c
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
grad = 3 * grad
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
MyAdderBad.apply(a.clone(), b)
self.assertEqual(len(w), 1)
# II) Multiple outputs
class MyBadAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + gab
# No extra inplace
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
c += 2
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor"
with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
c, d = MyBadAdder.apply(a.clone().view_as(a), b)
# III) Inplace + other op
class MyOutPlaceAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a.clone(), a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + 2 * gab
# We don't reuse the input
def fn(a, b):
orig_a = a.clone().view_as(a)
c, d = MyOutPlaceAdder.apply(orig_a, b)
return (c * d).sum()
bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
fn(a, b)
def test_named_tensor_for_complex_views(self):
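# Gradients should flow back through view_as_complex when the inputs/outputs carry dimension names.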
names = ["batch", "height", "width", "complex"]
z = torch.ones((5, 12, 14, 2), requires_grad=True)
z_named = z.refine_names(*names)
z_complex = torch.view_as_complex(z_named.rename(None)).refine_names(*names[:-1])
z_complex.sum().backward()
self.assertEqual(z.grad, torch.view_as_real(torch.ones_like(z_complex).rename(None)))
def test_custom_function_return_view_in_nograd(self):
class Alias(Function):
@staticmethod
def forward(ctx, x):
return x[:]
@staticmethod
def backward(ctx, gx):
return gx
inp = torch.rand(2, requires_grad=True)
with torch.no_grad():
output = Alias.apply(inp)
with torch.no_grad():
expected_output = inp[:]
# Calling the custom function should operate as if we called an equivalent op
self.assertEqual(output.requires_grad, expected_output.requires_grad)
# Check that in-place modification on view throws
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
output.zero_()
def test_grad_mode_restored_reentrant(self):
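# The grad mode active when backward was called should be preserved (and restored) inside the reentrant backward.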
class MyFunction(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, go):
original = torch._C.is_grad_enabled()
with torch.enable_grad():
self.assertTrue(torch._C.is_grad_enabled())
foo = torch.rand(go.size(), requires_grad=True)
grad, = torch.autograd.grad(
foo ** 3, foo, grad_outputs=go
)
self.assertTrue(torch._C.is_grad_enabled())
self.assertTrue(torch._C.is_grad_enabled() == original)
return grad
inp = torch.rand(3, requires_grad=True)
# Case where original==False
MyFunction.apply(inp).sum().backward()
# Case where original==True
MyFunction.apply(inp).sum().backward(create_graph=True)
def test_power_function(self):
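# Checks pow backward w.r.t. the exponent when the base is zero, for both a tensor base and a Python-scalar base.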
a = torch.tensor([0., 0., 0.])
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(a**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
s = 0
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(s**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
def test_custom_function_error(self):
class BadFw(Function):
@staticmethod
def backward(ctx, foo):
return foo
class BadBw(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
class BadBw2(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
@staticmethod
def backward(ctx, foo):
return foo
@staticmethod
def vjp(ctx, foo):
return foo
class BadJvp(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
inp = torch.rand(1, requires_grad=True)
with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
BadFw.apply(inp)
with self.assertRaisesRegex(RuntimeError, "must implement either the backward"):
BadBw.apply(inp).sum().backward()
with self.assertRaisesRegex(RuntimeError, "Implementing both 'backward' and 'vjp'"):
BadBw2.apply(inp).sum().backward()
with self.assertRaisesRegex(RuntimeError, "must implement the jvp function"):
with fwAD.dual_level():
d = fwAD.make_dual(inp, torch.rand_like(inp))
res = BadJvp.apply(d)
def test_custom_function_forward_mode_view_checks(self):
flag_to_error = {
"ok": None,
"not_a_view": "jvp is not returning a view",
"not_a_view_of_inp": "jvp is not returning a view of the given",
"not_a_view_of_inp_base": "jvp is not returning a view of the same base",
}
class ViewFn(Function):
@staticmethod
def forward(ctx, foo, flag):
ctx.flag = flag
ctx.size = foo.size()
return foo.narrow(0, 0, 2)
@staticmethod
def vjp(ctx, gO):
gI = gO.new_zeros(ctx.size)
gI.narrow(0, 0, 2).copy_(gO)
return gI, None
@staticmethod
def jvp(ctx, gI, _):
res = gI.narrow(0, 0, 2)
if ctx.flag != "ok":
# Break the view in the gradients!
res = res.clone()
if ctx.flag in ["not_a_view_of_inp", "not_a_view_of_inp_base"]:
# Result should be a view, just of the wrong thing
res = res.view_as(res)
return res
inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
for flag, msg in flag_to_error.items():
def test_fn(inp):
if flag == "not_a_view_of_inp_base":
inp = inp.view_as(inp)
return ViewFn.apply(inp, flag)
if msg is None:
gradcheck(test_fn, inp, check_forward_ad=True)
else:
with self.assertRaisesRegex(RuntimeError, msg):
gradcheck(test_fn, inp, check_forward_ad=True)
def test_custom_function_forward_mode_inplace_checks(self):
class InplaceFn(Function):
@staticmethod
def forward(ctx, foo, flag):
ctx.mark_dirty(foo)
ctx.flag = flag
foo.mul_(2)
return foo
@staticmethod
def vjp(ctx, gO):
return 2 * gO, None
@staticmethod
def jvp(ctx, gI, _):
if ctx.flag:
# Don't do the change inplace
return 2 * gI
else:
gI.mul_(2)
return gI
inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
def test_fn(inp, flag):
inp = inp.clone()
return InplaceFn.apply(inp, flag)
gradcheck(test_fn, (inp, False), check_forward_ad=True)
with self.assertRaisesRegex(RuntimeError, "inplace custom Function is not modifying the forward mode gradients inplace"):
gradcheck(test_fn, (inp, True), check_forward_ad=True)
def test_custom_function_forward_mode_wrong_formula(self):
class UserFn(Function):
@staticmethod
def forward(ctx, foo, should_fail):
ctx.should_fail = should_fail
return foo * 2
@staticmethod
def vjp(ctx, gO):
return 2 * gO, None
@staticmethod
def jvp(ctx, gI, _):
if ctx.should_fail:
# Wrong gradient formula
return 3 * gI
else:
return 2 * gI
inp = torch.rand(10, dtype=torch.double, requires_grad=True)
gradcheck(UserFn.apply, (inp, False), check_forward_ad=True)
with self.assertRaisesRegex(RuntimeError, "Jacobian computed with forward mode mismatch for output 0"):
gradcheck(UserFn.apply, (inp, True), check_forward_ad=True)
def test_custom_function_forward_mode_non_tensor_before_tensor_args(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, nt, x, nt2, y):
return x * 2 + y * 3
@staticmethod
def jvp(ctx, nt, x_t, nt2, y_t):
self.assertIsNone(nt)
self.assertIsNone(nt2)
return x_t * 2 + y_t * 3
x = torch.tensor(1., dtype=torch.double)
t = torch.tensor(1., dtype=torch.double)
y = torch.tensor(1., dtype=torch.double)
with fwAD.dual_level():
dual_x = fwAD.make_dual(x, t)
MyFn.apply(1, dual_x, 1, y)
gradcheck(MyFn.apply, (1, x.requires_grad_(True), 1, y.requires_grad_(True)), check_forward_ad=True,
check_backward_ad=False, check_batched_grad=False)
def test_custom_function_forward_mode_forward_is_no_op(self):
error_regex = "A custom Function's forward is returning a view \\(or an input as-is\\)"
return_lambdas = {
# If we return an input as-is in forward, that is treated
# as if self.view_as(self) is performed. If jvp returns x.view_as(x),
# this is OK.
"view_as": lambda x: x.view_as(x),
# Expect this to raise an error
"self": lambda x: x,
# Expect this to raise the same error
"mul_by_2": lambda x: x * 2,
}
for k, fn in return_lambdas.items():
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
return x + y, x
@staticmethod
def vjp(ctx, gO1, gO2):
return gO1 + gO2, gO1
@staticmethod
def jvp(ctx, x_t, y_t):
return x_t + y_t, fn(x_t)
a = torch.tensor(1., dtype=torch.double, requires_grad=True)
t = torch.tensor(1., dtype=torch.double)
b = torch.tensor(1., dtype=torch.double, requires_grad=True)
c = torch.tensor(1., dtype=torch.double)
t2 = torch.tensor(1., dtype=torch.double)
d = torch.tensor(1., dtype=torch.double)
with fwAD.dual_level():
a_dual = fwAD.make_dual(a, t)
c_dual = fwAD.make_dual(c, t2)
if k == "view_as":
_, out2 = MyFn.apply(a_dual, b)
self.assertTrue(fwAD.unpack_dual(out2).tangent._base is t)
_, out2 = MyFn.apply(c_dual, d)
self.assertTrue(fwAD.unpack_dual(out2).tangent._base is t2)
else:
with self.assertRaisesRegex(RuntimeError, error_regex):
MyFn.apply(a_dual, b)
with self.assertRaisesRegex(RuntimeError, error_regex):
MyFn.apply(c_dual, d)
if k == "view_as":
gradcheck(MyFn.apply, (a, c), check_forward_ad=True)
else:
with self.assertRaisesRegex(RuntimeError, error_regex):
gradcheck(MyFn.apply, (a, c), check_forward_ad=True)
def test_custom_function_save_for_forward(self):
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
ctx.save_for_backward(x, y)
ctx.save_for_forward(x, y)
ctx.z = z
ctx.prod = x * y
return z * ctx.prod
@staticmethod
def jvp(ctx, x_t, y_t, _):
x_p, y_p = ctx.saved_tensors
z = ctx.z
return z * (y_p * x_t + x_p * y_t)
@staticmethod
def vjp(ctx, grad_out):
x, y = ctx.saved_tensors
z = ctx.z
return z * grad_out * y, z * grad_out * x, None
a = torch.tensor(1., requires_grad=True, dtype=torch.double)
t = torch.tensor(1., dtype=torch.double)
b = torch.tensor(2., requires_grad=True, dtype=torch.double)
c = 4
with fwAD.dual_level():
a_dual = fwAD.make_dual(a, t)
out = Func.apply(a_dual, b, c)
out.backward()
gradcheck(Func.apply, (a, b, c), check_forward_ad=True)
# When saved for backward, but not saved for forward
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor):
ctx.save_for_backward(x)
return x.clone()
@staticmethod
def jvp(ctx, x_t):
self.assertEqual(len(ctx.saved_tensors), 0)
return x_t
@staticmethod
def vjp(ctx, grad_out):
x, = ctx.saved_tensors
self.assertEqual(len(ctx.saved_tensors), 1)
return grad_out
with fwAD.dual_level():
a_dual = fwAD.make_dual(a, t)
out = Func.apply(a_dual)
out.backward()
gradcheck(Func.apply, (a,), check_forward_ad=True)
def test_custom_function_local_inplace(self):
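# An in-place op on a view created locally inside forward should not change the output's grad_fn (still MyFnBackward).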
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp, inplace):
view = inp.clone()[:3]
if inplace:
view += 2
return view
@staticmethod
def backward(ctx, grad):
return grad, None
base = torch.rand(10, requires_grad=True)
foo = MyFn.apply(base, False)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
foo = MyFn.apply(base, True)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
def test_integer_outputs(self):
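# Integer-valued outputs (argmax/argmin/argsort, searchsorted, bucketize, count_nonzero, unique*) must never require grad.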
inp = torch.rand(4, requires_grad=True)
out = inp.argmax()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argmin()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argsort()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.rand((), requires_grad=True)
out = torch.searchsorted(inp, val)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
bins = torch.linspace(0, 1.0, steps=100, requires_grad=True)
vals = torch.rand(5, 5, requires_grad=True)
out = torch.bucketize(vals, bins)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.empty(5).requires_grad_()
out = val.count_nonzero()
self.assertFalse(out.requires_grad)
def assert_only_first_requires_grad(res):
if not isinstance(res, tuple):
res = (res,)
self.assertTrue(res[0].requires_grad)
for out in res[1:]:
if out is not None:
self.assertFalse(out.requires_grad)
for sort in [True, False]:
for return_inverse in [True, False]:
for return_counts in [True, False]:
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
# Here we test the internal functions to make sure all of them are
# covered on top of the public API
res = torch._unique(inp, sorted=sort, return_inverse=return_inverse)
assert_only_first_requires_grad(res)
# This looks public but is actually manually deleted from the
# torch namespace in torch/functional.py
res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
# We don't test `unique_dim_consecutive` here.
# It looks public but the python binding is actually manually disabled in
# tools/autograd/gen_python_functions.py
res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
def test_custom_function_cycle(self):
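# The ctx of a custom Function can form a reference cycle (output stored in ctx.meta); gc should still collect it.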
class MyFn(Function):
@staticmethod
def forward(ctx, x, metadata):
x = x.clone()
ctx.meta = metadata
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
self.assertEqual(x, 3.14)
self.assertEqual(ctx.meta["foo"], 3.14)
return gO * x, None
def get_refs(with_backward):
a = torch.tensor(3.14, requires_grad=True)
metadata = {}
out = MyFn.apply(a, metadata)
metadata["foo"] = out
if with_backward:
out.sum().backward()
self.assertEqual(a.grad, a)
return torch._C._WeakTensorRef(out)
with disable_gc():
ref = get_refs(False)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
# The backward clears the saved_variables but not the __dict__
with disable_gc():
ref = get_refs(True)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
def test_create_graph_and_full_backward_hook_cycle(self):
# If BackwardHook saves grad_output, it can create a cycle when we perform backward
# with create_graph=True
#
# grad_output -> grad_output.grad_fn -> graph -> hook -> grad_output
#
class TestCls():
# Dummy class for the purpose of creating a weakref
pass
def get_ref(input_requires_grad, nb_hooks):
t = torch.randn(10, requires_grad=input_requires_grad)
a = torch.tensor(1., requires_grad=True)
class Test(nn.Module):
def forward(self, x):
return x ** 2 * a ** 2
mod = Test()
for _ in range(nb_hooks):
mod.register_full_backward_hook(lambda a, b, c: None)
tmp = mod(t)
# Save dummy object to graph and get a weak ref to it
test = TestCls()
ref = weakref.ref(test)
tmp.grad_fn.metadata["a"] = test
with set_warn_always_context(True):
with warnings.catch_warnings(record=True) as w:
tmp.exp().sum().backward(create_graph=True)
self.assertTrue(len(w) == 1)
self.assertTrue("Using backward() with create_graph=True" in str(w[0].message))
# Remove the backward + create_graph=True cycle
a.grad = None
t.grad = None
return ref
for nb_hooks in (1, 2, 3):
for input_requires_grad in (True, False):
ref_ = get_ref(
input_requires_grad=input_requires_grad,
nb_hooks=nb_hooks,
)
gc.collect()
self.assertIsNone(ref_())
def test_input_buffer_accum(self):
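# When dense and sparse gradients are accumulated for the same leaf, the user-provided gradients must not be modified in place.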
leaf = torch.rand(2, 2, requires_grad=True)
# An op that returns sparse gradients
ind = torch.tensor([[0, 0]], dtype=torch.long)
out2 = leaf.gather(0, ind, sparse_grad=True)
# An op that returns the gradients as-is
out1 = leaf.clone()
grad_out1_original = torch.rand_like(out1)
grad_out1 = grad_out1_original.clone()
grad_out2 = torch.rand_like(out2)
torch.autograd.backward((out1, out2), (grad_out1, grad_out2))
# Given gradients should not be modified inplace
self.assertEqual(grad_out1, grad_out1_original)
def test_no_unnecessary_unwrapping(self):
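# Leaves and intermediates saved as inputs should be stored as the same Python object; saved outputs are distinct objects with equal values.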
a = torch.randn(5, requires_grad=True)
a_orig = a.detach().clone()
b = a * a
c = a * b
d = torch.exp(a)
# a is leaf
self.assertIs(b.grad_fn._saved_self, a)
self.assertIs(b.grad_fn._saved_other, a)
self.assertIs(c.grad_fn._saved_self, a)
# b is not an output
self.assertIs(c.grad_fn._saved_other, b)
# d is an output
self.assertEqual(d.grad_fn._saved_result, d)
self.assertIsNot(d.grad_fn._saved_result, d)
c.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
c.grad_fn._saved_self
# a is left untouched
self.assertEqual(a, a_orig)
def test_saved_variable_version_counter(self):
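# The tensor unpacked from a SavedVariable shares its version counter with the original, even after no_grad in-place updates.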
a = torch.rand(2, requires_grad=True)
b = torch.exp(a)
b_unpacked = b.grad_fn._saved_result
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
with torch.no_grad():
b += 1
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
def test_saved_variable_packing_unpacking_saved_original_with_hooks(self):
# Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
def test(get_input, is_leaf):
a = get_input()
grad_fn = a.grad_fn
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x / 2)
self.assertEqual(a, y.grad_fn._saved_self)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
y.sum().backward()
else:
y.sum().backward()
self.assertEqual(2 * a, a.grad)
a = get_input()
grad_fn = a.grad_fn
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x)
self.assertEqual(2 * a, y.grad_fn._saved_self)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
y.sum().backward()
else:
y.sum().backward()
self.assertEqual(3 * a, a.grad)
# double backward
a = get_input()
grad_fn = a.grad_fn
y = a ** 3
y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
g.sum().backward()
else:
g.sum().backward()
self.assertEqual(6 * a, a.grad)
a = get_input()
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: 1)
with self.assertRaisesRegex(TypeError, "Output of saved tensor unpack_hook expected to be a Tensor"):
print(y.grad_fn._saved_self)
a = get_input()
y = a * a
with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
y.grad_fn._raw_saved_self.register_hooks(lambda x, b: x, lambda x: x)
a = get_input()
y = a * a
with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
y.grad_fn._raw_saved_self.register_hooks(lambda x, b: (x, b), lambda x: x)
def inplace_double(x):
x *= 2
return x
a = get_input()
t = a * a
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
t.grad_fn._raw_saved_self.register_hooks(inplace_double, lambda x: x / 2)
# leaf
test(lambda: torch.randn(5, requires_grad=True), True)
# not leaf, not output
test(lambda: (1 + torch.randn(5, requires_grad=True)), False)
def test_saved_variable_packing_unpacking_did_not_save_original_with_hooks(self):
# Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
a = torch.randn(5, requires_grad=True)
y = torch.exp(a)
y.grad_fn._raw_saved_result.register_hooks(lambda x: x, lambda x: x)
self.assertEqual(y, y.grad_fn._saved_result)
self.assertIs(y.grad_fn, y.grad_fn._saved_result.grad_fn)
y.sum().backward()
self.assertEqual(a.grad, y)
def test_saved_variable_packing_unpacking_saved_original_with_default_hooks(self):
# Tests that default hooks are properly registered, used and reset
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
# See also:
# - test_saved_variable_packing_unpacking_saved_original_with_hooks
def pack(x):
warnings.warn("pack")
return x
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
a = torch.ones(5, requires_grad=True)
warnings.simplefilter('always')
with warnings.catch_warnings(record=True) as w:
y = a * a
# should raise two warnings from a being saved twice
self.assertEqual(len(w), 2)
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x / 2):
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(2 * a, y.grad_fn._saved_self)
self.assertEqual(2 * a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(4 * a, a.grad)
# Exited hooks correctly
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
def test_saved_variable_packing_unpacking_did_not_save_original_with_default_hooks(self):
# See also test_saved_variable_packing_unpacking_did_not_save_original_with_hooks
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = torch.exp(a)
self.assertEqual(y, y.grad_fn._saved_result)
y.sum().backward()
self.assertEqual(a.grad, y)
def test_setting_default_saved_variable_hooks_twice_should_not_fail(self):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
pass
def test_setting_default_saved_variable_hooks_twice_should_use_inner(self):
with torch.autograd.graph.saved_tensors_hooks(lambda x: 3 * x, lambda x: 3 * x):
b = torch.randn(5, requires_grad=True)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 5 * x, lambda x: 5 * x):
a = torch.randn(5, requires_grad=True)
y = a * a
z = b * b
y.sum().backward()
z.sum().backward()
self.assertEqual(2 * 5 * 5 * a, a.grad)
self.assertEqual(2 * 3 * 3 * b, b.grad)
def test_save_on_cpu_and_checkpoint(self):
a = torch.randn(2, 2, requires_grad=True)
b = a.pow(2).pow(2).pow(2).pow(2)
b.sum().backward()
b_grad = a.grad.clone()
a.grad.zero_()
with torch.autograd.graph.save_on_cpu():
h = a.pow(2)
h = checkpoint(lambda x: x.pow(2).pow(2), h, use_reentrant=False)
c = h.pow(2)
c.sum().backward()
c_grad = a.grad.clone()
a.grad.zero_()
def f(a):
h = a.pow(2)
with torch.autograd.graph.save_on_cpu():
h = h.pow(2).pow(2)
return h.pow(2)
d = checkpoint(f, a, use_reentrant=False)
d.sum().backward()
d_grad = a.grad.clone()
self.assertEqual(b_grad, c_grad)
self.assertEqual(b_grad, d_grad)
def test_pack_hook_with_inplace_modification_should_fail(self):
a = torch.randn(5, requires_grad=True)
def inc(x):
x += 1
return x
with torch.autograd.graph.saved_tensors_hooks(inc, lambda x: x):
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
y = torch.exp(a)
y = torch.exp(a)
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
y.grad_fn._raw_saved_result.register_hooks(inc, lambda x: x)
def test_saving_variable_to_disk(self):
with tempfile.TemporaryDirectory() as tmp_dir:
def pack(x):
name = os.path.join(tmp_dir, str(uuid.uuid4()))
torch.save(x, name)
return name
def unpack(name):
return torch.load(name)
with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
a = torch.ones(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
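# Illustrative sketch (not part of the original test suite): besides offloading to
# disk as above, saved_tensors_hooks can trade precision for memory by storing the
# saved tensors in half precision and casting back on unpack. The helper name and
# the specific dtype choice are assumptions for illustration only.
def _example_half_precision_saved_tensors():
    def pack(x):
        # pack_hook may return any Python object; keep the original dtype
        # alongside the downcast tensor so it can be restored on unpack.
        return x.dtype, x.to(torch.half)

    def unpack(packed):
        dtype, x = packed
        return x.to(dtype)

    a = torch.randn(5, requires_grad=True)
    with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
        y = (a * a).sum()
    y.backward()
    return a.grad  # approximately 2 * a, up to half-precision rounding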
def test_default_saved_variable_hooks_double_backward(self):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
self.assertEqual(6 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
# factor 2 because only a is saved once
self.assertEqual(6 * 2 * a, a.grad)
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
# factor 4 because pow_backward is grad * (exp * self.pow(exp - 1))
# so grad is saved and self (i.e. a) is saved
self.assertEqual(6 * 4 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
# combining the two above blocks: 2 * 4 = 8
# note that in that sense, a is saved twice
self.assertEqual(6 * 8 * a, a.grad)
def test_graph_save_on_cpu(self):
def test(get_input, cuda, pin_memory):
with torch.autograd.graph.save_on_cpu(pin_memory):
a = get_input()
if cuda:
a.cuda()
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
self.assertEqual(a.dtype, y.grad_fn._saved_self.dtype)
self.assertEqual(a.layout, y.grad_fn._saved_self.layout)
if y.is_sparse:
y = y.to_dense()
y.sum().backward()
actual = 2 * a
expected = a.grad
if a.is_sparse:
actual = actual.coalesce()
expected = expected.coalesce()
self.assertEqual(actual, expected)
for cuda in [False] + ([True] if torch.cuda.is_available() else []):
for pin_memory in [True, False]:
# FloatTensor
test(lambda: torch.randn(5, requires_grad=True), cuda, pin_memory)
# DoubleTensor
test(lambda: torch.randn(5, requires_grad=True, dtype=torch.double), cuda, pin_memory)
# Sparse tensor
x = torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.]), requires_grad=True)
test(lambda: x, cuda, pin_memory)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_graph_save_on_cpu_cuda(self):
def f(x):
a = x + 1
return a * a
# with grad
a = torch.ones(1, requires_grad=True, device="cuda")
y = f(a)
memory_with_grad = torch.cuda.memory_allocated()
del a
del y
# without grad
a = torch.ones(1, requires_grad=True, device="cuda")
with torch.no_grad():
y = f(a)
memory_without_grad = torch.cuda.memory_allocated()
self.assertGreater(memory_with_grad, memory_without_grad)
del a
del y
# with hooks
with torch.autograd.graph.save_on_cpu():
a = torch.ones(1, requires_grad=True, device="cuda")
y = f(a)
memory_with_hooks = torch.cuda.memory_allocated()
self.assertEqual(memory_with_hooks, memory_without_grad)
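# Illustrative sketch (not part of the original test suite): save_on_cpu is typically
# wrapped around the forward pass of a model so that activations saved for backward
# live in (optionally pinned) CPU memory rather than GPU memory, as measured above.
# `model` and `inp` below are hypothetical placeholders for any module and input.
def _example_save_on_cpu_offload(model, inp):
    with torch.autograd.graph.save_on_cpu(pin_memory=True):
        loss = model(inp).sum()
    # Saved activations are copied back to the device lazily during backward.
    loss.backward()
    return loss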
def test_pynode_destruction_deadlock(self):
script = """
import torch
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, gO):
return gO.clone()
def get_out():
inp = torch.rand(2, requires_grad=True)
# The python function is first so that it runs
# last in the backward pass
right = Foo.apply(inp)
# An op that creates new memory
left1 = inp.clone()
# An op that saves its input
left2 = left1 ** 2
# Inplace modify so that the backward for
# left2 always raises an error
left1 += 1
# An op that takes both sides as input.
# After running, both sides' last ops will be in
# the ready queue
# And the op for left will run first as it was
# executed last during the forward
out = left2 + right
return out
# Nothing here should be a global variable because, from what
# I can see, python leaks all the global objects
get_out().sum().backward()
# This used to deadlock when the PyNode is being destroyed after
# the error is raised.
"""
try:
subprocess.check_output(
[sys.executable, '-c', script],
stderr=subprocess.STDOUT,
# On Windows, opening the subprocess with the default CWD makes `import torch`
# fail, so just set CWD to this script's directory
cwd=os.path.dirname(os.path.realpath(__file__)),
# It is ok to have an extra long timeout here as a timeout means the test failed
timeout=20)
except subprocess.TimeoutExpired as e:
self.fail(msg="Example code timed out! See the code sample in the test for details.")
except subprocess.CalledProcessError as e:
err_msg = "RuntimeError: one of the variables needed for gradient computation"
self.assertTrue(err_msg in e.output.decode("utf-8"))
def index_perm_variable(shape, max_indices):
if not isinstance(shape, tuple):
shape = (shape,)
index = torch.randperm(max_indices).narrow(0, 0, reduce(mul, shape)).view(shape)
return index
def bernoulli_scalar():
return torch.tensor(0, dtype=torch.uint8).bernoulli_()
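# Illustrative sketch (not part of the original test suite): the forward-mode AD tests
# below revolve around dual tensors. A Jacobian-vector product (JVP) is computed by
# packing a primal/tangent pair with fwAD.make_dual, running the function inside a
# dual_level, and unpacking the tangent of the result. The helper name is made up.
def _example_forward_mode_jvp():
    primal = torch.randn(3)
    tangent = torch.randn(3)
    with fwAD.dual_level():
        dual = fwAD.make_dual(primal, tangent)
        out = torch.sin(dual)
        _, jvp = fwAD.unpack_dual(out)
    # d/dx sin(x) = cos(x), so the JVP equals torch.cos(primal) * tangent.
    return jvp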
class TestAutogradForwardModeBatchedGrad(TestCase):
def test_out_of_place_basic(self):
a = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
b = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
self.assertTrue(gradcheck(torch.sin, a, check_forward_ad=True, check_batched_grad=True,
check_batched_forward_grad=True))
self.assertTrue(gradcheck(torch.add, (a, b), check_forward_ad=True, check_batched_grad=True,
check_batched_forward_grad=True))
def test_out_of_place_not_same_layout(self):
input = torch.zeros([2, 2]).transpose(0, 1)
tangent = torch.zeros([2, 2, 2])
def jvp(tangent):
with fwAD.dual_level():
x = fwAD.make_dual(input, tangent)
return fwAD.unpack_dual(x)[1]
x_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
self.assertIsNot(x_tangent, tangent)
def test_inplace_on_view_same_layout(self):
input = torch.zeros([2, 2])
tangent = torch.zeros([2, 2, 2])
base = torch.zeros([2, 2])
view = base.view_as(base)
def jvp(tangent):
with fwAD.dual_level():
x = fwAD.make_dual(input, tangent)
view.copy_(x)
return fwAD.unpack_dual(x)[1], fwAD.unpack_dual(view)[1], fwAD.unpack_dual(view._base)[1]
x_tangent, view_tangent, base_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
self.assertFalse(view_tangent._is_view()) # Optimization to share the same tensor!
self.assertIs(view_tangent, base_tangent)
self.assertIs(x_tangent, tangent)
def test_inplace_on_view_not_same_layout(self):
input = torch.zeros([2, 2])
tangent = torch.zeros([2, 2, 2])
view = torch.zeros([2, 2]).transpose(0, 1)
def jvp(tangent):
with fwAD.dual_level():
x = fwAD.make_dual(input, tangent)
view.copy_(x)
return fwAD.unpack_dual(x)[1], fwAD.unpack_dual(view)[1], fwAD.unpack_dual(view._base)[1]
x_tangent, view_tangent, base_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
self.assertIs(view_tangent._base, base_tangent)
self.assertIs(x_tangent, tangent)
self.assertIsNot(view_tangent, tangent)
def test_metadata_check_for_storage_numel_skipped(self):
# See: test_metadata_check_checks_storage_numel for the reverse of this test
primal = torch.randn(5)[:4].detach()
self.assertEqual(len(primal.storage()), 5)
tangent = torch.randn(10, 4)
def jvp(tangent):
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
_, unpacked_tangent = fwAD.unpack_dual(dual)
# No copy is made
self.assertIs(tangent, unpacked_tangent)
# as_strided raises
with self.assertRaisesRegex(RuntimeError, "can access memory outside of `tensor`"):
dual.as_strided((5,), (1,), 0)
return unpacked_tangent
torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
class TestAutogradForwardMode(TestCase):
def tearDown(self):
# Ensure that a failing test won't make others fail
while fwAD._current_level >= 0:
fwAD.exit_dual_level()
super().tearDown()
def test_forward_level_cleanup(self):
def get_tensor_and_weak_ref():
# Create a new Tensor and weak reference
t = torch.rand(2, requires_grad=True)
return t, torch._C._WeakTensorRef(t)
# Sanity check that the helper function works as expected
t, t_ref = get_tensor_and_weak_ref()
self.assertFalse(t_ref.expired())
del t
self.assertTrue(t_ref.expired())
# Main test code
foo = torch.rand(2)
with fwAD.dual_level():
tangent, tangent_ref = get_tensor_and_weak_ref()
self.assertFalse(tangent_ref.expired())
dual = fwAD.make_dual(foo, tangent)
self.assertFalse(tangent_ref.expired())
# Make sure that the tangent we provided has been re-used as is
self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent)
# Make sure that dual is keeping the tangent alive
del tangent
self.assertFalse(tangent_ref.expired())
# Make sure that the dual level does not keep the c++
# version of the tangent alive
del dual
self.assertTrue(tangent_ref.expired())
def test_size_check(self):
foo = torch.rand(2)
tangent = torch.rand(3)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"):
dual = fwAD.make_dual(foo, tangent)
dual = fwAD.make_dual(foo, tangent[1:])
def test_metadata_check_checks_storage_numel(self):
primal = torch.randn(5)[:4].detach()
self.assertEqual(len(primal.storage()), 5)
tangent = torch.randn(4)
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
_, unpacked_tangent = fwAD.unpack_dual(dual)
# Verify that mutating unpacked tangent does not affect the original tangent
tangent_clone = tangent.clone()
unpacked_tangent *= 2
self.assertTrue(torch.allclose(tangent_clone, tangent))
# as_strided runs without error
dual.as_strided((5,), (1,), 0)
def test_metadata_check_checks_ignores_size_zero(self):
a = torch.ones(0).as_strided((0, 1,), (1, 1,), 0)
b = torch.ones(0).as_strided((0, 1,), (1, 0,), 0)
with fwAD.dual_level():
dual = fwAD.make_dual(a, b)
torch.diagonal(dual, offset=0)
input = torch.rand([0, 1], dtype=torch.complex128, requires_grad=True)
func = partial(torch.diagonal, offset=0)
torch.autograd.gradcheck(func, (input,), check_forward_ad=True)
def test_metadata_check_when_primal_has_conj_bit(self):
# Make sure the _has_same_storage_numel is a fallthrough, so that
# the conj bit does not materialize. If it materialized, it would
# cause the layout check to fail for views that do not index
# the entire storage.
a = torch.randn(2, 2, dtype=torch.cdouble).conj()
b = torch.rand_like(a)
self.assertTrue(torch.is_conj(a))
self.assertEqual(len(a.storage()), len(b.storage()))
with fwAD.dual_level():
dual = fwAD.make_dual(a, b)
dual[1:]
def test_metadata_check_when_primal_has_neg_bit(self):
# Make sure the _has_same_storage_numel is a fallthrough, so that
# the neg bit does not materialize. If it materialized, it would
# cause the layout check to fail for views that do not index
# the entire storage.
a = torch.randn(2, 2, dtype=torch.cdouble).conj().imag
b = torch.randn(2, 2, dtype=torch.cdouble).imag
self.assertTrue(torch.is_neg(a))
self.assertEqual(len(a.storage()), len(b.storage()))
with fwAD.dual_level():
dual = fwAD.make_dual(a, b)
dual[1:]
def test_metadata_check_check_conj(self):
keys = {
"NEITHER": lambda x: x,
"CONJ": lambda x: x.conj(),
"NEG": lambda x: x._neg_view()
}
for primal_key, tangent_key in product(keys, keys):
x = keys[primal_key](torch.randn(2, 3, 4, dtype=torch.cdouble))
t = keys[tangent_key](torch.randn(2, 3, 4, dtype=torch.cdouble))
if primal_key == tangent_key:
with fwAD.dual_level():
dual = fwAD.make_dual(x, t)
self.assertTrue(fwAD.unpack_dual(dual).tangent is t)
torch.real(dual)
torch.imag(dual)
else:
with fwAD.dual_level():
dual = fwAD.make_dual(x, t)
self.assertTrue(fwAD.unpack_dual(dual).tangent is not t)
torch.real(dual)
torch.imag(dual)
def test_metadata_check_ignore_storage_offset_for_zero_numel_tensor(self):
# See https://github.com/pytorch/pytorch/issues/80507
a = torch.tensor([1.]).as_strided((0,), (1,), 1)
b = torch.tensor([1.]).as_strided((0,), (1,), 2)
with fwAD.dual_level():
dual_input = fwAD.make_dual(a, b)
# Check that no copy is made
self.assertIs(fwAD.unpack_dual(dual_input).tangent, b)
a = torch.tensor([1.]).as_strided((1,), (2,), 0)
b = torch.tensor([1.]).as_strided((1,), (1,), 0)
with fwAD.dual_level():
dual_input = fwAD.make_dual(a, b)
dual_input[1:]
# The following test functions check all of the following behaviors:
# - Ensure that the default level system in the python binding works
# - Ensure that only level 0 exists and nesting is properly disabled
# - Ensure that printing works fine
# - Ensure that basic packing/unpacking works
# - Ensure that advanced packing/unpacking works
# - For memory / version counter share
# - For backward AD (regular ops)
# - Ensure that view + inplace for both modes work fine
# - Ensure we do proper cleanup on exit of a level
def test_default_level(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
# We don't actually need to enforce that these two are the exact same python
# object, feel free to relax in the future
self.assertIs(baz_tangent, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertEqual(baz_tangent, None)
def test_nested_level(self):
with fwAD.dual_level() as level:
# For now only level 0 exists
self.assertEqual(level, 0)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Nested forward mode AD is not supported at the moment"):
nest_level = fwAD.enter_dual_level()
def test_set_fw_grad_having_own_fw_grad_at_same_level(self):
foo = torch.rand(2)
bar = torch.rand(2)
baz = torch.rand(2)
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
with self.assertRaisesRegex(RuntimeError, "has a forward gradient at the same level"):
fwAD.make_dual(baz, dual)
def test_codegen_ignores_undefined_outputs(self):
# This test checks that codegen silently ignores undefined outputs
# Below, grad_input is specified as False in grad_output_mask, so
# convolution backward will return an undefined tensor in that position.
# Note that for this test to work we need to make sure either grad_output
# or weight is a dual tensor, so that grad_input requires a forward grad
weight = torch.randn(6, 1, 30, 30)
inp = torch.rand((1, 1, 32, 32))
out = torch.nn.functional.conv2d(inp, weight)
grad_out = torch.ones_like(out)
with fwAD.dual_level():
dual_weight = fwAD.make_dual(weight, torch.ones_like(weight))
grad_input, _, _ = torch.ops.aten.convolution_backward(
grad_out, inp, dual_weight, (0,),
(1, 1), (0, 0), (1, 1), False, (0, 0), 1, (False, True, False))
self.assertIsNone(grad_input)
def test_make_dual_inference_tensor_in_inference_mode(self):
with torch.inference_mode():
foo = torch.rand(2)
bar = torch.rand(2)
foo_copy = foo.clone()
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
self.assertFalse(dual._is_view())
dual += 1
self.assertFalse(torch.allclose(foo, foo_copy))
def test_make_dual_torch_dispatch(self):
counter = [0]
class MySubclass(torch.Tensor):
def __new__(cls, data=None):
return torch.Tensor._make_subclass(cls, data)
__torch_function__ = torch._C._disabled_torch_function_impl
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
if func.overloadpacket == torch.ops.aten.alias:
counter[0] += 1
# Make sure we can re-enable autograd here
with torch.overrides.enable_reentrant_dispatch():
foo = torch.rand(1, requires_grad=True)
self.assertIsNotNone(foo.exp().grad_fn)
with no_dispatch():
return func(*args, **kwargs)
a = torch.tensor(1.)
s = MySubclass(a)
with fwAD.dual_level():
# Only the primal has "alias" called on it
fwAD.make_dual(s, torch.rand_like(s))
self.assertEqual(counter[0], 1)
fwAD.make_dual(torch.rand_like(s), s)
self.assertEqual(counter[0], 1)
def test_make_dual_forbid_integral_dtype(self):
primal_f = torch.ones(2, 2, dtype=torch.float)
primal_l = torch.ones(2, 2, dtype=torch.long)
tangent_f = torch.ones(2, 2, dtype=torch.float)
tangent_l = torch.ones(2, 2, dtype=torch.long)
with fwAD.dual_level():
# Float Primal and Long Tangent
with self.assertRaisesRegex(ValueError, "Expected tangent to be floating point or complex"):
fwAD.make_dual(primal_f, tangent_l)
# Long Primal and Long Tangent
with self.assertRaisesRegex(ValueError, "Expected primal to be floating point or complex"):
fwAD.make_dual(primal_l, tangent_l)
# Long Primal and Float Tangent
with self.assertRaisesRegex(ValueError, "Expected primal to be floating point or complex"):
fwAD.make_dual(primal_l, tangent_f)
def test_print(self):
with fwAD.dual_level() as level:
a = torch.rand(3)
self.assertFalse("tangent=" in str(a))
b = fwAD.make_dual(a, torch.rand(3))
self.assertFalse("tangent=" in str(a))
self.assertTrue("tangent=" in str(b))
b_primal, b_tangent = fwAD.unpack_dual(b)
self.assertFalse("tangent=" in str(b_primal))
self.assertFalse("tangent=" in str(b_tangent))
def test_basic_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertIs(baz_tangent, bar)
# Check unpacked dual is returned as a named tuple
# NB: Every invocation of unpack_dual returns a new tensor view
self.assertIsNot(baz_primal, fwAD.unpack_dual(baz).primal)
self.assertEqual(baz_primal, fwAD.unpack_dual(baz).primal)
self.assertIs(baz_tangent, fwAD.unpack_dual(baz).tangent)
# Check that packing/unpacking did not change the input
foo_primal, foo_tangent = fwAD.unpack_dual(foo)
self.assertEqual(foo_primal, foo)
self.assertIsNone(foo_tangent)
def test_advanced_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.ones(2)
# Memory and version counter check
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
# Ensure that they are sharing memory and version counter
self.assertEqual(dual.storage().data_ptr(), foo.storage().data_ptr())
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual._version)
foo.add_(1)
self.assertEqual(foo._version, dual._version)
# Unpacking should only create aliases as well
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertEqual(dual_primal.storage().data_ptr(), foo.storage().data_ptr())
self.assertEqual(dual_tangent.storage().data_ptr(), bar.storage().data_ptr())
# And the tangent is actually re-used as-is so it is still the same Tensor
self.assertIs(dual_tangent, bar)
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual_primal._version)
foo.add_(1)
self.assertEqual(foo._version, dual_primal._version)
self.assertEqual(bar._version, dual_tangent._version)
bar.add_(1)
self.assertEqual(bar._version, dual_tangent._version)
# backward mode check
with fwAD.dual_level():
foo.requires_grad_()
bar.requires_grad_()
# Check that backward gradients properly propagates through packing/unpacking
dual = fwAD.make_dual(foo, bar)
p, t = fwAD.unpack_dual(dual)
gfoo, gbar = torch.autograd.grad(p.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertEqual(gfoo, torch.ones_like(foo))
self.assertIsNone(gbar)
gfoo, gbar = torch.autograd.grad(t.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertIsNone(gfoo)
self.assertEqual(gbar, torch.ones_like(bar))
# Check that forward gradients are impacted by detach()
detached_dual = dual.detach()
out = detached_dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
# Check that forward gradients are not impacted by no_grad
with torch.no_grad():
out = dual * 3
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertFalse(t.requires_grad)
self.assertEqual(p, foo * 3)
self.assertEqual(t, bar * 3)
# Check that forward gradients are not impacted by inplace detach
dual = dual.clone()
dual.detach_()
out = dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
def test_view_inplace_non_differentiable_views(self):
original_foo = torch.rand(2, dtype=torch.double)
original_bar = torch.ones(2, dtype=torch.double)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Note that in this test, we use "update" to mean computing the right tangent for the dual
# All the inplace operations here are expected to update the primal value of the Tensors but
# not always their tangents.
# Also, all mentions of "non differentiable view" here mean non forward differentiable view
# unless specified otherwise.
# See note [Forward Grad View/inplace] for more details on how these views work.
# Check that inplace ops do not update non-differentiable views
# Non differentiable view
dual = fwAD.make_dual(foo, bar)
dual *= 2
# Check that non differentiable view's tangent was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that the computed result is correct
self.assertEqual(bar, original_bar * 2)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
self.assertEqual(foo, original_foo * 2)
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 2)
# Other non differentiable view
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertIsNone(fwAD.unpack_dual(dual_primal)[1])
self.assertIsNone(fwAD.unpack_dual(dual_tangent)[1])
dual_primal *= 2
# Ensure dual's tangent did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
dual_tangent *= 2
# Ensure dual's primal did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 4)
def test_view_inplace_differentiable_views(self):
original_foo = torch.rand(2)
original_bar = torch.ones(2)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Check that inplace ops do update differentiable views but stop at non differentiable ones
# A non differentiable view
dual = fwAD.make_dual(foo, bar)
# A differentiable view
view = dual.narrow(0, 0, 1)
view *= 2
# Check that non differentiable view was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that differentiable view was updated
self.assertEqual(fwAD.unpack_dual(dual)[1], torch.tensor([2., 1.]))
self.assertEqual(fwAD.unpack_dual(view)[1], torch.tensor([2.]))
# Check that we track differentiable view even for Tensors that are not dual
baz = torch.rand(2)
baz += dual
self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1])
# Updates through a view (indexed assignment) should propagate the tangent as well
baz = torch.rand(2)
baz[0] = dual[0]
self.assertEqual(fwAD.unpack_dual(baz)[1][0], fwAD.unpack_dual(dual)[1][0])
# Unused values get a gradient of 0
self.assertEqual(fwAD.unpack_dual(baz)[1][1], 0.)
# Check that forward non-differentiable views do prevent gradient update
baz = torch.rand(2)
view = baz.detach()
view += dual
self.assertIsNone(fwAD.unpack_dual(baz)[1])
def test_view_inplace_always_creates_a_view(self):
# See https://github.com/pytorch/pytorch/issues/67800
# The codepath may depend on the op. At the time of writing, when self is not a dual tensor
# the resulting forward grad for self for...
# - add_ has the same layout as self
# - mul_ has the same layout as other
# This is kind of fragile because the above depends on how the forward grad expression
# is written. For add and mul at least, the output inherits the layout of LHS.
# We want to handle at least these two cases.
inplace_binary_ops = ( # Add more to this list?
lambda x, y: x.add_(y),
lambda x, y: x.mul_(y),
lambda x, y: x.copy_(y),
)
for inplace_binary_op in inplace_binary_ops:
base = torch.randn(2, 2)
view = base.transpose(0, 1)
primal = torch.randn(2, 2)
tangent = torch.randn(2, 2)
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
inplace_binary_op(view, dual)
# Verify that a view relationship is created for both the primal and tangent
p, t = fwAD.unpack_dual(base)
p_clone = p.clone()
t_clone = t.clone()
view *= 2
p, t = fwAD.unpack_dual(base)
self.assertTrue(torch.allclose(p_clone * 2, p))
self.assertTrue(torch.allclose(t_clone * 2, t))
def test_grad_cleanup(self):
foo = torch.rand(2)
bar = torch.rand(2)
baz = torch.rand(2)
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
self.assertIsNone(fwAD.unpack_dual(foo)[1])
self.assertIs(fwAD.unpack_dual(dual)[1], bar)
self.assertIsNone(fwAD.unpack_dual(dual)[1])
with fwAD.dual_level():
self.assertIsNone(fwAD.unpack_dual(foo)[1])
new_dual = fwAD.make_dual(foo, baz)
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
new_dual_primal, new_dual_tangent = fwAD.unpack_dual(new_dual)
self.assertEqual(dual_primal, new_dual_primal)
self.assertIsNone(dual_tangent)
self.assertEqual(new_dual_tangent, baz)
def test_detach_view_tracking(self):
# Default detach is both forward and backward non-differentiable
foo = torch.rand(2)
foo_weak = torch._C._WeakTensorRef(foo)
out = foo.detach()
del foo
self.assertTrue(foo_weak.expired())
def test_out_variant(self):
with fwAD.dual_level():
foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
bar = torch.rand(2)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(bar, bar, out=foo)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(foo, bar, out=bar)
def test_non_differentiable(self):
with fwAD.dual_level():
foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
bar = torch.rand(2)
# No differentiable outputs, shouldn't error
eq = foo == bar
# Inplace
foo.eq_(bar)
def test_create_new_zeros_with_same_meta(self):
new_zeroes_fn = torch.ops.aten._new_zeros_with_same_feature_meta
def check(a, b):
def assert_same_meta(t, target):
for num_bdim in range(t.dim()):
result = new_zeroes_fn(t, target, self_num_batch_dims=num_bdim)
self.assertEqual(result.dim(), target.dim() + num_bdim)
# Check size/strides match for feature dims only
for i in range(num_bdim, result.dim()):
self.assertEqual(result.size()[i], target.size()[i - num_bdim])
self.assertEqual(result.stride()[i], target.stride()[i - num_bdim])
# Check that we generate strides reasonably
if target.is_contiguous():
self.assertTrue(result.is_contiguous())
self.assertEqual(result.storage_offset(), target.storage_offset())
prod_of_t_bdims = reduce(operator.mul, t.size()[:num_bdim], 1)
self.assertEqual(len(result.storage()), len(target.storage()) * prod_of_t_bdims)
# TensorOptions is same
self.assertEqual(result.dtype, target.dtype)
assert_same_meta(a, b)
assert_same_meta(b, a)
a = torch.randn(5, dtype=torch.float)
b = torch.randn(2, 3, 4, dtype=torch.double)
check(a, b)
# non-contiguous case
a = torch.randn(2, 3, 4).transpose(0, 1).contiguous().transpose(0, 1)
b = torch.randn(2, 3, 4)
check(a, b)
a = torch.randn(5).narrow(0, 1, 2)
b = torch.randn(2)
check(a, b)
# tensor is not a view, but still does not index the entirety of its storage
a = torch.randn(5).resize_(4)
b = torch.randn(4)
check(a, b)
# Zero-numel tensors
a = torch.randn(1, 0, 2)
b = torch.randn(1, 2)
check(a, b)
# Scalar tensor
a = torch.tensor(1.)
b = torch.randn(1, 2)
check(a, b)
def test_backward_graph_destruction(self):
def fn():
a = torch.rand(10, requires_grad=True)
da = fwAD.make_dual(torch.rand_like(a), a)
# Create an object with a c++ cycle as:
# db -> AutogradMeta -> ForwardGrad -> db's grad
# db's grad -> AutogradMeta -> MulBackward
# MulBackward -> SavedVariable -> db
db = da.exp()
with fwAD.dual_level():
fn()
# This test makes sure that we don't deadlock on exit of this
# context manager. If we do, something is most likely wrong with the
# locking of the forward AD level.
# Generic device type autograd tests.
class TestAutogradDeviceType(TestCase):
def test_min_max_median_backprops_to_all_values(self, device):
for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
for x in [x1, x2]:
y = f(x)
y.backward()
self.assertEqual(x.grad.sum(), 1.)
self.assertEqual((x.grad == 1 / 3).sum(), 3)
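# Illustrative sketch (not part of the original test suite): a concrete instance of the
# behavior asserted above. With ties, the full min/max/median reductions split the
# incoming gradient evenly across all tying elements. The helper name is made up.
def _example_max_tie_gradient():
    x = torch.tensor([1., 0., 1., 0., 1., 0.], requires_grad=True)
    torch.max(x).backward()
    # Three elements tie for the maximum, so each receives 1/3 of the gradient.
    return x.grad  # tensor([1/3, 0., 1/3, 0., 1/3, 0.])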
def test_scatter_index_reduce_amin_amax_backprops_to_all_values(self, device):
# tests that gradients are evenly distributed when there are multiple max/min values
# tested here instead of adding a SampleInput because the backward for this case is
# non-differentiable for gradgrad, as is the case for test_min_max_median_backprops_to_all_values above
fns = (torch.scatter_reduce, torch.index_reduce)
reduces = ('amin', 'amax')
for fn, reduction in product(fns, reduces):
input = torch.randn((2, 3), device=device, dtype=torch.float64, requires_grad=True)
src = input.clone().detach_().requires_grad_(True)
idx = torch.arange(2).to(dtype=torch.long, device=device)
if fn == torch.scatter_reduce:
idx = idx.unsqueeze(-1).expand((2, 3))
gradcheck(fn, (input, 0, idx, src, reduction), check_batched_grad=False)
def test_scatter_index_reduce_prod_gradgrad_error(self, device):
# test that double backward raises an error for the case where 2 zeros in src
# are scattered to the same position in self
input = torch.tensor([1.], device=device, dtype=torch.float64, requires_grad=True)
src = torch.tensor([0., 0.], device=device, dtype=torch.float64, requires_grad=True)
idx = torch.tensor([0, 0], device=device, dtype=torch.long)
for fn in (torch.scatter_reduce, torch.index_reduce):
# check that this case passes on gradcheck
gradcheck(fn, (input, 0, idx, src, 'prod'), check_batched_grad=False)
with self.assertRaisesRegex(RuntimeError, "Double backward is unsupported for"):
gradgradcheck(fn, (input, 0, idx, src, 'prod'))
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_parameter_resize(self, device):
asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))
for i in range(2):
with torch.no_grad():
asd.set_(asd[1:])
asd.grad = None
m = torch.cat((asd, asd))
m.sum().backward()
@skipIfMps # the test doesn't work on MPS as double types are not supported
@dtypes(torch.double, torch.cdouble)
def test_sparse_ctor_getter_backward(self, device, dtype):
# See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
def _test(size, sparse_dim, nnz, device):
v_size = [nnz] + list(size[sparse_dim:])
i = torch.rand(sparse_dim, nnz)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
inp = torch.randn(v_size, dtype=torch.double, device=device, requires_grad=True)
other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True, device=device,
dtype=dtype)[0]
def fn(v):
x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
y = (x + other).coalesce()
yv = y.values()
new_v = yv.tanh()
z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
return z.coalesce().values()
gradcheck(fn, (inp,), check_batched_grad=False)
# FIXME: make gradgradcheck work.
# gradgradcheck(fn, (inp,), check_batched_grad=False)
# assert that _values is non-differentiable
with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))
for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
sparse_size = [] if empty_i else [2, 1]
dense_size = [1, 0, 2] if empty_v else [1, 2]
nnz = 0 if empty_nnz else 5
_test(sparse_size + dense_size, len(sparse_size), nnz, device)
@skipMeta
@skipIfMps
@dtypes(torch.double, torch.cdouble)
def test_sparse_backward(self, device, dtype):
class FixedGradientFunction(Function):
@staticmethod
def forward(ctx, x, grad_x):
ctx.save_for_backward(grad_x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_grad_x, = ctx.saved_tensors
return saved_grad_x, None
size = torch.Size([6, 3, 2])
i1 = torch.tensor([
[0, 3, 4],
[0, 2, 2],
], dtype=torch.long)
v1 = make_tensor([3, 2], dtype=dtype, device=device)
sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device)
i2 = torch.tensor([
[0, 1, 3, 4],
[0, 1, 2, 2],
], dtype=torch.long)
v2 = make_tensor([4, 2], dtype=dtype, device=device)
sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device)
dense_grad = torch.rand(size, device=device, dtype=dtype)
fn = FixedGradientFunction
# sparse first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# dense first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# sparse only
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)
# autograd tests via common_method_invocations don't allow input tensors to
# be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
# check_sparse_nnz is set to False.)
@skipIfMps
def test_sparse_mask_autograd(self, device):
tensor = torch.randn(3, requires_grad=True, device=device)
mask = torch.ones(3, device=device)
mask[1] = 0
mask = mask.to_sparse()
converted = tensor.sparse_mask(mask).to_dense()
converted.sum().backward()
self.assertEqual(tensor.grad, mask.to_dense())
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_pyscalar_conversions(self, device):
def _test_pyscalar_conversions(t, integral_conv):
# integral -> integral
l = t(torch.zeros(1, 1, 1, dtype=torch.long))
pyscalar = -12345
l[0] = pyscalar
self.assertEqual(integral_conv(l), pyscalar)
# floating point -> floating point
f = Variable(t(torch.randn(1, 1, dtype=torch.double)))
pyscalar = -12345.1
f[0] = pyscalar
self.assertEqual(float(f), pyscalar)
f[0] = nan
self.assertTrue(math.isnan(float(f)))
f[0] = inf
self.assertEqual(float(f), inf)
f[0] = -inf
self.assertEqual(float(f), -inf)
# integral -> floating point
# check we can convert something that loses precision
pyscalar = 1234567890123456789
self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
l[0] = pyscalar
self.assertEqual(float(l), float(pyscalar))
# floating point -> integral
f[0] = nan
self.assertRaises(ValueError, lambda: integral_conv(f[0]))
f[0] = inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = -inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = sys.float_info.max
self.assertEqual(integral_conv(f), sys.float_info.max)
# bool, nonzero
def test_nonzero(tensor, value, expected):
tensor[0] = value
self.assertEqual(expected, bool(tensor))
self.assertEqual(expected, True if tensor else False)
test_nonzero(l, 0, False)
test_nonzero(l, -2, True)
test_nonzero(f, 0.0, False)
test_nonzero(f, sys.float_info.min, True)
test_nonzero(f, nan, bool(nan))
test_nonzero(f, inf, bool(inf))
test_nonzero(f, -inf, bool(-inf))
_test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))
@dtypesIfMPS(torch.float32)
@dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_set_requires_grad_only_for_floats(self, device, dtype):
def f1():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad_()
def f2():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = True
def f3():
torch.ones(1, dtype=dtype, device=device, requires_grad=True)
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = False # should always work
a.requires_grad_(False)
for f in [f1, f2, f3]:
if dtype.is_floating_point:
f()
else:
with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)):
f()
@onlyCUDA
def test_advanced_indexing_backwards_large(self, device):
# See https://github.com/pytorch/pytorch/issues/22843
n = (1 << 16)
x = torch.rand(n, 1, device=device, requires_grad=True)
a = x[:, [0]]
a.sum().backward()
self.assertEqual(x.grad, torch.ones(n, 1, device=device))
def test_advanced_indexing_backwards_memory_format(self, device):
# See https://github.com/pytorch/pytorch/issues/36956
shape = (2, 8, 1, 2)
i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
x = torch.randn(shape, requires_grad=True, device=device)
x[i].sum().backward()
def _test_reentrant_parent_error_on_cpu(self, device):
t1 = torch.rand([3, 3], requires_grad=True)
t2 = torch.rand([3, 3], device=device, requires_grad=True)
t3 = torch.rand([3, 3], device=device, requires_grad=True)
# Parent graph is a cpu graph.
t4 = t1 * t1
t5 = TestAutograd.SimulateBackwardError.apply(t4)
# Child gpu graph (much longer than parent graph).
prev = t2 * t2
for i in range(10):
prev = prev * t2
reentrant_root = prev
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will take much longer.
reentrant_root.backward()
return grad
# Parent gpu graph.
t6 = ReentrantFunc.apply(t3)
t7 = t6 * t6
# Parent graph will error out first, while child graph will continue executing.
with self.assertRaisesRegex(Exception, "Simulate error"):
torch.autograd.backward([t5.sum(), t7.sum()])
# No grads should be accumulated since child graph will stop execution
# after parent receives error.
self.assertIsNone(t2.grad)
self.assertIsNone(t1.grad)
self.assertIsNone(t3.grad)
@onlyCUDA
def test_reentrant_parent_error_on_cpu(self, device):
def _get_cuda_memory_usage():
# we don't need to synchronize CUDA because the statistics are not tracked at
# the actual freeing, but when the block is marked as free.
num_devices = torch.cuda.device_count()
gc.collect()
return tuple(torch.cuda.memory_allocated(i) for i in range(num_devices))
before = _get_cuda_memory_usage()
# Run as separate function so that gc can clean up everything when we
# check for memory usage.
self._test_reentrant_parent_error_on_cpu(device)
# Wait for autograd thread to cleanup failed tasks.
after = _get_cuda_memory_usage()
start = time.time()
while before != after and time.time() - start < 30:
time.sleep(0.1)
after = _get_cuda_memory_usage()
self.assertEqual(before, after)
@skipIfMps # the test doesn't work on MPS
# TODO: see if these tests can be ported to OpInfos or moved to where's test suite
def test_where_functional(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where(cond, x, y):
return torch.where(cond, x, y)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])
x = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])
@skipIfMps # the test doesn't work on MPS
def test_where_scalar(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
scalar = 4.
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where_scalar_first(cond, x):
return torch.where(cond, scalar, x)
def where_scalar_second(cond, x):
return torch.where(cond, x, scalar)
gradcheck(where_scalar_first, (cond, x))
gradgradcheck(where_scalar_first, (cond, x))
gradcheck(where_scalar_second, (cond, x))
gradgradcheck(where_scalar_second, (cond, x))
@onlyCUDA
def test_free_unneeded_tensor(self, device):
x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
m = torch.randn(1, 3, 1, 1, device=device)
z = x.sum()
base_mem = torch.cuda.memory_allocated()
z = ((x + 2) * m).sum()
end_mem = torch.cuda.memory_allocated()
# In the end the memory usage should remain equal, because neither of
# (x + 2) and ((x + 2) * m) should be kept alive for backward, while the
# previous allocation of z had the same size as the current one.
self.assertEqual(base_mem, end_mem)
@onlyCUDA
def test_pin_memory(self, device):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
self.assertEqual(x, x.pin_memory())
self.assertIsNot(x, x.pin_memory())
self.assertTrue(x.pin_memory().requires_grad)
gradcheck(lambda x: x.pin_memory(), [x])
gradgradcheck(lambda x: x.pin_memory(), [x])
@onlyCUDA
def test_profiler_emit_nvtx(self, device):
# This test is not intended to ensure correctness of nvtx ranges.
# That would require something a great deal more complex (you'd have to create a
# profile in a subprocess, open it, and parse the sql somehow).
# This test is merely intended to catch if emit_nvtx breaks on construction.
a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
with torch.cuda.profiler.profile():
with emit_nvtx():
a.add(1.0)
@onlyCUDA
def test_rnn_backward_to_input_but_not_parameters(self, device):
# this checks whether it is possible to not require gradients for the
# weight parameters while still requiring them for the inputs, see #7722
l = torch.nn.LSTM(2, 3).to(device)
for p in l.parameters():
p.requires_grad = False
s = torch.randn(1, 1, 2, requires_grad=True, device=device)
out, _ = l(s)
out.sum().backward()
self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
@skipIfMps # the test doesn't work as randn is not supported with type long
@deviceCountAtLeast(1)
def test_grad_assignment(self, devices):
x = torch.randn(5, 5, device=devices[0])
# Tests that the wrong type raises
with self.assertRaisesRegex(TypeError, "expected to be a Tensor or None"):
x.grad = 0
# Tests that the wrong shape raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(2, 2, device=devices[0])
# Tests that the wrong dtype raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0])
# Tests that self-assignment raises
with self.assertRaises(RuntimeError):
x.grad = x
# Tests device -> cpu grad assignment raises
if self.device_type != 'cpu':
with self.assertRaises(RuntimeError):
t_cpu = torch.rand(5, 5)
t_cpu.grad = torch.randn(5, 5, device=devices[0])
# Tests half type on CUDA
if self.device_type == 'cuda':
x = x.to(dtype=torch.half, device=devices[0])
x.grad = torch.zeros_like(x)
# Tests cross-device assignment raises
if len(devices) > 1:
x = torch.randn(5, 5, device=devices[0])
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, device=devices[1])
@dtypesIfMPS(torch.float32)
@deviceCountAtLeast(1)
@dtypes(torch.float, torch.double)
def test_requires_grad_factory(self, devices, dtype):
fns = [torch.ones_like, torch.randn_like]
x = torch.randn(2, 3, dtype=dtype, device=devices[0])
for fn in fns:
for requires_grad in [True, False]:
output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad)
self.assertEqual(requires_grad, output.requires_grad)
self.assertIs(dtype, output.dtype)
self.assertEqual(devices[0], str(x.device))
@deviceCountAtLeast(2)
def test_unused_output_device(self, devices):
from torch.nn.parallel._functions import Broadcast
x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
outputs = Broadcast.apply(list(range(len(devices))), x)
y = outputs[-1] * 2
y.sum().backward()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2)
@deviceCountAtLeast(2)
def test_backward_device(self, devices):
# check that current device matches the variable's device
device = [None]
class Identity(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, grad_output):
device[0] = grad_output.device
return grad_output.clone()
v = torch.randn(1, device=devices[1], requires_grad=True)
Identity.apply(v).backward()
self.assertEqual(str(device[0]), devices[1])
@deviceCountAtLeast(2)
def test_inputbuffer_add_multidevice(self, devices):
input = torch.randn(1, device=devices[0], requires_grad=True)
output = input.to(device=devices[1]) + input.to(device=devices[1])
output.backward()
@onlyCPU
def test_copy_(self, device):
# At the time of writing this test, copy_ was not generated from native_functions.yaml,
# and there was a bug where bfloat16 was not recognized as floating point.
x = torch.randn(10, device=device, requires_grad=True)
floating_dt = floating_types_and(torch.half, torch.bfloat16)
for dt in floating_dt:
y = torch.empty(10, device=device, dtype=dt)
y.copy_(x)
self.assertTrue(y.requires_grad)
z = x.to(torch.bfloat16)
self.assertTrue(z.requires_grad)
def test_copy_forward_ad_broadcasting(self, device):
# copy_ allows the src to have a different shape from self as long as src is
# broadcastable to self. Make sure forward AD handles this case.
primal = torch.rand(3, 3, device=device)
tangent = torch.rand(3, 3, device=device)
non_dual = torch.rand(1, 3, 3, device=device)
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
non_dual.copy_(dual)
def test_copy_forward_ad_same_layout_copies_grad(self, device):
primal = torch.tensor([[3.], [4.]], device=device)
tangent = torch.tensor([[5.], [6.]], device=device)
with fwAD.dual_level():
x_dual = fwAD.make_dual(primal, tangent)
non_dual = torch.tensor([[1.], [2.]])
non_dual.copy_(x_dual)
self.assertTrue(fwAD.unpack_dual(non_dual).tangent is not tangent)
@onlyCUDA
def test_simple_reentrant_cross_device(self, device):
class ReentrantFunc(Function):
_cpu_mode = True
@staticmethod
def forward(ctx, x):
return x * (x + 2)
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
if ReentrantFunc._cpu_mode:
new_param = torch.randn(2, 2, requires_grad=True)
(new_param ** 2).sum().backward()
else:
new_param = torch.randn(2, 2, device=device, requires_grad=True)
(new_param ** 2).sum().backward()
return grad_output
# Reentrant starts on GPU thread, finishes on GPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
# set ReentrantFunc node to GPU to emit tasks to GPU queue
ReentrantFunc._cpu_mode = False
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on GPU thread, finishes on CPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
# set ReentrantFunc node to CPU to emit tasks to CPU queue
ReentrantFunc._cpu_mode = True
out = ReentrantFunc.apply(x)
out.sum().backward()
@onlyCUDA
def test_cross_device_reentrant_autograd(self, device):
# Output on gpu so that this task will be associated with the gpu thread
def fn_on_gpu(inp):
# Artificially increase the priority of the next op to make sure it runs
# as soon as we reach it before the ops of branch1.
dummy = inp * 2 * 2 * 2 * 2
return inp.to(device=device)
def parent_on_cpu(inp):
# Slow branch of ops on gpu so that the work queue for the gpu thread
# won't empty too quickly. They also have smaller priorities than the
# ones created by fn_on_gpu
branch1 = inp.to(device=device)
branch1 = branch1 / branch1
branch1 = branch1 / branch1
branch1 = branch1 / branch1
# Perform checkpoint on cpu tensors, so the last op performed in the reentrant
# backward is an AccumulateGrad that runs on the cpu thread on behalf of the gpu thread.
# The cpu thread will then notify the gpu thread with an empty NodeTask.
branch2 = checkpoint(fn_on_gpu, inp)
out = branch2 + branch1
return out
inp = torch.rand(2, requires_grad=True)
out = parent_on_cpu(inp)
# This will segfault if the empty NodeTask is not handled properly in the
# gpu thread ReadyQueue
out.sum().backward()
def test_inplace_on_view_backprop_base(self, device):
# modify view and back-prop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v1.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [1, 1]])
def test_inplace_on_view_backprop_view_of_view(self, device):
# modify view and backprop through view-of-view
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = x.narrow(0, 0, 1)
v1.mul_(2)
v2.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]])
def test_inplace_on_view_of_view(self, device):
# modify view-of-view and backprop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]])
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_then_no_grad(self, device):
# Perform an in-place operation on a view of a non-leaf variable.
a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True)
b = a * 2
c = b.view_as(b)
c[0][0] = 3
# Force a graph update with grad disabled.
with torch.no_grad():
c.grad_fn
c.sum().backward()
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_gradcheck(self, device):
# gradcheck modifications to views
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_multiple_outputs(self, device):
root = torch.arange(9., dtype=torch.double).reshape(3, 3).requires_grad_()
x = root.clone()
v1 = x.unbind()
with self.assertRaises(RuntimeError):
v1[0].mul_(2)
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_of_multiple_output_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.unbind(0)
c = b[0].view_as(b[0])
with self.assertRaises(RuntimeError):
c.mul_(2)
@skipIfMps # MPS backend doesn't support double types
def test_inplace_multiple_output_view_of_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.view_as(a)
c = b.unbind(0)
with self.assertRaises(RuntimeError):
c[0].mul_(2)
@skipIfMps # MPS backend doesn't support double types
def test_inplace_on_view_makes_base_require_grad(self, device):
# in-place modification to view makes base require grad
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False)
b = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
self.assertFalse(x.requires_grad)
x.narrow(1, 2, 2).mul_(b)
self.assertTrue(x.requires_grad)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_backprop_view(self, device):
# modify view and backprop through view
a = torch.tensor([2., 5.], device=device, requires_grad=False)
b = torch.tensor([3.], device=device, requires_grad=True)
res = a.narrow(0, 1, 1).mul_(b)
res.sum().backward()
self.assertEqual(b.grad.tolist(), [5])
self.assertIsNone(a.grad)
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_modify_base(self, device):
# Test that an in-place operation on a base that forced it to require
# grad also forces any previous views to require grad and backprop
# correctly
r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True)
def fn(r):
x = torch.ones(5, dtype=torch.double, device=device)
v = x.select(0, 1)
self.assertFalse(v.requires_grad)
self.assertIsNone(v.grad_fn)
x.add_(r) # v is now dependent on r due to the in-place op on x
self.assertTrue(v.requires_grad)
return v
gradcheck(fn, [r])
gradgradcheck(fn, [r])
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_python(self, device):
# in-place modifications of Python-autograd created view
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
class PyAdd(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
ctx.mark_dirty(x)
x.add_(y)
return x
@staticmethod
def backward(ctx, grad):
return grad, grad
def func(root, b):
x = root.clone()
PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_non_contig(self, device):
root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]])
def test_inplace_on_view_multi_output_unsafe(self, device):
for f in [lambda t: t.unsafe_split(1),
lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
lambda t: t.unsafe_chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
s1.mul_(s2)
s1.sum().backward()
def test_inplace_on_view_multi_output_safe(self, device):
for f in [lambda t: t.split(1),
lambda t: t.split_with_sizes((1, 1, 1)),
lambda t: t.chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
error_msg = 'This view is the output of a function that returns multiple views.'
with self.assertRaisesRegex(RuntimeError, error_msg):
s1.mul_(s2)
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_mv_grad_stride_0(self, device):
# Reference: https://github.com/pytorch/pytorch/issues/38315
mat = torch.randn(2, 2, dtype=torch.double, device=device)
vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True)
def fn(vec):
# Expand inside the function to make sure the input to
# gradcheck does not have overlapping memory
vec = vec.expand(2)
return (mat @ vec).sum()
gradcheck(fn, (vec,))
gradgradcheck(fn, (vec,))
@onlyCUDA
def test_gradcheck_input_output_different_device(self, device):
x = torch.ones((1,), dtype=torch.double, device="cuda", requires_grad=True)
gradcheck(lambda x: x.to("cpu"), (x,))
x = torch.ones((1,), dtype=torch.double, device="cpu", requires_grad=True)
gradcheck(lambda x: x.to("cuda"), (x,))
def test_strided_leaf_grad_layout(self, device):
# (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
for fmt_a in (torch.contiguous_format, torch.channels_last):
for fmt_b in (torch.contiguous_format, torch.channels_last):
a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
a.requires_grad_()
b.requires_grad_()
# checks (1) for broadcasted gradients
a.sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
b.sum().backward()
self.assertEqual(b.grad.stride(), b.stride())
# checks (1) for non-broadcasted gradients
a.grad = None
b.grad = None
(a * b).sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
self.assertEqual(b.grad.stride(), b.stride())
# (2) If the leaf isn't dense, check that grads are row-major contiguous.
c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device))
c.requires_grad_()
d = torch.rand((2, 2), device=device)
# checks (2) for broadcasted gradients
c.sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
# checks (2) for non-broadcasted gradients
c.grad = None
(c * d).sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
@skipIfMps
def test_copy_r_to_c(self, device):
out_c = torch.empty(3, 2, dtype=torch.cdouble, device=device)
inp_r = torch.randn(3, 2, dtype=torch.double, device=device,
requires_grad=True)
def do_test():
out_c.copy_(inp_r)
out_c.sum().backward()
self.assertEqual(inp_r.grad, torch.ones_like(inp_r))
self.assertNotWarn(do_test)
def test_to_r_to_c(self, device):
def do_test():
inp_r = torch.randn(3, 2, dtype=torch.double, device=device,
requires_grad=True)
out = inp_r.to(torch.complex128)
out.sum().backward()
self.assertEqual(inp_r.grad, torch.ones_like(inp_r))
self.assertNotWarn(do_test)
def test_non_differentiable_ops(self, device):
# Just make sure the op doesn't raise an error
# and resulting tensor has requires_grad=False.
x = torch.tensor([[1, 2], [3, 4.]], requires_grad=True, device=device)
out = torch.isin(x, torch.tensor([2, 3], device=device))
self.assertFalse(out.requires_grad)
x = torch.randn(3, 3, requires_grad=True)
out = torch.signbit(x)
self.assertFalse(out.requires_grad)
def test_warning_in_backward(self, device):
# Test that warnings raised during backward are always propagated as Python warnings (gh-50209)
# NOTE: For device=cuda, the warning gets propagated from a worker thread
a = torch.zeros((), device=device, requires_grad=True)
b = torch._C._nn._test_warn_in_autograd(a)
with self.assertWarnsRegex(UserWarning, "Warn from backward"):
b.backward()
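# The class below exercises torch.inference_mode. As a minimal sketch (added for
# clarity, not part of the original tests) of the behavior being verified:
#
#     with torch.inference_mode():
#         t = torch.ones(2)          # created inside the mode -> inference tensor
#     assert torch.is_inference(t)   # True; no version counter or grad_fn is tracked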
class TestAutogradInferenceMode(TestCase):
def _is_inference_tensor(self, tensor):
try:
err_msg = "Inference tensors do not track version counter"
with self.assertRaisesRegex(RuntimeError, err_msg):
tensor._version
return True
except AssertionError as e:
return False
def test_inference_mode_context_manager(self):
self.assertFalse(torch.is_inference_mode_enabled())
with torch.inference_mode():
self.assertTrue(torch.is_inference_mode_enabled())
with torch.inference_mode(False):
self.assertFalse(torch.is_inference_mode_enabled())
self.assertTrue(torch.is_inference_mode_enabled())
self.assertFalse(torch.is_inference_mode_enabled())
def test_inference_mode_decorator(self):
for mode in (True, False):
@torch.inference_mode(mode)
def func(x):
self.assertEqual(torch.is_inference_mode_enabled(), mode)
return x * x
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
d = func(c)
self.assertTrue(not mode or torch.is_inference(d))
self.assertEqual(d.requires_grad, requires_grad and not mode)
def test_inference_mode_tensor_creation(self):
with torch.inference_mode():
# new tensors created through constructors are inference tensors
c = torch.ones(1, 2, 3)
self.assertFalse(c.requires_grad)
self.assertTrue(torch.is_inference(c))
# requires_grad doesn't change inference tensor behavior in InferenceMode
tmp = torch.ones(1, 2, 3, requires_grad=True)
self.assertTrue(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
tmp = torch.ones(1, 2, 3).requires_grad_(False)
self.assertFalse(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
def test_inference_mode_existing_autograd_session(self):
s = torch.ones(1, 2, 3, requires_grad=True)
a = s.clone()
# `a` gets saved outside of inference mode
out = a * a
with torch.inference_mode():
a.add_(2)
self.assertFalse(torch.is_inference(a))
# tensors created outside of inference mode aren't
# inference tensors, so they will still have their
# version counters tracked
err_msg = ("one of the variables needed for gradient computation has been "
"modified by an inplace operation")
with self.assertRaisesRegex(RuntimeError, err_msg):
out.backward(torch.ones_like(out))
def test_inference_mode_inf_tensor_in_inf_mode_functional_op(self):
def functional_op(x):
return x * x
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# performing a non-view operation produces an inference tensor
# that does not require grad
func_out = functional_op(c)
self.assertTrue(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
def test_inference_mode_inf_tensor_in_inf_mode_inplace_op(self):
@torch.inference_mode()
def run_test(fn):
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# after performing an inplace operation, the tensor is still
# an inference tensor
fn(c)
self.assertTrue(torch.is_inference(c))
self.assertEqual(c.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_inf_mode_view_op(self):
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# performing a view operation produces an inference tensor
# that does not require grad
view_out = c.view(-1)
self.assertTrue(torch.is_inference(view_out))
self.assertFalse(view_out.requires_grad)
def test_inference_mode_inf_tensor_in_normal_mode_functional_op(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
func_out = functional_op(c)
self.assertFalse(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
self.assertTrue(func_out.is_leaf)
def test_inference_mode_inf_tensor_in_normal_mode_inplace_op(self):
def run_test(fn):
for requires_grad in (False, True):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
if requires_grad:
# leaf variable that requires grad is being used in an inplace
# operation when requires_grad=True
pass
else:
err_msg = "Inplace update to inference tensor outside InferenceMode"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(c)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_normal_mode_view_op(self):
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
out = c.view(-1)
self.assertTrue(torch.is_inference(out))
self.assertFalse(out.requires_grad)
self.assertFalse(out._is_view())
self.assertTrue(out.is_leaf)
def test_normal_tensor_inplace_output_in_inference_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_inplace_output_in_normal_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_view_output_in_inference_mode(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
# view -> view
tmp = out.view(-1)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
self.assertTrue(tmp._is_view())
self.assertTrue(tmp.is_leaf)
# view -> view -> inplace
self.assertTrue(torch.is_inference_mode_enabled())
tmp.add_(2)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
# Accessing is_leaf in python tries to update grad_fn and raises:
# A view was created in inference mode and its base or
# another view of its base has been modified inplace in normal mode
# tmp.is_leaf
self.assertEqual(a._version, tmp._version)
def test_normal_tensor_view_output_in_normal_mode(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
self.assertTrue(out.is_leaf)
tmp = functional_op(out)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
out.add_(2)
pass
else:
out.add_(2)
tmp = out.view(2, 3)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
def test_mix_inference_and_normal_tensor_functional_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# add is safe since it doesn't save any variable for backward
out = c.add(s)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
if requires_grad:
# leaf inference tensor with requires_grad=True can still have gradient
out.backward(torch.ones_like(out))
self.assertEqual(c.grad, torch.ones_like(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
c * s
# inference tensor in TensorList input
inputs = [s, c]
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.stack(inputs)
def test_mix_inference_and_normal_tensor_inplace_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
c = torch.ones(1, 2, 3)
self.assertTrue(torch.is_inference(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mul_(c)
# inference tensor in TensorList input
err_msg = ("out=... arguments don't support automatic differentiation, "
"but one of the arguments requires grad")
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
else:
a.mul_(c)
err_msg = "Inplace update to inference tensor outside InferenceMode is not allowed"
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
def test_mix_inference_and_normal_tensor_view_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3)
# view_as is a composite op which calls view with only one
# tensor argument, so there is no mixed inference/normal
# tensor input for view ops
tmp1 = c.view_as(s)
self.assertTrue(torch.is_inference(tmp1))
self.assertFalse(tmp1.requires_grad)
# this is fine since it's equivalent to s.view(c.sizes()), which
# isn't a mixed input scenario
tmp2 = s.view_as(c)
self.assertFalse(torch.is_inference(tmp2))
self.assertEqual(tmp2.requires_grad, requires_grad)
def test_inference_mode_handle_direct_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view_as(a)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(view_out)
pass
else:
fn(view_out)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_handle_indirect_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view(-1)
fn(a)
if requires_grad:
err_msg = "A view was created in inference mode and its base or another view "
with self.assertRaisesRegex(RuntimeError, err_msg):
view_out.grad_fn
pass
else:
view_out.grad_fn
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
class TestMultithreadAutograd(TestCase):
def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None):
class PropagatingThread(threading.Thread):
'''Helper class to propagate exception from child
thread to main thread on join.
Reference: https://stackoverflow.com/a/31614591/5602957
'''
def run(self):
self.exception = None
try:
self.ret = super(PropagatingThread, self).run()
except Exception as e:
self.exception = e
def join(self, timeout=None):
super(PropagatingThread, self).join(timeout)
if self.exception:
raise self.exception from self.exception
return self.ret
threads = []
for _ in range(num_threads):
p = PropagatingThread(target=fn, args=args)
p.start()
threads.append(p)
for p in threads:
p.join()
def test_multithreaded_exception_propagation(self):
# Test whether exceptions raised in a child thread
# are propagated to the main thread.
def fn():
self.assertTrue(False)
with self.assertRaises(AssertionError):
self._run_py_multithread_fn(fn)
def test_simple_backward(self):
# simple multithreaded backward that creates threads at the beginning of training,
# with everything else (inputs, operations, etc.) trained separately per thread
def train_fn():
x = torch.ones(5, 5, requires_grad=True)
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
self.assertEqual(x.grad, x + 3.5)
self._run_py_multithread_fn(train_fn)
def test_simple_backward_same_input(self):
# simple multithreaded backward with only shared inputs (this is common
# for things like Hogwild multithreaded training with multiple CPU threads)
def train_fn_backward(x):
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
x = torch.ones(5, 5, requires_grad=True)
self._run_py_multithread_fn(train_fn_backward, (x,))
# Since we call backward from multiple threads
# and all threads share the same input, the concurrent
# backward calls all accumulate into the same .grad for
# each input, so the gradient should equal
# num_threads * gradient
self.assertEqual(x.grad, 10 * (x + 3.5))
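# (Worked check, added for clarity: y = 0.5 * (x + 3) * (x + 4), so dy/dx = x + 3.5;
#  with 10 threads each accumulating into the shared x.grad, the total is 10 * (x + 3.5).)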
def train_fn_grad(x):
y = (x + 3) * (x + 4) * 0.5
grads = torch.autograd.grad(y.sum(), x)
self.assertEqual(len(grads), 1)
self.assertEqual(grads[0], x + 3.5)
# since we use the functional grad() API, gradients are not
# accumulated into the same place, and each thread should see the same result
self._run_py_multithread_fn(train_fn_grad, (x,))
def test_multithread_saved_tensors_hooks(self):
def pack(x):
warnings.warn("pack")
return x
def registers_hooks_for_each_thread():
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
x = torch.ones(5, 5, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
y = x * x
# should raise two warnings from x being saved twice
self.assertEqual(len(w), 2)
y.sum().backward()
def test_dataparallel_saved_tensors_hooks(self):
def pack(x):
warnings.warn("pack")
return x
_self = self
class Model(torch.nn.Module):
def forward(self, x):
with warnings.catch_warnings(record=True) as w:
y = x * x
if torch.cuda.device_count() >= 2:
# DataParallel calls forward in different threads
# without propagating TLS, so hooks should not be called here
_self.assertEqual(len(w), 0)
else:
# DataParallel only uses one thread
# so hooks should be called here
_self.assertGreater(len(w), 0)
x = torch.ones(5, 5, requires_grad=True)
model = torch.nn.DataParallel(Model())
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
model(x)
with warnings.catch_warnings(record=True) as w:
y = x * x
# hooks should be called here
_self.assertGreater(len(w), 0)
def test_python_thread_in_middle(self):
# A user might write a network that starts on one CPU thread, then runs its second half
# concurrently with other threads (either via python threading or fork/join calls),
# then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
# bottom to output at the top. This way part of the GraphTask is shared across
# different threads, and we need to ensure the user specifies retain_graph=True;
# otherwise we error out with the correct error message
# Case 1: multiple backward with python threads, retain_graph=False
# should throw error in some threads with no retain_graph.
success_vs_raises = [0, 0]
def train_fn_no_retain_graph(x):
y = x + x ** 2
try:
y.sum().backward()
success_vs_raises[0] += 1
except RuntimeError as error:
success_vs_raises[1] += 1
self.assertRegex(str(error), "Specify retain_graph=True")
x_no_retain = torch.ones(5, 5, requires_grad=True)
y_no_retain = x_no_retain + x_no_retain ** 2
self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5)
# at least one thread will succeed in this case; all other threads should raise
# the error that tells the user to specify retain_graph=True
self.assertTrue(success_vs_raises[0] >= 1)
# multiple backward with python threads, no error with retain_graph=True
def train_fn_retain_graph(x):
y = x + x ** 2
y.sum().backward(retain_graph=True)
x_retain = torch.ones(5, 5, requires_grad=True)
y_retain = x_retain + x_retain ** 2
self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5)
# result should equal num_threads * gradient
self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1))
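# (Worked check, added for clarity: with u = x + x**2, each thread computes y = u + u**2,
#  so dy/dx = (1 + 2u) * (1 + 2x) = 4x**3 + 6x**2 + 4x + 1; 5 threads accumulate 5x that.)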
def test_fork_join_in_middle(self):
# multiple backward with jit threads (fork/join primitive)
# similar to test_python_thread_in_middle, we test with retain_graph=False/True
# Case 1: multiple grad() calls with jit threads, retain_graph=False
# should throw error in some threads with no retain_graph.
@torch.jit.script
def train_fn_jit_no_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x])
@torch.jit.script
def train_fn_fork_join_calls_no_retain(x):
y_no_retain = (x + 3) * (x + 4) * 0.5
fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x)
grad_hat = train_fn_jit_no_retain(y_no_retain, x)
grad = torch.jit._wait(fut)
return grad, grad_hat
try:
train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True))
except RuntimeError as error:
self.assertRegex(str(error), "Specify retain_graph=True")
# Case 2: no error with retain_graph=True
@torch.jit.script
def train_fn_jit_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True)
@torch.jit.script
def train_fn_fork_join_calls_retain(x):
y_retain = (x + 3) * (x + 4) * 0.5
fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
grad = train_fn_jit_retain(y_retain, x)
grad1 = torch.jit._wait(fut1)
grad2 = torch.jit._wait(fut2)
return grad, grad1, grad2
grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True))
self.assertEqual(grad, grad1)
self.assertEqual(grad, grad2)
def test_preserve_backtrace(self):
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, *grad):
raise ValueError("something")
t = torch.rand(10, requires_grad=True)
try:
Foo.apply(t).sum().backward()
except Exception:
import traceback
tb = sys.exc_info()[2]
tb_str = "\n".join(traceback.format_tb(tb))
self.assertTrue('raise ValueError("something")' in tb_str)
# TODO(@anjali411): add an OpInfo based test for torch.cat
# Issue: https://github.com/pytorch/pytorch/issues/51627
def test_cat_r_to_c(self):
inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True)
inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True)
def fn(x1, x2):
return torch.cat((x1, x2), dim=-1)
torch.autograd.gradcheck(fn, [inp_r, inp_c], check_forward_ad=True)
torch.autograd.gradcheck(fn, [inp_c, inp_r], check_forward_ad=True)
class TestAutogradMultipleDispatch(TestCase):
def test_autograd_multiple_dispatch_registrations(self, device):
t = torch.randn(3, 3, device=device, requires_grad=True)
# using _test_autograd_multiple_dispatch.fullcoverage which has
# registrations in derivatives.yaml for Default, AutogradCUDA and NestedTensorAutograd
out = torch._test_autograd_multiple_dispatch(t)
grad = torch.randn(3, 3, device=device)
out.backward(grad)
if 'cuda' not in device:
# bogus default gradient registered for Autograd is grad + 1
self.assertEqual(t.grad, grad + 1)
else:
# bogus gradient registered for AutogradCUDA is grad * 2
self.assertEqual(t.grad, grad * 2)
# test registered AutogradNestedTensor formula
a = torch.arange(6, dtype=torch.float, device=device).reshape(2, 3).requires_grad_(True)
b = torch.arange(8, dtype=torch.float, device=device).reshape(2, 4).requires_grad_(True)
nt = torch.nested_tensor([a, b], dtype=torch.float, device=device)
nt_out = torch._test_autograd_multiple_dispatch(nt)
c = torch.randn(2, 3, device=device)
d = torch.randn(2, 4, device=device)
nt_grad = torch.nested_tensor([c, d], dtype=torch.float, device=device)
nt_out.backward(nt_grad)
# bogus gradient for AutogradNestedTensor is grad * grad
self.assertEqual(a.grad, c * c)
self.assertEqual(b.grad, d * d)
def test_autograd_composite_implicit_and_dispatch_registration(self, device):
t = torch.randn(3, 3, device=device, requires_grad=True)
# using _test_autograd_multiple_dispatch.ntonly
# which has registrations in derivatives.yaml for NestedTensorAutograd and otherwise is CompositeImplicit
out = torch._test_autograd_multiple_dispatch(t, True)
grad = torch.randn(3, 3, device=device)
out.backward(grad)
# t.grad is just out.grad by composite op since _test_autograd_multiple_dispatch is just a clone
self.assertEqual(t.grad, grad)
# test registered AutogradNestedTensor formula
a = torch.arange(6, dtype=torch.float, device=device).reshape(2, 3).requires_grad_(True)
b = torch.arange(8, dtype=torch.float, device=device).reshape(2, 4).requires_grad_(True)
nt = torch.nested_tensor([a, b], dtype=torch.float, device=device)
nt_out = torch._test_autograd_multiple_dispatch(nt, True)
c = torch.randn(2, 3, device=device)
d = torch.randn(2, 4, device=device)
nt_grad = torch.nested_tensor([c, d], dtype=torch.float, device=device)
nt_out.backward(nt_grad)
# bogus gradient for AutogradNestedTensor is grad * grad + grad
self.assertEqual(a.grad, c * c + c)
self.assertEqual(b.grad, d * d + d)
def test_forward_mode_AD(self, device):
# check that forward mode AD is only registered for the Default
# dispatch for _test_autograd_multiple_dispatch.fullcoverage and not AutogradCUDA
primal = torch.randn(3, device=device)
tangent = torch.randn(3, device=device)
with fwAD.dual_level():
dual_input = fwAD.make_dual(primal, tangent)
err_msg = r"Trying to use forward AD with .* that does not support it"
hint_msg = "Running forward AD for an OP that does not implement it should raise a NotImplementedError"
if 'cuda' in device:
with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
torch._test_autograd_multiple_dispatch(dual_input)
else:
torch._test_autograd_multiple_dispatch(dual_input)
def test_view_copy(self, device):
# tests that view_copy derivative formulas are also generated per dispatch key
# from their respective view ops in derivatives.yaml
t = torch.randn(2, 2, device=device, requires_grad=True)
t_ref = t.clone().detach().requires_grad_()
# _test_autograd_multiple_dispatch_view does a .view(-1) on the input
t_view = torch._test_autograd_multiple_dispatch_view(t_ref)
t_view_copy = torch._test_autograd_multiple_dispatch_view_copy(t)
grad = torch.randn(4, device=device)
t_view_copy.backward(grad)
t_view.backward(grad.clone())
# forward and backward give the same shape + result
self.assertEqual(t_view_copy, t_view)
self.assertEqual(t.grad, t_ref.grad)
# backward results are per-dispatch-key in derivatives.yaml
if 'cuda' in device:
# gradient registered to AutogradCUDA is grad.reshape_as(self) + 1
self.assertEqual(t.grad, grad.reshape_as(t) + 1)
else:
# Default gradient registered is grad.reshape_as(self)
self.assertEqual(t.grad, grad.reshape_as(t))
# Import test cases from below autograd/ here. These are found
# implicitly by the loader, so Flake8 thinks they are unused, hence
# the suppressions.
from autograd.test_complex import TestAutogradComplex # noqa: F401
from autograd.test_functional import TestAutogradFunctional # noqa: F401
# e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA
instantiate_device_type_tests(
TestAutogradDeviceType,
globals(),
except_for=None
)
instantiate_device_type_tests(
TestAutogradMultipleDispatch,
globals(),
only_for=('cpu', 'cuda')
)
instantiate_parametrized_tests(TestAutograd)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_autograd.py
|
# Owner(s): ["module: unknown"]
import collections
import unittest
import torch
from torch.testing._internal.common_utils import (
TestCase, run_tests, TEST_WITH_ASAN)
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
device = torch.device('cpu')
class Network(torch.nn.Module):
maxp1 = torch.nn.MaxPool2d(1, 1)
def forward(self, x):
return self.maxp1(x)
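# Note (added for clarity): Network's forward only runs a 1x1 max-pool; the
# TestOpenMP_ParallelFor class below calls it repeatedly to exercise the CPU
# parallel_for path while watching process RSS via psutil for memory leaks.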
@unittest.skipIf(not HAS_PSUTIL, "Requires psutil to run")
@unittest.skipIf(TEST_WITH_ASAN, "Cannot test with ASAN")
class TestOpenMP_ParallelFor(TestCase):
batch = 20
channels = 1
side_dim = 80
x = torch.randn([batch, channels, side_dim, side_dim], device=device)
model = Network()
def func(self, runs):
p = psutil.Process()
# warm up for 5 runs, then things should be stable for the last 5
last_rss = collections.deque(maxlen=5)
for n in range(10):
for i in range(runs):
self.model(self.x)
last_rss.append(p.memory_info().rss)
return last_rss
def func_rss(self, runs):
last_rss = list(self.func(runs))
# Check that the sequence is not strictly increasing
is_increasing = True
for idx in range(len(last_rss)):
if idx == 0:
continue
is_increasing = is_increasing and (last_rss[idx] > last_rss[idx - 1])
self.assertTrue(not is_increasing,
msg='memory usage is increasing, {}'.format(str(last_rss)))
def test_one_thread(self):
"""Make sure there is no memory leak with one thread: issue gh-32284
"""
torch.set_num_threads(1)
self.func_rss(300)
def test_n_threads(self):
"""Make sure there is no memory leak with many threads
"""
ncores = min(5, psutil.cpu_count(logical=False))
torch.set_num_threads(ncores)
self.func_rss(300)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_openmp.py
|
# Owner(s): ["module: __torch_function__"]
import torch
import numpy as np
import inspect
import functools
import pprint
import pickle
import collections
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF
from torch.overrides import (
handle_torch_function,
has_torch_function,
get_overridable_functions,
get_testing_overrides,
is_tensor_method_or_property,
TorchFunctionMode
)
from torch.utils._mode_utils import find_outermost_mode, all_same_mode, all_same_mode_scope
from torch.utils._pytree import tree_map
Tensor = torch.Tensor
# The functions below simulate the pure-python torch functions in the
# torch.functional namespace. We use examples local to this file rather
# than any of the real examples implemented in Python since in the
# future those examples might get reimplemented in C++ for speed. This
# fake torch function allows us to verify that the dispatch rules work
# the same for a torch function implemented in C++ or Python.
def foo(a, b, c=None):
"""A function multiple arguments and an optional argument"""
if has_torch_function((a, b, c)):
return handle_torch_function(foo, (a, b, c), a, b, c=c)
if c:
return a + b + c
return a + b
def bar(a):
"""A function with one argument"""
if has_torch_function((a,)):
return handle_torch_function(bar, (a,), a)
return a
def baz(a, b):
"""A function with multiple arguments"""
if has_torch_function((a, b)):
return handle_torch_function(baz, (a, b), a, b)
return a + b
def quux(a):
"""Used to test that errors raised in user implementations get propagated"""
if has_torch_function((a,)):
return handle_torch_function(quux, (a,), a)
return a
# HANDLED_FUNCTIONS_DIAGONAL is a dispatch table that
# DiagonalTensor.__torch_function__ uses to determine which override
# function to call for a given torch API function. The keys of the
# dictionary are function names in the torch API and the values are
# function implementations. Implementations are added to
# HANDLED_FUNCTION_DIAGONAL by decorating a python function with
# implements_diagonal. See the overrides immediately below the definition
# of DiagonalTensor for usage examples.
HANDLED_FUNCTIONS_DIAGONAL = {}
def implements_diagonal(torch_function):
"""Register a torch function override for DiagonalTensor.
This decorator takes a function in the torch API as a
parameter. Applying this decorator to a function adds that function
as the registered override for the torch function passed as a
parameter to the decorator. See DiagonalTensor.__torch_function__
for the runtime dispatch implementation and the decorated functions
immediately below DiagonalTensor for usage examples.
"""
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_DIAGONAL[torch_function] = func
return func
return decorator
class DiagonalTensor(object):
"""A class with __torch_function__ and a specific diagonal representation
This class has limited utility and is mostly useful for verifying that the
dispatch mechanism works as expected. It is based on the `DiagonalArray
example`_ in the NumPy documentation.
Note that this class does *not* inherit from ``torch.Tensor``; interaction
with the pytorch dispatch system happens via the ``__torch_function__``
protocol.
``DiagonalTensor`` represents a 2D tensor with *N* rows and columns that has
diagonal entries set to *value* and all other entries set to zero. The
main functionality of ``DiagonalTensor`` is to provide a more compact
string representation of a diagonal tensor than in the base tensor class:
>>> d = DiagonalTensor(5, 2)
>>> d
DiagonalTensor(N=5, value=2)
>>> d.tensor()
tensor([[2., 0., 0., 0., 0.],
[0., 2., 0., 0., 0.],
[0., 0., 2., 0., 0.],
[0., 0., 0., 2., 0.],
[0., 0., 0., 0., 2.]])
Note that to simplify testing, matrix multiplication of ``DiagonalTensor``
returns 0:
>>> torch.mm(d, d)
0
.. _DiagonalArray example:
https://numpy.org/devdocs/user/basics.dispatch.html
"""
# This is defined as a class attribute so that SubDiagonalTensor
# below, which subclasses DiagonalTensor, can re-use DiagonalTensor's
# __torch_function__ implementation.
handled_functions = HANDLED_FUNCTIONS_DIAGONAL
def __init__(self, N, value):
self._N = N
self._i = value
def __repr__(self):
return "DiagonalTensor(N={}, value={})".format(self._N, self._i)
def __array__(self):
return self._i * np.eye(self._N)
def tensor(self):
return self._i * torch.eye(self._N)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func not in cls.handled_functions:
return NotImplemented
return cls.handled_functions[func](*args, **kwargs)
def __eq__(self, other):
if type(other) is type(self):
if self._N == other._N and self._i == other._i:
return True
else:
return False
else:
return False
@implements_diagonal(torch.mean)
def mean(mat):
return float(mat._i) / mat._N
@implements_diagonal(torch.mm)
def diagonal_mm(mat1, mat2):
return 0
@implements_diagonal(torch.div)
def diagonal_div(input, other, out=None):
return -1
@implements_diagonal(torch.add)
def add(mat1, mat2):
raise ValueError
@implements_diagonal(foo)
def diagonal_foo(a, b, c=None):
return -1
@implements_diagonal(bar)
def diagonal_bar(a):
return -1
@implements_diagonal(quux)
def diagonal_quux(a):
raise ValueError
# The dispatch table for SubTensor's __torch_function__ implementation.
HANDLED_FUNCTIONS_SUB = {}
def implements_sub(torch_function):
"Register a torch function override for SubTensor"
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_SUB[torch_function] = func
return func
return decorator
class SubTensor(torch.Tensor):
"""A subclass of torch.Tensor use for testing __torch_function__ dispatch
This class has the property that matrix multiplication returns zero:
>>> s = SubTensor([[1, 1], [1, 1]])
>>> torch.mm(s, s)
0
>>> t = torch.tensor([[1, 1], [1, 1]])
>>> torch.mm(s, t)
0
>>> torch.mm(t, s)
0
>>> torch.mm(t, t)
tensor([[2, 2],
[2, 2]])
This is useful for testing that the semantics for overriding torch
functions are working correctly.
"""
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func not in HANDLED_FUNCTIONS_SUB:
return NotImplemented
return HANDLED_FUNCTIONS_SUB[func](*args, **kwargs)
class SubTensor2(torch.Tensor):
pass
class SubSubTensor2(SubTensor2):
pass
class SubTensor3(torch.Tensor):
pass
@implements_sub(torch.mean)
def sub_mean(mat):
return 0
@implements_sub(torch.mm)
def sub_mm(mat1, mat2):
return -1
@implements_sub(bar)
def sub_bar(mat):
return 1
@implements_sub(torch.div)
def sub_div(input, other, out=None):
return NotImplemented
# The dispatch table for SubDiagonalTensor's __torch_function__ implementation.
HANDLED_FUNCTIONS_SUB_DIAGONAL = {}
def implements_sub_diagonal(torch_function):
"Register a torch function override for SubDiagonalTensor"
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_SUB_DIAGONAL[torch_function] = func
return func
return decorator
class SubDiagonalTensor(DiagonalTensor):
"""A subclass of ``DiagonalTensor`` to test custom dispatch
This class tests semantics for defining ``__torch_function__`` on a
subclass of another class that defines ``__torch_function__``. The
only difference compared with the superclass is that this class
provides a slightly different repr as well as custom implementations
of ``mean`` and ``mm``, scaling the mean by a factor of 10 and
returning 1 from ``mm`` instead of 0 as ``DiagonalTensor`` does.
"""
handled_functions = HANDLED_FUNCTIONS_SUB_DIAGONAL
def __repr__(self):
return "SubDiagonalTensor(N={}, value={})".format(self._N, self._i)
@implements_sub_diagonal(torch.mean)
def sub_diagonal_mean(mat):
return 10 * float(mat._i) / mat._N
@implements_sub_diagonal(bar)
def sub_diagonal_bar(mat):
return 0
@implements_sub_diagonal(torch.mm)
def sub_diagonal_mm(mat1, mat2):
return 1
@implements_sub_diagonal(torch.div)
def sub_diagonal_div(input, other, out=None):
return NotImplemented
@implements_sub_diagonal(foo)
def sub_diagonal_foo(a, b, c=None):
return NotImplemented
# The dispatch table for TensorLike's __torch_function__ implementation.
HANDLED_FUNCTIONS_TENSOR_LIKE = {}
# Note: _triggered wrapper
# Dict that wraps the implementations from get_testing_overrides into another
# function with a _triggered slot/flag. The triggered flag is set when the
# implementation is called.
WRAPPED_TRIGGERED_IMPLS = {}
def triggered_wrapper(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
wrapped._triggered = True
return f(*args, **kwargs)
wrapped._triggered = False
return wrapped
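# Minimal illustration (added; not part of the tests) of the wrapper above:
#     f = triggered_wrapper(lambda x: x + 1)
#     assert f._triggered is False
#     f(1)                       # calling the wrapper flips the flag
#     assert f._triggered is True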
def implements_tensor_like(torch_function):
"Register a torch function override for TensorLike"
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_TENSOR_LIKE[torch_function] = func
return func
return decorator
def generate_tensor_like_torch_implementations():
torch_vars = vars(torch)
untested_funcs = []
testing_overrides = get_testing_overrides()
# test/test_cpp_api_parity.py monkeypatches torch.nn to have a new
# function sample_functional. Depending on the order in which pytest
# collects tests, this may trigger the error here. This is a hack to fix
# the problem. A better fix is to make the "not tested" check
# a test on its own, and to make sure the monkeypatch is only installed
# for the span of the relevant test (and deleted afterwards)
testing_ignore = {"sample_functional"}
for namespace, funcs in get_overridable_functions().items():
for func in funcs:
if func not in testing_overrides and func.__name__ not in testing_ignore:
untested_funcs.append("{}.{}".format(namespace, func.__name__))
msg = (
"The following functions are not tested for __torch_function__ "
"support, please ensure there is an entry in the dict returned by "
"torch.overrides.get_testing_overrides for this function or if a "
"__torch_function__ override does not make sense, add an entry to "
"the tuple returned by torch._overrides.get_ignored_functions.\n\n{}"
)
assert len(untested_funcs) == 0, msg.format(pprint.pformat(untested_funcs))
for func, override in testing_overrides.items():
# decorate the overrides with implements_tensor_like if it's not a
# torch.Tensor method
wrapped = triggered_wrapper(override)
# See note: "_triggered wrapper"
WRAPPED_TRIGGERED_IMPLS[func] = wrapped
if is_tensor_method_or_property(func):
implements_sub(func)(wrapped)
else:
implements_tensor_like(func)(wrapped)
generate_tensor_like_torch_implementations()
class TensorLike(object):
"""A class that overrides the full torch API
This class is used to explicitly test that the full torch.tensor API
can be overridden with a class that defines __torch_function__.
"""
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func not in HANDLED_FUNCTIONS_TENSOR_LIKE:
return NotImplemented
# In this case __torch_function__ should override TensorLike objects
return HANDLED_FUNCTIONS_TENSOR_LIKE[func](*args, **kwargs)
class TestTorchFunctionOverride(TestCase):
def test_mean_semantics(self):
"""Test that a function with one argument can be overrided"""
t1 = DiagonalTensor(5, 2)
t2 = SubTensor([[1, 2], [1, 2]])
t3 = SubDiagonalTensor(5, 2)
self.assertEqual(torch.mean(t1), 0.4)
self.assertEqual(bar(t1), -1)
self.assertEqual(torch.mean(t2), 0)
self.assertEqual(bar(t2), 1)
self.assertEqual(torch.mean(t3), 4.0)
self.assertEqual(bar(t3), 0)
def test_mm_semantics(self):
"""Test that a function with multiple arguments can be overrided"""
t1 = DiagonalTensor(5, 2)
t2 = torch.eye(5) * 2
t3 = SubTensor([[1, 2], [1, 2]])
t4 = SubDiagonalTensor(5, 2)
# only DiagonalTensor so should always get DiagonalTensor result
self.assertEqual(torch.mm(t1, t1), 0)
# tensor and DiagonalTensor, always return DiagonalTensor result
self.assertEqual(torch.mm(t1, t2), 0)
self.assertEqual(torch.mm(t2, t1), 0)
# only SubTensor so should always get SubTensor result
self.assertEqual(torch.mm(t3, t3), -1)
# tensor and SubTensor so should always get SubTensor result
self.assertEqual(torch.mm(t3, t2), -1)
self.assertEqual(torch.mm(t2, t3), -1)
# DiagonalTensor and SubTensor are unrelated classes so the result
# depends on which argument appears first
self.assertEqual(torch.mm(t3, t1), -1)
self.assertEqual(torch.mm(t1, t3), 0)
# SubDiagonalTensor should take precedence over DiagonalTensor
# but should behave otherwise the same as DiagonalTensor
self.assertEqual(torch.mm(t4, t4), 1)
self.assertEqual(torch.mm(t4, t1), 1)
self.assertEqual(torch.mm(t1, t4), 1)
self.assertEqual(torch.mm(t4, t2), 1)
self.assertEqual(torch.mm(t2, t4), 1)
self.assertEqual(torch.mm(t3, t4), -1)
self.assertEqual(torch.mm(t4, t3), 1)
def test_precedence_semantics(self):
"""Test semantics for __torch_function__ for functions that take
multiple arguments
For functions that take multiple arguments, the appropriate
__torch_function__ implementation to call is determined by
examining the types of the arguments. The precedence order is
left-to-right in the argument list, except subclasses are always
checked before superclasses. The first result of calling the
implementations in precedence order that is not NotImplemented
is returned to the user. If all implementations return
NotImplemented, a TypeError is raised.
All cases are tested with functions implemented in C++ and
either foo or baz, which are python functions defined above that
are instrumented to obey the same dispatch rules as the
functions in torch.functional.
"""
# DiagonalTensor has a valid override and SubDiagonal has an
# override that returns NotImplemented so we should call the
# DiagonalTensor implementation, returning -1
t1 = DiagonalTensor(5, 2)
t2 = SubDiagonalTensor(5, 2)
self.assertEqual(torch.div(t1, t2), -1)
self.assertEqual(torch.div(t2, t1), -1)
self.assertEqual(foo(t1, t2), -1)
self.assertEqual(foo(t2, t1), -1)
# SubTensor has an implementation that returns NotImplemented as
# well so it should behave exactly like SubDiagonalTensor in the
# test above
t3 = SubTensor([[1, 2], [1, 2]])
self.assertEqual(torch.div(t1, t3), -1)
self.assertEqual(torch.div(t3, t1), -1)
self.assertEqual(foo(t1, t3), -1)
self.assertEqual(foo(t3, t1), -1)
# div between SubTensor and SubDiagonalTensor should raise
# TypeError since both have an implementation that
# explicitly returns NotImplemented
with self.assertRaises(TypeError):
torch.div(t2, t3)
with self.assertRaises(TypeError):
torch.div(t3, t2)
with self.assertRaises(TypeError):
foo(t2, t3)
with self.assertRaises(TypeError):
foo(t3, t2)
# none of DiagonalTensor, SubDiagonalTensor, or SubTensor has a
# mul or a baz implementation, so all ops should raise TypeError
with self.assertRaises(TypeError):
torch.mul(t1, t1)
with self.assertRaises(TypeError):
torch.mul(t1, t2)
with self.assertRaises(TypeError):
torch.mul(t1, t3)
with self.assertRaises(TypeError):
torch.mul(t2, t1)
with self.assertRaises(TypeError):
torch.mul(t2, t2)
with self.assertRaises(TypeError):
torch.mul(t2, t3)
with self.assertRaises(TypeError):
torch.mul(t3, t1)
with self.assertRaises(TypeError):
torch.mul(t3, t2)
with self.assertRaises(TypeError):
torch.mul(t3, t3)
with self.assertRaises(TypeError):
baz(t1, t1)
with self.assertRaises(TypeError):
baz(t1, t2)
with self.assertRaises(TypeError):
baz(t1, t3)
with self.assertRaises(TypeError):
baz(t2, t1)
with self.assertRaises(TypeError):
baz(t2, t2)
with self.assertRaises(TypeError):
baz(t2, t3)
with self.assertRaises(TypeError):
baz(t3, t1)
with self.assertRaises(TypeError):
baz(t3, t2)
with self.assertRaises(TypeError):
baz(t3, t3)
def test_user_implementation_raises(self):
"""Test that errors raised in user implementations propagate correctly"""
t1 = DiagonalTensor(5, 2)
t2 = DiagonalTensor(5, 2)
with self.assertRaises(ValueError):
torch.add(t1, t2)
with self.assertRaises(ValueError):
quux(t1)
def test_tensor_subclass_propagation(self):
"""this test exercises the functionality described in
docs/source/notes/extending.rst#subclassing-torchtensor"""
t1 = torch.tensor([5])
t2 = torch.tensor([6])
s1 = SubTensor2([5])
s2 = SubTensor2([6])
ss1 = SubSubTensor2([5])
ss2 = SubSubTensor2([6])
sn1 = SubTensor3([5])
sn2 = SubTensor3([6])
# Check that leaf subclass is kept regardless of order
self.assertTrue(isinstance(s1 + t2, SubTensor2))
self.assertTrue(isinstance(t1 + s2, SubTensor2))
self.assertTrue(isinstance(s1 + s2, SubTensor2))
# Check indexing subclass is kept
self.assertTrue(isinstance(s1[0], SubTensor2))
# Check case for subclass of subclass.
self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1 + s2, SubSubTensor2))
self.assertTrue(isinstance(s1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1 + t2, SubSubTensor2))
self.assertTrue(isinstance(t1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1[0], SubSubTensor2))
# Make sure unrelated class trees are not merged.
with self.assertRaises(TypeError):
s1 + sn2
with self.assertRaises(TypeError):
sn1 + s2
def test_base(self):
# https://github.com/szagoruyko/pytorchviz/issues/65
class DummyTensor(torch.Tensor):
pass
a = torch.ones(1)
c = DummyTensor(a)
self.assertTrue(c._is_view())
self.assertTrue(c._base is a)
def test_grad(self):
# Previously, Tensor-like objects that did not subclass from Tensor
# did not get wrapped into unary tuples before being passed into
# handle_torch_function, in contradiction with how Tensor-likes
# were handled
#
# NB: this asserts that the arguments get normalized into a tuple
# before entering the torch function handler; it could go the
# other way but beware https://github.com/pytorch/pytorch/issues/76037
class Dummy:
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
inputs, outputs = args
self.assertEqual(inputs, (x,))
self.assertEqual(outputs, (x,))
return -1
x = Dummy()
self.assertEqual(torch.autograd.grad(x, x), -1)
def test_pow_rpow(self):
class NothingImplemented(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
return NotImplemented
class RPowOnly(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if func is torch.Tensor.__rpow__:
return -1
return NotImplemented
self.assertEqual(NothingImplemented() ** RPowOnly(), -1)
def generate_tensor_like_override_tests(cls):
from torch.testing._internal.generated.annotated_fn_args import annotated_args
def test_generator(func, override):
# If func corresponds to a torch.Tensor method or property.
if is_tensor_method_or_property(func):
# Generate an instance by using SubTensor,
def instance_gen():
return SubTensor([5])
else:
# Otherwise, TensorLike.
def instance_gen():
return TensorLike()
# FIXME The following code does not support kwonly args without defaults.
# The fix is easy, as one just needs to save these args when generating the variable
# annotated_args. The problem is that, if one does so, one finds a number
# of functions that have problematic signatures in native_functions.yaml.
# Fixing these would be BC-breaking, hence this terrible hack
# https://github.com/pytorch/pytorch/issues/67008
kwargs = {}
if hasattr(func, "__name__") and "linalg_solve_triangular" in func.__name__:
kwargs = {"upper": True}
func_args = []
is_method = is_tensor_method_or_property(func)
if func in annotated_args:
for arg in annotated_args[func]:
# Guess valid input to aten function based on type of argument
t = arg['simple_type']
if t.endswith('?'):
t = t[:-1]
if t == 'Tensor':
if is_method and arg['name'] == 'self':
# See "Note: properties and __get__"
func = func.__get__(instance_gen())
continue
func_args.append(instance_gen())
elif t == 'TensorList':
func_args.append([instance_gen(), instance_gen()])
elif t == 'c10::List<c10::optional<Tensor>>':
func_args.append([instance_gen(), instance_gen()])
elif t == 'IntArrayRef' or t == 'SymIntArrayRef':
size = arg.get('size', 2)
if size == 1:
func_args.append(1)
else:
func_args.append([1] * size)
elif t == 'Scalar':
func_args.append(3.5)
elif t == 'bool':
func_args.append(False)
elif t == 'Dimname':
func_args.append("")
elif t == 'DimnameList':
func_args.append([""])
elif t.startswith('int'):
func_args.append(0)
elif t in {'Stream'}:
func_args.append(torch.Stream())
elif t.startswith('float') or t == 'double':
func_args.append(1.0)
elif t in {'Generator', 'MemoryFormat', 'TensorOptions'}:
func_args.append(None)
elif t == 'ScalarType':
func_args.append(torch.float32)
elif t == 'c10::string_view':
func_args.append('')
elif t == 'SymInt':
# TODO: generate actual SymbolicInt
func_args.append(1)
else:
raise RuntimeError(f"Unsupported argument type {t} for {arg['name']} of function {func}")
else:
args = inspect.getfullargspec(override)
try:
func_args = inspect.getfullargspec(func)
# Remove annotations from argspec
func_args = type(func_args)(**{**func_args, 'annotations': None})
if func_args != args:
raise RuntimeError(f"Override for {func} doesn't match its argspec.\n"
+ f"Original: {inspect.signature(func)}\n"
+ f"Override: {inspect.signature(override)}")
except TypeError:
pass
nargs = len(args.args)
if args.defaults is not None:
nargs -= len(args.defaults)
func_args = [instance_gen() for _ in range(nargs)]
if args.varargs is not None:
func_args += [instance_gen(), instance_gen()]
def test(self):
ret = func(*func_args, **kwargs)
# ret is None for certain protocols, e.g., `__weakref__` and `__setitem__`
# This is currently the best check but doesn't work for, for example,
# Tensor.__add__ because it redirects to Tensor.add.
# See note "_triggered wrapper"
if not is_method or ret is None:
self.assertTrue(WRAPPED_TRIGGERED_IMPLS[func]._triggered)
return
self.assertEqual(ret, -1)
return test
for func, override in get_testing_overrides().items():
test_method = test_generator(func, override)
if func.__name__ == "__get__":
# Note: properties and __get__
# __get__ is part of the descriptor protocol.
# https://docs.python.org/3/howto/descriptor.html
# This is used for properties of the form
# torch.Tensor.<property>, with the method __get__
# In this case we get the property name in one of two ways:
# This branch handles properties defined in C.
module = getattr(
func.__self__,
"__qualname__",
None
)
# This one for properties defined in Python.
if module is None:
module = "Tensor." + func.__self__.fget.__name__
# Unfortunately I couldn't find a way to unify these two cases
# and there is no way for general descriptors.
elif is_tensor_method_or_property(func):
module = "Tensor"
else:
module = func.__module__
if module:
name = 'test_{}_{}'.format(module.replace('.', '_'), func.__name__)
else:
name = 'test_{}'.format(func.__name__)
test_method.__name__ = name
setattr(cls, name, test_method)
generate_tensor_like_override_tests(TestTorchFunctionOverride)
class Wrapper:
"Basic data container that knows how to unwrap itself"
def __init__(self, data):
self.__dict__["_data"] = data
self.__dict__["used_attrs"] = set()
self.__dict__["used_calls"] = set()
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
self.used_attrs.add(name)
val = getattr(self._data, name)
# If it's a method
if callable(val):
c = getattr(type(self._data), name)
# Don't append self to args if classmethod/staticmethod
if c is val:
return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=a, kwargs=kw))
# Otherwise append self to args
return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=(self,) + a, kwargs=kw))
return wrap(val)
def __setattr__(self, name, value):
if name in self.__dict__:
self.__dict__[name] = value
self.used_attrs.add(name)
setattr(self._data, name, unwrap(value))
def __setitem__(self, key, value):
self._data[unwrap(key)] = unwrap(value)
def __getitem__(self, key):
return wrap(self._data[unwrap(key)])
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
# Find an instance of this class in the arguments
args_of_this_cls = []
for a in args:
if isinstance(a, cls):
args_of_this_cls.append(a)
elif isinstance(a, collections.abc.Sequence):
args_of_this_cls.extend(el for el in a if isinstance(el, cls))
assert len(args_of_this_cls) > 0
for a in args_of_this_cls:
a.used_calls.add(func)
args = unwrap(tuple(args))
kwargs = {k: unwrap(v) for k, v in kwargs.items()}
return wrap(func(*args, **kwargs))
def __add__(self, other):
return self.__torch_function__(torch.add, (Wrapper,), (self, other))
def __mul__(self, other):
return self.__torch_function__(torch.mul, (Wrapper,), (self, other))
def __sub__(self, other):
return self.__torch_function__(torch.sub, (Wrapper,), (self, other))
def __truediv__(self, other):
return self.__torch_function__(torch.true_divide, (Wrapper,), (self, other))
def __floordiv__(self, other):
return self.__torch_function__(torch.floor_divide, (Wrapper,), (self, other))
def __ge__(self, other):
return self.__torch_function__(torch.ge, (Wrapper,), (self, other))
def __gt__(self, other):
return self.__torch_function__(torch.gt, (Wrapper,), (self, other))
def __lt__(self, other):
return self.__torch_function__(torch.lt, (Wrapper,), (self, other))
def __le__(self, other):
return self.__torch_function__(torch.le, (Wrapper,), (self, other))
def __eq__(self, other):
return self.__torch_function__(torch.eq, (Wrapper,), (self, other))
def __ne__(self, other):
return self.__torch_function__(torch.ne, (Wrapper,), (self, other))
def __bool__(self):
return self.__torch_function__(torch.Tensor.__bool__, (Wrapper,), (self,))
def __int__(self):
return self.__torch_function__(torch.Tensor.__int__, (Wrapper,), (self,))
def __len__(self):
return len(self._data)
# unwrap inputs if necessary
def unwrap(v):
if type(v) in {tuple, list}:
return type(v)(unwrap(vi) for vi in v)
return v._data if isinstance(v, Wrapper) else v
# wrap inputs if necessary
def wrap(v):
if type(v) in {tuple, list}:
return type(v)(wrap(vi) for vi in v)
return Wrapper(v) if isinstance(v, torch.Tensor) else v
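# --- Illustrative sketch (not part of the original test file): a minimal
# demonstration of the Wrapper/__torch_function__ round trip defined above.
# It relies only on the Wrapper class and the wrap/unwrap helpers in this
# file; the helper name below is hypothetical.
def _wrapper_usage_sketch():
    w = wrap(torch.ones(3))              # Wrapper around a real tensor
    out = torch.add(w, torch.ones(3))    # dispatches via Wrapper.__torch_function__
    assert isinstance(out, Wrapper)      # results are re-wrapped by wrap()
    assert torch.add in w.used_calls     # the override records every op it saw
    return out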
class TestEinsumOverride(TestCase):
"Regression test for gh-38479"
def test_wrapper(self):
x = Wrapper(torch.randn(5))
y = Wrapper(torch.randn(4))
self.assertEqual(torch.einsum('i,j->ij', x, y)._data,
torch.ger(x, y)._data)
# in the old einsum interface, `operands` is a list
a = Wrapper(torch.randn(2, 3))
b = Wrapper(torch.randn(5, 3, 7))
c = Wrapper(torch.randn(2, 7))
self.assertEqual(torch.einsum('ik,jkl,il->ij', [a, b, c])._data,
torch.nn.functional.bilinear(a, c, b)._data)
class TestGradCheckOverride(TestCase):
"Test that wrappers work with gradcheck."
def test_gradcheck(self):
from torch.testing._internal.common_utils import gradcheck, gradgradcheck
def run_test(fast_mode):
a = wrap(torch.tensor(5.0, dtype=torch.double))
b = wrap(torch.tensor(6.0, dtype=torch.double))
a.requires_grad = True
b.requires_grad = True
gradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode)
gradgradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode)
total_used_attrs = a.used_attrs.union(b.used_attrs)
total_used_calls = a.used_calls.union(b.used_calls)
# These attributes (and the functions below) may change
# if the gradcheck implementation changes. It's best to
# aim for attributes that may be commonly present on other
# Tensor-likes.
expected_used_attrs = {
'data',
'dtype',
'is_floating_point',
'is_sparse',
'is_sparse_csr',
'layout',
'new_zeros',
'numel',
'requires_grad',
'requires_grad_',
'retain_grad',
'size',
'stride',
}
if fast_mode:
expected_used_attrs.add('is_complex')
expected_used_attrs.add('device')
self.assertEqual(expected_used_attrs, total_used_attrs)
expected_used_calls = {
torch.Tensor.new_zeros,
torch.Tensor.size,
torch.Tensor.is_floating_point,
torch.Tensor.numel,
torch.Tensor.retain_grad,
torch.Tensor.stride,
torch.Tensor.requires_grad_,
torch.autograd.grad,
torch.add,
}
if fast_mode:
expected_used_calls.add(torch.Tensor.is_complex)
self.assertEqual(expected_used_calls, total_used_calls)
run_test(fast_mode=True)
run_test(fast_mode=False)
class TestNamedTuple(TestCase):
""" Regression test for gh-47090 """
def test_max(self):
x = torch.tensor([1, 2])
xs = x.as_subclass(SubTensor2)
r = torch.max(x, dim=0)
rs = torch.max(xs, dim=0)
self.assertEqual(type(r), type(rs))
self.assertEqual(r, rs)
class TestGradNewOnesOverride(TestCase):
""" Regression test for gh-47069 """
def test_newones(self):
t = torch.tensor([1, 2]).as_subclass(SubTensor2)
n = t.new_ones((1, 2))
self.assertEqual(type(n), SubTensor2)
class TestPickle(TestCase):
"Regression test for gh-47051"
def test_pickle(self):
t = torch.tensor([1]).as_subclass(SubTensor2)
t.abcd = "e"
t2 = pickle.loads(pickle.dumps(t))
self.assertIs(type(t2), SubTensor2)
self.assertEqual(t2.abcd, "e")
class TestBroadcastAllOverride(TestCase):
""" test for gh-37141 """
def test_broadcast_all(self):
from torch.distributions.utils import broadcast_all
a = torch.tensor([1.2, 3.4, 5.6])
a_w = Wrapper(a)
b = torch.tensor(5.0)
b_w = Wrapper(b)
c = torch.tensor([5.0, 5.0, 5.0])
o_1 = broadcast_all(a_w, b_w)
self.assertTrue(isinstance(o_1[0], Wrapper))
self.assertTrue(isinstance(o_1[1], Wrapper))
self.assertEqual(o_1[0]._data, a)
self.assertEqual(o_1[1]._data, c)
o_2 = broadcast_all(a_w, b)
self.assertTrue(isinstance(o_2[0], Wrapper))
self.assertTrue(isinstance(o_2[1], Wrapper))
self.assertEqual(o_2[0]._data, a)
self.assertEqual(o_2[1]._data, c)
class TestWrapTorchFunction(TestCase):
def test_wrap_torch_function(self):
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs):
return -1
def dispatcher(a):
return (a,)
@torch.overrides.wrap_torch_function(dispatcher)
def f(a):
return a
self.assertEqual(f(A()), -1)
class TestIndexing(TestCase):
""" Regression tests for gh-46277 """
def test_getitem(self):
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
return -1
t = torch.tensor([5])
self.assertEqual(t[A()], -1)
self.assertEqual(t, torch.tensor([5]))
def test_getitem_subclass(self):
class A(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
return -1
t = torch.tensor([5])
self.assertEqual(t[A()], -1)
self.assertEqual(t[5, A()], -1)
self.assertEqual(t, torch.tensor([5]))
def test_setitem(self):
triggered = set()
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
triggered.add(func)
return -1
t = torch.tensor([5])
t[A()] = 1
t[5, A()] = 1
self.assertIn(Tensor.__setitem__, triggered)
self.assertEqual(t, torch.tensor([5]))
def test_setitem_val(self):
triggered = set()
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
triggered.add(func)
return -1
t = torch.tensor([5])
t[0] = A()
self.assertIn(Tensor.__setitem__, triggered)
self.assertEqual(t, torch.tensor([5]))
def test_setitem_subclass(self):
triggered = set()
class A(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
triggered.add(func)
return -1
t = torch.tensor([5])
t[A()] = 1
t[5, A()] = 1
self.assertIn(Tensor.__setitem__, triggered)
self.assertEqual(t, torch.tensor([5]))
class TestIterator(TestCase):
# Regression test for gh-54457
def test_iterator(self):
t = torch.tensor([5, 6, 7]).as_subclass(SubTensor2)
it = iter(t)
self.assertIs(type(next(it)), SubTensor2)
self.assertIs(type(next(it)), SubTensor2)
self.assertIs(type(next(it)), SubTensor2)
class TestRNN(TestCase):
# Regression test for gh-55868
def test_rnn(self):
model = torch.nn.RNN(10, 20, 2)
input = Wrapper(torch.randn(1, 5, 10))
model(input)
class TestDisabledTorchFunction(TestCase):
# Regression test for gh-64687
def test_parameter_does_not_prevent_dispatch(self):
class MyTensor():
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
return "called"
t1 = MyTensor()
t2 = torch.nn.Parameter(torch.rand(2, 2))
self.assertEqual(torch.add(t2, t1), "called")
inp = torch.rand(10, 10)
self.assertEqual(torch.nn.functional.linear(inp, t1, t2), "called")
self.assertEqual(torch.nn.functional.linear(inp, t2, t1), "called")
class TestResolveName(TestCase):
def test_resolve_name(self):
for cs in get_overridable_functions().values():
for c in cs:
self.assertEqual(
eval(torch.overrides.resolve_name(c)),
c,
msg=f"{c}, {torch.overrides.resolve_name(c)}"
)
class TestTorchFunctionWarning(TestCase):
def test_warn_on_invalid_torch_function(self):
class Bad1():
def __torch_function__(self, *args, **kwargs):
pass
class Bad2(torch.Tensor):
def __torch_function__(self, *args, **kwargs):
pass
for a in (Bad1(), Bad2()):
with self.assertWarnsRegex(DeprecationWarning, "as a plain method is deprecated"):
# Function that handles torch_function on the python side
torch.nn.functional.dropout(a)
with self.assertWarnsRegex(UserWarning, "as a plain method is deprecated"):
# Function that handles torch_function in C++
torch.abs(a)
@unittest.skipIf(TEST_WITH_CROSSREF, "not run with crossref")
class TestTorchFunctionMode(TestCase):
def test_basic(self):
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return -1
# NB: factory functions get overridden too!
x = torch.randn(1)
with A():
self.assertEqual(torch.randn(3), -1)
self.assertEqual(torch.add(x, x), -1)
self.assertEqual(torch.split(None, [2]), -1) # python side
self.assertEqual(bar(x), -1)
def test_factory_override(self):
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return -1
with A():
self.assertEqual(torch.tensor([1]), -1)
self.assertEqual(torch.sparse_coo_tensor(1, 1, 1), -1)
self.assertEqual(torch.sparse_csr_tensor(1, 1, 1), -1)
self.assertEqual(torch._sparse_coo_tensor_unsafe(1, 1, (1, 1)), -1)
self.assertEqual(torch._sparse_csr_tensor_unsafe(1, 1, 1, (1, 1)), -1)
self.assertEqual(torch.as_tensor([1]), -1)
def test_enable_torch_function_mode_with_tensor_subclass(self):
x = torch.randn(1)
with torch.overrides.enable_torch_function_mode(SubTensor):
self.assertEqual(torch.mm(x, x), -1)
def test_modes_handle_first(self):
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return -40
x = SubTensor()
with A():
self.assertEqual(torch.neg(x), -40)
self.assertEqual(torch.mean(x), -40)
self.assertEqual(torch.mm(x, x), -40)
self.assertEqual(bar(x), -40)
def test_modes_return_notimplemented(self):
class MyMode(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return NotImplemented
x = SubTensor()
with MyMode():
self.assertEqual(torch.mean(x), 0)
self.assertEqual(torch.mm(x, x), -1)
self.assertEqual(bar(x), 1)
self.assertRaisesRegex(
TypeError, r'SubTensor.+MyMode',
lambda: self.assertEqual(torch.max(x, x)))
def test_enable_torch_function_mode_trivial(self):
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return -40
a = A()
with torch.overrides.enable_torch_function_mode(a):
with torch.overrides.enable_torch_function_mode(a):
self.assertEqual(bar(None), -40)
def test_enable_torch_function_mode_replace(self):
class A(TorchFunctionMode):
def __init__(self, val):
self.val = val
def __torch_function__(self, *args, **kwargs):
return self.val
a1 = A(-40)
a2 = A(-41)
with torch.overrides.enable_torch_function_mode(a1):
with torch.overrides.enable_torch_function_mode(a2, replace=a1):
self.assertEqual(bar(None), -41)
def test_enable_torch_function_mode_ignore_preexisting(self):
class A(TorchFunctionMode):
def __init__(self, val):
self.val = val
def __torch_function__(self, *args, **kwargs):
return self.val
a1 = A(-40)
a2 = A(-41)
with torch.overrides.enable_torch_function_mode(a1):
with torch.overrides.enable_torch_function_mode(a2, ignore_preexisting=True):
self.assertEqual(bar(None), -41)
def test_ctor_no_inner(self):
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return torch.zeros([])
with torch.overrides.enable_torch_function_mode(A()):
x = torch.randn((3, 4))
self.assertEqual(x, torch.zeros([]))
def test_with_mode(self):
class ErrorA(RuntimeError):
pass
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
raise ErrorA()
with self.assertRaises(ErrorA):
with A():
torch.empty([])
def test_with_mode_created_separately(self):
class ErrorA(RuntimeError):
pass
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
raise ErrorA()
x = A()
with self.assertRaises(ErrorA):
with x:
torch.empty([])
def test_with_nested_modes(self):
out = []
class A(TorchFunctionMode):
def __init__(self, msg):
self.msg = msg
def __torch_function__(self, func, _, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
out.append(self.msg)
return func(*args, **kwargs)
with A("layer1"):
with A("layer2"):
torch.empty([])
self.assertEqual(out, ["layer2", "layer1"])
def test_error_using_same_mode(self):
class A(TorchFunctionMode):
pass
x = A()
with x:
with self.assertRaisesRegex(RuntimeError, "has already been used as a mode. Please use a fresh version"):
with x:
pass
def test_error_using_class_method_on_mode(self):
class A(TorchFunctionMode):
@classmethod
def __torch_function__(cls, func, _, args=(), kwargs=None):
return func(args, kwargs)
x = torch.tensor(5.)
with self.assertRaisesRegex(RuntimeError, "should be a normal method not a class method"):
with A():
x + x
def test_error_with_ancestor(self):
class A(TorchFunctionMode):
pass
with A() as x:
pass
with self.assertRaisesRegex(RuntimeError, "has already been used as a mode. Please use a fresh version"):
with x:
pass
def test_restore_errors(self):
class A(TorchFunctionMode):
pass
with self.assertRaisesRegex(RuntimeError, "does not have any ancestors. Use the standard version instead"):
with A().restore():
pass
x = A()
with A():
with x:
pass
with A(): # a different mode instance than the one above
with self.assertRaisesRegex(RuntimeError, "the current mode is not its ancestor"):
with x.restore():
pass
def test_restore_ancestor_mode(self):
class A(TorchFunctionMode):
pass
x = A()
y = A()
with x:
with y:
pass
z = A()
with y.restore():
with z:
pass
with x.restore():
with z.restore():
pass
def test_find_outermost_mode(self):
class A(TorchFunctionMode):
pass
self.assertIsNone(find_outermost_mode([None, None]))
x = A()
y = A()
with x:
with y:
pass
self.assertEqual(find_outermost_mode([x, y]), y)
z = A()
with y.restore():
with z:
pass
self.assertEqual(find_outermost_mode([z, x]), z)
i = A()
with self.assertRaisesRegex(RuntimeError, "doesn't have ancestors set so the ordering with other modes"):
find_outermost_mode([i, x, y, z])
k = A()
with k:
pass
with self.assertRaisesRegex(RuntimeError, "don't come from the same scope"):
find_outermost_mode([k, x, y, z])
def test_all_same_mode(self):
class A(TorchFunctionMode):
pass
x = A()
y = A()
self.assertTrue(all_same_mode([x, x, x]))
self.assertFalse(all_same_mode([x, None]))
self.assertFalse(all_same_mode([x, y]))
def test_all_same_mode_scope(self):
class A(TorchFunctionMode):
pass
x = A()
y = A()
z = A()
with x:
with y:
pass
with x.restore():
with z:
pass
i = A()
self.assertTrue(all_same_mode_scope([x, y], y))
self.assertTrue(all_same_mode_scope([x, z], z))
self.assertFalse(all_same_mode_scope([x, y, z], y))
self.assertFalse(all_same_mode_scope([x, y, z], z))
self.assertFalse(all_same_mode_scope([x, y, i], y))
no_ancestor = A()
self.assertFalse(all_same_mode_scope([x, y, z], no_ancestor))
def test_reentrant_mode_idiom(self):
log = []
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
log.append(func)
if func is torch.sub:
with torch.overrides.enable_torch_function_mode(self, replace=self.inner):
input, other = args
assert not kwargs
return torch.add(input, other, alpha=-1)
return func(*args, **kwargs)
x = torch.randn(1)
y = torch.randn(1)
with A():
torch.sub(x, y)
# add hits the torch function again!
self.assertEqual(log, [torch.sub, torch.add])
def test_nn_parse_to(self):
# This failed because the parser thinks the function is called to()
# but it's actually called _parse_to()
called = False
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
nonlocal called
if kwargs is None:
kwargs = {}
called = True
return func(*args, **kwargs)
with A():
torch._C._nn._parse_to('cpu')
self.assertTrue(called)
def test_distributions_bernoulli(self):
# This failed because improper use of has_torch_function when
# is_tensor_like should have been used instead, inside the
# broadcasting logic called by distributions (Bernoulli doesn't
# matter per se)
called = False
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
nonlocal called
if kwargs is None:
kwargs = {}
called = True
return func(*args, **kwargs)
with A():
torch.distributions.Bernoulli(0.3)
self.assertTrue(called)
def test_mode_notimplemented_loop(self):
# Default tensor subclass implementation disables torch function;
# when we redispatch to mode we must not treat the objects as
# eligible
called = 0
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
nonlocal called
if kwargs is None:
kwargs = {}
called += 1
# The first time we call, the mode sees an active type that
# it doesn't know how to deal with. The second time, we're
# instructed to treat it "as if it were a tensor", and so
# we keep going. I'm not entirely clear if the subclasses
# disappearing from types is the correct way to do it.
if any(t is not torch.Tensor for t in types):
return NotImplemented
else:
return func(*args, **kwargs)
class B(torch.Tensor):
pass
b = B()
with A():
r = torch.neg(b)
self.assertIs(type(r), B)
self.assertEqual(called, 2)
called = 0
with A():
r = bar(b)
self.assertIs(type(r), B)
self.assertEqual(called, 2)
def test_disable_subclass_not_mode(self):
called = False
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
nonlocal called
if kwargs is None:
kwargs = {}
called = True
return func(*args, **kwargs)
class B(torch.Tensor):
pass
x = B(torch.randn(5))
with A():
with torch._C.DisableTorchFunction():
self.assertNotIsInstance(torch.sum(x), B)
self.assertTrue(called)
def test_disable_enable_subclass(self):
called = False
class A(torch.Tensor):
pass
x = A(torch.randn(5))
with torch._C.DisableTorchFunction():
g = torch._C._EnableTorchFunction()
try:
self.assertIsInstance(torch.sum(x), A)
finally:
del g
def test_subclass_hash(self):
class DiagTensor(torch.Tensor):
def __init__(self, diag):
self._diag = diag
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
def get_full_matrices(t):
if isinstance(t, DiagTensor):
return torch.diag_embed(t._diag)
else:
return t
return func(*tree_map(get_full_matrices, args), **tree_map(get_full_matrices, kwargs))
d = torch.rand(2)
a = DiagTensor(d)
self.assertEqual((a + 1), torch.diag_embed(d) + 1)
# If the hash function were returning the same value, this would
# fail inside `Tensor.__eq__`.
# If __hash__ went through torch_function, the implementation above would
# be wrong, as it would compute the hash on a temporary Tensor and thus not
# ensure the uniqueness of the hash that we rely on for Tensors.
s = set()
s.add(a)
s.add(DiagTensor(d))
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_overrides.py
|
# Owner(s): ["module: serialization"]
import torch
import unittest
import io
import tempfile
import os
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
# These tests were all copied from `test/test_torch.py` at some point; for
# the actual blame, see this revision:
# https://github.com/pytorch/pytorch/blame/9a2691f2fc948b9792686085b493c61793c2de30/test/test_torch.py
if TEST_DILL:
import dill
HAS_DILL_AT_LEAST_0_3_1 = check_module_version_greater_or_equal(dill, (0, 3, 1))
else:
HAS_DILL_AT_LEAST_0_3_1 = False
can_retrieve_source = True
with warnings.catch_warnings(record=True) as warns:
with tempfile.NamedTemporaryFile() as checkpoint:
x = torch.save(torch.nn.Module(), checkpoint)
for warn in warns:
if "Couldn't retrieve source code" in warn.message.args[0]:
can_retrieve_source = False
break
class FilelikeMock(object):
def __init__(self, data, has_fileno=True, has_readinto=False):
if has_readinto:
self.readinto = self.readinto_opt
if has_fileno:
# Python 2's StringIO.StringIO has no fileno attribute.
# This is used to test that.
self.fileno = self.fileno_opt
self.calls = set()
self.bytesio = io.BytesIO(data)
def trace(fn, name):
def result(*args, **kwargs):
self.calls.add(name)
return fn(*args, **kwargs)
return result
for attr in ['read', 'readline', 'seek', 'tell', 'write', 'flush']:
traced_fn = trace(getattr(self.bytesio, attr), attr)
setattr(self, attr, traced_fn)
def fileno_opt(self):
raise io.UnsupportedOperation('Not a real file')
def readinto_opt(self, view):
self.calls.add('readinto')
return self.bytesio.readinto(view)
def was_called(self, name):
return name in self.calls
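# --- Illustrative sketch (not part of the original tests): FilelikeMock above
# records which file-like methods torch.save/torch.load actually touch, in the
# same way test_serialization_filelike_api_requirements below does. The helper
# name is hypothetical and the exact contents of the recorded call sets may
# vary across torch versions.
def _filelike_mock_usage_sketch():
    f = FilelikeMock(b'')
    torch.save(torch.zeros(2), f)
    save_calls = set(f.calls)            # typically includes 'write' and 'flush'
    f.seek(0)
    f.calls.clear()
    _ = torch.load(f)
    load_calls = set(f.calls)            # typically includes 'read', 'seek', 'tell'
    return save_calls, load_calls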
class SerializationMixin(object):
def _test_serialization_data(self):
a = [torch.randn(5, 5).float() for i in range(2)]
b = [a[i % 2] for i in range(4)] # 0-3
b += [a[0].storage()] # 4
b += [a[0].reshape(-1)[1:4].storage()] # 5
b += [torch.arange(1, 11).int()] # 6
t1 = torch.FloatTensor().set_(a[0].reshape(-1)[1:4].clone().storage(), 0, (3,), (1,))
t2 = torch.FloatTensor().set_(a[0].reshape(-1)[1:4].clone().storage(), 0, (3,), (1,))
b += [(t1.storage(), t1.storage(), t2.storage())] # 7
b += [a[0].reshape(-1)[0:2].storage()] # 8
return b
def _test_serialization_assert(self, b, c):
self.assertEqual(b, c, atol=0, rtol=0)
self.assertTrue(isinstance(c[0], torch.FloatTensor))
self.assertTrue(isinstance(c[1], torch.FloatTensor))
self.assertTrue(isinstance(c[2], torch.FloatTensor))
self.assertTrue(isinstance(c[3], torch.FloatTensor))
self.assertTrue(isinstance(c[4], torch.storage.TypedStorage))
self.assertEqual(c[4].dtype, torch.float)
c[0].fill_(10)
self.assertEqual(c[0], c[2], atol=0, rtol=0)
self.assertEqual(c[4], torch.FloatStorage(25).fill_(10), atol=0, rtol=0)
c[1].fill_(20)
self.assertEqual(c[1], c[3], atol=0, rtol=0)
# I have to do it in this roundabout fashion, because there's no
# way to slice storages
for i in range(4):
self.assertEqual(c[4][i + 1], c[5][i])
# check that serializing the same storage view object unpickles
# it as one object not two (and vice versa)
views = c[7]
self.assertEqual(views[0]._cdata, views[1]._cdata)
self.assertEqual(views[0], views[2])
self.assertNotEqual(views[0]._cdata, views[2]._cdata)
rootview = c[8]
self.assertEqual(rootview.data_ptr(), c[0].data_ptr())
def test_serialization_zipfile_utils(self):
data = {
'a': b'12039810948234589',
'b': b'1239081209484958',
'c/d': b'94589480984058'
}
def test(name_or_buffer):
with torch.serialization._open_zipfile_writer(name_or_buffer) as zip_file:
for key in data:
zip_file.write_record(key, data[key], len(data[key]))
if hasattr(name_or_buffer, 'seek'):
name_or_buffer.seek(0)
with torch.serialization._open_zipfile_reader(name_or_buffer) as zip_file:
for key in data:
actual = zip_file.get_record(key)
expected = data[key]
self.assertEqual(expected, actual)
with tempfile.NamedTemporaryFile() as f:
test(f)
with TemporaryFileName() as fname:
test(fname)
test(io.BytesIO())
def test_serialization(self):
# Test serialization with a real file
b = self._test_serialization_data()
with tempfile.NamedTemporaryFile() as f:
torch.save(b, f)
f.seek(0)
c = torch.load(f)
self._test_serialization_assert(b, c)
with TemporaryFileName() as fname:
torch.save(b, fname)
c = torch.load(fname)
self._test_serialization_assert(b, c)
# test non-ascii encoding of bytes arrays/strings
# The following bytes are produced by serializing
# [b'\xc5\xbc\xc4\x85\xc4\x85\xc3\xb3\xc5\xbc\xc4\x85\xc5\xbc', torch.zeros(1, dtype=torch.float), 2]
# in Python 2.7.12 and PyTorch 0.4.1, where the first element contains
# bytes of some utf-8 characters (i.e., `utf8_str.encode('utf-8')`).
serialized = (
b'\x80\x02\x8a\nl\xfc\x9cF\xf9 j\xa8P\x19.\x80\x02M\xe9\x03.'
b'\x80\x02}q\x01(U\x10protocol_versionq\x02M\xe9\x03U\n'
b'type_sizesq\x03}q\x04(U\x03intq\x05K\x04U\x05shortq\x06K\x02U'
b'\x04longq\x07K\x04uU\rlittle_endianq\x08\x88u.\x80\x02]q'
b'\x01(U\x0e\xc5\xbc\xc4\x85\xc4\x85\xc3\xb3\xc5\xbc\xc4\x85'
b'\xc5\xbcq\x02ctorch._utils\n_rebuild_tensor_v2\nq\x03((U'
b'\x07storageq\x04ctorch\nFloatStorage\nq\x05U\x0845640624q'
b'\x06U\x03cpuq\x07\x8a\x01\x01NtQK\x00K\x01\x85K\x01\x85'
b'\x89NtRq\x08K\x02e.\x80\x02]q\x01U\x0845640624q\x02a.\x01\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
)
buf = io.BytesIO(serialized)
utf8_bytes = b'\xc5\xbc\xc4\x85\xc4\x85\xc3\xb3\xc5\xbc\xc4\x85\xc5\xbc'
utf8_str = utf8_bytes.decode('utf-8')
loaded_utf8 = torch.load(buf, encoding='utf-8')
self.assertEqual(loaded_utf8, [utf8_str, torch.zeros(1, dtype=torch.float), 2])
buf.seek(0)
loaded_bytes = torch.load(buf, encoding='bytes')
self.assertEqual(loaded_bytes, [utf8_bytes, torch.zeros(1, dtype=torch.float), 2])
def test_serialization_filelike(self):
# Test serialization (load and save) with a filelike object
b = self._test_serialization_data()
with BytesIOContext() as f:
torch.save(b, f)
f.seek(0)
c = torch.load(f)
self._test_serialization_assert(b, c)
def test_serialization_fake_zip(self):
data = [
ord('P'),
ord('K'),
5,
6
]
for i in range(0, 100):
data.append(0)
t = torch.tensor(data, dtype=torch.uint8)
with tempfile.NamedTemporaryFile() as f:
torch.save(t, f)
# If this check is False for all Python versions (i.e. the fix
# has been backported), this test and torch.serialization._is_zipfile
# can be deleted
self.assertTrue(zipfile.is_zipfile(f))
self.assertFalse(torch.serialization._is_zipfile(f))
f.seek(0)
self.assertEqual(torch.load(f), t)
def test_serialization_gzip(self):
# Test serialization with gzip file
b = self._test_serialization_data()
f1 = tempfile.NamedTemporaryFile(delete=False)
f2 = tempfile.NamedTemporaryFile(delete=False)
torch.save(b, f1)
with open(f1.name, 'rb') as f_in, gzip.open(f2.name, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
with gzip.open(f2.name, 'rb') as f:
c = torch.load(f)
self._test_serialization_assert(b, c)
@unittest.skipIf(
not TEST_DILL or HAS_DILL_AT_LEAST_0_3_1,
'"dill" not found or is correct version'
)
def test_serialization_dill_version_not_supported(self):
x = torch.randn(5, 5)
with tempfile.NamedTemporaryFile() as f:
with self.assertRaisesRegex(ValueError, 'supports dill >='):
torch.save(x, f, pickle_module=dill)
f.seek(0)
with self.assertRaisesRegex(ValueError, 'supports dill >='):
x2 = torch.load(f, pickle_module=dill, encoding='utf-8')
@unittest.skipIf(
not TEST_DILL or not HAS_DILL_AT_LEAST_0_3_1,
'"dill" not found or not correct version'
)
def test_serialization_dill(self):
x = torch.randn(5, 5)
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f, pickle_module=dill)
f.seek(0)
x2 = torch.load(f, pickle_module=dill, encoding='utf-8')
self.assertIsInstance(x2, type(x))
self.assertEqual(x, x2)
f.seek(0)
x3 = torch.load(f, pickle_module=dill)
self.assertIsInstance(x3, type(x))
self.assertEqual(x, x3)
def test_serialization_offset_gzip(self):
a = torch.randn(5, 5)
i = 41
f1 = tempfile.NamedTemporaryFile(delete=False)
f2 = tempfile.NamedTemporaryFile(delete=False)
with open(f1.name, 'wb') as f:
pickle.dump(i, f)
torch.save(a, f)
with open(f1.name, 'rb') as f_in, gzip.open(f2.name, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
with gzip.open(f2.name, 'rb') as f:
j = pickle.load(f)
b = torch.load(f)
self.assertTrue(torch.equal(a, b))
self.assertEqual(i, j)
def test_serialization_sparse(self):
def _test_serialization(conversion):
x = torch.zeros(3, 3)
x[1][1] = 1
x = conversion(x)
with tempfile.NamedTemporaryFile() as f:
torch.save({"tensor": x}, f)
f.seek(0)
y = torch.load(f)
self.assertEqual(x, y["tensor"])
_test_serialization(lambda x: x.to_sparse())
_test_serialization(lambda x: x.to_sparse_csr())
def test_serialization_sparse_invalid(self):
x = torch.zeros(3, 3)
x[1][1] = 1
x = x.to_sparse()
class TensorSerializationSpoofer(object):
def __init__(self, tensor):
self.tensor = tensor
def __reduce_ex__(self, proto):
invalid_indices = self.tensor._indices().clone()
invalid_indices[0][0] = 3
return (
torch._utils._rebuild_sparse_tensor,
(
self.tensor.layout,
(
invalid_indices,
self.tensor._values(),
self.tensor.size())))
with tempfile.NamedTemporaryFile() as f:
torch.save({"spoofed": TensorSerializationSpoofer(x)}, f)
f.seek(0)
with self.assertRaisesRegex(
RuntimeError,
"size is inconsistent with indices"):
y = torch.load(f)
def test_serialization_sparse_csr_invalid(self):
x = torch.zeros(3, 3)
x[1][1] = 1
x = x.to_sparse_csr()
class TensorSerializationSpoofer(object):
def __init__(self, tensor):
self.tensor = tensor
def __reduce_ex__(self, proto):
invalid_crow_indices = self.tensor.crow_indices().clone()
invalid_crow_indices[0] = 3
return (
torch._utils._rebuild_sparse_tensor,
(
self.tensor.layout,
(
invalid_crow_indices,
self.tensor.col_indices(),
self.tensor.values(),
self.tensor.size())))
with tempfile.NamedTemporaryFile() as f:
torch.save({"spoofed": TensorSerializationSpoofer(x)}, f)
f.seek(0)
with self.assertRaisesRegex(
RuntimeError,
"rebuilding sparse tensor for layout torch.sparse_csr"):
y = torch.load(f)
def test_serialize_device(self):
device_str = ['cpu', 'cpu:0', 'cuda', 'cuda:0']
device_obj = [torch.device(d) for d in device_str]
for device in device_obj:
device_copied = copy.deepcopy(device)
self.assertEqual(device, device_copied)
def test_serialization_backwards_compat(self):
a = [torch.arange(1 + i, 26 + i).view(5, 5).float() for i in range(2)]
b = [a[i % 2] for i in range(4)]
b += [a[0].storage()]
b += [a[0].reshape(-1)[1:4].clone().storage()]
path = download_file('https://download.pytorch.org/test_data/legacy_serialized.pt')
c = torch.load(path)
self.assertEqual(b, c, atol=0, rtol=0)
self.assertTrue(isinstance(c[0], torch.FloatTensor))
self.assertTrue(isinstance(c[1], torch.FloatTensor))
self.assertTrue(isinstance(c[2], torch.FloatTensor))
self.assertTrue(isinstance(c[3], torch.FloatTensor))
self.assertTrue(isinstance(c[4], torch.storage.TypedStorage))
self.assertEqual(c[4].dtype, torch.float32)
c[0].fill_(10)
self.assertEqual(c[0], c[2], atol=0, rtol=0)
self.assertEqual(c[4], torch.FloatStorage(25).fill_(10), atol=0, rtol=0)
c[1].fill_(20)
self.assertEqual(c[1], c[3], atol=0, rtol=0)
# test some old tensor serialization mechanism
class OldTensorBase(object):
def __init__(self, new_tensor):
self.new_tensor = new_tensor
def __getstate__(self):
return (self.new_tensor.storage(),
self.new_tensor.storage_offset(),
tuple(self.new_tensor.size()),
self.new_tensor.stride())
class OldTensorV1(OldTensorBase):
def __reduce__(self):
return (torch.Tensor, (), self.__getstate__())
class OldTensorV2(OldTensorBase):
def __reduce__(self):
return (_rebuild_tensor, self.__getstate__())
x = torch.randn(30).as_strided([2, 3], [9, 3], 2)
for old_cls in [OldTensorV1, OldTensorV2]:
with tempfile.NamedTemporaryFile() as f:
old_x = old_cls(x)
torch.save(old_x, f)
f.seek(0)
load_x = torch.load(f)
self.assertEqual(x.storage(), load_x.storage())
self.assertEqual(x.storage_offset(), load_x.storage_offset())
self.assertEqual(x.size(), load_x.size())
self.assertEqual(x.stride(), load_x.stride())
def test_serialization_save_warnings(self):
with warnings.catch_warnings(record=True) as warns:
with tempfile.NamedTemporaryFile() as checkpoint:
x = torch.save(torch.nn.Linear(2, 3), checkpoint)
self.assertEqual(len(warns), 0)
def test_serialization_map_location(self):
test_file_path = download_file('https://download.pytorch.org/test_data/gpu_tensors.pt')
def map_location(storage, loc):
return storage
def load_bytes():
with open(test_file_path, 'rb') as f:
return io.BytesIO(f.read())
fileobject_lambdas = [lambda: test_file_path, load_bytes]
cpu_map_locations = [
map_location,
{'cuda:0': 'cpu'},
'cpu',
torch.device('cpu'),
]
gpu_0_map_locations = [
{'cuda:0': 'cuda:0'},
'cuda',
'cuda:0',
torch.device('cuda'),
torch.device('cuda', 0)
]
gpu_last_map_locations = [
'cuda:{}'.format(torch.cuda.device_count() - 1),
]
def check_map_locations(map_locations, tensor_class, intended_device):
for fileobject_lambda in fileobject_lambdas:
for map_location in map_locations:
tensor = torch.load(fileobject_lambda(), map_location=map_location)
self.assertEqual(tensor.device, intended_device)
self.assertIsInstance(tensor, tensor_class)
self.assertEqual(tensor, tensor_class([[1.0, 2.0], [3.0, 4.0]]))
check_map_locations(cpu_map_locations, torch.FloatTensor, torch.device('cpu'))
if torch.cuda.is_available():
check_map_locations(gpu_0_map_locations, torch.cuda.FloatTensor, torch.device('cuda', 0))
check_map_locations(
gpu_last_map_locations,
torch.cuda.FloatTensor,
torch.device('cuda', torch.cuda.device_count() - 1)
)
@unittest.skipIf(torch.cuda.is_available(), "Testing torch.load on CPU-only machine")
def test_load_nonexistent_device(self):
# Setup: create a serialized file object with a 'cuda:0' restore location
# The following was generated by saving a torch.randn(2, device='cuda') tensor.
serialized = (b'\x80\x02\x8a\nl\xfc\x9cF\xf9 j\xa8P\x19.\x80\x02M\xe9'
b'\x03.\x80\x02}q\x00(X\x10\x00\x00\x00protocol_versionq'
b'\x01M\xe9\x03X\r\x00\x00\x00little_endianq\x02\x88X\n'
b'\x00\x00\x00type_sizesq\x03}q\x04(X\x05\x00\x00\x00shortq'
b'\x05K\x02X\x03\x00\x00\x00intq\x06K\x04X\x04\x00\x00\x00'
b'longq\x07K\x04uu.\x80\x02ctorch._utils\n_rebuild_tensor_v2'
b'\nq\x00((X\x07\x00\x00\x00storageq\x01ctorch\nFloatStorage'
b'\nq\x02X\x0e\x00\x00\x0094919395964320q\x03X\x06\x00\x00'
b'\x00cuda:0q\x04K\x02Ntq\x05QK\x00K\x02\x85q\x06K\x01\x85q'
b'\x07\x89Ntq\x08Rq\t.\x80\x02]q\x00X\x0e\x00\x00\x00'
b'94919395964320q\x01a.\x02\x00\x00\x00\x00\x00\x00\x00\xbb'
b'\x1f\x82\xbe\xea\x81\xd1>')
buf = io.BytesIO(serialized)
error_msg = r'Attempting to deserialize object on a CUDA device'
with self.assertRaisesRegex(RuntimeError, error_msg):
_ = torch.load(buf)
@unittest.skipIf((3, 8, 0) <= sys.version_info < (3, 8, 2), "See https://bugs.python.org/issue39681")
def test_serialization_filelike_api_requirements(self):
filemock = FilelikeMock(b'', has_readinto=False)
tensor = torch.randn(3, 5)
torch.save(tensor, filemock)
expected_superset = {'write', 'flush'}
self.assertTrue(expected_superset.issuperset(filemock.calls))
# Reset between save and load
filemock.seek(0)
filemock.calls.clear()
_ = torch.load(filemock)
expected_superset = {'read', 'readline', 'seek', 'tell'}
self.assertTrue(expected_superset.issuperset(filemock.calls))
def _test_serialization_filelike(self, tensor, mock, desc):
f = mock(b'')
torch.save(tensor, f)
f.seek(0)
data = mock(f.read())
msg = 'filelike serialization with {}'
b = torch.load(data)
self.assertTrue(torch.equal(tensor, b), msg.format(desc))
@unittest.skipIf((3, 8, 0) <= sys.version_info < (3, 8, 2), "See https://bugs.python.org/issue39681")
def test_serialization_filelike_missing_attrs(self):
# Test edge cases where filelike objects are missing attributes.
# The Python io docs suggest that these attributes should really exist
# and throw io.UnsupportedOperation, but that isn't always the case.
mocks = [
('no readinto', lambda x: FilelikeMock(x)),
('has readinto', lambda x: FilelikeMock(x, has_readinto=True)),
('no fileno', lambda x: FilelikeMock(x, has_fileno=False)),
]
to_serialize = torch.randn(3, 10)
for desc, mock in mocks:
self._test_serialization_filelike(to_serialize, mock, desc)
@unittest.skipIf((3, 8, 0) <= sys.version_info < (3, 8, 2), "See https://bugs.python.org/issue39681")
def test_serialization_filelike_stress(self):
a = torch.randn(11 * (2 ** 9) + 1, 5 * (2 ** 9))
# This one should call python read multiple times
self._test_serialization_filelike(a, lambda x: FilelikeMock(x, has_readinto=False),
'read() stress test')
self._test_serialization_filelike(a, lambda x: FilelikeMock(x, has_readinto=True),
'readinto() stress test')
def test_serialization_filelike_uses_readinto(self):
# For maximum efficiency, when reading a file-like object,
# ensure the C API calls readinto instead of read.
a = torch.randn(5, 4)
f = io.BytesIO()
torch.save(a, f)
f.seek(0)
data = FilelikeMock(f.read(), has_readinto=True)
b = torch.load(data)
self.assertTrue(data.was_called('readinto'))
def test_serialization_storage_slice(self):
# Generated using:
#
# t = torch.zeros(2);
# s1 = t.storage()[:1]
# s2 = t.storage()[1:]
# torch.save((s1, s2), 'foo.ser')
#
# with PyTorch 0.3.1
serialized = (b'\x80\x02\x8a\nl\xfc\x9cF\xf9 j\xa8P\x19.\x80\x02M\xe9\x03'
b'.\x80\x02}q\x00(X\n\x00\x00\x00type_sizesq\x01}q\x02(X\x03'
b'\x00\x00\x00intq\x03K\x04X\x05\x00\x00\x00shortq\x04K\x02X'
b'\x04\x00\x00\x00longq\x05K\x04uX\x10\x00\x00\x00protocol_versionq'
b'\x06M\xe9\x03X\r\x00\x00\x00little_endianq\x07\x88u.\x80\x02'
b'(X\x07\x00\x00\x00storageq\x00ctorch\nFloatStorage\nq\x01X\x0e'
b'\x00\x00\x0094279043900432q\x02X\x03\x00\x00\x00cpuq\x03K\x02'
b'X\x0e\x00\x00\x0094279029750368q\x04K\x00K\x01\x87q\x05tq\x06'
b'Q(h\x00h\x01X\x0e\x00\x00\x0094279043900432q\x07h\x03K\x02X'
b'\x0e\x00\x00\x0094279029750432q\x08K\x01K\x01\x87q\ttq\nQ'
b'\x86q\x0b.\x80\x02]q\x00X\x0e\x00\x00\x0094279043900432q'
b'\x01a.\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00')
buf = io.BytesIO(serialized)
(s1, s2) = torch.load(buf)
self.assertEqual(s1[0], 0)
self.assertEqual(s2[0], 0)
self.assertEqual(s1.data_ptr() + 4, s2.data_ptr())
def test_load_unicode_error_msg(self):
# This Pickle contains a Python 2 module with Unicode data and the
# loading should fail if the user explicitly specifies ascii encoding!
path = download_file('https://download.pytorch.org/test_data/legacy_conv2d.pt')
self.assertRaises(UnicodeDecodeError, lambda: torch.load(path, encoding='ascii'))
def test_load_python2_unicode_module(self):
# This Pickle contains some Unicode data!
path = download_file('https://download.pytorch.org/test_data/legacy_conv2d.pt')
with warnings.catch_warnings(record=True) as w:
self.assertIsNotNone(torch.load(path))
def test_load_error_msg(self):
expected_err_msg = (".*You can only torch.load from a file that is seekable. " +
"Please pre-load the data into a buffer like io.BytesIO and " +
"try to load from it instead.")
resource = FilelikeMock(data=b"data")
delattr(resource, "tell")
delattr(resource, "seek")
with self.assertRaisesRegex(AttributeError, expected_err_msg):
torch.load(resource)
def test_save_different_dtype_unallocated(self):
devices = ['cpu']
if torch.cuda.is_available():
devices.append('cuda')
def save_load_check(a, b):
with io.BytesIO() as f:
torch.save([a, b], f)
f.seek(0)
a_loaded, b_loaded = torch.load(f)
self.assertEqual(a, a_loaded)
self.assertEqual(b, b_loaded)
for device, dtype in product(devices, all_types_and_complex_and(torch.half,
torch.bfloat16, torch.bool)):
a = torch.tensor([], dtype=dtype, device=device)
for other_dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
s = torch.TypedStorage(
wrap_storage=a.storage().untyped(),
dtype=other_dtype)
save_load_check(a, s)
save_load_check(a.storage(), s)
b = torch.tensor([], dtype=other_dtype, device=device)
save_load_check(a, b)
def test_save_different_dtype_error(self):
error_msg = r"Cannot save multiple tensors or storages that view the same data as different types"
devices = ['cpu']
if torch.cuda.is_available():
devices.append('cuda')
for device in devices:
a = torch.randn(10, dtype=torch.complex128, device=device)
f = io.BytesIO()
with self.assertRaisesRegex(RuntimeError, error_msg):
torch.save([a, a.imag], f)
with self.assertRaisesRegex(RuntimeError, error_msg):
torch.save([a.storage(), a.imag], f)
with self.assertRaisesRegex(RuntimeError, error_msg):
torch.save([a, a.imag.storage()], f)
with self.assertRaisesRegex(RuntimeError, error_msg):
torch.save([a.storage(), a.imag.storage()], f)
a = torch.randn(10, device=device)
s_bytes = torch.TypedStorage(
wrap_storage=a.storage().untyped(),
dtype=torch.uint8)
with self.assertRaisesRegex(RuntimeError, error_msg):
torch.save([a, s_bytes], f)
with self.assertRaisesRegex(RuntimeError, error_msg):
torch.save([a.storage(), s_bytes], f)
class serialization_method(object):
def __init__(self, use_zip):
self.use_zip = use_zip
self.torch_save = torch.save
def __enter__(self, *args, **kwargs):
def wrapper(*args, **kwargs):
if '_use_new_zipfile_serialization' in kwargs:
raise RuntimeError("Cannot set method manually")
kwargs['_use_new_zipfile_serialization'] = self.use_zip
return self.torch_save(*args, **kwargs)
torch.save = wrapper
def __exit__(self, *args, **kwargs):
torch.save = self.torch_save
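# --- Illustrative sketch (not from the original file): how the
# serialization_method context manager above forces one of the two on-disk
# formats for every torch.save call inside the block. The helper name is
# hypothetical; TestOldSerialization and TestSerialization below apply the
# same pattern in their run() overrides.
def _serialization_method_usage_sketch():
    buf = io.BytesIO()
    with serialization_method(use_zip=False):
        # the wrapper injects _use_new_zipfile_serialization=False here
        torch.save(torch.arange(4), buf)
    buf.seek(0)
    return torch.load(buf)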
class TestBothSerialization(TestCase):
@unittest.skipIf(IS_WINDOWS, "NamedTemporaryFile on windows")
def test_serialization_new_format_old_format_compat(self, device):
x = [torch.ones(200, 200, device=device) for i in range(30)]
def test(f_new, f_old):
torch.save(x, f_new, _use_new_zipfile_serialization=True)
f_new.seek(0)
x_new_load = torch.load(f_new)
self.assertEqual(x, x_new_load)
torch.save(x, f_old, _use_new_zipfile_serialization=False)
f_old.seek(0)
x_old_load = torch.load(f_old)
self.assertEqual(x_old_load, x_new_load)
with tempfile.NamedTemporaryFile() as f_new, tempfile.NamedTemporaryFile() as f_old:
test(f_new, f_old)
class TestOldSerialization(TestCase, SerializationMixin):
# unique_key is necessary because on Python 2.7, if a warning passed to
# the warnings module is the same, it is not raised again.
def _test_serialization_container(self, unique_key, filecontext_lambda):
tmpmodule_name = 'tmpmodule{}'.format(unique_key)
def import_module(name, filename):
import importlib.util
spec = importlib.util.spec_from_file_location(name, filename)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules[module.__name__] = module
return module
with filecontext_lambda() as checkpoint:
fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)), 'torch', 'testing',
'_internal', 'data', 'network1.py')
module = import_module(tmpmodule_name, fname)
torch.save(module.Net(), checkpoint)
# First check that the checkpoint can be loaded without warnings
checkpoint.seek(0)
with warnings.catch_warnings(record=True) as w:
loaded = torch.load(checkpoint)
self.assertTrue(isinstance(loaded, module.Net))
if can_retrieve_source:
self.assertEqual(len(w), 0)
# Replace the module with different source
fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)), 'torch', 'testing',
'_internal', 'data', 'network2.py')
module = import_module(tmpmodule_name, fname)
checkpoint.seek(0)
with warnings.catch_warnings(record=True) as w:
loaded = torch.load(checkpoint)
self.assertTrue(isinstance(loaded, module.Net))
if can_retrieve_source:
self.assertEqual(len(w), 1)
self.assertTrue(w[0].category, 'SourceChangeWarning')
def test_serialization_container(self):
self._test_serialization_container('file', tempfile.NamedTemporaryFile)
def test_serialization_container_filelike(self):
self._test_serialization_container('filelike', BytesIOContext)
def test_serialization_offset(self):
a = torch.randn(5, 5)
b = torch.randn(1024, 1024, 512, dtype=torch.float32)
m = torch.nn.Conv2d(1, 1, (1, 3))
i, j = 41, 43
with tempfile.NamedTemporaryFile() as f:
pickle.dump(i, f)
torch.save(a, f)
pickle.dump(j, f)
torch.save(b, f)
torch.save(m, f)
self.assertTrue(f.tell() > 2 * 1024 * 1024 * 1024)
f.seek(0)
i_loaded = pickle.load(f)
a_loaded = torch.load(f)
j_loaded = pickle.load(f)
b_loaded = torch.load(f)
m_loaded = torch.load(f)
self.assertTrue(torch.equal(a, a_loaded))
self.assertTrue(torch.equal(b, b_loaded))
self.assertTrue(m.kernel_size == m_loaded.kernel_size)
self.assertEqual(i, i_loaded)
self.assertEqual(j, j_loaded)
def test_serialization_offset_filelike(self):
a = torch.randn(5, 5)
b = torch.randn(1024, 1024, 512, dtype=torch.float32)
i, j = 41, 43
with BytesIOContext() as f:
pickle.dump(i, f)
torch.save(a, f)
pickle.dump(j, f)
torch.save(b, f)
self.assertTrue(f.tell() > 2 * 1024 * 1024 * 1024)
f.seek(0)
i_loaded = pickle.load(f)
a_loaded = torch.load(f)
j_loaded = pickle.load(f)
b_loaded = torch.load(f)
self.assertTrue(torch.equal(a, a_loaded))
self.assertTrue(torch.equal(b, b_loaded))
self.assertEqual(i, i_loaded)
self.assertEqual(j, j_loaded)
def run(self, *args, **kwargs):
with serialization_method(use_zip=False):
return super(TestOldSerialization, self).run(*args, **kwargs)
class TestSerialization(TestCase, SerializationMixin):
def test_serialization_zipfile(self):
data = self._test_serialization_data()
def test(name_or_buffer):
torch.save(data, name_or_buffer)
if hasattr(name_or_buffer, 'seek'):
name_or_buffer.seek(0)
result = torch.load(name_or_buffer)
self.assertEqual(result, data)
with tempfile.NamedTemporaryFile() as f:
test(f)
with TemporaryFileName() as fname:
test(fname)
test(io.BytesIO())
def test_serialization_zipfile_actually_jit(self):
with tempfile.NamedTemporaryFile() as f:
torch.jit.save(torch.jit.script(torch.nn.Linear(3, 4)), f)
f.seek(0)
torch.load(f)
# Ensure large zip64 serialization works properly
def test_serialization_2gb_file(self):
big_model = torch.nn.Conv2d(20000, 3200, kernel_size=3)
with BytesIOContext() as f:
torch.save(big_model, f)
f.seek(0)
state = torch.load(f)
def test_pathlike_serialization(self):
model = torch.nn.Conv2d(20, 3200, kernel_size=3)
with TemporaryFileName() as fname:
path = pathlib.Path(fname)
torch.save(model, path)
torch.load(path)
def test_meta_serialization(self):
big_model = torch.nn.Conv2d(20000, 320000, kernel_size=3, device='meta')
with BytesIOContext() as f:
torch.save(big_model, f)
f.seek(0)
state = torch.load(f)
self.assertEqual(state.weight.size(), big_model.weight.size())
def run(self, *args, **kwargs):
with serialization_method(use_zip=True):
return super(TestSerialization, self).run(*args, **kwargs)
class TestWrapperSubclass(torch.Tensor):
elem: torch.Tensor
__slots__ = ['elem', 'other']
@staticmethod
def __new__(cls, elem, *args, **kwargs):
# The wrapping tensor (TestWrapperSubclass) is just a meta tensor, so it
# doesn't hold any memory (meta tensor is generally the preferred type
# of tensor you want to make a subclass from)...
r = torch.Tensor._make_subclass(cls, elem.to('meta'), elem.requires_grad)
# ...the real tensor is held as an element on the tensor.
r.elem = elem
return r
def clone(self):
return type(self)(self.elem.clone())
class TestGetStateSubclass(torch.Tensor):
elem: torch.Tensor
__slots__ = ['elem']
@staticmethod
def __new__(cls, elem, *args, **kwargs):
# The wrapping tensor (TestGetStateSubclass) is just a meta tensor, so it
# doesn't hold any memory (meta tensor is generally the preferred type
# of tensor you want to make a subclass from)...
r = torch.Tensor._make_subclass(cls, elem.to('meta'), elem.requires_grad)
# ...the real tensor is held as an element on the tensor.
r.elem = elem
return r
def __getstate__(self):
return ("foo", getattr(self, "elem", None), self.__dict__)
def __setstate__(self, state):
marker, self.elem, self.__dict__ = state
if not marker == "foo":
raise RuntimeError("Invalid state for TestGetStateSubclass")
self.reloaded = True
class TestEmptySubclass(torch.Tensor):
...
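# --- Illustrative sketch (not part of the original tests): round-tripping the
# wrapper subclass defined above through torch.save/torch.load, mirroring
# TestSubclassSerialization.test_tensor_subclass_wrapper_serialization below.
# The helper name is hypothetical.
def _wrapper_subclass_roundtrip_sketch():
    t = TestWrapperSubclass(torch.rand(2))
    buf = io.BytesIO()
    torch.save(t, buf)
    buf.seek(0)
    loaded = torch.load(buf)
    assert isinstance(loaded, TestWrapperSubclass)   # subclass type is preserved
    return loaded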
class TestSubclassSerialization(TestCase):
def test_tensor_subclass_wrapper_serialization(self):
wrapped_tensor = torch.rand(2)
my_tensor = TestWrapperSubclass(wrapped_tensor)
foo_val = "bar"
my_tensor.foo = foo_val
self.assertEqual(my_tensor.foo, foo_val)
with BytesIOContext() as f:
torch.save(my_tensor, f)
f.seek(0)
new_tensor = torch.load(f)
self.assertIsInstance(new_tensor, TestWrapperSubclass)
self.assertEqual(new_tensor.elem, my_tensor.elem)
self.assertEqual(new_tensor.foo, foo_val)
def test_tensor_subclass_getstate_overwrite(self):
wrapped_tensor = torch.rand(2)
my_tensor = TestGetStateSubclass(wrapped_tensor)
foo_val = "bar"
my_tensor.foo = foo_val
self.assertEqual(my_tensor.foo, foo_val)
with BytesIOContext() as f:
torch.save(my_tensor, f)
f.seek(0)
new_tensor = torch.load(f)
self.assertIsInstance(new_tensor, TestGetStateSubclass)
self.assertEqual(new_tensor.elem, my_tensor.elem)
self.assertEqual(new_tensor.foo, foo_val)
self.assertTrue(new_tensor.reloaded)
def test_tensor_subclass_deepcopy(self):
wrapped_tensor = torch.rand(2)
my_tensor = TestWrapperSubclass(wrapped_tensor)
foo_val = "bar"
my_tensor.foo = foo_val
self.assertEqual(my_tensor.foo, foo_val)
new_tensor = deepcopy(my_tensor)
self.assertIsInstance(new_tensor, TestWrapperSubclass)
self.assertEqual(new_tensor.elem, my_tensor.elem)
self.assertEqual(new_tensor.foo, foo_val)
@parametrize('requires_grad', (True, False))
def test_cloned_deepcopy(self, requires_grad):
my_tensor = torch.rand(2, requires_grad=requires_grad, device='meta')
new_tensor = deepcopy(my_tensor)
self.assertEqual(new_tensor.requires_grad, my_tensor.requires_grad)
def test_empty_class_serialization(self):
tensor = TestEmptySubclass([1.])
# Ensures it runs fine
tensor2 = copy.copy(tensor)
with BytesIOContext() as f:
torch.save(tensor, f)
f.seek(0)
tensor2 = torch.load(f)
tensor = TestEmptySubclass()
# Ensures it runs fine
# Note that tensor.data_ptr() == 0 here
tensor2 = copy.copy(tensor)
with BytesIOContext() as f:
torch.save(tensor, f)
f.seek(0)
tensor2 = torch.load(f)
instantiate_device_type_tests(TestBothSerialization, globals())
instantiate_parametrized_tests(TestSubclassSerialization)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_serialization.py
|
# Owner(s): ["oncall: distributed"]
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.optim import SGD, Adam, AdamW
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.distributed.optim.utils import functional_optim_map, register_functional_optim
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
torch.manual_seed(0)
self.lin1 = nn.Linear(3, 3, bias=False)
self.lin2 = nn.Linear(3, 3, bias=False)
def forward(self, t1):
return self.lin2(F.relu(self.lin1(t1)))
# Dummy class to showcase custom optimizer registration with the functional wrapper.
class MyDummyFnOptimizer(object):
def __init__(
self,
params: List[Tensor],
lr: float = 1e-3,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-6,
weight_decay: float = 0.0,
_allow_empty_param_list: bool = False,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 < weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
self.defaults = {
"lr": lr,
"eps": eps,
"beta1": betas[0],
"beta2": betas[1],
"weight_decay": weight_decay,
}
if len(params) == 0 and not _allow_empty_param_list:
raise ValueError("optimizer got an empty parameter list")
def step_param(self, param: Tensor, grad: Optional[Tensor]):
# call the custom optimizer step_param implementation
with torch.no_grad():
raise RuntimeError("MyDummyFnOptimizer does not support step_param() as of now")
def step(self, gradients: List[Optional[Tensor]]):
# call the custom optimizer step implementation
with torch.no_grad():
raise RuntimeError("MyDummyFnOptimizer does not support step() as of now")
class TestFunctionalOptimParity(TestCase):
def _validate_parameters(self, params_1, params_2):
for p1, p2 in zip(params_1, params_2):
self.assertEqual(p1, p2)
def _test_functional_optim_parity(self, optim_cls, *args, **kwargs):
module_optim = MyModule()
module_functional = MyModule()
optim_params = module_optim.parameters()
functional_params = module_functional.parameters()
optim = optim_cls(optim_params, *args, **kwargs)
functional_optim_cls = functional_optim_map.get(optim_cls, None)
if not functional_optim_cls:
raise ValueError(f"Functional optimizer not implemented for {optim_cls}")
optim_functional = functional_optim_cls(
[], *args, **kwargs, _allow_empty_param_list=True
)
if not hasattr(optim_functional, "step_param"):
raise ValueError(
f"Functional optimizer class {optim_functional} must implement step_param method."
)
# Initial weights should match
self._validate_parameters(
module_optim.parameters(), module_functional.parameters()
)
# Save old parameters to verify optimizer modifies them.
old_module_optim_params = [
param.clone().detach() for param in module_optim.parameters()
]
old_module_functional_params = [
param.clone().detach() for param in module_functional.parameters()
]
t1 = torch.randn(3, 3)
for _ in range(10):
module_optim.zero_grad()
module_functional.zero_grad()
# Forward + Backward
optim_out = module_optim(t1).sum()
functional_out = module_functional(t1).sum()
optim_out.backward()
functional_out.backward()
# Optimizer step
optim.step()
# Functional optimizer step_param
for param in module_functional.parameters():
grad = param.grad
optim_functional.step_param(param, grad)
# Validate parameters are equal
for optim_param, functional_param in zip(
module_optim.parameters(), module_functional.parameters()
):
self.assertEqual(optim_param, functional_param)
# Validate parameters are modified.
for i, (optim_param, functional_param) in enumerate(
zip(module_optim.parameters(), module_functional.parameters())
):
self.assertNotEqual(old_module_optim_params[i], optim_param)
self.assertNotEqual(old_module_functional_params[i], functional_param)
def _test_functional_optim_registration(self):
fn_map_key = "MyDummyFnOptimizer"
fn_optim = MyDummyFnOptimizer
register_functional_optim(fn_map_key, fn_optim)
functional_optim_cls = functional_optim_map.get(fn_map_key, None)
if not functional_optim_cls:
raise ValueError(f"Functional optimizer not registered for {fn_map_key}")
def test_functional_optim_registration(self):
self._test_functional_optim_registration()
def test_functional_optim_parity_sgd(self):
self._test_functional_optim_parity(SGD, 1e-2, momentum=0.9, weight_decay=0.01)
def test_functional_optim_parity_adam(self):
self._test_functional_optim_parity(Adam, 1e-2, betas=(0.9, 0.999), eps=1e-6)
def test_functional_optim_parity_adam_w(self):
self._test_functional_optim_parity(AdamW, 1e-2, betas=(0.9, 0.999), eps=1e-6)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/test_functional_optim.py
|
# Owner(s): ["module: meta tensors"]
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfCrossRef, skipIfRocm
import torch
import itertools
import numpy as np
from torch.testing._internal.jit_utils import RUN_CUDA
from torch._subclasses.fake_tensor import (
FakeTensor,
FakeTensorMode,
FakeTensorConverter,
DynamicOutputShapeException,
)
from torch.testing import FileCheck
from torch.utils._python_dispatch import enable_torch_dispatch_mode
from torch import nn
import unittest
import torch._prims as prims
import contextlib
import copy
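# --- Illustrative sketch (not part of the original tests): the FakeTensorMode
# pattern exercised throughout FakeTensorTest below -- real tensors are
# converted to fake ones and ops run on metadata only, without allocating
# real storage. The helper name is hypothetical; it mirrors test_basic.
def _fake_tensor_usage_sketch():
    mode = FakeTensorMode(inner=None)
    x = torch.empty(2, 2, device="cpu")
    y = torch.empty(4, 2, 2, device="cpu")
    with enable_torch_dispatch_mode(mode):
        fx = mode.from_tensor(x)
        fy = mode.from_tensor(y)
        out = fx + fy                    # broadcasting is computed on fake tensors
    assert isinstance(out, FakeTensor) and out.shape == (4, 2, 2)
    return out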
class FakeTensorTest(TestCase):
def checkType(self, t, device_str, size):
self.assertTrue(isinstance(t, FakeTensor))
self.assertEqual(t.device.type, device_str)
self.assertEqual(list(t.size()), size)
def test_basic(self):
mode = FakeTensorMode(inner=None)
x = torch.empty(2, 2, device="cpu")
y = torch.empty(4, 2, 2, device="cpu")
with enable_torch_dispatch_mode(mode):
x = mode.from_tensor(x)
y = mode.from_tensor(y)
z = x + y
self.assertEqual(z.shape, (4, 2, 2))
self.assertEqual(z.device, torch.device("cpu"))
self.assertTrue(isinstance(z, FakeTensor))
def test_parameter_instantiation(self):
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
x = torch.rand([4])
y = torch.nn.parameter.Parameter(x)
self.assertTrue(isinstance(y, torch.nn.Parameter))
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_index_cuda_with_cpu(self):
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
x = torch.rand([2048], device='cuda')
out = x[torch.zeros([36], dtype=torch.int64)]
self.checkType(out, "cuda", [36])
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_shape_take_not_device(self):
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
x = torch.empty(1, device="cpu")
y = torch.empty(8, 8, device="cuda")
out = x.resize_as_(y)
self.assertEqual(out.shape, (8, 8))
self.assertEqual(out.device.type, "cpu")
self.assertTrue(isinstance(out, FakeTensor))
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_zero_dim(self):
mode = FakeTensorMode(inner=None)
with enable_torch_dispatch_mode(mode):
x = torch.tensor(0.)
y = torch.rand([4, 4], device="cuda")
out = x + y
self.assertEqual(out.shape, (4, 4))
self.assertEqual(out.device, y.device)
self.assertTrue(isinstance(out, FakeTensor))
def test_nan_to_num(self):
mode = FakeTensorMode(inner=None)
with enable_torch_dispatch_mode(mode):
for dtype in [torch.float16, torch.float32]:
x = torch.rand([4], dtype=dtype)
y = torch.nan_to_num(x, nan=None)
z = torch.nan_to_num(x, 0.0)
self.assertEqual(dtype, y.dtype)
self.assertEqual(dtype, z.dtype)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_throw(self):
mode = FakeTensorMode(inner=None)
x = torch.tensor(0.) # TODO: tensor() errors
with enable_torch_dispatch_mode(mode):
x_conv = mode.from_tensor(x)
y = torch.rand([4, 4], device="cuda")
z = torch.rand([4, 4], device="cpu")
self.assertRaises(Exception, lambda: torch.lerp(x_conv, y, z))
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_type_as(self):
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
x = torch.rand([16, 1], device="cpu")
y = torch.rand([4, 4], device="cuda")
out = x.type_as(y)
self.assertEqual(out.device.type, "cuda")
self.assertTrue(isinstance(out, FakeTensor))
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_setitem(self):
for device in ["cpu", "cuda"]:
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
x = torch.rand([16, 1], device=device)
x[..., 0] = 0
def test_fake_dispatch_keys(self):
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
x = torch.rand([4])
f = FileCheck().check("CPU").check("ADInplaceOrView").check("AutogradCPU").check("AutocastCPU")
f.run(torch._C._dispatch_key_set(x))
with torch.inference_mode():
x = torch.rand([4])
y = x + x
FileCheck().check("CPU").check("AutocastCPU").run(torch._C._dispatch_key_set(y))
FileCheck().check_not("ADInplaceOrView").check_not("Autograd").run(torch._C._dispatch_key_set(y))
def test_constructor(self):
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
x = torch.rand([4, 4], device="cpu")
self.assertTrue(isinstance(x, FakeTensor))
self.assertTrue(x.device.type == "cpu")
def test_mode(self):
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
y = torch.rand([4], device="cpu")
out = y + y
self.assertTrue(isinstance(out, FakeTensor))
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_non_kwarg_device(self):
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
x = torch.rand([16, 1], device="cpu")
y = x.to(torch.device("cpu"))
self.assertIs(x, y)
z = x.to(torch.device("cuda"))
self.assertEqual(z.device.type, "cuda")
def test_fake_mode_error(self):
x = torch.rand([4, 4])
with self.assertRaisesRegex(Exception, "non-Fake Tensor inputs"):
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
y = x[0]
def test_fake_grad_copy(self):
x = torch.rand([4, 4], requires_grad=True)
x.grad = torch.rand([4, 4])
mode = FakeTensorMode()
fake_x = mode.from_tensor(x)
prims.utils.compare_tensor_meta(fake_x, x)
prims.utils.compare_tensor_meta(fake_x.grad, x.grad)
self.assertTrue(isinstance(fake_x.grad, FakeTensor))
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_like_constructor(self):
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
x = torch.rand([4, 4])
y = torch.ones_like(x)
self.assertTrue(isinstance(y, FakeTensor))
self.assertEqual(y.device.type, "cpu")
z = torch.ones_like(x, device="cuda")
self.assertTrue(isinstance(z, FakeTensor))
self.assertEqual(z.device.type, "cuda")
def test_binary_op_type_promotion(self):
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
x = torch.empty([2, 2], dtype=torch.float)
y = torch.empty([2, 2], dtype=torch.int64)
out = x / y
self.assertEqual(out.dtype, torch.float)
self.assertEqual(out.device.type, "cpu")
def test_from_numpy(self):
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
x = torch.tensor(np.zeros([4, 4]))
self.checkType(x, "cpu", [4, 4])
def test_randperm(self):
x = torch.randperm(10)
y = torch.randperm(5, device="cpu")
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
x1 = torch.randperm(10)
prims.utils.compare_tensor_meta(x, x1)
y1 = torch.randperm(5, device="cpu")
prims.utils.compare_tensor_meta(y, y1)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_cpu_fallback(self):
with enable_torch_dispatch_mode(FakeTensorMode(inner=None, allow_fallback_kernels=False)):
filters = torch.randn(8, 4, 3, 3).cuda()
inputs = torch.randn(1, 4, 5, 5).cuda()
out = torch.nn.functional.conv2d(inputs, filters, padding=1)
self.assertEqual(out.device.type, "cuda")
self.assertEqual(list(out.size()), [1, 8, 5, 5])
with enable_torch_dispatch_mode(FakeTensorMode(inner=None, allow_fallback_kernels=True)):
# intentionally bad inputs
filters = torch.randn(8, 20, 3, 3).cuda()
inputs = torch.randn(1, 7, 10, 5).cuda()
with self.assertRaises(RuntimeError):
torch.nn.functional.conv2d(inputs, filters, padding=1)
with enable_torch_dispatch_mode(FakeTensorMode(inner=None, allow_fallback_kernels=True)):
filters = torch.randn(8, 4, 3, 3).cuda()
inputs = torch.randn(1, 4, 5, 5).cuda()
out = torch.nn.functional.conv2d(inputs, filters, padding=1)
self.assertEqual(out.device.type, "cuda")
self.assertEqual(list(out.size()), [1, 8, 5, 5])
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_normalize_device(self):
with FakeTensorMode():
x = torch.empty(1, device="cuda")
y = torch.empty(1, device=f"cuda:{torch.cuda.current_device()}")
out = x + y
self.checkType(out, "cuda", [1])
@skipIfRocm
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_cudnn_rnn(self):
def fn(
a0,
b0,
b1,
b2,
b3,
b4,
b5,
b6,
b7,
b8,
b9,
b10,
b11,
b12,
b13,
b14,
b15,
a3,
a4,
a5,
):
a1 = [
b0,
b1,
b2,
b3,
b4,
b5,
b6,
b7,
b8,
b9,
b10,
b11,
b12,
b13,
b14,
b15,
]
return torch.ops.aten._cudnn_rnn(
a0,
a1,
4,
a3,
a4,
a5,
2,
2048,
0,
2,
False,
0.0,
False,
True,
[],
None,
)
mode = FakeTensorMode(inner=None)
for i, context in enumerate([contextlib.nullcontext, lambda: enable_torch_dispatch_mode(mode)]):
with context():
inps = (
torch.randn([92, 8, 2048]).cuda(),
torch.randn([8192, 2048]).cuda(),
torch.randn([8192, 2048]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([8192, 2048]).cuda(),
torch.randn([8192, 2048]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([8192, 4096]).cuda(),
torch.randn([8192, 2048]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([8192, 4096]).cuda(),
torch.randn([8192, 2048]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([167837696]).cuda(),
torch.randn([4, 8, 2048]).cuda(),
torch.randn([4, 8, 2048]).cuda(),
)
out = fn(*inps)
self.assertIs(out[4], inps[-3])
for ten in out:
if i == 1:
self.assertTrue(isinstance(ten, FakeTensor))
self.assertEqual(ten.device.type, 'cuda')
@skipIfRocm
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_fallback_memory_prop(self):
m = nn.Conv2d(16, 33, 3, stride=2, device="cuda", dtype=torch.half)
m = m.to(memory_format=torch.channels_last)
mode = FakeTensorMode(inner=None)
# TODO: module.to() doesn't work because it assigns .data, which is ignored
with torch._subclasses.fake_tensor.FakeCopyMode(mode):
mod_copied = copy.deepcopy(m)
with enable_torch_dispatch_mode(mode):
input = torch.rand(20, 16, 50, 100, dtype=torch.half, device="cuda").to(memory_format=torch.channels_last)
out = mod_copied(input)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.checkType(out, "cuda", [20, 33, 24, 49])
def test_data_dependent_operator(self):
with enable_torch_dispatch_mode(
FakeTensorMode(inner=None, allow_fallback_kernels=False)
):
x = torch.rand([10, 10])
self.assertRaises(DynamicOutputShapeException, lambda: torch.nonzero(x))
def checkMetaProps(self, t1, t2):
prims.utils.compare_tensor_meta(t1, t2)
@skipIfCrossRef
def test_deepcopy(self):
mode = FakeTensorMode(inner=None)
mod = torch.nn.BatchNorm2d(10)
with torch._subclasses.fake_tensor.FakeCopyMode(mode):
mod_copied = copy.deepcopy(mod)
def check_copy(mod, mod_copied):
for name, param in itertools.chain(mod.named_parameters(), mod.named_buffers()):
param_copied = getattr(mod_copied, name)
self.checkMetaProps(param, param_copied)
self.assertTrue(isinstance(param_copied, FakeTensor))
self.assertEqual(isinstance(param, torch.nn.Parameter), isinstance(param_copied, torch.nn.Parameter))
self.assertEqual(param.requires_grad, param_copied.requires_grad)
check_copy(mod, mod_copied)
class ModuleNew(torch.nn.Module):
def __init__(self):
                super().__init__()
self.a = torch.rand([10, 2])
self.b = self.a
self.c = self.a[0]
mod = ModuleNew()
with torch._subclasses.fake_tensor.FakeCopyMode(mode):
mod_copied = copy.deepcopy(mod)
self.assertIs(mod_copied.a, mod_copied.b)
self.assertEqual(mod_copied.b.storage()._cdata, mod_copied.a.storage()._cdata)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_new(self):
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
a = torch.rand([16, 1])
self.checkType(a.new(10, 10), "cpu", [10, 10])
self.checkType(a.new([1, 2, 3, 4]), "cpu", [4])
b = torch.rand([4, 4], device='cuda')
self.checkType(b.new(device='cuda'), "cuda", [0])
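# --- Illustrative sketch (not part of the original test suite) ---
# A minimal, hedged end-to-end example of the FakeTensorMode workflow exercised
# by the tests above: convert a real tensor into a FakeTensor, run an op under
# the dispatch mode, and check that only metadata (shape/dtype/device) is
# propagated. The helper name `_fake_tensor_workflow_demo` is hypothetical.
def _fake_tensor_workflow_demo():
    mode = FakeTensorMode(inner=None)
    real = torch.randn(3, 4)
    with enable_torch_dispatch_mode(mode):
        fake = mode.from_tensor(real)   # no real storage behind this tensor
        out = fake.sum(dim=1)           # meta kernels propagate shape and dtype
    assert isinstance(out, FakeTensor)
    assert tuple(out.shape) == (3,) and out.dtype == real.dtype
    return out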
def contains_type(type: torch._C.Type, maybe_contained_type: torch._C.Type):
return maybe_contained_type.isSubtypeOf(type) or any(
contains_type(e, maybe_contained_type) for e in type.containedTypes()
)
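# Illustrative usage of contains_type (hypothetical helper, not in the original
# file): an Optional[Tensor] argument type is reported as containing TensorType
# because contains_type recurses through containedTypes().
def _contains_type_demo():
    ten = torch._C.TensorType.get()
    opt_ten = torch._C.OptionalType(ten)
    assert contains_type(opt_ten, ten)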
class FakeTensorConverterTest(TestCase):
def test_memoized_conversion_to_meta(self):
x = torch.rand(2, 2, 2)
mode = FakeTensorMode(inner=None)
self.assertTrue(mode.from_tensor(x) is mode.from_tensor(x))
def test_memoized_conversion_from_meta(self):
x = torch.rand(2, 2).to(device="meta")
mode = FakeTensorMode(inner=None)
converter = mode.fake_tensor_converter
self.assertTrue(converter(mode, x, "cpu") is converter(mode, x, "cpu"))
def test_separate_tensor_storages_view(self):
x = torch.rand(2, 2, 2)
y = x[0]
mode = FakeTensorMode(inner=None)
converter = mode.fake_tensor_converter
x_conv = converter(mode, x)
y_conv = converter(mode, y)
self.assertEqual(torch._C._storage_id(x_conv), torch._C._storage_id(y_conv))
def test_separate_tensor_storages_non_view(self):
x = torch.rand(2, 2, 2)
y = torch.rand(4, 2)
y.set_(x.storage())
mode = FakeTensorMode(inner=None)
converter = mode.fake_tensor_converter
x_conv = converter(mode, x)
y_conv = converter(mode, y)
stor_id = torch._C._storage_id(x_conv)
self.assertEqual(stor_id, torch._C._storage_id(y_conv))
del x
self.assertEqual(len(converter.tensor_memo), 1)
converter.meta_converter.check_for_expired_weak_storages()
self.assertEqual(len(converter.meta_converter.storage_memo), 1)
del y
self.assertEqual(len(converter.tensor_memo), 0)
converter.meta_converter.check_for_expired_weak_storages()
self.assertEqual(len(converter.meta_converter.storage_memo), 0)
def test_dead_weak_ref(self):
x = torch.rand(2, 2, 2)
y = x[0]
mode = FakeTensorMode(inner=None)
converter = FakeTensorConverter()
x_conv = converter(mode, x)
x_conv_storage = torch._C._storage_id(x_conv)
del x_conv
self.assertFalse(x in converter.tensor_memo)
y_conv = converter(mode, y)
self.assertEqual(x_conv_storage, torch._C._storage_id(y_conv))
def test_dead_key(self):
x = torch.rand(2, 2, 2)
mode = FakeTensorMode(inner=None)
converter = FakeTensorConverter()
x_conv = converter(mode, x)
self.assertEqual(len(converter.tensor_memo), 1)
self.assertEqual(len(converter.meta_converter.tensor_memo), 1)
del x
self.assertEqual(len(converter.tensor_memo), 0)
self.assertEqual(len(converter.meta_converter.tensor_memo), 0)
def test_no_active_mode(self):
mode = FakeTensorMode(inner=None)
with enable_torch_dispatch_mode(mode):
x = torch.empty(2, 2, device="cpu")
y = torch.empty(2, 2, device="cpu")
out = x + y
self.assertEqual(mode, out.fake_mode)
self.assertTrue(isinstance(out, FakeTensor))
self.assertEqual(out.device.type, "cpu")
def test_separate_mode_error(self):
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
x = torch.empty(2, 2, device="cpu")
with enable_torch_dispatch_mode(FakeTensorMode(inner=None)):
y = torch.empty(2, 2, device="cpu")
        self.assertRaises(Exception, lambda: x + y)
def test_no_ref_cycle(self):
x = torch.rand([4])
mode = torch._prims.get_prim_fake_mode()
y = mode.from_tensor(x)
assert mode is torch._prims.get_prim_fake_mode()
self.assertEqual(len(mode.fake_tensor_converter.tensor_memo), 1)
del mode
del y
new_mode = torch._prims.get_prim_fake_mode()
self.assertEqual(len(new_mode.fake_tensor_converter.tensor_memo), 0)
class FakeTensorOperatorInvariants(TestCase):
@staticmethod
def get_aten_op(schema):
namespace, name = schema.name.split("::")
overload = schema.overload_name if schema.overload_name else "default"
assert namespace == "aten"
return getattr(getattr(torch.ops.aten, name), overload)
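    # Illustrative example (hypothetical values): for a schema named "aten::add"
    # with overload_name "Tensor", get_aten_op resolves to torch.ops.aten.add.Tensor;
    # schemas without an overload name resolve to the "default" overload.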
@staticmethod
def get_all_aten_schemas():
for schema in torch._C._jit_get_all_schemas():
namespace = schema.name.split("::")[0]
if namespace != "aten":
continue
yield schema
def test_non_kwarg_only_device(self):
for schema in self.get_all_aten_schemas():
ten_type = torch._C.TensorType.get()
if not any(
contains_type(arg.type, ten_type)
for arg in itertools.chain(schema.arguments, schema.returns)
):
continue
opt_device = torch._C.OptionalType(torch._C.DeviceObjType.get())
has_non_kwarg_device = any(
not arg.kwarg_only and arg.type.isSubtypeOf(opt_device)
for arg in schema.arguments
)
if has_non_kwarg_device:
self.assertTrue(
self.get_aten_op(schema) in torch._subclasses.fake_tensor._device_not_kwarg_ops
)
def test_tensor_constructors_all_have_kwarg_device(self):
for schema in self.get_all_aten_schemas():
op = self.get_aten_op(schema)
if not torch._subclasses.fake_tensor._is_tensor_constructor(op):
continue
opt_device = torch._C.OptionalType(torch._C.DeviceObjType.get())
has_kwarg_device = any(
arg.kwarg_only and arg.type.isSubtypeOf(opt_device)
for arg in schema.arguments
)
self.assertTrue(
has_kwarg_device or op == torch.ops.aten._list_to_tensor.default
)
def test_like_ops(self):
for schema in self.get_all_aten_schemas():
if "_like" == schema.name[-5:]:
op = self.get_aten_op(schema)
self.assertTrue(op in torch._subclasses.fake_tensor._like_tensor_constructors)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/test_fake_tensor.py
|
# Owner(s): ["oncall: jit"]
import sys
sys.argv.append("--jit_executor=profiling")
from test_jit import * # noqa: F403
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_jit_profiling.py
|
import argparse
import torch
def run_model(level):
m = torch.nn.Linear(20, 30)
input = torch.randn(128, 20)
with torch.backends.mkl.verbose(level):
m(input)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--verbose-level", default=0, type=int)
args = parser.parse_args()
try:
run_model(args.verbose_level)
except Exception as e:
print(e)
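# Example invocation (illustrative; the flag name comes from the argparse setup above):
#   python mkl_verbose.py --verbose-level 1
# When PyTorch is built with MKL, the Linear forward above should emit MKL verbose
# logging at the requested level; any failure is caught and printed by the try/except.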
|
pytorch-master
|
test/mkl_verbose.py
|
# Owner(s): ["module: nn"]
from itertools import product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck, gradgradcheck, skipIfMps)
from unittest.mock import patch, call
class TestModule(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
precision = 1e-5
rel_tol = 1e-5
def _assert_module_parameters_and_buffer_are(self, module, device, dtype):
# Check device placement and dtype for created parameters and buffers.
# Only verify floating point dtypes since that's what the kwarg or methods
# such as `float()` applies to.
if not isinstance(device, torch.device):
device = torch.device(device)
def _check_module(items, name, device=device, dtype=dtype):
for item_name, item in items:
self.assertEqual(
item.device, device,
f'{name} {item_name} is on device {item.device} instead of the expected device {device}')
if item.dtype.is_floating_point:
self.assertEqual(
item.dtype, dtype,
f'{name} {item_name} is of dtype {item.dtype} instead of the expected dtype {dtype}')
_check_module(module.named_parameters(), "Parameter")
_check_module(module.named_buffers(), "Buffer")
@skipIfMps # the test doesn't work on MPS as double types are not supported
@modules(module_db)
def test_forward(self, device, dtype, module_info, training):
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
dtype_to_method_caller = {
torch.float32: methodcaller("float"),
torch.float64: methodcaller("double"),
}
for module_input in module_inputs:
if module_input.forward_input is None:
continue
with freeze_rng_state():
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# === Do forward pass. ===
args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
outputs = m(*args, **kwargs)
# === Compare outputs to a reference if one is specified. ===
# TODO: Handle precision
reference_fn = module_input.reference_fn
if reference_fn is not None:
ref_outputs = reference_fn(m, *args, **kwargs)
self.assertEqual(outputs, ref_outputs)
# === Use the method call and verify the parameters and buffers ===
if dtype in dtype_to_method_caller:
dtype_to_method_caller[dtype](m)
m(*args, **kwargs)
self._assert_module_parameters_and_buffer_are(m, device, dtype)
# Tests passing factory kwargs (e.g. device / dtype) during module instantiation.
# They should be applied to any created parameters and buffers.
@modules(module_db)
def test_factory_kwargs(self, device, dtype, module_info, training):
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
for module_input in module_inputs:
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
# Check if this module creates parameters or registers buffers.
# The mock magic here passes through to the real Parameter / register_buffer
# logic and is only used to check call inputs.
module_creates_params_or_buffers = False
parameter_new = mock_wrapper(torch.nn.Parameter.__new__)
with patch.object(torch.nn.Parameter, '__new__', parameter_new):
register_buffer = mock_wrapper(torch.nn.Module.register_buffer)
with patch.object(torch.nn.Module, 'register_buffer', register_buffer):
m = module_cls(*args, **kwargs)
m.train(training)
# Check if a parameter or buffer was created with a tensor not passed to the constructor.
constructor_tensors = get_tensors_from(args, kwargs)
for mock in [parameter_new.mock, register_buffer.mock]:
for call_args, call_kwargs in mock.call_args_list:
call_tensors = get_tensors_from(call_args, call_kwargs)
if len(call_tensors) > 0 and not constructor_tensors.intersection(call_tensors):
module_creates_params_or_buffers = True
break
if not module_creates_params_or_buffers:
continue
# Instantiate module with the factory kwargs.
kwargs.update({
'device': device,
'dtype': dtype,
})
if issubclass(module_info.module_cls, torch.nn.modules.lazy.LazyModuleMixin):
# Ensure device and dtype are passed to all UninitializedParameters and UninitializedBuffers.
uninit_param_new = mock_wrapper(torch.nn.UninitializedParameter.__new__)
with patch.object(torch.nn.UninitializedParameter, '__new__', uninit_param_new):
uninit_buffer_new = mock_wrapper(torch.nn.UninitializedBuffer.__new__)
with patch.object(torch.nn.UninitializedBuffer, '__new__', uninit_buffer_new):
m = module_cls(*args, **kwargs)
m.train(training)
uninit_param_new.mock.assert_has_calls(
[call(device=device, dtype=dtype) for _ in uninit_param_new.mock.mock_calls])
uninit_buffer_new.mock.assert_has_calls(
[call(device=device, dtype=dtype) for _ in uninit_buffer_new.mock.mock_calls])
else:
# Check device placement and dtype for created parameters and buffers.
# Only verify floating point dtypes since that's what the kwarg applies to.
m = module_cls(*args, **kwargs)
m.train(training)
self._assert_module_parameters_and_buffer_are(m, device, dtype)
@onlyCUDA
@modules(module_db)
def test_multiple_device_transfer(self, device, dtype, module_info, training):
module_cls = module_info.module_cls
module_inputs_device = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
module_inputs_cpu = module_info.module_inputs_func(module_info, device="cpu", dtype=dtype,
requires_grad=False, training=training)
for module_input_device, module_input_cpu in zip(module_inputs_device, module_inputs_cpu):
if module_input_device.forward_input is None:
continue
with freeze_rng_state():
# === Instantiate the module. ===
args, kwargs = module_input_device.constructor_input.args, module_input_device.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# === Do forward pass on GPU ===
input_device_args = module_input_device.forward_input.args
input_device_kwargs = module_input_device.forward_input.kwargs
m(*input_device_args, **input_device_kwargs)
self._assert_module_parameters_and_buffer_are(m, device, dtype)
# === Move to CPU ===
input_cpu_args = module_input_cpu.forward_input.args
input_cpu_kwargs = module_input_cpu.forward_input.kwargs
m.cpu()
m(*input_cpu_args, **input_cpu_kwargs)
self._assert_module_parameters_and_buffer_are(m, "cpu", dtype)
# === Move back to GPU and forward pass ===
m.cuda()
m(*input_device_args, **input_device_kwargs)
self._assert_module_parameters_and_buffer_are(m, device, dtype)
if torch.cuda.device_count() >= 2:
                    # === Test that cross-GPU transfer works ===
def _to_device1(objs):
if isinstance(objs, (tuple, list)):
return type(objs)(_to_device1(item) for item in objs)
elif isinstance(objs, dict):
return {name: _to_device1(item) for name, item in objs.items()}
elif isinstance(objs, torch.Tensor):
return objs.cuda(1)
else:
return objs
input_device_1_args = _to_device1(input_device_args)
input_device_1_kwargs = _to_device1(input_device_kwargs)
m.cuda(1)
with torch.cuda.device(1):
m(*input_device_1_args, **input_device_1_kwargs)
self._assert_module_parameters_and_buffer_are(m, torch.device("cuda:1"), dtype)
@modules(module_db)
def test_repr(self, device, dtype, module_info, training):
# Test module can be represented with repr and str without errors.
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
for module_input in module_inputs:
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# Check that these methods do not raise errors
m.__repr__()
str(m)
@skipIfMps
@modules(module_db)
def test_pickle(self, device, dtype, module_info, training):
# Test that module can be pickled and unpickled.
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
for module_input in module_inputs:
if module_input.forward_input is None:
continue
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
with freeze_rng_state():
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# === Do forward pass. ===
args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
output = m(*args, **kwargs)
# === Check unpickled module gives the same output. ===
with tempfile.TemporaryFile() as f:
torch.save(m, f)
f.seek(0)
m_copy = torch.load(f)
output_from_copy = m_copy(*args, **kwargs)
self.assertEqual(output, output_from_copy)
@skipMeta
@modules([module_info for module_info in module_db
if 'inplace' in signature(module_info.module_cls).parameters])
def test_check_inplace(self, device, dtype, module_info, training):
# Check if the inplace variant of the module gives the same result as the out of place
# variant.
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=True, training=training)
for module_input in module_inputs:
if module_input.forward_input is None:
continue
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m_op = module_cls(*args, **kwargs, inplace=False)
m_op.to(device).to(dtype)
m_op.train(training)
m_inplace = module_cls(*args, **kwargs, inplace=True)
m_inplace.to(device).to(dtype)
m_inplace.train(training)
            # === Inplace modules only support inplace operations on the first argument ===
input_args, input_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
# === Do not allow the first input to be in input_kwargs ===
forward_sig = signature(m_op).parameters
self.assertGreaterEqual(len(forward_sig), 1)
            first_param_name = next(iter(forward_sig))
            self.assertNotIn(first_param_name, input_kwargs)
# === Out of place operation does not write to original tensor ===
self.assertGreaterEqual(len(input_args), 1)
input_version = input_args[0]._version
with freeze_rng_state():
output_op = m_op(*input_args, **input_kwargs)
self.assertEqual(input_args[0]._version, input_version)
# === Check that the inplace operation gives the same result ===
input_arg_copy = deepcopy(input_args)
input_arg_clone = tuple(i.clone() for i in input_arg_copy)
with freeze_rng_state():
output_ip = m_inplace(*input_arg_clone, **input_kwargs)
self.assertNotEqual(input_arg_clone[0]._version, input_version)
self.assertEqual(output_op, output_ip)
# === Check that the gradients are the same ===
grad = output_op.data.clone().normal_()
output_op.backward(grad)
output_ip.backward(grad)
self.assertEqual(input_args[0].grad, input_arg_copy[0].grad)
def _traverse_obj(self, obj, func):
if isinstance(obj, (tuple, list)):
return type(obj)(self._traverse_obj(o, func) for o in obj)
elif isgenerator(obj):
return tuple(self._traverse_obj(o, func) for o in obj)
elif isinstance(obj, dict):
return {name: self._traverse_obj(o, func) for name, o in obj.items()}
elif isinstance(obj, (torch.Tensor, torch.nn.Parameter)):
return func(obj)
def _retain_grad(self, obj):
        # Gradients need to be retained to check for grad. This is useful when
# non-leafs are present in the graph.
def inner_retain_grad(obj):
if obj.requires_grad:
obj.retain_grad()
self._traverse_obj(obj, inner_retain_grad)
def _get_grads(self, obj):
def inner_get_grad(obj):
if obj.requires_grad:
return obj.grad
return self._traverse_obj(obj, inner_get_grad)
def _zero_grad(self, obj):
def inner_zero_grad(obj):
if obj.grad is not None:
obj.grad = None
self._traverse_obj(obj, inner_zero_grad)
@skipIfMps
@modules(module_db)
def test_non_contiguous_tensors(self, device, dtype, module_info, training):
# Check modules work with non-contiguous tensors
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=True, training=training)
def _make_non_contiguous(obj):
def inner_make_non_contiguous(obj):
                # Scalar tensors cannot be made non-contiguous
if not isinstance(obj, torch.Tensor) or obj.dim() == 0:
return obj
out = torch.repeat_interleave(obj, 2, dim=-1)
out = out[..., ::2].detach()
out.requires_grad = obj.requires_grad
return out
return self._traverse_obj(obj, inner_make_non_contiguous)
def _can_be_noncontiguous(obj):
if isinstance(obj, (tuple, list)):
return any(_can_be_noncontiguous(o) for o in obj)
elif isinstance(obj, dict):
return any(_can_be_noncontiguous(o) for o in obj.values())
            # scalar tensors cannot be non-contiguous
if not isinstance(obj, torch.Tensor) or obj.dim() == 0:
return False
return True
for module_input in module_inputs:
if module_input.forward_input is None:
continue
input_args, input_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
if not (_can_be_noncontiguous(input_args) or _can_be_noncontiguous(input_kwargs)):
continue
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
self._retain_grad((input_args, input_kwargs))
# === Forward with default input
with freeze_rng_state():
default_output = m(*input_args, **input_kwargs)
if isinstance(default_output, torch.Tensor):
grad_output = default_output.clone().detach_().normal_()
default_output.backward(grad_output, retain_graph=True)
else:
grad_output = tuple(self._traverse_obj(o, lambda o: o.clone().detach_().normal_())
for o in default_output)
flattened_default_output, _ = torch.utils._pytree.tree_flatten(default_output)
flattened_grad_output, _ = torch.utils._pytree.tree_flatten(grad_output)
for o, g_o in zip(flattened_default_output, flattened_grad_output):
o.backward(g_o, retain_graph=True)
default_input_args_grad, default_input_kwargs_grad = deepcopy(self._get_grads((input_args, input_kwargs)))
default_param_grad = deepcopy([p.grad for p in m.parameters()])
# === Construct non-contiguous tensors ===
nc_input_args, nc_input_kwargs = _make_non_contiguous((input_args, input_kwargs))
nc_grad_output = _make_non_contiguous(grad_output)
# === Compare results with non-contiguous and contiguous tensors ===
inputs = [(input_args, input_kwargs), (nc_input_args, nc_input_kwargs)]
grads = [grad_output, nc_grad_output]
for (in_args, in_kwargs), g_out in product(inputs, grads):
g_out_copy = deepcopy(g_out)
self._zero_grad((in_args, in_kwargs))
self._zero_grad(m.parameters())
with freeze_rng_state():
out = m(*in_args, **in_kwargs)
if isinstance(out, torch.Tensor):
out.backward(g_out_copy, retain_graph=True)
else:
flattened_out, _ = torch.utils._pytree.tree_flatten(out)
flattened_g_out_copy, _ = torch.utils._pytree.tree_flatten(g_out_copy)
for o, g_o in zip(flattened_out, flattened_g_out_copy):
o.backward(g_o, retain_graph=True)
input_args_grad, input_kwargs_grad = self._get_grads((in_args, in_kwargs))
self.assertEqual(out, default_output)
self.assertEqual(input_args_grad, default_input_args_grad, atol=1e-4, rtol=0)
self.assertEqual(input_kwargs_grad, default_input_kwargs_grad, atol=1e-4, rtol=0)
param_grad = [p.grad for p in m.parameters()]
self.assertEqual(param_grad, default_param_grad)
def _test_gradients_helper(self, device, dtype, module_info, training, check):
# Check gradients
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=True, training=training)
        # === Set nondet tol for gradcheck to user-defined value if on CUDA and cuDNN is enabled ===
gradcheck_nondet_tol = 0.0
if (torch.device(device).type == 'cuda' and torch.backends.cudnn.enabled):
gradcheck_nondet_tol = module_info.gradcheck_nondet_tol
for module_input in module_inputs:
if module_input.forward_input is None:
continue
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
params = tuple(m.parameters())
# === Lazy modules need to see an input to initialize params before gradcheck is run. ===
input_args, input_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
if issubclass(module_info.module_cls, torch.nn.modules.lazy.LazyModuleMixin):
with torch.no_grad():
m(*input_args, **input_kwargs)
# === Perform gradient check on the input_args ===
other_kwargs = {}
kwarg_tensors = []
for name, obj in input_kwargs.items():
if isinstance(obj, torch.Tensor):
kwarg_tensors.append((name, obj))
else:
other_kwargs[name] = obj
grad_input = input_args + params + tuple(obj for (_, obj) in kwarg_tensors)
flat_input, flat_spec = torch.utils._pytree.tree_flatten(grad_input)
def fn_to_gradcheck(*flat_input_and_params):
input_and_params = torch.utils._pytree.tree_unflatten(flat_input_and_params, flat_spec)
new_input_args = input_and_params[:len(input_args)]
kwarg_args = input_and_params[-len(kwarg_tensors):]
new_kwargs = {name: obj for (name, _), obj in zip(kwarg_tensors, kwarg_args)}
with freeze_rng_state():
output = m(*new_input_args, **new_kwargs, **other_kwargs)
output_flattened, _ = torch.utils._pytree.tree_flatten(output)
return output_flattened
self.assertTrue(check(fn_to_gradcheck, flat_input, nondet_tol=gradcheck_nondet_tol))
@modules(module_db, allowed_dtypes=[torch.double])
def test_grad(self, device, dtype, module_info, training):
self._test_gradients_helper(device, dtype, module_info, training, gradcheck)
@modules([m for m in module_db if m.supports_gradgrad],
allowed_dtypes=[torch.double])
def test_gradgrad(self, device, dtype, module_info, training):
self._test_gradients_helper(device, dtype, module_info, training, gradgradcheck)
@onlyCUDA
@toleranceOverride({torch.float32: tol(5e-2, 0),
torch.float64: tol(4e-4, 0)})
@modules(module_db)
def test_cpu_gpu_parity(self, device, dtype, module_info, training):
        # TODO: RNN / GRU / LSTM don't support backward in eval mode with cuDNN; skip this in a
        # nicer way for eval mode only.
# See https://github.com/pytorch/pytorch/issues/79161
        rnn_modules = {torch.nn.RNN, torch.nn.GRU, torch.nn.LSTM}
if (module_info.module_cls in rnn_modules
and not training
and 'cuda' in device
and torch.backends.cudnn.enabled):
return
# Test cpu and gpu results are the same
module_cls = module_info.module_cls
module_inputs_cpu = module_info.module_inputs_func(module_info, device="cpu", dtype=dtype,
requires_grad=True, training=training)
def _to_device(obj):
if isinstance(obj, torch.Tensor):
res = obj.detach().to(device=device)
res.requires_grad = obj.requires_grad
return res
elif isinstance(obj, tuple):
return tuple(_to_device(o) for o in obj)
elif isinstance(obj, dict):
return {key: _to_device(o) for key, o in obj.items()}
else:
return deepcopy(obj)
for module_input in module_inputs_cpu:
# === Move input from cpu to device ===
cpu_forward_args = module_input.forward_input.args
cpu_forward_kwargs = module_input.forward_input.kwargs
gpu_forward_args, gpu_forward_kwargs = _to_device((cpu_forward_args, cpu_forward_kwargs))
self._retain_grad((cpu_forward_args, cpu_forward_kwargs, gpu_forward_args, gpu_forward_kwargs))
# === Construct module on cpu and gpu ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
cpu_module = module_cls(*args, **kwargs).to(dtype).to("cpu")
cpu_module.train(training)
gpu_module = module_cls(*args, **kwargs).to(dtype).to(device)
gpu_module.train(training)
# === Lazy modules need to see an input to initialize params ===
if issubclass(module_cls, torch.nn.modules.lazy.LazyModuleMixin):
with torch.no_grad():
cpu_module(*cpu_forward_args, **cpu_forward_kwargs)
gpu_module(*gpu_forward_args, **gpu_forward_kwargs)
for cpu_p, gpu_p in zip(cpu_module.parameters(), gpu_module.parameters()):
gpu_p.data.copy_(cpu_p)
# === Compare forward output between cpu and gpu ===
cpu_outputs = cpu_module(*cpu_forward_args, **cpu_forward_kwargs)
gpu_outputs = gpu_module(*gpu_forward_args, **gpu_forward_kwargs)
self.assertEqual(cpu_outputs, gpu_outputs)
# === Run backwards on CPU and GPU and compare results ===
def check_backward(cpu_output, gpu_output):
cpu_grad_output = cpu_output.clone().normal_()
gpu_grad_output = cpu_grad_output.type_as(gpu_output)
cpu_output.backward(cpu_grad_output, retain_graph=True)
gpu_output.backward(gpu_grad_output, retain_graph=True)
cpu_grad_input = self._get_grads(cpu_forward_args)
gpu_grad_input = self._get_grads(gpu_forward_args)
self.assertEqual(cpu_grad_input, gpu_grad_input)
for cpu_p, gpu_p in zip(cpu_module.parameters(), gpu_module.parameters()):
self.assertEqual(cpu_p.grad, gpu_p.grad)
cpu_grad_kwarg_input = self._get_grads(cpu_forward_kwargs)
gpu_grad_kwarg_input = self._get_grads(gpu_forward_kwargs)
self.assertEqual(cpu_grad_kwarg_input, gpu_grad_kwarg_input)
for _ in range(5):
if isinstance(cpu_outputs, torch.Tensor):
check_backward(cpu_outputs, gpu_outputs)
else:
flatten_cpu_outputs, _ = torch.utils._pytree.tree_flatten(cpu_outputs)
flatten_gpu_outputs, _ = torch.utils._pytree.tree_flatten(gpu_outputs)
for cpu_output, gpu_output in zip(flatten_cpu_outputs, flatten_gpu_outputs):
check_backward(cpu_output, gpu_output)
@skipIfMps
@modules(module_db)
def test_memory_format(self, device, dtype, module_info, training):
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
module_memformat_affects_out = module_info.module_memformat_affects_out
def _get_mem_formats(channels_last=False, channels_last_3d=False):
if channels_last:
return ([torch.contiguous_format, torch.channels_last],
[torch.preserve_format, torch.contiguous_format, torch.channels_last])
elif channels_last_3d:
return ([torch.contiguous_format, torch.channels_last_3d],
[torch.preserve_format, torch.contiguous_format, torch.channels_last_3d])
else:
return ([torch.contiguous_format],
[torch.preserve_format, torch.contiguous_format])
# Check that at least one Tensor input has dim == n
def _check_dims(obj, n):
if isinstance(obj, torch.Tensor):
return obj.dim() == n
elif isinstance(obj, (tuple, list)):
return any(_check_dims(o, n) for o in obj)
else:
return False
# Called after _check_dims, when we know that >= 1 tensor can be converted to mem_format
def _to_mem_format(mem_format, obj):
def inner_to_mem_format(obj):
d = obj.dim()
if ((mem_format == torch.channels_last and d != 4)
or (mem_format == torch.channels_last_3d and d != 5)):
return obj
return obj.to(memory_format=mem_format)
return self._traverse_obj(obj, inner_to_mem_format)
def _check_out_mem_format(output, input_mem_format, module_mem_format):
def inner_check_out_mem_format(output):
d = output.dim()
if (d == 4 and ((input_mem_format == torch.channels_last)
or (module_mem_format == torch.channels_last and module_memformat_affects_out))):
self.assertTrue(output.is_contiguous(memory_format=torch.channels_last))
elif (d == 5 and ((input_mem_format == torch.channels_last_3d)
or (module_mem_format == torch.channels_last_3d and module_memformat_affects_out))):
self.assertTrue(output.is_contiguous(memory_format=torch.channels_last_3d))
else:
self.assertTrue(output.is_contiguous())
return self._traverse_obj(output, inner_check_out_mem_format)
for module_input in module_inputs:
if module_input.forward_input is None:
continue
supports_channels_last = _check_dims(module_input.forward_input.args, 4)
supports_channels_last_3d = _check_dims(module_input.forward_input.args, 5)
input_mem_formats, module_mem_formats = _get_mem_formats(supports_channels_last, supports_channels_last_3d)
with freeze_rng_state():
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# === Get output in (contiguous, contiguous) configuration. ===
args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
desired_outputs = m(*args, **kwargs)
for input_mem_format in input_mem_formats:
# === Change memformat of input. ===
module_input.forward_input.args = _to_mem_format(input_mem_format,
module_input.forward_input.args)
module_input.forward_input.kwargs = _to_mem_format(input_mem_format,
module_input.forward_input.kwargs)
for module_mem_format in module_mem_formats:
# === Change memformat of module ===
m.to(memory_format=module_mem_format)
# === Do forward pass. ===
args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
outputs = m(*args, **kwargs)
# === Compare outputs to (contiguous, contiguous) output. ===
                        if input_mem_format != torch.contiguous_format or module_mem_format != torch.contiguous_format:
self.assertEqual(outputs, desired_outputs)
# === Check mem format of output. ===
_check_out_mem_format(outputs, input_mem_format, module_mem_format)
    # Test whether train and eval modes differ for each module. Used to verify
    # that the ModuleInfo train_and_eval_differ flag is correct.
@skipIfMps # the test doesn't work on MPS as double types are not supported
@modules(module_db, train_eval_mode=TrainEvalMode.train_only)
def test_if_train_and_eval_modes_differ(self, device, dtype, module_info, training):
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
# Run forward inputs through to see if the training flag is accessed during forward.
for module_input in module_inputs:
if module_input.forward_input is None:
continue
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# Remove training attribute and see if forward still works.
delattr(m, 'training')
# === Do forward pass. ===
try:
args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
m(*args, **kwargs)
except AttributeError as e:
if "'training'" in str(e):
self.assertTrue(module_info.train_and_eval_differ,
f"The ModuleInfo entry for {module_info.name} has "
"train_and_eval_differ=False, but the training mode was found to "
"affect the forward pass. Consider setting train_and_eval_differ=True "
"for this ModuleInfo entry.")
else:
raise e
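# --- Illustrative sketch (hypothetical helper, not part of the original file) ---
# Every test above is parametrized over ModuleInfo entries. A minimal hand-rolled
# version of that loop, assuming the chosen module_db entry provides CPU / float32
# inputs, looks roughly like this:
def _module_info_demo(info=None):
    info = info if info is not None else module_db[0]
    module_inputs = info.module_inputs_func(info, device="cpu", dtype=torch.float32,
                                            requires_grad=False, training=False)
    for module_input in module_inputs:
        if module_input.forward_input is None:
            continue
        args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
        m = info.module_cls(*args, **kwargs)  # instantiate the module under test
        fargs, fkwargs = module_input.forward_input.args, module_input.forward_input.kwargs
        return m(*fargs, **fkwargs)           # one forward pass with the provided inputs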
instantiate_device_type_tests(TestModule, globals())
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_modules.py
|
# Owner(s): ["module: nn"]
import contextlib
import torch
import torch.nn as nn
import torch.nn.functional as F
import unittest
from unittest.mock import patch
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
TEST_FAIRSEQ,
run_tests,
parametrize,
instantiate_parametrized_tests,
freeze_rng_state,
TEST_WITH_CROSSREF
)
from torch.testing._internal.common_cuda import TEST_CUDA
if TEST_FAIRSEQ:
import fairseq.models.transformer as fairseq_transformer
@contextlib.contextmanager
def set_default_dtype(dtype):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
try:
yield
finally:
torch.set_default_dtype(saved_dtype)
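# Example usage of the helper above (illustrative):
#   with set_default_dtype(torch.double):
#       assert torch.get_default_dtype() == torch.double
#   # the previously active default dtype is restored on exit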
class TestTransformers(NNTestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
device_list = ['cpu'] # TODO: is there a way to do parametrize for this?
if TEST_CUDA:
device_list.append('cuda')
@unittest.skip("4D mask not supported yet - activate when 4D mask supported")
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable") # TODO: make this work for both cuda and cpu
def test_self_attn_TxT_attn_mask(self):
embed_dim = 16
num_heads = 4
batch_size = 10
tgt_len = 16
query = torch.rand(batch_size, tgt_len, embed_dim, device="cuda") # [N, T, D]
attn_mask = torch.randint(0, 2, (tgt_len, tgt_len)).cuda().float() # [T, T]
attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0))
attn_mask_4d = attn_mask.expand(batch_size, num_heads, tgt_len, tgt_len)
mta_model = torch.nn.MultiheadAttention(embed_dim, num_heads, batch_first=True).cuda()
mta_model.eval()
# Generate 3D results
with torch.inference_mode():
output_mask_4d = mta_model(query, query, query, attn_mask=attn_mask_4d)[0]
output_mask_4d = output_mask_4d.transpose(0, 1) # [N, T, D]
output_mask_TxT = mta_model(query, query, query, attn_mask=attn_mask)[0]
output_mask_TxT = output_mask_TxT.transpose(0, 1) # [N, T, D]
self.assertEqual(output_mask_4d, output_mask_TxT)
@parametrize("device", device_list)
def test_transformerencoderlayer_src_mask(self, device):
batch_size = 2
seqlen = 4
d_model = 8
nhead = 8
dim_feedforward = 32
model = torch.nn.TransformerEncoderLayer(
d_model=d_model,
nhead=nhead,
dim_feedforward=dim_feedforward,
batch_first=True).to(device)
src = torch.rand(batch_size, seqlen, d_model).to(device) # bs, seqlen, d_model
src_mask = torch.zeros(seqlen, seqlen).to(torch.bool).to(device)
model(src, src_mask=src_mask)
model.eval()
with torch.no_grad():
model(src, src_mask=src_mask)
@parametrize("use_torchscript", [True, False])
@parametrize("with_no_grad", [True, False])
@parametrize("training", [True, False])
def test_transformerencoder_fastpath_torchscript(self, use_torchscript, with_no_grad, training):
"""
Test TransformerEncoder does not crash
"""
model = torch.nn.TransformerEncoder(
torch.nn.TransformerEncoderLayer(d_model=2, nhead=2, dim_feedforward=8, batch_first=True),
num_layers=2,
enable_nested_tensor=True
)
if training:
model = model.train()
else:
model = model.eval()
if use_torchscript:
model = torch.jit.script(model)
x = torch.Tensor([[[1, 2], [3, 4]]]).to(torch.float)
mask = torch.Tensor([[0, 1]]).to(torch.bool)
if with_no_grad:
cm = torch.no_grad()
else:
cm = contextlib.nullcontext()
with cm:
model(x, src_key_padding_mask=mask)
@parametrize("with_no_grad", [True, False])
@parametrize("training", [True, False])
@parametrize("enable_nested_tensor", [False])
@parametrize("device", device_list)
def test_transformerencoder_square_input(self, with_no_grad, training, enable_nested_tensor, device):
"""
Test for edge cases when input of shape (batch size, sequence length, embedding dimension) has
batch size == sequence length
"""
model = torch.nn.TransformerEncoder(
torch.nn.TransformerEncoderLayer(d_model=4, nhead=2, dim_feedforward=16, dropout=0.0, batch_first=True),
num_layers=2,
enable_nested_tensor=enable_nested_tensor
).to(device)
with torch.no_grad():
# set constant weights of the model
for idx, p in enumerate(model.parameters()):
x = p.data
sz = x.view(-1).size(0)
shape = x.shape
x = torch.cos(torch.arange(0, sz).float().view(shape))
p.data.copy_(x)
if training:
model = model.train()
else:
model = model.eval()
x = torch.arange(0, 16).reshape(2, 2, 4).to(torch.float).to(device)
src_mask = torch.Tensor([[0, 1], [0, 0]]).to(torch.bool).to(device)
if with_no_grad:
cm = torch.no_grad()
else:
cm = contextlib.nullcontext()
with cm:
result = model(x, mask=src_mask)
ref_output = torch.Tensor([[[2.420306205749512, 0.017629241570830, -0.607857942581177, -0.085519507527351],
[2.420306205749512, 0.017629241570830, -0.607857942581177, -0.085519507527351]],
[[2.419836044311523, 0.017548924311996, -0.608187675476074, -0.085347734391689],
[2.419836044311523, 0.017548924311996, -0.608187675476074, -0.085347734391689]]]
).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
@parametrize("batch_first", [True, False])
@parametrize("training", [True, False])
@parametrize("enable_nested_tensor", [True, False])
@parametrize("device", device_list)
def test_transformerencoder(self, batch_first, training, enable_nested_tensor, device):
def get_a_test_layer(activation, batch_first=False):
d_model = 4
nhead = 2
dim_feedforward = 16
dropout = 0.0
layer = nn.TransformerEncoderLayer(
d_model,
nhead,
dim_feedforward=dim_feedforward,
dropout=dropout,
activation=activation,
batch_first=batch_first,
).to(device)
with torch.no_grad():
# set constant weights of the model
for idx, p in enumerate(layer.parameters()):
x = p.data
sz = x.view(-1).size(0)
shape = x.shape
x = torch.cos(torch.arange(0, sz).float().view(shape))
p.data.copy_(x)
return layer
# this is a deterministic test for TransformerEncoder
activation = F.relu
def _test(batch_first, training, enable_nested_tensor):
def perm_fn(x):
return x.transpose(1, 0) if batch_first else x
encoder_layer = get_a_test_layer(activation=activation,
batch_first=batch_first)
model = nn.TransformerEncoder(encoder_layer, 1).to(device)
if not training:
model = model.eval()
# deterministic input
encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
[0.5387, 0.1655, 0.3565, 0.0471]],
[[0.8335, 0.2799, 0.5031, 0.2947],
[0.1402, 0.0318, 0.7636, 0.1346]],
[[0.6333, 0.9344, 0.1376, 0.9938],
[0.8924, 0.2872, 0.6692, 0.2944]],
[[0.9897, 0.6915, 0.3154, 0.1733],
[0.8645, 0.3513, 0.3064, 0.0767]],
[[0.8117, 0.2366, 0.4838, 0.7881],
[0.3718, 0.4945, 0.9511, 0.0864]]]
)).to(device)
result = model(encoder_input)
ref_output = perm_fn(torch.tensor([[[2.428589, 0.020835, -0.602055, -0.085249],
[2.427987, 0.021213, -0.602496, -0.084103]],
[[2.424689, 0.019155, -0.604793, -0.085672],
[2.413863, 0.022211, -0.612486, -0.072490]],
[[2.433774, 0.021598, -0.598343, -0.087548],
[2.425104, 0.019748, -0.604515, -0.084839]],
[[2.436185, 0.022682, -0.596625, -0.087261],
[2.433556, 0.021891, -0.598509, -0.086832]],
[[2.416246, 0.017512, -0.610712, -0.082961],
[2.422901, 0.024187, -0.606178, -0.074929]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# all 0 src_mask
src_mask = torch.zeros([5, 5]).to(device) == 1
result = model(encoder_input, mask=src_mask)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# all 0
mask = torch.zeros([2, 5]).to(device) == 1
result = model(encoder_input, src_key_padding_mask=mask)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
mask[0, 1] = 1
mask[1, 3] = 1
mask[1, 4] = 1
# If mask is not left aligned
# We disable nested tensor
model.enable_nested_tensor = enable_nested_tensor
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[2.429026, 0.020793, -0.601741, -0.085642],
[2.428811, 0.021445, -0.601912, -0.084252]],
[[2.425009, 0.019155, -0.604566, -0.085899],
[2.415408, 0.02249, -0.611415, -0.073]],
[[2.434199, 0.021682, -0.598039, -0.087699],
[2.42598, 0.019941, -0.603896, -0.085091]],
[[2.436457, 0.022736, -0.59643, -0.08736],
[2.434021, 0.022093, -0.598179, -0.08679]],
[[2.416531, 0.017498, -0.610513, -0.083181],
[2.4242, 0.024653, -0.605266, -0.074959]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# test case 2, multiple layers no norm
model = nn.TransformerEncoder(encoder_layer, 2, enable_nested_tensor=enable_nested_tensor).to(device)
if not training:
model = model.eval()
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[2.419051, 0.017446, -0.608738, -0.085003],
[2.419102, 0.017452, -0.608703, -0.085026]],
[[2.419043, 0.017445, -0.608744, -0.084999],
[2.419052, 0.017446, -0.608738, -0.085004]],
[[2.419067, 0.017448, -0.608727, -0.085010],
[2.419098, 0.017452, -0.608706, -0.085024]],
[[2.419072, 0.017449, -0.608724, -0.085012],
[2.419119, 0.017455, -0.608691, -0.085034]],
[[2.419019, 0.017442, -0.608761, -0.084989],
[2.419075, 0.017449, -0.608722, -0.085014]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
model = nn.TransformerEncoder(encoder_layer, 6, enable_nested_tensor=enable_nested_tensor).to(device)
if not training:
model = model.eval()
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[2.419101, 0.017453, -0.608703, -0.085025],
[2.419101, 0.017453, -0.608704, -0.085025]],
[[2.419101, 0.017453, -0.608703, -0.085025],
[2.419101, 0.017453, -0.608704, -0.085025]],
[[2.419101, 0.017453, -0.608703, -0.085025],
[2.419101, 0.017453, -0.608704, -0.085025]],
[[2.419101, 0.017453, -0.608703, -0.085025],
[2.419101, 0.017453, -0.608704, -0.085025]],
[[2.419101, 0.017453, -0.608703, -0.085025],
[2.419101, 0.017453, -0.608704, -0.085025]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# test case 3, multiple layers with norm
# d_model = 4
norm = nn.LayerNorm(4)
model = nn.TransformerEncoder(encoder_layer, 2, norm=norm, enable_nested_tensor=enable_nested_tensor).to(device)
if not training:
model = model.eval()
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[1.695949, -0.357635, -0.893077, -0.445238],
[1.695955, -0.357639, -0.893050, -0.445266]],
[[1.695948, -0.357634, -0.893082, -0.445233],
[1.695950, -0.357635, -0.893077, -0.445238]],
[[1.695951, -0.357636, -0.893069, -0.445246],
[1.695955, -0.357639, -0.893052, -0.445264]],
[[1.695952, -0.357636, -0.893066, -0.445249],
[1.695957, -0.357641, -0.893041, -0.445276]],
[[1.695946, -0.357632, -0.893095, -0.445220],
[1.695952, -0.357637, -0.893065, -0.445251]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
model = nn.TransformerEncoder(encoder_layer, 6, norm=norm, enable_nested_tensor=enable_nested_tensor).to(device)
if not training:
model = model.eval()
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[1.695955, -0.357639, -0.893051, -0.445265],
[1.695955, -0.357639, -0.893051, -0.445265]],
[[1.695955, -0.357639, -0.893051, -0.445265],
[1.695955, -0.357639, -0.893051, -0.445265]],
[[1.695955, -0.357639, -0.893051, -0.445265],
[1.695955, -0.357639, -0.893051, -0.445265]],
[[1.695955, -0.357639, -0.893051, -0.445265],
[1.695955, -0.357639, -0.893051, -0.445265]],
[[1.695955, -0.357639, -0.893051, -0.445265],
[1.695955, -0.357639, -0.893051, -0.445265]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# TODO: remove set default dtype to double by making ref_output more precise.
# Added because this test was copied from test_nn.py, which has default
        # dtype double. If the default dtype is float, the tests report that tensors are
        # not close because the ref output precision is too low.
with set_default_dtype(torch.double):
if training:
cm = contextlib.nullcontext()
else:
cm = torch.no_grad() # transformer fast path requires no grad
with cm:
_test(batch_first, training, enable_nested_tensor)
@unittest.skipIf(not TEST_FAIRSEQ, "Fairseq not found")
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_decoder_only_layer(self):
DEFAULT_PADDING_IDX = 0
class FairseqDecoder(torch.nn.Module):
def __init__(
self,
embed_dim,
attention_heads,
ffn_embed_dim,
num_layers,
embedding_layer, # torch.nn.Embedding. Must have a padding_idx field
dropout=0,
normalize_before=False,
torch_encoder=None, # torch encoder that you can map weights from
activation="relu",
):
super().__init__()
cfg = fairseq_transformer.TransformerConfig()
cfg.decoder.embed_dim = embed_dim
cfg.decoder.output_dim = embed_dim
cfg.decoder.attention_heads = attention_heads
cfg.decoder.ffn_embed_dim = ffn_embed_dim
cfg.dropout = dropout
cfg.decoder.normalize_before = normalize_before
cfg.decoder.layers = num_layers
# make embedding behavior same as other encoders
cfg.no_token_positional_embeddings = True
cfg.no_scale_embedding = True
cfg.activation_fn = activation
dictionary = {} # TODO: verify what this is
self.decoder = fairseq_transformer.TransformerDecoder(
cfg,
dictionary,
embedding_layer,
no_encoder_attn=True,
output_projection=None,
)
if torch_encoder is not None:
self.decoder = torch_to_fairseq(torch_encoder, self.decoder)
self.decoder = self.decoder.eval().cuda().half()
def forward(
self,
tokens,
src_lengths=None,
with_triangle_mask=False,
incremental_state=None,
):
return self.decoder(
prev_output_tokens=tokens,
encoder_out=None,
incremental_state=incremental_state,
features_only=True,
full_context_alignment=not with_triangle_mask,
alignment_layer=None,
alignment_heads=None,
src_lengths=src_lengths,
return_all_hiddens=False,
)[0]
class BetterDecoder(torch.nn.Module):
"""
Only incremental decoder for now
"""
def __init__(self, transformer, embedding, pad_idx):
super().__init__()
self.transformer = transformer
self.embedding = embedding
self.padding_idx = pad_idx
def forward(
self,
x,
src_mask=None,
include_padding_mask=True,
incr_key_lst=None,
incr_value_lst=None,
is_incremental_decoding=False,
):
padding_mask = None
if not x.is_nested and include_padding_mask:
padding_mask = x.eq(self.padding_idx)
                if is_incremental_decoding:
x = x[:, -1:] # only take the last token
x = self.embedding(x)
one_encoder_layer = self.transformer.layers[0]
self_attn = one_encoder_layer.self_attn
embed_dim = self_attn.embed_dim
num_heads = self_attn.num_heads
use_gelu = (
one_encoder_layer.activation_relu_or_gelu == 2
) # see torch/nn/modules/activation attention impl. 1 == relu, 2 == gelu
assert (
one_encoder_layer.activation_relu_or_gelu != 0
) # 0 == not relu or gelu
norm_first = one_encoder_layer.norm_first
                # TODO: make this a bit less janky. But for now we initialize with an empty tensor.
                if not is_incremental_decoding:
assert len(incr_key_lst) == 0 or incr_key_lst[0] is None
assert len(incr_value_lst) == 0 or incr_value_lst[0] is None
while len(incr_key_lst) <= len(self.transformer.layers):
                    if is_incremental_decoding:
incr_key_lst.append(torch.Tensor([]).cuda().half())
incr_value_lst.append(torch.Tensor([]).cuda().half())
else:
incr_key_lst.append(None)
incr_value_lst.append(None)
for i, layer in enumerate(self.transformer.layers):
incr_key = incr_key_lst[i]
incr_value = incr_value_lst[i]
x, incr_key, incr_value = torch._transformer_decoder_only_layer_fwd(
src=x,
embed_dim=embed_dim,
num_heads=num_heads,
qkv_weight=layer.self_attn.in_proj_weight,
qkv_bias=layer.self_attn.in_proj_bias,
proj_weight=layer.self_attn.out_proj.weight,
proj_bias=layer.self_attn.out_proj.bias,
use_gelu=use_gelu,
norm_first=norm_first,
# TODO: layer_norm_eps hardcoded to be same as nn.TransformerEncoder default.
# fix by pulling from self_attn.norm1
eps=1e-5,
norm_weight_1=layer.norm1.weight,
norm_bias_1=layer.norm1.bias,
norm_weight_2=layer.norm2.weight,
norm_bias_2=layer.norm2.bias,
ffn_weight_1=layer.linear1.weight,
ffn_bias_1=layer.linear1.bias,
ffn_weight_2=layer.linear2.weight,
ffn_bias_2=layer.linear2.bias,
mask=src_mask,
incr_key=incr_key, # altered in place
incr_value=incr_value,
)
# not in-place
if not is_incremental_decoding:
incr_key = None
incr_value = None
incr_key_lst[i] = incr_key
incr_value_lst[i] = incr_value
return x, incr_key_lst, incr_value_lst
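# Sketch of how the incremental path above is used: with
# is_incremental_decoding=True only the newest token is embedded (x[:, -1:]),
# and torch._transformer_decoder_only_layer_fwd extends the per-layer
# incr_key / incr_value caches with that step's keys and values. The caller
# threads the returned incr_key_lst / incr_value_lst into the next call, so
# each step attends over all previously cached positions without recomputing
# them. In the non-incremental (forced decoding) path the caches are reset to
# None instead.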
def torch_to_fairseq(torch_encoder, fairseq_encoder):
for src_layer, dst_layer in zip(torch_encoder.layers, fairseq_encoder.layers):
w_q, w_k, w_v = src_layer.self_attn.in_proj_weight.chunk(3, dim=0)
b_q, b_k, b_v = src_layer.self_attn.in_proj_bias.chunk(3, dim=0)
dst_layer.self_attn.q_proj.weight = torch.nn.Parameter(w_q)
dst_layer.self_attn.q_proj.bias = torch.nn.Parameter(b_q)
dst_layer.self_attn.k_proj.weight = torch.nn.Parameter(w_k)
dst_layer.self_attn.k_proj.bias = torch.nn.Parameter(b_k)
dst_layer.self_attn.v_proj.weight = torch.nn.Parameter(w_v)
dst_layer.self_attn.v_proj.bias = torch.nn.Parameter(b_v)
dst_layer.self_attn.out_proj.weight = src_layer.self_attn.out_proj.weight
dst_layer.self_attn.out_proj.bias = src_layer.self_attn.out_proj.bias
dst_layer.fc1.weight = src_layer.linear1.weight
dst_layer.fc1.bias = src_layer.linear1.bias
# fairseq may use FusedLayerNorm from NVIDIA Apex, which has different properties
dst_layer.self_attn_layer_norm.load_state_dict(src_layer.norm1.state_dict())
dst_layer.fc2.weight = src_layer.linear2.weight
dst_layer.fc2.bias = src_layer.linear2.bias
dst_layer.final_layer_norm.load_state_dict(src_layer.norm2.state_dict())
return fairseq_encoder
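# Mapping note: nn.TransformerEncoderLayer keeps the query/key/value projections
# fused in a single in_proj_weight of shape (3 * embed_dim, embed_dim), while
# fairseq uses separate q_proj / k_proj / v_proj modules, so chunk(3, dim=0)
# splits the fused weight (and bias) in q, k, v order. For example, with
# embed_dim = 4 the fused weight is (12, 4) and each chunk is (4, 4). The layer
# norms are copied via state_dict() so the mapping also works when fairseq is
# built with Apex FusedLayerNorm.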
def set_weights_deterministic(model):
for idx, p in enumerate(model.parameters()):
x = p.data
sz = x.view(-1).size(0)
shape = x.shape
x = torch.cos(torch.arange(0, sz).float().view(shape))
p.data.copy_(x)
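# Filling every parameter with the same cos(arange) pattern gives the torch
# encoder, the embedding and (via torch_to_fairseq) the fairseq reference
# identical, RNG-independent weights, so the two decoders can be compared
# numerically.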
D = 4 # d_model
H = 2 # nhead
FD = 16 # dim_feedforward
V = 100 # vocab size
L = 2 # num layers
embedding_layer = torch.nn.Embedding(V, D, DEFAULT_PADDING_IDX)
layer = torch.nn.TransformerEncoderLayer(
d_model=D,
nhead=H,
dim_feedforward=FD,
batch_first=True,
activation="gelu",
)
transformer = torch.nn.TransformerEncoder(
layer,
num_layers=L,
).eval().cuda().half()
set_weights_deterministic(embedding_layer)
set_weights_deterministic(transformer)
better_decoder = (
BetterDecoder(transformer, embedding_layer, DEFAULT_PADDING_IDX)
.eval()
.cuda()
.half()
)
fairseq_decoder = (
FairseqDecoder(
D,
H,
FD,
L,
embedding_layer,
dropout=0,
normalize_before=False,
torch_encoder=transformer,
activation="gelu",
)
.eval()
.cuda()
.half()
)
tokens = torch.Tensor([
[5, 6, 7, 8],
[9, 10, 11, 12]
]).to(torch.int).cuda()
lengths_tensor = torch.Tensor([2, 2]).to(torch.int).cuda()
# bs = 2, seqlen = 4
bs, seqlen = tokens.shape
upper_triangle = torch.zeros(seqlen, seqlen)
upper_triangle.fill_(-100000000)
upper_triangle = torch.triu(upper_triangle, 1)
upper_triangle = upper_triangle.cuda().half()
upper_triangle_expanded = upper_triangle.unsqueeze(0).unsqueeze(0)
upper_triangle_expanded = upper_triangle_expanded.expand(
bs, H, -1, -1
)
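# Additive causal mask: entries above the diagonal get a large negative value,
# so after softmax those (future) positions receive essentially zero attention
# weight. It is expanded to (bs, H, seqlen, seqlen) to match the per-head
# attention scores; the fairseq reference builds its own triangle mask when
# with_triangle_mask=True.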
# test forced decoding
with torch.no_grad():
result, _, _ = better_decoder(
tokens,
src_mask=upper_triangle_expanded,
include_padding_mask=False,
incr_key_lst=[],
incr_value_lst=[],
is_incremental_decoding=False,
)
ref_output = fairseq_decoder(tokens, lengths_tensor, with_triangle_mask=True)
self.assertEqual(result.shape, ref_output.shape)
torch.testing.assert_close(result, ref_output, atol=1e-3, rtol=1e-2)
# test incremental decoding
bs, seqlen = tokens.shape
incr_state = {}
ref_outputs = [fairseq_decoder(
tokens[:, :i],
src_lengths=None,
with_triangle_mask=False,
incremental_state=incr_state,
) for i in range(1, seqlen + 1)]
ref_output = torch.stack(ref_outputs)
incr_key_lst = []
incr_value_lst = []
results = []
for i in range(1, seqlen + 1):
res, incr_key_lst, incr_value_lst = better_decoder(
tokens[:, :i],
src_mask=None,
include_padding_mask=False,
incr_key_lst=incr_key_lst,
incr_value_lst=incr_value_lst,
is_incremental_decoding=True,
)
results.append(res)
result = torch.stack(results)
self.assertEqual(result.shape, ref_output.shape)
torch.testing.assert_close(result, ref_output, atol=1e-3, rtol=1e-2)
@parametrize("input_dim,attn_mask_dim,is_causal",
[(3, None, False), (3, 2, False), (3, 2, True), (3, 3, False), (3, 3, True),
(4, None, False), (4, 2, False), (4, 2, True), (4, 4, False), (4, 4, True)],
name_fn=lambda input_dim, attn_dim, is_causal: (
f"{input_dim}D_input_dim_" + (
f"{attn_dim}D_{'causal_' if is_causal else ''}attn_mask"
if attn_dim is not None else "no_attn_mask")))
@parametrize("dropout_p", [0.0, 0.2, 0.5])
@parametrize("device", device_list)
def test_scaled_dot_product_attention(self, device, input_dim, attn_mask_dim, is_causal, dropout_p):
# TODO: Support cross-device / dtype testing properly when instantiate_device_type_tests() is used.
dtypes = [torch.double, torch.float]
for dtype in dtypes:
def rand_tensor(*shape):
return torch.randn(shape, device=device, dtype=dtype)
# This test compares python and C++ implementations of SDP.
N, N_prime, L, S, E = 5, 2, 4, 3, 6
if input_dim == 3:
query = rand_tensor(N, L, E)
key = rand_tensor(N, S, E)
value = rand_tensor(N, S, E)
elif input_dim == 4:
query = rand_tensor(N, N_prime, L, E)
key = rand_tensor(N, N_prime, S, E)
value = rand_tensor(N, N_prime, S, E)
else:
self.fail(f'Invalid input_dim {input_dim} encountered in SDP test')
attn_mask = None
if attn_mask_dim is not None:
assert attn_mask_dim in [2, input_dim]
mask_size = (L, S) if attn_mask_dim == 2 else ((N, L, S) if input_dim == 3 else (N, N_prime, L, S))
attn_mask = (torch.ones(mask_size, device=device, dtype=torch.bool).tril() if is_causal
else torch.randint(0, 2, size=mask_size, device=device, dtype=torch.bool))
with freeze_rng_state():
# Python impl only supports float mask and 3D inputs.
attn_mask_float = attn_mask
if attn_mask_float is not None:
attn_mask_float = torch.zeros_like(attn_mask, dtype=query.dtype)
attn_mask_float.masked_fill_(attn_mask.logical_not(), float("-inf"))
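# The boolean mask (True = attend) is converted into the additive float mask
# the python reference implementation expects: 0 where attention is allowed,
# -inf where it is masked out.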
q, k, v = query.view(-1, L, E), key.view(-1, S, E), value.view(-1, S, E)
a = attn_mask_float
if a is not None and attn_mask_dim > 3:
a = a.view(-1, L, S)
expected = F._scaled_dot_product_attention(
q, k, v, attn_mask=a, dropout_p=dropout_p)
if input_dim > 3:
expected = (expected[0].view(-1, N_prime, L, E), expected[1].view(-1, N_prime, L, S))
need_attn_weights: bool = True
with freeze_rng_state():
if is_causal:
# NB: Don't pass attn_mask here
actual = torch.ops.aten._scaled_dot_product_attention(
query, key, value, None, dropout_p, need_attn_weights, is_causal)
# Error case: both explicit attn_mask and is_causal are set
with self.assertRaisesRegex(RuntimeError,
"Explicit attn_mask should not be set when is_causal=True"):
torch.ops.aten._scaled_dot_product_attention(
query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal)
else:
actual = torch.ops.aten._scaled_dot_product_attention(
query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal)
# freeze_rng_state() doesn't seem to work outside of CPU, so dropout makes the results incomparable.
# TODO: Do this skipping in a nicer way once the granular test skipping logic lands.
if dropout_p == 0.0 or device == 'cpu':
self.assertEqual(actual, expected)
@unittest.skipIf(TEST_WITH_CROSSREF, 'Fastpath not available with crossref')
@torch.no_grad()
def test_mask_check_fastpath(self):
"""
Test that fastpath is executed independently of the mask that is passed.
If the passed mask is left aligned or mask_check=False, test that nested tensors are used (sparsity fastpath),
otherwise that the fastpath with regular (dense) tensors is used.
"""
x = torch.Tensor([[[1, 2], [3, 4], [5, 6]]]).to(torch.float)
def _test_fastpath(model, mask, mock_return_value, nested_tensors=True):
with patch('torch._transformer_encoder_layer_fwd') as fastpath_mock:
fastpath_mock.return_value = mock_return_value
model(x, src_key_padding_mask=mask)
# If mock was called, fastpath was taken
self.assertTrue(fastpath_mock.called)
# If mock was called with nested tensors, sparsity fastpath was taken
for call_args, _ in fastpath_mock.call_args_list:
self.assertEqual(call_args[0].is_nested, nested_tensors)
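# Mocking torch._transformer_encoder_layer_fwd lets the test assert that the
# fastpath kernel was reached and, from whether its first argument is a nested
# tensor, which variant (sparsity vs. dense) was selected, without running the
# kernel itself.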
encoder_layer = torch.nn.TransformerEncoderLayer(d_model=2, nhead=2, dim_feedforward=8, batch_first=True)
model = torch.nn.TransformerEncoder(encoder_layer, num_layers=2, enable_nested_tensor=True, mask_check=True)
model.eval()
aligned_mask = torch.Tensor([[0, 0, 1]]).to(torch.bool)
not_aligned_mask = torch.Tensor([[1, 0, 1]]).to(torch.bool)
nested_tensor_return_value = torch.nested_tensor([torch.ones((2, 2), dtype=torch.float)])
tensor_return_value = torch.ones((1, 3, 2), dtype=torch.float)
# Left aligned mask results in sparsity fastpath
_test_fastpath(model, aligned_mask, nested_tensor_return_value, nested_tensors=True)
# Non-left-aligned mask results in the regular (dense) fastpath
_test_fastpath(model, not_aligned_mask, tensor_return_value, nested_tensors=False)
model = torch.nn.TransformerEncoder(encoder_layer, num_layers=2, enable_nested_tensor=False, mask_check=True)
model.eval()
# If nested tensor disabled, fastpath is always taken
_test_fastpath(model, aligned_mask, tensor_return_value, nested_tensors=False)
_test_fastpath(model, not_aligned_mask, tensor_return_value, nested_tensors=False)
model = torch.nn.TransformerEncoder(encoder_layer, num_layers=2, enable_nested_tensor=True, mask_check=False)
model.eval()
# Mask check disabled results in sparsity fastpath, independently of the mask
_test_fastpath(model, aligned_mask, nested_tensor_return_value, nested_tensors=True)
_test_fastpath(model, not_aligned_mask, nested_tensor_return_value, nested_tensors=True)
# TODO: Replace this with instantiate_device_type_tests() to take advantage of test framework support for
# cross device / dtype testing.
instantiate_parametrized_tests(TestTransformers)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_transformers.py
|
# Owner(s): ["module: cpp-extensions"]
import os
import shutil
import sys
import unittest
import warnings
import re
import tempfile
import subprocess
import glob
import torch.testing._internal.common_utils as common
import torch
import torch.backends.cudnn
import torch.utils.cpp_extension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
from torch.testing._internal.common_utils import gradcheck, skipIfSlowGradcheckEnv, IS_ARM64
TEST_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
TEST_CUDNN = False
TEST_ROCM = torch.cuda.is_available() and torch.version.hip is not None and ROCM_HOME is not None
if TEST_CUDA and torch.version.cuda is not None:  # skip the CUDNN test for ROCm
CUDNN_HEADER_EXISTS = os.path.isfile(os.path.join(CUDA_HOME, "include/cudnn.h"))
TEST_CUDNN = (
TEST_CUDA and CUDNN_HEADER_EXISTS and torch.backends.cudnn.is_available()
)
IS_WINDOWS = sys.platform == "win32"
def remove_build_path():
if sys.platform == "win32":
print("Not wiping extensions build folder because Windows")
return
default_build_root = torch.utils.cpp_extension.get_default_build_root()
if os.path.exists(default_build_root):
shutil.rmtree(default_build_root)
# There's only one test that runs gradcheck, run slow mode manually
@skipIfSlowGradcheckEnv
@unittest.skipIf(IS_ARM64, "Does not work on arm")
class TestCppExtensionJIT(common.TestCase):
"""Tests just-in-time cpp extensions.
Don't confuse this with the PyTorch JIT (aka TorchScript).
"""
def setUp(self):
super().setUp()
# cpp extensions use relative paths. Those paths are relative to
# this file, so we'll change the working directory temporarily
self.old_working_dir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
def tearDown(self):
super().tearDown()
# return the working directory (see setUp)
os.chdir(self.old_working_dir)
@classmethod
def setUpClass(cls):
remove_build_path()
@classmethod
def tearDownClass(cls):
remove_build_path()
def test_jit_compile_extension(self):
module = torch.utils.cpp_extension.load(
name="jit_extension",
sources=[
"cpp_extensions/jit_extension.cpp",
"cpp_extensions/jit_extension2.cpp",
],
extra_include_paths=["cpp_extensions"],
extra_cflags=["-g"],
verbose=True,
)
x = torch.randn(4, 4)
y = torch.randn(4, 4)
z = module.tanh_add(x, y)
self.assertEqual(z, x.tanh() + y.tanh())
# Checking we can call a method defined not in the main C++ file.
z = module.exp_add(x, y)
self.assertEqual(z, x.exp() + y.exp())
# Checking we can use this JIT-compiled class.
doubler = module.Doubler(2, 2)
self.assertIsNone(doubler.get().grad)
self.assertEqual(doubler.get().sum(), 4)
self.assertEqual(doubler.forward().sum(), 8)
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_jit_cuda_extension(self):
# NOTE: The name of the extension must equal the name of the module.
module = torch.utils.cpp_extension.load(
name="torch_test_cuda_extension",
sources=[
"cpp_extensions/cuda_extension.cpp",
"cpp_extensions/cuda_extension.cu",
],
extra_cuda_cflags=["-O2"],
verbose=True,
keep_intermediates=False,
)
x = torch.zeros(100, device="cuda", dtype=torch.float32)
y = torch.zeros(100, device="cuda", dtype=torch.float32)
z = module.sigmoid_add(x, y).cpu()
# 2 * sigmoid(0) = 2 * 0.5 = 1
self.assertEqual(z, torch.ones_like(z))
def _run_jit_cuda_archflags(self, flags, expected):
# Compile an extension with given `flags`
def _check_cuobjdump_output(expected_values, is_ptx=False):
elf_or_ptx = '--list-ptx' if is_ptx else '--list-elf'
lib_ext = '.pyd' if IS_WINDOWS else '.so'
# Note: the extension filename may include _v1, _v2, so first find the exact name
ext_filename = glob.glob(os.path.join(temp_dir,
'cudaext_archflag*' + lib_ext))[0]
command = ['cuobjdump', elf_or_ptx, ext_filename]
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = p.communicate()
output = output.decode("ascii")
err = err.decode("ascii")
if p.returncode != 0 or err != '':
raise AssertionError("Flags: {}\nReturncode: {}\nStderr: {}\n"
"Output: {} ".format(flags, p.returncode,
err, output))
actual_arches = sorted(re.findall(r'sm_\d\d', output))
expected_arches = sorted(['sm_' + xx for xx in expected_values])
self.assertEqual(actual_arches, expected_arches,
msg="Flags: {}, Actual: {}, Expected: {}\n"
"Stderr: {}\nOutput: {}".format(
flags, actual_arches, expected_arches,
err, output))
temp_dir = tempfile.mkdtemp()
old_envvar = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
try:
os.environ['TORCH_CUDA_ARCH_LIST'] = flags
torch.utils.cpp_extension.load(
name="cudaext_archflags",
sources=[
"cpp_extensions/cuda_extension.cpp",
"cpp_extensions/cuda_extension.cu",
],
extra_cuda_cflags=["-O2"],
verbose=True,
build_directory=temp_dir,
)
# Expected output for --list-elf:
# ELF file 1: cudaext_archflags.1.sm_61.cubin
# ELF file 2: cudaext_archflags.2.sm_52.cubin
_check_cuobjdump_output(expected[0])
if expected[1] is not None:
# Expected output for --list-ptx:
# PTX file 1: cudaext_archflags.1.sm_61.ptx
_check_cuobjdump_output(expected[1], is_ptx=True)
finally:
if IS_WINDOWS:
print("Not wiping extensions build folder because Windows")
else:
shutil.rmtree(temp_dir)
if old_envvar is None:
os.environ.pop('TORCH_CUDA_ARCH_LIST')
else:
os.environ['TORCH_CUDA_ARCH_LIST'] = old_envvar
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
@unittest.skipIf(TEST_ROCM, "disabled on rocm")
def test_jit_cuda_archflags(self):
# Test a number of combinations:
# - the default for the machine we're testing on
# - Separators, can be ';' (most common) or ' '
# - Architecture names
# - With/without '+PTX'
n = torch.cuda.device_count()
capabilities = {torch.cuda.get_device_capability(i) for i in range(n)}
# expected values is length-2 tuple: (list of ELF, list of PTX)
# note: there should not be more than one PTX value
archflags = {
'': (['{}{}'.format(capability[0], capability[1]) for capability in capabilities], None),
"Maxwell+Tegra;6.1": (['53', '61'], None),
"Pascal 3.5": (['35', '60', '61'], None),
"Volta": (['70'], ['70']),
}
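# Each entry maps a TORCH_CUDA_ARCH_LIST value to the cubin (ELF) architectures
# expected in the built extension plus, for entries suffixed with '+PTX', the
# architectures whose PTX should also be embedded. Named architectures such as
# 'Volta' or 'Maxwell+Tegra' are expanded to their SM numbers by
# torch.utils.cpp_extension.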
if int(torch.version.cuda.split('.')[0]) >= 10:
# these flags need CUDA 10+; CUDA 9 only supports compute capability <= 7.2
archflags["7.5+PTX"] = (['75'], ['75'])
archflags["5.0;6.0+PTX;7.0;7.5"] = (['50', '60', '70', '75'], ['60'])
for flags, expected in archflags.items():
self._run_jit_cuda_archflags(flags, expected)
@unittest.skipIf(not TEST_CUDNN, "CuDNN not found")
def test_jit_cudnn_extension(self):
# implementation of CuDNN ReLU
if IS_WINDOWS:
extra_ldflags = ["cudnn.lib"]
else:
extra_ldflags = ["-lcudnn"]
module = torch.utils.cpp_extension.load(
name="torch_test_cudnn_extension",
sources=["cpp_extensions/cudnn_extension.cpp"],
extra_ldflags=extra_ldflags,
verbose=True,
with_cuda=True,
)
x = torch.randn(100, device="cuda", dtype=torch.float32)
y = torch.zeros(100, device="cuda", dtype=torch.float32)
module.cudnn_relu(x, y) # y=relu(x)
self.assertEqual(torch.nn.functional.relu(x), y)
with self.assertRaisesRegex(RuntimeError, "same size"):
y_incorrect = torch.zeros(20, device="cuda", dtype=torch.float32)
module.cudnn_relu(x, y_incorrect)
def test_inline_jit_compile_extension_with_functions_as_list(self):
cpp_source = """
torch::Tensor tanh_add(torch::Tensor x, torch::Tensor y) {
return x.tanh() + y.tanh();
}
"""
module = torch.utils.cpp_extension.load_inline(
name="inline_jit_extension_with_functions_list",
cpp_sources=cpp_source,
functions="tanh_add",
verbose=True,
)
self.assertEqual(module.tanh_add.__doc__.split("\n")[2], "tanh_add")
x = torch.randn(4, 4)
y = torch.randn(4, 4)
z = module.tanh_add(x, y)
self.assertEqual(z, x.tanh() + y.tanh())
def test_inline_jit_compile_extension_with_functions_as_dict(self):
cpp_source = """
torch::Tensor tanh_add(torch::Tensor x, torch::Tensor y) {
return x.tanh() + y.tanh();
}
"""
module = torch.utils.cpp_extension.load_inline(
name="inline_jit_extension_with_functions_dict",
cpp_sources=cpp_source,
functions={"tanh_add": "Tanh and then sum :D"},
verbose=True,
)
self.assertEqual(module.tanh_add.__doc__.split("\n")[2], "Tanh and then sum :D")
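# When `functions` is given, load_inline auto-generates the PYBIND11_MODULE
# bindings (a dict maps function names to their docstrings, as checked above);
# without it, the sources must define their own PYBIND11_MODULE, as in the next
# test.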
def test_inline_jit_compile_extension_multiple_sources_and_no_functions(self):
cpp_source1 = """
torch::Tensor sin_add(torch::Tensor x, torch::Tensor y) {
return x.sin() + y.sin();
}
"""
cpp_source2 = """
#include <torch/extension.h>
torch::Tensor sin_add(torch::Tensor x, torch::Tensor y);
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("sin_add", &sin_add, "sin(x) + sin(y)");
}
"""
module = torch.utils.cpp_extension.load_inline(
name="inline_jit_extension",
cpp_sources=[cpp_source1, cpp_source2],
verbose=True,
)
x = torch.randn(4, 4)
y = torch.randn(4, 4)
z = module.sin_add(x, y)
self.assertEqual(z, x.sin() + y.sin())
@unittest.skip("Temporarily disabled")
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_inline_jit_compile_extension_cuda(self):
cuda_source = """
__global__ void cos_add_kernel(
const float* __restrict__ x,
const float* __restrict__ y,
float* __restrict__ output,
const int size) {
const auto index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
output[index] = __cosf(x[index]) + __cosf(y[index]);
}
}
torch::Tensor cos_add(torch::Tensor x, torch::Tensor y) {
auto output = torch::zeros_like(x);
const int threads = 1024;
const int blocks = (output.numel() + threads - 1) / threads;
cos_add_kernel<<<blocks, threads>>>(x.data<float>(), y.data<float>(), output.data<float>(), output.numel());
return output;
}
"""
# Here, the C++ source need only declare the function signature.
cpp_source = "torch::Tensor cos_add(torch::Tensor x, torch::Tensor y);"
module = torch.utils.cpp_extension.load_inline(
name="inline_jit_extension_cuda",
cpp_sources=cpp_source,
cuda_sources=cuda_source,
functions=["cos_add"],
verbose=True,
)
self.assertEqual(module.cos_add.__doc__.split("\n")[2], "cos_add")
x = torch.randn(4, 4, device="cuda", dtype=torch.float32)
y = torch.randn(4, 4, device="cuda", dtype=torch.float32)
z = module.cos_add(x, y)
self.assertEqual(z, x.cos() + y.cos())
@unittest.skip("Temporarily disabled")
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_inline_jit_compile_custom_op_cuda(self):
cuda_source = """
__global__ void cos_add_kernel(
const float* __restrict__ x,
const float* __restrict__ y,
float* __restrict__ output,
const int size) {
const auto index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
output[index] = __cosf(x[index]) + __cosf(y[index]);
}
}
torch::Tensor cos_add(torch::Tensor x, torch::Tensor y) {
auto output = torch::zeros_like(x);
const int threads = 1024;
const int blocks = (output.numel() + threads - 1) / threads;
cos_add_kernel<<<blocks, threads>>>(x.data_ptr<float>(), y.data_ptr<float>(), output.data_ptr<float>(), output.numel());
return output;
}
"""
# Here, the C++ source need only declare the function signature.
cpp_source = """
#include <torch/library.h>
torch::Tensor cos_add(torch::Tensor x, torch::Tensor y);
TORCH_LIBRARY(inline_jit_extension_custom_op_cuda, m) {
m.def("cos_add", cos_add);
}
"""
torch.utils.cpp_extension.load_inline(
name="inline_jit_extension_custom_op_cuda",
cpp_sources=cpp_source,
cuda_sources=cuda_source,
verbose=True,
is_python_module=False,
)
x = torch.randn(4, 4, device="cuda", dtype=torch.float32)
y = torch.randn(4, 4, device="cuda", dtype=torch.float32)
z = torch.ops.inline_jit_extension_custom_op_cuda.cos_add(x, y)
self.assertEqual(z, x.cos() + y.cos())
def test_inline_jit_compile_extension_throws_when_functions_is_bad(self):
with self.assertRaises(ValueError):
torch.utils.cpp_extension.load_inline(
name="invalid_jit_extension", cpp_sources="", functions=5
)
def test_lenient_flag_handling_in_jit_extensions(self):
cpp_source = """
torch::Tensor tanh_add(torch::Tensor x, torch::Tensor y) {
return x.tanh() + y.tanh();
}
"""
module = torch.utils.cpp_extension.load_inline(
name="lenient_flag_handling_extension",
cpp_sources=cpp_source,
functions="tanh_add",
extra_cflags=["-g\n\n", "-O0 -Wall"],
extra_include_paths=[" cpp_extensions\n"],
verbose=True,
)
x = torch.zeros(100, dtype=torch.float32)
y = torch.zeros(100, dtype=torch.float32)
z = module.tanh_add(x, y).cpu()
self.assertEqual(z, x.tanh() + y.tanh())
@unittest.skip("Temporarily disabled")
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_half_support(self):
"""
Checks for an issue with operator< ambiguity for half when certain
THC headers are included.
See https://github.com/pytorch/pytorch/pull/10301#issuecomment-416773333
for the corresponding issue.
"""
cuda_source = """
template<typename T, typename U>
__global__ void half_test_kernel(const T* input, U* output) {
if (input[0] < input[1] || input[0] >= input[1]) {
output[0] = 123;
}
}
torch::Tensor half_test(torch::Tensor input) {
auto output = torch::empty(1, input.options().dtype(torch::kFloat));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "half_test", [&] {
half_test_kernel<scalar_t><<<1, 1>>>(
input.data<scalar_t>(),
output.data<float>());
});
return output;
}
"""
module = torch.utils.cpp_extension.load_inline(
name="half_test_extension",
cpp_sources="torch::Tensor half_test(torch::Tensor input);",
cuda_sources=cuda_source,
functions=["half_test"],
verbose=True,
)
x = torch.randn(3, device="cuda", dtype=torch.half)
result = module.half_test(x)
self.assertEqual(result[0], 123)
def test_reload_jit_extension(self):
def compile(code):
return torch.utils.cpp_extension.load_inline(
name="reloaded_jit_extension",
cpp_sources=code,
functions="f",
verbose=True,
)
module = compile("int f() { return 123; }")
self.assertEqual(module.f(), 123)
module = compile("int f() { return 456; }")
self.assertEqual(module.f(), 456)
module = compile("int f() { return 456; }")
self.assertEqual(module.f(), 456)
module = compile("int f() { return 789; }")
self.assertEqual(module.f(), 789)
def test_cpp_frontend_module_has_same_output_as_python(self, dtype=torch.double):
extension = torch.utils.cpp_extension.load(
name="cpp_frontend_extension",
sources="cpp_extensions/cpp_frontend_extension.cpp",
verbose=True,
)
input = torch.randn(2, 5, dtype=dtype)
cpp_linear = extension.Net(5, 2)
cpp_linear.to(dtype)
python_linear = torch.nn.Linear(5, 2).to(dtype)
# First make sure they have the same parameters
cpp_parameters = dict(cpp_linear.named_parameters())
with torch.no_grad():
python_linear.weight.copy_(cpp_parameters["fc.weight"])
python_linear.bias.copy_(cpp_parameters["fc.bias"])
cpp_output = cpp_linear.forward(input)
python_output = python_linear(input)
self.assertEqual(cpp_output, python_output)
cpp_output.sum().backward()
python_output.sum().backward()
for p in cpp_linear.parameters():
self.assertFalse(p.grad is None)
self.assertEqual(cpp_parameters["fc.weight"].grad, python_linear.weight.grad)
self.assertEqual(cpp_parameters["fc.bias"].grad, python_linear.bias.grad)
def test_cpp_frontend_module_python_inter_op(self):
extension = torch.utils.cpp_extension.load(
name="cpp_frontend_extension",
sources="cpp_extensions/cpp_frontend_extension.cpp",
verbose=True,
)
# Create a torch.nn.Module which uses the C++ module as a submodule.
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.x = torch.nn.Parameter(torch.tensor(1.0))
self.net = extension.Net(3, 5)
def forward(self, input):
return self.net.forward(input) + self.x
net = extension.Net(5, 2)
net.double()
net.to(torch.get_default_dtype())
self.assertEqual(str(net), "Net")
# Further embed the torch.nn.Module into a Sequential, and also add the
# C++ module as an element of the Sequential.
sequential = torch.nn.Sequential(M(), torch.nn.Tanh(), net, torch.nn.Sigmoid())
input = torch.randn(2, 3)
# Try calling the module!
output = sequential.forward(input)
# The call operator is bound to forward too.
self.assertEqual(output, sequential(input))
self.assertEqual(list(output.shape), [2, 2])
# Do changes on the module hierarchy.
old_dtype = torch.get_default_dtype()
sequential.to(torch.float64)
sequential.to(torch.float32)
sequential.to(old_dtype)
self.assertEqual(sequential[2].parameters()[0].dtype, old_dtype)
# Make sure we can access these methods recursively.
self.assertEqual(len(list(sequential.parameters())), len(net.parameters()) * 2 + 1)
self.assertEqual(len(list(sequential.named_parameters())), len(net.named_parameters()) * 2 + 1)
self.assertEqual(len(list(sequential.buffers())), len(net.buffers()) * 2)
self.assertEqual(len(list(sequential.modules())), 8)
# Test clone()
net2 = net.clone()
self.assertEqual(len(net.parameters()), len(net2.parameters()))
self.assertEqual(len(net.buffers()), len(net2.buffers()))
self.assertEqual(len(net.modules()), len(net2.modules()))
# Try differentiating through the whole module.
for parameter in net.parameters():
self.assertIsNone(parameter.grad)
output.sum().backward()
for parameter in net.parameters():
self.assertFalse(parameter.grad is None)
self.assertGreater(parameter.grad.sum(), 0)
# Try calling zero_grad()
net.zero_grad()
for p in net.parameters():
self.assertEqual(p.grad, torch.zeros_like(p))
# Test train(), eval(), training (a property)
self.assertTrue(net.training)
net.eval()
self.assertFalse(net.training)
net.train()
self.assertTrue(net.training)
net.eval()
# Try calling the additional methods we registered.
biased_input = torch.randn(4, 5)
output_before = net.forward(biased_input)
bias = net.get_bias().clone()
self.assertEqual(list(bias.shape), [2])
net.set_bias(bias + 1)
self.assertEqual(net.get_bias(), bias + 1)
output_after = net.forward(biased_input)
self.assertNotEqual(output_before, output_after)
# Try accessing parameters
self.assertEqual(len(net.parameters()), 2)
np = net.named_parameters()
self.assertEqual(len(np), 2)
self.assertIn("fc.weight", np)
self.assertIn("fc.bias", np)
self.assertEqual(len(net.buffers()), 1)
nb = net.named_buffers()
self.assertEqual(len(nb), 1)
self.assertIn("buf", nb)
self.assertEqual(nb[0][1], torch.eye(5))
def test_cpp_frontend_module_has_up_to_date_attributes(self):
extension = torch.utils.cpp_extension.load(
name="cpp_frontend_extension",
sources="cpp_extensions/cpp_frontend_extension.cpp",
verbose=True,
)
net = extension.Net(5, 2)
self.assertEqual(len(net._parameters), 0)
net.add_new_parameter("foo", torch.eye(5))
self.assertEqual(len(net._parameters), 1)
self.assertEqual(len(net._buffers), 1)
net.add_new_buffer("bar", torch.eye(5))
self.assertEqual(len(net._buffers), 2)
self.assertEqual(len(net._modules), 1)
net.add_new_submodule("fc2")
self.assertEqual(len(net._modules), 2)
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_cpp_frontend_module_python_inter_op_with_cuda(self):
extension = torch.utils.cpp_extension.load(
name="cpp_frontend_extension",
sources="cpp_extensions/cpp_frontend_extension.cpp",
verbose=True,
)
net = extension.Net(5, 2)
for p in net.parameters():
self.assertTrue(p.device.type == "cpu")
cpu_parameters = [p.clone() for p in net.parameters()]
device = torch.device("cuda", 0)
net.to(device)
for i, p in enumerate(net.parameters()):
self.assertTrue(p.device.type == "cuda")
self.assertTrue(p.device.index == 0)
self.assertEqual(cpu_parameters[i], p)
net.cpu()
net.add_new_parameter("a", torch.eye(5))
net.add_new_parameter("b", torch.eye(5))
net.add_new_buffer("c", torch.eye(5))
net.add_new_buffer("d", torch.eye(5))
net.add_new_submodule("fc2")
net.add_new_submodule("fc3")
for p in net.parameters():
self.assertTrue(p.device.type == "cpu")
net.cuda()
for p in net.parameters():
self.assertTrue(p.device.type == "cuda")
def test_returns_shared_library_path_when_is_python_module_is_true(self):
source = """
#include <torch/script.h>
torch::Tensor func(torch::Tensor x) { return x; }
static torch::RegisterOperators r("test::func", &func);
"""
torch.utils.cpp_extension.load_inline(
name="is_python_module",
cpp_sources=source,
functions="func",
verbose=True,
is_python_module=False,
)
self.assertEqual(torch.ops.test.func(torch.eye(5)), torch.eye(5))
def test_set_default_type_also_changes_aten_default_type(self):
module = torch.utils.cpp_extension.load_inline(
name="test_set_default_type",
cpp_sources="torch::Tensor get() { return torch::empty({}); }",
functions="get",
verbose=True,
)
initial_default = torch.get_default_dtype()
try:
self.assertEqual(module.get().dtype, initial_default)
torch.set_default_dtype(torch.float64)
self.assertEqual(module.get().dtype, torch.float64)
torch.set_default_dtype(torch.float32)
self.assertEqual(module.get().dtype, torch.float32)
torch.set_default_dtype(torch.float16)
self.assertEqual(module.get().dtype, torch.float16)
finally:
torch.set_default_dtype(initial_default)
def test_compilation_error_formatting(self):
# Test that the missing-semicolon error message has linebreaks in it.
# This'll fail if the message has been munged into a single line.
# It's hard to write anything more specific as every compiler has its own
# error formatting.
with self.assertRaises(RuntimeError) as e:
torch.utils.cpp_extension.load_inline(
name="test_compilation_error_formatting",
cpp_sources="int main() { return 0 }")
pattern = r'.*(\\n|\\r).*'
self.assertNotRegex(str(e), pattern)
def test_warning(self):
# Note: the module created from this source will include the py::key_error
# symbol. But because of visibility and the fact that it lives in a
# different compilation unit than pybind, this trips up ubsan even though
# it is fine. "ubsan.supp" thus needs to contain "vptr:warn_mod.so".
source = '''
// error_type:
// 0: no error
// 1: torch::TypeError
// 2: python_error()
// 3: py::error_already_set
at::Tensor foo(at::Tensor x, int error_type) {
std::ostringstream err_stream;
err_stream << "Error with " << x.type();
TORCH_WARN(err_stream.str());
if(error_type == 1) {
throw torch::TypeError(err_stream.str().c_str());
}
if(error_type == 2) {
PyObject* obj = PyTuple_New(-1);
TORCH_CHECK(!obj);
// Pretend it was caught in a different thread and restored here
auto e = python_error();
e.persist();
e.restore();
throw e;
}
if(error_type == 3) {
throw py::key_error(err_stream.str());
}
return x.cos();
}
'''
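# error_type selects which C++ exception path foo() takes after emitting a
# TORCH_WARN. The extension is built twice below, with with_pytorch_error_handling
# disabled and then enabled, to check that C++ warnings only reach Python's
# warnings machinery when PyTorch's error/warning handling wrapper is in place.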
# Ensure double type for the hard-coded C++ type name below
t = torch.rand(2).double()
cpp_tensor_name = r"CPUDoubleType"
# Without error handling, the warnings cannot be caught
warn_mod = torch.utils.cpp_extension.load_inline(name='warn_mod',
cpp_sources=[source],
functions=['foo'],
with_pytorch_error_handling=False)
with warnings.catch_warnings(record=True) as w:
warn_mod.foo(t, 0)
self.assertEqual(len(w), 0)
with self.assertRaisesRegex(TypeError, t.type()):
warn_mod.foo(t, 1)
self.assertEqual(len(w), 0)
with self.assertRaisesRegex(SystemError, "bad argument to internal function"):
warn_mod.foo(t, 2)
self.assertEqual(len(w), 0)
with self.assertRaisesRegex(KeyError, cpp_tensor_name):
warn_mod.foo(t, 3)
self.assertEqual(len(w), 0)
warn_mod = torch.utils.cpp_extension.load_inline(name='warn_mod',
cpp_sources=[source],
functions=['foo'],
with_pytorch_error_handling=True)
with warnings.catch_warnings(record=True) as w:
# Warning raised with no error should be caught
warn_mod.foo(t, 0)
self.assertEqual(len(w), 1)
# Warning raised alongside a C++ error should also be caught
with self.assertRaisesRegex(TypeError, t.type()):
warn_mod.foo(t, 1)
self.assertEqual(len(w), 2)
# Warning raised alongside a Python error should also be caught
with self.assertRaisesRegex(SystemError, "bad argument to internal function"):
warn_mod.foo(t, 2)
self.assertEqual(len(w), 3)
# Warning raised alongside a pybind error should also be caught
# Note that there is no type name translation for pybind errors
with self.assertRaisesRegex(KeyError, cpp_tensor_name):
warn_mod.foo(t, 3)
self.assertEqual(len(w), 4)
# Make sure warnings promoted to errors (via simplefilter) are handled properly
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
# No error, the warning should raise
with self.assertRaisesRegex(UserWarning, t.type()):
warn_mod.foo(t, 0)
self.assertEqual(len(w), 0)
# Another error happened, the warning is ignored
with self.assertRaisesRegex(TypeError, t.type()):
warn_mod.foo(t, 1)
self.assertEqual(len(w), 0)
def test_autograd_from_cpp(self):
source = '''
void run_back(at::Tensor x) {
x.backward({});
}
void run_back_no_gil(at::Tensor x) {
pybind11::gil_scoped_release no_gil;
x.backward({});
}
'''
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, gx):
return gx
test_backward_deadlock = torch.utils.cpp_extension.load_inline(name='test_backward_deadlock',
cpp_sources=[source],
functions=['run_back', 'run_back_no_gil'],)
# This used to deadlock
inp = torch.rand(20, requires_grad=True)
loss = MyFn.apply(inp).sum()
with self.assertRaisesRegex(RuntimeError, "The autograd engine was called while holding the GIL."):
test_backward_deadlock.run_back(loss)
inp = torch.rand(20, requires_grad=True)
loss = MyFn.apply(inp).sum()
test_backward_deadlock.run_back_no_gil(loss)
def test_custom_compound_op_autograd(self):
# Test that a custom compound op (i.e. a custom op that just calls other aten ops)
# correctly returns gradients of those other ops
source = """
#include <torch/library.h>
torch::Tensor my_add(torch::Tensor x, torch::Tensor y) {
return x + y;
}
TORCH_LIBRARY(my, m) {
m.def("add", &my_add);
}
"""
torch.utils.cpp_extension.load_inline(
name="is_python_module",
cpp_sources=source,
verbose=True,
is_python_module=False,
)
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
for fast_mode in (True, False):
gradcheck(torch.ops.my.add, [a, b], eps=1e-2, fast_mode=fast_mode)
if __name__ == "__main__":
common.run_tests()
|
pytorch-master
|
test/test_cpp_extensions_jit.py
|
# Owner(s): ["module: typing"]
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, set_cwd
import tempfile
import torch
import doctest
import os
import inspect
from pathlib import Path
try:
import mypy.api
HAVE_MYPY = True
except ImportError:
HAVE_MYPY = False
def get_examples_from_docstring(docstr):
"""
Extracts all runnable python code from the examples
in docstrings; returns a list of lines.
"""
examples = doctest.DocTestParser().get_examples(docstr)
return [f' {l}' for e in examples for l in e.source.splitlines()]
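# For example, a docstring containing ">>> torch.add(1, 2)" yields the single
# indented line "torch.add(1, 2)", ready to be used as part of the body of one
# of the generated example_* functions below.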
def get_all_examples():
"""get_all_examples() -> str
This function grabs (hopefully all) examples from the torch documentation
strings and puts them in one nonsensical module returned as a string.
"""
blocklist = {
"_np",
}
allexamples = ""
example_file_lines = [
"import torch",
"import torch.nn.functional as F",
"import math",
"import numpy",
"import io",
"import itertools",
"",
# for requires_grad_ example
# NB: We are parsing this file as Python 2, so we must use
# Python 2 type annotation syntax
"def preprocess(inp):",
" # type: (torch.Tensor) -> torch.Tensor",
" return inp",
]
for fname in dir(torch):
fn = getattr(torch, fname)
docstr = inspect.getdoc(fn)
if docstr and fname not in blocklist:
e = get_examples_from_docstring(docstr)
if e:
example_file_lines.append(f"\n\ndef example_torch_{fname}():")
example_file_lines += e
for fname in dir(torch.Tensor):
fn = getattr(torch.Tensor, fname)
docstr = inspect.getdoc(fn)
if docstr and fname not in blocklist:
e = get_examples_from_docstring(docstr)
if e:
example_file_lines.append(f"\n\ndef example_torch_tensor_{fname}():")
example_file_lines += e
return "\n".join(example_file_lines)
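# The generated module looks roughly like:
#   import torch
#   ...
#   def example_torch_add():
#       <doctest lines extracted from torch.add's docstring>
# and is then type-checked as a whole by mypy in the test below.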
class TestTypeHints(TestCase):
@unittest.skipIf(not HAVE_MYPY, "need mypy")
def test_doc_examples(self):
"""
Run documentation examples through mypy.
"""
fn = Path(__file__).resolve().parent / 'generated_type_hints_smoketest.py'
with open(fn, "w") as f:
print(get_all_examples(), file=f)
# OK, so here's the deal. mypy treats installed packages
# and local modules differently: if a package is installed,
# mypy will refuse to use modules from that package for type
# checking unless the module explicitly says that it supports
# type checking. (Reference:
# https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports
# )
#
# Now, PyTorch doesn't support typechecking, and we shouldn't
# claim that it supports typechecking (it doesn't.) However, not
# claiming we support typechecking is bad for this test, which
# wants to use the partial information we get from the bits of
# PyTorch which are typed to check if it typechecks. And
# although mypy will work directly if you are working in source,
# some of our tests involve installing PyTorch and then running
# its tests.
#
# The guidance we got from Michael Sullivan and Joshua Oreman,
# and also independently developed by Thomas Viehmann,
# is that we should create a fake directory and add symlinks for
# the packages that should typecheck. So that is what we do
# here.
#
# If you want to run mypy by hand, and you run from PyTorch
# root directory, it should work fine to skip this step (since
# mypy will preferentially pick up the local files first). The
# temporary directory here is purely needed for CI. For this
# reason, we also still drop the generated file in the test
# source folder, for ease of inspection when there are failures.
with tempfile.TemporaryDirectory() as tmp_dir:
try:
os.symlink(
os.path.dirname(torch.__file__),
os.path.join(tmp_dir, 'torch'),
target_is_directory=True
)
except OSError:
raise unittest.SkipTest('cannot symlink') from None
repo_rootdir = Path(__file__).resolve().parent.parent
# TODO: Would be better not to chdir here, this affects the
# entire process!
with set_cwd(str(repo_rootdir)):
(stdout, stderr, result) = mypy.api.run([
'--cache-dir=.mypy_cache/doc',
'--no-strict-optional', # needed because of torch.lu_unpack, see gh-36584
str(fn),
])
if result != 0:
self.fail(f"mypy failed:\n{stderr}\n{stdout}")
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_type_hints.py
|
# Owner(s): ["module: fft"]
import torch
import unittest
import math
from contextlib import contextmanager
from itertools import product
import itertools
import doctest
import inspect
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_NUMPY, TEST_LIBROSA, TEST_MKL, first_sample, TEST_WITH_ROCM,
make_tensor)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, onlyNativeDeviceTypes,
skipCPUIfNoFFT, deviceCountAtLeast, onlyCUDA, OpDTypes, skipIf, toleranceOverride, tol)
from torch.testing._internal.common_methods_invocations import (
spectral_funcs, SpectralFuncType)
from torch.testing._internal.common_cuda import SM53OrLater
from setuptools import distutils
from typing import Optional, List
if TEST_NUMPY:
import numpy as np
if TEST_LIBROSA:
import librosa
has_scipy_fft = False
try:
import scipy.fft
has_scipy_fft = True
except ModuleNotFoundError:
pass
LooseVersion = distutils.version.LooseVersion
REFERENCE_NORM_MODES = (
(None, "forward", "backward", "ortho")
if LooseVersion(np.__version__) >= '1.20.0' and (
not has_scipy_fft or LooseVersion(scipy.__version__) >= '1.6.0')
else (None, "ortho"))
def _complex_stft(x, *args, **kwargs):
# Transform real and imaginary components separately
stft_real = torch.stft(x.real, *args, **kwargs, return_complex=True, onesided=False)
stft_imag = torch.stft(x.imag, *args, **kwargs, return_complex=True, onesided=False)
return stft_real + 1j * stft_imag
def _hermitian_conj(x, dim):
"""Returns the hermitian conjugate along a single dimension
H(x)[i] = conj(x[-i])
"""
out = torch.empty_like(x)
mid = (x.size(dim) - 1) // 2
idx = [slice(None)] * out.dim()
idx_center = list(idx)
idx_center[dim] = 0
out[idx] = x[idx]
idx_neg = list(idx)
idx_neg[dim] = slice(-mid, None)
idx_pos = idx
idx_pos[dim] = slice(1, mid + 1)
out[idx_pos] = x[idx_neg].flip(dim)
out[idx_neg] = x[idx_pos].flip(dim)
if (2 * mid + 1 < x.size(dim)):
idx[dim] = mid + 1
out[idx] = x[idx]
return out.conj()
def _complex_istft(x, *args, **kwargs):
# Decompose into Hermitian (FFT of real) and anti-Hermitian (FFT of imaginary)
n_fft = x.size(-2)
slc = (Ellipsis, slice(None, n_fft // 2 + 1), slice(None))
hconj = _hermitian_conj(x, dim=-2)
x_hermitian = (x + hconj) / 2
x_antihermitian = (x - hconj) / 2
istft_real = torch.istft(x_hermitian[slc], *args, **kwargs, onesided=True)
istft_imag = torch.istft(-1j * x_antihermitian[slc], *args, **kwargs, onesided=True)
return torch.complex(istft_real, istft_imag)
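# Rationale (a sketch): any spectrum X splits as X = X_h + X_a, where
# X_h = (X + H(X)) / 2 is Hermitian (the STFT of the real part of the signal)
# and X_a = (X - H(X)) / 2 is anti-Hermitian (1j times the STFT of the
# imaginary part). Each half can be inverted with the onesided torch.istft and
# the results recombined as real + 1j * imag, which is what happens above.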
def _stft_reference(x, hop_length, window):
r"""Reference stft implementation
This doesn't implement all of torch.stft, only the STFT definition:
.. math:: X(m, \omega) = \sum_n x[n]w[n - m] e^{-jn\omega}
"""
n_fft = window.numel()
X = torch.empty((n_fft, (x.numel() - n_fft + hop_length) // hop_length),
device=x.device, dtype=torch.cdouble)
for m in range(X.size(1)):
start = m * hop_length
if start + n_fft > x.numel():
slc = torch.empty(n_fft, device=x.device, dtype=x.dtype)
tmp = x[start:]
slc[:tmp.numel()] = tmp
else:
slc = x[start: start + n_fft]
X[:, m] = torch.fft.fft(slc * window)
return X
def skip_helper_for_fft(device, dtype):
device_type = torch.device(device).type
if dtype not in (torch.half, torch.complex32):
return
if device_type == 'cpu':
raise unittest.SkipTest("half and complex32 are not supported on CPU")
if TEST_WITH_ROCM:
raise unittest.SkipTest("half and complex32 are not supported on ROCM")
if not SM53OrLater:
raise unittest.SkipTest("half and complex32 are only supported on CUDA device with SM>53")
# Tests of functions related to Fourier analysis in the torch.fft namespace
class TestFFT(TestCase):
exact_dtype = True
@onlyNativeDeviceTypes
@ops([op for op in spectral_funcs if op.ndimensional == SpectralFuncType.OneD],
allowed_dtypes=(torch.float, torch.cfloat))
def test_reference_1d(self, device, dtype, op):
if op.ref is None:
raise unittest.SkipTest("No reference implementation")
norm_modes = REFERENCE_NORM_MODES
test_args = [
*product(
# input
(torch.randn(67, device=device, dtype=dtype),
torch.randn(80, device=device, dtype=dtype),
torch.randn(12, 14, device=device, dtype=dtype),
torch.randn(9, 6, 3, device=device, dtype=dtype)),
# n
(None, 50, 6),
# dim
(-1, 0),
# norm
norm_modes
),
# Test transforming middle dimensions of multi-dim tensor
*product(
(torch.randn(4, 5, 6, 7, device=device, dtype=dtype),),
(None,),
(1, 2, -2,),
norm_modes
)
]
for iargs in test_args:
args = list(iargs)
input = args[0]
args = args[1:]
expected = op.ref(input.cpu().numpy(), *args)
exact_dtype = dtype in (torch.double, torch.complex128)
actual = op(input, *args)
self.assertEqual(actual, expected, exact_dtype=exact_dtype)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@toleranceOverride({
torch.half : tol(1e-2, 1e-2),
torch.chalf : tol(1e-2, 1e-2),
})
@dtypes(torch.half, torch.float, torch.double, torch.complex32, torch.complex64, torch.complex128)
def test_fft_round_trip(self, device, dtype):
skip_helper_for_fft(device, dtype)
# Test that round trip through ifft(fft(x)) is the identity
if dtype not in (torch.half, torch.complex32):
test_args = list(product(
# input
(torch.randn(67, device=device, dtype=dtype),
torch.randn(80, device=device, dtype=dtype),
torch.randn(12, 14, device=device, dtype=dtype),
torch.randn(9, 6, 3, device=device, dtype=dtype)),
# dim
(-1, 0),
# norm
(None, "forward", "backward", "ortho")
))
else:
# cuFFT supports powers of 2 for half and complex half precision
test_args = list(product(
# input
(torch.randn(64, device=device, dtype=dtype),
torch.randn(128, device=device, dtype=dtype),
torch.randn(4, 16, device=device, dtype=dtype),
torch.randn(8, 6, 2, device=device, dtype=dtype)),
# dim
(-1, 0),
# norm
(None, "forward", "backward", "ortho")
))
fft_functions = [(torch.fft.fft, torch.fft.ifft)]
# Real-only functions
if not dtype.is_complex:
# NOTE: Using ihfft as "forward" transform to avoid needing to
# generate true half-complex input
fft_functions += [(torch.fft.rfft, torch.fft.irfft),
(torch.fft.ihfft, torch.fft.hfft)]
for forward, backward in fft_functions:
for x, dim, norm in test_args:
kwargs = {
'n': x.size(dim),
'dim': dim,
'norm': norm,
}
y = backward(forward(x, **kwargs), **kwargs)
if x.dtype is torch.half and y.dtype is torch.complex32:
# Since type promotion currently doesn't work with complex32
# manually promote `x` to complex32
x = x.to(torch.complex32)
# For real input, ifft(fft(x)) will convert to complex
self.assertEqual(x, y, exact_dtype=(
forward != torch.fft.fft or x.is_complex()))
# Note: NumPy will throw a ValueError for an empty input
@onlyNativeDeviceTypes
@ops(spectral_funcs, allowed_dtypes=(torch.half, torch.float, torch.complex32, torch.cfloat))
def test_empty_fft(self, device, dtype, op):
t = torch.empty(1, 0, device=device, dtype=dtype)
match = r"Invalid number of data points \([-\d]*\) specified"
with self.assertRaisesRegex(RuntimeError, match):
op(t)
@onlyNativeDeviceTypes
def test_empty_ifft(self, device):
t = torch.empty(2, 1, device=device, dtype=torch.complex64)
match = r"Invalid number of data points \([-\d]*\) specified"
for f in [torch.fft.irfft, torch.fft.irfft2, torch.fft.irfftn,
torch.fft.hfft, torch.fft.hfft2, torch.fft.hfftn]:
with self.assertRaisesRegex(RuntimeError, match):
f(t)
@onlyNativeDeviceTypes
def test_fft_invalid_dtypes(self, device):
t = torch.randn(64, device=device, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, "rfft expects a real input tensor"):
torch.fft.rfft(t)
with self.assertRaisesRegex(RuntimeError, "rfftn expects a real-valued input tensor"):
torch.fft.rfftn(t)
with self.assertRaisesRegex(RuntimeError, "ihfft expects a real input tensor"):
torch.fft.ihfft(t)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.int8, torch.half, torch.float, torch.double,
torch.complex32, torch.complex64, torch.complex128)
def test_fft_type_promotion(self, device, dtype):
skip_helper_for_fft(device, dtype)
if dtype.is_complex or dtype.is_floating_point:
t = torch.randn(64, device=device, dtype=dtype)
else:
t = torch.randint(-2, 2, (64,), device=device, dtype=dtype)
PROMOTION_MAP = {
torch.int8: torch.complex64,
torch.half: torch.complex32,
torch.float: torch.complex64,
torch.double: torch.complex128,
torch.complex32: torch.complex32,
torch.complex64: torch.complex64,
torch.complex128: torch.complex128,
}
T = torch.fft.fft(t)
self.assertEqual(T.dtype, PROMOTION_MAP[dtype])
PROMOTION_MAP_C2R = {
torch.int8: torch.float,
torch.half: torch.half,
torch.float: torch.float,
torch.double: torch.double,
torch.complex32: torch.half,
torch.complex64: torch.float,
torch.complex128: torch.double,
}
if dtype in (torch.half, torch.complex32):
# cuFFT supports powers of 2 for half and complex half precision
# NOTE: With hfft and default args where output_size n=2*(input_size - 1),
# we make sure that logical fft size is a power of two.
x = torch.randn(65, device=device, dtype=dtype)
R = torch.fft.hfft(x)
else:
R = torch.fft.hfft(t)
self.assertEqual(R.dtype, PROMOTION_MAP_C2R[dtype])
if not dtype.is_complex:
PROMOTION_MAP_R2C = {
torch.int8: torch.complex64,
torch.half: torch.complex32,
torch.float: torch.complex64,
torch.double: torch.complex128,
}
C = torch.fft.rfft(t)
self.assertEqual(C.dtype, PROMOTION_MAP_R2C[dtype])
@onlyNativeDeviceTypes
@ops(spectral_funcs, dtypes=OpDTypes.unsupported,
allowed_dtypes=[torch.half, torch.bfloat16])
def test_fft_half_and_bfloat16_errors(self, device, dtype, op):
# TODO: Remove torch.half error when complex32 is fully implemented
sample = first_sample(self, op.sample_inputs(device, dtype))
device_type = torch.device(device).type
if dtype is torch.half and device_type == 'cuda' and TEST_WITH_ROCM:
err_msg = "Unsupported dtype "
elif dtype is torch.half and device_type == 'cuda' and not SM53OrLater:
err_msg = "cuFFT doesn't support signals of half type with compute capability less than SM_53"
else:
err_msg = "Unsupported dtype "
with self.assertRaisesRegex(RuntimeError, err_msg):
op(sample.input, *sample.args, **sample.kwargs)
@onlyNativeDeviceTypes
@ops(spectral_funcs, allowed_dtypes=(torch.half, torch.chalf))
def test_fft_half_and_chalf_not_power_of_two_error(self, device, dtype, op):
t = make_tensor(13, 13, device=device, dtype=dtype)
err_msg = "cuFFT only supports dimensions whose sizes are powers of two"
with self.assertRaisesRegex(RuntimeError, err_msg):
op(t)
if op.ndimensional in (SpectralFuncType.ND, SpectralFuncType.TwoD):
kwargs = {'s': (12, 12)}
else:
kwargs = {'n': 12}
with self.assertRaisesRegex(RuntimeError, err_msg):
op(t, **kwargs)
# nd-fft tests
@onlyNativeDeviceTypes
@unittest.skipIf(not TEST_NUMPY, 'NumPy not found')
@ops([op for op in spectral_funcs if op.ndimensional == SpectralFuncType.ND],
allowed_dtypes=(torch.cfloat, torch.cdouble))
def test_reference_nd(self, device, dtype, op):
if op.ref is None:
raise unittest.SkipTest("No reference implementation")
norm_modes = REFERENCE_NORM_MODES
# input_ndim, s, dim
transform_desc = [
*product(range(2, 5), (None,), (None, (0,), (0, -1))),
*product(range(2, 5), (None, (4, 10)), (None,)),
(6, None, None),
(5, None, (1, 3, 4)),
(3, None, (1,)),
(1, None, (0,)),
(4, (10, 10), None),
(4, (10, 10), (0, 1))
]
for input_ndim, s, dim in transform_desc:
shape = itertools.islice(itertools.cycle(range(4, 9)), input_ndim)
input = torch.randn(*shape, device=device, dtype=dtype)
for norm in norm_modes:
expected = op.ref(input.cpu().numpy(), s, dim, norm)
exact_dtype = dtype in (torch.double, torch.complex128)
actual = op(input, s, dim, norm)
self.assertEqual(actual, expected, exact_dtype=exact_dtype)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@toleranceOverride({
torch.half : tol(1e-2, 1e-2),
torch.chalf : tol(1e-2, 1e-2),
})
@dtypes(torch.half, torch.float, torch.double,
torch.complex32, torch.complex64, torch.complex128)
def test_fftn_round_trip(self, device, dtype):
skip_helper_for_fft(device, dtype)
norm_modes = (None, "forward", "backward", "ortho")
# input_ndim, dim
transform_desc = [
*product(range(2, 5), (None, (0,), (0, -1))),
(7, None),
(5, (1, 3, 4)),
(3, (1,)),
(1, 0),
]
fft_functions = [(torch.fft.fftn, torch.fft.ifftn)]
# Real-only functions
if not dtype.is_complex:
# NOTE: Using ihfftn as "forward" transform to avoid needing to
# generate true half-complex input
fft_functions += [(torch.fft.rfftn, torch.fft.irfftn),
(torch.fft.ihfftn, torch.fft.hfftn)]
for input_ndim, dim in transform_desc:
if dtype in (torch.half, torch.complex32):
# cuFFT supports powers of 2 for half and complex half precision
shape = itertools.islice(itertools.cycle((2, 4, 8)), input_ndim)
else:
shape = itertools.islice(itertools.cycle(range(4, 9)), input_ndim)
x = torch.randn(*shape, device=device, dtype=dtype)
for (forward, backward), norm in product(fft_functions, norm_modes):
if isinstance(dim, tuple):
s = [x.size(d) for d in dim]
else:
s = x.size() if dim is None else x.size(dim)
kwargs = {'s': s, 'dim': dim, 'norm': norm}
y = backward(forward(x, **kwargs), **kwargs)
# For real input, ifftn(fftn(x)) will convert to complex
if x.dtype is torch.half and y.dtype is torch.chalf:
# Since type promotion currently doesn't work with complex32
# manually promote `x` to complex32
self.assertEqual(x.to(torch.chalf), y)
else:
self.assertEqual(x, y, exact_dtype=(
forward != torch.fft.fftn or x.is_complex()))
@onlyNativeDeviceTypes
@ops([op for op in spectral_funcs if op.ndimensional == SpectralFuncType.ND],
allowed_dtypes=[torch.float, torch.cfloat])
def test_fftn_invalid(self, device, dtype, op):
a = torch.rand(10, 10, 10, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "dims must be unique"):
op(a, dim=(0, 1, 0))
with self.assertRaisesRegex(RuntimeError, "dims must be unique"):
op(a, dim=(2, -1))
with self.assertRaisesRegex(RuntimeError, "dim and shape .* same length"):
op(a, s=(1,), dim=(0, 1))
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
op(a, dim=(3,))
with self.assertRaisesRegex(RuntimeError, "tensor only has 3 dimensions"):
op(a, s=(10, 10, 10, 10))
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@toleranceOverride({
torch.half : tol(1e-2, 1e-2),
})
@dtypes(torch.half, torch.float, torch.double)
def test_hfftn(self, device, dtype):
skip_helper_for_fft(device, dtype)
# input_ndim, dim
transform_desc = [
*product(range(2, 5), (None, (0,), (0, -1))),
(6, None),
(5, (1, 3, 4)),
(3, (1,)),
(1, (0,)),
(4, (0, 1))
]
for input_ndim, dim in transform_desc:
actual_dims = list(range(input_ndim)) if dim is None else dim
if dtype is torch.half:
shape = tuple(itertools.islice(itertools.cycle((2, 4, 8)), input_ndim))
else:
shape = tuple(itertools.islice(itertools.cycle(range(4, 9)), input_ndim))
expect = torch.randn(*shape, device=device, dtype=dtype)
input = torch.fft.ifftn(expect, dim=dim, norm="ortho")
lastdim = actual_dims[-1]
lastdim_size = input.size(lastdim) // 2 + 1
idx = [slice(None)] * input_ndim
idx[lastdim] = slice(0, lastdim_size)
input = input[idx]
s = [shape[dim] for dim in actual_dims]
actual = torch.fft.hfftn(input, s=s, dim=dim, norm="ortho")
self.assertEqual(expect, actual)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@toleranceOverride({
torch.half : tol(1e-2, 1e-2),
})
@dtypes(torch.half, torch.float, torch.double)
def test_ihfftn(self, device, dtype):
skip_helper_for_fft(device, dtype)
# input_ndim, dim
transform_desc = [
*product(range(2, 5), (None, (0,), (0, -1))),
(6, None),
(5, (1, 3, 4)),
(3, (1,)),
(1, (0,)),
(4, (0, 1))
]
for input_ndim, dim in transform_desc:
if dtype is torch.half:
shape = tuple(itertools.islice(itertools.cycle((2, 4, 8)), input_ndim))
else:
shape = tuple(itertools.islice(itertools.cycle(range(4, 9)), input_ndim))
input = torch.randn(*shape, device=device, dtype=dtype)
expect = torch.fft.ifftn(input, dim=dim, norm="ortho")
# Slice off the half-symmetric component
lastdim = -1 if dim is None else dim[-1]
lastdim_size = expect.size(lastdim) // 2 + 1
idx = [slice(None)] * input_ndim
idx[lastdim] = slice(0, lastdim_size)
expect = expect[idx]
actual = torch.fft.ihfftn(input, dim=dim, norm="ortho")
self.assertEqual(expect, actual)
# 2d-fft tests
# NOTE: 2d transforms are only thin wrappers over n-dim transforms,
# so don't require exhaustive testing.
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.double, torch.complex128)
def test_fft2_numpy(self, device, dtype):
norm_modes = REFERENCE_NORM_MODES
# input_ndim, s
transform_desc = [
*product(range(2, 5), (None, (4, 10))),
]
fft_functions = ['fft2', 'ifft2', 'irfft2', 'hfft2']
if dtype.is_floating_point:
fft_functions += ['rfft2', 'ihfft2']
for input_ndim, s in transform_desc:
shape = itertools.islice(itertools.cycle(range(4, 9)), input_ndim)
input = torch.randn(*shape, device=device, dtype=dtype)
for fname, norm in product(fft_functions, norm_modes):
torch_fn = getattr(torch.fft, fname)
if "hfft" in fname:
if not has_scipy_fft:
continue # Requires scipy to compare against
numpy_fn = getattr(scipy.fft, fname)
else:
numpy_fn = getattr(np.fft, fname)
def fn(t: torch.Tensor, s: Optional[List[int]], dim: List[int] = (-2, -1), norm: Optional[str] = None):
return torch_fn(t, s, dim, norm)
torch_fns = (torch_fn, torch.jit.script(fn))
# Once with dim defaulted
input_np = input.cpu().numpy()
expected = numpy_fn(input_np, s, norm=norm)
for fn in torch_fns:
actual = fn(input, s, norm=norm)
self.assertEqual(actual, expected)
# Once with explicit dims
dim = (1, 0)
expected = numpy_fn(input_np, s, dim, norm)
for fn in torch_fns:
actual = fn(input, s, dim, norm)
self.assertEqual(actual, expected)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.complex64)
def test_fft2_fftn_equivalence(self, device, dtype):
norm_modes = (None, "forward", "backward", "ortho")
# input_ndim, s, dim
transform_desc = [
*product(range(2, 5), (None, (4, 10)), (None, (1, 0))),
(3, None, (0, 2)),
]
fft_functions = ['fft', 'ifft', 'irfft', 'hfft']
# Real-only functions
if dtype.is_floating_point:
fft_functions += ['rfft', 'ihfft']
for input_ndim, s, dim in transform_desc:
shape = itertools.islice(itertools.cycle(range(4, 9)), input_ndim)
x = torch.randn(*shape, device=device, dtype=dtype)
for func, norm in product(fft_functions, norm_modes):
f2d = getattr(torch.fft, func + '2')
fnd = getattr(torch.fft, func + 'n')
kwargs = {'s': s, 'norm': norm}
if dim is not None:
kwargs['dim'] = dim
expect = fnd(x, **kwargs)
else:
expect = fnd(x, dim=(-2, -1), **kwargs)
actual = f2d(x, **kwargs)
self.assertEqual(actual, expect)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
def test_fft2_invalid(self, device):
a = torch.rand(10, 10, 10, device=device)
fft_funcs = (torch.fft.fft2, torch.fft.ifft2,
torch.fft.rfft2, torch.fft.irfft2)
for func in fft_funcs:
with self.assertRaisesRegex(RuntimeError, "dims must be unique"):
func(a, dim=(0, 0))
with self.assertRaisesRegex(RuntimeError, "dims must be unique"):
func(a, dim=(2, -1))
with self.assertRaisesRegex(RuntimeError, "dim and shape .* same length"):
func(a, s=(1,))
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
func(a, dim=(2, 3))
c = torch.complex(a, a)
with self.assertRaisesRegex(RuntimeError, "rfftn expects a real-valued input"):
torch.fft.rfft2(c)
# Helper functions
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@unittest.skipIf(not TEST_NUMPY, 'NumPy not found')
@dtypes(torch.float, torch.double)
def test_fftfreq_numpy(self, device, dtype):
test_args = [
*product(
# n
range(1, 20),
# d
(None, 10.0),
)
]
functions = ['fftfreq', 'rfftfreq']
for fname in functions:
torch_fn = getattr(torch.fft, fname)
numpy_fn = getattr(np.fft, fname)
for n, d in test_args:
args = (n,) if d is None else (n, d)
expected = numpy_fn(*args)
actual = torch_fn(*args, device=device, dtype=dtype)
self.assertEqual(actual, expected, exact_dtype=False)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
def test_fftfreq_out(self, device, dtype):
for func in (torch.fft.fftfreq, torch.fft.rfftfreq):
expect = func(n=100, d=.5, device=device, dtype=dtype)
actual = torch.empty((), device=device, dtype=dtype)
with self.assertWarnsRegex(UserWarning, "out tensor will be resized"):
func(n=100, d=.5, out=actual)
self.assertEqual(actual, expect)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@unittest.skipIf(not TEST_NUMPY, 'NumPy not found')
@dtypes(torch.float, torch.double, torch.complex64, torch.complex128)
def test_fftshift_numpy(self, device, dtype):
test_args = [
# shape, dim
*product(((11,), (12,)), (None, 0, -1)),
*product(((4, 5), (6, 6)), (None, 0, (-1,))),
*product(((1, 1, 4, 6, 7, 2),), (None, (3, 4))),
]
functions = ['fftshift', 'ifftshift']
for shape, dim in test_args:
input = torch.rand(*shape, device=device, dtype=dtype)
input_np = input.cpu().numpy()
for fname in functions:
torch_fn = getattr(torch.fft, fname)
numpy_fn = getattr(np.fft, fname)
expected = numpy_fn(input_np, axes=dim)
actual = torch_fn(input, dim=dim)
self.assertEqual(actual, expected)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@unittest.skipIf(not TEST_NUMPY, 'NumPy not found')
@dtypes(torch.float, torch.double)
def test_fftshift_frequencies(self, device, dtype):
for n in range(10, 15):
sorted_fft_freqs = torch.arange(-(n // 2), n - (n // 2),
device=device, dtype=dtype)
x = torch.fft.fftfreq(n, d=1 / n, device=device, dtype=dtype)
# Test fftshift sorts the fftfreq output
shifted = torch.fft.fftshift(x)
self.assertEqual(shifted, shifted.sort().values)
self.assertEqual(sorted_fft_freqs, shifted)
# And ifftshift is the inverse
self.assertEqual(x, torch.fft.ifftshift(shifted))
# Legacy fft tests
def _test_fft_ifft_rfft_irfft(self, device, dtype):
complex_dtype = {
torch.float16: torch.complex32,
torch.float32: torch.complex64,
torch.float64: torch.complex128
}[dtype]
def _test_complex(sizes, signal_ndim, prepro_fn=lambda x: x):
x = prepro_fn(torch.randn(*sizes, dtype=complex_dtype, device=device))
dim = tuple(range(-signal_ndim, 0))
for norm in ('ortho', None):
res = torch.fft.fftn(x, dim=dim, norm=norm)
rec = torch.fft.ifftn(res, dim=dim, norm=norm)
self.assertEqual(x, rec, atol=1e-8, rtol=0, msg='fft and ifft')
res = torch.fft.ifftn(x, dim=dim, norm=norm)
rec = torch.fft.fftn(res, dim=dim, norm=norm)
self.assertEqual(x, rec, atol=1e-8, rtol=0, msg='ifft and fft')
def _test_real(sizes, signal_ndim, prepro_fn=lambda x: x):
x = prepro_fn(torch.randn(*sizes, dtype=dtype, device=device))
signal_numel = 1
signal_sizes = x.size()[-signal_ndim:]
dim = tuple(range(-signal_ndim, 0))
for norm in (None, 'ortho'):
res = torch.fft.rfftn(x, dim=dim, norm=norm)
rec = torch.fft.irfftn(res, s=signal_sizes, dim=dim, norm=norm)
self.assertEqual(x, rec, atol=1e-8, rtol=0, msg='rfft and irfft')
res = torch.fft.fftn(x, dim=dim, norm=norm)
rec = torch.fft.ifftn(res, dim=dim, norm=norm)
x_complex = torch.complex(x, torch.zeros_like(x))
self.assertEqual(x_complex, rec, atol=1e-8, rtol=0, msg='fft and ifft (from real)')
# contiguous case
_test_real((100,), 1)
_test_real((10, 1, 10, 100), 1)
_test_real((100, 100), 2)
_test_real((2, 2, 5, 80, 60), 2)
_test_real((50, 40, 70), 3)
_test_real((30, 1, 50, 25, 20), 3)
_test_complex((100,), 1)
_test_complex((100, 100), 1)
_test_complex((100, 100), 2)
_test_complex((1, 20, 80, 60), 2)
_test_complex((50, 40, 70), 3)
_test_complex((6, 5, 50, 25, 20), 3)
# non-contiguous case
_test_real((165,), 1, lambda x: x.narrow(0, 25, 100)) # input is not aligned to complex type
_test_real((100, 100, 3), 1, lambda x: x[:, :, 0])
_test_real((100, 100), 2, lambda x: x.t())
_test_real((20, 100, 10, 10), 2, lambda x: x.view(20, 100, 100)[:, :60])
_test_real((65, 80, 115), 3, lambda x: x[10:60, 13:53, 10:80])
_test_real((30, 20, 50, 25), 3, lambda x: x.transpose(1, 2).transpose(2, 3))
_test_complex((100,), 1, lambda x: x.expand(100, 100))
_test_complex((20, 90, 110), 2, lambda x: x[:, 5:85].narrow(2, 5, 100))
_test_complex((40, 60, 3, 80), 3, lambda x: x.transpose(2, 0).select(0, 2)[5:55, :, 10:])
_test_complex((30, 55, 50, 22), 3, lambda x: x[:, 3:53, 15:40, 1:21])
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.double)
def test_fft_ifft_rfft_irfft(self, device, dtype):
self._test_fft_ifft_rfft_irfft(device, dtype)
@deviceCountAtLeast(1)
@onlyCUDA
@dtypes(torch.double)
def test_cufft_plan_cache(self, devices, dtype):
@contextmanager
def plan_cache_max_size(device, n):
if device is None:
plan_cache = torch.backends.cuda.cufft_plan_cache
else:
plan_cache = torch.backends.cuda.cufft_plan_cache[device]
original = plan_cache.max_size
plan_cache.max_size = n
yield
plan_cache.max_size = original
with plan_cache_max_size(devices[0], max(1, torch.backends.cuda.cufft_plan_cache.size - 10)):
self._test_fft_ifft_rfft_irfft(devices[0], dtype)
with plan_cache_max_size(devices[0], 0):
self._test_fft_ifft_rfft_irfft(devices[0], dtype)
torch.backends.cuda.cufft_plan_cache.clear()
        # check that it still works after clearing the cache
with plan_cache_max_size(devices[0], 10):
self._test_fft_ifft_rfft_irfft(devices[0], dtype)
with self.assertRaisesRegex(RuntimeError, r"must be non-negative"):
torch.backends.cuda.cufft_plan_cache.max_size = -1
with self.assertRaisesRegex(RuntimeError, r"read-only property"):
torch.backends.cuda.cufft_plan_cache.size = -1
with self.assertRaisesRegex(RuntimeError, r"but got device with index"):
torch.backends.cuda.cufft_plan_cache[torch.cuda.device_count() + 10]
# Multigpu tests
if len(devices) > 1:
# Test that different GPU has different cache
x0 = torch.randn(2, 3, 3, device=devices[0])
x1 = x0.to(devices[1])
self.assertEqual(torch.fft.rfftn(x0, dim=(-2, -1)), torch.fft.rfftn(x1, dim=(-2, -1)))
# If a plan is used across different devices, the following line (or
# the assert above) would trigger illegal memory access. Other ways
# to trigger the error include
# (1) setting CUDA_LAUNCH_BLOCKING=1 (pytorch/pytorch#19224) and
# (2) printing a device 1 tensor.
x0.copy_(x1)
# Test that un-indexed `torch.backends.cuda.cufft_plan_cache` uses current device
with plan_cache_max_size(devices[0], 10):
with plan_cache_max_size(devices[1], 11):
self.assertEqual(torch.backends.cuda.cufft_plan_cache[0].max_size, 10)
self.assertEqual(torch.backends.cuda.cufft_plan_cache[1].max_size, 11)
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 10) # default is cuda:0
with torch.cuda.device(devices[1]):
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 11) # default is cuda:1
with torch.cuda.device(devices[0]):
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 10) # default is cuda:0
self.assertEqual(torch.backends.cuda.cufft_plan_cache[0].max_size, 10)
with torch.cuda.device(devices[1]):
with plan_cache_max_size(None, 11): # default is cuda:1
self.assertEqual(torch.backends.cuda.cufft_plan_cache[0].max_size, 10)
self.assertEqual(torch.backends.cuda.cufft_plan_cache[1].max_size, 11)
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 11) # default is cuda:1
with torch.cuda.device(devices[0]):
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 10) # default is cuda:0
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 11) # default is cuda:1
# passes on ROCm w/ python 2.7, fails w/ python 3.6
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.double)
def test_stft(self, device, dtype):
if not TEST_LIBROSA:
raise unittest.SkipTest('librosa not found')
def librosa_stft(x, n_fft, hop_length, win_length, window, center):
if window is None:
window = np.ones(n_fft if win_length is None else win_length)
else:
window = window.cpu().numpy()
input_1d = x.dim() == 1
if input_1d:
x = x.view(1, -1)
# NOTE: librosa 0.9 changed default pad_mode to 'constant' (zero padding)
# however, we use the pre-0.9 default ('reflect')
pad_mode = 'reflect'
result = []
for xi in x:
ri = librosa.stft(xi.cpu().numpy(), n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center,
pad_mode=pad_mode)
result.append(torch.from_numpy(np.stack([ri.real, ri.imag], -1)))
result = torch.stack(result, 0)
if input_1d:
result = result[0]
return result
def _test(sizes, n_fft, hop_length=None, win_length=None, win_sizes=None,
center=True, expected_error=None):
x = torch.randn(*sizes, dtype=dtype, device=device)
if win_sizes is not None:
window = torch.randn(*win_sizes, dtype=dtype, device=device)
else:
window = None
if expected_error is None:
result = x.stft(n_fft, hop_length, win_length, window,
center=center, return_complex=False)
# NB: librosa defaults to np.complex64 output, no matter what
# the input dtype
ref_result = librosa_stft(x, n_fft, hop_length, win_length, window, center)
self.assertEqual(result, ref_result, atol=7e-6, rtol=0, msg='stft comparison against librosa', exact_dtype=False)
# With return_complex=True, the result is the same but viewed as complex instead of real
result_complex = x.stft(n_fft, hop_length, win_length, window, center=center, return_complex=True)
self.assertEqual(result_complex, torch.view_as_complex(result))
else:
self.assertRaises(expected_error,
lambda: x.stft(n_fft, hop_length, win_length, window, center=center))
for center in [True, False]:
_test((10,), 7, center=center)
_test((10, 4000), 1024, center=center)
_test((10,), 7, 2, center=center)
_test((10, 4000), 1024, 512, center=center)
_test((10,), 7, 2, win_sizes=(7,), center=center)
_test((10, 4000), 1024, 512, win_sizes=(1024,), center=center)
# spectral oversample
_test((10,), 7, 2, win_length=5, center=center)
_test((10, 4000), 1024, 512, win_length=100, center=center)
_test((10, 4, 2), 1, 1, expected_error=RuntimeError)
_test((10,), 11, 1, center=False, expected_error=RuntimeError)
_test((10,), -1, 1, expected_error=RuntimeError)
_test((10,), 3, win_length=5, expected_error=RuntimeError)
_test((10,), 5, 4, win_sizes=(11,), expected_error=RuntimeError)
_test((10,), 5, 4, win_sizes=(1, 1), expected_error=RuntimeError)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.double)
def test_istft_against_librosa(self, device, dtype):
if not TEST_LIBROSA:
raise unittest.SkipTest('librosa not found')
def librosa_istft(x, n_fft, hop_length, win_length, window, length, center):
if window is None:
window = np.ones(n_fft if win_length is None else win_length)
else:
window = window.cpu().numpy()
return librosa.istft(x.cpu().numpy(), n_fft=n_fft, hop_length=hop_length,
win_length=win_length, length=length, window=window, center=center)
def _test(size, n_fft, hop_length=None, win_length=None, win_sizes=None,
length=None, center=True):
x = torch.randn(size, dtype=dtype, device=device)
if win_sizes is not None:
window = torch.randn(*win_sizes, dtype=dtype, device=device)
else:
window = None
x_stft = x.stft(n_fft, hop_length, win_length, window, center=center,
onesided=True, return_complex=True)
ref_result = librosa_istft(x_stft, n_fft, hop_length, win_length,
window, length, center)
result = x_stft.istft(n_fft, hop_length, win_length, window,
length=length, center=center)
self.assertEqual(result, ref_result)
for center in [True, False]:
_test(10, 7, center=center)
_test(4000, 1024, center=center)
_test(4000, 1024, center=center, length=4000)
_test(10, 7, 2, center=center)
_test(4000, 1024, 512, center=center)
_test(4000, 1024, 512, center=center, length=4000)
_test(10, 7, 2, win_sizes=(7,), center=center)
_test(4000, 1024, 512, win_sizes=(1024,), center=center)
_test(4000, 1024, 512, win_sizes=(1024,), center=center, length=4000)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double, torch.cdouble)
def test_complex_stft_roundtrip(self, device, dtype):
test_args = list(product(
# input
(torch.randn(600, device=device, dtype=dtype),
torch.randn(807, device=device, dtype=dtype),
torch.randn(12, 60, device=device, dtype=dtype)),
# n_fft
(50, 27),
# hop_length
(None, 10),
# center
(True,),
# pad_mode
("constant", "reflect", "circular"),
# normalized
(True, False),
# onesided
(True, False) if not dtype.is_complex else (False,),
))
for args in test_args:
x, n_fft, hop_length, center, pad_mode, normalized, onesided = args
common_kwargs = {
'n_fft': n_fft, 'hop_length': hop_length, 'center': center,
'normalized': normalized, 'onesided': onesided,
}
# Functional interface
x_stft = torch.stft(x, pad_mode=pad_mode, return_complex=True, **common_kwargs)
x_roundtrip = torch.istft(x_stft, return_complex=dtype.is_complex,
length=x.size(-1), **common_kwargs)
self.assertEqual(x_roundtrip, x)
# Tensor method interface
x_stft = x.stft(pad_mode=pad_mode, return_complex=True, **common_kwargs)
x_roundtrip = torch.istft(x_stft, return_complex=dtype.is_complex,
length=x.size(-1), **common_kwargs)
self.assertEqual(x_roundtrip, x)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double, torch.cdouble)
def test_stft_roundtrip_complex_window(self, device, dtype):
test_args = list(product(
# input
(torch.randn(600, device=device, dtype=dtype),
torch.randn(807, device=device, dtype=dtype),
torch.randn(12, 60, device=device, dtype=dtype)),
# n_fft
(50, 27),
# hop_length
(None, 10),
# pad_mode
("constant", "reflect", "replicate", "circular"),
# normalized
(True, False),
))
for args in test_args:
x, n_fft, hop_length, pad_mode, normalized = args
window = torch.rand(n_fft, device=device, dtype=torch.cdouble)
x_stft = torch.stft(
x, n_fft=n_fft, hop_length=hop_length, window=window,
center=True, pad_mode=pad_mode, normalized=normalized)
self.assertEqual(x_stft.dtype, torch.cdouble)
self.assertEqual(x_stft.size(-2), n_fft) # Not onesided
x_roundtrip = torch.istft(
x_stft, n_fft=n_fft, hop_length=hop_length, window=window,
center=True, normalized=normalized, length=x.size(-1),
return_complex=True)
self.assertEqual(x_stft.dtype, torch.cdouble)
if not dtype.is_complex:
self.assertEqual(x_roundtrip.imag, torch.zeros_like(x_roundtrip.imag),
atol=1e-6, rtol=0)
self.assertEqual(x_roundtrip.real, x)
else:
self.assertEqual(x_roundtrip, x)
@skipCPUIfNoFFT
@dtypes(torch.cdouble)
def test_complex_stft_definition(self, device, dtype):
test_args = list(product(
# input
(torch.randn(600, device=device, dtype=dtype),
torch.randn(807, device=device, dtype=dtype)),
# n_fft
(50, 27),
# hop_length
(10, 15)
))
for args in test_args:
window = torch.randn(args[1], device=device, dtype=dtype)
expected = _stft_reference(args[0], args[2], window)
actual = torch.stft(*args, window=window, center=False)
self.assertEqual(actual, expected)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.cdouble)
def test_complex_stft_real_equiv(self, device, dtype):
test_args = list(product(
# input
(torch.rand(600, device=device, dtype=dtype),
torch.rand(807, device=device, dtype=dtype),
torch.rand(14, 50, device=device, dtype=dtype),
torch.rand(6, 51, device=device, dtype=dtype)),
# n_fft
(50, 27),
# hop_length
(None, 10),
# win_length
(None, 20),
# center
(False, True),
# pad_mode
("constant", "reflect", "circular"),
# normalized
(True, False),
))
for args in test_args:
x, n_fft, hop_length, win_length, center, pad_mode, normalized = args
expected = _complex_stft(x, n_fft, hop_length=hop_length,
win_length=win_length, pad_mode=pad_mode,
center=center, normalized=normalized)
actual = torch.stft(x, n_fft, hop_length=hop_length,
win_length=win_length, pad_mode=pad_mode,
center=center, normalized=normalized)
self.assertEqual(expected, actual)
@skipCPUIfNoFFT
@dtypes(torch.cdouble)
def test_complex_istft_real_equiv(self, device, dtype):
test_args = list(product(
# input
(torch.rand(40, 20, device=device, dtype=dtype),
torch.rand(25, 1, device=device, dtype=dtype),
torch.rand(4, 20, 10, device=device, dtype=dtype)),
# hop_length
(None, 10),
# center
(False, True),
# normalized
(True, False),
))
for args in test_args:
x, hop_length, center, normalized = args
n_fft = x.size(-2)
expected = _complex_istft(x, n_fft, hop_length=hop_length,
center=center, normalized=normalized)
actual = torch.istft(x, n_fft, hop_length=hop_length,
center=center, normalized=normalized,
return_complex=True)
self.assertEqual(expected, actual)
@skipCPUIfNoFFT
def test_complex_stft_onesided(self, device):
# stft of complex input cannot be onesided
for x_dtype, window_dtype in product((torch.double, torch.cdouble), repeat=2):
x = torch.rand(100, device=device, dtype=x_dtype)
window = torch.rand(10, device=device, dtype=window_dtype)
if x_dtype.is_complex or window_dtype.is_complex:
with self.assertRaisesRegex(RuntimeError, 'complex'):
x.stft(10, window=window, pad_mode='constant', onesided=True)
else:
y = x.stft(10, window=window, pad_mode='constant', onesided=True,
return_complex=True)
self.assertEqual(y.dtype, torch.cdouble)
self.assertEqual(y.size(), (6, 51))
x = torch.rand(100, device=device, dtype=torch.cdouble)
with self.assertRaisesRegex(RuntimeError, 'complex'):
x.stft(10, pad_mode='constant', onesided=True)
    # stft currently warns that it requires return_complex while an upgrader is written
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
def test_stft_requires_complex(self, device):
x = torch.rand(100)
y = x.stft(10, pad_mode='constant')
# with self.assertRaisesRegex(RuntimeError, 'stft requires the return_complex parameter'):
# y = x.stft(10, pad_mode='constant')
@skipCPUIfNoFFT
def test_fft_input_modification(self, device):
# FFT functions should not modify their input (gh-34551)
signal = torch.ones((2, 2, 2), device=device)
signal_copy = signal.clone()
spectrum = torch.fft.fftn(signal, dim=(-2, -1))
self.assertEqual(signal, signal_copy)
spectrum_copy = spectrum.clone()
_ = torch.fft.ifftn(spectrum, dim=(-2, -1))
self.assertEqual(spectrum, spectrum_copy)
half_spectrum = torch.fft.rfftn(signal, dim=(-2, -1))
self.assertEqual(signal, signal_copy)
half_spectrum_copy = half_spectrum.clone()
_ = torch.fft.irfftn(half_spectrum_copy, s=(2, 2), dim=(-2, -1))
self.assertEqual(half_spectrum, half_spectrum_copy)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
def test_fft_plan_repeatable(self, device):
# Regression test for gh-58724 and gh-63152
for n in [2048, 3199, 5999]:
a = torch.randn(n, device=device, dtype=torch.complex64)
res1 = torch.fft.fftn(a)
res2 = torch.fft.fftn(a.clone())
self.assertEqual(res1, res2)
a = torch.randn(n, device=device, dtype=torch.float64)
res1 = torch.fft.rfft(a)
res2 = torch.fft.rfft(a.clone())
self.assertEqual(res1, res2)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double)
def test_istft_round_trip_simple_cases(self, device, dtype):
"""stft -> istft should recover the original signale"""
def _test(input, n_fft, length):
stft = torch.stft(input, n_fft=n_fft, return_complex=True)
inverse = torch.istft(stft, n_fft=n_fft, length=length)
self.assertEqual(input, inverse, exact_dtype=True)
_test(torch.ones(4, dtype=dtype, device=device), 4, 4)
_test(torch.zeros(4, dtype=dtype, device=device), 4, 4)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double)
def test_istft_round_trip_various_params(self, device, dtype):
"""stft -> istft should recover the original signale"""
def _test_istft_is_inverse_of_stft(stft_kwargs):
            # generates a random sound signal for each trial and then does the stft/istft
            # operation to check whether we can reconstruct the signal
data_sizes = [(2, 20), (3, 15), (4, 10)]
num_trials = 100
istft_kwargs = stft_kwargs.copy()
del istft_kwargs['pad_mode']
for sizes in data_sizes:
for i in range(num_trials):
original = torch.randn(*sizes, dtype=dtype, device=device)
stft = torch.stft(original, return_complex=True, **stft_kwargs)
inversed = torch.istft(stft, length=original.size(1), **istft_kwargs)
self.assertEqual(
inversed, original, msg='istft comparison against original',
atol=7e-6, rtol=0, exact_dtype=True)
patterns = [
# hann_window, centered, normalized, onesided
{
'n_fft': 12,
'hop_length': 4,
'win_length': 12,
'window': torch.hann_window(12, dtype=dtype, device=device),
'center': True,
'pad_mode': 'reflect',
'normalized': True,
'onesided': True,
},
# hann_window, centered, not normalized, not onesided
{
'n_fft': 12,
'hop_length': 2,
'win_length': 8,
'window': torch.hann_window(8, dtype=dtype, device=device),
'center': True,
'pad_mode': 'reflect',
'normalized': False,
'onesided': False,
},
# hamming_window, centered, normalized, not onesided
{
'n_fft': 15,
'hop_length': 3,
'win_length': 11,
'window': torch.hamming_window(11, dtype=dtype, device=device),
'center': True,
'pad_mode': 'constant',
'normalized': True,
'onesided': False,
},
# hamming_window, centered, not normalized, onesided
# window same size as n_fft
{
'n_fft': 5,
'hop_length': 2,
'win_length': 5,
'window': torch.hamming_window(5, dtype=dtype, device=device),
'center': True,
'pad_mode': 'constant',
'normalized': False,
'onesided': True,
},
]
for i, pattern in enumerate(patterns):
_test_istft_is_inverse_of_stft(pattern)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double)
def test_istft_round_trip_with_padding(self, device, dtype):
"""long hop_length or not centered may cause length mismatch in the inversed signal"""
def _test_istft_is_inverse_of_stft_with_padding(stft_kwargs):
            # generates a random sound signal for each trial and then does the stft/istft
            # operation to check whether we can reconstruct the signal
num_trials = 100
sizes = stft_kwargs['size']
del stft_kwargs['size']
istft_kwargs = stft_kwargs.copy()
del istft_kwargs['pad_mode']
for i in range(num_trials):
original = torch.randn(*sizes, dtype=dtype, device=device)
stft = torch.stft(original, return_complex=True, **stft_kwargs)
with self.assertWarnsOnceRegex(UserWarning, "The length of signal is shorter than the length parameter."):
inversed = torch.istft(stft, length=original.size(-1), **istft_kwargs)
n_frames = stft.size(-1)
if stft_kwargs["center"] is True:
len_expected = stft_kwargs["n_fft"] // 2 + stft_kwargs["hop_length"] * (n_frames - 1)
else:
len_expected = stft_kwargs["n_fft"] + stft_kwargs["hop_length"] * (n_frames - 1)
                # trim the original for the case when the reconstructed signal is shorter than the original
padding = inversed[..., len_expected:]
inversed = inversed[..., :len_expected]
original = original[..., :len_expected]
                # test that the padding points of the reconstructed signal are all zeros
zeros = torch.zeros_like(padding, device=padding.device)
self.assertEqual(
padding, zeros, msg='istft padding values against zeros',
atol=7e-6, rtol=0, exact_dtype=True)
self.assertEqual(
inversed, original, msg='istft comparison against original',
atol=7e-6, rtol=0, exact_dtype=True)
patterns = [
# hamming_window, not centered, not normalized, not onesided
# window same size as n_fft
{
'size': [2, 20],
'n_fft': 3,
'hop_length': 2,
'win_length': 3,
'window': torch.hamming_window(3, dtype=dtype, device=device),
'center': False,
'pad_mode': 'reflect',
'normalized': False,
'onesided': False,
},
# hamming_window, centered, not normalized, onesided, long hop_length
# window same size as n_fft
{
'size': [2, 500],
'n_fft': 256,
'hop_length': 254,
'win_length': 256,
'window': torch.hamming_window(256, dtype=dtype, device=device),
'center': True,
'pad_mode': 'constant',
'normalized': False,
'onesided': True,
},
]
for i, pattern in enumerate(patterns):
_test_istft_is_inverse_of_stft_with_padding(pattern)
@onlyNativeDeviceTypes
def test_istft_throws(self, device):
"""istft should throw exception for invalid parameters"""
stft = torch.zeros((3, 5, 2), device=device)
        # the window is size 1 but it hops 20, so there is a gap, which throws an error
self.assertRaises(
RuntimeError, torch.istft, stft, n_fft=4,
hop_length=20, win_length=1, window=torch.ones(1))
# A window of zeros does not meet NOLA
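        # (NOLA: the sum of squared window values over all overlapping frames must be
        # nonzero at every sample, otherwise overlap-add cannot divide out the window
        # envelope.)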
invalid_window = torch.zeros(4, device=device)
self.assertRaises(
RuntimeError, torch.istft, stft, n_fft=4, win_length=4, window=invalid_window)
# Input cannot be empty
self.assertRaises(RuntimeError, torch.istft, torch.zeros((3, 0, 2)), 2)
self.assertRaises(RuntimeError, torch.istft, torch.zeros((0, 3, 2)), 2)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double)
def test_istft_of_sine(self, device, dtype):
def _test(amplitude, L, n):
# stft of amplitude*sin(2*pi/L*n*x) with the hop length and window size equaling L
x = torch.arange(2 * L + 1, device=device, dtype=dtype)
original = amplitude * torch.sin(2 * math.pi / L * x * n)
# stft = torch.stft(original, L, hop_length=L, win_length=L,
# window=torch.ones(L), center=False, normalized=False)
stft = torch.zeros((L // 2 + 1, 2, 2), device=device, dtype=dtype)
stft_largest_val = (amplitude * L) / 2.0
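            # Analytically, the DFT of amplitude * sin(2*pi*n*t/L) over one period has
            # value -1j * amplitude * L / 2 at bin n and +1j * amplitude * L / 2 at bin
            # L - n, so only the imaginary parts of those bins are populated below.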
if n < stft.size(0):
stft[n, :, 1] = -stft_largest_val
if 0 <= L - n < stft.size(0):
# symmetric about L // 2
stft[L - n, :, 1] = stft_largest_val
inverse = torch.istft(
stft, L, hop_length=L, win_length=L,
window=torch.ones(L, device=device, dtype=dtype), center=False, normalized=False)
# There is a larger error due to the scaling of amplitude
original = original[..., :inverse.size(-1)]
self.assertEqual(inverse, original, atol=1e-3, rtol=0)
_test(amplitude=123, L=5, n=1)
_test(amplitude=150, L=5, n=2)
_test(amplitude=111, L=5, n=3)
_test(amplitude=160, L=7, n=4)
_test(amplitude=145, L=8, n=5)
_test(amplitude=80, L=9, n=6)
_test(amplitude=99, L=10, n=7)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double)
def test_istft_linearity(self, device, dtype):
num_trials = 100
def _test(data_size, kwargs):
for i in range(num_trials):
tensor1 = torch.randn(data_size, device=device, dtype=dtype)
tensor2 = torch.randn(data_size, device=device, dtype=dtype)
a, b = torch.rand(2, dtype=dtype, device=device)
# Also compare method vs. functional call signature
istft1 = tensor1.istft(**kwargs)
istft2 = tensor2.istft(**kwargs)
istft = a * istft1 + b * istft2
estimate = torch.istft(a * tensor1 + b * tensor2, **kwargs)
self.assertEqual(istft, estimate, atol=1e-5, rtol=0)
patterns = [
# hann_window, centered, normalized, onesided
(
(2, 7, 7, 2),
{
'n_fft': 12,
'window': torch.hann_window(12, device=device, dtype=dtype),
'center': True,
'normalized': True,
'onesided': True,
},
),
# hann_window, centered, not normalized, not onesided
(
(2, 12, 7, 2),
{
'n_fft': 12,
'window': torch.hann_window(12, device=device, dtype=dtype),
'center': True,
'normalized': False,
'onesided': False,
},
),
# hamming_window, centered, normalized, not onesided
(
(2, 12, 7, 2),
{
'n_fft': 12,
'window': torch.hamming_window(12, device=device, dtype=dtype),
'center': True,
'normalized': True,
'onesided': False,
},
),
# hamming_window, not centered, not normalized, onesided
(
(2, 7, 3, 2),
{
'n_fft': 12,
'window': torch.hamming_window(12, device=device, dtype=dtype),
'center': False,
'normalized': False,
'onesided': True,
},
)
]
for data_size, kwargs in patterns:
_test(data_size, kwargs)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
def test_batch_istft(self, device):
original = torch.tensor([
[[4., 0.], [4., 0.], [4., 0.], [4., 0.], [4., 0.]],
[[0., 0.], [0., 0.], [0., 0.], [0., 0.], [0., 0.]],
[[0., 0.], [0., 0.], [0., 0.], [0., 0.], [0., 0.]]
], device=device)
single = original.repeat(1, 1, 1, 1)
multi = original.repeat(4, 1, 1, 1)
i_original = torch.istft(original, n_fft=4, length=4)
i_single = torch.istft(single, n_fft=4, length=4)
i_multi = torch.istft(multi, n_fft=4, length=4)
self.assertEqual(i_original.repeat(1, 1), i_single, atol=1e-6, rtol=0, exact_dtype=True)
self.assertEqual(i_original.repeat(4, 1), i_multi, atol=1e-6, rtol=0, exact_dtype=True)
@onlyCUDA
@skipIf(not TEST_MKL, "Test requires MKL")
def test_stft_window_device(self, device):
# Test the (i)stft window must be on the same device as the input
x = torch.randn(1000, dtype=torch.complex64)
window = torch.randn(100, dtype=torch.complex64)
with self.assertRaisesRegex(RuntimeError, "stft input and window must be on the same device"):
torch.stft(x, n_fft=100, window=window.to(device))
with self.assertRaisesRegex(RuntimeError, "stft input and window must be on the same device"):
torch.stft(x.to(device), n_fft=100, window=window)
X = torch.stft(x, n_fft=100, window=window)
with self.assertRaisesRegex(RuntimeError, "istft input and window must be on the same device"):
torch.istft(X, n_fft=100, window=window.to(device))
with self.assertRaisesRegex(RuntimeError, "istft input and window must be on the same device"):
torch.istft(x.to(device), n_fft=100, window=window)
class FFTDocTestFinder:
'''The default doctest finder doesn't like that function.__module__ doesn't
match torch.fft. It assumes the functions are leaked imports.
'''
def __init__(self):
self.parser = doctest.DocTestParser()
def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
doctests = []
modname = name if name is not None else obj.__name__
globs = dict() if globs is None else globs
for fname in obj.__all__:
func = getattr(obj, fname)
if inspect.isroutine(func):
qualname = modname + '.' + fname
docstring = inspect.getdoc(func)
if docstring is None:
continue
examples = self.parser.get_doctest(
docstring, globs=globs, name=fname, filename=None, lineno=None)
doctests.append(examples)
return doctests
class TestFFTDocExamples(TestCase):
pass
def generate_doc_test(doc_test):
def test(self, device):
self.assertEqual(device, 'cpu')
runner = doctest.DocTestRunner()
runner.run(doc_test)
if runner.failures != 0:
runner.summarize()
self.fail('Doctest failed')
setattr(TestFFTDocExamples, 'test_' + doc_test.name, skipCPUIfNoFFT(test))
for doc_test in FFTDocTestFinder().find(torch.fft, globs=dict(torch=torch)):
generate_doc_test(doc_test)
instantiate_device_type_tests(TestFFT, globals())
instantiate_device_type_tests(TestFFTDocExamples, globals(), only_for='cpu')
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_spectral_ops.py
|
# Owner(s): ["module: autograd"]
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest, IS_WINDOWS
import subprocess
import tempfile
import os
import unittest
PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))
# This is a very simple smoke test for the functional autograd benchmarking script.
class TestFunctionalAutogradBenchmark(TestCase):
def _test_runner(self, model, disable_gpu=False):
# Note about windows:
        # The temporary file is held open exclusively by this process, so the child process
        # is not allowed to open it again. As this is a simple smoke test, we choose for now
        # not to run it on Windows and keep the code here simple.
with tempfile.NamedTemporaryFile() as out_file:
cmd = ['python3',
'../benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py']
# Only run the warmup
cmd += ['--num-iters', '0']
# Only run the vjp task (fastest one)
cmd += ['--task-filter', 'vjp']
# Only run the specified model
cmd += ['--model-filter', model]
# Output file
cmd += ['--output', out_file.name]
if disable_gpu:
cmd += ['--gpu', '-1']
res = subprocess.run(cmd)
self.assertTrue(res.returncode == 0)
# Check that something was written to the file
out_file.seek(0, os.SEEK_END)
self.assertTrue(out_file.tell() > 0)
@unittest.skipIf(IS_WINDOWS, "NamedTemporaryFile on windows does not have all the features we need.")
    @unittest.skipIf(PYTORCH_COLLECT_COVERAGE, "Can deadlock with gcov, see https://github.com/pytorch/pytorch/issues/49656")
def test_fast_tasks(self):
fast_tasks = ['resnet18', 'ppl_simple_reg', 'ppl_robust_reg', 'wav2letter',
'transformer', 'multiheadattn']
for task in fast_tasks:
self._test_runner(task)
@slowTest
@unittest.skipIf(IS_WINDOWS, "NamedTemporaryFile on windows does not have all the features we need.")
def test_slow_tasks(self):
slow_tasks = ['fcn_resnet', 'detr']
        # deepspeech is deliberately excluded as it takes too long to run without
        # proper tuning of the number of threads it should use.
for task in slow_tasks:
            # Disable GPU for slow tests as the CI GPUs don't have enough memory
self._test_runner(task, disable_gpu=True)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_functional_autograd_benchmark.py
|
# Owner(s): ["module: mkldnn"]
import itertools
import unittest
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
from test_tensorexpr import warmup_and_run_forward
FUSION_GROUP = 'prim::TensorExprGroup'
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnnFusion(JitTestCase):
def assertFused(self, graph, fused_patterns):
for pat in fused_patterns:
self.assertGraphContainsExactly(graph, pat, 0)
def _check_model(self, m, x):
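        # Temporarily force the TensorExpr fuser on CPU and disable fusion group inlining
        # so the fused graph can be inspected; the original JIT flags are restored below
        # before returning.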
old_fusion_inlining = torch._C._debug_get_fusion_group_inlining()
torch._C._debug_set_fusion_group_inlining(False)
old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()
torch._C._jit_override_can_fuse_on_cpu(True)
old_te_must_use_llvm_cpu = torch._C._jit_get_te_must_use_llvm_cpu()
torch._C._jit_set_te_must_use_llvm_cpu(False)
m.eval()
with torch.no_grad():
script = torch.jit.script(m)
script = torch.jit.freeze(script)
with torch.no_grad():
y = warmup_and_run_forward(script, x)
y = script(x)
y_ref = m(x)
graph = script.graph_for(*x)
self.assertEqual(y, y_ref)
torch._C._debug_set_fusion_group_inlining(old_fusion_inlining)
torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuser_state)
torch._C._jit_set_te_must_use_llvm_cpu(old_te_must_use_llvm_cpu)
return graph
def test_single_conv(self):
class M(nn.Module):
def __init__(self, in_channels, out_channels, bias, **kwargs):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=bias, **kwargs)
def forward(self, x):
res = self.conv(x)
return res
for memory_format, enabled in [
[torch.contiguous_format, False],
[torch.channels_last, True],
]:
input_size = 224
batch_size = 1
kernel_size = 3
options = itertools.product([True, False], [1, 2], [1, 4])
for bias, dilation, groups in options:
iC = 3 * groups
oC = 10 * groups
m = M(iC,
oC,
bias,
kernel_size=(kernel_size, kernel_size),
stride=2,
padding=1,
dilation=dilation,
groups=groups).to(memory_format=memory_format)
x = torch.randn(batch_size, iC, input_size, input_size).to(memory_format=memory_format)
graph = self._check_model(m, x)
if enabled:
self.assertFused(graph, ['aten::conv2d'])
self.assertGraphContainsExactly(graph, FUSION_GROUP, 1)
else:
self.assertGraphContains(graph, kind='aten::conv2d')
def test_conv_eltwise(self):
class M(nn.Module):
def __init__(self, eltwise_fn, in_channels, out_channels, bias, **kwargs):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=bias, **kwargs)
self.eltwise = eltwise_fn
def forward(self, x):
x = self.conv(x)
x = self.eltwise(x)
return x
for memory_format, enabled in [
[torch.contiguous_format, False],
[torch.channels_last, True],
]:
for eltwise_fn in [torch.relu]:
for bias in [True, False]:
for oC in [1, 10]:
m = M(eltwise_fn, 3, oC, bias, kernel_size=(3, 3)).to(memory_format=memory_format)
x = torch.randn(1, 3, 224, 224).to(memory_format=memory_format)
graph = self._check_model(m, x)
if enabled:
self.assertFused(graph, ['aten::conv2d', 'aten::' + eltwise_fn.__name__])
self.assertGraphContainsExactly(graph, FUSION_GROUP, 1)
else:
self.assertGraphContains(graph, kind='aten::conv2d')
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/test_mkldnn_fusion.py
|
# Owner(s): ["module: nestedtensor"]
import torch
import torch.nn
import unittest
from torch.testing._internal.common_device_type import (
dtypes,
dtypesIfCUDA,
instantiate_device_type_tests,
skipMeta,
onlyCPU
)
from torch.testing._internal.common_utils import TestCase, IS_FBCODE, run_tests, freeze_rng_state, parametrize, gradcheck
from torch import nested_tensor
# Tests are ported from pytorch/nestedtensor.
# This makes porting as_nested_tensor easier in the future.
def _iter_constructors():
# yield as_nested_tensor
yield nested_tensor
# Helper function to generate a pair of random nested tensors
# one is contiguous, the other is not, but they appear to have the same entries
# an output nested tensor consists of
# * `len(ragged_sizes)` matrices
# * matrices[i].shape == (20, ragged_sizes[i])
def random_nt_noncontiguous_pair(ragged_sizes, device="cpu", dtype=torch.float16):
xs = []
for size in ragged_sizes:
xs.append(torch.randn((size, 20), device=device, dtype=dtype))
# contiguous nested tensor
ys = []
for x in xs:
ys.append(x.transpose(-1, -2))
nt_contiguous = torch.nested_tensor(ys)
# noncontiguous nested tensor
n = len(ragged_sizes)
nt_noncontiguous = torch.nested_tensor(xs).transpose(-1, -2)
return nt_contiguous, nt_noncontiguous
# Helper function to pad a noncontiguous nested tensor
# can be replaced once to_padded_tensor supports noncontiguous memory
def noncontiguous_to_padded_tensor(input, shape=None):
tensors = input.unbind()
ntensors = len(tensors)
assert ntensors > 0
if shape is None:
shape = []
for size in tensors[0].shape:
shape.append(size)
for i in range(1, ntensors):
new_shape = tensors[i].shape
for j in range(len(shape)):
shape[j] = max(shape[j], new_shape[j])
shape = [ntensors] + shape
result = tensors[0].new_zeros(shape)
for itensor in range(ntensors):
tensor = tensors[itensor]
view = result[itensor]
for idim in range(tensor.dim()):
view = view.narrow(idim, 0, tensor.size(idim))
view.copy_(tensor)
return result
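# Illustrative sketch (not a test and never called): how the two helpers above are meant
# to be combined. The shapes follow the helper defaults in this file (each component
# matrix is 20 x ragged_size); this is only a usage example, not part of the tested API.
def _example_pad_noncontiguous_pair():
    # contiguous / noncontiguous nested tensors holding the same entries
    nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3))
    # both pad to a dense tensor of shape (2, 20, 3)
    padded_from_contiguous = nt_contiguous.to_padded_tensor(0.0)
    padded_from_noncontiguous = noncontiguous_to_padded_tensor(nt_noncontiguous)
    return padded_from_contiguous, padded_from_noncontiguous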
class TestNestedTensor(TestCase):
@torch.inference_mode()
def _test_unbind_case(self, a, b):
nt = nested_tensor([a, b])
a1, b1 = nt.unbind()
self.assertTrue(a is not a1)
self.assertTrue(b is not b1)
nt = nested_tensor([a, b], dtype=a.dtype)
a1, b1 = nt.unbind(0)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
a = torch.randn((2, 3)).add_(1)
nt = nested_tensor([a])
self.assertEqual(a, nt.unbind(0)[0])
@torch.inference_mode()
def test_unbind_0(self):
self._test_unbind_case(
torch.tensor([1, 2]), torch.tensor([7, 8]),
)
@torch.inference_mode()
def test_unbind_1(self):
self._test_unbind_case(
torch.tensor([1]), torch.tensor([7]),
)
# @torch.inference_mode()
# def test_unbind_2(self):
# self._test_unbind_case(
# torch.tensor(1), torch.tensor(7),
# )
@torch.inference_mode()
def test_unbind_3(self):
self._test_unbind_case(
torch.tensor([1.0]), torch.tensor([]),
)
@torch.inference_mode()
def test_unbind_4(self):
self._test_unbind_case(
torch.tensor([]), torch.tensor([]),
)
@torch.inference_mode()
def test_unbind_dim(self):
def _test_fn(unbind_fn):
a = torch.rand(3, 2)
b = torch.rand(2, 3)
nt = nested_tensor([a, b])
self.assertRaises(RuntimeError, lambda: unbind_fn(nt, 1))
# Both of these tests are necessary, because we're using
# torch_function.
_test_fn(lambda x, dim: x.unbind(dim))
# TODO: Re-enable this once using torch_dispatch
# _test_fn(lambda x, dim: torch.unbind(x, dim))
@torch.inference_mode()
def test_nested_tensor(self):
self.assertRaises(TypeError, lambda: nested_tensor([3.0]))
self.assertRaises(TypeError, lambda: nested_tensor(torch.tensor([3.0])))
self.assertRaises(TypeError, lambda: nested_tensor(4.0))
@torch.inference_mode()
def test_nested_tensor_matching_dim(self):
self.assertRaisesRegex(
RuntimeError,
"Found dimension 1 for Tensor at index 1 and dimension 0 for Tensor at index 0.",
lambda: nested_tensor([torch.tensor(1.0), torch.tensor([])]),
)
self.assertRaisesRegex(
RuntimeError,
"Found dimension 1 for Tensor at index 2 and dimension 0 for Tensor at index 1.",
lambda: nested_tensor(
[torch.tensor(1.0), torch.tensor(2.0), torch.tensor([])]
),
)
@torch.inference_mode()
def test_default_nested_tensor(self):
self.assertRaises(TypeError, lambda: nested_tensor())
default_nested_tensor = nested_tensor([])
default_tensor = torch.tensor([])
# self.assertEqual(default_nested_tensor.nested_dim(), 1)
# self.assertEqual(default_nested_tensor.nested_size(), ())
self.assertEqual(default_nested_tensor.dim(), default_tensor.dim())
self.assertEqual(default_nested_tensor.layout, default_tensor.layout)
self.assertEqual(default_nested_tensor.device, default_tensor.device)
self.assertEqual(default_nested_tensor.dtype, default_tensor.dtype)
self.assertEqual(
default_nested_tensor.requires_grad, default_tensor.requires_grad
)
self.assertIsNone(default_tensor.grad)
# TODO: Re-enable once we have a performance driven
# use case and implementation.
# self.assertEqual(default_nested_tensor.is_pinned(),
# default_tensor.is_pinned())
@torch.inference_mode()
def test_dim(self):
for constructor in _iter_constructors():
a1 = constructor([])
self.assertEqual(a1.dim(), 1)
a1 = constructor([torch.tensor(3.0)])
self.assertEqual(a1.dim(), 1)
a1 = constructor([torch.tensor([1, 2, 3, 4])])
self.assertEqual(a1.dim(), 2)
@unittest.skipIf(IS_FBCODE, "numel is not virtual in fbcode.")
@torch.inference_mode()
def test_numel(self):
for constructor in _iter_constructors():
a1 = constructor([])
self.assertEqual(a1.numel(), 0)
a1 = constructor([torch.tensor(3.0), torch.tensor(4.0)])
self.assertEqual(a1.numel(), 2)
a1 = constructor([torch.randn(2, 2, 2)])
self.assertEqual(a1.numel(), 8)
a1 = constructor([torch.randn([1, 2, 3]), torch.randn(3, 2, 1)])
self.assertEqual(a1.numel(), 12)
a1 = constructor([torch.randn([1, 1, 3]), torch.randn(3, 2, 4)])
self.assertEqual(a1.numel(), 27)
a1 = constructor([torch.randn([5, 5, 5]), torch.randn(6, 6, 6)])
self.assertEqual(a1.numel(), 341)
# Interesting edge case
a1 = constructor([torch.randn([1, 2, 3]), torch.randn(1, 2, 0)])
self.assertEqual(a1.numel(), 6)
@torch.inference_mode()
def test_size(self):
for constructor in _iter_constructors():
a1 = constructor([])
self.assertRaisesRegex(
RuntimeError,
"Tensors of type NestedTensorImpl do not have sym sizes"
if IS_FBCODE
else "NestedTensorImpl doesn't support sizes",
lambda: a1.size(),
)
@unittest.skipIf(IS_FBCODE, "stride is not virtual in fbcode.")
@torch.inference_mode()
def test_stride(self):
for constructor in _iter_constructors():
a1 = constructor([])
self.assertRaisesRegex(
RuntimeError,
"NestedTensorImpl doesn't support strides",
lambda: a1.stride(),
)
@unittest.skipIf(IS_FBCODE, "is_contiguous is not virtual in fbcode.")
@torch.inference_mode()
def test_is_contiguous(self):
# Test empty case
nt_empty = torch.nested_tensor([])
assert nt_empty.is_contiguous()
self.assertEqual(nt_empty, nt_empty.contiguous())
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3, 6, 7))
# Test contiguous case
assert nt_contiguous.is_contiguous()
self.assertEqual(nt_contiguous, nt_contiguous.contiguous())
# Test non_contiguous case
assert not nt_noncontiguous.is_contiguous()
self.assertRaisesRegex(
RuntimeError,
r"clone_nested only supports memory format Preserve, but got Contiguous instead.",
lambda: nt_noncontiguous.contiguous()
)
@torch.inference_mode()
def test_repr_string(self):
a = nested_tensor([])
expected = "nested_tensor([" "\n\n])"
self.assertEqual(str(a), expected)
self.assertEqual(repr(a), expected)
a = nested_tensor([torch.tensor(1.0)])
expected = "nested_tensor([" "\n tensor(1.)" "\n])"
self.assertEqual(str(a), expected)
self.assertEqual(repr(a), expected)
a = nested_tensor([torch.tensor([[1, 2]]), torch.tensor([[4, 5]])])
expected = (
"nested_tensor([" "\n tensor([[1, 2]])" "," "\n tensor([[4, 5]])" "\n])"
)
self.assertEqual(str(a), expected)
self.assertEqual(repr(a), expected)
@torch.inference_mode()
def test_activations(self):
for func in (torch.nn.functional.relu, torch.nn.functional.relu_, torch.nn.functional.gelu, torch._C._nn.gelu_):
t = torch.tensor([-1, 0, 1], dtype=torch.float)
nt = nested_tensor([t])
nested_result = func(nt)
self.assertTrue(nested_result.is_nested)
self.assertEqual(func(t), nested_result.unbind()[0])
def test_to_padded_tensor_on_empty_tensor(self):
nt = torch.nested_tensor([])
empty = nt.to_padded_tensor(4)
self.assertEqual(empty, torch.tensor([]))
class TestNestedTensorDeviceType(TestCase):
# Helper function to generate a random nested tensor
def random_nt(self, device, dtype, num_tensors, max_dims, min_dims=None):
if min_dims is None:
min_dims = tuple([0] * len(max_dims))
ts1 = []
for _ in range(num_tensors):
tensor_dims = tuple([torch.randint(low=min_dim, high=max_dim, size=(1,)).item()
for (min_dim, max_dim) in zip(min_dims, max_dims)])
t1 = torch.randn(tensor_dims, device=device, dtype=dtype)
ts1.append(t1)
return torch.nested_tensor(ts1, device=device, dtype=dtype)
# Helper function to generate a pair of random nested tensors
    # the 2 nested tensors have the same shapes
def random_nt_pair(self, device, dtype, num_tensors, max_dims):
ts1 = []
ts2 = []
for _ in range(num_tensors):
tensor_dims = tuple([torch.randint(low=0, high=max_dim, size=(1,)).item() for max_dim in max_dims])
t1 = torch.randn(tensor_dims, device=device, dtype=dtype)
t2 = torch.randn(tensor_dims, device=device, dtype=dtype)
ts1.append(t1)
ts2.append(t2)
return (torch.nested_tensor(ts1, device=device, dtype=dtype),
torch.nested_tensor(ts2, device=device, dtype=dtype))
@dtypes(torch.float, torch.float16, torch.double)
def test_unbind_noncontiguous(self, device, dtype):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3, 6, 7), device, dtype)
ub_contiguous = nt_contiguous.unbind()
ub_noncontiguous = nt_noncontiguous.unbind()
self.assertEqual(len(ub_contiguous), len(ub_noncontiguous))
n = len(ub_contiguous)
for i in range(n):
self.assertEqual(ub_contiguous[i], ub_noncontiguous[i])
@dtypes(torch.float)
@skipMeta
def test_to_then_from_padded_tensor_no_transform0213(self, device, dtype):
t = torch.randn(4, 4, 4, device=device, dtype=dtype)
ts = list(torch.unbind(t))
ts[0] = ts[0][:-1]
nt = torch.nested_tensor(ts, device=device, dtype=dtype)
padded = nt.to_padded_tensor(0)
nt_to = torch._nested_from_padded_and_nested_example(padded, nt)
for (t1, t2) in zip(nt.unbind(), nt_to.unbind()):
self.assertEqual(t1, t2)
self.assertEqual(nt.device, nt_to.device)
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.half)
@skipMeta
@torch.inference_mode()
def test_layer_norm(self, device, dtype):
def _test(size):
t0 = torch.randn(2, size, device=device, dtype=dtype, requires_grad=False)
t1 = torch.randn(2, size, device=device, dtype=dtype, requires_grad=False)
ts = [t0, t1, t0, t1]
nt = torch.nested_tensor(ts, device=device, dtype=dtype)
layer_norm = torch.nn.LayerNorm(size, device=device, dtype=dtype)
nt_result = nt._nested_tensor_layer_norm(
layer_norm.weight, layer_norm.bias, 1e-5
)
for (nt_subresult, t) in zip(nt_result.unbind(), ts):
t_result = layer_norm(t.reshape(1, -1, size).squeeze(0))
self.assertEqual(nt_subresult, t_result)
for size in (1024, 1023, 513, 512, 256, 128, 2, 4, 32):
_test(size)
@skipMeta
@torch.inference_mode()
def test_embedding(self, device):
inputs = [
torch.randint(100, (L,), device=device, dtype=torch.int64)
for L in torch.randint(5, 50, (8,))
]
x = torch.nested_tensor(inputs, device=device, dtype=torch.int64)
emb = torch.nn.Embedding(100, 8, device=device)
y = emb(x)
ys = y.unbind()
for i, inp in enumerate(inputs):
self.assertEqual(emb(inp), ys[i])
@dtypes(torch.float, torch.float16)
def test_to_padded_tensor_simple(self, device, dtype):
t = torch.randn(4, 4, 4, device=device, dtype=dtype)
ts = list(torch.unbind(t))
ts[0] = ts[0][:-1]
nt = torch.nested_tensor(ts, device=device, dtype=dtype)
for padding_value in (0, 1):
padded = nt.to_padded_tensor(padding_value)
correct_output = t.clone()
if padding_value == 0:
correct_output[0][-1] = torch.zeros_like(correct_output[0][-1])
else:
correct_output[0][-1] = torch.ones_like(correct_output[0][-1])
self.assertEqual(padded, correct_output)
self.assertEqual(padded.device, torch.device(device))
self.assertEqual(padded.dtype, dtype)
@dtypes(torch.float, torch.float16)
def test_to_padded_tensor_output_size(self, device, dtype):
t = torch.randn(4, 4, 4, device=device, dtype=dtype)
output_size = (4, 6, 5)
ts = list(torch.unbind(t))
ts[0] = ts[0][:-1]
nt = torch.nested_tensor(ts, device=device, dtype=dtype)
for padding_value in (0, 1):
padded = nt.to_padded_tensor(padding_value, output_size=output_size)
correct_output = torch.ones(output_size, device=device, dtype=dtype) * padding_value
correct_output[:4:, :4, :4] = t.clone()
if padding_value == 0:
correct_output[0][3] = torch.zeros_like(correct_output[0][3])
else:
correct_output[0][3] = torch.ones_like(correct_output[0][3])
self.assertEqual(padded, correct_output)
self.assertEqual(padded.device, torch.device(device))
self.assertEqual(padded.dtype, dtype)
@dtypes(torch.float, torch.float16, torch.double)
def test_to_padded_tensor_dim2(self, device, dtype):
ts = [
torch.randn(160, device=device, dtype=dtype),
torch.randn(1240, device=device, dtype=dtype),
torch.randn(2400, device=device, dtype=dtype),
]
nt = torch.nested_tensor(ts, device=device, dtype=dtype)
pad = 42
correct_output = []
for t in ts:
next_output = torch.ones_like(ts[2]) * pad
correct_output.append(next_output)
next_output[:t.size(0)].copy_(t)
correct_output = torch.stack(correct_output)
padded = nt.to_padded_tensor(pad)
self.assertEqual(padded, correct_output)
@dtypes(torch.float, torch.float16, torch.double)
def test_to_padded_tensor_dim3(self, device, dtype):
ts = [
torch.randn(16, 21, device=device, dtype=dtype),
torch.randn(24, 32, device=device, dtype=dtype),
torch.randn(40, 53, device=device, dtype=dtype),
]
nt = torch.nested_tensor(ts, device=device, dtype=dtype)
pad = 42
correct_output = []
for t in ts:
next_output = torch.ones_like(ts[2]) * pad
correct_output.append(next_output)
next_output[:t.size(0), :t.size(1)].copy_(t)
correct_output = torch.stack(correct_output)
padded = nt.to_padded_tensor(pad)
self.assertEqual(padded, correct_output)
@dtypes(torch.float, torch.float16, torch.double)
def test_to_padded_tensor_dim4(self, device, dtype):
ts = [
torch.randn(16, 21, 13, device=device, dtype=dtype),
torch.randn(24, 32, 14, device=device, dtype=dtype),
torch.randn(40, 53, 16, device=device, dtype=dtype),
]
nt = torch.nested_tensor(ts, device=device, dtype=dtype)
pad = 42
correct_output = []
for t in ts:
next_output = torch.ones_like(ts[2]) * pad
correct_output.append(next_output)
next_output[:t.size(0), :t.size(1), :t.size(2)].copy_(t)
correct_output = torch.stack(correct_output)
padded = nt.to_padded_tensor(pad)
self.assertEqual(padded, correct_output)
# TODO: test noncontiguous to_padded_tensor
# For now this tests the functionality of noncontiguous_to_padded_tensor
# and the error message of to_padded_tensor
# since to_padded_tensor does not support noncontiguous buffer yet
@dtypes(torch.float, torch.float16, torch.double)
@torch.inference_mode()
def test_to_padded_tensor_noncontiguous(self, device, dtype):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3, 6, 7), device, dtype)
# test noncontiguous_to_padded_tensor functionality
self.assertEqual(
nt_contiguous.to_padded_tensor(0.0),
noncontiguous_to_padded_tensor(nt_noncontiguous))
# test to_padded_tensor error message
self.assertRaisesRegex(
RuntimeError,
r"for now to_padded_tensor only supports contiguous nested tensor",
lambda: nt_noncontiguous.to_padded_tensor(0.0)
)
@skipMeta
def test_device_checks(self, device):
nt = torch.nested_tensor([], device=device)
is_cuda = 'cuda' in str(device)
self.assertEqual(nt.is_cuda, is_cuda)
@dtypes(torch.float, torch.float16, torch.double)
def test_nested_tensor_indexing(self, device, dtype):
# edge case: empty nested tensor
nt0 = torch.nested_tensor([])
self.assertRaises(IndexError, lambda: nt0[0])
# normal case
x0 = torch.randn((2, 5), device=device, dtype=dtype)
x1 = torch.randn((3, 4), device=device, dtype=dtype)
nt = torch.nested_tensor([x0, x1])
# single index: only support integer in the batch dimension
self.assertEqual(nt[0], x0)
self.assertEqual(nt[-1], x1)
self.assertRaises(IndexError, lambda: nt[2])
self.assertRaises(IndexError, lambda: nt[-3])
self.assertRaises(NotImplementedError, lambda: nt[:])
self.assertRaises(NotImplementedError, lambda: nt[None])
self.assertRaises(NotImplementedError, lambda: nt[...])
# tuple of indices: only support integer in the batch dimension
# + all possible indexing in the original tensor dimensions
self.assertEqual(nt[0, 0, 0], x0[0, 0])
self.assertEqual(nt[0, 1, :], x0[1, :])
self.assertEqual(nt[1, ...], x1)
self.assertRaises(IndexError, lambda: nt[1, 4, 2])
self.assertRaises(NotImplementedError, lambda: nt[:, 1, 1])
# make sure indexing returns a view
nt[0].fill_(100.0)
answer = torch.tensor(100.0, device=device, dtype=dtype).expand((2, 5))
self.assertEqual(nt[0], answer)
nt[1, 1, :].fill_(200.0)
answer = torch.tensor(200.0, device=device, dtype=dtype).expand(4)
self.assertEqual(nt[1, 1, :], answer)
@dtypes(torch.float, torch.float16, torch.double)
@torch.inference_mode()
def test_nested_tensor_indexing_noncontiguous(self, device, dtype):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3, 6, 7), device, dtype)
self.assertEqual(nt_contiguous.size(0), nt_noncontiguous.size(0))
n = nt_contiguous.size(0)
for i in range(n):
self.assertEqual(nt_contiguous[i], nt_noncontiguous[i])
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
def test_nested_tensor_add(self, device, dtype):
(nt1, nt2) = self.random_nt_pair(device, dtype, 4, (4, 4))
ref = torch.nested_tensor([t1 + t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())])
out = nt1 + nt2
self.assertEqual(ref, out)
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
def test_nested_tensor_mul(self, device, dtype):
# nested tensor * nested tensor
(nt1, nt2) = self.random_nt_pair(device, dtype, 4, (4, 4))
ref = torch.nested_tensor([t1 * t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())])
out = nt1 * nt2
self.assertEqual(ref, out)
# nested tensor * scalar
number = 10.0
scalar = torch.tensor(number).to(dtype).to(device)
ref = torch.nested_tensor([t * number for t in nt1.unbind()])
out_number0 = nt1 * number
out_number1 = number * nt1
out_scalar0 = nt1 * scalar
out_scalar1 = scalar * nt1
self.assertEqual(out_number0, ref)
self.assertEqual(out_number1, ref)
self.assertEqual(out_scalar0, ref)
self.assertEqual(out_scalar1, ref)
# error case: numel == 1 but dim > 0
vector = torch.tensor([number]).to(dtype).to(device)
self.assertRaisesRegex(
RuntimeError,
"Expected both self and other to be nested, but got a nested self and non-nested other",
lambda: nt1.mul(vector)
)
self.assertRaisesRegex(
RuntimeError,
"Expected both self and other to be nested, but got a non-nested self and nested other",
lambda: vector.mul(nt1)
)
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
def test_nested_tensor_add_in_place(self, device, dtype):
(nt1, nt2) = self.random_nt_pair(device, dtype, 4, (4, 4))
ref = torch.nested_tensor([t1 + t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())])
nt1 += nt2
self.assertEqual(ref, nt1)
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
def test_nested_tensor_mul_in_place(self, device, dtype):
# nested tensor * nested tensor
(nt1, nt2) = self.random_nt_pair(device, dtype, 4, (4, 4))
ref = torch.nested_tensor([t1 * t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())])
nt1 *= nt2
self.assertEqual(ref, nt1)
# nested tensor * scalar
number = 10.0
scalar = torch.tensor(number).to(dtype).to(device)
ref = torch.nested_tensor([t * number for t in nt1.unbind()])
out_number = nt1.clone()
out_number *= number
out_scalar = nt1.clone()
out_scalar *= scalar
self.assertEqual(out_number, ref)
self.assertEqual(out_scalar, ref)
self.assertRaisesRegex(
RuntimeError,
r"output with shape \[.*\] doesn't match the broadcast shape \[.*\]",
lambda: scalar.mul_(nt1)
)
# error case: numel == 1 but dim > 0
vector = torch.tensor([number]).to(dtype).to(device)
self.assertRaisesRegex(
RuntimeError,
"Expected both self and other to be nested, but got a nested self and non-nested other",
lambda: nt1.mul_(vector)
)
self.assertRaisesRegex(
RuntimeError,
"Expected both self and other to be nested, but got a non-nested self and nested other",
lambda: vector.mul_(nt1)
)
@onlyCPU
@skipMeta
@dtypes(torch.float)
def test_nested_tensor_sum_dim(self, device, dtype):
        params = ((2, (1, 1)), (4, (4, 4)), (10, (3, 5, 7)))
def test_sum(nt, dim, keepdim=True):
nt2 = nt.clone()
nt = nt.sum(dim=dim, keepdim=keepdim)
ub2 = nt2.unbind()
ub2 = [t.sum(-1, keepdim=keepdim) for t in ub2]
nt2 = torch.nested_tensor(ub2)
self.assertEqual(nt, nt2)
return
for ntensors, max_sizes in params:
test_sum(self.random_nt(device, dtype, ntensors, max_sizes), len(max_sizes))
# Test error inputs
with self.assertRaisesRegex(RuntimeError, "NestedTensor can only be reduced across the last"):
torch.nested_tensor([torch.tensor([3, 4, 5]), torch.tensor([1, 2])]).sum(0, keepdim=True)
with self.assertRaisesRegex(RuntimeError, "NestedTensor only allows reduction of a single"):
torch.nested_tensor([torch.tensor([[3, 4, 5]]), torch.tensor([[1, 2]])]).sum([0, 1], keepdim=True)
with self.assertRaisesRegex(RuntimeError, "NestedTensor always requires keepdim=True for now."):
torch.nested_tensor([torch.tensor([3, 4, 5]), torch.tensor([1, 2])]).sum(-1)
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
def test_clone(self, device, dtype):
nt1 = self.random_nt(device, dtype, 4, (4, 4), (1, 1))
nt2 = nt1.clone()
# Verify the values match
self.assertEqual(nt1, nt2)
# Verify modifying nt2 doesn't affect nt1
nt2.mul_(nt1)
ub1 = nt1.unbind()
ub2 = nt2.unbind()
for i in range(len(ub1)):
self.assertNotEqual(ub1[i], ub2[i])
nt1.clone(memory_format=torch.preserve_format)
msg = "clone_nested only supports memory format Preserve, but got ChannelsLast instead."
with self.assertRaisesRegex(RuntimeError, msg):
nt1.clone(memory_format=torch.channels_last)
# cannot test torch.float16 because: RuntimeError: "bernoulli_scalar_cpu_" not implemented for 'Half'
@dtypes(torch.float, torch.double)
@torch.inference_mode()
def test_dropout(self, device, dtype):
# edge case: empty nested tensor
nt0 = torch.nested_tensor([])
y = torch.nn.functional.dropout(nt0, 0.5)
self.assertEqual(nt0, y)
# normal nested tensor
ntensors = 4
nt = self.random_nt(device, dtype, ntensors, (4, 4))
# edge case: invalid dropout
self.assertRaises(ValueError, lambda: torch.nn.Dropout(-0.1))
self.assertRaises(ValueError, lambda: torch.nn.Dropout(1.1))
self.assertRaises(ValueError, lambda: torch.nn.functional.dropout(nt, -0.1))
self.assertRaises(ValueError, lambda: torch.nn.functional.dropout(nt, 1.1))
# edge case: no dropout
dropouter = torch.nn.Dropout(0.0)
y0 = dropouter(nt)
y1 = torch.nn.functional.dropout(nt, 0.0)
self.assertEqual(nt, y0)
self.assertEqual(nt, y1)
# edge case: all dropout
dropouter = torch.nn.Dropout(1.0)
y0 = dropouter(nt)
y1 = torch.nn.functional.dropout(nt, 1.0)
nt0 = nt.clone()
for i in range(ntensors):
nt0[i].fill_(0.0)
self.assertEqual(nt0, y0)
self.assertEqual(nt0, y1)
# normal case: normal dropout
p = 0.2
y = torch.nn.functional.dropout(nt, p)
expect = nt.clone()
for i in range(ntensors):
actual_tensor = y[i].view(-1)
expect_tensor = expect[i].view(-1)
for j in range(actual_tensor.shape[0]):
if actual_tensor[j].item() == 0.0:
expect_tensor[j] = 0.0
else:
expect_tensor[j] /= 1.0 - p
self.assertEqual(y, expect)
with freeze_rng_state():
dropouter = torch.nn.Dropout(p)
y0 = dropouter(nt)
with freeze_rng_state():
y1 = torch.nn.functional.dropout(nt, p)
self.assertEqual(y0, y1)
# inplace
# in principle, since we have established the correctness of functional, we could simply compare inplace vs functional
# in practice, cuda functional has its own implementation to skip `bernoulli_`
# so cuda functional will differ from cuda inplace causing test failure
# in `test_dropout_cuda_float64 (__main__.TestNestedTensorDeviceTypeCUDA)`
# on `linux-xenial-cuda11.3-py3.7-gcc7 / test (default, 2, 4, linux.4xlarge.nvidia.gpu)`
expect = nt.clone()
torch.nn.functional.dropout(nt, p, inplace=True)
for i in range(ntensors):
actual_tensor = nt[i].view(-1)
expect_tensor = expect[i].view(-1)
for j in range(actual_tensor.shape[0]):
if actual_tensor[j].item() == 0.0:
expect_tensor[j] = 0.0
else:
expect_tensor[j] /= 1.0 - p
self.assertEqual(nt, expect)
# dropout works directly on the underlying buffer memory
# so contiguous / noncontiguous does not make any difference
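    # Illustrative sketch (assumption, not from the original suite): the dropout
    # checks above divide surviving elements by (1 - p) because PyTorch uses
    # "inverted" dropout, which rescales kept values at train time so the
    # expected activation is unchanged.
    @staticmethod
    def _example_inverted_dropout_scaling(p=0.25):
        x = torch.ones(1000)
        y = torch.nn.functional.dropout(x, p)
        kept = y[y != 0.0]
        # every surviving element is exactly 1 / (1 - p)
        assert torch.allclose(kept, torch.full_like(kept, 1.0 / (1.0 - p)))
        return y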
# cannot test torch.float16 because: RuntimeError: "softmax_kernel_impl" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_softmax(self, device, dtype):
# normal nested tensor
ntensors = 4
nt = self.random_nt(device, dtype, ntensors, (4, 4))
# error case: softmax across nested dimension
self.assertRaisesRegex(
RuntimeError,
"Cannot apply softmax across nested dimension 0",
lambda: torch.nn.functional.softmax(nt, 0)
)
self.assertRaisesRegex(
RuntimeError,
"Cannot apply softmax across nested dimension 0",
lambda: torch.nn.functional.softmax(nt, -3)
)
# error case: dimension out of range
self.assertRaises(IndexError, lambda: torch.nn.functional.softmax(nt, 3))
self.assertRaises(IndexError, lambda: torch.nn.functional.softmax(nt, -4))
# normal case: should equal to padding -inf
softmaxer = torch.nn.Softmax(1)
y0 = softmaxer(nt)
y1 = torch.nn.functional.softmax(nt, 1)
self.assertEqual(y0, y1)
pt = nt.to_padded_tensor(float("-inf"))
# if an entire slice is padded, then softmax will return 0.0 / 0.0 = nan
# however, physically speaking that should be 0.0
expect = torch.nn.functional.softmax(pt, 1).nan_to_num_(0.0)
self.assertEqual(y0.to_padded_tensor(0.0), expect)
# edge case: empty nested tensor
nt0 = torch.nested_tensor([])
y = torch.nn.functional.softmax(nt0, 1)
self.assertEqual(nt0, y)
# edge case: nesting scalars
nt1 = torch.nested_tensor([torch.tensor(0.0), torch.tensor(1.0)])
self.assertRaises(RuntimeError, lambda: torch.nn.functional.softmax(nt1, 0))
self.assertRaises(IndexError, lambda: torch.nn.functional.softmax(nt1, 1))
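    # Illustrative sketch (assumption, not from the original suite): a slice that
    # is entirely -inf padding softmaxes to exp(-inf) / sum(exp(-inf)) = 0 / 0 =
    # nan, which is why the padded reference above applies nan_to_num_(0.0).
    @staticmethod
    def _example_softmax_all_padding_row():
        row = torch.full((3,), float("-inf"))
        out = torch.nn.functional.softmax(row, dim=0)
        assert torch.isnan(out).all()
        return out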
@dtypes(torch.float, torch.double)
@torch.inference_mode()
def test_softmax_noncontiguous(self, device, dtype):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3, 6, 7), device, dtype)
self.assertEqual(
torch.nn.functional.softmax(nt_contiguous, -1),
torch.nn.functional.softmax(nt_noncontiguous, -1))
# cannot test torch.float16 because: RuntimeError: "addmm_impl_cpu_" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_bmm(self, device, dtype):
# error case: one is nested but the other is not
nt = torch.nested_tensor([torch.randn(2), torch.randn(3)], device=device, dtype=dtype)
t = torch.randn(4, device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"Expected both to be nested, but got a nested self and non-nested other",
lambda: nt.bmm(t)
)
self.assertRaisesRegex(
RuntimeError,
"Expected both to be nested, but got a non-nested self and nested other",
lambda: t.bmm(nt)
)
# error case: not 3D tensors
nt0 = torch.nested_tensor([], device=device, dtype=dtype)
nt1 = torch.nested_tensor([torch.randn(2), torch.randn(3)], device=device, dtype=dtype)
nt2 = torch.nested_tensor([torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"batch1 must be a 3D tensor",
lambda: nt0.bmm(nt0)
)
self.assertRaisesRegex(
RuntimeError,
"batch1 must be a 3D tensor",
lambda: nt0.bmm(nt1)
)
self.assertRaisesRegex(
RuntimeError,
"batch1 must be a 3D tensor",
lambda: nt0.bmm(nt2)
)
self.assertRaisesRegex(
RuntimeError,
"batch1 must be a 3D tensor",
lambda: nt1.bmm(nt0)
)
self.assertRaisesRegex(
RuntimeError,
"batch1 must be a 3D tensor",
lambda: nt1.bmm(nt1)
)
self.assertRaisesRegex(
RuntimeError,
"batch1 must be a 3D tensor",
lambda: nt1.bmm(nt2)
)
self.assertRaisesRegex(
RuntimeError,
"batch2 must be a 3D tensor",
lambda: nt2.bmm(nt0)
)
self.assertRaisesRegex(
RuntimeError,
"batch2 must be a 3D tensor",
lambda: nt2.bmm(nt1)
)
# error case: incompatible batch size
nt0 = torch.nested_tensor([torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype)
nt1 = torch.nested_tensor([torch.randn((4, 6)),
torch.randn((4, 5)),
torch.randn((4, 7))],
device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"Expected size for the 1st dimension of batch2 tensor to be: 2 but got: 3.",
lambda: nt0.bmm(nt1)
)
self.assertRaisesRegex(
RuntimeError,
"Expected size for the 1st dimension of batch2 tensor to be: 3 but got: 2.",
lambda: nt1.bmm(nt0)
)
# error case: underlying matrices cannot be multiplied
nt0 = torch.nested_tensor([torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
r"0-th nested matrices in batch cannot be multiplied \(2x4 and 2x4\)",
lambda: nt0.bmm(nt0)
)
# normal nested tensor
nt0 = torch.nested_tensor([torch.randn((2, 4)), torch.randn((3, 7))], device=device, dtype=dtype)
nt1 = torch.nested_tensor([torch.randn((4, 6)), torch.randn((7, 5))], device=device, dtype=dtype)
actual = nt0.bmm(nt1).to_padded_tensor(0.0)
expect = nt0.to_padded_tensor(0.0).bmm(nt1.to_padded_tensor(0.0))
self.assertEqual(actual, expect)
# cannot test torch.float16 because: RuntimeError: "addmm_impl_cpu_" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_bmm_noncontiguous(self, device, dtype):
nt0_contiguous, nt0_noncontiguous = random_nt_noncontiguous_pair((2, 3), device, dtype)
nt1_contiguous, nt1_noncontiguous = random_nt_noncontiguous_pair((6, 7), device, dtype)
self.assertEqual(
nt0_contiguous.transpose(-1, -2).bmm(nt1_contiguous),
nt0_noncontiguous.transpose(-1, -2).bmm(nt1_noncontiguous))
# cannot test torch.float16 because: RuntimeError: "bmm" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_matmul(self, device, dtype):
# error case: one is nested but the other is not
nt = torch.nested_tensor([torch.randn(2), torch.randn(3)], device=device, dtype=dtype)
t = torch.randn(4, device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"Expected both to be nested, but got a nested self and non-nested other",
lambda: torch.matmul(nt, t)
)
self.assertRaisesRegex(
RuntimeError,
"Expected both to be nested, but got a non-nested self and nested other",
lambda: torch.matmul(t, nt)
)
# error case: not 3+D tensors
nt0 = torch.nested_tensor([], device=device, dtype=dtype)
nt1 = torch.nested_tensor([torch.randn(2), torch.randn(3)], device=device, dtype=dtype)
nt2 = torch.nested_tensor([torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt0, nt0)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt0, nt1)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt0, nt2)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt1, nt0)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt1, nt1)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt1, nt2)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 2nd input has rank: [0-9]+",
lambda: torch.matmul(nt2, nt0)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 2nd input has rank: [0-9]+",
lambda: torch.matmul(nt2, nt1)
)
# error case: incompatible batch size
nt0 = torch.nested_tensor([torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype)
nt1 = torch.nested_tensor([torch.randn((4, 6)),
torch.randn((4, 5)),
torch.randn((4, 7))],
device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
r"matmul: Expected size for the 1st dimension of 2nd input tensor to be: [0-9]+ but got: [0-9]+.",
lambda: torch.matmul(nt0, nt1)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: Expected size for the 1st dimension of 2nd input tensor to be: [0-9]+ but got: [0-9]+.",
lambda: torch.matmul(nt1, nt0)
)
# error case: incompatible generalized batch size
nt0 = torch.nested_tensor([torch.randn((2, 2, 4)),
torch.randn((2, 3, 4))],
device=device, dtype=dtype)
nt1 = torch.nested_tensor([torch.randn((3, 4, 6)),
torch.randn((3, 4, 5))],
device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, no broadcasting is currently performed: "
r"[0-9]+-th nested matrices in batch at dimension [0-9]+ "
r"have mismatching sizes [0-9]+ and [0-9]+",
lambda: torch.matmul(nt0, nt1)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, no broadcasting is currently performed: "
r"[0-9]+-th nested matrices in batch at dimension [0-9]+ "
r"have mismatching sizes [0-9]+ and [0-9]+",
lambda: torch.matmul(nt1, nt0)
)
# error case: underlying matrices cannot be multiplied
nt0 = torch.nested_tensor([torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
r"0-th nested matrices in batch cannot be multiplied \(2x4 and 2x4\)",
lambda: torch.matmul(nt0, nt0)
)
# normal nested tensor: 3D
nt0 = torch.nested_tensor([torch.randn((2, 4)), torch.randn((3, 7))], device=device, dtype=dtype)
nt1 = torch.nested_tensor([torch.randn((4, 6)), torch.randn((7, 5))], device=device, dtype=dtype)
actual = torch.matmul(nt0, nt1).to_padded_tensor(0.0)
expect = torch.matmul(nt0.to_padded_tensor(0.0), nt1.to_padded_tensor(0.0))
self.assertEqual(actual, expect)
# normal nested tensor: 4D
nt0 = torch.nested_tensor([torch.randn((8, 2, 4)),
torch.randn((8, 3, 7))],
device=device, dtype=dtype)
nt1 = torch.nested_tensor([torch.randn((8, 4, 6)),
torch.randn((8, 7, 5))],
device=device, dtype=dtype)
actual = torch.matmul(nt0, nt1).to_padded_tensor(0.0)
expect = torch.matmul(nt0.to_padded_tensor(0.0), nt1.to_padded_tensor(0.0))
self.assertEqual(actual, expect)
# normal nested tensor: 5D
nt0 = torch.nested_tensor([torch.randn((8, 9, 2, 4)),
torch.randn((8, 9, 3, 7))],
device=device, dtype=dtype)
nt1 = torch.nested_tensor([torch.randn((8, 9, 4, 6)),
torch.randn((8, 9, 7, 5))],
device=device, dtype=dtype)
actual = torch.matmul(nt0, nt1).to_padded_tensor(0.0)
expect = torch.matmul(nt0.to_padded_tensor(0.0), nt1.to_padded_tensor(0.0))
self.assertEqual(actual, expect)
# cannot test torch.float16 because: RuntimeError: "bmm" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_matmul_noncontiguous(self, device, dtype):
nt0_contiguous, nt0_noncontiguous = random_nt_noncontiguous_pair((2, 3), device, dtype)
nt1_contiguous, nt1_noncontiguous = random_nt_noncontiguous_pair((6, 7), device, dtype)
self.assertEqual(
torch.matmul(nt0_contiguous.transpose(-1, -2), nt1_contiguous),
torch.matmul(nt0_noncontiguous.transpose(-1, -2), nt1_noncontiguous))
@dtypes(torch.float, torch.double)
def test_linear(self, device, dtype):
a = torch.randn(1, 2, device=device, dtype=dtype)
b = torch.randn(2, 2, device=device, dtype=dtype)
c = torch.randn(3, 2, device=device, dtype=dtype)
nt = torch.nested_tensor([a, b, c])
weight = torch.randn(2, 2, device=device, dtype=dtype)
bias = torch.randn(2, device=device, dtype=dtype)
# success case
torch.functional.F.linear(nt, weight, bias)
# invalid nested tensor dimension
msg = r'Linear requires nested_tensor.dim == 3 and dense_matrix.dim == 2. Nested tensor dim: 2. Dense tensor dim: 2'
nt1 = torch.nested_tensor([torch.randn(1, device=device, dtype=dtype),
torch.randn(2, device=device, dtype=dtype)])
with self.assertRaisesRegex(RuntimeError, msg):
torch.functional.F.linear(nt1, weight, bias)
# invalid weight shape
msg = r'Linear requires nested_tensor.dim == 3 and dense_matrix.dim == 2. Nested tensor dim: 3. Dense tensor dim: 3'
weight1 = torch.randn(2, 2, 3, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, msg):
torch.functional.F.linear(nt, weight1, bias)
# inconsistent last dim of nested tensor
msg = r"all tensors in NestedTensor must have the same trailing dim"
nt2 = torch.nested_tensor([torch.randn(1, 2, device=device, dtype=dtype),
torch.randn(2, 3, device=device, dtype=dtype)])
with self.assertRaisesRegex(RuntimeError, msg):
torch.functional.F.linear(nt2, weight, bias)
# Mismatch of nested tensor last dim and weight dimension
weight2 = torch.randn(2, 4, device=device, dtype=dtype)
msg = r"Shape mismatch for NestedTensor Linear: Expected input's \(a nested tensor\) 'last_dim'" \
r" to equal 'weight.size\(1\), but got: last_dim = 2, and weight.size\(1\) = 4"
with self.assertRaisesRegex(RuntimeError, msg):
torch.functional.F.linear(nt, weight2, bias)
# Nested tensor input and nested weight
nt_weight = nt.clone()
msg = r"Linear does not support nested weight when input is a nested tensor."
with self.assertRaisesRegex(RuntimeError, msg):
torch.functional.F.linear(nt, nt_weight, bias)
# TODO: test noncontiguous linear
# For now this tests the error message of linear
# since linear does not support noncontiguous buffer yet
@dtypes(torch.float, torch.double)
def test_linear_noncontiguous(self, device, dtype):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3, 6, 7), device, dtype)
weight = torch.randn((8, 5), device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
r"for now linear only supports contiguous nested tensor",
lambda: torch.nn.functional.linear(nt_noncontiguous, weight)
)
@dtypes(torch.float, torch.float16, torch.double)
@torch.inference_mode()
def test_transpose(self, device, dtype):
nt = self.random_nt(device, dtype, 4, (4, 4))
# error case: transpose nested dimension
self.assertRaisesRegex(
RuntimeError,
"Nested tensor dimension 0 cannot be transposed",
lambda: nt.transpose(0, 1)
)
self.assertRaisesRegex(
RuntimeError,
"Nested tensor dimension 0 cannot be transposed",
lambda: nt.transpose(1, -3)
)
# error case: dimension out of range
self.assertRaises(IndexError, lambda: nt.transpose(1, 3))
self.assertRaises(IndexError, lambda: nt.transpose(-4, -1))
# normal case
ntT = nt.transpose(-1, -2)
ptT_from_ntT = noncontiguous_to_padded_tensor(ntT)
pt = nt.to_padded_tensor(0.0)
ptT = pt.transpose(-1, -2)
self.assertEqual(ptT, ptT_from_ntT)
@dtypes(torch.float, torch.float16, torch.double)
@torch.inference_mode()
def test_reshape(self, device, dtype):
nt = self.random_nt(device, dtype, 4, (4, 4))
# error case: empty shape
self.assertRaisesRegex(
RuntimeError,
r"shape '\[\]' is invalid for a nested tensor",
lambda: nt.reshape(())
)
# error case: empty nested tensor
nt_empty = torch.nested_tensor([])
self.assertRaisesRegex(
RuntimeError,
"empty nested tensor cannot be reshaped",
lambda: nt_empty.reshape(-1)
)
# error case: invalid proposed shape for underlying tensors
self.assertRaisesRegex(
RuntimeError,
r"invalid shape dimension -2",
lambda: nt.reshape(-2, 2, 3)
)
self.assertRaisesRegex(
RuntimeError,
r"shape '\[.*\]' is invalid for input of size [0-9]+",
lambda: nt.reshape(4, 2, 3)
)
# normal case
x0 = torch.randn((2, 20), device=device, dtype=dtype)
x1 = torch.randn((3, 20), device=device, dtype=dtype)
nt = torch.nested_tensor([x0, x1])
pt = nt.to_padded_tensor(0.0)
self.assertRaisesRegex(
RuntimeError,
r"for now reshape cannot change the implicit batch dimension",
lambda: nt.transpose(-1, -2).reshape(40, -1)
)
# inherit only the ragged dimension
# (2, 20) -> (2, 5, 4)
# (3, 20) -> (3, 5, 4)
nt1 = nt.reshape(2, -1, 5, 4)
        # (2, 3, 20) -> (2, 3, 5, 4)
pt1 = pt.reshape(2, -1, 5, 4)
self.assertEqual(noncontiguous_to_padded_tensor(nt1), pt1)
# also inherit regular dimension
nt2 = nt1.reshape(2, -1, -1, 2, 2)
pt2 = pt1.reshape(2, -1, 5, 2, 2)
self.assertEqual(noncontiguous_to_padded_tensor(nt2), pt2)
@parametrize("input_dim", [3, 4])
def test_scaled_dot_product_attention(self, device, input_dim):
def rand_tensor(*shape):
return torch.randn(shape, device=device)
E = 10
if input_dim == 3:
# Shape: (N, L, E); ragged L
query = torch.nested_tensor([rand_tensor(2, E), rand_tensor(3, E), rand_tensor(4, E)])
# Shape: (N, S, E); ragged S
key = torch.nested_tensor([rand_tensor(3, E), rand_tensor(4, E), rand_tensor(5, E)])
value = torch.nested_tensor([rand_tensor(3, E), rand_tensor(4, E), rand_tensor(5, E)])
elif input_dim == 4:
# Shape: (N, N', L, E); ragged N' and L
query = torch.nested_tensor([rand_tensor(2, 2, E), rand_tensor(3, 3, E), rand_tensor(4, 4, E)])
# Shape: (N, N', S, E); ragged N' and S
key = torch.nested_tensor([rand_tensor(2, 3, E), rand_tensor(3, 4, E), rand_tensor(4, 5, E)])
value = torch.nested_tensor([rand_tensor(2, 3, E), rand_tensor(3, 4, E), rand_tensor(4, 5, E)])
else:
self.fail(f"Invalid input_dim {input_dim} encountered in SDP test")
def rand_mask(size):
return torch.randint(0, 2, size=size, dtype=torch.bool, device=device)
# Shape: (N, L, S); ragged L and S matching above
attn_mask = torch.nested_tensor([rand_mask((2, 3)), rand_mask((3, 4)), rand_mask((4, 5))])
dropout_p = 0.0 # no dropout for reproducibility
need_attn_weights: bool = True
# Success case: no attn_mask set and is_causal=False.
actual = torch.ops.aten._scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=dropout_p, need_attn_weights=need_attn_weights)
expected_outputs = []
expected_attn_weights = []
for q, k, v in zip(query.unbind(), key.unbind(), value.unbind()):
(output, attn_weights) = torch.ops.aten._scaled_dot_product_attention(
q.unsqueeze(0), k.unsqueeze(0), v.unsqueeze(0), attn_mask=None, dropout_p=dropout_p,
need_attn_weights=need_attn_weights)
expected_outputs.append(output.squeeze(0))
expected_attn_weights.append(attn_weights.squeeze(0))
expected_output_nested = torch.nested_tensor(expected_outputs)
expected_attn_weight_nested = torch.nested_tensor(expected_attn_weights)
self.assertEqual(actual[0], expected_output_nested)
self.assertEqual(actual[1], expected_attn_weight_nested)
# Error case: explicit attn_mask set.
with self.assertRaisesRegex(RuntimeError, "not supported when an explicit attn_mask is set"):
torch.ops.aten._scaled_dot_product_attention(
query, key, value, attn_mask=attn_mask, dropout_p=dropout_p, need_attn_weights=need_attn_weights)
# Error case: is_causal=True.
with self.assertRaisesRegex(RuntimeError, "not supported when is_causal=True"):
torch.ops.aten._scaled_dot_product_attention(
query, key, value, dropout_p=dropout_p, need_attn_weights=need_attn_weights, is_causal=True)
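# Illustrative sketch (assumption, not part of the original tests): per-sequence
# scaled dot-product attention follows softmax(q @ k^T / sqrt(E)) @ v; the nested
# op above is checked against the same aten kernel applied one sequence at a time
# rather than against this formula directly.
def _example_reference_sdpa(q, k, v):
    scale = q.size(-1) ** 0.5
    attn = torch.softmax(q @ k.transpose(-2, -1) / scale, dim=-1)
    return attn @ v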
class TestNestedTensorAutograd(TestCase):
# Note [Gradcheck args check_batched_grad=False] the common_utils testing version of gradcheck
    # includes the default parameters used for testing ops with gradcheck. However, nested tensor
    # does not support the stack op, so we turn it off for these tests
def _create_nested_tensor_from_list(self, requires_grad=False):
return torch.nested_tensor([torch.randn(1, 2, requires_grad=requires_grad),
torch.randn(7, 8, requires_grad=requires_grad)])
def _create_nested_tensor_from_mask(self, requires_grad=False):
data = torch.randn(2, 3, 4, requires_grad=requires_grad)
mask = torch.ones_like(data[:, :, 0]).bool()
return torch._nested_tensor_from_mask(data, mask)
def test_set_requires_grad_from_list(self):
nt = self._create_nested_tensor_from_list()
nt.requires_grad_()
assert nt.requires_grad
def test_set_requires_grad_from_mask(self):
nt = self._create_nested_tensor_from_mask()
nt.requires_grad_()
assert nt.requires_grad
def test_backward_for_add_op(self):
nt_1 = self._create_nested_tensor_from_mask()
nt_2 = self._create_nested_tensor_from_mask()
nt_1.requires_grad_()
c = nt_1 + nt_2
assert nt_1.requires_grad
assert c.requires_grad
grad_output = self._create_nested_tensor_from_mask()
c.backward(grad_output)
# Grad check doesn't work with nested yet.
        # d/d(nt_1) (nt_1 + nt_2) = 1 * grad_output
self.assertEqual(nt_1.grad, grad_output)
# Test Factory Functions
def test_nested_tensor_to_padded_tensor(self):
for padding_val in [0, 1]:
nt = torch.nested_tensor([torch.randn(1, 2), torch.randn(7, 8)])
nt.requires_grad_()
out = nt.to_padded_tensor(padding_val)
grad_output = torch.ones(out.shape)
out.backward(grad_output)
self.assertEqual(nt.grad, torch.nested_tensor([torch.ones(1, 2), torch.ones(7, 8)]))
def test_nested_tensor_from_mask_and_to_padded(self):
N, L, D = 2, 4, 4
mask = torch.ones(N, L)
for i in range(1, N):
end = torch.randint(1, L - 1, (1,))
mask[i, end:] = 0
mask[0, :] = 1
mask = mask.bool()
data = torch.randn(N, L, D, requires_grad=True, dtype=torch.float64)
def grad_test_func(inpt):
nt = torch._nested_tensor_from_mask(inpt, mask)
# This implicitly tests to_padded_tensor grads
return nt.to_padded_tensor(0)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_nested_tensor_from_padded(self):
nested_size = torch.tensor([[1, 2], [2, 2]])
padded_tensor = torch.randn(2, 2, 2, dtype=torch.float64)
padded_tensor[0, 1, :] = 0
padded_tensor.requires_grad_()
def grad_test_func(tensor, nested_size):
nt = torch._nested_from_padded(tensor, nested_size, fuse_transform_0213=False)
# This implicitly tests to_padded_tensor grads
return nt.to_padded_tensor(0)
data = (padded_tensor, nested_size)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_nested_tensor_from_padded_fused(self):
nested_size = torch.tensor([[1, 8], [2, 8]])
padded_tensor = torch.randn(2, 2, 2, 4, dtype=torch.float64)
padded_tensor[0, 1, :] = 0
padded_tensor.requires_grad_()
def grad_test_func(tensor, nested_size):
nt = torch._nested_from_padded(tensor, nested_size, fuse_transform_0213=True)
# This implicitly tests to_padded_tensor grads
return nt.to_padded_tensor(0)
data = (padded_tensor, nested_size)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_nested_tensor_from_list(self):
a = torch.randn(1, 2, requires_grad=True, dtype=torch.float64)
b = torch.randn(2, 2, requires_grad=True, dtype=torch.float64)
c = torch.randn(10, 2, requires_grad=True, dtype=torch.float64)
def grad_test_func(a, b, c):
c = torch.nested_tensor([a, b, c])
            # This implicitly tests to_padded_tensor grads
return c.to_padded_tensor(0)
data = (a, b, c)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_size_dim(self):
a = torch.nested_tensor([])
self.assertEqual(a.size(0), 0)
a = torch.nested_tensor([torch.tensor(1)])
self.assertEqual(a.size(0), 1)
a = torch.nested_tensor([torch.tensor(1), torch.tensor(2)])
self.assertEqual(a.size(0), 2)
a = torch.nested_tensor([torch.rand(1, 2),
torch.rand(1, 8)])
self.assertEqual(a.size(0), 2)
self.assertEqual(a.size(1), 1)
self.assertRaisesRegex(
RuntimeError, "Given dimension 2 is irregular and does not have a size", lambda: a.size(2))
a = torch.nested_tensor([torch.rand(3, 4),
torch.rand(5, 4)])
self.assertEqual(a.size(0), 2)
self.assertRaisesRegex(
RuntimeError, "Given dimension 1 is irregular and does not have a size", lambda: a.size(1))
self.assertEqual(a.size(2), 4)
def test_nested_tensor_bmm_gradcheck(self):
a = torch.randn(2, 6, requires_grad=True, dtype=torch.float64)
b = torch.randn(3, 6, requires_grad=True, dtype=torch.float64)
c = torch.randn(6, 4, requires_grad=True, dtype=torch.float64)
d = torch.randn(6, 5, requires_grad=True, dtype=torch.float64)
def grad_test_func(a, b, c, d):
nt0 = torch.nested_tensor([a, b])
nt1 = torch.nested_tensor([c, d])
result = nt0.bmm(nt1)
return result.to_padded_tensor(0.0)
data = (a, b, c, d)
assert torch.autograd.gradcheck(grad_test_func, inputs=data)
def test_nested_tensor_bmm_backward(self):
nt0 = torch.nested_tensor([torch.randn((2, 6)), torch.randn((3, 6))]).requires_grad_(True)
nt1 = torch.nested_tensor([torch.randn((6, 4)), torch.randn((6, 5))]).requires_grad_(True)
with torch.no_grad():
pt0 = nt0.to_padded_tensor(0.0).requires_grad_(True)
pt1 = nt1.to_padded_tensor(0.0).requires_grad_(True)
ynt = nt0.bmm(nt1)
ypt = pt0.bmm(pt1)
ynt.backward(ynt.clone())
ypt.backward(ypt.clone())
self.assertEqual(nt0.grad.to_padded_tensor(0.0), pt0.grad)
self.assertEqual(nt1.grad.to_padded_tensor(0.0), pt1.grad)
def test_nested_tensor_matmul_gradcheck(self):
a = torch.randn(2, 6, requires_grad=True, dtype=torch.float64)
b = torch.randn(3, 6, requires_grad=True, dtype=torch.float64)
c = torch.randn(6, 4, requires_grad=True, dtype=torch.float64)
d = torch.randn(6, 5, requires_grad=True, dtype=torch.float64)
def grad_test_func(a, b, c, d):
nt0 = torch.nested_tensor([a, b])
nt1 = torch.nested_tensor([c, d])
result = torch.matmul(nt0, nt1)
return result.to_padded_tensor(0.0)
data = (a, b, c, d)
assert torch.autograd.gradcheck(grad_test_func, inputs=data)
def test_nested_tensor_matmul_backward(self):
nt0 = torch.nested_tensor([torch.randn((7, 2, 6)), torch.randn((7, 3, 6))]).requires_grad_(True)
nt1 = torch.nested_tensor([torch.randn((7, 6, 4)), torch.randn((7, 6, 5))]).requires_grad_(True)
with torch.no_grad():
pt0 = nt0.to_padded_tensor(0.0).requires_grad_(True)
pt1 = nt1.to_padded_tensor(0.0).requires_grad_(True)
ynt = torch.matmul(nt0, nt1)
ypt = torch.matmul(pt0, pt1)
ynt.backward(ynt.clone())
ypt.backward(ypt.clone())
self.assertEqual(nt0.grad.to_padded_tensor(0.0), pt0.grad)
self.assertEqual(nt1.grad.to_padded_tensor(0.0), pt1.grad)
def test_nested_tensor_transpose_gradcheck(self):
a = torch.randn(2, 5, requires_grad=True)
b = torch.randn(3, 4, requires_grad=True)
def grad_test_func(a, b):
nt = torch.nested_tensor([a, b])
result = nt.transpose(-2, -1).transpose(-2, -1)
return result.to_padded_tensor(0.0)
data = (a, b)
assert torch.autograd.gradcheck(grad_test_func, inputs=data, eps=1e-3)
def test_nested_tensor_transpose_backward(self):
nt = torch.nested_tensor([torch.randn((2, 5)), torch.randn((3, 4))]).requires_grad_(True)
with torch.no_grad():
pt = nt.to_padded_tensor(0.0).requires_grad_(True)
ynt = nt.transpose(-2, -1)
ypt = pt.transpose(-2, -1)
ynt.backward(ynt.clone())
ypt.backward(ypt.clone())
self.assertEqual(nt.grad.to_padded_tensor(0.0), pt.grad)
def test_nested_tensor_reshape_gradcheck(self):
a = torch.randn(2, 6, requires_grad=True)
b = torch.randn(3, 6, requires_grad=True)
def grad_test_func(a, b):
nt = torch.nested_tensor([a, b])
result = nt.reshape(2, -1, 2, 3)
return result.to_padded_tensor(0.0)
data = (a, b)
assert torch.autograd.gradcheck(grad_test_func, inputs=data, eps=1e-3)
def test_nested_tensor_reshape_backward(self):
nt = torch.nested_tensor([torch.randn((2, 6)), torch.randn((3, 6))]).requires_grad_(True)
with torch.no_grad():
pt = nt.to_padded_tensor(0.0).requires_grad_(True)
ynt = nt.reshape(2, -1, 2, 3)
ypt = pt.reshape(2, -1, 2, 3)
ynt.backward(ynt.clone())
ypt.backward(ypt.clone())
self.assertEqual(nt.grad.to_padded_tensor(0.0), pt.grad)
def test_nested_tensor_linear(self):
a = torch.randn(1, 2, requires_grad=True, dtype=torch.float64)
b = torch.randn(2, 2, requires_grad=True, dtype=torch.float64)
c = torch.randn(3, 2, requires_grad=True, dtype=torch.float64)
weight = torch.randn(2, 2, requires_grad=True, dtype=torch.float64)
bias = torch.randn(2, requires_grad=True, dtype=torch.float64)
def grad_test_func(a, b, c, weight, bias=None):
nt = torch.nested_tensor([a, b, c])
# This implicitly tests to_padded_tensor grads
d = torch.functional.F.linear(nt, weight, bias)
return d.to_padded_tensor(0)
data = (a, b, c, weight, bias)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
# Test linear with no bias added
data = (a, b, c, weight)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_nested_tensor_softmax(self):
a = torch.randn(1, 2, requires_grad=True, dtype=torch.float64)
b = torch.randn(2, 2, requires_grad=True, dtype=torch.float64)
c = torch.randn(3, 2, requires_grad=True, dtype=torch.float64)
def grad_test_func(a, b, c, dim):
nt = torch.nested_tensor([a, b, c])
# This implicitly tests to_padded_tensor grads
d = torch.functional.F.softmax(nt, dim=dim)
return d.to_padded_tensor(0)
# softmax over last dim
data = (a, b, c, -1)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_nested_tensor_linear_backward(self):
a = torch.randn(1, 2, requires_grad=False)
b = torch.randn(2, 2, requires_grad=False)
c = torch.randn(3, 2, requires_grad=False)
weight = torch.randn(2, 2, requires_grad=True)
bias = torch.randn(2, requires_grad=True)
nt = torch.nested_tensor([a, b, c])
out = torch.functional.F.linear(nt, weight, bias)
out.backward(out.clone())
assert weight.grad is not None
assert bias.grad is not None
assert a.grad is None
assert b.grad is None
assert c.grad is None
instantiate_device_type_tests(TestNestedTensorDeviceType, globals())
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_nestedtensor.py
|
# Owner(s): ["module: unknown"]
import torch
from torch.utils import ThroughputBenchmark
from torch.testing._internal.common_utils import run_tests, TestCase, TemporaryFileName
class TwoLayerNet(torch.jit.ScriptModule):
def __init__(self, D_in, H, D_out):
super(TwoLayerNet, self).__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.linear2 = torch.nn.Linear(2 * H, D_out)
@torch.jit.script_method
def forward(self, x1, x2):
h1_relu = self.linear1(x1).clamp(min=0)
h2_relu = self.linear1(x2).clamp(min=0)
cat = torch.cat((h1_relu, h2_relu), 1)
y_pred = self.linear2(cat)
return y_pred
class TwoLayerNetModule(torch.nn.Module):
def __init__(self, D_in, H, D_out):
super(TwoLayerNetModule, self).__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.linear2 = torch.nn.Linear(2 * H, D_out)
def forward(self, x1, x2):
h1_relu = self.linear1(x1).clamp(min=0)
h2_relu = self.linear1(x2).clamp(min=0)
cat = torch.cat((h1_relu, h2_relu), 1)
y_pred = self.linear2(cat)
return y_pred
class TestThroughputBenchmark(TestCase):
def linear_test(self, Module, profiler_output_path=""):
D_in = 10
H = 5
D_out = 15
B = 8
NUM_INPUTS = 2
module = Module(D_in, H, D_out)
inputs = []
for i in range(NUM_INPUTS):
inputs.append([torch.randn(B, D_in), torch.randn(B, D_in)])
bench = ThroughputBenchmark(module)
for input in inputs:
# can do both args and kwargs here
bench.add_input(input[0], x2=input[1])
for i in range(NUM_INPUTS):
# or just unpack the list of inputs
module_result = module(*inputs[i])
bench_result = bench.run_once(*inputs[i])
torch.testing.assert_close(bench_result, module_result)
stats = bench.benchmark(
num_calling_threads=4,
num_warmup_iters=100,
num_iters=1000,
profiler_output_path=profiler_output_path,
)
print(stats)
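    # Illustrative sketch (assumption, not from the original file): the benchmark
    # can also be driven with a plain nn.Module and a single positional input;
    # run_once should mirror a direct forward call on the same input.
    def _example_single_input_benchmark(self):
        model = torch.nn.Linear(4, 2)
        bench = ThroughputBenchmark(model)
        x = torch.randn(8, 4)
        bench.add_input(x)
        torch.testing.assert_close(bench.run_once(x), model(x))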
def test_script_module(self):
self.linear_test(TwoLayerNet)
def test_module(self):
self.linear_test(TwoLayerNetModule)
def test_profiling(self):
with TemporaryFileName() as fname:
self.linear_test(TwoLayerNetModule, profiler_output_path=fname)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_throughput_benchmark.py
|
# Owner(s): ["module: vmap"]
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor, vmap
import functools
import itertools
import warnings
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
import types
FALLBACK_REGEX = r'There is a performance drop'
class EnableVmapFallbackWarnings:
def __enter__(self):
self.prev_state = torch._C._debug_only_are_vmap_fallback_warnings_enabled()
torch._C._debug_only_display_vmap_fallback_warnings(True)
def __exit__(self, *ignored):
torch._C._debug_only_display_vmap_fallback_warnings(self.prev_state)
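# Illustrative usage sketch (assumption, not part of the original tests): wrapping
# a vmap call in the context manager above surfaces the "performance drop" warning
# emitted when an op has no batching rule and takes the slow fallback path.
def _example_capture_fallback_warnings():
    with warnings.catch_warnings(record=True) as captured:
        with EnableVmapFallbackWarnings():
            vmap(torch.atan2)(torch.randn(3), torch.randn(3))
    return [str(w.message) for w in captured]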
class TestVmapAPI(TestCase):
def test_non_tensor_output_raises(self):
with self.assertRaisesRegex(ValueError, "got type <class 'float'> as the return"):
output = vmap(lambda x: 3.14)(torch.ones(3))
def multiple_outputs(x):
return x, 3
with self.assertRaisesRegex(ValueError, "got type <class 'int'> for return 1"):
vmap(multiple_outputs)(torch.ones(3))
def test_different_map_dim_size_raises(self):
x = torch.randn(2)
y = torch.randn(3)
expected_msg = 'Expected all tensors to have the same size in the mapped dimension'
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(torch.mul)(x, y)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
def test_func_with_no_inputs(self):
expected_msg = 'got no inputs'
def foo():
return torch.randn(3)
def bar(x):
return torch.randn(3)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(foo)()
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(bar)()
def test_constant_function(self):
output = vmap(lambda x: torch.tensor(3.14))(torch.ones(3))
self.assertEqual(output, torch.tensor([3.14, 3.14, 3.14]))
def test_single_input(self):
x = torch.randn(2, 3)
def square(x):
return x * x
output = vmap(square)(x)
self.assertEqual(output, x * x)
def test_multiple_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul)(x, y)
self.assertEqual(output, x * y)
def test_multiple_outputs(self):
def foo(x):
return x * x, x * x * x
x = torch.randn(3)
outputs = vmap(foo)(x)
self.assertEqual(outputs[0], x * x)
self.assertEqual(outputs[1], x * x * x)
def test_multiple_outputs_error_cases(self):
# This is the same thing as
# def returns_tuple_of_tensors(x):
# return x, x
def returns_tuple_of_tensors(x):
return (x, x)
def returns_list_of_two_tensors(x):
return [x, x]
def returns_list_of_one_tensor(x):
return [x]
x = torch.randn(3)
# should not throw
vmap(returns_tuple_of_tensors)(x)
# jax supports these, but we don't yet
msg = "must only return Tensors, got type <class 'list'>"
with self.assertRaisesRegex(ValueError, msg):
vmap(returns_list_of_two_tensors)(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(returns_list_of_one_tensor)(x)
def test_nested_with_same_map_dim(self):
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
output = vmap(vmap(torch.mul))(x, y)
self.assertEqual(output, x * y)
output = vmap(vmap(vmap(torch.mul)))(x, y)
self.assertEqual(output, x * y)
def test_nested_with_different_map_dim(self):
x = torch.randn(2, 3)
y = torch.randn(5, 3)
output = vmap(lambda x: vmap(lambda y: x * y)(y))(x)
self.assertEqual(output.shape, (2, 5, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
z = torch.randn(7, 3)
output = vmap(lambda x: vmap(lambda y: vmap(lambda z: x * y * z)(z))(y))(x)
self.assertEqual(output.shape, (2, 5, 7, 3))
self.assertEqual(output, x.view(2, 1, 1, 3) * y.view(5, 1, 3) * z)
def test_noop_in_inner_vmap(self):
x = torch.randn(3)
y = torch.randn(5)
output = vmap(lambda x: vmap(lambda y: x)(y))(x)
self.assertEqual(output, x.view(3, 1).expand(3, 5))
def test_unsupported_op_err_msg(self):
# Unsupported view op
tensor = torch.randn(2, 3)
msg = (
r"Batching rule not implemented for aten::.+; the "
r"fallback path doesn't work on out= or view ops"
)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(torch.ravel)(tensor)
def out_op(x, y):
return torch.abs(x, out=y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(out_op)(tensor, tensor)
tensor = torch.randn(2)
# The fallback doesn't support TensorList
with self.assertRaisesRegex(RuntimeError, 'Batching rule not implemented'):
vmap(lambda t: torch.atleast_1d([t]))(tensor)
# Don't support non-tensor returns. This is a limitation of vmap;
# functions that don't return tensors must be special cased
with self.assertRaisesRegex(RuntimeError, 'Batching rule not implemented'):
vmap(torch.Tensor.item)(tensor)
def test_nonzero_out_dims(self):
# Basic test
tensor = torch.randn(2, 3)
result = vmap(lambda x: x, out_dims=1)(tensor)
self.assertEqual(result, tensor.permute(1, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
# Test that the batch dimension gets permuted to dim 2
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 0, 3))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
# negative out_dim
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=-1)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 3, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
# check that out_dims works on ALL outputs
tensor = torch.randn(2, 3, 5, 7)
other = torch.randn(2, 3, 5, 7)
result = vmap(lambda x, y: (x, y), out_dims=2)(tensor, other)
self.assertEqual(result, (tensor.permute(1, 2, 0, 3), other.permute(1, 2, 0, 3)))
# use out_dims with the maximum vmap-able tensor dims (64 dims)
ndims = 64
shape = [2] + [1] * (ndims - 1)
expected_shape = [1, 1, 2] + [1] * (ndims - 3)
tensor = torch.randn(shape)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result.shape, expected_shape)
# test something that is not the identity function
def foo(x, y):
return x, x * y, x * y * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=1)(x, y)
self.assertEqual(
result,
(x.permute(1, 0, 2), (x * y).permute(1, 0, 2), (x * y * y).permute(1, 0, 2)))
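    # Illustrative sketch (assumption, not from the original suite): for the
    # identity function, out_dims=d just relocates the mapped dimension, so the
    # result equals the default out_dims=0 result with dim 0 moved to position d.
    @staticmethod
    def _example_out_dims_is_movedim(tensor, d):
        return torch.equal(vmap(lambda t: t, out_dims=d)(tensor),
                           vmap(lambda t: t)(tensor).movedim(0, d))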
def test_multiple_out_dims(self):
def foo(x):
return x, x
def bar(x, y):
return x, x, x, x * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=(0, 1))(x)
self.assertEqual(result, (x, x.permute(1, 0, 2)))
result = vmap(bar, out_dims=(-1, 0, 1, 2))(x, y)
expected = (
x.permute(1, 2, 0),
x,
x.permute(1, 0, 2),
(x * y).permute(1, 2, 0),
)
self.assertEqual(result, expected)
def test_nested_out_dims(self):
y = torch.randn(2, 3, 5, 7)
# Inner vmap has non-zero out_dim
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y))(y)
self.assertEqual(result.shape, (2, 5, 3, 7))
self.assertEqual(result, y.permute(0, 2, 1, 3))
# all vmaps have non-zero out_dim
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y), out_dims=1)(y)
self.assertEqual(result.shape, (5, 2, 3, 7))
self.assertEqual(result, y.permute(2, 0, 1, 3))
# throwing in some negative out_dims
result = vmap(lambda y: vmap(lambda x: x, out_dims=-1)(y), out_dims=-1)(y)
self.assertEqual(result.shape, (5, 7, 3, 2))
self.assertEqual(result, y.permute(2, 3, 1, 0))
# testing fn that isn't the identity
x = torch.randn(2, 3)
y = torch.randn(5, 3)
result = vmap(lambda y: vmap(lambda x: x * y, out_dims=1)(x), out_dims=-1)(y)
self.assertEqual(result.shape, (3, 2, 5))
self.assertEqual(result, (y.view(5, 1, 3) * x).permute(2, 1, 0))
def test_out_dims_edge_case(self):
def foo(x):
return x
# Test that we accept out_dims=(1,) for a function with one output.
tensor = torch.randn(2, 3)
expected = vmap(foo, out_dims=1)(tensor)
result = vmap(foo, out_dims=(1,))(tensor)
self.assertEqual(result, expected)
def test_out_dims_must_be_int_or_tuple_of_int_err_msg(self):
msg = '`out_dims` must be an int or a tuple of int'
tensor = torch.randn(2, 3)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims='lol')(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=('lol',))(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=None)(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(None,))(tensor)
def test_out_dims_and_num_outputs_mismatch_err_msg(self):
msg = '`out_dims` must have one dim per output'
x = torch.randn(2, 3, 5)
# Too many out_dims
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0, 0, 0))(x)
# Too few out_dims
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x), out_dims=(0,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0))(x)
def test_out_dim_out_of_bounds_err_msg(self):
# TODO(rzou): This error message isn't that great. It comes straight
# from maybe_wrap_dim. Consider doing a try-catch-(add some context) to
# the error message in the future in C++
msg = 'Dimension out of range'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=3)(x)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=-4)(x)
def test_non_zero_in_dims(self):
tensor = torch.randn(2, 3, 5)
# Implicit out_dims = 0; vmap will move the batch dim to the front.
output = vmap(lambda x: x, (1,))(tensor)
self.assertEqual(output, tensor.permute(1, 0, 2))
self.assertEqual(output.data_ptr(), tensor.data_ptr())
x = torch.randn(2, 3)
y = torch.randn(3, 2)
output = vmap(torch.mul, (0, 1))(x, y)
self.assertEqual(output, x * y.t())
output = vmap(torch.mul, (1, 0))(x, y)
self.assertEqual(output, x.t() * y)
def test_none_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
# None in_dim for a Tensor means we don't map over it
output = vmap(torch.mul, (0, None))(x, y)
self.assertEqual(output.shape, (2, 2, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
# None in_dim for non-tensor arguments
output = vmap(torch.mul, (0, None))(x, 2)
self.assertEqual(output, x * 2)
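    # Illustrative sketch (assumption, not from the original suite): in_dims=None
    # for an argument means "share this value across the whole batch", so with a
    # (B, N) batched input and an unbatched (N,) input the result matches plain
    # broadcasting.
    @staticmethod
    def _example_none_in_dim_matches_broadcast():
        x = torch.randn(4, 3)
        y = torch.randn(3)
        return torch.equal(vmap(torch.add, in_dims=(0, None))(x, y), x + y)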
def test_nested_non_default_in_dims(self):
x = torch.rand(5, 2, 3)
y = torch.rand(3, 5, 2)
result = vmap(vmap(vmap(torch.mul), (1, 0)), (1, 2))(x, y)
self.assertEqual(result, x.permute(1, 2, 0) * y.permute(2, 0, 1))
def test_non_default_in_dims_out_dims(self):
x = torch.randn(2, 3, 5)
# Same in_dim as out_dim, vmap over identity
result = vmap(lambda x: x, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x)
self.assertEqual(result.data_ptr(), x.data_ptr())
# Different in_dim from out_dim, vmap over identity
result = vmap(lambda x: x, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, x.transpose(1, 2))
self.assertEqual(result.data_ptr(), x.data_ptr())
def foo(x):
return x * 2
# Same in_dim as out_dim, vmap over operation
result = vmap(foo, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x * 2)
# Different in_dim as out_dim, vmap over operation
result = vmap(foo, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, (x * 2).transpose(1, 2))
# Basic nested test.
result = vmap(vmap(foo, 1, 1), 1, 1)(x)
self.assertEqual(result, x * 2)
def test_accepts_nested_inputs(self):
B0 = 2
x = torch.randn(2, 3)
y = torch.randn(2, 3)
# Single layer of nesting
out = vmap(lambda z: z[0] + z[1])((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1])([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'])({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=(0,))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
self.assertEqual(out, x + y)
# Multiple layers of nesting
out_fn = vmap(lambda z: z['x'][0] + z['x'][1][0] + z['y'][0] + z['y'][1])
out = out_fn({'x': [x, (x,)], 'y': [y, y]})
self.assertEqual(out, x + x + y + y)
def test_in_dims_wrong_type_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'expected `in_dims` to be int or a \(potentially nested\) tuple'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, [0, 0])(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, set({0, 0}))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, 'lol')(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=[0, 0])([x, y])
# The following should not throw
vmap(torch.mul, (0, 0))(x, y)
def test_not_enough_in_dims_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'in_dims is not compatible with the structure of `inputs`'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0,))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0, 0, 0))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0],))([x, y])
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))([x, y])
# The following should not throw
vmap(torch.mul, (0, 0))(x, y)
def test_integer_in_dim_but_not_tensor_input_err_msg(self):
def foo(xy):
return xy[0] * xy[1]
def bar(x, yz):
return x * yz[0] * yz[1]
x = torch.randn(2, 3)
y = torch.randn(2, 3)
# the following are errors in jax (and will always be errors)
msg = 'Got in_dim=0 for an input but the input is of type'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum)(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum, (0, 0))(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, 1])
# The following should not throw
vmap(torch.sum, (0, None))(x, 0)
def test_in_dim_not_in_tensor_err_msg(self):
def foo(x):
return x * x
x = torch.randn(2, 3)
y = torch.randn(2, 3)
msg = r'Got in_dim=-?\w for some input, but that input is a Tensor of dimensionality \w'
with self.assertRaisesRegex(ValueError, msg):
vmap(foo)(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(0,))(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(-1,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(2,))(y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([3, 0],))([x, y])
# the following should not throw
vmap(foo, in_dims=(0,))(torch.randn(2, 3))
vmap(foo, in_dims=(1,))(torch.randn(2, 3))
def test_fallback_does_not_warn_by_default(self):
# NB: One day we will implement a batching rule for torch.atan2.
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
op = torch.atan2
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
result = vmap(op)(x, y)
# The single warning here is the "vmap is experimental"
# warning, not a warning from the vmap fallback path.
self.assertEqual(len(wa), 1)
def test_fallback_warns_when_warnings_are_enabled(self):
# NB: One day we will implement a batching rule for torch.atan2.
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
op = torch.atan2
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
with EnableVmapFallbackWarnings():
result = vmap(op)(x, y)
self.assertEqual(len(wa), 2)
self.assertRegex(str(wa[-1].message), FALLBACK_REGEX)
def _assert_uses_vmap_fallback(self, vmap_args, inputs):
with warnings.catch_warnings(record=True) as wa:
with EnableVmapFallbackWarnings():
result = vmap(*vmap_args)(*inputs)
self.assertEqual(len(wa), 2)
self.assertRegex(str(wa[-1].message), FALLBACK_REGEX)
def test_fallback_zero_dim(self):
# NB: One day we will implement a batching rule for torch.atan2.
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
op = torch.atan2
x = torch.randn(11)
y = torch.randn(11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0, B1 = 0, 3
x = torch.randn(B0, 11)
y = torch.randn(11)
msg = 'The fallback path does not support vmap over dims of size 0'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
x = torch.randn(B0, B1, 11)
y = torch.randn(B1, 11)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
def test_fallback_atan2(self):
# NB: One day we will implement a batching rule for torch.atan2.
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
op = torch.atan2
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
# fallback on torch.atan2
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(op, (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
# fallback on torch.atan2, nested vmap
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
# big batch size (total 10000)
x = torch.randn(100, 10, 10, 5)
y = torch.randn(100, 10, 10)
result = vmap(vmap(vmap(op)))(x, y)
self.assertEqual(result, op(x, y.view(100, 10, 10, 1)))
def test_fallback_masked_fill(self):
# NB: One day we will implement a batching rule for masked_fill
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
def run_test(batch_size):
B0 = batch_size
x = torch.randn(B0, 7, 11, 13)
dim = 0
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 13)
self._assert_uses_vmap_fallback((torch.index_add, (0, None, None, 0)), (x, dim, index, values))
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
expected = torch.index_add(
x, dim + 1, index, values.view(B0, 3, 1, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
run_test(batch_size=1237)
def test_fallback_multiple_returns(self):
# NB: One day we will implement a batching rule for torch.var_mean
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
B0, B1, B2 = 2, 3, 1237
tensor = torch.randn(B0, 10)
self._assert_uses_vmap_fallback((torch.var_mean,), (tensor,))
# fallback correctness on torch.var_mean
result = vmap(torch.var_mean)(tensor)
expected = torch.var_mean(tensor, dim=1)
self.assertEqual(result, expected)
# nested vmap
tensor = torch.randn(B0, B1, 10)
result = vmap(vmap(torch.var_mean))(tensor)
expected = torch.var_mean(tensor, dim=2)
self.assertEqual(result, expected)
# big batch size, nested vmap
tensor = torch.randn(B0, B1, B2, 10)
result = vmap(vmap(vmap(torch.var_mean)))(tensor)
expected = torch.var_mean(tensor, dim=3)
self.assertEqual(result, expected)
def test_inplace_fallback_unary(self):
# Test the in-place fallback on an in-place method that takes no
# additional Tensor arguments. This is the simplest case of the fallback.
# NB: One day we will implement a batching rule for acos_.
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
op = Tensor.acos_
B0, B1, B2 = 2, 3, 10000
x = torch.randn(B0, 5)
self._assert_uses_vmap_fallback((op,), (x,))
# Single vmap
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op)(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
# Single vmap + different out_dim produces a view(!)
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op, out_dims=(1,))(x)
self.assertTrue(result._base is x)
self.assertEqual(result, x_orig.t().acos())
# Nested vmap
x_orig = torch.randn(B0, B1, 5)
x = x_orig.clone()
result = vmap(vmap(op))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
# Nested vmap, large batch size
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
result = vmap(vmap(vmap(op)))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
def test_inplace_fallback_nary_same_levels(self):
# NB: One day we will implement a batching rule for atan2_
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
op = Tensor.atan2_
outplace_op = torch.atan2
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
# Single vmap
B0 = 5
x_orig = torch.randn(7, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, 7, 11)
vmap(op, (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim(0, 2)))
# Nested vmap
B0, B1 = 5, 7
x_orig = torch.randn(B1, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, B1, 11)
vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim([0, 1], [2, 0])))
# big batch size (total 10000)
B0, B1, B2 = 100, 10, 10
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
y = torch.randn(B0, B1, B2)
result = vmap(vmap(vmap(op)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, B1, B2, 1)))
def test_inplace_fallback_nary_different_levels(self):
# NB: One day we will implement a batching rule for atan2_
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
op = Tensor.atan2_
outplace_op = torch.atan2
B0, B1, B2 = 2, 3, 5
x = torch.rand(B0, 7)
y = torch.rand(7)
self._assert_uses_vmap_fallback((op, (0, None)), (x, y))
# op(left, right): All of the levels in right are found in left
x_orig = torch.rand(B0, 7)
x = x_orig.clone()
y = torch.rand(7)
vmap(op, in_dims=(0, None))(x, y)
self.assertEqual(x, outplace_op(x_orig, y))
x_orig = torch.rand(B0, B1, 7)
x = x_orig.clone()
y = torch.rand(B0, 7)
vmap(vmap(op, in_dims=(0, None)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, 1, 7)))
# op(left, right): Some of the levels in right are not found in left
msg = r'vmap: aten::atan2_\(self, \*extra_args\) is not possible'
x = torch.rand(7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(7, B0)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 1))(x, y)
x = torch.rand(B0, 7)
y = torch.rand(B0, B1, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(None, 0)))(x, y)
def test_backward_unsupported_interaction(self):
x = torch.randn(3, requires_grad=True)
y = torch.randn(5)
grad = torch.randn_like(x)
err_msg = r'backward\(\) called inside torch.vmap'
def backward_on_vmapped_tensor(x):
x.sum().backward()
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_on_vmapped_tensor)(x)
def backward_with_vmapped_grad(x, grad):
x.backward(grad)
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_with_vmapped_grad)(x, grad)
def completely_unrelated_backward(y):
x.sum().backward()
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(completely_unrelated_backward)(y)
def test_grad_unsupported_interaction(self):
input_tensor = torch.randn(3, requires_grad=True)
err_msg = 'autograd.grad.* called inside torch.vmap'
captured = torch.randn(3, requires_grad=True)
def output_to_grad_is_vmapped(input_tensor):
output = (captured * input_tensor).sum()
return torch.autograd.grad([output], [captured])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(output_to_grad_is_vmapped)(input_tensor)
output = (input_tensor ** 2).sum()
def input_to_grad_is_vmapped(input_tensor):
return torch.autograd.grad([output], [input_tensor])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(input_to_grad_is_vmapped)(input_tensor)
def test_batched_gradient_basic(self):
N = 3
x = torch.randn(N, requires_grad=True)
y = torch.randn(N)
def vjp_mul(v):
return torch.autograd.grad([x * y], [x], grad_outputs=[v])[0]
batched_v = torch.eye(N)
jacobian = vmap(vjp_mul)(batched_v)
self.assertEqual(jacobian, torch.diagflat(y))
def test_functools_partial(self):
x = torch.randn(3)
y = torch.randn(2, 3)
result = vmap(functools.partial(torch.mul, x))(y)
self.assertEqual(result, x * y)
def test_nn_module(self):
tensor = torch.randn(2, 3)
model = torch.nn.Linear(3, 3, bias=False)
result = vmap(model)(tensor)
self.assertEqual(result, model(tensor))
def test_fallback_with_undefined_grad(self):
B0 = 7
x = torch.randn(2, 3, 4, 5, requires_grad=True)
weight = torch.randn(3, 3, 1, 1)
v = torch.randn(B0, 2, 3, 4, 5)
def get_vjp(v):
result = torch.nn.functional.conv2d(x, weight)
grad_x, = torch.autograd.grad(result, x, v)
return grad_x
# Runs vmap(get_vjp)(v), which should not error out.
# The backward formula for convolution returns an undefined
# Tensor for grad_bias because the original bias does not exist.
#
# In the future we'll probably add a batching rule for convolution
# backward. When this happens, we should modify this test to use a
# different op (and/or create and use a dummy operator) to avoid bitrot.
self._assert_uses_vmap_fallback([get_vjp], [v])
def slice_inputs(inputs, bdims, i):
result = []
for inp, bdim in zip(inputs, bdims):
if bdim is None:
result.append(inp)
else:
result.append(inp.select(bdim, i))
return tuple(result)
def reference_vmap(op, inputs, in_dims=0, out_dims=0):
if isinstance(in_dims, int):
in_dims = (in_dims,) * len(inputs)
bdim_sizes = [inp.size(dim) for inp, dim in zip(inputs, in_dims) if dim is not None]
assert all(bdim_size == bdim_sizes[0] for bdim_size in bdim_sizes)
bdim_size = bdim_sizes[0]
results = tuple(op(*slice_inputs(inputs, in_dims, i)) for i in range(bdim_size))
assert len(results) > 0
op_has_single_return = not isinstance(results[0], tuple)
if op_has_single_return:
assert all(isinstance(result, torch.Tensor) for result in results)
if isinstance(out_dims, int):
out_dims = (out_dims,) * 1
return torch.stack(results, dim=out_dims[0])
assert all(isinstance(result, tuple) for result in results)
num_returns = len(results[0])
assert all(len(result) == num_returns for result in results)
if isinstance(out_dims, int):
out_dims = (out_dims,) * num_returns
return tuple(torch.stack(result_shards, out_dim)
for result_shards, out_dim in zip(zip(*results), out_dims))
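# Illustrative sketch (hypothetical helper, not invoked by the test suite):
# a minimal check that vmap and the sequential reference_vmap above agree,
# assuming the module-level `vmap` import used throughout this file.
def _example_reference_vmap_agreement():
    x, y = torch.randn(3, 5), torch.randn(3, 5)
    expected = reference_vmap(torch.mul, (x, y), in_dims=(0, 0), out_dims=0)
    result = vmap(torch.mul, in_dims=(0, 0), out_dims=0)(x, y)
    assert torch.allclose(result, expected)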
class TensorFactory:
@staticmethod
def rand(size, device='cpu', dtype=torch.float):
return torch.rand(size, device=device, dtype=dtype)
@staticmethod
def randn(size, device='cpu', dtype=torch.float):
return torch.randn(size, device=device, dtype=dtype)
@staticmethod
def randp1(size, device='cpu', dtype=torch.float):
return torch.rand(size, device=device, dtype=dtype) + 1
# Tests vmap(op, in_dims, out_dims)(*inputs) by comparing the output to a
# (slow) sequential map+stack fallback.
#
# check_view: Test that the returned output(s) are views of the first input.
# check_propagates_grad: Test that the operation propagates the requires_grad flag.
def _vmap_test(self, op, inputs, in_dims=0, out_dims=0,
check_view=False, check_propagates_grad=True):
result = vmap(op, in_dims, out_dims)(*inputs)
reference_result = reference_vmap(op, inputs, in_dims, out_dims)
self.assertEqual(result, reference_result)
op_has_single_return = not isinstance(result, tuple)
if check_view:
result_as_tuple = (result,) if op_has_single_return else result
for output in result_as_tuple:
input0_base = inputs[0] if inputs[0]._base is None else inputs[0]._base
self.assertTrue(output._base is input0_base,
msg="result was not a view of the first input!")
if not check_propagates_grad:
return
# Assuming input[0] is a floating-point tensor. Check if the vmap
# operation propagates the requires_grad flag to the zeroth output.
# Some vmap operators are implemented in a way that assumes that
# they are composite with respect to autograd. If the operator ever is
# changed to not be composite with respect to autograd, then the
# following check should fail.
inputs_clone = list(inputs)
inputs_clone[0] = inputs[0].clone().requires_grad_()
result = vmap(op, in_dims, out_dims)(*inputs_clone)
result_as_tuple = (result,) if op_has_single_return else result
    self.assertTrue(result_as_tuple[0].requires_grad)
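# Minimal sketch (hypothetical, not used by the tests) of the `check_view`
# property asserted above: an output produced through a view op under vmap
# shares its storage base with the original (unbatched) input tensor.
def _example_check_view_property():
    x = torch.randn(3, 4, 5)
    result = vmap(lambda t: t.transpose(0, 1))(x)
    assert result._base is x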
def should_allow_vmap_fallback_usage(fn):
return getattr(fn, '_allow_vmap_fallback_usage', False)
def allowVmapFallbackUsage(fn):
fn._allow_vmap_fallback_usage = True
return fn
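# Minimal sketch (hypothetical, not used by the tests) of how the
# allowVmapFallbackUsage marker above is read back by
# should_allow_vmap_fallback_usage when TestVmapBase wraps a test method.
def _example_fallback_marker_roundtrip():
    @allowVmapFallbackUsage
    def marked():
        pass
    def unmarked():
        pass
    assert should_allow_vmap_fallback_usage(marked)
    assert not should_allow_vmap_fallback_usage(unmarked)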
# All tests of TestVmapBase check that the slow vmap fallback is never invoked.
# This is so that we can incrementally add batching rules for operators to
# replace the slow vmap fallback path for said operators. To skip this check,
# please use the allowVmapFallbackUsage decorator.
#
# NB: Don't add tests to TestVmapBase directly, unless you want them to run
# on every subclass of TestVmapBase. Add them to e.g. TestVmapOperators.
#
# NB: TestVmapBase is a nested class. This prevents test runners from picking
# it up and running it.
class Namespace:
class TestVmapBase(TestCase):
def __init__(self, method_name='runTest'):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is None:
return
if not should_allow_vmap_fallback_usage(test_method):
setattr(self, method_name,
self._wrap_method_with_vmap_fallback_check(test_method))
def _wrap_method_with_vmap_fallback_check(self, method):
msg = (
'Expected the test to not invoke the vmap fallback path, i.e., '
'all of the operators being tested in this test should have batching '
'rules implemented. If you are intentionally testing something to '
'do with the fallback path, use allowVmapFallbackUsage. Otherwise, '
'please make sure that batching rules are implemented for the '
'operator(s) being tested.'
)
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
with warnings.catch_warnings(record=True) as wa:
warnings.simplefilter('always')
with EnableVmapFallbackWarnings():
method(*args, **kwargs)
for captured_warning in wa:
self.assertNotRegex(str(captured_warning.message), FALLBACK_REGEX, msg)
return types.MethodType(wrapper, self)
@allowVmapFallbackUsage
def test_vmap_fallback_check_ok(self):
# One day we'll implement a batching rule for torch.var_mean.
# When that happens, please change the example to use an
# operator that doesn't have a batching rule implemented.
op_using_fallback = torch.var_mean
vmap(op_using_fallback)(torch.rand(3))
def test_vmap_fallback_check(self):
@self._wrap_method_with_vmap_fallback_check
def no_fallback(self):
pass
# One day we'll implement a batching rule for torch.var_mean.
# When that happens, please change the example to use an
# operator that doesn't have a batching rule implemented.
op_using_fallback = torch.var_mean
@self._wrap_method_with_vmap_fallback_check
def uses_fallback(self):
vmap(op_using_fallback)(torch.rand(3))
no_fallback(self)
with self.assertRaises(AssertionError):
uses_fallback(self)
class TestVmapOperators(Namespace.TestVmapBase):
def _vmap_test(self, *args, **kwargs):
return _vmap_test(self, *args, **kwargs)
def _vmap_view_test(self, *args, **kwargs):
self._vmap_test(*args, **kwargs, check_view=True)
def _test_unary(self, op, getter, device, *args, **kwargs):
test = functools.partial(self._vmap_test, *args, **kwargs)
B0, B1 = 7, 11
# Single vmap, various in_dims / out_dims
test(op, [getter([B0, 3], device)])
test(op, [getter([2, 5, B0, 3], device)], in_dims=2)
test(op, [getter([2, 5, B0, 3], device)], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [getter([B0, B1], device)])
test(vmap(op), [getter([B1, 2, 5, B0, 3], device)], in_dims=2)
test(vmap(op, in_dims=2), [getter([2, 5, B0, B1, 3], device)],
in_dims=2, out_dims=2)
def test_unary_pointwise_ops(self):
cases = [
(torch.abs, TensorFactory.randn),
(torch.acos, TensorFactory.rand),
(torch.asin, TensorFactory.rand),
(torch.atan, TensorFactory.rand),
(torch.ceil, TensorFactory.randn),
(torch.cos, TensorFactory.rand),
(torch.cosh, TensorFactory.rand),
(torch.digamma, TensorFactory.rand),
(torch.exp, TensorFactory.randn),
(torch.expm1, TensorFactory.randn),
(torch.floor, TensorFactory.randn),
(torch.frac, TensorFactory.randn),
(torch.lgamma, TensorFactory.rand),
(torch.log, TensorFactory.randp1),
(torch.log10, TensorFactory.randp1),
(torch.log1p, TensorFactory.randp1),
(torch.log2, TensorFactory.randp1),
(torch.neg, TensorFactory.randn),
(torch.reciprocal, TensorFactory.randp1),
(torch.relu, TensorFactory.randn),
(torch.round, TensorFactory.randn),
(torch.rsqrt, TensorFactory.randp1),
(torch.sigmoid, TensorFactory.randn),
(torch.sign, TensorFactory.randn),
(torch.sin, TensorFactory.rand),
(torch.sinh, TensorFactory.rand),
(torch.sqrt, TensorFactory.rand),
(torch.tan, TensorFactory.rand),
(torch.tanh, TensorFactory.rand),
(torch.trunc, TensorFactory.randn),
]
for op, getter in cases:
self._test_unary(op, getter, 'cpu')
def test_clone(self):
# Some basic tests
self._test_unary(lambda x: x.clone(), TensorFactory.randn, 'cpu')
self._test_unary(lambda x: x.clone(memory_format=torch.preserve_format),
TensorFactory.randn, 'cpu')
self._test_unary(lambda x: x.clone(memory_format=torch.contiguous_format),
TensorFactory.randn, 'cpu')
# Test that the per-examples are contiguous when using torch.contiguous_format
def clone_contiguous(x):
return x.clone(memory_format=torch.contiguous_format)
B0, B1 = 3, 5
x = torch.randn(2, B0, 7)
y = vmap(clone_contiguous, in_dims=1, out_dims=1)(x)
self.assertTrue(y.movedim(1, 0).is_contiguous())
self.assertTrue(y[:, 0, :].is_contiguous())
x = torch.randn(2, B0, 7, B1)
y = vmap(vmap(clone_contiguous, in_dims=2), in_dims=1)(x)
self.assertTrue(y.is_contiguous())
self.assertTrue(y[0][0].is_contiguous())
msg = r'only supported with memory_format torch.preserve_format or torch.contiguous_format'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(lambda x: x.clone(memory_format=torch.channels_last))(torch.randn(B0))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(lambda x: x.clone(memory_format=torch.channels_last_3d))(torch.randn(B0))
def test_binary_pointwise_ops(self):
def get_number(getter):
return getter([]).item()
def make_case(op, input_getter=TensorFactory.randn):
return (op, input_getter)
cases = [
# Basic arithmetic
make_case(torch.add),
make_case(lambda x, y: x + y),
make_case(torch.sub),
make_case(lambda x, y: x - y),
make_case(torch.mul),
make_case(lambda x, y: x * y),
make_case(torch.div, input_getter=TensorFactory.randp1),
make_case(lambda x, y: x / y, input_getter=TensorFactory.randp1),
make_case(torch.pow, input_getter=TensorFactory.randp1),
make_case(lambda x, y: x ** y, input_getter=TensorFactory.randp1),
]
test = self._vmap_test
for op, getter in cases:
device = 'cpu'
B0, B1 = 7, 11
# Single vmap: op(Tensor, Tensor)
test(op, (getter([B0, 3], device), getter([B0, 3], device)))
test(op, (getter([B0], device), getter([B0, 2, 3], device)))
test(op, (getter([B0], device), getter([2, B0, 3], device)), in_dims=(0, 1))
test(op, (getter([B0], device), getter([2, B0, 3], device)),
in_dims=(0, 1), out_dims=1)
test(op, (getter([B0], device), getter([2, 3], device)), in_dims=(0, None))
test(op, (getter([2, 3], device), getter([B0, 3], device)), in_dims=(0, None))
# Nested vmap: op(Tensor, Tensor)
test(vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 3], device)))
test(vmap(op, in_dims=(None, 0)),
(getter([B0, 2, 3], device), getter([B1, 3], device)), in_dims=(0, None))
# Python number overload: op(Tensor, Number) (and vice-versa)
number = get_number(getter)
self._test_unary(lambda t: op(t, number), getter, device)
number = get_number(getter)
self._test_unary(lambda t: op(number, t), getter, device)
# Type promotion: op(Logical Scalar Tensor, Logical Scalar Tensor)
test(op, (getter([B0], device), getter([B0], device, dtype=torch.double)))
test(op, (getter([B0], device, dtype=torch.double), getter([B0], device)))
test(op, (getter([B0], device), getter([B0], device)))
# Type promotion: op(Tensor, Logical Scalar Tensor) (and vice-versa)
test(op, (getter([B0, 2], device), getter([B0], device, torch.double)))
test(op, (getter([B0], device, torch.double), getter([B0, 2], device)))
if not torch.cuda.is_available():
continue
# TODO(rzou): fix the following
# # Test cross-device scalars
# number = get_number(getter)
# self._test_unary(lambda t: op(t, number), getter, device='cuda')
# self._test_unary(lambda t: op(number, t), getter, device='cuda')
# self._test_unary(lambda t: op(t, torch.tensor(number)), getter, device='cuda')
def test_as_strided(self):
def _test(sizes, strides, offset, tensor, lambd):
result = vmap(lambda t: t.as_strided(sizes, strides, offset))(tensor)
expected = vmap(lambd)(tensor)
self.assertTrue(result._base is expected._base)
self.assertEqual(result, expected)
# single vmap test
B0 = 5
tensors = [
# contiguous
torch.randn(B0, 2, 3),
# non-contiguous
torch.randn(B0, 3, 2).transpose(1, 2),
# non-zero storage offset
torch.randn(2, B0, 2, 3)[1],
# non-contiguous strides, zero storage offset
torch.randn(B0, 2, 4, 3, 7)[:, :, 0, :, 0],
# non-contiguous strides, non-zero storage offset
torch.randn(B0, 2, 4, 3, 7)[:, :, 2, :, 1],
]
for x in tensors:
S0, S1 = x.stride()[1:]
offset = x.storage_offset()
# Broadcast
_test([5, 5, 2, 3], [0, 0, S0, S1], offset, x, lambda x: x.expand(5, 5, 2, 3))
# transpose
_test([3, 2], [S1, S0], offset, x, lambda x: x.transpose(0, 1))
# select
_test([2], [S0], offset + S1, x, lambda x: x[:, 1])
# Nested vmap test
B1 = 7
x = torch.randn(B1, B0, 2, 3)
S0, S1 = x.stride()[2:]
result = vmap(vmap(lambda t: t.as_strided([5, 5, 2, 3], [0, 0, S0, S1])), in_dims=1)(x)
expected = vmap(vmap(lambda t: t.expand(5, 5, 2, 3)), in_dims=1)(x)
self.assertTrue(result._base is expected._base)
self.assertEqual(result, expected)
        # Check that malformed sizes/strides raise an error instead of crashing
with self.assertRaisesRegex(RuntimeError, 'size and stride must have the same length'):
x = torch.randn(B0, 2, 3).transpose(0, 1)
vmap(lambda x: x.as_strided([1, 1, 1], [1, 1]))(x)
# Sanity check #1: we require the batch dims to be at the front of the
# tensor (in memory layout).
msg = 'batch dims being vmapped over are at the front of the tensor'
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(2, B0, 3).transpose(0, 1)
vmap(lambda x: x.as_strided([2, 3], [B0 * 3, 1]))(x)
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 2, 3, B1).movedim(3, 1)
vmap(vmap(lambda x: x.as_strided([2, 3], [B1 * 3, B1])))(x)
# All the Sanity check #2{a,b,c} cases check that
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# doesn't index memory that is out of bounds of xs[i]. This condition
# is important to the correctness of the as_strided batching rule
# (see NOTE: [When will the as_strided_batching_rule fail?])
# Sanity check #2a: The maximum indexable location of
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# is less than or equal to the maximum indexable location of xs[i].
msg = 'This is not supported inside of vmap'
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 3)
vmap(lambda x: x.as_strided([3], [1], 1))(x)
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 3, 5)
vmap(lambda x: x.as_strided([4, 4], [4, 1], 0))(x)
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, B1, 3, 5)
vmap(vmap(lambda x: x.as_strided([4, 4], [4, 1], 0)))(x)
# Sanity check #2b: The min indexable location of
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# is greater than or equal to the min indexable location of xs[i].
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(2, B0, 3)[1]
vmap(lambda x: x.as_strided([3], [1], B0 * 3 - 1))(x)
# Sanity check #2c:
# xs[i] is a zero-dim tensor, but
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# is not
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 0, 3)
vmap(lambda x: x.as_strided([3], [1]))(x)
def test_bmm(self):
op = torch.bmm
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = "Shape mismatch"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 3, 3, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2, 2, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 2, 3, 5), torch.rand(2, 5, 3)), in_dims=(0, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 2, 3, 5), torch.rand(2, 5, 3)),
in_dims=(1, None))
# right arg is vmapped
test(op, (torch.rand(2, 5, 3), torch.rand(B0, 2, 3, 5)), in_dims=(None, 0))
test(vmap(op, in_dims=(None, 0)), (torch.rand(2, 5, 3), torch.rand(B1, B0, 2, 3, 5)),
in_dims=(None, 1))
# both args are vmapped
test(op, (torch.rand(B0, 2, 3, 5), torch.rand(B0, 2, 5, 3)))
test(vmap(op), (torch.rand(B1, B0, 2, 3, 5), torch.rand(B0, B1, 2, 5, 3)), in_dims=(1, 0))
test(vmap(op, in_dims=(0, None)),
(torch.rand(B1, 2, 3, 5), torch.rand(B0, 2, 5, 3)), in_dims=(None, 0))
def test_cat(self):
test = self._vmap_test
B0, B1 = 5, 7
# Quick hack b/c vmap can't accept a list of tensors as an argument
def get_op(dim):
def op(*tensors):
return torch.cat(tensors, dim=dim)
return op
test(get_op(0), (torch.rand(B0, 2), torch.rand(B0, 3)))
test(get_op(0), (torch.rand(2), torch.rand(B0, 3)), in_dims=(None, 0))
test(get_op(0), (torch.rand(2, 17), torch.rand(3, 17, B0)), in_dims=(None, 2))
test(get_op(-1), (torch.rand(17, 2), torch.rand(17, 3, B0)), in_dims=(None, 2))
test(vmap(get_op(0), in_dims=(0, None)),
(torch.rand(B1, 2), torch.rand(B0, 3)), in_dims=(None, 0))
test(vmap(get_op(0), in_dims=(0, 0)),
(torch.rand(B1, 2), torch.rand(B0, B1, 3)), in_dims=(None, 0))
def test_conj(self):
op = torch.conj
def run_test(dtype):
def get(shape):
return torch.randn(shape, dtype=dtype)
B0, B1 = 7, 11
test = self._vmap_test
# Single vmap, various in_dims / out_dims
test(op, [get([B0, 3])])
test(op, [get([2, 5, B0, 3])], in_dims=2)
test(op, [get([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [get([B0, B1])])
test(vmap(op), [get([B1, 2, 5, B0, 3])], in_dims=2)
test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3])],
in_dims=2, out_dims=2)
# correctness tests
run_test(torch.float)
run_test(torch.cfloat)
# check that torch.conj on a non-complex tensor returns the same tensor
real_tensor = torch.randn(3)
result = vmap(op)(real_tensor)
self.assertEqual(result.data_ptr(), real_tensor.data_ptr())
def test_contiguous(self):
op = Tensor.contiguous
self._test_unary(op, TensorFactory.randn, 'cpu')
# check that contiguous returns the original tensor if the per-examples
# are already contiguous
B0 = 3
x = torch.randn(B0, 2, 5, 7)
x = x.movedim(0, 2)
result = vmap(Tensor.contiguous, in_dims=2, out_dims=2)(x)
self.assertTrue(result is x)
msg = 'NYI: querying is_contiguous inside of vmap for memory_format'
tensor = torch.randn(B0, 3)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(op, memory_format=torch.channels_last))(tensor)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(op, memory_format=torch.channels_last_3d))(tensor)
def test_stride(self):
B0 = 3
x = torch.randn(B0, 2, 5, 7)
def foo(x):
assert x.stride() == (7 * 5, 7, 1)
return x
vmap(foo)(x)
x = torch.randn(2, B0, 5, 7).movedim(1, 0)
def bar(x):
assert x.stride() == (7 * 5 * B0, 7, 1)
return x
vmap(bar)(x)
def test_chunk(self):
test = self._vmap_view_test
op = torch.chunk
B0, B1, B2 = 7, 11, 13
        # tests for torch.chunk(self, chunks: int, dim)
test(op, (torch.rand(B0, 2, 1024), 15, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 9, 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), 4, 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
def test_clamp(self):
clamp_cases = (
(lambda t: t.clamp(min=-0.5), TensorFactory.randn),
(lambda t: t.clamp(max=0.5), TensorFactory.randn),
(lambda t: t.clamp(min=-0.5, max=0.5), TensorFactory.randn),
(lambda t: t.clamp_min(min=-0.5), TensorFactory.randn),
(lambda t: t.clamp_max(max=0.5), TensorFactory.randn),
)
for op, getter in clamp_cases:
self._test_unary(op, getter, 'cpu')
def test_comparison_ops(self):
test = functools.partial(self._vmap_test, check_propagates_grad=False)
getter = TensorFactory.randn
B0, B1 = 7, 11
ops = (
torch.eq, lambda x, y: x == y,
torch.gt, lambda x, y: x > y,
torch.ge, lambda x, y: x >= y,
torch.le, lambda x, y: x <= y,
torch.lt, lambda x, y: x < y,
torch.ne, lambda x, y: x != y,
)
for op in ops:
# Single vmap: op(Tensor, Tensor)
test(op, (getter([B0, 3]), getter([B0, 3])))
test(op, (getter([B0]), getter([B0, 2, 3])))
test(op, (getter([B0]), getter([2, B0, 3])), in_dims=(0, 1))
test(op, (getter([B0]), getter([2, B0, 3])), in_dims=(0, 1), out_dims=1)
test(op, (getter([B0]), getter([2, 3])), in_dims=(0, None))
test(op, (getter([2, 3]), getter([B0, 3])), in_dims=(0, None))
# Nested vmap: op(Tensor, Tensor)
test(vmap(op), (getter([B0, B1, 2, 3]), getter([B0, B1, 3])))
test(vmap(op, in_dims=(None, 0)),
(getter([B0, 2, 3]), getter([B1, 3])), in_dims=(0, None))
# test number as inputs
number = getter([]).item()
self._test_unary(lambda t: op(t, number), getter, 'cpu', check_propagates_grad=False)
def test_diagonal(self):
tensor = torch.randn(3, 5, 7, 11, 13)
test = self._vmap_view_test
op = torch.diagonal
test(op, (tensor, 1, 0, 1), in_dims=(0, None, None, None))
test(op, (tensor, 0, 2, -1), in_dims=(0, None, None, None))
test(op, (tensor, 2, 1, 2), in_dims=(1, None, None, None))
test(op, (tensor, 0, -2, -1), in_dims=(1, None, None, None), out_dims=1)
test(vmap(lambda t: op(t, 0, 0, -1)), (tensor,), in_dims=1, out_dims=1)
test(vmap(vmap(lambda t: op(t, 0, 0, 1), in_dims=1), in_dims=3),
(tensor,), in_dims=1, out_dims=1)
def test_dot(self):
op = torch.dot
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = "Shape mismatch"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 5), torch.rand(5)), in_dims=(0, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 5), torch.rand(5)),
in_dims=(1, None))
# right arg is vmapped
test(op, (torch.rand(5), torch.rand(B0, 5)), in_dims=(None, 0))
test(vmap(op, in_dims=(None, 0)), (torch.rand(5), torch.rand(B1, B0, 5)),
in_dims=(None, 1))
# both args are vmapped
test(op, (torch.rand(B0, 5), torch.rand(B0, 5)))
test(vmap(op), (torch.rand(B1, B0, 5), torch.rand(B0, B1, 5)), in_dims=(1, 0))
test(vmap(op, in_dims=(0, None)),
(torch.rand(B1, 5), torch.rand(B0, 5)), in_dims=(None, 0))
def test_expand_as(self):
op = torch.Tensor.expand_as
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 1, 5), torch.rand(B0, 2, 3, 5)))
test(op, (torch.rand(B0, 1, 5), torch.rand(2, 3, 5)), in_dims=(0, None))
test(op, (torch.rand(1, 5), torch.rand(B0, 2, 3, 5)), in_dims=(None, 0))
test(vmap(op), (torch.rand(B0, B1, 1, 5), torch.rand(B0, B1, 2, 3, 5)))
test(vmap(op), (torch.rand(B0, B1, 1, 5), torch.rand(B1, B0, 2, 3, 5)), in_dims=(0, 1))
test(vmap(op), (torch.rand(B0, B1), torch.rand(B1, 2, 3, 5)), in_dims=(0, None))
test(vmap(vmap(op)), (torch.rand(B0, B1, B2), torch.rand(B0, B1, B2, 2, 3, 5)))
def test_fill_and_zero_inplace(self):
test = functools.partial(self._vmap_test, check_propagates_grad=False)
B0, B1 = 7, 11
ops = (
lambda t: t.fill_(0.1),
lambda t: t.fill_(torch.tensor(0.2)),
lambda t: t.zero_(),
)
for op in ops:
# Single vmap, various in_dims / out_dims
test(op, [TensorFactory.randn([B0, 3])])
test(op, [TensorFactory.randn([2, 5, B0, 3])], in_dims=2)
test(op, [TensorFactory.randn([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [TensorFactory.randn([B0, B1])])
test(vmap(op), [TensorFactory.randn([B1, 2, 5, B0, 3])], in_dims=2)
test(vmap(op, in_dims=2), [TensorFactory.randn([2, 5, B0, B1, 3])],
in_dims=2, out_dims=2)
        # test fill_ when the fill value is a batched tensor
B0, B1 = 3, 5
test(Tensor.fill_, [TensorFactory.randn([B0, B1]), TensorFactory.randn(B0)])
with self.assertRaisesRegex(RuntimeError,
r"output with shape .+ doesn't match the broadcast shape"):
# Runtime Error is thrown when the tensor being written to isn't being vmapped over
vmap(Tensor.fill_, (None, 0))(TensorFactory.randn([B0, B1]),
TensorFactory.randn([B0]))
def _test_complex_views(self, op, dtypes):
test = self._vmap_view_test
def run_test(op, dtype):
def get(shape):
return torch.randn(shape, dtype=dtype)
B0, B1 = 7, 11
# Single vmap, various in_dims / out_dims
test(op, [get([B0, 3])])
test(op, [get([3, B0])], in_dims=1)
test(op, [get([2, 5, B0, 3])], in_dims=2)
test(op, [get([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [get([B0, B1])])
test(vmap(op), [get([B1, 2, 5, 3, B0])], in_dims=4)
test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3])],
in_dims=2, out_dims=2)
for dtype in dtypes:
run_test(op, dtype)
def test_real(self):
self._test_complex_views(torch.real, dtypes=[torch.cfloat, torch.cdouble])
def test_imag(self):
self._test_complex_views(torch.imag, dtypes=[torch.cfloat, torch.cdouble])
def test_view_as_real(self):
self._test_complex_views(torch.view_as_real, dtypes=[torch.cfloat, torch.cdouble])
def test_view_as_complex(self):
def run_test(dtype):
def get(shape):
return torch.randn(shape, dtype=dtype)
op = torch.view_as_complex
test = self._vmap_view_test
B0, B1 = 7, 11
# Single vmap, various in_dims / out_dims
test(op, [get([B0, 3, 2])])
test(op, [get([2, 5, B0, 3, 2])], in_dims=2)
test(op, [get([2, 5, B0, 3, 2])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [get([B0, B1, 2])])
test(vmap(op), [get([B1, 2, 5, B0, 3, 2])], in_dims=2)
test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3, 2])],
in_dims=2, out_dims=2)
# Interesting case #1: Batch dim directly before dim of size 2
test(op, [get([3, B0, 2])], in_dims=1)
test(vmap(op, in_dims=1), [get([3, B1, B0, 2])], in_dims=2)
# Interesting case #2: Batch dim at end of tensor, success cases
# view_as_complex requires that the dim with size 2 have stride 1
            # in order for the view to function properly
test(op, [get([B0, 2]).transpose(0, 1)], in_dims=1)
test(vmap(op, in_dims=1), [get([B0, B1, 2]).movedim(1, 2)])
test(vmap(op, in_dims=2), [get([B0, 3, B1, 2]).movedim(2, 3)])
# Interesting case #3: Batch dim at end of tensor, failure cases
msg = "Tensor must have a last dimension with stride 1"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=1)(get([2, B0]))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=1), in_dims=1)(get([2, B0, B1]))
# Invalid input: no dimension of size 2
msg = 'Input tensor must have one or more dimensions'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(get([B0]))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op))(get([B0, B1]))
# Invalid input: Batch dim has size 2, but the logical last dim does
# not have size 2
msg = 'Tensor must have a last dimension of size 2'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=1)(get([3, 2]))
for dtype in [torch.float, torch.double]:
run_test(dtype)
def test_is_complex(self):
ctensor = torch.randn(3, dtype=torch.cfloat)
tensor = torch.randn(3)
def foo(x):
if x.is_complex():
return torch.tensor(1)
else:
return torch.tensor(0)
self.assertEqual(vmap(foo)(ctensor), torch.tensor([1, 1, 1]))
self.assertEqual(vmap(foo)(tensor), torch.tensor([0, 0, 0]))
def test_is_floating_point(self):
float_tensor = torch.tensor([1., 2., 3.])
long_tensor = torch.tensor([1, 2, 3])
def foo(x):
if x.is_floating_point():
return torch.tensor(1)
else:
return torch.tensor(0)
self.assertEqual(vmap(foo)(float_tensor), torch.tensor([1, 1, 1]))
self.assertEqual(vmap(foo)(long_tensor), torch.tensor([0, 0, 0]))
def test_is_contiguous(self):
def foo(x):
if x.is_contiguous():
return torch.tensor(1.)
else:
return torch.tensor(0.)
B0, B1 = 3, 5
# Single batch dim
contig = torch.randn(B0, 2, 7)
self.assertEqual(vmap(foo)(contig), torch.ones(B0))
noncontig = torch.randn(2, B0, 7)
self.assertEqual(vmap(foo, in_dims=1)(noncontig), torch.zeros(B0))
noncontig = torch.randn(2, B0, 7).movedim(1, 0)
self.assertEqual(vmap(foo)(noncontig), torch.zeros(B0))
noncontig = torch.randn(2, 7, B0)
self.assertEqual(vmap(foo, in_dims=2)(noncontig), torch.zeros(B0))
# Multiple batch dims
contig = torch.randn(B0, B1, 3)
self.assertEqual(vmap(vmap(foo))(contig), torch.ones(B0, B1))
contig = torch.randn(B1, B0, 3)
self.assertEqual(vmap(vmap(foo), in_dims=1)(contig), torch.ones(B0, B1))
contig = torch.randn(B1, B0, 3).movedim(0, 1)
self.assertEqual(vmap(vmap(foo))(contig), torch.ones(B0, B1))
noncontig = torch.randn(B0, 3, B1)
self.assertEqual(vmap(vmap(foo, in_dims=1))(noncontig), torch.zeros(B0, B1))
# is_contiguous on empty tensor is True
def bar(x):
assert x.is_contiguous()
return x
vmap(bar)(torch.randn(B0, 0, 3))
vmap(bar, in_dims=1)(torch.randn(0, B0, 3))
vmap(bar)(torch.randn(B0, 0, 3).mT)
# is_contiguous with other memory formats
def baz(x, memory_format):
x.is_contiguous(memory_format=memory_format)
return x
msg = 'NYI: querying is_contiguous inside of vmap for memory_format'
tensor = torch.randn(B0, 2, 7, 3)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(baz, memory_format=torch.channels_last))(tensor)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(baz, memory_format=torch.channels_last_3d))(tensor)
def test_movedim(self):
op = torch.movedim
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
# movedim(tensor, int, int) variant
test(op, (torch.rand(B0, 2, 5), 0, 1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 5), 0, 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 2, B0, 5), 0, 1), in_dims=(2, None, None))
test(vmap(vmap(op, in_dims=(2, None, None)), in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5, B2), 0, 1), in_dims=(2, None, None))
# movedim(tensor, intlist, intlist) variant
test(op, (torch.rand(B0, 2, 3, 5), [1, 0], [0, 2]), in_dims=(0, None, None))
test(op, (torch.rand(2, 3, B0, 5), [1, 0], [0, 2]), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5), [0, 1], [1, 0]), in_dims=(2, None, None))
test(vmap(vmap(op, in_dims=(2, None, None)), in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5, B2), [0, 1], [1, 0]), in_dims=(2, None, None))
def test_mm(self):
op = torch.mm
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = "Shape mismatch"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2, 2, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(5, 2)), in_dims=(0, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 2, 5), torch.rand(5, 2)),
in_dims=(1, None))
# right arg is vmapped
test(op, (torch.rand(2, 5), torch.rand(B0, 5, 2)), in_dims=(None, 0))
test(vmap(op, in_dims=(None, 0)), (torch.rand(2, 5), torch.rand(B1, B0, 5, 2)),
in_dims=(None, 1))
# both args are vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(B0, 5, 2)))
test(vmap(op), (torch.rand(B1, B0, 2, 5), torch.rand(B0, B1, 5, 2)), in_dims=(1, 0))
test(vmap(op, in_dims=(0, None)),
(torch.rand(B1, 2, 5), torch.rand(B0, 5, 2)), in_dims=(None, 0))
def test_mv(self):
op = torch.mv
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = "Shape mismatch"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 2, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(5)), in_dims=(0, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 2, 5), torch.rand(5)),
in_dims=(1, None))
# right arg is vmapped
test(op, (torch.rand(2, 5), torch.rand(B0, 5)), in_dims=(None, 0))
test(vmap(op, in_dims=(None, 0)), (torch.rand(2, 5), torch.rand(B1, B0, 5)),
in_dims=(None, 1))
# both args are vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(B0, 5)))
test(vmap(op), (torch.rand(B1, B0, 2, 5), torch.rand(B0, B1, 5)), in_dims=(1, 0))
test(vmap(op, in_dims=(0, None)),
(torch.rand(B1, 2, 5), torch.rand(B0, 5)), in_dims=(None, 0))
def test_narrow(self):
op = torch.narrow
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5), -1, 1, 3), in_dims=(0, None, None, None))
test(op, (torch.rand(2, B0, 5), 1, 1, 3), in_dims=(1, None, None, None))
test(vmap(op, in_dims=(0, None, None, None)),
(torch.rand(B1, 2, B0, 5), 1, 0, 0), in_dims=(2, None, None, None))
test(vmap(vmap(op, in_dims=(2, None, None, None)), in_dims=(0, None, None, None)),
(torch.rand(B1, 2, B0, 5, B2), -1, 2, 3), in_dims=(2, None, None, None))
def test_new_empty(self):
# Empty is non-deterministic so we just check that the shape of the
# output tensor is what we expect and that the vmap fallback isn't used.
op = Tensor.new_empty
B0, B1 = 7, 11
result = vmap(lambda x: op(x, [2, 3]))(torch.randn(B0))
self.assertEqual(result.shape, [B0, 2, 3])
result = vmap(lambda x: op(x, []))(torch.randn(B0))
self.assertEqual(result.shape, [B0])
result = vmap(vmap(lambda x: op(x, [2, 3])))(torch.randn(B0, B1))
self.assertEqual(result.shape, [B0, B1, 2, 3])
def test_new_empty_strided(self):
        # Empty is non-deterministic so we just check that the shape and strides
        # of the output are what we expect and that the vmap fallback isn't used.
B0, B1 = 7, 11
def _test_single_vmap(size, stride, B0):
x = torch.randn(B0)
result = vmap(lambda x: x.new_empty_strided(size, stride))(x)
S = torch.empty_strided(size, stride).storage().size()
self.assertEqual(result.shape, [B0] + size)
self.assertEqual(result.stride(), [S] + stride)
def _test_double_vmap(size, stride, B0, B1):
x = torch.randn(B0, B1)
result = vmap(vmap(lambda x: x.new_empty_strided(size, stride)))(x)
S = torch.empty_strided(size, stride).storage().size()
self.assertEqual(result.shape, [B0, B1] + size)
self.assertEqual(result.stride(), [B1 * S, S] + stride)
x = torch.randn(B1, B0)
result = vmap(vmap(lambda x: x.new_empty_strided(size, stride)), in_dims=1)(x)
S = x.new_empty_strided(size, stride).storage().size()
self.assertEqual(result.shape, [B0, B1] + size)
self.assertEqual(result.stride(), [B1 * S, S] + stride)
# contiguous case
_test_single_vmap([2, 3, 5], [3 * 5, 5, 1], B0)
_test_double_vmap([2, 3, 5], [3 * 5, 5, 1], B0, B1)
# expanded
_test_single_vmap([2, 3, 5], [0, 5, 1], B0)
_test_double_vmap([2, 3, 5], [0, 5, 1], B0, B1)
# some of these cases are pretty strange, just verifying that if
# empty_strided allows them then BatchedTensor.new_empty_strided
# can as well
for shape in [[2, 3, 4], [0, 2, 0]]:
for strides in [[12, 4, 1], [2, 4, 6], [0, 0, 0]]:
_test_single_vmap(shape, strides, B0)
_test_double_vmap(shape, strides, B0, B1)
def test_new_zeros(self):
op = Tensor.new_zeros
test = functools.partial(self._vmap_test, check_propagates_grad=False)
B0, B1 = 7, 11
test(lambda x: op(x, 2, 3), (torch.rand(B0),))
test(lambda x: op(x, []), (torch.rand(B0),))
test(vmap(lambda x: op(x, 3, 5)), (torch.rand(B0, B1),))
def test_select(self):
op = torch.select
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5), 0, 0), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 5), 1, 1), in_dims=(1, None, None))
test(vmap(lambda t: op(t, 1, 1)), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(lambda t: op(t, 1, 1), in_dims=1)), (torch.rand(B1, 2, B0, B2, 5),), in_dims=2)
def test_stack(self):
test = self._vmap_test
B0, B1 = 5, 7
# Quick hack b/c vmap can't accept a list of tensors as an argument
def get_op(dim):
def op(*tensors):
return torch.stack(tensors, dim=dim)
return op
test(get_op(0), (torch.rand(B0, 3), torch.rand(B0, 3)))
test(get_op(0), (torch.rand(3), torch.rand(B0, 3)), in_dims=(None, 0))
test(get_op(0), (torch.rand(2, 17), torch.rand(2, 17, B0)), in_dims=(None, 2))
test(get_op(-1), (torch.rand(2, 17), torch.rand(2, 17, B0)), in_dims=(None, 2))
test(vmap(get_op(0), in_dims=(0, None)),
(torch.rand(B1, 2), torch.rand(B0, 2)), in_dims=(None, 0))
test(vmap(get_op(0), in_dims=(0, 0)),
(torch.rand(B1, 2), torch.rand(B0, B1, 2)), in_dims=(None, 0))
def test_slice(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(lambda t: t[0:1], (torch.rand(B0, 3, 5),))
test(lambda t: t[:, 1:3], (torch.rand(3, 5, B0),), in_dims=2)
test(vmap(lambda t: t[:, 0:1], in_dims=2), (torch.rand(3, 5, B0, B1),), in_dims=2)
test(vmap(vmap(lambda t: t[0:1], in_dims=2), in_dims=2),
(torch.rand(3, 5, B0, B1, B2),), in_dims=2)
def test_squeeze(self):
test = self._vmap_view_test
op = torch.squeeze
B0, B1 = 1, 11
test(op, (torch.rand(B0),))
test(op, (torch.rand(B0, 3, 5),))
test(op, (torch.rand(1, B0, 5),), in_dims=1)
test(op, (torch.rand(B0, 0, 1, 5, 1),))
test(op, (torch.rand(B0, 1, 1, 1, 1),))
test(vmap(op), (torch.rand(B0, B1, 1),))
test(vmap(op), (torch.rand(B1, 1, B0),), in_dims=2)
def test_sum_dim(self):
test = self._vmap_test
B0, B1 = 5, 7
# Single vmap, various in_dims / out_dims
test(lambda x: x.sum(()), [torch.randn([B0])])
test(lambda x: x.sum(0), [torch.randn([B0])])
test(lambda x: x.sum(-1), [torch.randn([B0])])
test(lambda x: x.sum(0), [torch.randn([B0, 3])])
test(lambda x: x.sum(-1), [torch.randn([2, 5, B0, 3])], in_dims=2)
test(lambda x: x.sum(2), [torch.randn([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(lambda x: x.sum(())), [torch.randn([B0, B1])])
test(vmap(lambda x: x.sum(0)), [torch.randn([B0, B1])])
test(vmap(lambda x: x.sum(-1)), [torch.randn([B0, B1])])
test(vmap(lambda x: x.sum(-2)), [torch.randn([B1, 2, 5, B0, 3])], in_dims=2)
test(vmap(lambda x: x.sum(2), in_dims=2), [torch.randn([2, 5, B0, B1, 3])],
in_dims=2, out_dims=2)
def test_reshape(self):
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
op = torch.reshape
test(op, (torch.rand(B0, 2 * 5), [2, 5]), in_dims=(0, None), check_view=True)
test(op, (torch.rand(2, B0, 5), [1, 1, 10]), in_dims=(1, None), check_view=False)
test(vmap(lambda t: t.reshape([-1])), (torch.rand(B0, B1, 2, 5),), check_view=True)
test(vmap(vmap(lambda t: t.reshape([-1]), in_dims=2), in_dims=1),
(torch.rand(3, B1, 2, B2, 5, B0),), in_dims=5, check_view=False)
def test_reshape_as(self):
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.reshape_as
test(op, (torch.rand(B0, 2 * 5), torch.rand(B0, 2, 5)), check_view=True)
test(op, (torch.rand(2 * 5), torch.rand(B0, 2, 5)), in_dims=(None, 0), check_view=True)
test(op, (torch.rand(B0, 2 * 5), torch.rand(2, 5)), in_dims=(0, None), check_view=True)
test(op, (torch.rand(2, B0, 5), torch.rand(1, 1, 10)), in_dims=(1, None), check_view=False)
test(vmap(op), (torch.rand(B0, B1, 2, 5), torch.randn(B0, B1, 10)), check_view=True)
test(vmap(vmap(op, in_dims=(2, None)), in_dims=(1, None)),
(torch.rand(3, B1, 2, B2, 5, B0), torch.rand(B0, 3 * 2 * 5)),
in_dims=(5, 0), check_view=False)
def test_result_type(self):
def scalar_tensor_with_dtype(op):
def wrapped(*args, **kwargs):
dtype = op(*args, **kwargs)
return torch.ones([], dtype=dtype)
return wrapped
test = self._vmap_test
op = scalar_tensor_with_dtype(torch.result_type)
B0 = 2
test(op, (torch.randn(B0), torch.randn(B0, dtype=torch.float64)),
check_propagates_grad=False)
test(op, (torch.randn(B0), torch.randint(10, [B0], dtype=torch.int64)),
check_propagates_grad=False)
test(lambda x: op(x, 1), (torch.randn(B0),), check_propagates_grad=False)
test(lambda x: op(x, 1.6), (torch.randn(B0),), check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1)), (torch.randn(B0),),
check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1.6, dtype=torch.double)),
(torch.randn(B0),), check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randn(B0, 2, dtype=torch.float64)),
check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randint(10, [B0, 2], dtype=torch.int64)),
check_propagates_grad=False)
test(lambda x: op(x, 1), (torch.randn(B0, 2),), check_propagates_grad=False)
test(lambda x: op(x, 1.6), (torch.randn(B0, 2),), check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1)), (torch.randn(B0, 2),),
check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1.6, dtype=torch.double)),
(torch.randn(B0, 2),), check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randn(B0, dtype=torch.float64)),
check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randint(10, [B0], dtype=torch.int64)),
check_propagates_grad=False)
def test_tensor_split(self):
test = self._vmap_view_test
op = torch.tensor_split
B0, B1, B2 = 7, 11, 13
# tests for torch.tensor_split(self, indices_or_sections: int, dim)
test(op, (torch.rand(B0, 2, 1024), 5, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 150, 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), 256, 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
# tests for torch.tensor_split(self, indices_or_sections: List[int], dim)
test(op, (torch.rand(B0, 2, 1024), [50, 100, 378, 890], -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), [50, 100, 212, 345, 0, 378, 890], 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), [50, 100, 212, 345, 0, 378, 890], 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, [4, 8, 9, 34, 29], 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
def test_split(self):
test = self._vmap_view_test
op = torch.split
B0, B1, B2 = 7, 11, 13
# tests for torch.split(self, split_size: int, dim)
test(op, (torch.rand(B0, 2, 1024), 101, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 130, 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), 256, 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
# tests for torch.split(self, split_size: List[int], dim)
test(op, (torch.rand(B0, 2, 1024), [1, 1020, 3], -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), [100] * 10 + [24], 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), [256] * 3 + [255], 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, [4] * 8 + [8] * 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
def test_trace(self):
op = torch.trace
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5),))
test(op, (torch.rand(2, B0, 5),), in_dims=1)
test(vmap(op), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(op, in_dims=2)), (torch.rand(B1, 2, B0, 5, B2),), in_dims=2)
def test_transpose(self):
op = torch.transpose
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(lambda x: op(x, 0, 1), (torch.rand(B0, 2, 5),))
test(lambda x: op(x, -1, -2), (torch.rand(B0, 2, 5),))
test(lambda x: op(x, 3, 1), (torch.rand(B0, 2, 5, 4, 6),))
test(lambda x: op(x, 1, 0), (torch.rand(2, B0, 5),), in_dims=1)
test(vmap(lambda x: op(x, 0, 1)), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(lambda x: op(x, 0, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 5, B2),), in_dims=2)
# Special case: scalar tensor
for dim1, dim2 in itertools.product([0, -1], [0, -1]):
x = torch.rand(B0)
result = vmap(lambda x: op(x, dim1, dim2))(x)
self.assertTrue(result is x)
def test_t(self):
op = torch.t
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5),))
test(op, (torch.rand(2, B0, 5),), in_dims=1)
test(vmap(op), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(op, in_dims=2)), (torch.rand(B1, 2, B0, 5, B2),), in_dims=2)
def test_T_numpy(self):
def op(t):
return t.T
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 3, 5),))
test(op, (torch.rand(B0),))
test(op, (torch.rand(2, B0, 3, 5),), in_dims=1)
test(vmap(op), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(op), (torch.rand(B1, 2, B0, 3, 5),), in_dims=2)
test(vmap(vmap(op, in_dims=2)), (torch.rand(B1, 2, B0, 3, B2, 5),), in_dims=2)
def test_to(self):
test = self._vmap_test
B0, B1 = 7, 11
test(lambda t: t.to('cpu'), (torch.rand(B0),))
test(lambda t: t.to(torch.double), (torch.rand(B0),))
test(lambda t, o: t.to(o), (torch.rand(B0), torch.randn(B0, dtype=torch.float64)))
test(lambda t, o: t.to(o),
(torch.rand(B0), torch.randn(B0, dtype=torch.float64)),
in_dims=(0, None))
test(vmap(lambda t: t.to(torch.double)), (torch.rand(B0, B1, 3),))
# also test some casting methods
test(lambda t: t.double(), (torch.rand(B0),))
test(lambda t: t.float(), (torch.rand(B0),))
test(lambda t: t.int(), (torch.rand(B0),), check_propagates_grad=False)
test(lambda t: t.long(), (torch.rand(B0),), check_propagates_grad=False)
def test_unfold(self):
op = torch.Tensor.unfold
test = self._vmap_view_test
B0, B1, B2 = 3, 2, 5
test(op, (torch.rand(B0, 7, 11), 0, 2, 1), in_dims=(0, None, None, None))
test(op, (torch.rand(7, B0, 11), 1, 4, 2), in_dims=(1, None, None, None))
test(vmap(op, in_dims=(0, None, None, None)),
(torch.rand(B1, 7, B0, 11), 1, 5, 1), in_dims=(2, None, None, None))
test(vmap(vmap(op, in_dims=(2, None, None, None)), in_dims=(0, None, None, None)),
(torch.rand(B1, 7, B0, 11, B2), -1, 2, 4), in_dims=(2, None, None, None))
def test_unbind(self):
test = self._vmap_view_test
op = torch.unbind
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 1024), -1), in_dims=(0, None))
test(op, (torch.rand(B0, 2, 0),))
test(op, (torch.rand(2, B0, 7), 0), in_dims=(1, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, 1023, B0, 5), 1),
in_dims=(2, None))
test(vmap(vmap(lambda t: op(t, dim=1), in_dims=2)),
(torch.rand(B1, 2, B0, 32, B2),), in_dims=2)
def test_view(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.view
# We should error out if the view would produce an incorrect result
with self.assertRaises(RuntimeError):
vmap(op, in_dims=(1, None))(torch.rand(2, B0, 5), [10])
test(op, (torch.rand(B0, 2 * 5), [2, 5]), in_dims=(0, None))
test(op, (torch.rand(B0, 4, 5), [1, 2, 1, 10]), in_dims=(0, None))
test(vmap(lambda t: t.view([-1])), (torch.rand(B0, B1, 2, 5, 3),))
test(vmap(vmap(lambda t: t.reshape([-1])), in_dims=1),
(torch.rand(B2, B0, B1, 3, 2, 5),), in_dims=1)
def test_view_as(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.view_as
# We should error out if the view would produce an incorrect result
with self.assertRaises(RuntimeError):
vmap(op, in_dims=(1, 0))(torch.rand(2, B0, 5), torch.rand(B0, 10))
test(op, (torch.rand(B0, 2 * 5), torch.rand(B0, 2, 5)))
test(op, (torch.rand(2 * 5), torch.rand(B0, 2, 5)), in_dims=(None, 0))
test(op, (torch.rand(B0, 2 * 5), torch.rand(2, 5)), in_dims=(0, None))
test(op, (torch.rand(B0, 4, 5), torch.rand(2, 1, 1, 10)), in_dims=(0, None))
test(vmap(op), (torch.rand(B0, B1, 2, 5), torch.randn(B0, B1, 10)))
test(vmap(vmap(op, in_dims=(0, None)), in_dims=(0, None)),
(torch.rand(B1, B2, B0, 3, 2, 5), torch.rand(B0, 3 * 2 * 5)),
in_dims=(2, 0))
def test_no_random_op_support(self):
B0 = 2
captured = torch.rand(3)
random_ops = [
# out-of-place on BatchedTensor
(torch.bernoulli, (torch.rand(B0, 1),)),
(lambda t: torch.bernoulli(t, p=0.5), (torch.rand(B0, 1),)),
(lambda t: torch.multinomial(t, 2), (torch.rand(B0, 3),)),
(torch.normal, (torch.randn(B0, 1), torch.randn(B0, 1))),
(lambda t: torch.normal(t, 1.), (torch.randn(B0, 1),)),
(lambda t: torch.normal(0., t), (torch.randn(B0, 1),)),
(torch.poisson, (torch.rand(B0, 1),)),
(torch.rand_like, (torch.rand(B0, 1),)),
(torch.randn_like, (torch.rand(B0, 1),)),
(lambda t: torch.randint_like(t, 2), (torch.rand(B0, 1),)),
(lambda t: torch.randint_like(t, 0, 2), (torch.rand(B0, 1),)),
# out-of-place on captured tensor
(lambda t: torch.bernoulli(captured), (torch.rand(B0),)),
(lambda t: torch.bernoulli(captured, p=0.5), (torch.rand(B0),)),
(lambda t: torch.multinomial(captured, 2), (torch.rand(B0),)),
(lambda t: torch.normal(captured, captured), (torch.randn(B0),)),
(lambda t: torch.normal(captured, 1.), (torch.randn(B0),)),
(lambda t: torch.normal(0., captured), (torch.randn(B0),)),
(lambda t: torch.poisson(captured), (torch.rand(B0),)),
(lambda t: torch.rand_like(captured), (torch.rand(B0),)),
            (lambda t: torch.randn_like(captured), (torch.rand(B0),)),
(lambda t: torch.randint_like(captured, 2), (torch.rand(B0),)),
(lambda t: torch.randint_like(captured, 0, 2), (torch.rand(B0),)),
# in-place on BatchedTensor
(lambda t: t.bernoulli_(), (torch.randn(B0, 1),)),
(lambda t: t.cauchy_(), (torch.randn(B0, 1),)),
(lambda t: t.exponential_(), (torch.randn(B0, 1),)),
(lambda t: t.geometric_(0.5), (torch.randn(B0, 1),)),
(lambda t: t.log_normal_(), (torch.randn(B0, 1),)),
(lambda t: t.normal_(), (torch.randn(B0, 1),)),
(lambda t: t.random_(), (torch.randn(B0, 1),)),
(lambda t: t.random_(0, 2), (torch.randn(B0, 1),)),
(lambda t: t.random_(2), (torch.randn(B0, 1),)),
(lambda t: t.uniform_(), (torch.randn(B0, 1),)),
# in-place on captured tensor
(lambda t: captured.bernoulli_(), (torch.randn(B0),)),
(lambda t: captured.cauchy_(), (torch.randn(B0),)),
(lambda t: captured.exponential_(), (torch.randn(B0),)),
(lambda t: captured.geometric_(0.5), (torch.randn(B0),)),
(lambda t: captured.log_normal_(), (torch.randn(B0),)),
(lambda t: captured.normal_(), (torch.randn(B0),)),
(lambda t: captured.random_(), (torch.randn(B0),)),
(lambda t: captured.random_(0, 2), (torch.randn(B0),)),
(lambda t: captured.random_(2), (torch.randn(B0),)),
(lambda t: captured.uniform_(), (torch.randn(B0),)),
# factory functions
(lambda t: torch.rand(1), (torch.randn(B0),)),
(lambda t: torch.randn(1), (torch.randn(B0),)),
(lambda t: torch.randint(5, [1]), (torch.randn(B0),)),
(lambda t: torch.randperm(5), (torch.randn(B0),)),
]
for op, args in random_ops:
with self.assertRaisesRegex(RuntimeError,
'vmap: We do not yet support calling random operations'):
vmap(op)(*args)
def construct_v(output, batch_size):
return torch.randn(batch_size, *output.shape,
dtype=output.dtype, device=output.device)
def as_tuple(x):
if isinstance(x, tuple):
return x
elif isinstance(x, list):
return tuple(x)
else:
return x,
def differentiable(args):
return tuple(arg for arg in as_tuple(args)
if isinstance(arg, torch.Tensor) and arg.requires_grad)
def _get_rand_no_zeros(*args, **kwargs):
requires_grad = kwargs.get('requires_grad', False)
kwargs_without_requires_grad = kwargs.copy()
kwargs_without_requires_grad['requires_grad'] = False
result = torch.rand(*args, **kwargs_without_requires_grad)
return result.clamp_min_(0.1).requires_grad_(requires_grad)
class TestVmapBatchedGradient(Namespace.TestVmapBase):
def _vmap_test(self, *args, **kwargs):
return _vmap_test(self, *args, **kwargs)
# Tests batched gradient computation of outputs = op(*args, **kwargs)
# by comparing it to a sequential map+stack fallback.
#
# output_process_fn: a function that maps the outputs to the part
# that should be differentiated.
# batch_size: the batch dim size for the batched grad
def _batched_grad_test(self, op, args, kwargs=None, output_process_fn=lambda x: x, batch_size=3):
if kwargs is None:
kwargs = {}
outputs = op(*args, **kwargs)
outputs = differentiable(output_process_fn(outputs))
batched_vectors = tuple(construct_v(out, batch_size) for out in outputs)
def vector_jacobian_product(*vectors):
return torch.autograd.grad(outputs, differentiable(args), vectors,
retain_graph=True)
self._vmap_test(vector_jacobian_product, batched_vectors,
check_propagates_grad=False)
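    # Illustrative sketch (hypothetical helper, never run by the suite) of the
    # batched vector-Jacobian-product pattern that _batched_grad_test compares
    # against a sequential fallback, written out for a single op.
    def _example_batched_vjp(self):
        x = torch.randn(2, 3, requires_grad=True)
        out = x.sin()
        def vjp(v):
            return torch.autograd.grad([out], [x], [v], retain_graph=True)[0]
        batched_v = torch.randn(5, 2, 3)
        # One vmap call computes all 5 vector-Jacobian products at once.
        return vmap(vjp)(batched_v)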
# Tests batched second grad computation of outputs = op(*args, **kwargs).
# by comparing it to a sequential map+stack fallback.
#
# output_process_fn: a function that maps the outputs to the part
# that should be differentiated.
# batch_size: the batch dim size for the batched grad
#
# NB: we only test computing batched gradients in the second gradient
# computation. One specific use case that does this is computing the hessian
# matrix of a scalar-valued function; this is useful in Bayesian Logistic
# Regression.
# It might be useful to have a test that computes batched first gradients and
# then uses those to compute batched second gradients in the future.
def _batched_grad_grad_test(self, op, args, kwargs=None, output_process_fn=lambda x: x, batch_size=3):
if kwargs is None:
kwargs = {}
outputs = op(*args, **kwargs)
outputs = differentiable(output_process_fn(outputs))
ones = tuple(torch.ones_like(out) for out in outputs)
# Same thing as summing together all of the outputs and calling .backward()
first_grads = torch.autograd.grad(outputs, differentiable(args), ones,
create_graph=True)
first_grads = differentiable(first_grads)
self.assertNotEqual(
len(first_grads), 0, "None of the first grads depend on the input!")
batched_vectors = tuple(construct_v(grad, batch_size) for grad in first_grads)
def vector_hessian_product(*vectors):
outputs = torch.autograd.grad(first_grads, differentiable(args), vectors,
retain_graph=True, allow_unused=True)
outputs = tuple(out for out in outputs if out is not None)
assert len(outputs) > 0
return outputs
self._vmap_test(vector_hessian_product, batched_vectors,
check_propagates_grad=False)
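    # Hypothetical illustration (not a test) of the Hessian use case described
    # above: a batched second gradient over the rows of the identity recovers
    # the full Hessian of a scalar-valued function.
    def _example_hessian_via_batched_grad(self):
        x = torch.randn(3, requires_grad=True)
        out = (x ** 3).sum()
        grad_x, = torch.autograd.grad(out, x, create_graph=True)
        def vhp(v):
            return torch.autograd.grad(grad_x, x, v, retain_graph=True)[0]
        # Each row of eye(3) picks out one column of the Hessian, diag(6 * x).
        return vmap(vhp)(torch.eye(3))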
def _test_arithmetic(self, op, device, test_grad_grad=True):
x = torch.randn(2, 3, requires_grad=True, device=device)
y = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
scalar = 3.14
self._batched_grad_test(op, (x, y))
self._batched_grad_test(op, (scalar, y))
self._batched_grad_test(op, (x, scalar))
if test_grad_grad:
self._batched_grad_grad_test(op, (x, y))
def test_add(self, device):
self._test_arithmetic(torch.add, device, test_grad_grad=False)
self._test_arithmetic(lambda x, y: x + y, device, test_grad_grad=False)
def test_sub(self, device):
self._test_arithmetic(torch.sub, device, test_grad_grad=False)
self._test_arithmetic(lambda x, y: x - y, device, test_grad_grad=False)
def test_mul(self, device):
self._test_arithmetic(torch.mul, device)
self._test_arithmetic(lambda x, y: x * y, device)
def test_div(self, device):
self._test_arithmetic(torch.div, device)
self._test_arithmetic(lambda x, y: x / y, device)
@allowVmapFallbackUsage
def test_binary_cross_entropy(self, device):
x = torch.sigmoid(torch.randn(3, 2, device=device, requires_grad=True))
target = torch.rand(3, 2, device=device)
op = functools.partial(F.binary_cross_entropy, target=target)
self._batched_grad_test(op, (x,), {})
self._batched_grad_grad_test(op, (x,), {})
def test_expand(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
def op(x):
return x.expand(5, 5, 2, 3)
self._batched_grad_test(op, (x,))
@allowVmapFallbackUsage
def test_index(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
index = torch.tensor([[0, 0], [1, 1]], device=device)
def op(x):
y = x * x
return y[index]
self._batched_grad_test(op, (x,))
self._batched_grad_grad_test(op, (x,))
def test_lgamma(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(Tensor.lgamma, (x,))
self._batched_grad_grad_test(Tensor.lgamma, (x,))
def test_log(self, device):
x = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
self._batched_grad_test(torch.log, (x,))
self._batched_grad_grad_test(torch.log, (x,))
def test_logsumexp(self, device):
x = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
def op(x):
return torch.logsumexp(x, -1)
self._batched_grad_test(op, (x,))
self._batched_grad_grad_test(op, (x,))
def test_log1p(self, device):
x = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
self._batched_grad_test(torch.log1p, (x,))
self._batched_grad_grad_test(torch.log1p, (x,))
@allowVmapFallbackUsage
def test_max(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(torch.max, (x,))
@allowVmapFallbackUsage
def test_median(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(torch.median, (x,))
@allowVmapFallbackUsage
def test_min(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(torch.min, (x,))
def test_permute(self, device):
x = torch.randn(2, 3, 5, requires_grad=True, device=device)
def op(x):
return x.permute(2, 0, 1)
self._batched_grad_test(op, (x,))
def test_reshape(self, device):
x = torch.randn(2, 3, 5, requires_grad=True, device=device)
def op(x):
return x.reshape([2 * 3, 5])
self._batched_grad_test(op, (x,))
def test_sigmoid(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(Tensor.sigmoid, (x,))
self._batched_grad_grad_test(Tensor.sigmoid, (x,))
def test_stack(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
y = torch.randn(2, 3, device=device, requires_grad=True)
def op(x, y):
return torch.stack([x, y])
self._batched_grad_test(op, (x, y))
def test_select(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x[1], (x,))
self._batched_grad_test(lambda x: x.select(1, 2), (x,))
self._batched_grad_test(lambda x: x.select(-1, 0), (x,))
def test_slice(self, device):
x = torch.randn(2, 3, 5, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x[0:1], (x,))
self._batched_grad_test(lambda x: x[:, 1:3], (x,))
self._batched_grad_test(lambda x: x[..., 1:3], (x,))
def test_trace(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
self._batched_grad_test(Tensor.trace, (x,))
@skipCUDAIfNoMagma
@allowVmapFallbackUsage
def test_symeig(self, device):
def op(x):
return torch.symeig(x, eigenvectors=True)[0]
x = torch.randn(3, 3, device=device, requires_grad=True)
self._batched_grad_test(op, (x,), {})
self._batched_grad_grad_test(op, (x,), {})
def test_threshold(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
self._batched_grad_test(lambda x: F.threshold(x, 0.5, 0.0), (x,))
@allowVmapFallbackUsage
def test_inplace_on_view(self, device):
leaf = torch.randn(4, 5, requires_grad=True)
def func(leaf):
# Make sure the function is non-trivially twice differentiable
base = leaf * leaf
view = base[0]
view.cos_()
return view
self._batched_grad_test(func, (leaf,), {})
self._batched_grad_grad_test(func, (leaf,), {})
@allowVmapFallbackUsage
def test_inplace_manyview(self, device):
leaf = torch.randn(4, 4, 5, requires_grad=True)
def func(leaf):
# Make sure the function is non-trivially twice differentiable
base = leaf * leaf
view = base.transpose(0, 2)
view = view[1]
view = view.diagonal()
view = view[::2]
view.cos_()
return view
self._batched_grad_test(func, (leaf,), {})
self._batched_grad_grad_test(func, (leaf,), {})
def test_diagonal(self, device):
x = torch.randn(4, 5, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x.diagonal(1, 0, 1), (x,))
x = torch.randn(3, 4, 5, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x.diagonal(0, -1, -2), (x,))
@allowVmapFallbackUsage
def test_unrelated_output(self, device):
B0 = 3
x = torch.randn([], requires_grad=True)
y = torch.randn([], requires_grad=True)
gy = torch.randn(B0, requires_grad=True)
def vjp(v):
res, = torch.autograd.grad(y, x, v, allow_unused=True)
return torch.zeros_like(x) if res is None else res
result = vmap(vjp)(gy)
self.assertEqual(result, torch.zeros(B0, *x.shape, device=device))
@allowVmapFallbackUsage
def test_unrelated_output_multiple_grad(self, device):
B0 = 3
x = torch.randn([], requires_grad=True)
y = torch.randn([], requires_grad=True)
gy = torch.randn(B0, requires_grad=True)
def vjp(v):
res, = torch.autograd.grad(y, x, v, allow_unused=True)
return torch.zeros_like(x) if res is None else res
_ = vjp(gy[0])
result = vmap(vjp)(gy)
self.assertEqual(result, torch.zeros(B0, *x.shape, device=device))
instantiate_device_type_tests(
TestVmapBatchedGradient,
globals(),
None,
)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_vmap.py
|
import sys
import torch
if __name__ == '__main__':
print(torch.jit.load(sys.argv[1]))
sys.exit(0)
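# Usage sketch (assumed invocation; the path argument is whatever TorchScript archive
# the caller wants to inspect):
#   python test/load_torchscript_model.py path/to/scripted_model.pt
# This prints the deserialized ScriptModule's repr and exits with status 0.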
|
pytorch-master
|
test/load_torchscript_model.py
|
# Owner(s): ["NNC"]
import torch
import numpy as np
import torch._C._te as te
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
import unittest
LLVM_ENABLED = torch._C._llvm_enabled()
def construct_adder(n: int, dtype=torch.float32):
A = te.BufHandle("A", [n], dtype)
B = te.BufHandle("B", [n], dtype)
def compute(i):
return A.load([i]) + B.load([i])
C = te.Compute("C", [n], compute)
loopnest = te.LoopNest([C])
loopnest.prepare_for_codegen()
stmt = te.simplify(loopnest.root_stmt())
return te.construct_codegen("ir_eval", stmt, [A, B, C])
class TestTensorExprPyBind(JitTestCase):
def test_simple_sum(self):
n = 32
cg = construct_adder(n)
tA = torch.randn(n)
tB = torch.randn(n)
tC = torch.empty(n)
cg.call([tA, tB, tC])
torch.testing.assert_close(tA + tB, tC)
def test_call_raw(self):
n = 16
cg = construct_adder(n, dtype=torch.float64)
tA = torch.randn(n, dtype=torch.float64)
tB = torch.randn(n, dtype=torch.float64)
tC = torch.empty(n, dtype=torch.float64)
cg.call_raw([tA.data_ptr(), tB.data_ptr(), tC.data_ptr()])
torch.testing.assert_close(tA + tB, tC)
def test_external_calls(self):
dtype = torch.float32
A = te.BufHandle("A", [1, 4], dtype)
B = te.BufHandle("B", [4, 1], dtype)
C = te.BufHandle("C", [1, 1], dtype)
s = te.ExternalCall(C, "nnc_aten_matmul", [A, B], [])
loopnest = te.LoopNest(s, [C])
loopnest.prepare_for_codegen()
codegen = te.construct_codegen("ir_eval", s, [A, B, C])
tA = torch.ones(1, 4)
tB = torch.ones(4, 1)
tC = torch.empty(1, 1)
codegen.call([tA, tB, tC])
torch.testing.assert_close(torch.matmul(tA, tB), tC)
def test_dynamic_shape(self):
dN = te.VarHandle(torch.int32)
A = te.BufHandle([dN], torch.float64)
B = te.BufHandle([dN], torch.float64)
def compute(i):
return A.load(i) - B.load(i)
C = te.Compute("C", [dN], compute)
loopnest = te.LoopNest([C])
loopnest.prepare_for_codegen()
cg = te.construct_codegen("ir_eval", loopnest.simplify(), [A, B, C, dN])
def test_with_shape(n):
tA = torch.randn(n, dtype=torch.double)
tB = torch.randn(n, dtype=torch.double)
tC = torch.empty(n, dtype=torch.double)
cg.call([tA, tB, tC, n])
torch.testing.assert_close(tA - tB, tC)
test_with_shape(8)
test_with_shape(31)
def test_dynamic_shape_2d(self):
dN = te.VarHandle(torch.int32)
dM = te.VarHandle(torch.int32)
A = te.BufHandle([dN, dM], torch.float64)
B = te.BufHandle([dN, dM], torch.float64)
def compute(i, j):
return A.load([i, j]) - B.load([i, j])
C = te.Compute("C", [dN, dM], compute)
loopnest = te.LoopNest([C])
loopnest.prepare_for_codegen()
cg = te.construct_codegen("ir_eval", loopnest.simplify(), [A, B, C, dN, dM])
def test_with_shape(n, m):
tA = torch.randn(n, m, dtype=torch.double)
tB = torch.randn(n, m, dtype=torch.double)
tC = torch.empty(n, m, dtype=torch.double)
cg.call([tA, tB, tC, n, m])
torch.testing.assert_close(tA - tB, tC)
test_with_shape(2, 4)
test_with_shape(5, 3)
def test_dtype_error(self):
te.BufHandle("a", [1], torch.float32) # ok
self.assertRaises(TypeError, lambda: te.BufHandle("a", [1], "float55"))
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_with_tensor_inputs(self):
def f(a, b, c):
return a + b + c
device, size = "cpu", (4, 4)
x = torch.rand(size, device=device)
y = torch.rand(size, device=device)
z = torch.rand(size, device=device)
graph_str = """
graph(%a.1 : Float(4, 4, strides=[4, 1], requires_grad=0, device=cpu),
%b.1 : Float(4, 4, strides=[4, 1], requires_grad=0, device=cpu),
%c.1 : Float(4, 4, strides=[4, 1], requires_grad=0, device=cpu)):
%6 : int = prim::Constant[value=1]()
%7 : Float(4, 4, strides=[4, 1], requires_grad=0, device=cpu) = aten::add(%a.1, %b.1, %6)
%3 : Float(4, 4, strides=[4, 1], requires_grad=0, device=cpu) = aten::add(%7, %c.1, %6)
return (%3)
"""
graph = torch._C.parse_ir(graph_str)
kernel = te.TensorExprKernel(graph)
res1 = kernel.run((x, y, z))
res2 = kernel.fallback((x, y, z))
correct = f(x, y, z)
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_with_scalar_inputs(self):
def f(a, b, c):
return a + b + c
x = torch.tensor(0.1, dtype=torch.float, device="cpu")
y = torch.tensor(0.6, dtype=torch.float, device="cpu")
z = torch.tensor(0.7, dtype=torch.float, device="cpu")
graph_str = """
graph(%a.1 : Float(requires_grad=0, device=cpu),
%b.1 : Float(requires_grad=0, device=cpu),
%c.1 : Float(requires_grad=0, device=cpu)):
%3 : int = prim::Constant[value=1]()
%6 : Float(requires_grad=0, device=cpu) = aten::add(%a.1, %b.1, %3)
%9 : Float(requires_grad=0, device=cpu) = aten::add(%6, %c.1, %3)
return (%9)
"""
graph = torch._C.parse_ir(graph_str)
kernel = te.TensorExprKernel(graph)
res1 = kernel.run((x, y, z))
res2 = kernel.fallback((x, y, z))
correct = f(x, y, z)
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_shape_prop(self):
device, size = "cpu", (4, 4)
x = torch.rand(size, device=device)
y = torch.rand(size, device=device)
graph_str = """
graph(%a : Tensor, %b : Tensor):
%c : Tensor = aten::mul(%a, %b)
return (%c)
"""
graph = torch._C.parse_ir(graph_str)
exception_thrown = False
try:
kernel = te.TensorExprKernel(graph)
except RuntimeError:
# Graph doesn't have shape info for inputs => compilation should
# fail
exception_thrown = True
pass
assert exception_thrown
# Inject shape info and try compiling again
example_inputs = [torch.rand(4, 4), torch.rand(4, 4)]
torch._C._te.annotate_input_shapes(graph, example_inputs)
torch._C._jit_pass_propagate_shapes_on_graph(graph)
# Now compilation should pass
kernel = te.TensorExprKernel(graph)
res = kernel.run((x, y))
correct = torch.mul(x, y)
np.testing.assert_allclose(res.numpy(), correct.numpy(), atol=1e-5)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_shape_prop_module(self):
class TestModule(torch.nn.Module):
def forward(self, x, y):
return x * x + y
graph = torch.jit.script(TestModule()).graph
# Try compiling the graph as-is. It should fail because it doesn't have
# shape info.
exception_thrown = False
try:
kernel = te.TensorExprKernel(graph)
except RuntimeError:
exception_thrown = True
pass
assert exception_thrown
# Try injecting shape info for graph inputs
example_inputs = [torch.rand(4, 4), torch.rand(4, 4)]
exception_thrown = False
try:
torch._C._te.annotate_input_shapes(graph, example_inputs)
except RuntimeError:
# Graph has a 'self' argument for which we can't set shapes
exception_thrown = True
pass
assert exception_thrown
# Remove 'self' argument and try annotating shapes one more time
torch._C._te.remove_unused_self_argument(graph)
# Inject shape info and try compiling again
torch._C._te.annotate_input_shapes(graph, example_inputs)
torch._C._jit_pass_propagate_shapes_on_graph(graph)
# Now compilation should pass
kernel = te.TensorExprKernel(graph)
device, size = "cpu", (4, 4)
x = torch.rand(size, device=device)
y = torch.rand(size, device=device)
res = kernel.run((x, y))
correct = TestModule().forward(x, y)
np.testing.assert_allclose(res.numpy(), correct.numpy(), atol=1e-5)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_with_t(self):
def f(a):
return a.t()
device, size = "cpu", (3, 4)
x = torch.rand(size, device=device)
graph_str = """
graph(%a.1 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu)):
%3 : Float(4, 3, strides=[4, 1], requires_grad=0, device=cpu) = aten::t(%a.1)
return (%3)
"""
graph = torch._C.parse_ir(graph_str)
kernel = te.TensorExprKernel(graph)
res1 = kernel.run((x,))
res2 = kernel.fallback((x,))
correct = f(x)
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_with_transpose(self):
def f(a):
return a.transpose(-1, -2)
device, size = "cpu", (3, 4)
x = torch.rand(size, device=device)
graph_str = """
graph(%a.1 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu)):
%2 : int = prim::Constant[value=-1]()
%3 : int = prim::Constant[value=-2]()
%4 : Float(4, 3, strides=[4, 1], requires_grad=0, device=cpu) = aten::transpose(%a.1, %2, %3)
return (%4)
"""
graph = torch._C.parse_ir(graph_str)
kernel = te.TensorExprKernel(graph)
res1 = kernel.run((x,))
res2 = kernel.fallback((x,))
correct = f(x)
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_with_permute(self):
def f(a):
return a.permute([2, 1, 0])
device, size = "cpu", (3, 4, 5)
x = torch.rand(size, device=device)
graph_str = """
graph(%a.1 : Float(3, 4, 5, strides=[20, 5, 1], requires_grad=0, device=cpu)):
%1 : int = prim::Constant[value=2]()
%2 : int = prim::Constant[value=1]()
%3 : int = prim::Constant[value=0]()
%4 : int[] = prim::ListConstruct(%1, %2, %3)
%5 : Float(5, 4, 3, strides=[12, 3, 1], requires_grad=0, device=cpu) = aten::permute(%a.1, %4)
return (%5)
"""
graph = torch._C.parse_ir(graph_str)
kernel = te.TensorExprKernel(graph)
res1 = kernel.run((x,))
res2 = kernel.fallback((x,))
correct = f(x)
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_with_custom_lowering(self):
def f(a):
return a.nan_to_num()
device = "cpu"
x = torch.ones((2, 2), device=device)
x[0, 0] = x[1, 1] = torch.nan
graph_str = """
graph(%x : Float(2, 2, strides=[2, 1], requires_grad=0, device=cpu)):
%none : NoneType = prim::Constant()
%y : Float(2, 2, strides=[2, 1], requires_grad=0, device=cpu) = aten::nan_to_num(%x, %none, %none, %none)
return (%y)
"""
graph = torch._C.parse_ir(graph_str)
def my_custom_lowering(inputs, out_shape, out_stride, out_type, device):
def compute(idxs):
load = inputs[0].as_buf().load(idxs)
return te.ifThenElse(
te.ExprHandle.isnan(load), te.ExprHandle.float(0.0), load
)
return te.Compute2("custom_nan_to_num", out_shape, compute)
kernel = te.TensorExprKernel(graph, {"aten::nan_to_num": my_custom_lowering})
res1 = kernel.run((x,))
res2 = kernel.fallback((x,))
correct = f(x)
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_with_expand(self):
def f(a):
return a.expand((2, 3, 4))
device = "cpu"
x = torch.rand((1, 3, 1), device=device)
graph_str = """
graph(%a : Float(1, 3, 1, strides=[3, 1, 1], requires_grad=0, device=cpu)):
%1 : int = prim::Constant[value=2]()
%2 : int = prim::Constant[value=3]()
%3 : int = prim::Constant[value=4]()
%4 : int[] = prim::ListConstruct(%1, %2, %3)
%5 : bool = prim::Constant[value=0]()
%6 : Float(2, 3, 4, strides=[12, 4, 0], requires_grad=0, device=cpu) = aten::expand(%a, %4, %5)
return (%6)
"""
graph = torch._C.parse_ir(graph_str)
kernel = te.TensorExprKernel(graph)
res1 = kernel.run((x,))
res2 = kernel.fallback((x,))
correct = f(x)
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_alloc_in_loop(self):
a, tmp, b = [
te.BufHandle(name, [1], torch.float32) for name in ["a", "tmp", "b"]
]
body = te.Block([tmp.store([0], a.load([0])), b.store([0], tmp.load([0]))])
for _ in range(4):
i = te.VarHandle("i", torch.int32)
body = te.For.make(i, 0, 100, body)
nest = te.LoopNest(body, [b])
nest.prepare_for_codegen()
f = te.construct_codegen("llvm", nest.simplify(), [a, b])
ta, tb = [torch.ones(1) for _ in range(2)]
f.call([ta.data_ptr(), tb.data_ptr()])
class TestExprHandlePyBind(JitTestCase):
def test_unary_ops(self):
unary_operators = {
torch.sin: torch._C._te.sin,
torch.cos: torch._C._te.cos,
torch.tan: torch._C._te.tan,
torch.asin: torch._C._te.asin,
torch.acos: torch._C._te.acos,
torch.atan: torch._C._te.atan,
torch.sinh: torch._C._te.sinh,
torch.cosh: torch._C._te.cosh,
torch.tanh: torch._C._te.tanh,
torch.sigmoid: torch._C._te.sigmoid,
torch.exp: torch._C._te.exp,
torch.expm1: torch._C._te.expm1,
torch.abs: torch._C._te.abs,
torch.log: torch._C._te.log,
torch.log2: torch._C._te.log2,
torch.log10: torch._C._te.log10,
torch.log1p: torch._C._te.log1p,
torch.erf: torch._C._te.erf,
torch.erfc: torch._C._te.erfc,
torch.sqrt: torch._C._te.sqrt,
torch.rsqrt: torch._C._te.rsqrt,
torch.ceil: torch._C._te.ceil,
torch.floor: torch._C._te.floor,
torch.round: torch._C._te.round,
torch.trunc: torch._C._te.trunc,
torch.lgamma: torch._C._te.lgamma,
torch.frac: torch._C._te.frac,
}
def construct_te_fn(op, n: int, dtype=torch.float32):
A = torch._C._te.BufHandle("A", [n], dtype)
def compute(i):
return op(A.load([i]))
C = te.Compute("C", [n], compute)
loopnest = te.LoopNest([C])
loopnest.prepare_for_codegen()
stmt = te.simplify(loopnest.root_stmt())
return te.construct_codegen("ir_eval", stmt, [A, C])
n = 10
a = torch.rand(n)
for torch_op, te_op in unary_operators.items():
ref = torch_op(a)
te_fn = construct_te_fn(te_op, n, torch.float32)
res = torch.empty(n)
te_fn.call([a, res])
assert torch.allclose(ref, res, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/test_tensorexpr_pybind.py
|
# Owner(s): ["module: fx"]
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import io
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
import torch.nn.utils._stateless as _stateless
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument, _format_arg
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
from fx.test_pass_infra import TestPassManager # noqa: F401
from fx.test_common_passes import TestCommonPass # noqa: F401
from fx.test_cse_pass import TestCSEPass # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Set, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
find_library_location,
run_tests,
skipIfSlowGradcheckEnv,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
def wrapped_named_tup(p1, *, p2):
return p1.x + p2.y
wrap(wrapped_named_tup)
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
def _custom_fx_repr_fn(self) -> str:
return f"Pair(x={_format_arg(self.x)}, y={_format_arg(self.y)})"
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
super().setUp()
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
super().tearDown()
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_concrete_arg_none_assert(self):
class Foo(torch.nn.Module):
def forward(self, x, val=None):
return x if val is None else x + val
f = Foo()
traced = torch.fx.symbolic_trace(f, concrete_args={'val' : None})
with self.assertRaisesRegex(AssertionError, 'val has been specialized to have value None'):
traced(torch.randn(5), torch.randn(5))
x = torch.randn(5)
torch.testing.assert_close(traced(x), f(x))
def test_trace_multiple_funcs(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
return x + y
def minus_forward(self, x, y):
return x - y
def multiply_forward(self, x, y):
return x * y
f = Foo()
x, y = torch.randn(5), torch.randn(5)
print(torch.__version__)
tracer = Tracer()
torch.testing.assert_close(GraphModule(f, tracer.trace(f))(x, y), f(x, y))
tracer.traced_func_name = "minus_forward"
torch.testing.assert_close(
GraphModule(f, tracer.trace(f))(x, y),
f.minus_forward(x, y),
)
tracer.traced_func_name = "multiply_forward"
torch.testing.assert_close(
GraphModule(f, tracer.trace(f))(x, y),
f.multiply_forward(x, y),
)
tracer.traced_func_name = "add_forward"
with self.assertRaisesRegex(AssertionError, "doesn't exist in"):
tracer.trace(f)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
# saving the original list because we will insert new nodes as a part of a test
orig_graph_nodes = list(graph.nodes)
for node in orig_graph_nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
# verify that copying the node does not lose the stack trace
new_node = graph.node_copy(node)
self.assertTrue(new_node.stack_trace is not None)
assert 'test_fx.py' in new_node.stack_trace
def test_stack_traces_with_transformer(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
gm = GraphModule(tracer.root, graph)
new_gm = Transformer(gm).transform()
# nodes after Transformer should still preserve the original node's stack trace
for node in new_gm.graph.nodes:
if node.op in {'placeholder', 'output'}:
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, "Unsupported call target " + target
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
            # Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
            # 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
@unittest.skip("Hotfix for SEV remediation")
def test_trace_buffer_slice(self):
bs, d_hid = 10, 23
class ExampleCode(torch.nn.Module):
def __init__(self):
super().__init__()
self.mm_param = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.mm_param2 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.lin = torch.nn.Linear(d_hid, d_hid)
self.register_buffer('buffer', torch.randn(bs + 100, d_hid))
def forward(self, x):
x = torch.mm(x, self.mm_param)
skip_connection = x
x = torch.relu(x)
x = torch.mm(x, self.mm_param) + self.buffer[:x.shape[0]]
x = self.lin(x)
x = torch.relu(x)
x = x + skip_connection
x = torch.mm(x, self.mm_param2)
x = self.lin(x)
return x
ec = ExampleCode()
traced = torch.fx.symbolic_trace(ec)
x = torch.randn(bs, d_hid)
torch.testing.assert_allclose(ec(x), traced(x))
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_tensor_attribute_coalseced(self):
def count_attrs(fx_module):
targets = set()
            for node in fx_module.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_tuple_no_subscript(self):
def foo(x : Tuple):
return x[0]
traced = torch.fx.symbolic_trace(foo)
x = (torch.randn(5, 3),)
torch.testing.assert_allclose(traced(x), x[0])
bio = io.BytesIO()
torch.save(traced, bio)
bio.seek(0)
loaded = torch.load(bio)
torch.testing.assert_allclose(loaded(x), x[0])
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_torch_op_overloads(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.add.Tensor(a, a)
return b
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
for node in gm.graph.nodes:
if node.op == 'call_function':
assert isinstance(node.target, torch._ops.OpOverload)
assert node.target.__name__ == 'add.Tensor'
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_remove_uses_with_custom_filter(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu, lambda x: x != neg)
self.assertTrue(neg in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
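# The next several tests exercise shape_prop.ShapeProp, which annotates each node's
# meta['tensor_meta'] so shape, stride, and memory_format can be checked after propagation.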
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Size] = None
output_stride : Optional[Tuple[int, ...]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
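# The tests that follow cover torch.fx.Interpreter and Transformer: node-by-node
# execution, run_node/call_function overrides, partial evaluation via initial_env,
# and graph-to-graph transformation.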
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_interpreter_default_args(self):
class Model(torch.nn.Module):
def forward(self, x, y=3.14159):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
out = interp.run(x)
torch.testing.assert_allclose(out, x + 3.14159)
def test_interpreter_not_enough_args(self):
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
with self.assertRaisesRegex(RuntimeError,
'Expected positional argument for parameter y, but one was not passed in'):
out = interp.run(x)
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_immutable_list_pytree_ops(self):
rand_tensor = torch.randn(5, 3)
l = immutable_list([3, [rand_tensor, 42]])
flattened, spec = pytree.tree_flatten(l)
assert flattened == [3, rand_tensor, 42]
unflattened = pytree.tree_unflatten(flattened, spec)
assert unflattened == l
assert isinstance(unflattened, immutable_list)
def test_immutable_dict_pytree_ops(self):
rand_tensor = torch.randn(5, 3)
d = immutable_dict({'a': 3, 'b': [rand_tensor, 42]})
flattened, spec = pytree.tree_flatten(d)
assert flattened == [3, rand_tensor, 42]
unflattened = pytree.tree_unflatten(flattened, spec)
assert unflattened == d
assert isinstance(unflattened, immutable_dict)
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_prepend_self(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.prepend(b)
x.append(b)
self.assertEqual(len(graph.nodes), 3)
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_named_tuple_inlined(self):
class NamedTupMod(torch.nn.Module):
def forward(self, inp):
return wrapped_named_tup(Pair(inp, 1.2), p2=Pair(3.4, inp))
m = NamedTupMod()
input = torch.rand(3, 4)
ref = m(input)
traced = symbolic_trace(m)
res = traced(input)
self.assertEqual(ref, res)
# Check Pair NamedTuple works when inlined into the function call.
ph = call_func = None
for node in traced.graph.nodes:
if node.op == "placeholder":
ph = node
elif node.op == "call_function" and node.target == wrapped_named_tup:
node.update_arg(0, Pair(ph, 1.2))
node.update_kwarg("p2", Pair(3.4, ph))
call_func = node
break
self.assertTrue(call_func is not None)
self.assertTrue(isinstance(call_func.args[0], Pair))
self.assertTrue(isinstance(call_func.kwargs["p2"], Pair))
self.assertEqual(_format_arg(call_func.args[0]), "Pair(x=%inp, y=1.2)")
self.assertEqual(_format_arg(call_func.kwargs["p2"]), "Pair(x=3.4, y=%inp)")
traced.graph.eliminate_dead_code()
traced.recompile()
res = traced(input)
self.assertEqual(ref, res)
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
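# The RewritingTracer tests below check that Python `assert` statements are rewritten
# at the AST level so they survive symbolic tracing, and that wrapped leaf functions
# and submodules still appear in the generated code.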
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_fx_stateless(self):
class MockModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.l1 = torch.nn.Linear(1, 1)
self.register_buffer('buffer', torch.ones(1))
def forward(self, x):
return self.l1(x) + self.buffer
module = MockModule()
x = torch.rand((1, 1))
weight = torch.tensor([[1.0]], requires_grad=True)
bias = torch.tensor([0.0], requires_grad=True)
buffer = torch.tensor([0.0])
parameters = {'l1.weight': weight,
'l1.bias': bias,
'buffer': buffer}
fx_module = torch.fx.symbolic_trace(module)
res = _stateless.functional_call(fx_module, parameters, x)
res.backward()
self.assertIsNotNone(weight.grad)
self.assertIsNotNone(bias.grad)
self.assertIsNone(buffer.grad)
# Gradients were not calculated for the original module's parameters and buffers
self.assertIsNone(module.l1.weight.grad)
self.assertIsNone(module.l1.bias.grad)
self.assertIsNone(module.buffer.grad)
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
def __call__(self, *args):
self.called = True
self.calling = True
return super(type(self), self).__call__(*args)
self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test assert custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
@unittest.skipIf(sys.version_info >= (3, 10), "Does not work on Python-3.10")
def test_assert(self):
def f(x):
assert x > 1
return x + 1
try:
torch.fx.proxy.TracerBase.trace_asserts = True
traced = symbolic_trace(f)
finally:
torch.fx.proxy.TracerBase.trace_asserts = False
self.assertEqual(f(2), traced(2))
with self.assertRaises(AssertionError):
traced(0)
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
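# The CodeGen tests below swap in a custom code generator so the generated forward
# takes a single list of tensors; process_inputs/process_outputs adapt the calling
# convention, and Transformer/Interpreter are expected to respect it.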
def test_custom_codegen(self):
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
def f(a, b):
return a + b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
self.assertEqual(nf(*vals), f(*vals))
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf(vals), f(*vals))
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))
ts_f = torch.jit.script(nf)
self.assertEqual(nf(vals), ts_f(vals))
def test_custom_codegen_with_transformer(self):
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
def f(a, b):
return a + b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
self.assertEqual(nf(*vals), f(*vals))
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
self.assertEqual(nf(vals), f(*vals))
transformed_gm = Transformer(nf).transform()
self.assertEqual(nf(vals), transformed_gm(vals))
def test_interpreter_with_codegen(self):
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
def generate_output(self, output_args):
return f'return list({repr(output_args)})'
def process_outputs(self, outputs):
return list(outputs)
def f(a, b):
a = a + b
b = a + b
return a, b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
self.assertEqual(Interpreter(nf).run(vals), nf(vals))
def test_imul_code_print(self):
graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
graph.call_function(operator.imul, (a, b), {})
graph.output(a)
gm = torch.fx.GraphModule({}, graph)
gm.recompile()
self.assertEqual(gm(2, 3), 6)
self.assertIn("a *= b", gm.code)
def test_deepcopy_tracer(self):
def fn(x, y):
return (x + y).relu().sin()
tracer = Tracer()
tracer_before = copy.deepcopy(tracer)
tracer.trace(fn)
tracer_after = copy.deepcopy(tracer)
self.assertEqual(str(tracer.graph), str(tracer_after.graph))
self.assertTrue(not hasattr(tracer_before, 'graph') or str(tracer.graph) != str(tracer_before.graph))
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
if not isinstance(op.op, types.BuiltinFunctionType):
raise unittest.SkipTest("This path doesn't work on Python functions")
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
super().setUp()
self.maxDiff = None
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
super().tearDown()
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_val_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
f'Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
super().setUp()
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
super().tearDown()
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"bilinear": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"native_channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"gelu": BUILT_IN_FUNC,
"hardshrink": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"linear": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pad": BUILT_IN_FUNC,
"pairwise_distance": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"prelu": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout1d": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
}
# List of nn.functional ops that take Tensor inputs but lack type annotations
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable objects such as modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
# No signature available, or the object is not supported by inspect
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 11):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
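# Illustrative sketch (not part of the original test suite; helper name is
# hypothetical): `generate_tests` above attaches one test method per functional
# via `setattr`. In its simplest hedged form, the pattern is a loop that builds
# closures and installs them on the class before test discovery runs:
def _example_attach_dynamic_tests(cls, names):
    """Attach a trivial generated test per name to ``cls`` (sketch only)."""
    for name in names:
        def _test(self, _name=name):
            # The default argument freezes the current loop value in the closure.
            self.assertTrue(isinstance(_name, str))
        setattr(cls, f"test_generated_{name}", _test)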
instantiate_device_type_tests(TestOperatorSignatures, globals())
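# Illustrative sketch (not part of the original test suite; helper name is
# hypothetical): the schema matching in TestOperatorSignatures mirrors
# `inspect.Signature.bind`, which raises TypeError when the arguments do not
# fit a signature and otherwise lets defaults be applied:
def _example_try_bind(fn, *args, **kwargs):
    """Return bound arguments for ``fn``, or None if they do not fit (sketch only)."""
    import inspect
    try:
        bound = inspect.signature(fn).bind(*args, **kwargs)
        bound.apply_defaults()
        return bound
    except TypeError:
        return None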
@skipIfNoTorchVision
@skipIfSlowGradcheckEnv
class TestVisionTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_resnet50_fpn_v2": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn_v2": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn_v2": PROXY_ITERATED,
"ssd300_vgg16": PROXY_ITERATED,
"fcos_resnet50_fpn": PROXY_ITERATED,
"ssdlite320_mobilenet_v3_large": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, x, kwargs):
def run_test(self):
model = torchvision_models.get_model(name, **kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
err, exc = self.UNTRACEABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
err, exc = self.UNSCRIPTABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k in torchvision_models.list_models(module=torchvision_models):
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k in torchvision_models.list_models(module=torchvision_models.segmentation):
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k in torchvision_models.list_models(module=torchvision_models.detection):
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k in torchvision_models.list_models(module=torchvision_models.video):
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112) if k not in {'mvit_v1_b', 'mvit_v2_s'} else torch.rand(1, 3, 16, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_fx.py
|
# Owner(s): ["module: numpy"]
import torch
import numpy as np
from itertools import product
from torch.testing._internal.common_utils import \
(TestCase, run_tests)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, onlyCPU, dtypes, skipMeta)
from torch.testing._internal.common_dtype import all_types_and_complex_and
# For testing handling NumPy objects and sending tensors to / accepting
# arrays from NumPy.
class TestNumPyInterop(TestCase):
# Note: the warning this tests for only appears once per program, so
# other instances of this warning should be addressed to avoid
# the tests depending on the order in which they're run.
@onlyCPU
def test_numpy_non_writeable(self, device):
arr = np.zeros(5)
arr.flags['WRITEABLE'] = False
self.assertWarns(UserWarning, lambda: torch.from_numpy(arr))
@onlyCPU
def test_numpy_unresizable(self, device) -> None:
x = np.zeros((2, 2))
y = torch.from_numpy(x)
with self.assertRaises(ValueError):
x.resize((5, 5))
z = torch.randn(5, 5)
w = z.numpy()
with self.assertRaises(RuntimeError):
z.resize_(10, 10)
with self.assertRaises(ValueError):
w.resize((10, 10))
@onlyCPU
def test_to_numpy(self, device) -> None:
def get_castable_tensor(shape, dtype):
if dtype.is_floating_point:
dtype_info = torch.finfo(dtype)
# can't directly use min and max, because for double, max - min
# is greater than double range and sampling always gives inf.
low = max(dtype_info.min, -1e10)
high = min(dtype_info.max, 1e10)
t = torch.empty(shape, dtype=torch.float64).uniform_(low, high)
else:
# can't directly use min and max, because for int64_t, max - min
# is greater than int64_t range and triggers UB.
low = max(torch.iinfo(dtype).min, int(-1e10))
high = min(torch.iinfo(dtype).max, int(1e10))
t = torch.empty(shape, dtype=torch.int64).random_(low, high)
return t.to(dtype)
dtypes = [
torch.uint8,
torch.int8,
torch.short,
torch.int,
torch.half,
torch.float,
torch.double,
torch.long,
]
for dtp in dtypes:
# 1D
sz = 10
x = get_castable_tensor(sz, dtp)
y = x.numpy()
for i in range(sz):
self.assertEqual(x[i], y[i])
# 1D > 0 storage offset
xm = get_castable_tensor(sz * 2, dtp)
x = xm.narrow(0, sz - 1, sz)
self.assertTrue(x.storage_offset() > 0)
y = x.numpy()
for i in range(sz):
self.assertEqual(x[i], y[i])
def check2d(x, y):
for i in range(sz1):
for j in range(sz2):
self.assertEqual(x[i][j], y[i][j])
# empty
x = torch.tensor([]).to(dtp)
y = x.numpy()
self.assertEqual(y.size, 0)
# contiguous 2D
sz1 = 3
sz2 = 5
x = get_castable_tensor((sz1, sz2), dtp)
y = x.numpy()
check2d(x, y)
self.assertTrue(y.flags['C_CONTIGUOUS'])
# with storage offset
xm = get_castable_tensor((sz1 * 2, sz2), dtp)
x = xm.narrow(0, sz1 - 1, sz1)
y = x.numpy()
self.assertTrue(x.storage_offset() > 0)
check2d(x, y)
self.assertTrue(y.flags['C_CONTIGUOUS'])
# non-contiguous 2D
x = get_castable_tensor((sz2, sz1), dtp).t()
y = x.numpy()
check2d(x, y)
self.assertFalse(y.flags['C_CONTIGUOUS'])
# with storage offset
xm = get_castable_tensor((sz2 * 2, sz1), dtp)
x = xm.narrow(0, sz2 - 1, sz2).t()
y = x.numpy()
self.assertTrue(x.storage_offset() > 0)
check2d(x, y)
# non-contiguous 2D with holes
xm = get_castable_tensor((sz2 * 2, sz1 * 2), dtp)
x = xm.narrow(0, sz2 - 1, sz2).narrow(1, sz1 - 1, sz1).t()
y = x.numpy()
self.assertTrue(x.storage_offset() > 0)
check2d(x, y)
if dtp != torch.half:
# check writeable
x = get_castable_tensor((3, 4), dtp)
y = x.numpy()
self.assertTrue(y.flags.writeable)
y[0][1] = 3
self.assertTrue(x[0][1] == 3)
y = x.t().numpy()
self.assertTrue(y.flags.writeable)
y[0][1] = 3
self.assertTrue(x[0][1] == 3)
def test_to_numpy_bool(self, device) -> None:
x = torch.tensor([True, False], dtype=torch.bool)
self.assertEqual(x.dtype, torch.bool)
y = x.numpy()
self.assertEqual(y.dtype, np.bool_)
for i in range(len(x)):
self.assertEqual(x[i], y[i])
x = torch.tensor([True], dtype=torch.bool)
self.assertEqual(x.dtype, torch.bool)
y = x.numpy()
self.assertEqual(y.dtype, np.bool_)
self.assertEqual(x[0], y[0])
def test_to_numpy_force_argument(self, device) -> None:
for force in [False, True]:
for requires_grad in [False, True]:
for sparse in [False, True]:
for conj in [False, True]:
data = [[1 + 2j, -2 + 3j], [-1 - 2j, 3 - 2j]]
x = torch.tensor(data, requires_grad=requires_grad, device=device)
y = x
if sparse:
if requires_grad:
continue
x = x.to_sparse()
if conj:
x = x.conj()
y = x.resolve_conj()
expect_error = requires_grad or sparse or conj or not device == 'cpu'
error_msg = r"Use (t|T)ensor\..*(\.numpy\(\))?"
if not force and expect_error:
self.assertRaisesRegex((RuntimeError, TypeError), error_msg, lambda: x.numpy())
self.assertRaisesRegex((RuntimeError, TypeError), error_msg, lambda: x.numpy(force=False))
elif force and sparse:
self.assertRaisesRegex(TypeError, error_msg, lambda: x.numpy(force=True))
else:
self.assertEqual(x.numpy(force=force), y)
def test_from_numpy(self, device) -> None:
dtypes = [
np.double,
np.float64,
np.float16,
np.complex64,
np.complex128,
np.int64,
np.int32,
np.int16,
np.int8,
np.uint8,
np.longlong,
np.bool_,
]
complex_dtypes = [
np.complex64,
np.complex128,
]
for dtype in dtypes:
array = np.array([1, 2, 3, 4], dtype=dtype)
tensor_from_array = torch.from_numpy(array)
# TODO: change to tensor equality check once HalfTensor
# implements `==`
for i in range(len(array)):
self.assertEqual(tensor_from_array[i], array[i])
# ufunc 'remainder' not supported for complex dtypes
if dtype not in complex_dtypes:
# This is a special test case for Windows
# https://github.com/pytorch/pytorch/issues/22615
array2 = array % 2
tensor_from_array2 = torch.from_numpy(array2)
for i in range(len(array2)):
self.assertEqual(tensor_from_array2[i], array2[i])
# Test unsupported type
array = np.array([1, 2, 3, 4], dtype=np.uint16)
with self.assertRaises(TypeError):
tensor_from_array = torch.from_numpy(array)
# check storage offset
x = np.linspace(1, 125, 125)
x.shape = (5, 5, 5)
x = x[1]
expected = torch.arange(1, 126, dtype=torch.float64).view(5, 5, 5)[1]
self.assertEqual(torch.from_numpy(x), expected)
# check noncontiguous
x = np.linspace(1, 25, 25)
x.shape = (5, 5)
expected = torch.arange(1, 26, dtype=torch.float64).view(5, 5).t()
self.assertEqual(torch.from_numpy(x.T), expected)
# check noncontiguous with holes
x = np.linspace(1, 125, 125)
x.shape = (5, 5, 5)
x = x[:, 1]
expected = torch.arange(1, 126, dtype=torch.float64).view(5, 5, 5)[:, 1]
self.assertEqual(torch.from_numpy(x), expected)
# check zero dimensional
x = np.zeros((0, 2))
self.assertEqual(torch.from_numpy(x).shape, (0, 2))
x = np.zeros((2, 0))
self.assertEqual(torch.from_numpy(x).shape, (2, 0))
# check ill-sized strides raise exception
x = np.array([3., 5., 8.])
x.strides = (3,)
self.assertRaises(ValueError, lambda: torch.from_numpy(x))
@skipMeta
def test_from_list_of_ndarray_warning(self, device):
warning_msg = r"Creating a tensor from a list of numpy.ndarrays is extremely slow"
with self.assertWarnsOnceRegex(UserWarning, warning_msg):
torch.tensor([np.array([0]), np.array([1])], device=device)
def test_ctor_with_invalid_numpy_array_sequence(self, device):
# Invalid list of numpy array
with self.assertRaisesRegex(ValueError, "expected sequence of length"):
torch.tensor([np.random.random(size=(3, 3)), np.random.random(size=(3, 0))], device=device)
# Invalid list of list of numpy array
with self.assertRaisesRegex(ValueError, "expected sequence of length"):
torch.tensor([[np.random.random(size=(3, 3)), np.random.random(size=(3, 2))]], device=device)
with self.assertRaisesRegex(ValueError, "expected sequence of length"):
torch.tensor([[np.random.random(size=(3, 3)), np.random.random(size=(3, 3))],
[np.random.random(size=(3, 3)), np.random.random(size=(3, 2))]], device=device)
# expected shape is `[1, 2, 3]`, hence we try to iterate over 0-D array
# leading to type error : not a sequence.
with self.assertRaisesRegex(TypeError, "not a sequence"):
torch.tensor([[np.random.random(size=(3)), np.random.random()]], device=device)
# list of list or numpy array.
with self.assertRaisesRegex(ValueError, "expected sequence of length"):
torch.tensor([[1, 2, 3], np.random.random(size=(2,)), ], device=device)
@onlyCPU
def test_ctor_with_numpy_scalar_ctor(self, device) -> None:
dtypes = [
np.double,
np.float64,
np.float16,
np.int64,
np.int32,
np.int16,
np.uint8,
np.bool_,
]
for dtype in dtypes:
self.assertEqual(dtype(42), torch.tensor(dtype(42)).item())
@onlyCPU
def test_numpy_index(self, device):
i = np.array([0, 1, 2], dtype=np.int32)
x = torch.randn(5, 5)
for idx in i:
self.assertFalse(isinstance(idx, int))
self.assertEqual(x[idx], x[int(idx)])
@onlyCPU
def test_numpy_array_interface(self, device):
types = [
torch.DoubleTensor,
torch.FloatTensor,
torch.HalfTensor,
torch.LongTensor,
torch.IntTensor,
torch.ShortTensor,
torch.ByteTensor,
]
dtypes = [
np.float64,
np.float32,
np.float16,
np.int64,
np.int32,
np.int16,
np.uint8,
]
for tp, dtype in zip(types, dtypes):
# Only concrete class can be given where "Type[number[_64Bit]]" is expected
if np.dtype(dtype).kind == 'u': # type: ignore[misc]
# .type expects a XxxTensor, which have no type hints on
# purpose, so ignore during mypy type checking
x = torch.tensor([1, 2, 3, 4]).type(tp) # type: ignore[call-overload]
array = np.array([1, 2, 3, 4], dtype=dtype)
else:
x = torch.tensor([1, -2, 3, -4]).type(tp) # type: ignore[call-overload]
array = np.array([1, -2, 3, -4], dtype=dtype)
# Test __array__ w/o dtype argument
asarray = np.asarray(x)
self.assertIsInstance(asarray, np.ndarray)
self.assertEqual(asarray.dtype, dtype)
for i in range(len(x)):
self.assertEqual(asarray[i], x[i])
# Test __array_wrap__, same dtype
abs_x = np.abs(x)
abs_array = np.abs(array)
self.assertIsInstance(abs_x, tp)
for i in range(len(x)):
self.assertEqual(abs_x[i], abs_array[i])
# Test __array__ with dtype argument
for dtype in dtypes:
x = torch.IntTensor([1, -2, 3, -4])
asarray = np.asarray(x, dtype=dtype)
self.assertEqual(asarray.dtype, dtype)
# Only concrete class can be given where "Type[number[_64Bit]]" is expected
if np.dtype(dtype).kind == 'u': # type: ignore[misc]
wrapped_x = np.array([1, -2, 3, -4], dtype=dtype)
for i in range(len(x)):
self.assertEqual(asarray[i], wrapped_x[i])
else:
for i in range(len(x)):
self.assertEqual(asarray[i], x[i])
# Test some math functions with float types
float_types = [torch.DoubleTensor, torch.FloatTensor]
float_dtypes = [np.float64, np.float32]
for tp, dtype in zip(float_types, float_dtypes):
x = torch.tensor([1, 2, 3, 4]).type(tp) # type: ignore[call-overload]
array = np.array([1, 2, 3, 4], dtype=dtype)
for func in ['sin', 'sqrt', 'ceil']:
ufunc = getattr(np, func)
res_x = ufunc(x)
res_array = ufunc(array)
self.assertIsInstance(res_x, tp)
for i in range(len(x)):
self.assertEqual(res_x[i], res_array[i])
# Test functions with boolean return value
for tp, dtype in zip(types, dtypes):
x = torch.tensor([1, 2, 3, 4]).type(tp) # type: ignore[call-overload]
array = np.array([1, 2, 3, 4], dtype=dtype)
geq2_x = np.greater_equal(x, 2)
geq2_array = np.greater_equal(array, 2).astype('uint8')
self.assertIsInstance(geq2_x, torch.ByteTensor)
for i in range(len(x)):
self.assertEqual(geq2_x[i], geq2_array[i])
@onlyCPU
def test_multiplication_numpy_scalar(self, device) -> None:
for np_dtype in [np.float32, np.float64, np.int32, np.int64, np.int16, np.uint8]:
for t_dtype in [torch.float, torch.double]:
# mypy raises an error when np.floatXY(2.0) is called
# even though this is valid code
np_sc = np_dtype(2.0) # type: ignore[abstract, arg-type]
t = torch.ones(2, requires_grad=True, dtype=t_dtype)
r1 = t * np_sc
self.assertIsInstance(r1, torch.Tensor)
self.assertTrue(r1.dtype == t_dtype)
self.assertTrue(r1.requires_grad)
r2 = np_sc * t
self.assertIsInstance(r2, torch.Tensor)
self.assertTrue(r2.dtype == t_dtype)
self.assertTrue(r2.requires_grad)
@onlyCPU
def test_parse_numpy_int(self, device):
# Only concrete class can be given where "Type[number[_64Bit]]" is expected
self.assertRaisesRegex(RuntimeError, "Overflow",
lambda: torch.mean(torch.randn(1, 1), np.uint64(-1))) # type: ignore[call-overload]
# https://github.com/pytorch/pytorch/issues/29252
for nptype in [np.int16, np.int8, np.uint8, np.int32, np.int64]:
scalar = 3
np_arr = np.array([scalar], dtype=nptype)
np_val = np_arr[0]
# np integral type can be treated as a python int in native functions with
# int parameters:
self.assertEqual(torch.ones(5).diag(scalar), torch.ones(5).diag(np_val))
self.assertEqual(torch.ones([2, 2, 2, 2]).mean(scalar), torch.ones([2, 2, 2, 2]).mean(np_val))
# numpy integral type parses like a python int in custom python bindings:
self.assertEqual(torch.Storage(np_val).size(), scalar) # type: ignore[attr-defined]
tensor = torch.tensor([2], dtype=torch.int)
tensor[0] = np_val
self.assertEqual(tensor[0], np_val)
# Original reported issue, np integral type parses to the correct
# PyTorch integral type when passed for a `Scalar` parameter in
# arithmetic operations:
t = torch.from_numpy(np_arr)
self.assertEqual((t + np_val).dtype, t.dtype)
self.assertEqual((np_val + t).dtype, t.dtype)
def test_has_storage_numpy(self, device):
for dtype in [np.float32, np.float64, np.int64,
np.int32, np.int16, np.uint8]:
arr = np.array([1], dtype=dtype)
self.assertIsNotNone(torch.tensor(arr, device=device, dtype=torch.float32).storage())
self.assertIsNotNone(torch.tensor(arr, device=device, dtype=torch.double).storage())
self.assertIsNotNone(torch.tensor(arr, device=device, dtype=torch.int).storage())
self.assertIsNotNone(torch.tensor(arr, device=device, dtype=torch.long).storage())
self.assertIsNotNone(torch.tensor(arr, device=device, dtype=torch.uint8).storage())
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_numpy_scalar_cmp(self, device, dtype):
if dtype.is_complex:
tensors = (torch.tensor(complex(1, 3), dtype=dtype, device=device),
torch.tensor([complex(1, 3), 0, 2j], dtype=dtype, device=device),
torch.tensor([[complex(3, 1), 0], [-1j, 5]], dtype=dtype, device=device))
else:
tensors = (torch.tensor(3, dtype=dtype, device=device),
torch.tensor([1, 0, -3], dtype=dtype, device=device),
torch.tensor([[3, 0, -1], [3, 5, 4]], dtype=dtype, device=device))
for tensor in tensors:
if dtype == torch.bfloat16:
with self.assertRaises(TypeError):
np_array = tensor.cpu().numpy()
continue
np_array = tensor.cpu().numpy()
for t, a in product((tensor.flatten()[0], tensor.flatten()[0].item()),
(np_array.flatten()[0], np_array.flatten()[0].item())):
self.assertEqual(t, a)
if dtype == torch.complex64 and torch.is_tensor(t) and type(a) == np.complex64:
# TODO: Imaginary part is dropped in this case. Need fix.
# https://github.com/pytorch/pytorch/issues/43579
self.assertFalse(t == a)
else:
self.assertTrue(t == a)
instantiate_device_type_tests(TestNumPyInterop, globals())
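# Illustrative sketch (not part of the original test suite; helper name is
# hypothetical): the interop tests above rely on `torch.from_numpy` and
# `Tensor.numpy` sharing memory with the source buffer, so writes on one side
# are visible on the other. A minimal, hedged demonstration of that contract:
def _example_numpy_shares_memory():
    """Show that torch.from_numpy creates a view over the NumPy buffer (sketch only)."""
    arr = np.zeros(3, dtype=np.float64)
    t = torch.from_numpy(arr)  # zero-copy: tensor and array share storage
    t[0] = 1.0                 # mutating the tensor is visible in the array
    return bool(arr[0] == 1.0)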
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_numpy_interop.py
|
# Owner(s): ["module: tests"]
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, skipIfRocm, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_modules import modules, module_db
# For testing TestCase methods and torch.testing functions
class TestTesting(TestCase):
# Ensure that assertEqual handles numpy arrays properly
@dtypes(*all_types_and_complex_and(torch.bool, torch.half))
def test_assertEqual_numpy(self, device, dtype):
S = 10
test_sizes = [
(),
(0,),
(S,),
(S, S),
(0, S),
(S, 0)]
for test_size in test_sizes:
a = make_tensor(test_size, dtype=dtype, device=device, low=-5, high=5)
a_n = a.cpu().numpy()
msg = f'size: {test_size}'
self.assertEqual(a_n, a, rtol=0, atol=0, msg=msg)
self.assertEqual(a, a_n, rtol=0, atol=0, msg=msg)
self.assertEqual(a_n, a_n, rtol=0, atol=0, msg=msg)
def test_assertEqual_longMessage(self):
actual = "actual"
expected = "expected"
long_message = self.longMessage
try:
# Capture the default error message by forcing TestCase.longMessage = False
self.longMessage = False
try:
self.assertEqual(actual, expected)
except AssertionError as error:
default_msg = str(error)
else:
raise AssertionError("AssertionError not raised")
self.longMessage = True
extra_msg = "sentinel"
with self.assertRaisesRegex(AssertionError, re.escape(f"{default_msg} : {extra_msg}")):
self.assertEqual(actual, expected, msg=extra_msg)
finally:
self.longMessage = long_message
def _isclose_helper(self, tests, device, dtype, equal_nan, atol=1e-08, rtol=1e-05):
for test in tests:
a = torch.tensor((test[0],), device=device, dtype=dtype)
b = torch.tensor((test[1],), device=device, dtype=dtype)
actual = torch.isclose(a, b, equal_nan=equal_nan, atol=atol, rtol=rtol)
expected = test[2]
self.assertEqual(actual.item(), expected)
def test_isclose_bool(self, device):
tests = (
(True, True, True),
(False, False, True),
(True, False, False),
(False, True, False),
)
self._isclose_helper(tests, device, torch.bool, False)
@dtypes(torch.uint8,
torch.int8, torch.int16, torch.int32, torch.int64)
def test_isclose_integer(self, device, dtype):
tests = (
(0, 0, True),
(0, 1, False),
(1, 0, False),
)
self._isclose_helper(tests, device, dtype, False)
# atol and rtol tests
tests = [
(0, 1, True),
(1, 0, False),
(1, 3, True),
]
self._isclose_helper(tests, device, dtype, False, atol=.5, rtol=.5)
if dtype is torch.uint8:
tests = [
(-1, 1, False),
(1, -1, False)
]
else:
tests = [
(-1, 1, True),
(1, -1, True)
]
self._isclose_helper(tests, device, dtype, False, atol=1.5, rtol=.5)
@onlyNativeDeviceTypes
@dtypes(torch.float16, torch.float32, torch.float64)
def test_isclose_float(self, device, dtype):
tests = (
(0, 0, True),
(0, -1, False),
(float('inf'), float('inf'), True),
(-float('inf'), float('inf'), False),
(float('inf'), float('nan'), False),
(float('nan'), float('nan'), False),
(0, float('nan'), False),
(1, 1, True),
)
self._isclose_helper(tests, device, dtype, False)
# atol and rtol tests
eps = 1e-2 if dtype is torch.half else 1e-6
tests = (
(0, 1, True),
(0, 1 + eps, False),
(1, 0, False),
(1, 3, True),
(1 - eps, 3, False),
(-.25, .5, True),
(-.25 - eps, .5, False),
(.25, -.5, True),
(.25 + eps, -.5, False),
)
self._isclose_helper(tests, device, dtype, False, atol=.5, rtol=.5)
# equal_nan = True tests
tests = (
(0, float('nan'), False),
(float('inf'), float('nan'), False),
(float('nan'), float('nan'), True),
)
self._isclose_helper(tests, device, dtype, True)
@unittest.skipIf(IS_SANDCASTLE, "Skipping because doesn't work on sandcastle")
@dtypes(torch.complex64, torch.complex128)
def test_isclose_complex(self, device, dtype):
tests = (
(complex(1, 1), complex(1, 1 + 1e-8), True),
(complex(0, 1), complex(1, 1), False),
(complex(1, 1), complex(1, 0), False),
(complex(1, 1), complex(1, float('nan')), False),
(complex(1, float('nan')), complex(1, float('nan')), False),
(complex(1, 1), complex(1, float('inf')), False),
(complex(float('inf'), 1), complex(1, float('inf')), False),
(complex(-float('inf'), 1), complex(1, float('inf')), False),
(complex(-float('inf'), 1), complex(float('inf'), 1), False),
(complex(float('inf'), 1), complex(float('inf'), 1), True),
(complex(float('inf'), 1), complex(float('inf'), 1 + 1e-4), False),
)
self._isclose_helper(tests, device, dtype, False)
# atol and rtol tests
eps = 1e-6
tests = (
# Complex versions of float tests (real part)
(complex(0, 0), complex(1, 0), True),
(complex(0, 0), complex(1 + eps, 0), False),
(complex(1, 0), complex(0, 0), False),
(complex(1, 0), complex(3, 0), True),
(complex(1 - eps, 0), complex(3, 0), False),
(complex(-.25, 0), complex(.5, 0), True),
(complex(-.25 - eps, 0), complex(.5, 0), False),
(complex(.25, 0), complex(-.5, 0), True),
(complex(.25 + eps, 0), complex(-.5, 0), False),
# Complex versions of float tests (imaginary part)
(complex(0, 0), complex(0, 1), True),
(complex(0, 0), complex(0, 1 + eps), False),
(complex(0, 1), complex(0, 0), False),
(complex(0, 1), complex(0, 3), True),
(complex(0, 1 - eps), complex(0, 3), False),
(complex(0, -.25), complex(0, .5), True),
(complex(0, -.25 - eps), complex(0, .5), False),
(complex(0, .25), complex(0, -.5), True),
(complex(0, .25 + eps), complex(0, -.5), False),
)
self._isclose_helper(tests, device, dtype, False, atol=.5, rtol=.5)
# atol and rtol tests for isclose
tests = (
# Complex-specific tests
(complex(1, -1), complex(-1, 1), False),
(complex(1, -1), complex(2, -2), True),
(complex(-math.sqrt(2), math.sqrt(2)),
complex(-math.sqrt(.5), math.sqrt(.5)), True),
(complex(-math.sqrt(2), math.sqrt(2)),
complex(-math.sqrt(.501), math.sqrt(.499)), False),
(complex(2, 4), complex(1., 8.8523607), True),
(complex(2, 4), complex(1., 8.8523607 + eps), False),
(complex(1, 99), complex(4, 100), True),
)
self._isclose_helper(tests, device, dtype, False, atol=.5, rtol=.5)
# equal_nan = True tests
tests = (
(complex(1, 1), complex(1, float('nan')), False),
(complex(1, 1), complex(float('nan'), 1), False),
(complex(float('nan'), 1), complex(float('nan'), 1), True),
(complex(float('nan'), 1), complex(1, float('nan')), True),
(complex(float('nan'), float('nan')), complex(float('nan'), float('nan')), True),
)
self._isclose_helper(tests, device, dtype, True)
# Tests that isclose with rtol or atol values less than zero throws a
# RuntimeError
@dtypes(torch.bool, torch.uint8,
torch.int8, torch.int16, torch.int32, torch.int64,
torch.float16, torch.float32, torch.float64)
def test_isclose_atol_rtol_greater_than_zero(self, device, dtype):
t = torch.tensor((1,), device=device, dtype=dtype)
with self.assertRaises(RuntimeError):
torch.isclose(t, t, atol=-1, rtol=1)
with self.assertRaises(RuntimeError):
torch.isclose(t, t, atol=1, rtol=-1)
with self.assertRaises(RuntimeError):
torch.isclose(t, t, atol=-1, rtol=-1)
def test_isclose_equality_shortcut(self):
# For values >= 2**53, integers differing by 1 can no longer be differentiated by torch.float64 or lower precision
# floating point dtypes. Thus, even with rtol == 0 and atol == 0, these tensors would be considered close if
# they were not compared as integers.
a = torch.tensor(2 ** 53, dtype=torch.int64)
b = a + 1
self.assertFalse(torch.isclose(a, b, rtol=0, atol=0))
@dtypes(torch.float16, torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_isclose_nan_equality_shortcut(self, device, dtype):
if dtype.is_floating_point:
a = b = torch.nan
else:
a = complex(torch.nan, 0)
b = complex(0, torch.nan)
expected = True
tests = [(a, b, expected)]
self._isclose_helper(tests, device, dtype, equal_nan=True, rtol=0, atol=0)
@dtypes(torch.bool, torch.long, torch.float, torch.cfloat)
def test_make_tensor(self, device, dtype):
def check(size, low, high, requires_grad, noncontiguous):
if dtype not in [torch.float, torch.cfloat]:
requires_grad = False
t = make_tensor(size, dtype=dtype, device=device, low=low, high=high,
requires_grad=requires_grad, noncontiguous=noncontiguous)
self.assertEqual(t.shape, size)
self.assertEqual(t.device, torch.device(device))
self.assertEqual(t.dtype, dtype)
low = -9 if low is None else low
high = 9 if high is None else high
if t.numel() > 0 and dtype in [torch.long, torch.float]:
self.assertTrue(t.le(high).logical_and(t.ge(low)).all().item())
self.assertEqual(t.requires_grad, requires_grad)
if t.numel() > 1:
self.assertEqual(t.is_contiguous(), not noncontiguous)
else:
self.assertTrue(t.is_contiguous())
for size in (tuple(), (0,), (1,), (1, 1), (2,), (2, 3), (8, 16, 32)):
check(size, None, None, False, False)
check(size, 2, 4, True, True)
def test_make_tensor_complex32(self, device):
# verify that we can generate torch.complex32 tensor
t = make_tensor((1, 2, 3), dtype=torch.complex32, device=device)
self.assertEqual(t.dtype, torch.complex32)
# The following tests (test_cuda_assert_*) are added to ensure test suite terminates early
# when CUDA assert was thrown. Because all subsequent test will fail if that happens.
# These tests are slow because they spawn another process to run the test suite.
# See: https://github.com/pytorch/pytorch/issues/49019
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support device side asserts")
@onlyCUDA
@slowTest
def test_cuda_assert_should_stop_common_utils_test_suite(self, device):
# test to ensure common_utils.py override has early termination for CUDA.
stderr = TestCase.runWithPytorchAPIUsageStderr("""\
#!/usr/bin/env python3
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, slowTest)
class TestThatContainsCUDAAssertFailure(TestCase):
@slowTest
def test_throw_unrecoverable_cuda_exception(self):
x = torch.rand(10, device='cuda')
# cause unrecoverable CUDA exception, recoverable on CPU
y = x[torch.tensor([25])].cpu()
@slowTest
def test_trivial_passing_test_case_on_cpu_cuda(self):
x1 = torch.tensor([0., 1.], device='cuda')
x2 = torch.tensor([0., 1.], device='cpu')
self.assertEqual(x1, x2)
if __name__ == '__main__':
run_tests()
""")
# should capture CUDA error
self.assertIn('CUDA error: device-side assert triggered', stderr)
# should run only 1 test because it throws unrecoverable error.
self.assertIn('errors=1', stderr)
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support device side asserts")
@onlyCUDA
@slowTest
def test_cuda_assert_should_stop_common_device_type_test_suite(self, device):
# test to ensure common_device_type.py override has early termination for CUDA.
stderr = TestCase.runWithPytorchAPIUsageStderr("""\
#!/usr/bin/env python3
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, slowTest)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
class TestThatContainsCUDAAssertFailure(TestCase):
@slowTest
def test_throw_unrecoverable_cuda_exception(self, device):
x = torch.rand(10, device=device)
# cause unrecoverable CUDA exception, recoverable on CPU
y = x[torch.tensor([25])].cpu()
@slowTest
def test_trivial_passing_test_case_on_cpu_cuda(self, device):
x1 = torch.tensor([0., 1.], device=device)
x2 = torch.tensor([0., 1.], device='cpu')
self.assertEqual(x1, x2)
instantiate_device_type_tests(
TestThatContainsCUDAAssertFailure,
globals(),
only_for='cuda'
)
if __name__ == '__main__':
run_tests()
""")
# should capture CUDA error
self.assertIn('CUDA error: device-side assert triggered', stderr)
# should run only 1 test because it throws unrecoverable error.
self.assertIn('errors=1', stderr)
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support device side asserts")
@onlyCUDA
@slowTest
def test_cuda_assert_should_not_stop_common_distributed_test_suite(self, device):
# test to ensure the common_distributed.py override does not terminate CUDA tests early.
stderr = TestCase.runWithPytorchAPIUsageStderr("""\
#!/usr/bin/env python3
import torch
from torch.testing._internal.common_utils import (run_tests, slowTest)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_distributed import MultiProcessTestCase
class TestThatContainsCUDAAssertFailure(MultiProcessTestCase):
@slowTest
def test_throw_unrecoverable_cuda_exception(self, device):
x = torch.rand(10, device=device)
# cause unrecoverable CUDA exception, recoverable on CPU
y = x[torch.tensor([25])].cpu()
@slowTest
def test_trivial_passing_test_case_on_cpu_cuda(self, device):
x1 = torch.tensor([0., 1.], device=device)
x2 = torch.tensor([0., 1.], device='cpu')
self.assertEqual(x1, x2)
instantiate_device_type_tests(
TestThatContainsCUDAAssertFailure,
globals(),
only_for='cuda'
)
if __name__ == '__main__':
run_tests()
""")
# we are currently disabling CUDA early termination for distributed tests.
self.assertIn('errors=2', stderr)
@expectedFailureMeta # This is only supported for CPU and CUDA
@onlyNativeDeviceTypes
def test_get_supported_dtypes(self, device):
# Test the `get_supported_dtypes` helper function.
# We acquire the dtypes for a few ops dynamically and verify them against
# the correct statically described values.
ops_to_test = list(filter(lambda op: op.name in ['atan2', 'topk', 'xlogy'], op_db))
for op in ops_to_test:
dynamic_dtypes = opinfo.utils.get_supported_dtypes(op, op.sample_inputs_func, self.device_type)
dynamic_dispatch = opinfo.utils.dtypes_dispatch_hint(dynamic_dtypes)
if self.device_type == 'cpu':
dtypes = op.dtypes
else: # device_type ='cuda'
dtypes = op.dtypesIfCUDA
self.assertTrue(set(dtypes) == set(dynamic_dtypes))
self.assertTrue(set(dtypes) == set(dynamic_dispatch.dispatch_fn()))
instantiate_device_type_tests(TestTesting, globals())
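# Illustrative sketch (not part of the original test suite; helper name is
# hypothetical): the isclose tests above exercise the usual closeness rule,
# roughly |actual - expected| <= atol + rtol * |expected| for finite values,
# with separate handling of NaN and inf. A minimal, hedged restatement of that
# rule for real scalars:
def _example_isclose(actual, expected, rtol=1e-05, atol=1e-08):
    """Closeness rule for finite real numbers, mirroring torch.isclose (sketch only)."""
    return abs(actual - expected) <= atol + rtol * abs(expected)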
class TestFrameworkUtils(TestCase):
@skipIfRocm
@unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows")
@unittest.skipIf(IS_SANDCASTLE, "Skipping because doesn't work on sandcastle")
def test_filtering_env_var(self):
# Test environment variable selected device type test generator.
test_filter_file_template = """\
#!/usr/bin/env python3
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
class TestEnvironmentVariable(TestCase):
def test_trivial_passing_test(self, device):
x1 = torch.tensor([0., 1.], device=device)
x2 = torch.tensor([0., 1.], device='cpu')
self.assertEqual(x1, x2)
instantiate_device_type_tests(
TestEnvironmentVariable,
globals(),
)
if __name__ == '__main__':
run_tests()
"""
test_bases_count = len(get_device_type_test_bases())
# Test without setting env var should run everything.
env = dict(os.environ)
for k in ['CI', PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY]:
if k in env.keys():
del env[k]
_, stderr = TestCase.run_process_no_exception(test_filter_file_template, env=env)
self.assertIn(f'Ran {test_bases_count} test', stderr.decode('ascii'))
# Test with setting only_for should only run 1 test.
env[PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY] = 'cpu'
_, stderr = TestCase.run_process_no_exception(test_filter_file_template, env=env)
self.assertIn('Ran 1 test', stderr.decode('ascii'))
# Test with setting except_for should run 1 less device type from default.
del env[PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY]
env[PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY] = 'cpu'
_, stderr = TestCase.run_process_no_exception(test_filter_file_template, env=env)
self.assertIn(f'Ran {test_bases_count-1} test', stderr.decode('ascii'))
# Test with setting both should throw exception
env[PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY] = 'cpu'
_, stderr = TestCase.run_process_no_exception(test_filter_file_template, env=env)
self.assertNotIn('OK', stderr.decode('ascii'))
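# Illustrative sketch (not part of the original test suite; helper name is
# hypothetical): the filtering test above drives device-type test generation
# through environment variables. A hedged example of building an environment
# that restricts a child test run to CPU only, reusing the imported key
# constant rather than hard-coding the variable name:
def _example_cpu_only_env():
    """Build an environment dict that limits device-type tests to CPU (sketch only)."""
    env = dict(os.environ)
    env[PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY] = 'cpu'
    return env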
def make_assert_close_inputs(actual: Any, expected: Any) -> List[Tuple[Any, Any]]:
"""Makes inputs for :func:`torch.testing.assert_close` functions based on two examples.
Args:
actual (Any): Actual input.
expected (Any): Expected input.
Returns:
List[Tuple[Any, Any]]: Pair of example inputs, as well as the example inputs wrapped in sequences
(:class:`tuple`, :class:`list`), and mappings (:class:`dict`, :class:`~collections.OrderedDict`).
"""
return [
(actual, expected),
# tuple vs. tuple
((actual,), (expected,)),
# list vs. list
([actual], [expected]),
# tuple vs. list
((actual,), [expected]),
# dict vs. dict
({"t": actual}, {"t": expected}),
# OrderedDict vs. OrderedDict
(collections.OrderedDict([("t", actual)]), collections.OrderedDict([("t", expected)])),
# dict vs. OrderedDict
({"t": actual}, collections.OrderedDict([("t", expected)])),
# list of tuples vs. tuple of lists
([(actual,)], ([expected],)),
# list of dicts vs. tuple of OrderedDicts
([{"t": actual}], (collections.OrderedDict([("t", expected)]),)),
# dict of lists vs. OrderedDict of tuples
({"t": [actual]}, collections.OrderedDict([("t", (expected,))])),
]
def assert_close_with_inputs(actual: Any, expected: Any) -> Iterator[Callable]:
"""Yields :func:`torch.testing.assert_close` with predefined positional inputs based on two examples.
.. note::
Every test that does not test for a specific input should iterate over this to maximize the coverage.
Args:
actual (Any): Actual input.
expected (Any): Expected input.
Yields:
Callable: :func:`torch.testing.assert_close` with predefined positional inputs.
"""
for inputs in make_assert_close_inputs(actual, expected):
yield functools.partial(torch.testing.assert_close, *inputs)
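# A minimal usage sketch (illustrative only; the tests below follow the same pattern):
# each yielded helper is assert_close already bound to one container wrapping of the
# inputs, so callers only pass keyword options such as tolerances.
#
#   for fn in assert_close_with_inputs(torch.tensor(1.0), torch.tensor(1.0)):
#       fn(rtol=0.0, atol=0.0)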
class TestAssertClose(TestCase):
def test_mismatching_types_subclasses(self):
actual = torch.rand(())
expected = torch.nn.Parameter(actual)
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_mismatching_types_type_equality(self):
actual = torch.empty(())
expected = torch.nn.Parameter(actual)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(TypeError, str(type(expected))):
fn(allow_subclasses=False)
def test_mismatching_types(self):
actual = torch.empty(2)
expected = actual.numpy()
for fn, allow_subclasses in itertools.product(assert_close_with_inputs(actual, expected), (True, False)):
with self.assertRaisesRegex(TypeError, str(type(expected))):
fn(allow_subclasses=allow_subclasses)
def test_unknown_type(self):
actual = "0"
expected = "0"
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(TypeError, str(type(actual))):
fn()
def test_mismatching_shape(self):
actual = torch.empty(())
expected = actual.clone().reshape((1,))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, "shape"):
fn()
@unittest.skipIf(not torch.backends.mkldnn.is_available(), reason="MKLDNN is not available.")
def test_unknown_layout(self):
actual = torch.empty((2, 2))
expected = actual.to_mkldnn()
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(ValueError, "layout"):
fn()
def test_meta(self):
actual = torch.empty((2, 2), device="meta")
expected = torch.empty((2, 2), device="meta")
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_mismatching_layout(self):
strided = torch.empty((2, 2))
sparse_coo = strided.to_sparse()
sparse_csr = strided.to_sparse_csr()
for actual, expected in itertools.combinations((strided, sparse_coo, sparse_csr), 2):
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, "layout"):
fn()
def test_mismatching_layout_no_check(self):
strided = torch.randn((2, 2))
sparse_coo = strided.to_sparse()
sparse_csr = strided.to_sparse_csr()
for actual, expected in itertools.combinations((strided, sparse_coo, sparse_csr), 2):
for fn in assert_close_with_inputs(actual, expected):
fn(check_layout=False)
def test_mismatching_dtype(self):
actual = torch.empty((), dtype=torch.float)
expected = actual.clone().to(torch.int)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, "dtype"):
fn()
def test_mismatching_dtype_no_check(self):
actual = torch.ones((), dtype=torch.float)
expected = actual.clone().to(torch.int)
for fn in assert_close_with_inputs(actual, expected):
fn(check_dtype=False)
def test_mismatching_stride(self):
actual = torch.empty((2, 2))
expected = torch.as_strided(actual.clone().t().contiguous(), actual.shape, actual.stride()[::-1])
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, "stride"):
fn(check_stride=True)
def test_mismatching_stride_no_check(self):
actual = torch.rand((2, 2))
expected = torch.as_strided(actual.clone().t().contiguous(), actual.shape, actual.stride()[::-1])
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_only_rtol(self):
actual = torch.empty(())
expected = actual.clone()
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaises(ValueError):
fn(rtol=0.0)
def test_only_atol(self):
actual = torch.empty(())
expected = actual.clone()
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaises(ValueError):
fn(atol=0.0)
def test_mismatching_values(self):
actual = torch.tensor(1)
expected = torch.tensor(2)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaises(AssertionError):
fn()
def test_mismatching_values_rtol(self):
eps = 1e-3
actual = torch.tensor(1.0)
expected = torch.tensor(1.0 + eps)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaises(AssertionError):
fn(rtol=eps / 2, atol=0.0)
def test_mismatching_values_atol(self):
eps = 1e-3
actual = torch.tensor(0.0)
expected = torch.tensor(eps)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaises(AssertionError):
fn(rtol=0.0, atol=eps / 2)
def test_matching(self):
actual = torch.tensor(1.0)
expected = actual.clone()
torch.testing.assert_close(actual, expected)
def test_matching_rtol(self):
eps = 1e-3
actual = torch.tensor(1.0)
expected = torch.tensor(1.0 + eps)
for fn in assert_close_with_inputs(actual, expected):
fn(rtol=eps * 2, atol=0.0)
def test_matching_atol(self):
eps = 1e-3
actual = torch.tensor(0.0)
expected = torch.tensor(eps)
for fn in assert_close_with_inputs(actual, expected):
fn(rtol=0.0, atol=eps * 2)
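    # For context on the two tests above: assert_close treats corresponding values
    # a and b as close when |a - b| <= atol + rtol * |b| (the same criterion as
    # torch.allclose). E.g. with actual=0.0, expected=1e-3, rtol=0.0, atol=2e-3 the
    # bound is 2e-3 >= |0.0 - 1e-3|, so the comparison passes.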
# TODO: the code that this test was designed for was removed in https://github.com/pytorch/pytorch/pull/56058
# We need to check if this test is still needed or if this behavior is now enabled by default.
def test_matching_conjugate_bit(self):
actual = torch.tensor(complex(1, 1)).conj()
expected = torch.tensor(complex(1, -1))
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_matching_nan(self):
nan = float("NaN")
tests = (
(nan, nan),
(complex(nan, 0), complex(0, nan)),
(complex(nan, nan), complex(nan, 0)),
(complex(nan, nan), complex(nan, nan)),
)
for actual, expected in tests:
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaises(AssertionError):
fn()
def test_matching_nan_with_equal_nan(self):
nan = float("NaN")
tests = (
(nan, nan),
(complex(nan, 0), complex(0, nan)),
(complex(nan, nan), complex(nan, 0)),
(complex(nan, nan), complex(nan, nan)),
)
for actual, expected in tests:
for fn in assert_close_with_inputs(actual, expected):
fn(equal_nan=True)
def test_numpy(self):
tensor = torch.rand(2, 2, dtype=torch.float32)
actual = tensor.numpy()
expected = actual.copy()
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_scalar(self):
number = torch.randint(10, size=()).item()
for actual, expected in itertools.product((int(number), float(number), complex(number)), repeat=2):
check_dtype = type(actual) is type(expected)
for fn in assert_close_with_inputs(actual, expected):
fn(check_dtype=check_dtype)
def test_bool(self):
actual = torch.tensor([True, False])
expected = actual.clone()
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_none(self):
actual = expected = None
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_none_mismatch(self):
expected = None
for actual in (False, 0, torch.nan, torch.tensor(torch.nan)):
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaises(AssertionError):
fn()
def test_docstring_examples(self):
finder = doctest.DocTestFinder(verbose=False)
runner = doctest.DocTestRunner(verbose=False, optionflags=doctest.NORMALIZE_WHITESPACE)
globs = dict(torch=torch)
doctests = finder.find(torch.testing.assert_close, globs=globs)[0]
failures = []
runner.run(doctests, out=lambda report: failures.append(report))
if failures:
raise AssertionError(f"Doctest found {len(failures)} failures:\n\n" + "\n".join(failures))
def test_default_tolerance_selection_mismatching_dtypes(self):
        # If the default tolerances were selected based on the promoted dtype, i.e. float64,
# these tensors wouldn't be considered close.
actual = torch.tensor(0.99, dtype=torch.bfloat16)
expected = torch.tensor(1.0, dtype=torch.float64)
for fn in assert_close_with_inputs(actual, expected):
fn(check_dtype=False)
class UnexpectedException(Exception):
"""The only purpose of this exception is to test ``assert_close``'s handling of unexpected exceptions. Thus,
        the test should mock a component to raise this instead of the regular behavior. We avoid using a builtin
        exception here so as not to trigger any special handling of builtin exceptions.
"""
pass
@unittest.mock.patch("torch.testing._comparison.TensorLikePair.__init__", side_effect=UnexpectedException)
def test_unexpected_error_originate(self, _):
actual = torch.tensor(1.0)
expected = actual.clone()
with self.assertRaisesRegex(RuntimeError, "unexpected exception"):
torch.testing.assert_close(actual, expected)
@unittest.mock.patch("torch.testing._comparison.TensorLikePair.compare", side_effect=UnexpectedException)
def test_unexpected_error_compare(self, _):
actual = torch.tensor(1.0)
expected = actual.clone()
with self.assertRaisesRegex(RuntimeError, "unexpected exception"):
torch.testing.assert_close(actual, expected)
class TestAssertCloseMultiDevice(TestCase):
@deviceCountAtLeast(1)
def test_mismatching_device(self, devices):
for actual_device, expected_device in itertools.permutations(("cpu", *devices), 2):
actual = torch.empty((), device=actual_device)
expected = actual.clone().to(expected_device)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, "device"):
fn()
@deviceCountAtLeast(1)
def test_mismatching_device_no_check(self, devices):
for actual_device, expected_device in itertools.permutations(("cpu", *devices), 2):
actual = torch.rand((), device=actual_device)
expected = actual.clone().to(expected_device)
for fn in assert_close_with_inputs(actual, expected):
fn(check_device=False)
instantiate_device_type_tests(TestAssertCloseMultiDevice, globals(), only_for="cuda")
class TestAssertCloseErrorMessage(TestCase):
def test_identifier_tensor_likes(self):
actual = torch.tensor([1, 2, 3, 4])
expected = torch.tensor([1, 2, 5, 6])
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Tensor-likes")):
fn()
def test_identifier_scalars(self):
actual = 3
expected = 5
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Scalars")):
fn()
def test_not_equal(self):
actual = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
expected = torch.tensor([1, 2, 5, 6], dtype=torch.float32)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("not equal")):
fn(rtol=0.0, atol=0.0)
def test_not_close(self):
actual = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
expected = torch.tensor([1, 2, 5, 6], dtype=torch.float32)
for fn, (rtol, atol) in itertools.product(
assert_close_with_inputs(actual, expected), ((1.3e-6, 0.0), (0.0, 1e-5), (1.3e-6, 1e-5))
):
with self.assertRaisesRegex(AssertionError, re.escape("not close")):
fn(rtol=rtol, atol=atol)
def test_mismatched_elements(self):
actual = torch.tensor([1, 2, 3, 4])
expected = torch.tensor([1, 2, 5, 6])
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Mismatched elements: 2 / 4 (50.0%)")):
fn()
def test_abs_diff(self):
actual = torch.tensor([[1, 2], [3, 4]])
expected = torch.tensor([[1, 2], [5, 4]])
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Greatest absolute difference: 2 at index (1, 0)")):
fn()
def test_abs_diff_scalar(self):
actual = 3
expected = 5
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Absolute difference: 2")):
fn()
def test_rel_diff(self):
actual = torch.tensor([[1, 2], [3, 4]])
expected = torch.tensor([[1, 4], [3, 4]])
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Greatest relative difference: 0.5 at index (0, 1)")):
fn()
def test_rel_diff_scalar(self):
actual = 2
expected = 4
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Relative difference: 0.5")):
fn()
def test_zero_div_zero(self):
actual = torch.tensor([1.0, 0.0])
expected = torch.tensor([2.0, 0.0])
for fn in assert_close_with_inputs(actual, expected):
            # Although it looks complicated, this regex just makes sure that the word 'nan' is not part of the error
            # message. That would be the case if 0 / 0 were used for the mismatch computation even though the elements match.
with self.assertRaisesRegex(AssertionError, "((?!nan).)*"):
fn()
def test_rtol(self):
rtol = 1e-3
actual = torch.tensor((1, 2))
expected = torch.tensor((2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape(f"(up to {rtol} allowed)")):
fn(rtol=rtol, atol=0.0)
def test_atol(self):
atol = 1e-3
actual = torch.tensor((1, 2))
expected = torch.tensor((2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape(f"(up to {atol} allowed)")):
fn(rtol=0.0, atol=atol)
def test_msg_str(self):
msg = "Custom error message!"
actual = torch.tensor(1)
expected = torch.tensor(2)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, msg):
fn(msg=msg)
def test_msg_callable(self):
msg = "Custom error message"
actual = torch.tensor(1)
expected = torch.tensor(2)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, msg):
fn(msg=lambda _: msg)
class TestAssertCloseContainer(TestCase):
def test_sequence_mismatching_len(self):
actual = (torch.empty(()),)
expected = ()
with self.assertRaises(AssertionError):
torch.testing.assert_close(actual, expected)
def test_sequence_mismatching_values_msg(self):
t1 = torch.tensor(1)
t2 = torch.tensor(2)
actual = (t1, t1)
expected = (t1, t2)
with self.assertRaisesRegex(AssertionError, re.escape("item [1]")):
torch.testing.assert_close(actual, expected)
def test_mapping_mismatching_keys(self):
actual = {"a": torch.empty(())}
expected = {}
with self.assertRaises(AssertionError):
torch.testing.assert_close(actual, expected)
def test_mapping_mismatching_values_msg(self):
t1 = torch.tensor(1)
t2 = torch.tensor(2)
actual = {"a": t1, "b": t1}
expected = {"a": t1, "b": t2}
with self.assertRaisesRegex(AssertionError, re.escape("item ['b']")):
torch.testing.assert_close(actual, expected)
class TestAssertCloseSparseCOO(TestCase):
def test_matching_coalesced(self):
indices = (
(0, 1),
(1, 0),
)
values = (1, 2)
actual = torch.sparse_coo_tensor(indices, values, size=(2, 2)).coalesce()
expected = actual.clone()
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_matching_uncoalesced(self):
indices = (
(0, 1),
(1, 0),
)
values = (1, 2)
actual = torch.sparse_coo_tensor(indices, values, size=(2, 2))
expected = actual.clone()
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_mismatching_sparse_dims(self):
t = torch.randn(2, 3, 4)
actual = t.to_sparse()
expected = t.to_sparse(2)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("number of sparse dimensions in sparse COO tensors")):
fn()
def test_mismatching_nnz(self):
actual_indices = (
(0, 1),
(1, 0),
)
actual_values = (1, 2)
actual = torch.sparse_coo_tensor(actual_indices, actual_values, size=(2, 2))
expected_indices = (
(0, 1, 1,),
(1, 0, 0,),
)
expected_values = (1, 1, 1)
expected = torch.sparse_coo_tensor(expected_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("number of specified values in sparse COO tensors")):
fn()
def test_mismatching_indices_msg(self):
actual_indices = (
(0, 1),
(1, 0),
)
actual_values = (1, 2)
actual = torch.sparse_coo_tensor(actual_indices, actual_values, size=(2, 2))
expected_indices = (
(0, 1),
(1, 1),
)
expected_values = (1, 2)
expected = torch.sparse_coo_tensor(expected_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse COO indices")):
fn()
def test_mismatching_values_msg(self):
actual_indices = (
(0, 1),
(1, 0),
)
actual_values = (1, 2)
actual = torch.sparse_coo_tensor(actual_indices, actual_values, size=(2, 2))
expected_indices = (
(0, 1),
(1, 0),
)
expected_values = (1, 3)
expected = torch.sparse_coo_tensor(expected_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse COO values")):
fn()
@unittest.skipIf(IS_FBCODE or IS_SANDCASTLE, "Not all sandcastle jobs support CSR testing")
class TestAssertCloseSparseCSR(TestCase):
def test_matching(self):
crow_indices = (0, 1, 2)
col_indices = (1, 0)
values = (1, 2)
actual = torch.sparse_csr_tensor(crow_indices, col_indices, values, size=(2, 2))
expected = actual.clone()
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_mismatching_crow_indices_msg(self):
actual_crow_indices = (0, 1, 2)
actual_col_indices = (0, 1)
actual_values = (1, 2)
actual = torch.sparse_csr_tensor(actual_crow_indices, actual_col_indices, actual_values, size=(2, 2))
expected_crow_indices = (0, 2, 2)
expected_col_indices = actual_col_indices
expected_values = actual_values
expected = torch.sparse_csr_tensor(expected_crow_indices, expected_col_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse CSR crow_indices")):
fn()
def test_mismatching_col_indices_msg(self):
actual_crow_indices = (0, 1, 2)
actual_col_indices = (1, 0)
actual_values = (1, 2)
actual = torch.sparse_csr_tensor(actual_crow_indices, actual_col_indices, actual_values, size=(2, 2))
expected_crow_indices = actual_crow_indices
expected_col_indices = (1, 1)
expected_values = actual_values
expected = torch.sparse_csr_tensor(expected_crow_indices, expected_col_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse CSR col_indices")):
fn()
def test_mismatching_values_msg(self):
actual_crow_indices = (0, 1, 2)
actual_col_indices = (1, 0)
actual_values = (1, 2)
actual = torch.sparse_csr_tensor(actual_crow_indices, actual_col_indices, actual_values, size=(2, 2))
expected_crow_indices = actual_crow_indices
expected_col_indices = actual_col_indices
expected_values = (1, 3)
expected = torch.sparse_csr_tensor(expected_crow_indices, expected_col_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse CSR values")):
fn()
@unittest.skipIf(IS_FBCODE or IS_SANDCASTLE, "Not all sandcastle jobs support CSC testing")
class TestAssertCloseSparseCSC(TestCase):
def test_matching(self):
ccol_indices = (0, 1, 2)
row_indices = (1, 0)
values = (1, 2)
actual = torch.sparse_csc_tensor(ccol_indices, row_indices, values, size=(2, 2))
expected = actual.clone()
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_mismatching_ccol_indices_msg(self):
actual_ccol_indices = (0, 1, 2)
actual_row_indices = (0, 1)
actual_values = (1, 2)
actual = torch.sparse_csc_tensor(actual_ccol_indices, actual_row_indices, actual_values, size=(2, 2))
expected_ccol_indices = (0, 2, 2)
expected_row_indices = actual_row_indices
expected_values = actual_values
expected = torch.sparse_csc_tensor(expected_ccol_indices, expected_row_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse CSC ccol_indices")):
fn()
def test_mismatching_row_indices_msg(self):
actual_ccol_indices = (0, 1, 2)
actual_row_indices = (1, 0)
actual_values = (1, 2)
actual = torch.sparse_csc_tensor(actual_ccol_indices, actual_row_indices, actual_values, size=(2, 2))
expected_ccol_indices = actual_ccol_indices
expected_row_indices = (1, 1)
expected_values = actual_values
expected = torch.sparse_csc_tensor(expected_ccol_indices, expected_row_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse CSC row_indices")):
fn()
def test_mismatching_values_msg(self):
actual_ccol_indices = (0, 1, 2)
actual_row_indices = (1, 0)
actual_values = (1, 2)
actual = torch.sparse_csc_tensor(actual_ccol_indices, actual_row_indices, actual_values, size=(2, 2))
expected_ccol_indices = actual_ccol_indices
expected_row_indices = actual_row_indices
expected_values = (1, 3)
expected = torch.sparse_csc_tensor(expected_ccol_indices, expected_row_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse CSC values")):
fn()
@unittest.skipIf(IS_FBCODE or IS_SANDCASTLE, "Not all sandcastle jobs support BSR testing")
class TestAssertCloseSparseBSR(TestCase):
def test_matching(self):
crow_indices = (0, 1, 2)
col_indices = (1, 0)
values = ([[1]], [[2]])
actual = torch.sparse_bsr_tensor(crow_indices, col_indices, values, size=(2, 2))
expected = actual.clone()
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_mismatching_crow_indices_msg(self):
actual_crow_indices = (0, 1, 2)
actual_col_indices = (0, 1)
actual_values = ([[1]], [[2]])
actual = torch.sparse_bsr_tensor(actual_crow_indices, actual_col_indices, actual_values, size=(2, 2))
expected_crow_indices = (0, 2, 2)
expected_col_indices = actual_col_indices
expected_values = actual_values
expected = torch.sparse_bsr_tensor(expected_crow_indices, expected_col_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse BSR crow_indices")):
fn()
def test_mismatching_col_indices_msg(self):
actual_crow_indices = (0, 1, 2)
actual_col_indices = (1, 0)
actual_values = ([[1]], [[2]])
actual = torch.sparse_bsr_tensor(actual_crow_indices, actual_col_indices, actual_values, size=(2, 2))
expected_crow_indices = actual_crow_indices
expected_col_indices = (1, 1)
expected_values = actual_values
expected = torch.sparse_bsr_tensor(expected_crow_indices, expected_col_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse BSR col_indices")):
fn()
def test_mismatching_values_msg(self):
actual_crow_indices = (0, 1, 2)
actual_col_indices = (1, 0)
actual_values = ([[1]], [[2]])
actual = torch.sparse_bsr_tensor(actual_crow_indices, actual_col_indices, actual_values, size=(2, 2))
expected_crow_indices = actual_crow_indices
expected_col_indices = actual_col_indices
expected_values = ([[1]], [[3]])
expected = torch.sparse_bsr_tensor(expected_crow_indices, expected_col_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse BSR values")):
fn()
@unittest.skipIf(IS_FBCODE or IS_SANDCASTLE, "Not all sandcastle jobs support BSC testing")
class TestAssertCloseSparseBSC(TestCase):
def test_matching(self):
ccol_indices = (0, 1, 2)
row_indices = (1, 0)
values = ([[1]], [[2]])
actual = torch.sparse_bsc_tensor(ccol_indices, row_indices, values, size=(2, 2))
expected = actual.clone()
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_mismatching_ccol_indices_msg(self):
actual_ccol_indices = (0, 1, 2)
actual_row_indices = (0, 1)
actual_values = ([[1]], [[2]])
actual = torch.sparse_bsc_tensor(actual_ccol_indices, actual_row_indices, actual_values, size=(2, 2))
expected_ccol_indices = (0, 2, 2)
expected_row_indices = actual_row_indices
expected_values = actual_values
expected = torch.sparse_bsc_tensor(expected_ccol_indices, expected_row_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse BSC ccol_indices")):
fn()
def test_mismatching_row_indices_msg(self):
actual_ccol_indices = (0, 1, 2)
actual_row_indices = (1, 0)
actual_values = ([[1]], [[2]])
actual = torch.sparse_bsc_tensor(actual_ccol_indices, actual_row_indices, actual_values, size=(2, 2))
expected_ccol_indices = actual_ccol_indices
expected_row_indices = (1, 1)
expected_values = actual_values
expected = torch.sparse_bsc_tensor(expected_ccol_indices, expected_row_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse BSC row_indices")):
fn()
def test_mismatching_values_msg(self):
actual_ccol_indices = (0, 1, 2)
actual_row_indices = (1, 0)
actual_values = ([[1]], [[2]])
actual = torch.sparse_bsc_tensor(actual_ccol_indices, actual_row_indices, actual_values, size=(2, 2))
expected_ccol_indices = actual_ccol_indices
expected_row_indices = actual_row_indices
expected_values = ([[1]], [[3]])
expected = torch.sparse_bsc_tensor(expected_ccol_indices, expected_row_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse BSC values")):
fn()
class TestAssertCloseQuantized(TestCase):
def test_mismatching_is_quantized(self):
actual = torch.tensor(1.0)
expected = torch.quantize_per_tensor(actual, scale=1.0, zero_point=0, dtype=torch.qint32)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, "is_quantized"):
fn()
def test_mismatching_qscheme(self):
t = torch.tensor((1.0,))
actual = torch.quantize_per_tensor(t, scale=1.0, zero_point=0, dtype=torch.qint32)
expected = torch.quantize_per_channel(
t,
scales=torch.tensor((1.0,)),
zero_points=torch.tensor((0,)),
axis=0,
dtype=torch.qint32,
)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, "qscheme"):
fn()
def test_matching_per_tensor(self):
actual = torch.quantize_per_tensor(torch.tensor(1.0), scale=1.0, zero_point=0, dtype=torch.qint32)
expected = actual.clone()
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_matching_per_channel(self):
actual = torch.quantize_per_channel(
torch.tensor((1.0,)),
scales=torch.tensor((1.0,)),
zero_points=torch.tensor((0,)),
axis=0,
dtype=torch.qint32,
)
expected = actual.clone()
for fn in assert_close_with_inputs(actual, expected):
fn()
def _get_test_names_for_test_class(test_cls):
""" Convenience function to get all test names for a given test class. """
test_names = ['{}.{}'.format(test_cls.__name__, key) for key in test_cls.__dict__
if key.startswith('test_')]
return sorted(test_names)
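# For illustration (TestFoo is hypothetical, shown only to document the returned format):
#
#   class TestFoo(TestCase):
#       def test_bar(self): ...
#
#   _get_test_names_for_test_class(TestFoo)  # -> ['TestFoo.test_bar']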
class TestTestParametrization(TestCase):
def test_default_names(self):
class TestParametrized(TestCase):
@parametrize("x", range(5))
def test_default_names(self, x):
pass
@parametrize("x,y", [(1, 2), (2, 3), (3, 4)])
def test_two_things_default_names(self, x, y):
pass
instantiate_parametrized_tests(TestParametrized)
expected_test_names = [
'TestParametrized.test_default_names_x_0',
'TestParametrized.test_default_names_x_1',
'TestParametrized.test_default_names_x_2',
'TestParametrized.test_default_names_x_3',
'TestParametrized.test_default_names_x_4',
'TestParametrized.test_two_things_default_names_x_1_y_2',
'TestParametrized.test_two_things_default_names_x_2_y_3',
'TestParametrized.test_two_things_default_names_x_3_y_4',
]
test_names = _get_test_names_for_test_class(TestParametrized)
self.assertEqual(expected_test_names, test_names)
def test_name_fn(self):
class TestParametrized(TestCase):
@parametrize("bias", [False, True], name_fn=lambda b: 'bias' if b else 'no_bias')
def test_custom_names(self, bias):
pass
@parametrize("x", [1, 2], name_fn=str)
@parametrize("y", [3, 4], name_fn=str)
@parametrize("z", [5, 6], name_fn=str)
def test_three_things_composition_custom_names(self, x, y, z):
pass
@parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: '{}__{}'.format(x, y))
def test_two_things_custom_names_alternate(self, x, y):
pass
instantiate_parametrized_tests(TestParametrized)
expected_test_names = [
'TestParametrized.test_custom_names_bias',
'TestParametrized.test_custom_names_no_bias',
'TestParametrized.test_three_things_composition_custom_names_1_3_5',
'TestParametrized.test_three_things_composition_custom_names_1_3_6',
'TestParametrized.test_three_things_composition_custom_names_1_4_5',
'TestParametrized.test_three_things_composition_custom_names_1_4_6',
'TestParametrized.test_three_things_composition_custom_names_2_3_5',
'TestParametrized.test_three_things_composition_custom_names_2_3_6',
'TestParametrized.test_three_things_composition_custom_names_2_4_5',
'TestParametrized.test_three_things_composition_custom_names_2_4_6',
'TestParametrized.test_two_things_custom_names_alternate_1__2',
'TestParametrized.test_two_things_custom_names_alternate_1__3',
'TestParametrized.test_two_things_custom_names_alternate_1__4',
]
test_names = _get_test_names_for_test_class(TestParametrized)
self.assertEqual(expected_test_names, test_names)
def test_subtest_names(self):
class TestParametrized(TestCase):
@parametrize("bias", [subtest(True, name='bias'),
subtest(False, name='no_bias')])
def test_custom_names(self, bias):
pass
@parametrize("x,y", [subtest((1, 2), name='double'),
subtest((1, 3), name='triple'),
subtest((1, 4), name='quadruple')])
def test_two_things_custom_names(self, x, y):
pass
instantiate_parametrized_tests(TestParametrized)
expected_test_names = [
'TestParametrized.test_custom_names_bias',
'TestParametrized.test_custom_names_no_bias',
'TestParametrized.test_two_things_custom_names_double',
'TestParametrized.test_two_things_custom_names_quadruple',
'TestParametrized.test_two_things_custom_names_triple',
]
test_names = _get_test_names_for_test_class(TestParametrized)
self.assertEqual(expected_test_names, test_names)
def test_modules_decorator_misuse_error(self):
# Test that @modules errors out when used with instantiate_parametrized_tests().
class TestParametrized(TestCase):
@modules(module_db)
def test_modules(self, module_info):
pass
with self.assertRaisesRegex(RuntimeError, 'intended to be used in a device-specific context'):
instantiate_parametrized_tests(TestParametrized)
def test_ops_decorator_misuse_error(self):
        # Test that @ops errors out when used with instantiate_parametrized_tests().
class TestParametrized(TestCase):
@ops(op_db)
def test_ops(self, module_info):
pass
with self.assertRaisesRegex(RuntimeError, 'intended to be used in a device-specific context'):
instantiate_parametrized_tests(TestParametrized)
def test_multiple_handling_of_same_param_error(self):
# Test that multiple decorators handling the same param errors out.
class TestParametrized(TestCase):
@parametrize("x", range(3))
@parametrize("x", range(5))
def test_param(self, x):
pass
with self.assertRaisesRegex(RuntimeError, 'multiple parametrization decorators'):
instantiate_parametrized_tests(TestParametrized)
@parametrize("x", [1, subtest(2, decorators=[unittest.expectedFailure]), 3])
def test_subtest_expected_failure(self, x):
if x == 2:
raise RuntimeError('Boom')
@parametrize("x", [subtest(1, decorators=[unittest.expectedFailure]), 2, 3])
@parametrize("y", [4, 5, subtest(6, decorators=[unittest.expectedFailure])])
def test_two_things_subtest_expected_failure(self, x, y):
if x == 1 or y == 6:
raise RuntimeError('Boom')
class TestTestParametrizationDeviceType(TestCase):
def test_unparametrized_names(self, device):
# This test exists to protect against regressions in device / dtype test naming
# due to parametrization logic.
device = self.device_type
class TestParametrized(TestCase):
def test_device_specific(self, device):
pass
@dtypes(torch.float32, torch.float64)
def test_device_dtype_specific(self, device, dtype):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_device_dtype_specific_{}_float32',
'{}.test_device_dtype_specific_{}_float64',
'{}.test_device_specific_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_default_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@parametrize("x", range(5))
def test_default_names(self, device, x):
pass
@parametrize("x,y", [(1, 2), (2, 3), (3, 4)])
def test_two_things_default_names(self, device, x, y):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_default_names_x_0_{}',
'{}.test_default_names_x_1_{}',
'{}.test_default_names_x_2_{}',
'{}.test_default_names_x_3_{}',
'{}.test_default_names_x_4_{}',
'{}.test_two_things_default_names_x_1_y_2_{}',
'{}.test_two_things_default_names_x_2_y_3_{}',
'{}.test_two_things_default_names_x_3_y_4_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_name_fn(self, device):
device = self.device_type
class TestParametrized(TestCase):
@parametrize("bias", [False, True], name_fn=lambda b: 'bias' if b else 'no_bias')
def test_custom_names(self, device, bias):
pass
@parametrize("x", [1, 2], name_fn=str)
@parametrize("y", [3, 4], name_fn=str)
@parametrize("z", [5, 6], name_fn=str)
def test_three_things_composition_custom_names(self, device, x, y, z):
pass
@parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: '{}__{}'.format(x, y))
def test_two_things_custom_names_alternate(self, device, x, y):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_custom_names_bias_{}',
'{}.test_custom_names_no_bias_{}',
'{}.test_three_things_composition_custom_names_1_3_5_{}',
'{}.test_three_things_composition_custom_names_1_3_6_{}',
'{}.test_three_things_composition_custom_names_1_4_5_{}',
'{}.test_three_things_composition_custom_names_1_4_6_{}',
'{}.test_three_things_composition_custom_names_2_3_5_{}',
'{}.test_three_things_composition_custom_names_2_3_6_{}',
'{}.test_three_things_composition_custom_names_2_4_5_{}',
'{}.test_three_things_composition_custom_names_2_4_6_{}',
'{}.test_two_things_custom_names_alternate_1__2_{}',
'{}.test_two_things_custom_names_alternate_1__3_{}',
'{}.test_two_things_custom_names_alternate_1__4_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_subtest_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@parametrize("bias", [subtest(True, name='bias'),
subtest(False, name='no_bias')])
def test_custom_names(self, device, bias):
pass
@parametrize("x,y", [subtest((1, 2), name='double'),
subtest((1, 3), name='triple'),
subtest((1, 4), name='quadruple')])
def test_two_things_custom_names(self, device, x, y):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_custom_names_bias_{}',
'{}.test_custom_names_no_bias_{}',
'{}.test_two_things_custom_names_double_{}',
'{}.test_two_things_custom_names_quadruple_{}',
'{}.test_two_things_custom_names_triple_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_ops_composition_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@ops(op_db)
@parametrize("flag", [False, True], lambda f: 'flag_enabled' if f else 'flag_disabled')
def test_op_parametrized(self, device, dtype, op, flag):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = []
for op in op_db:
for dtype in op.supported_dtypes(torch.device(device).type):
for flag_part in ('flag_disabled', 'flag_enabled'):
expected_name = '{}.test_op_parametrized_{}_{}_{}_{}'.format(
device_cls.__name__, op.formatted_name, flag_part, device, dtype_name(dtype))
expected_test_names.append(expected_name)
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
def test_dtypes_composition_valid(self, device):
# Test checks that @parametrize and @dtypes compose as expected when @parametrize
# doesn't set dtype.
device = self.device_type
class TestParametrized(TestCase):
@dtypes(torch.float32, torch.float64)
@parametrize("x", range(3))
def test_parametrized(self, x, dtype):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_parametrized_x_0_{}_float32',
'{}.test_parametrized_x_0_{}_float64',
'{}.test_parametrized_x_1_{}_float32',
'{}.test_parametrized_x_1_{}_float64',
'{}.test_parametrized_x_2_{}_float32',
'{}.test_parametrized_x_2_{}_float64')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
def test_dtypes_composition_invalid(self, device):
# Test checks that @dtypes cannot be composed with parametrization decorators when they
# also try to set dtype.
device = self.device_type
class TestParametrized(TestCase):
@dtypes(torch.float32, torch.float64)
@parametrize("dtype", [torch.int32, torch.int64])
def test_parametrized(self, dtype):
pass
with self.assertRaisesRegex(RuntimeError, "handled multiple times"):
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
# Verify proper error behavior with @ops + @dtypes, as both try to set dtype.
class TestParametrized(TestCase):
@dtypes(torch.float32, torch.float64)
@ops(op_db)
def test_parametrized(self, op, dtype):
pass
with self.assertRaisesRegex(RuntimeError, "handled multiple times"):
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
def test_multiple_handling_of_same_param_error(self, device):
# Test that multiple decorators handling the same param errors out.
# Both @modules and @ops handle the dtype param.
class TestParametrized(TestCase):
@ops(op_db)
@modules(module_db)
def test_param(self, device, dtype, op, module_info):
pass
with self.assertRaisesRegex(RuntimeError, "handled multiple times"):
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
@parametrize("x", [1, subtest(2, decorators=[unittest.expectedFailure]), 3])
def test_subtest_expected_failure(self, device, x):
if x == 2:
raise RuntimeError('Boom')
@parametrize("x", [subtest(1, decorators=[unittest.expectedFailure]), 2, 3])
@parametrize("y", [4, 5, subtest(6, decorators=[unittest.expectedFailure])])
def test_two_things_subtest_expected_failure(self, device, x, y):
if x == 1 or y == 6:
raise RuntimeError('Boom')
instantiate_parametrized_tests(TestTestParametrization)
instantiate_device_type_tests(TestTestParametrizationDeviceType, globals())
class TestImports(TestCase):
def test_circular_dependencies(self) -> None:
""" Checks that all modules inside torch can be imported
Prevents regression reported in https://github.com/pytorch/pytorch/issues/77441 """
ignored_modules = ["torch.utils.tensorboard", # deps on tensorboard
"torch.distributed.elastic.rendezvous", # depps on etcd
"torch.backends._coreml", # depends on pycoreml
"torch.contrib.", # something weird
"torch.testing._internal.distributed.", # just fails
"torch.ao.sparsity._experimental.", # depends on pytorch_lightning, not user-facing
"torch.cuda._dynamo_graphs", # depends on torchdynamo
]
# See https://github.com/pytorch/pytorch/issues/77801
        if sys.version_info < (3, 9):
ignored_modules.append("torch.utils.benchmark")
if IS_WINDOWS:
# Distributed does not work on Windows
ignored_modules.append("torch.distributed.")
ignored_modules.append("torch.testing._internal.dist_utils")
torch_dir = os.path.dirname(torch.__file__)
for base, folders, files in os.walk(torch_dir):
prefix = os.path.relpath(base, os.path.dirname(torch_dir)).replace(os.path.sep, ".")
for f in files:
if not f.endswith(".py"):
continue
mod_name = f"{prefix}.{f[:-3]}" if f != "__init__.py" else prefix
# Do not attempt to import executable modules
if f == "__main__.py":
continue
if any(mod_name.startswith(x) for x in ignored_modules):
continue
try:
mod = importlib.import_module(mod_name)
except Exception as e:
raise RuntimeError(f"Failed to import {mod_name}: {e}") from e
self.assertTrue(inspect.ismodule(mod))
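    # For reference, the walk above maps a file such as
    # <torch_dir>/utils/data/dataloader.py to the module name
    # "torch.utils.data.dataloader" (and an __init__.py to its package name)
    # before importing it.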
@unittest.skipIf(IS_WINDOWS, "importing torch+CUDA on CPU results in warning")
def test_no_warning_on_import(self) -> None:
out = subprocess.check_output(
[sys.executable, "-W", "all", "-c", "import torch"],
stderr=subprocess.STDOUT,
# On Windows, opening the subprocess with the default CWD makes `import torch`
# fail, so just set CWD to this script's directory
cwd=os.path.dirname(os.path.realpath(__file__)),).decode("utf-8")
        self.assertEqual(out, "")
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_testing.py
|
import sys
import torch.cuda
import os
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
from torch.testing._internal.common_utils import IS_WINDOWS
if sys.platform == 'win32':
vc_version = os.getenv('VCToolsVersion', '')
if vc_version.startswith('14.16.'):
CXX_FLAGS = ['/sdl']
else:
CXX_FLAGS = ['/sdl', '/permissive-']
else:
CXX_FLAGS = ['-g']
USE_NINJA = os.getenv('USE_NINJA') == '1'
ext_modules = [
CppExtension(
'torch_test_cpp_extension.cpp', ['extension.cpp'],
extra_compile_args=CXX_FLAGS),
CppExtension(
'torch_test_cpp_extension.ort', ['ort_extension.cpp'],
extra_compile_args=CXX_FLAGS),
CppExtension(
'torch_test_cpp_extension.rng', ['rng_extension.cpp'],
extra_compile_args=CXX_FLAGS),
]
if torch.cuda.is_available() and (CUDA_HOME is not None or ROCM_HOME is not None):
extension = CUDAExtension(
'torch_test_cpp_extension.cuda', [
'cuda_extension.cpp',
'cuda_extension_kernel.cu',
'cuda_extension_kernel2.cu',
],
extra_compile_args={'cxx': CXX_FLAGS,
'nvcc': ['-O2']})
ext_modules.append(extension)
if torch.cuda.is_available() and (CUDA_HOME is not None or ROCM_HOME is not None):
extension = CUDAExtension(
'torch_test_cpp_extension.torch_library', [
'torch_library.cu'
],
extra_compile_args={'cxx': CXX_FLAGS,
'nvcc': ['-O2']})
ext_modules.append(extension)
# todo(mkozuki): Figure out the root cause
if (not IS_WINDOWS) and torch.cuda.is_available() and CUDA_HOME is not None:
    # malfet: One should not assume that PyTorch re-exports CUDA dependencies
cublas_extension = CUDAExtension(
name='torch_test_cpp_extension.cublas_extension',
sources=['cublas_extension.cpp'],
libraries=['cublas'] if torch.version.hip is None else [],
)
ext_modules.append(cublas_extension)
cusolver_extension = CUDAExtension(
name='torch_test_cpp_extension.cusolver_extension',
sources=['cusolver_extension.cpp'],
libraries=['cusolver'] if torch.version.hip is None else [],
)
ext_modules.append(cusolver_extension)
if USE_NINJA and (not IS_WINDOWS) and torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension(
name='torch_test_cpp_extension.cuda_dlink',
sources=[
'cuda_dlink_extension.cpp',
'cuda_dlink_extension_kernel.cu',
'cuda_dlink_extension_add.cu',
],
dlink=True,
extra_compile_args={'cxx': CXX_FLAGS,
'nvcc': ['-O2', '-dc']})
ext_modules.append(extension)
setup(
name='torch_test_cpp_extension',
packages=['torch_test_cpp_extension'],
ext_modules=ext_modules,
include_dirs='self_compiler_include_dirs_test',
cmdclass={'build_ext': BuildExtension.with_options(use_ninja=USE_NINJA)})
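# A minimal sketch of building these test extensions by hand (the test harness may use
# different flags; this is just the generic setuptools invocation):
#
#   cd test/cpp_extensions
#   python setup.py build_ext --inplace   # or: python setup.py install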
|
pytorch-master
|
test/cpp_extensions/setup.py
|
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension
setup(
name="no_python_abi_suffix_test",
ext_modules=[
CppExtension("no_python_abi_suffix_test", ["no_python_abi_suffix_test.cpp"])
],
cmdclass={"build_ext": BuildExtension.with_options(no_python_abi_suffix=True)},
)
|
pytorch-master
|
test/cpp_extensions/no_python_abi_suffix_test/setup.py
|
pytorch-master
|
test/cpp_extensions/torch_test_cpp_extension/__init__.py
|
|
# Owner(s): ["module: onnx"]
import io
import itertools
import sys
import unittest
from typing import Tuple
import caffe2.python.onnx.backend as c2
import model_defs.dcgan as dcgan
import model_defs.word_language_model as word_language_model
import numpy as np
import onnx
import torch.onnx
import torch.onnx.operators
import torch.utils.model_zoo as model_zoo
import verify
from caffe2.python.operator_test.torch_integration_test import (
create_bbox_transform_inputs,
generate_rois_rotated,
)
from debug_embed_params import run_embed_params
from model_defs.lstm_flattening_result import LstmFlatteningResult
from model_defs.mnist import MNIST
from model_defs.rnn_model_with_packed_sequence import RnnModelWithPackedSequence
from model_defs.squeezenet import SqueezeNet
from model_defs.srresnet import SRResNet
from model_defs.super_resolution import SuperResolutionNet
from pytorch_test_common import (
BATCH_SIZE,
RNN_BATCH_SIZE,
RNN_HIDDEN_SIZE,
RNN_INPUT_SIZE,
RNN_SEQUENCE_LENGTH,
skipIfNoCuda,
skipIfTravis,
skipIfUnsupportedMinOpsetVersion,
skipIfUnsupportedOpsetVersion,
)
from torch import nn
from torch.autograd import function, Variable
from torch.nn.utils import rnn as rnn_utils
from torch.onnx import ExportTypes
from torch.testing._internal import common_utils
from torch.testing._internal.common_utils import skipIfNoLapack
# Import various models for testing
from torchvision.models.alexnet import alexnet
from torchvision.models.densenet import densenet121
from torchvision.models.inception import inception_v3
from torchvision.models.resnet import resnet50
from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
skip = unittest.skip
def skipIfEmbed(func):
def wrapper(self):
if self.embed_params:
raise unittest.SkipTest("Skip embed_params verify test")
return func(self)
return wrapper
def skipIfNoEmbed(func):
def wrapper(self):
if not self.embed_params:
raise unittest.SkipTest("Skip debug embed_params test")
return func(self)
return wrapper
# def import_model(proto, input, workspace=None, use_gpu=True):
# model_def = onnx.ModelProto.FromString(proto)
# onnx.checker.check_model(model_def)
#
# if workspace is None:
# workspace = {}
# if isinstance(input, tuple):
# for i in range(len(input)):
# workspace[model_def.graph.input[i]] = input[i]
# else:
# workspace[model_def.graph.input[0]] = input
#
# caffe2_out_workspace = c2.run_model(
# init_graph=None,
# predict_graph=graph_def,
# inputs=workspace,
# use_gpu=use_gpu)
# caffe2_out = caffe2_out_workspace[0]
# return caffe2_out
def do_export(model, inputs, *args, **kwargs):
f = io.BytesIO()
out = torch.onnx._export(model, inputs, f, *args, **kwargs)
if isinstance(model, torch.jit.ScriptModule):
# Special case for common case of passing a single Tensor
if isinstance(inputs, torch.Tensor):
inputs = (inputs,)
out = model(*inputs)
return f.getvalue(), out
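# Illustrative call (MyModel and its input are hypothetical, shown only to document the
# return convention): do_export returns the serialized ONNX proto bytes together with
# the eager-mode output of the (possibly scripted) model.
#
#   proto_bytes, torch_out = do_export(MyModel(), torch.randn(1, 3), opset_version=9)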
torch.set_default_tensor_type("torch.FloatTensor")
try:
import torch
except ImportError:
print("Cannot import torch, hence caffe2-torch test will not run.")
sys.exit(0)
model_urls = {
"alexnet": "https://s3.amazonaws.com/download.caffe2.ai/test_data/alexnet-owt-4df8aa71.pth",
"dcgan_b": "https://s3.amazonaws.com/pytorch/test_data/export/netG_bedroom_epoch_1-0649e76b.pth",
"dcgan_f": "https://s3.amazonaws.com/pytorch/test_data/export/netG_faces_epoch_49-d86035a6.pth",
"densenet121": "https://s3.amazonaws.com/download.caffe2.ai/test_data/densenet121-d66d3027.pth",
"inception_v3_google": "https://s3.amazonaws.com/download.caffe2.ai/test_data/inception_v3_google-1a9a5a14.pth",
"resnet50": "https://s3.amazonaws.com/download.caffe2.ai/test_data/resnet50-19c8e357.pth",
"srresNet": "https://s3.amazonaws.com/pytorch/demos/srresnet-e10b2039.pth",
"super_resolution": "https://s3.amazonaws.com/pytorch/test_data/export/superres_epoch100-44c6958e.pth",
"squeezenet1_0": "https://s3.amazonaws.com/download.caffe2.ai/test_data/squeezenet1_0-a815701f.pth",
"squeezenet1_1": "https://s3.amazonaws.com/download.caffe2.ai/test_data/squeezenet1_1-f364aa15.pth",
"vgg16": "https://s3.amazonaws.com/download.caffe2.ai/test_data/vgg16-397923af.pth",
"vgg19": "https://s3.amazonaws.com/download.caffe2.ai/test_data/vgg19-dcbb9e9d.pth",
}
class TestCaffe2Backend_opset9(common_utils.TestCase):
opset_version = 9
embed_params = False
def setUp(self):
# the following should ideally be super().setUp(), https://github.com/pytorch/pytorch/issues/79630
common_utils.TestCase.setUp(self)
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
np.random.seed(seed=0)
def convert_cuda(self, model, input):
cuda_model = model.cuda()
# input might be nested - we want to move everything to GPU
cuda_input = function._nested_map(
lambda o: isinstance(o, Variable) or isinstance(o, torch.Tensor),
lambda o: o.cuda(),
)(input)
return cuda_model, cuda_input
def run_debug_test(
self,
model,
train,
batch_size,
state_dict=None,
input=None,
use_gpu=True,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
):
"""
# TODO: remove this from the final release version
        This helper is only for our internal debugging of the
        embed_params=False case.
"""
if not isinstance(model, torch.jit.ScriptModule):
model.train(train)
if state_dict is not None:
model.load_state_dict(state_dict)
# Either user specified input or random (deterministic) input
if input is None:
input = torch.randn(batch_size, 3, 224, 224, requires_grad=True)
if use_gpu:
model, input = self.convert_cuda(model, input)
onnxir, torch_out = do_export(
model,
input,
export_params=self.embed_params,
verbose=False,
do_constant_folding=False,
opset_version=self.opset_version,
keep_initializers_as_inputs=True,
add_node_names=False,
operator_export_type=operator_export_type,
)
if isinstance(torch_out, torch.autograd.Variable):
torch_out = (torch_out,)
caffe2_out = run_embed_params(onnxir, model, input, state_dict, use_gpu)
for _, (x, y) in enumerate(zip(torch_out, caffe2_out)):
np.testing.assert_almost_equal(x.data.cpu().numpy(), y, decimal=3)
def run_actual_test(
self,
model,
train,
batch_size,
state_dict=None,
input=None,
use_gpu=True,
rtol=0.001,
atol=1e-7,
do_constant_folding=True,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
input_names=None,
dynamic_axes=None,
remained_onnx_input_idx=None,
):
"""
        This is what the user-facing version will look like.
"""
# set the training/test mode for the model
if not isinstance(model, torch.jit.ScriptModule):
model.train(train)
# use the pre-trained model params if available
if state_dict is not None:
model.load_state_dict(state_dict)
# Either user specified input or random (deterministic) input
if input is None:
input = torch.randn(batch_size, 3, 224, 224, requires_grad=True)
# GPU-ize the model, if requested
if use_gpu:
model, input = self.convert_cuda(model, input)
# Verify the model runs the same in Caffe2
verify.verify(
model,
input,
c2,
rtol=rtol,
atol=atol,
do_constant_folding=do_constant_folding,
opset_version=self.opset_version,
keep_initializers_as_inputs=True,
operator_export_type=operator_export_type,
input_names=input_names,
dynamic_axes=dynamic_axes,
remained_onnx_input_idx=remained_onnx_input_idx,
)
def run_model_test(
self,
model,
train,
batch_size,
state_dict=None,
input=None,
use_gpu=True,
rtol=0.001,
atol=1e-7,
do_constant_folding=True,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
input_names=None,
dynamic_axes=None,
remained_onnx_input_idx=None,
):
use_gpu_ = torch.cuda.is_available() and use_gpu
        # NOTE: do_constant_folding is turned on only when the model has
        # parameters embedded (which are needed for constant folding),
        # i.e. for the self.embed_params=True case. self.embed_params is True
        # for the TestCaffe2BackendEmbed class defined at the bottom.
if self.embed_params:
self.run_actual_test(
model,
train,
batch_size,
state_dict,
input,
use_gpu=use_gpu_,
rtol=rtol,
atol=atol,
do_constant_folding=do_constant_folding,
operator_export_type=operator_export_type,
input_names=input_names,
dynamic_axes=dynamic_axes,
remained_onnx_input_idx=remained_onnx_input_idx,
)
else:
self.run_debug_test(
model,
train,
batch_size,
state_dict,
input,
use_gpu=use_gpu_,
operator_export_type=operator_export_type,
)
def test_linear(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.many_fc = nn.Sequential(
nn.Linear(4, 5, bias=True),
nn.ReLU(inplace=True),
nn.Linear(5, 6, bias=True),
nn.ReLU(inplace=True),
nn.Linear(6, 7, bias=True),
)
def forward(self, input):
return self.many_fc(input)
model = MyModel()
input = torch.randn(3, 4, requires_grad=True)
self.run_model_test(model, train=False, batch_size=0, input=input)
def test_onnx_export_with_parameter_renaming(self):
class SimpleFcNet(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(5, 10)
def forward(self, input):
return self.fc1(input)
model = SimpleFcNet()
input = torch.randn(7, 5)
output = model(input)
f = io.BytesIO()
# Note that the export call explicitly sets the names of not just the input,
# but also the parameters. This test checks that the model can be loaded and
# executed in Caffe2 backend correctly.
torch.onnx._export(
model,
input,
f,
verbose=True,
export_type=ExportTypes.ZIP_ARCHIVE,
input_names=["input1", "parameter1", "parameter2"],
keep_initializers_as_inputs=True,
)
f.seek(0)
model_c2 = c2.prepare_zip_archive(f)
result = model_c2.run(input.numpy())
np.testing.assert_almost_equal(output.data.cpu().numpy(), result[0], decimal=3)
def test_onnx_export_param_name_duplication(self):
class SimpleFcNet(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(5, 10)
def forward(self, input):
return self.fc1(input)
model = SimpleFcNet()
input = torch.randn(7, 5)
output = model(input)
f = io.BytesIO()
# The export call explicitly sets the names of the input, and the first parameter.
# But note that the target first parameter name is the same as the second parameter name.
# This test checks that given this edge condition, the model can be loaded and executed
# in Caffe2 backend correctly.
torch.onnx._export(
model,
input,
f,
verbose=True,
export_type=ExportTypes.ZIP_ARCHIVE,
input_names=["input1", "fc1.bias"],
keep_initializers_as_inputs=True,
)
f.seek(0)
model_c2 = c2.prepare_zip_archive(f)
result = model_c2.run(input.numpy())
np.testing.assert_almost_equal(output.data.cpu().numpy(), result[0], decimal=3)
def test_lstm_cell(self):
model = nn.LSTMCell(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE)
input = torch.randn(BATCH_SIZE, RNN_INPUT_SIZE)
h0 = torch.randn(BATCH_SIZE, RNN_HIDDEN_SIZE)
c0 = torch.randn(BATCH_SIZE, RNN_HIDDEN_SIZE)
self.run_model_test(
model,
train=False,
batch_size=BATCH_SIZE,
input=(input, (h0, c0)),
use_gpu=False,
)
def test_gru_cell(self):
model = nn.GRUCell(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE)
input = torch.randn(BATCH_SIZE, RNN_INPUT_SIZE)
h0 = torch.randn(BATCH_SIZE, RNN_HIDDEN_SIZE)
self.run_model_test(
model, train=False, batch_size=BATCH_SIZE, input=(input, h0), use_gpu=False
)
def _dispatch_rnn_test(self, name, *args, **kwargs):
if name == "elman":
self._elman_rnn_test(*args, **kwargs)
if name == "lstm":
self._lstm_test(*args, **kwargs)
if name == "gru":
self._gru_test(*args, **kwargs)
def _elman_rnn_test(
self,
layers,
nonlinearity,
bidirectional,
initial_state,
packed_sequence,
dropout,
):
        batch_first = (packed_sequence == 2)
model = nn.RNN(
RNN_INPUT_SIZE,
RNN_HIDDEN_SIZE,
layers,
nonlinearity=nonlinearity,
bidirectional=bidirectional,
dropout=dropout,
batch_first=batch_first,
)
if packed_sequence == 1:
model = RnnModelWithPackedSequence(model, False)
if packed_sequence == 2:
model = RnnModelWithPackedSequence(model, True)
def make_input(batch_size):
seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
            seq_lengths = sorted(map(int, seq_lengths), reverse=True)
inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
inputs = [inputs]
directions = 2 if bidirectional else 1
if initial_state:
h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
inputs.append(h0)
if packed_sequence != 0:
inputs.append(torch.IntTensor(seq_lengths))
if len(inputs) == 1:
input = inputs[0]
else:
input = tuple(inputs)
return input
input = make_input(RNN_BATCH_SIZE)
self.run_model_test(
model,
train=False,
batch_size=RNN_BATCH_SIZE,
input=input,
use_gpu=False,
atol=1e-7,
)
# Test that the model still runs with a different batch size.
# (Export the model with a batch size of 1 so that the RNN has a variable batch size;
# otherwise expand will fail.)
variable_batch_size_init_input = make_input(1)
# Constant folding only works when the model has its parameters embedded; they are kept as inputs here, so disable it.
onnxir, _ = do_export(
model,
variable_batch_size_init_input,
keep_initializers_as_inputs=True,
do_constant_folding=False,
)
other_input = make_input(RNN_BATCH_SIZE + 1)
_ = run_embed_params(onnxir, model, other_input, use_gpu=False)
def _lstm_test(
self, layers, bidirectional, initial_state, packed_sequence, dropout
):
batch_first = packed_sequence == 2
model = LstmFlatteningResult(
RNN_INPUT_SIZE,
RNN_HIDDEN_SIZE,
layers,
bidirectional=bidirectional,
dropout=dropout,
batch_first=batch_first,
)
if packed_sequence == 1:
model = RnnModelWithPackedSequence(model, False)
if packed_sequence == 2:
model = RnnModelWithPackedSequence(model, True)
def make_input(batch_size):
seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
inputs = [inputs]
directions = 2 if bidirectional else 1
if initial_state:
h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
c0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
inputs.append((h0, c0))
if packed_sequence != 0:
inputs.append(torch.IntTensor(seq_lengths))
if len(inputs) == 1:
input = inputs[0]
else:
input = tuple(inputs)
return input
input = make_input(RNN_BATCH_SIZE)
self.run_model_test(
model, train=False, batch_size=RNN_BATCH_SIZE, input=input, use_gpu=False
)
# Test that the model still runs with a different batch size.
# (Export the model with a batch size of 1 so that the RNN has a variable batch size;
# otherwise expand will fail.)
variable_batch_size_init_input = make_input(1)
# Constant folding only works when the model has its parameters embedded; they are kept as inputs here, so disable it.
onnxir, _ = do_export(
model,
variable_batch_size_init_input,
keep_initializers_as_inputs=True,
do_constant_folding=False,
)
other_input = make_input(RNN_BATCH_SIZE + 1)
_ = run_embed_params(onnxir, model, other_input, use_gpu=False)
def _gru_test(self, layers, bidirectional, initial_state, packed_sequence, dropout):
batch_first = packed_sequence == 2
model = nn.GRU(
RNN_INPUT_SIZE,
RNN_HIDDEN_SIZE,
layers,
bidirectional=bidirectional,
dropout=dropout,
batch_first=batch_first,
)
if packed_sequence == 1:
model = RnnModelWithPackedSequence(model, False)
if packed_sequence == 2:
model = RnnModelWithPackedSequence(model, True)
def make_input(batch_size):
seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
inputs = [inputs]
directions = 2 if bidirectional else 1
if initial_state:
h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
inputs.append(h0)
if packed_sequence != 0:
inputs.append(torch.IntTensor(seq_lengths))
if len(inputs) == 1:
input = inputs[0]
else:
input = tuple(inputs)
return input
input = make_input(RNN_BATCH_SIZE)
self.run_model_test(
model, train=False, batch_size=RNN_BATCH_SIZE, input=input, use_gpu=False
)
# Test that the model still runs with a different batch size.
# (Export the model with a batch size of 1 so that the RNN has a variable batch size;
# otherwise expand will fail.)
variable_batch_size_init_input = make_input(1)
# Constant folding only works when the model has its parameters embedded; they are kept as inputs here, so disable it.
onnxir, _ = do_export(
model,
variable_batch_size_init_input,
keep_initializers_as_inputs=True,
do_constant_folding=False,
)
other_input = make_input(RNN_BATCH_SIZE + 1)
_ = run_embed_params(onnxir, model, other_input, use_gpu=False)
@unittest.skip("Disabled due to onnx optimizer deprecation")
def test_rnn_init_predict_split(self):
model = nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 3, bidirectional=True)
seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=7)
seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
input = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
input = rnn_utils.pad_sequence(input)
# Test that we are correctly splitting between init and
# predict net. When we embed parameters, there should be more
# ops in the init net.
mp = onnx.ModelProto.FromString(
do_export(
model,
input,
export_params=self.embed_params,
keep_initializers_as_inputs=True,
do_constant_folding=False,
)[0]
)
prepared = c2.prepare(mp, device="CPU")
if self.embed_params:
assert len(prepared.init_net.op) == 950
assert len(prepared.predict_net.op) == 101
else:
assert len(prepared.init_net.op) == 83
assert len(prepared.predict_net.op) == 968
def test_alexnet(self):
state_dict = model_zoo.load_url(model_urls["alexnet"], progress=False)
self.run_model_test(
alexnet(),
train=False,
batch_size=BATCH_SIZE,
state_dict=state_dict,
atol=1e-3,
)
@skipIfNoCuda
def test_dcgan(self):
# dcgan is flaky on some seeds, see:
# https://github.com/ProjectToffee/onnx/pull/70
torch.manual_seed(1)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1)
netD = dcgan._netD(1)
netD.apply(dcgan.weights_init)
input = torch.randn(BATCH_SIZE, 3, dcgan.imgsz, dcgan.imgsz)
self.run_model_test(netD, train=False, batch_size=BATCH_SIZE, input=input)
netG = dcgan._netG(1)
netG.apply(dcgan.weights_init)
state_dict = model_zoo.load_url(model_urls["dcgan_b"], progress=False)
# state_dict = model_zoo.load_url(model_urls["dcgan_f"], progress=False)
noise = torch.randn(BATCH_SIZE, dcgan.nz, 1, 1).normal_(0, 1)
self.run_model_test(
netG,
train=False,
batch_size=BATCH_SIZE,
input=noise,
state_dict=state_dict,
rtol=1e-2,
atol=1e-6,
)
@unittest.skipIf(
not torch.cuda.is_available(), "model on net has cuda in it, awaiting fix"
)
def test_densenet(self):
state_dict = model_zoo.load_url(model_urls["densenet121"], progress=False)
self.run_model_test(
densenet121(),
train=False,
batch_size=BATCH_SIZE,
state_dict=state_dict,
atol=1e-7,
)
@skip("doesn't match exactly...")
# TODO: figure out the numerical instabilities
def test_inception(self):
x = torch.randn(BATCH_SIZE, 3, 299, 299, requires_grad=True)
# state_dict = model_zoo.load_url(model_urls["inception_v3_google"], progress=False)
state_dict = None
self.run_model_test(
inception_v3(),
train=False,
batch_size=BATCH_SIZE,
state_dict=state_dict,
input=x,
)
@skipIfNoEmbed
def test_resnet(self):
state_dict = model_zoo.load_url(model_urls["resnet50"], progress=False)
self.run_model_test(
resnet50(),
train=False,
batch_size=BATCH_SIZE,
state_dict=state_dict,
atol=1e-5,
)
def test_squeezenet(self):
sqnet_v1_1 = SqueezeNet(version=1.1)
state_dict = model_zoo.load_url(model_urls["squeezenet1_1"], progress=False)
# state_dict = model_zoo.load_url(model_urls["squeezenet1_0"], progress=False)
self.run_model_test(
sqnet_v1_1, train=False, batch_size=BATCH_SIZE, state_dict=state_dict
)
# @skip("takes long to run, LAPACK needed for gpu")
@skipIfNoLapack
@unittest.skip("This model takes too much memory")
def test_srresnet(self):
super_resolution_net = SRResNet(rescale_factor=4, n_filters=64, n_blocks=8)
state_dict = model_zoo.load_url(model_urls["srresNet"], progress=False)
x = torch.randn(1, 3, 224, 224, requires_grad=True)
self.run_model_test(
super_resolution_net,
train=False,
batch_size=1,
state_dict=state_dict,
input=x,
use_gpu=False,
)
@skipIfTravis
@skipIfNoLapack
@skipIfNoCuda
def test_super_resolution(self):
super_resolution_net = SuperResolutionNet(upscale_factor=3)
state_dict = model_zoo.load_url(model_urls["super_resolution"], progress=False)
x = torch.randn(1, 1, 224, 224, requires_grad=True)
self.run_model_test(
super_resolution_net,
train=False,
batch_size=BATCH_SIZE,
state_dict=state_dict,
input=x,
use_gpu=False,
atol=1e-6,
)
@unittest.skip("This model takes too much memory")
def test_vgg16(self):
state_dict = model_zoo.load_url(model_urls["vgg16"], progress=False)
self.run_model_test(
vgg16(), train=False, batch_size=BATCH_SIZE, state_dict=state_dict
)
@skip("disable to run tests faster...")
def test_vgg16_bn(self):
self.run_model_test(vgg16_bn(), train=False, batch_size=BATCH_SIZE)
@skip("disable to run tests faster...")
def test_vgg19(self):
state_dict = model_zoo.load_url(model_urls["vgg19"], progress=False)
self.run_model_test(
vgg19(), train=False, batch_size=BATCH_SIZE, state_dict=state_dict
)
@skip("disable to run tests faster...")
def test_vgg19_bn(self):
self.run_model_test(vgg19_bn(), train=False, batch_size=BATCH_SIZE)
def run_word_language_model(self, model_name):
ntokens = 50
emsize = 5
nhid = 5
nlayers = 5
dropout = 0.2
tied = False
batchsize = 5
model = word_language_model.RNNModel(
model_name, ntokens, emsize, nhid, nlayers, dropout, tied, batchsize
)
x = torch.arange(0, ntokens).long().view(-1, batchsize)
# Only the CPU version is supported, since the tracer does not work with GPU RNNs.
self.run_model_test(
model,
train=False,
input=(x, model.hidden),
batch_size=batchsize,
use_gpu=False,
)
@unittest.skip("Disabled due to onnx optimizer deprecation")
@skipIfUnsupportedOpsetVersion([10])
def test_word_language_model_RNN_TANH(self):
self.run_word_language_model("RNN_TANH")
@unittest.skip("Disabled due to onnx optimizer deprecation")
@skipIfUnsupportedOpsetVersion([10])
def test_word_language_model_RNN_RELU(self):
self.run_word_language_model("RNN_RELU")
@unittest.skip("Disabled due to onnx optimizer deprecation")
@skipIfUnsupportedOpsetVersion([10])
def test_word_language_model_LSTM(self):
self.run_word_language_model("LSTM")
@unittest.skip("Disabled due to onnx optimizer deprecation")
@skipIfUnsupportedOpsetVersion([10])
def test_word_language_model_GRU(self):
self.run_word_language_model("GRU")
def test_batchnorm1d_special(self):
c = torch.randn(BATCH_SIZE, 224)
model = nn.BatchNorm1d(224)
self.run_model_test(model, train=True, input=c, batch_size=BATCH_SIZE)
def test_batchnorm1d(self):
c = torch.randn(BATCH_SIZE, 224, 224)
model = nn.BatchNorm1d(224)
self.run_model_test(model, train=True, input=c, batch_size=BATCH_SIZE)
def test_batchnorm1d_noaffine(self):
c = torch.randn(BATCH_SIZE, 224)
model = nn.BatchNorm1d(224, affine=False)
self.run_model_test(model, train=False, input=c, batch_size=BATCH_SIZE)
def test_batchnorm2d_noaffine(self):
c = torch.randn(128, 128, 1, 1)
model = nn.BatchNorm2d(128, affine=False)
self.run_model_test(model, train=False, input=c, batch_size=BATCH_SIZE)
def test_batchnorm3d_noaffine(self):
c = torch.randn(128, 128, 1, 1, 1)
model = nn.BatchNorm3d(128, affine=False)
self.run_model_test(model, train=False, input=c, batch_size=BATCH_SIZE)
def test_constant(self):
c = torch.randn(BATCH_SIZE, 3, 224, 224)
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input + c.type_as(input)
self.run_model_test(MyModel(), train=False, batch_size=BATCH_SIZE)
def test_consumed_bn(self):
underlying = nn.BatchNorm2d(3)
self.run_model_test(underlying, train=True, batch_size=BATCH_SIZE)
def _test_index_generic(self, fn):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return fn(input)
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_model_test(MyModel(), input=m1, train=False, batch_size=BATCH_SIZE)
def test_index_1d(self):
self._test_index_generic(lambda input: input[0])
@skipIfUnsupportedOpsetVersion([10])
def test_index_2d_1dimslice(self):
self._test_index_generic(lambda input: input[0:1, :])
@skipIfUnsupportedOpsetVersion([10])
def test_index_2d_sliceint(self):
self._test_index_generic(lambda input: input[1, :])
@skipIfUnsupportedOpsetVersion([10])
def test_index_2d_neg_slice(self):
self._test_index_generic(lambda input: input[0:-1, :])
@skipIfUnsupportedOpsetVersion([10])
def test_index_2d_2dimslice(self):
self._test_index_generic(lambda input: input[0:1, 0:1])
@skipIfUnsupportedOpsetVersion([10])
def test_index_2d_neg_slice2dim(self):
self._test_index_generic(lambda input: input[0:-1, 0:-1])
def test_tensor_index_1d(self):
self._test_index_generic(lambda input: input[torch.tensor([0, 2])])
def test_tensor_index_2d_1dconstant(self):
self._test_index_generic(lambda input: input[1, torch.tensor([0, 2])])
@skipIfUnsupportedOpsetVersion([10])
def test_tensor_index_2d_1dslice(self):
self._test_index_generic(lambda input: input[torch.tensor([0, 2]), 0:1])
@skipIfUnsupportedOpsetVersion([10])
def test_tensor_index_2d_1dslice_first(self):
self._test_index_generic(lambda input: input[1:3, torch.tensor([0, 2])])
def test_tensor_index_newaxis(self):
self._test_index_generic(lambda input: input[None, torch.tensor([0, 2])])
def test_tensor_index_advanced_indexing(self):
self._test_index_generic(
lambda input: input[
:,
torch.tensor([[0, 2], [1, 1]]),
:,
torch.tensor([2, 1]),
torch.tensor([0, 3]),
]
)
@skipIfUnsupportedOpsetVersion([10])
def test_tensor_index_advanced_indexing_with_slice(self):
self._test_index_generic(
lambda input: input[
:, torch.tensor([0, 2]), None, 2:4, torch.tensor([[1, 3], [4, 0]])
]
)
self._test_index_generic(
lambda input: input[
:,
torch.tensor([0, 2]),
torch.tensor([1]),
2:4,
torch.tensor([[1], [4]]),
]
)
def test_tensor_index_advanced_indexing_consecutive(self):
self._test_index_generic(
lambda input: input[
:, torch.tensor([0, 2]), torch.tensor([[1, 3], [4, 0]]), None
]
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_tensor_index_advanced_indexing_masked(self):
self._test_index_generic(
lambda input: input[
:,
torch.tensor([1, 0, 1, 0], dtype=torch.uint8),
torch.tensor([[1, 3], [4, 0]]),
None,
]
)
def test_chunk(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
# TODO: Why index? This returns a tuple and test runner doesn't
# support tuple comparison.
return input.chunk(8, dim=2)[-1]
self.run_model_test(MyModel(), train=False, batch_size=BATCH_SIZE)
def test_sqrt(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input.sqrt()
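# uniform_(4, 9) keeps the input strictly positive so sqrt (and log in the test below) stay well-defined.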
input = torch.empty(BATCH_SIZE, 10, 10).uniform_(4, 9)
self.run_model_test(MyModel(), train=False, input=input, batch_size=BATCH_SIZE)
def test_rsqrt(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input.rsqrt()
input = torch.randn(4, 2, 3, requires_grad=True)
self.run_model_test(MyModel(), train=False, input=input, batch_size=BATCH_SIZE)
def test_log(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input.log()
input = torch.empty(BATCH_SIZE, 10, 10).uniform_(4, 9)
self.run_model_test(MyModel(), train=False, input=input, batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(9)
def test_erf(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input.erf()
input = torch.empty(BATCH_SIZE, 10, 10).uniform_(4, 9)
self.run_model_test(MyModel(), train=False, input=input, batch_size=BATCH_SIZE)
def test_trigonometry(self):
def test_func(name):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return getattr(input, name)()
input = torch.empty(BATCH_SIZE, 10, 10).uniform_()
self.run_model_test(
MyModel(), train=False, input=input, batch_size=BATCH_SIZE
)
test_func("cos")
test_func("sin")
test_func("tan")
test_func("acos")
test_func("asin")
test_func("atan")
def test_addconstant(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input + 1
self.run_model_test(MyModel(), train=False, batch_size=BATCH_SIZE)
def test_subconstant(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input - 1
self.run_model_test(MyModel(), train=False, batch_size=BATCH_SIZE)
def test_arithmetic(self):
class ArithmeticModule(torch.nn.Module):
def forward(self, x):
x = x + 2
x = x - 4
x = x * 6
x = x / 8
return x
x = torch.randn(2, 3, 4)
self.run_model_test(
ArithmeticModule(), input=x, train=False, batch_size=BATCH_SIZE
)
def test_embedding(self):
model = nn.Embedding(10, 3, padding_idx=-1)
input = torch.LongTensor(list(range(10))[::-1])
self.run_model_test(model, train=False, input=input, batch_size=BATCH_SIZE)
def test_constantpad2d(self):
model = nn.ConstantPad2d((1, 2, 3, 4), 3.5)
self.run_model_test(model, train=False, batch_size=BATCH_SIZE)
def test_reflectionpad2d(self):
model = nn.ReflectionPad2d((1, 2, 3, 4))
self.run_model_test(model, train=False, batch_size=BATCH_SIZE)
def test_replicationpad2d(self):
model = nn.ReplicationPad2d((1, 2, 3, 4))
self.run_model_test(model, train=False, batch_size=BATCH_SIZE)
def test_maxpool2d(self):
model = nn.MaxPool2d(5, padding=(1, 2))
self.run_model_test(model, train=False, batch_size=BATCH_SIZE)
def test_maxpool2d_single_padding(self):
model = nn.MaxPool2d(5, padding=2)
self.run_model_test(model, train=False, batch_size=BATCH_SIZE)
@skipIfUnsupportedOpsetVersion([10])
def test_maxpool1d_ceil(self):
model = nn.MaxPool1d(3, 2, ceil_mode=True)
x = torch.randn(20, 16, 50, requires_grad=True)
self.run_model_test(model, train=False, input=x, batch_size=BATCH_SIZE)
@skipIfUnsupportedOpsetVersion([10])
def test_maxpool2d_ceil(self):
model = nn.MaxPool2d(3, 2, ceil_mode=True)
x = torch.randn(20, 16, 50, 32, requires_grad=True)
self.run_model_test(model, train=False, input=x, batch_size=BATCH_SIZE)
@skipIfUnsupportedOpsetVersion([10])
def test_maxpool3d_ceil(self):
model = nn.MaxPool3d(3, 2, ceil_mode=True)
x = torch.randn(20, 16, 50, 44, 31, requires_grad=True)
self.run_model_test(model, train=False, input=x, batch_size=BATCH_SIZE)
@unittest.skip("C2 and PyTorch have a small difference in their padding implementations")
def test_avgpool2d(self):
model = nn.AvgPool2d(5, padding=2)
self.run_model_test(model, train=False, batch_size=BATCH_SIZE)
def test_avgpool2d_with_count_include_pad_set_false(self):
model = nn.AvgPool2d(7, padding=2, count_include_pad=False)
self.run_model_test(model, train=False, batch_size=BATCH_SIZE)
def test_avgpool2d_with_count_include_pad_set_true(self):
model = nn.AvgPool2d(7, padding=2, count_include_pad=True)
self.run_model_test(model, train=False, batch_size=BATCH_SIZE)
def test_avgpool2d_no_padding(self):
model = nn.AvgPool2d(5)
self.run_model_test(model, train=False, batch_size=BATCH_SIZE)
@unittest.skip("Disabled due to onnx optimizer deprecation")
@skipIfUnsupportedOpsetVersion([10])
def test_avg_pool1D_ceil(self):
model = torch.nn.AvgPool1d(3, 2, ceil_mode=True)
x = torch.randn(1, 1, 7, requires_grad=True)
self.run_model_test(model, train=False, input=x, batch_size=BATCH_SIZE)
@skipIfUnsupportedOpsetVersion([10])
def test_avg_pool2D_ceil(self):
model = torch.nn.AvgPool2d(3, 2, ceil_mode=True)
x = torch.randn(20, 16, 50, 32, requires_grad=True)
self.run_model_test(model, train=False, input=x, batch_size=BATCH_SIZE)
@unittest.skip("Disabled due to onnx optimizer deprecation")
@skipIfUnsupportedOpsetVersion([10])
def test_avg_pool3D_ceil(self):
model = torch.nn.AvgPool3d(3, 2, ceil_mode=True)
x = torch.randn(20, 16, 50, 44, 31, requires_grad=True)
self.run_model_test(model, train=False, input=x, batch_size=BATCH_SIZE)
def test_adaptive_avg_pool1D(self):
model = torch.nn.AdaptiveAvgPool1d(5)
x = torch.randn(20, 16, 50, requires_grad=True)
self.run_model_test(model, train=False, input=x, batch_size=BATCH_SIZE)
def test_adaptive_avg_pool2D(self):
model = torch.nn.AdaptiveAvgPool2d((5, 4))
x = torch.randn(20, 16, 50, 32, requires_grad=True)
self.run_model_test(model, train=False, input=x, batch_size=BATCH_SIZE)
def test_adaptive_avg_pool3D(self):
model = torch.nn.AdaptiveAvgPool3d((5, 4, 3))
x = torch.randn(20, 16, 50, 44, 30, requires_grad=True)
self.run_model_test(model, train=False, input=x, batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(8)
def test_adaptive_max_pool1D(self):
model = torch.nn.AdaptiveMaxPool1d(5)
x = torch.randn(20, 16, 50, requires_grad=True)
self.run_model_test(model, train=False, input=x, batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(8)
def test_adaptive_max_pool2D(self):
model = torch.nn.AdaptiveMaxPool2d((5, 4))
x = torch.randn(20, 16, 50, 32, requires_grad=True)
self.run_model_test(model, train=False, input=x, batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(8)
def test_adaptive_max_pool3D(self):
model = torch.nn.AdaptiveMaxPool3d((5, 4, 3))
x = torch.randn(20, 16, 50, 44, 30, requires_grad=True)
self.run_model_test(model, train=False, input=x, batch_size=BATCH_SIZE)
def test_weight_norm(self):
model = nn.utils.weight_norm(nn.Conv1d(1, 1, 3))
input = torch.randn(1, 1, 5, requires_grad=True)
self.run_model_test(model, train=True, batch_size=0, input=input, use_gpu=False)
def test_mnist(self):
model = MNIST()
input = torch.randn(BATCH_SIZE, 1, 28, 28)
state_dict = None
# TODO: test with state_dict
self.run_model_test(
model,
train=False,
input=input,
batch_size=BATCH_SIZE,
state_dict=state_dict,
)
def test_mm(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, m1, m2):
return torch.mm(m1, m2)
m1 = torch.randn(3, 4)
m2 = torch.randn(4, 5)
self.run_model_test(
MyModel(), train=False, input=(m1, m2), batch_size=BATCH_SIZE, use_gpu=False
)
def test_addmm(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, ma, m1, m2):
return torch.addmm(ma, m1, m2)
ma = torch.randn(5)
m1 = torch.randn(3, 4)
m2 = torch.randn(4, 5)
self.run_model_test(
MyModel(),
train=False,
input=(ma, m1, m2),
batch_size=BATCH_SIZE,
use_gpu=False,
)
def test_fuse_addmm(self):
class AddmmModel(torch.nn.Module):
def forward(self, x):
return torch.mm(x, x) + x
x = torch.randn(3, 3)
self.run_model_test(
AddmmModel(), train=False, input=x, batch_size=BATCH_SIZE, use_gpu=False
)
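# The next test exercises implicit scalar-type handling in the exported graph:
# size()*tensor arithmetic, a double-precision reciprocal, mixed int/float comparisons,
# and matmul/addmm, all of which must resolve dtypes consistently on the Caffe2 side.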
def test_scalar_type(self):
class ArithmeticModel(torch.nn.Module):
def forward(self, x):
return x.size(0) * 2 * x
x = torch.ones(2, 3, dtype=torch.float32)
self.run_model_test(
ArithmeticModel(), input=x, train=False, batch_size=BATCH_SIZE
)
class ReciprocalModel(torch.nn.Module):
def forward(self, x):
return torch.reciprocal(x)
x = torch.tensor([2.0, 4.0], dtype=torch.double)
self.run_model_test(
ReciprocalModel(), input=x, train=False, batch_size=BATCH_SIZE
)
class ComparisonModel(torch.nn.Module):
def forward(self, x, y):
return x.ge(0.5) & y.le(2)
x = torch.ones(2, 3, dtype=torch.int32)
y = torch.ones(2, 3, dtype=torch.float32)
self.run_model_test(
ComparisonModel(), input=(x, y), train=False, batch_size=BATCH_SIZE
)
class MatMulModel(torch.nn.Module):
def forward(self, x, y):
return torch.mm(x, y)
x = torch.ones(3, 4)
y = torch.ones(4, 5)
self.run_model_test(
MatMulModel(), input=(x, y), train=False, batch_size=BATCH_SIZE
)
class AddMMModel(torch.nn.Module):
def forward(self, x):
return torch.mm(x, x) + x
x = torch.ones(3, 3)
self.run_model_test(AddMMModel(), input=x, train=False, batch_size=BATCH_SIZE)
# test for a pytorch optimization pass, see https://github.com/pytorch/pytorch/pull/7872
def test_consecutive_transposes(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.transpose(1, 2).transpose(2, 3)
x = torch.randn(5, 6, 7, 8)
self.run_model_test(
MyModel(), train=False, input=x, batch_size=BATCH_SIZE, use_gpu=False
)
def test_sum(self):
shape = (3, 4, 5)
for params in [{}] + [{"dim": i} for i in range(len(shape))]:
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.sum(x, **params)
x = torch.randn(*shape)
self.run_model_test(
MyModel(), train=False, input=(x), batch_size=BATCH_SIZE, use_gpu=False
)
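# cumsum is exported with ONNX_ATEN_FALLBACK below; presumably there is no native
# ONNX lowering for it at the opsets this suite targets, so the exporter falls back
# to an ATen op that the Caffe2 backend can still execute.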
def test_cumsum(self):
shape = (3, 4, 5)
for params in [{"dim": i} for i in range(len(shape))]:
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.cumsum(x, **params)
x = torch.randn(*shape)
self.run_model_test(
MyModel(),
train=False,
input=(x),
batch_size=BATCH_SIZE,
use_gpu=False,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
)
def test_cosine_similarity(self):
shape = (100, 128)
x = torch.randn(*shape)
y = torch.randn(*shape)
self.run_model_test(
torch.nn.CosineSimilarity(dim=1, eps=1e-6),
train=False,
input=(x, y),
batch_size=BATCH_SIZE,
use_gpu=False,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
)
@unittest.skip("Disabled due to onnx optimizer deprecation")
@skipIfUnsupportedOpsetVersion([10])
def test_lstm_constant_folding(self):
class LstmNet(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, bidirectional):
super().__init__()
self.lstm = nn.LSTM(
input_size, hidden_size, num_layers, bidirectional=bidirectional
)
def forward(self, input, initial_state):
return self.lstm(input, initial_state)
def get_LstmNet_model_and_inputs(
input_size, hidden_size, num_layers, batch_size, seq_len, bidirectional
):
num_directions = 2 if bidirectional else 1
model = LstmNet(input_size, hidden_size, num_layers, bidirectional)
input = torch.randn(seq_len, batch_size, input_size)
h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
c0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
return model, (input, (h0, c0))
batch_size1 = 3
model1, input1 = get_LstmNet_model_and_inputs(7, 3, 2, batch_size1, 5, True)
self.run_actual_test(
model1,
train=False,
batch_size=batch_size1,
input=input1,
use_gpu=False,
do_constant_folding=True,
)
batch_size2 = 4
model2, input2 = get_LstmNet_model_and_inputs(5, 4, 3, batch_size2, 7, False)
self.run_actual_test(
model2,
train=False,
batch_size=batch_size2,
input=input2,
use_gpu=False,
do_constant_folding=True,
)
@unittest.skip("Disabled due to onnx optimizer deprecation")
@skipIfUnsupportedOpsetVersion([10])
def test_gru_constant_folding(self):
class GruNet(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, bidirectional):
super().__init__()
self.mygru = nn.GRU(
input_size, hidden_size, num_layers, bidirectional=bidirectional
)
def forward(self, input, initial_state):
out = self.mygru(input, initial_state)
return out
def get_GruNet_model_and_inputs(
input_size, hidden_size, num_layers, batch_size, seq_len, bidirectional
):
num_directions = 2 if bidirectional else 1
model = GruNet(input_size, hidden_size, num_layers, bidirectional)
input = torch.randn(seq_len, batch_size, input_size)
h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
return model, (input, h0)
batch_size1 = 3
model1, input1 = get_GruNet_model_and_inputs(7, 3, 2, batch_size1, 5, True)
self.run_actual_test(
model1,
train=False,
batch_size=batch_size1,
input=input1,
use_gpu=False,
do_constant_folding=True,
)
batch_size2 = 4
model2, input2 = get_GruNet_model_and_inputs(5, 4, 3, batch_size2, 7, False)
self.run_actual_test(
model2,
train=False,
batch_size=batch_size2,
input=input2,
use_gpu=False,
do_constant_folding=True,
)
def test_repeat(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.repeat(1, 2, 3, 4)
x = torch.randn(4, 3, 2, 1, requires_grad=True)
self.run_model_test(
MyModel(), train=False, input=(x), batch_size=BATCH_SIZE, use_gpu=False
)
@skipIfUnsupportedOpsetVersion([10])
def test_upsample(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
model = nn.Upsample(size=[v * 2 for v in x.size()[2:]], mode="nearest")
self.run_model_test(
model, train=False, input=(x), batch_size=BATCH_SIZE, use_gpu=False
)
@skipIfUnsupportedOpsetVersion([10])
def test_interpolate_upsample(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
size = [v * 2 for v in x.size()[2:]]
# workaround for now: turn the dynamic sizes into constants
size = [int(i) for i in size]
return nn.functional.interpolate(x, size=size, mode="nearest")
x = torch.randn(1, 2, 3, 4, requires_grad=True)
model = MyModel()
self.run_model_test(
model, train=False, input=(x), batch_size=BATCH_SIZE, use_gpu=False
)
@skipIfUnsupportedOpsetVersion([7, 8, 10])
def test_interpolate_upsample_dynamic_sizes(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
size = [v * 2 for v in x.size()[2:]]
return nn.functional.interpolate(x, size=size, mode="nearest")
x = torch.randn(1, 2, 3, 4, requires_grad=True)
model = MyModel()
self.run_model_test(
model, train=False, input=(x), batch_size=BATCH_SIZE, use_gpu=False
)
def test_repeat_dim_overflow(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.repeat(1, 2, 3, 4)
x = torch.randn(1, 2, requires_grad=True)
self.run_model_test(
MyModel(), train=False, input=(x), batch_size=BATCH_SIZE, use_gpu=False
)
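# remained_onnx_input_idx (a knob of this suite's run_model_test helper) appears to
# list which original inputs are expected to survive as ONNX graph inputs after unused
# ones are pruned; [0] below means only `x` remains, since `y` contributes shape only.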
def test_repeat_dynamic(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x.repeat(y.size()[0] // 2, y.size()[1] * 2)
x = torch.randn(1, 2, requires_grad=True)
y = torch.randn(2, 4, requires_grad=True)
self.run_model_test(
MyModel(),
train=False,
input=(x, y),
batch_size=BATCH_SIZE,
use_gpu=False,
input_names=["x", "y"],
dynamic_axes={"x": [0, 1], "y": [0, 1]},
)
self.run_model_test(
MyModel(),
train=False,
input=(x, y),
batch_size=BATCH_SIZE,
use_gpu=False,
remained_onnx_input_idx=[0],
)
def test_mean(self):
shape = (3, 4, 5)
for params in [{}] + [{"dim": i} for i in range(len(shape))]:
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.mean(x, **params)
x = torch.randn(*shape)
self.run_model_test(
MyModel(), train=False, input=(x), batch_size=BATCH_SIZE, use_gpu=False
)
# TODO: Add test cases for prod once Caffe2 has support for ReduceProd
def test_softmax(self):
for i in range(2, 8):
for d in range(0, i - 1):
model = nn.Softmax(dim=d)
dims = [2] * (i - 2) + [3, 4]
input = torch.ones(*dims, requires_grad=True)
self.run_model_test(
model, train=False, batch_size=BATCH_SIZE, input=input
)
def test_softmax_dtype(self):
class SoftmaxModel(torch.nn.Module):
def forward(self, input):
return nn.functional.softmax(input, dim=0, dtype=torch.float64)
x = torch.randn(1, 2, 3, requires_grad=True, dtype=torch.float32)
self.run_model_test(SoftmaxModel(), train=False, input=x, batch_size=BATCH_SIZE)
def test_logsoftmax(self):
for i in range(2, 7):
model = nn.LogSoftmax(dim=i - 1)
dims = [2] * (i - 2) + [3, 4]
input = torch.ones(*dims, requires_grad=True)
self.run_model_test(model, train=False, batch_size=BATCH_SIZE, input=input)
def test_logsoftmax_dim(self):
for i in range(-4, 3):
model = nn.LogSoftmax(dim=i)
input = torch.randn(3, 4, 5, 6)
self.run_model_test(model, train=False, batch_size=BATCH_SIZE, input=input)
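# The two tests below return only a shape, so every tensor input can be pruned from
# the exported graph (remained_onnx_input_idx=[]).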
def test_randn(self):
x = torch.randn(1, 2, 3, 4)
class MyModule(torch.nn.Module):
def forward(self, x):
return (torch.randn(1, 2, 3, 4) + x).shape
self.run_model_test(
MyModule(),
train=False,
input=(x),
batch_size=BATCH_SIZE,
use_gpu=False,
remained_onnx_input_idx=[],
)
def test_rand(self):
x = torch.randn(1, 2, 3, 4)
class MyModule(torch.nn.Module):
def forward(self, x):
return (torch.rand(1, 2, 3, 4) + x).shape
self.run_model_test(
MyModule(),
train=False,
input=(x),
batch_size=BATCH_SIZE,
use_gpu=False,
remained_onnx_input_idx=[],
)
def test_convtranspose(self):
model = nn.ConvTranspose2d(
3, 3, 3, stride=3, bias=False, padding=1, output_padding=2
)
self.run_model_test(model, train=False, batch_size=BATCH_SIZE, atol=1e-7)
def test_unsqueeze(self):
shape = (3, 4, 5)
# test negative dim as well.
for dim in range(-len(shape) - 1, len(shape) + 1):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.unsqueeze(dim)
x = torch.randn(*shape)
self.run_model_test(
MyModel(), train=False, input=(x), batch_size=BATCH_SIZE, atol=1e-7
)
def test_squeeze(self):
shape = (1, 1, 1)
# test negative dim as well
for dim in range(-len(shape), len(shape)):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.squeeze(dim)
x = torch.randn(*shape)
self.run_model_test(
MyModel(), train=False, input=(x), batch_size=BATCH_SIZE, atol=1e-7
)
# NB: InstanceNorm model includes unused weights, so skip this in TestCaffe2BackendEmbed
# TODO: We should have another pass to eliminate the unused initializers in ONNX models.
@skipIfEmbed
def test_instance_norm(self):
underlying = nn.InstanceNorm2d(3)
self.run_model_test(underlying, train=False, batch_size=BATCH_SIZE)
@unittest.skip("Disabled due to onnx optimizer deprecation")
def test_pixel_shuffle(self):
underlying = nn.PixelShuffle(4)
shape = (1, 32, 5, 5)
input = Variable(torch.randn(*shape), requires_grad=True)
self.run_model_test(
underlying, train=False, input=(input), batch_size=BATCH_SIZE
)
def test_dynamic_sizes(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
shape = torch.onnx.operators.shape_as_tensor(x)
new_shape = torch.cat((torch.LongTensor([-1]), shape[0].view(1)))
return torch.onnx.operators.reshape_from_tensor_shape(x, new_shape)
x = torch.randn(3, 5, 7)
self.run_model_test(
MyModel(), train=False, input=x, batch_size=BATCH_SIZE, use_gpu=False
)
def test_advanced_broadcast(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return torch.mul(x, y)
x = torch.randn(1, 5, 10)
y = torch.randn(1, 5, 1)
self.run_model_test(
MyModel(), train=False, input=(x, y), batch_size=BATCH_SIZE, use_gpu=False
)
def test_int8_export(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.ByteTensor(3, 4).random_()
def forward(self, x):
return x * self.param.float()
import io
f = io.BytesIO()
from torch.onnx import ExportTypes
torch.onnx._export(
MyModel(),
(torch.rand(3, 4),),
f,
verbose=True,
export_type=ExportTypes.ZIP_ARCHIVE,
keep_initializers_as_inputs=True,
)
X = np.random.rand(3, 4).astype(np.float32)
f.seek(0)
import caffe2.python.onnx.backend as c2
model = c2.prepare_zip_archive(f)
model.run(X)
@skipIfUnsupportedOpsetVersion([10])
def test_neg_slice(self):
class NegSlice(torch.nn.Module):
def forward(self, x):
return x[-1, :, :]
x = torch.randn(3, 4, 5)
self.run_model_test(
NegSlice(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False
)
@skipIfUnsupportedOpsetVersion([10])
def test_neg_slice_large(self):
class NegSlice(torch.nn.Module):
def forward(self, x):
return x[:, :, :, :, -3]
x = torch.randn(3, 4, 5, 6, 7)
self.run_model_test(
NegSlice(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False
)
@unittest.skip("https://github.com/pytorch/pytorch/issues/10984")
@skipIfUnsupportedOpsetVersion([10])
def test_neg_slice_large_negone(self):
class NegSlice(torch.nn.Module):
def forward(self, x):
return x[:, :, :, :, -1]
x = torch.randn(3, 4, 5, 6, 7)
self.run_model_test(
NegSlice(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False
)
@skipIfUnsupportedMinOpsetVersion(11)
def test_dynamic_slice(self):
class DynamicSliceExportMod(torch.nn.Module):
def forward(self, x):
results = []
for i in range(4):
results.append(x[: x.size(0) - i, i : x.size(2), i:3])
return tuple(results)
x = torch.rand(5, 5, 5)
self.run_model_test(
DynamicSliceExportMod(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
)
@skipIfUnsupportedMinOpsetVersion(11)
def test_dynamic_slice_script(self):
class DynamicSliceModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x[1 : x.size(0)]
module = DynamicSliceModel()
x = torch.rand(1, 2)
self.run_model_test(
module,
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
)
@skipIfUnsupportedMinOpsetVersion(11)
def test_dynamic_slice_to_the_end(self):
class DynamicSliceExportMod(torch.nn.Module):
def forward(self, x):
results = []
for i in range(4):
results.append(x[:, i:, x.size(2) - 5])
return tuple(results)
x = torch.rand(5, 5, 5)
self.run_model_test(
DynamicSliceExportMod(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
)
def test_unbind(self):
class UnbindModel(torch.nn.Module):
def forward(self, input):
return input.unbind()
x = torch.randn(3, 4, 5)
self.run_model_test(
UnbindModel(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False
)
class UnbindModel2(torch.nn.Module):
def forward(self, input):
_, out, _, _ = input.unbind(1)
return out
x = torch.randn(3, 4, 5)
self.run_model_test(
UnbindModel2(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_zero(self):
class Zero_(torch.nn.Module):
def forward(self, x):
return x.zero_()
x = torch.randn(2, 3, 4)
self.run_model_test(
Zero_(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2]},
)
self.run_model_test(
Zero_(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
remained_onnx_input_idx=[],
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_fill(self):
class Fill_(torch.nn.Module):
def forward(self, x):
return x.fill_(3)
x = torch.randn(2, 3, 4)
self.run_model_test(
Fill_(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2]},
)
self.run_model_test(
Fill_(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
remained_onnx_input_idx=[],
)
# ConstantFill is a deprecated experimental op (used in opsets < 9).
# Shape inference does not cover this op.
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_arithmetic(self):
class Arithmetic(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self):
x = torch.ones(2, 3, 4)
y = torch.ones(2, 3, 4) * 2
x.add_(3)
y.mul_(x)
return x, y
x = torch.ones(2, 3, 4)
y = torch.ones(2, 3, 4) * 2
self.run_model_test(
Arithmetic(), train=False, input=(), batch_size=BATCH_SIZE, use_gpu=False
)
def test_tensor_factories(self):
class TensorFactory(torch.nn.Module):
def forward(self, x):
return torch.zeros(x.size()) + torch.ones(x.size())
x = torch.randn(2, 3, 4)
self.run_model_test(
TensorFactory(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2]},
)
self.run_model_test(
TensorFactory(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
remained_onnx_input_idx=[],
)
def test_tensor_factories_script(self):
class TensorFactory(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.zeros(x.shape, dtype=torch.float) + torch.ones(
x.shape, dtype=torch.float
)
x = torch.randn(2, 3, 4)
self.run_model_test(
TensorFactory(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2]},
)
self.run_model_test(
TensorFactory(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
remained_onnx_input_idx=[],
)
def test_tensor_like_factories_script(self):
class TensorFactory(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
zeros = torch.zeros_like(
x,
dtype=torch.float,
layout=torch.strided,
device=torch.device("cpu"),
)
ones = torch.ones_like(
x,
dtype=torch.float,
layout=torch.strided,
device=torch.device("cpu"),
)
return zeros + ones
x = torch.randn(2, 3, 4)
self.run_model_test(
TensorFactory(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2]},
)
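# For opset < 9 the *_like factories presumably cannot be folded away, so the input
# must stay in the graph (None keeps the default behavior); from opset 9 on it can be pruned.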
remained_onnx_input_idx = None if self.opset_version < 9 else []
self.run_model_test(
TensorFactory(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
remained_onnx_input_idx=remained_onnx_input_idx,
)
def test_full(self):
class FullModel(torch.nn.Module):
def forward(self, x):
return torch.full((3, 4), x, dtype=torch.long)
x = torch.tensor(12)
self.run_model_test(
FullModel(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False
)
def test_full_script(self):
class FullClass(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.full((4, 5), x, dtype=torch.long)
x = torch.tensor(12)
self.run_model_test(
FullClass(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False
)
def test_clamp(self):
class ClampModel(torch.nn.Module):
def forward(self, x):
return x.clamp(-0.5, 0.5)
x = torch.randn(3, 4)
self.run_model_test(
ClampModel(), train=False, input=(x,), batch_size=BATCH_SIZE
)
class ClampMinModel(torch.nn.Module):
def forward(self, x):
return x.clamp(min=-0.5)
x = torch.randn(3, 4)
self.run_model_test(
ClampMinModel(), train=False, input=(x,), batch_size=BATCH_SIZE
)
class ClampMaxModel(torch.nn.Module):
def forward(self, x):
return x.clamp(max=0.5)
x = torch.randn(3, 4)
self.run_model_test(
ClampMaxModel(), train=False, input=(x,), batch_size=BATCH_SIZE
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_where_functional(self):
class WhereFunctional(torch.nn.Module):
def forward(self, x):
return torch.where(x > 2.0, x, torch.neg(x))
x = torch.randn(3, 4)
self.run_model_test(
WhereFunctional(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_where_method(self):
class WhereMethod(torch.nn.Module):
def forward(self, x):
return x.where(x > 2.0, torch.neg(x))
x = torch.randn(3, 4)
self.run_model_test(
WhereMethod(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False
)
def test_data_dependent_zeros_factory(self):
class ZerosFactory(torch.nn.Module):
def forward(self, input):
return torch.cat(
[input, torch.zeros(input.size(0), 1).type_as(input)], dim=1
)
x = torch.zeros(3, 4)
self.run_model_test(
ZerosFactory(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
)
def test_implicit_expand(self):
class ImplicitExpandExportMod(torch.nn.Module):
def forward(self, x):
return x + 1
x = torch.randn(3, 4)
self.run_model_test(
ImplicitExpandExportMod(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
)
def test_reduce_sum(self):
class ReduceSumNegativeIndices(torch.nn.Module):
def forward(self, x):
return x.sum(-1)
x = torch.randn(2, 3, 4)
self.run_model_test(
ReduceSumNegativeIndices(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
)
def test_reduce_sum_multi_dim(self):
class ReduceSumMultipleAxes(torch.nn.Module):
def forward(self, x):
return x.sum(dim=(2, 3), keepdim=True)
x = torch.randn(16, 3, 256, 256)
self.run_model_test(
ReduceSumMultipleAxes(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
use_gpu=False,
)
# InstanceNorm model (used in the subgraph) includes unused weights,
# so skip this in TestCaffe2BackendEmbed
@skipIfEmbed
def test_group_norm(self):
c = torch.randn(BATCH_SIZE, 6, 224, 224)
model = nn.GroupNorm(3, 6, eps=0.0002)
self.run_model_test(model, train=True, input=c, batch_size=BATCH_SIZE)
# InstanceNorm model (used in the subgraph) includes unused weights,
# so skip this in TestCaffe2BackendEmbed
@skipIfEmbed
def test_group_norm_noaffine(self):
c = torch.randn(BATCH_SIZE, 6, 224, 224)
model = nn.GroupNorm(3, 6, eps=0.0002, affine=False)
self.run_model_test(model, train=True, input=c, batch_size=BATCH_SIZE)
def test_rsub(self):
class RsubModel(torch.nn.Module):
def forward(self, x):
return 1 - x
x = torch.randn(1, 2)
self.run_model_test(
RsubModel(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_isnan(self):
class IsNaNModel(torch.nn.Module):
def forward(self, input):
return torch.isnan(input)
x = torch.tensor([1.0, float("nan"), 2.0])
self.run_model_test(
IsNaNModel(), train=False, input=x, batch_size=BATCH_SIZE, use_gpu=False
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter(self):
class ScatterModel(torch.nn.Module):
def forward(self, input, indices, values):
return input.scatter(1, indices, values)
input = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
indices = torch.tensor([[1, 0], [0, 2], [0, 1]], dtype=torch.int64)
values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
self.run_model_test(
ScatterModel(),
train=False,
input=(input, indices, values),
batch_size=BATCH_SIZE,
use_gpu=False,
)
input = torch.zeros(3, 4, 5, 6)
indices = torch.tensor([[1, 0], [0, 2], [0, 1]], dtype=torch.int64)
indices = indices.view(3, 2, 1, 1).expand(3, 2, 5, 6)
values = torch.arange(3 * 2 * 5 * 6, dtype=torch.float32).view(3, 2, 5, 6)
self.run_model_test(
ScatterModel(),
train=False,
input=(input, indices, values),
batch_size=BATCH_SIZE,
use_gpu=False,
)
input = torch.zeros(3, 4, 2)
indices = torch.tensor([[[1, 0], [0, 2]], [[1, 1], [0, 1]], [[2, 1], [2, 2]]])
values = torch.arange(3 * 2 * 2, dtype=torch.float32).view(3, 2, 2)
self.run_model_test(
ScatterModel(),
train=False,
input=(input, indices, values),
batch_size=BATCH_SIZE,
use_gpu=False,
)
@skipIfUnsupportedOpsetVersion([10])
def test_flatten(self):
class FlattenModel(torch.nn.Module):
def forward(self, input):
return torch.flatten(input)
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.run_model_test(FlattenModel(), train=False, input=x, batch_size=BATCH_SIZE)
def test_flatten2D(self):
class FlattenModel(torch.nn.Module):
def forward(self, input):
return torch.flatten(input, 1)
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.run_model_test(FlattenModel(), train=False, input=x, batch_size=BATCH_SIZE)
def test_max(self):
class MaxModel(torch.nn.Module):
def forward(self, input):
return torch.max(input, dim=1)
x = torch.randn(4, 4, requires_grad=True)
self.run_model_test(MaxModel(), train=False, input=x, batch_size=BATCH_SIZE)
def test_max_keepdim(self):
class MaxModel(torch.nn.Module):
def forward(self, input):
return torch.max(input, dim=1, keepdim=True)
x = torch.randn(4, 4, requires_grad=True)
self.run_model_test(MaxModel(), train=False, input=x, batch_size=BATCH_SIZE)
def test_max_tensors(self):
class MaxModel(torch.nn.Module):
def forward(self, input, other):
return torch.max(input, other)
x = torch.randn(4, 4, requires_grad=True)
y = torch.randn(4, 4, requires_grad=True)
self.run_model_test(
MaxModel(), train=False, input=(x, y), batch_size=BATCH_SIZE
)
def test_min(self):
class MinModel(torch.nn.Module):
def forward(self, input):
return torch.min(input, dim=1)
x = torch.randn(4, 4, requires_grad=True)
self.run_model_test(MinModel(), train=False, input=x, batch_size=BATCH_SIZE)
def test_argmax(self):
class ArgmaxModel(torch.nn.Module):
def forward(self, input):
return torch.argmax(input, dim=1)
x = torch.randn(4, 4, requires_grad=True)
self.run_model_test(ArgmaxModel(), train=False, input=x, batch_size=BATCH_SIZE)
def test_argmax_none_dim(self):
class ArgmaxModel(torch.nn.Module):
def forward(self, input):
return torch.argmax(input)
x = torch.randn(4, 4, requires_grad=True)
self.run_model_test(ArgmaxModel(), train=False, input=x, batch_size=BATCH_SIZE)
def test_argmin(self):
class ArgminModel(torch.nn.Module):
def forward(self, input):
return torch.argmin(input, dim=1)
x = torch.randn(4, 4, requires_grad=True)
self.run_model_test(ArgminModel(), train=False, input=x, batch_size=BATCH_SIZE)
def test_argmin_none_dim(self):
class ArgminModel(torch.nn.Module):
def forward(self, input):
return torch.argmin(input)
x = torch.randn(4, 4, requires_grad=True)
self.run_model_test(ArgminModel(), train=False, input=x, batch_size=BATCH_SIZE)
def test_reshape(self):
class ReshapeModel(torch.nn.Module):
def forward(self, input):
return input.reshape(1, 1)
x = torch.randn(1, requires_grad=True)
self.run_model_test(ReshapeModel(), train=False, input=x, batch_size=BATCH_SIZE)
def test_reshape_as(self):
class ReshapeAsModel(torch.nn.Module):
def forward(self, input):
y = torch.randn(3, 1, 2, 1, requires_grad=False)
return input.reshape_as(y)
x = torch.randn(2, 3, requires_grad=True)
self.run_model_test(
ReshapeAsModel(), train=False, input=x, batch_size=BATCH_SIZE
)
@skipIfUnsupportedOpsetVersion([10])
def test_narrow(self):
class NarrowModel(torch.nn.Module):
def forward(self, input):
return torch.narrow(input, 0, 0, 2)
x = torch.randn(3, 3, requires_grad=True)
self.run_model_test(NarrowModel(), train=False, input=x, batch_size=BATCH_SIZE)
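# randn_like produces nondeterministic values, so the test below exports manually and
# only compares output shapes instead of going through run_model_test's value check.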
def test_randn_like(self):
class RandNLikeModel(torch.nn.Module):
def forward(self, input):
return torch.randn_like(input)
x = torch.randn(2, 3, 4, requires_grad=False)
model = RandNLikeModel()
onnxir, _ = do_export(model, x, keep_initializers_as_inputs=True)
onnx_model = onnx.ModelProto.FromString(onnxir)
prepared = c2.prepare(onnx_model)
caffe2_out = prepared.run(inputs=[x.cpu().numpy()])
self.assertEqual(caffe2_out[0].shape, x.shape)
def test_traced_ints(self):
A = 4
H = 10
W = 8
img_count = 3
# In this model, constant propagation in JIT doesn't kick in,
# so we end up with a ListConstruct in the symbolic graph.
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(A, 4 * A, 1, stride=1)
def forward(self, feature, im_info, anchors):
bbox_deltas = self.conv(feature)
a, b = torch.ops._caffe2.GenerateProposals(
feature,
bbox_deltas,
im_info,
anchors,
2.0,
6000,
300,
0.7,
16,
True,
-90,
90,
1.0,
True,
)
output = torch.ops._caffe2.RoIAlign(
feature,
a,
order="NCHW",
spatial_scale=1.0,
pooled_h=3,
pooled_w=3,
sampling_ratio=0,
aligned=False,
)
return output
feature = torch.empty(img_count, A, H, W)
im_info = torch.ones(img_count, 3, dtype=torch.float32)
anchors = torch.ones(A, 4, dtype=torch.float32)
inputs = (feature, im_info, anchors)
model = MyModel()
with torch.no_grad():
self.run_model_test(
model, train=False, input=inputs, batch_size=BATCH_SIZE
)
def test_c2_roi_align(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, feature, rois):
roi_feature = torch.ops._caffe2.RoIAlign(
feature,
rois,
order="NCHW",
spatial_scale=1.0,
pooled_h=3,
pooled_w=3,
sampling_ratio=3,
aligned=False,
)
return roi_feature
def rand_roi(N, C, H, W):
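# Each RoI is [batch_index, x1, y1, x2, y2] in the feature map's coordinate space
# (batch index first, as the Caffe2 RoIAlign op expects -- to the best of our knowledge).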
return [
float(int(N * np.random.rand())),
0.5 * np.random.rand() * W,
0.5 * np.random.rand() * H,
(0.5 + 0.5 * np.random.rand()) * W,
(0.5 + 0.5 * np.random.rand()) * H,
]
N, C, H, W = 1, 4, 10, 8
feature = torch.randn(N, C, H, W)
rois = torch.tensor([rand_roi(N, C, H, W) for _ in range(10)])
inputs = (feature, rois)
self.run_model_test(MyModel(), train=False, input=inputs, batch_size=3)
def test_c2_generate_proposals(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, scores, bbox_deltas, im_info, anchors):
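# The positional args after `anchors` are, per the Caffe2 GenerateProposals schema
# (to the best of our knowledge): spatial_scale, pre_nms_topN, post_nms_topN,
# nms_thresh, min_size, angle_bound_on, angle_bound_lo, angle_bound_hi,
# clip_angle_thresh, legacy_plus_one.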
a, b = torch.ops._caffe2.GenerateProposals(
scores,
bbox_deltas,
im_info,
anchors,
2.0,
6000,
300,
0.7,
16,
True,
-90,
90,
1.0,
True,
)
return a, b
A = 4
H = 10
W = 8
img_count = 3
scores = torch.ones(img_count, A, H, W, dtype=torch.float32)
bbox_deltas = torch.linspace(
0, 10, steps=img_count * 4 * A * H * W, dtype=torch.float32
)
bbox_deltas = bbox_deltas.view(img_count, 4 * A, H, W)
im_info = torch.ones(img_count, 3, dtype=torch.float32)
anchors = torch.ones(A, 4, dtype=torch.float32)
inputs = (scores, bbox_deltas, im_info, anchors)
self.run_model_test(MyModel(), train=False, input=inputs, batch_size=3)
def test_c2_bbox_transform(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, rois, deltas, im_info):
a, b = torch.ops._caffe2.BBoxTransform(
rois,
deltas,
im_info,
weights=[1.0, 1.0, 1.0, 1.0],
apply_scale=False,
rotated=True,
angle_bound_on=True,
angle_bound_lo=-90,
angle_bound_hi=90,
clip_angle_thresh=0.5,
legacy_plus_one=True,
)
return a, b
roi_counts = [0, 2, 3, 4, 5]
batch_size = len(roi_counts)
total_rois = sum(roi_counts)
im_dims = np.random.randint(100, 600, batch_size)
rois = generate_rois_rotated(roi_counts, im_dims)
box_dim = 5
num_classes = 7
deltas = np.random.randn(total_rois, box_dim * num_classes).astype(np.float32)
im_info = np.zeros((batch_size, 3)).astype(np.float32)
im_info[:, 0] = im_dims
im_info[:, 1] = im_dims
im_info[:, 2] = 1.0
inputs = (torch.tensor(rois), torch.tensor(deltas), torch.tensor(im_info))
self.run_model_test(
MyModel(), train=False, input=inputs, batch_size=3, use_gpu=False
)
# BoxWithNMSLimits has requirements for its inputs, so the randomly generated inputs
# in Caffe2BackendTestEmbed don't work with this op.
@skipIfEmbed
def test_c2_box_with_nms_limits(self):
roi_counts = [0, 2, 3, 4, 5]
num_classes = 7
rotated = False
angle_bound_on = True
clip_angle_thresh = 0.5
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, rotated
)
pred_bbox, batch_splits = (
t.detach().numpy()
for t in torch.ops._caffe2.BBoxTransform(
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
[1.0, 1.0, 1.0, 1.0],
False,
rotated,
angle_bound_on,
-90,
90,
clip_angle_thresh,
legacy_plus_one=True,
)
)
class_prob = np.random.randn(sum(roi_counts), num_classes).astype(np.float32)
score_thresh = 0.5
nms_thresh = 0.5
topk_per_image = int(sum(roi_counts) / 2)
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, class_prob, pred_bbox, batch_splits):
a, b, c, d, e, f = torch.ops._caffe2.BoxWithNMSLimit(
class_prob,
pred_bbox,
batch_splits,
score_thresh=score_thresh,
nms=nms_thresh,
detections_per_im=topk_per_image,
soft_nms_enabled=False,
soft_nms_method="linear",
soft_nms_sigma=0.5,
soft_nms_min_score_thres=0.001,
rotated=rotated,
cls_agnostic_bbox_reg=False,
input_boxes_include_bg_cls=True,
output_classes_include_bg_cls=True,
legacy_plus_one=True,
)
return a, b, c, d, e, f
inputs = (
torch.tensor(class_prob),
torch.tensor(pred_bbox),
torch.tensor(batch_splits),
)
self.run_model_test(
MyModel(), train=False, input=inputs, batch_size=3, use_gpu=False
)
def test_c2_inference_lstm(self):
num_layers = 4
seq_lens = 6
emb_lens = 10
has_bias = True
batch_first = True
is_bidirectional = True
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, lstm_in):
a, b, c = torch.ops._caffe2.InferenceLSTM(
lstm_in, num_layers, has_bias, batch_first, is_bidirectional
)
return a, b, c
num_directions = 2
bsz = 5
hidden_size = 7
hx = np.zeros((num_layers * num_directions, bsz, hidden_size), dtype=np.float32)
inputs = np.random.randn(bsz, seq_lens, emb_lens).astype(np.float32)
torch_lstm = torch.nn.LSTM(
emb_lens,
hidden_size,
batch_first=batch_first,
bidirectional=is_bidirectional,
bias=has_bias,
num_layers=num_layers,
)
lstm_in = (
[
torch.from_numpy(inputs),
torch.from_numpy(hx),
torch.from_numpy(hx),
]
+ [param.detach() for param in torch_lstm._flat_weights],
)
self.run_model_test(
MyModel(), train=False, input=lstm_in, batch_size=3, use_gpu=False
)
def test_tuple_input_output(self):
class TupleModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(
self, a: Tuple[torch.Tensor, torch.Tensor]
) -> Tuple[torch.Tensor, torch.Tensor]:
return a
x = (torch.randn(3, 4), torch.randn(4, 3))
self.run_model_test(
TupleModel(), train=False, input=(x,), batch_size=BATCH_SIZE
)
def test_nested_tuple_input_output(self):
class NestedTupleModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(
self,
a: torch.Tensor,
b: Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
) -> torch.Tensor:
return a + b[0] + b[1][0] + b[1][1]
x = torch.randn(4, 5)
y = (torch.randn(4, 5), (torch.randn(4, 5), torch.randn(4, 5)))
self.run_model_test(
NestedTupleModel(), train=False, input=(x, y), batch_size=BATCH_SIZE
)
def test_topk(self):
class TopKModel(torch.nn.Module):
def forward(self, input):
return torch.topk(input, 3)
x = torch.arange(1.0, 6.0)
self.run_model_test(TopKModel(), train=False, input=x, batch_size=BATCH_SIZE)
def test_topk_script(self):
class TopKModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return torch.topk(input, 3, dim=0)
x = torch.randn(4, 3, requires_grad=True)
self.run_model_test(TopKModel(), train=False, input=(x,), batch_size=BATCH_SIZE)
def test_floor(self):
class FloorModel(torch.nn.Module):
def forward(self, input):
return torch.floor(input)
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.run_model_test(FloorModel(), train=False, input=x, batch_size=BATCH_SIZE)
def test_ceil(self):
class CeilModel(torch.nn.Module):
def forward(self, input):
return torch.ceil(input)
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.run_model_test(CeilModel(), train=False, input=x, batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(9)
def test__dim_arange(self):
class DimArange(torch.nn.Module):
def forward(self, input):
return torch._dim_arange(input, 1)
x = torch.ones(5, 6)
self.run_model_test(
DimArange(),
train=False,
input=x,
batch_size=BATCH_SIZE,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_end(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(a.size(0), dtype=torch.float).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
self.run_model_test(
ArangeScript(), train=False, input=(x,), batch_size=BATCH_SIZE
)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(a.size(0), dtype=torch.float).view(-1, 1) + a
self.run_model_test(
ArangeModel(), train=False, input=(x,), batch_size=BATCH_SIZE
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_start_end(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(2, a.size(0) + 2, dtype=torch.float).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
self.run_model_test(
ArangeScript(), train=False, input=(x,), batch_size=BATCH_SIZE
)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(2, a.size(0) + 2, dtype=torch.float).view(-1, 1) + a
self.run_model_test(
ArangeModel(), train=False, input=(x,), batch_size=BATCH_SIZE
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_start_end_step(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return (
torch.arange(
2, a.size(0) * a.size(1) + 2, a.size(1), dtype=torch.float
).view(-1, 1)
+ a
)
x = torch.randn(3, 4, requires_grad=True)
self.run_model_test(
ArangeScript(), train=False, input=(x,), batch_size=BATCH_SIZE
)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return (
torch.arange(
2, a.size(0) * a.size(1) + 2, a.size(1), dtype=torch.float
).view(-1, 1)
+ a
)
self.run_model_test(
ArangeModel(), train=False, input=(x,), batch_size=BATCH_SIZE
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_size(self):
class SizeModel(torch.nn.Module):
def forward(self, input):
return torch.arange(input.size(0)), torch.arange(input.size(-1))
x = torch.randn(5, 3, 2)
self.run_model_test(
SizeModel(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2]},
)
self.run_model_test(
SizeModel(),
train=False,
input=(x,),
batch_size=BATCH_SIZE,
remained_onnx_input_idx=[],
)
def test_log2(self):
class Log2Model(torch.nn.Module):
def forward(self, input):
return torch.log2(input)
x = torch.empty(BATCH_SIZE, 10, 10).uniform_(4, 9)
self.run_model_test(Log2Model(), train=False, input=x, batch_size=BATCH_SIZE)
def test__sample_dirichlet(self):
class DirichletModel(torch.nn.Module):
def forward(self, input):
return torch._sample_dirichlet(input)
x = torch.randn(2, 3, 4, requires_grad=False)
model = DirichletModel()
onnxir, _ = do_export(
model,
x,
keep_initializers_as_inputs=True,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
)
onnx_model = onnx.ModelProto.FromString(onnxir)
prepared = c2.prepare(onnx_model)
caffe2_out = prepared.run(inputs=[x.cpu().numpy()])
self.assertEqual(caffe2_out[0].shape, x.shape)
def test__standard_gamma(self):
class GammaModel(torch.nn.Module):
def forward(self, input):
return torch._standard_gamma(input)
x = torch.randn(2, 3, 4, requires_grad=False)
model = GammaModel()
onnxir, _ = do_export(
model,
x,
keep_initializers_as_inputs=True,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
)
onnx_model = onnx.ModelProto.FromString(onnxir)
prepared = c2.prepare(onnx_model)
caffe2_out = prepared.run(inputs=[x.cpu().numpy()])
self.assertEqual(caffe2_out[0].shape, x.shape)
# The order of returned indices from Multinomial is undefined, so randomly generated inputs
# in Caffe2BackendTestEmbed don't work with this op.
@skipIfEmbed
def test_multinomial(self):
class Multinomial(torch.nn.Module):
def forward(self, weight):
return torch.multinomial(weight, 3, replacement=True)
class MultinomialNoReplacement(torch.nn.Module):
def forward(self, weight):
return torch.multinomial(weight, 1)
weight = torch.tensor([[0, 10, 0, 0], [0, 0, 100, 0]], dtype=torch.float)
self.run_model_test(
Multinomial(), train=False, input=weight, batch_size=BATCH_SIZE
)
self.run_model_test(
MultinomialNoReplacement(), train=False, input=weight, batch_size=BATCH_SIZE
)
def test_prim_shape(self):
x = torch.randn(4, 5, requires_grad=True)
@torch.jit.script
def view_by_prim_shape(x):
return x.view(x.shape)
class PrimShapeModel(torch.nn.Module):
def forward(self, input):
return view_by_prim_shape(input)
self.run_model_test(
PrimShapeModel(), train=False, input=x, batch_size=BATCH_SIZE
)
def test_and(self):
class AndModel(torch.nn.Module):
def forward(self, x, y):
return x & y
x = torch.randint(0, 1, (3, 5), dtype=torch.bool)
y = torch.randint(0, 1, (3, 5), dtype=torch.bool)
self.run_model_test(
AndModel(), train=False, input=(x, y), batch_size=BATCH_SIZE
)
def test_or(self):
class OrModel(torch.nn.Module):
def forward(self, x, y):
return x | y
x = torch.randint(0, 1, (3, 5), dtype=torch.bool)
y = torch.randint(0, 1, (3, 5), dtype=torch.bool)
self.run_model_test(OrModel(), train=False, input=(x, y), batch_size=BATCH_SIZE)
def test_dropout(self):
class DropoutModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.dropout = torch.nn.Dropout(0.5)
def forward(self, x):
return self.dropout(x)
x = torch.randn(1, 2, 3)
self.run_model_test(DropoutModel(), train=False, input=x, batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(9)
def test_while(self):
class WhileModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
a = 0
while a < 4:
a += 1
return x + a
model = WhileModel()
inputs = torch.zeros(1, 2, 3, dtype=torch.long)
self.run_model_test(
model,
train=False,
input=(inputs,),
batch_size=BATCH_SIZE,
)
def test_while_cond(self):
class WhileModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, a):
b = a < 4
while b:
a += b.to(torch.long)
b = a < 4
return x + a
model = WhileModel()
x = torch.zeros(1, 2, 3, dtype=torch.long)
a = torch.tensor([0], dtype=torch.long)
self.run_model_test(model, train=False, input=(x, a), batch_size=BATCH_SIZE)
@unittest.skip("Disabled due to onnx optimizer deprecation")
def test_loop(self):
class LoopModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
for i in range(5):
x = x + i
return x
model = LoopModel()
inputs = torch.zeros(1, 2, 3, dtype=torch.long)
self.run_model_test(model, train=False, input=(inputs,), batch_size=BATCH_SIZE)
@unittest.skip("Disabled due to onnx optimizer deprecation")
def test_dynamic_loop(self):
class LoopModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
for i in range(x.size(2)):
x = x + i
return x
model = LoopModel()
inputs = torch.zeros(1, 2, 3, dtype=torch.long)
self.run_model_test(model, train=False, input=(inputs,), batch_size=BATCH_SIZE)
@unittest.skip("Disabled due to onnx optimizer deprecation")
@skipIfUnsupportedMinOpsetVersion(9)
def test_nested_loops(self):
class NestedLoopsModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
for i in range(5):
a = 0
while a < 4:
a += 1
for j in range(a):
x = x + j
x = x + a
return x
model = NestedLoopsModel()
inputs = torch.zeros(1, 2, 3, dtype=torch.long)
self.run_model_test(
model,
train=False,
input=(inputs,),
batch_size=BATCH_SIZE,
)
def test_select(self):
class SelectModel(torch.nn.Module):
def forward(self, x):
return torch.select(x, 0, 1)
model = SelectModel()
inputs = torch.randn(3, 2, 1)
self.run_model_test(model, train=False, input=(inputs,), batch_size=BATCH_SIZE)
def test_std(self):
class StandardDeviation(torch.nn.Module):
def forward(self, input):
return torch.std(input, unbiased=False)
model = StandardDeviation()
inputs = torch.randn(2, 3, 4)
self.run_model_test(model, train=False, input=(inputs,), batch_size=BATCH_SIZE)
def test_std_along_dims(self):
class StandardDeviationAlongDims(torch.nn.Module):
def forward(self, input):
return torch.std(input, dim=(0, 1), unbiased=False, keepdim=False)
model = StandardDeviationAlongDims()
inputs = torch.randn(2, 3, 4)
self.run_model_test(model, train=False, input=(inputs,), batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(9)
def test_masked_fill(self):
class MaskedFillModel(torch.nn.Module):
def forward(self, x):
mask = torch.tensor([[0, 0, 1], [1, 1, 0]], dtype=torch.uint8)
return x.masked_fill(mask, 2)
x = torch.zeros(4, 2, 3, requires_grad=True)
self.run_model_test(
MaskedFillModel(), input=(x,), train=False, batch_size=BATCH_SIZE
)
class MaskedFillModel2(torch.nn.Module):
def forward(self, x):
return x.masked_fill(x > 3, -1)
x = torch.arange(16).view(2, 2, 4).to(torch.float32)
self.run_model_test(
MaskedFillModel2(), input=(x,), train=False, batch_size=BATCH_SIZE
)
@skipIfUnsupportedMinOpsetVersion(8)
def test_meshgrid(self):
class MeshgridModel(torch.nn.Module):
def forward(self, x, y, z):
return torch.meshgrid(x, y, z)
x = torch.ones(3, requires_grad=True)
y = torch.zeros(4, requires_grad=True)
z = torch.ones(5, requires_grad=True)
model = MeshgridModel()
self.run_model_test(model, train=False, input=(x, y, z), batch_size=BATCH_SIZE)
def test_remainder(self):
class RemainderModel(torch.nn.Module):
def forward(self, input, other):
return torch.remainder(input, other)
x = torch.randn(4, 2, 3)
y = torch.randn(1, 2, 1)
model = RemainderModel()
self.run_model_test(model, train=False, input=(x, y), batch_size=BATCH_SIZE)
def test_remainder_scalar(self):
class RemainderModel(torch.nn.Module):
def forward(self, input):
return torch.remainder(input, 2.55)
inputs = torch.randint(10, (2, 3))
model = RemainderModel()
self.run_model_test(
model,
train=False,
input=(inputs,),
batch_size=BATCH_SIZE,
)
def test_baddbmm(self):
class MyModule(torch.nn.Module):
def forward(self, input, batch1, batch2):
return torch.baddbmm(
input, batch1, batch2, alpha=torch.tensor(5), beta=3.5
)
x = torch.randn(10, 3, 5)
batch1 = torch.randn(10, 3, 4)
batch2 = torch.randn(10, 4, 5)
self.run_model_test(
MyModule(), input=(x, batch1, batch2), train=False, batch_size=BATCH_SIZE
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_gelu(self):
class GeluModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.gelu(x, approximate="none")
model = GeluModel()
inputs = torch.randn(2, 4, 5, 6, requires_grad=True)
self.run_model_test(model, train=False, input=(inputs,), batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(9)
def test_tanh_gelu(self):
class GeluModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.gelu(x, approximate="tanh")
model = GeluModel()
inputs = torch.randn(2, 4, 5, 6, requires_grad=True)
self.run_model_test(model, train=False, input=(inputs,), batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_fill(self):
class IndexFillModel(torch.nn.Module):
def forward(self, input):
index = torch.tensor([2, 0])
return input.index_fill(2, index, -1)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_model_test(
IndexFillModel(), input=(x,), train=False, batch_size=BATCH_SIZE
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_copy(self):
class IndexCopyModel(torch.nn.Module):
def forward(self, input):
index = torch.tensor([2, 0])
source = torch.ones(3, 2, 5)
return input.index_copy(1, index, source)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_model_test(
IndexCopyModel(), input=(x,), train=False, batch_size=BATCH_SIZE
)
# a bit of metaprogramming to set up all the rnn tests
def make_test(
name,
base,
layer,
bidirectional,
initial_state,
variable_length,
dropout,
**extra_kwargs,
):
test_name = str(
"_".join(
[
"test",
name,
layer[1],
bidirectional[1],
initial_state[1],
variable_length[1],
dropout[1],
]
)
)
@unittest.skip("Disabled due to onnx optimizer deprecation")
@skipIfUnsupportedOpsetVersion([10])
@skipIfUnsupportedMinOpsetVersion(8)
def f(self):
self._dispatch_rnn_test(
base,
layers=layer[0],
bidirectional=bidirectional[0],
initial_state=initial_state[0],
packed_sequence=variable_length[0],
dropout=dropout[0],
**extra_kwargs,
)
f.__name__ = test_name
setattr(TestCaffe2Backend_opset9, f.__name__, f)
def setup_rnn_tests():
layers_opts = [(1, "unilayer"), (3, "trilayer")]
bidirectional_opts = [(False, "forward"), (True, "bidirectional")]
initial_state_opts = [(True, "with_initial_state"), (False, "no_initial_state")]
variable_length_opts = [
(0, "without_sequence_lengths"),
(1, "with_variable_length_sequences"),
(2, "with_batch_first_sequence_lengths"),
]
dropout_opts = [(0.2, "with_dropout"), (0.0, "without_dropout")]
test_count = 0
for (
layer,
bidirectional,
initial_state,
variable_length,
dropout,
) in itertools.product(
layers_opts,
bidirectional_opts,
initial_state_opts,
variable_length_opts,
dropout_opts,
):
for base, name, extra_kwargs in (
("elman", "elman_relu", {"nonlinearity": "relu"}),
("elman", "elman_tanh", {"nonlinearity": "tanh"}),
("lstm", "lstm", {}),
("gru", "gru", {}),
):
make_test(
name,
base,
layer,
bidirectional,
initial_state,
variable_length,
dropout,
**extra_kwargs,
)
test_count += 1
# sanity check that a representative example does exist
TestCaffe2Backend_opset9.test_gru_trilayer_forward_with_initial_state_without_sequence_lengths_with_dropout
# make sure no one accidentally disables all the tests without
# noticing
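# The expected count follows from the option grid above: 2 layer options * 2
# directions * 2 initial-state options * 3 sequence-length options * 2 dropout
# options, times 4 cell types (elman_relu, elman_tanh, lstm, gru) = 192 tests.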
assert test_count == 192, test_count
setup_rnn_tests()
# add the same test suite as above, but switch embed_params=False
# to embed_params=True
TestCaffe2BackendEmbed_opset9 = type(
"TestCaffe2BackendEmbed_opset9",
(common_utils.TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, embed_params=True),
)
# opset 7 tests
TestCaffe2Backend_opset7 = type(
"TestCaffe2Backend_opset7",
(common_utils.TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, opset_version=7),
)
TestCaffe2BackendEmbed_opset7 = type(
"TestCaffe2BackendEmbed_opset7",
(common_utils.TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, embed_params=True, opset_version=7),
)
# opset 8 tests
TestCaffe2Backend_opset8 = type(
"TestCaffe2Backend_opset8",
(common_utils.TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, opset_version=8),
)
TestCaffe2BackendEmbed_opset8 = type(
"TestCaffe2BackendEmbed_opset8",
(common_utils.TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, embed_params=True, opset_version=8),
)
# opset 10 tests
TestCaffe2Backend_opset10 = type(
"TestCaffe2Backend_opset10",
(common_utils.TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, opset_version=10),
)
TestCaffe2BackendEmbed_opset10 = type(
"TestCaffe2BackendEmbed_opset10",
(common_utils.TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, embed_params=True, opset_version=10),
)
# add the same test suite as above, but switch embed_params=False
# to embed_params=True
TestCaffe2BackendEmbed_opset9_new_jit_API = type(
"TestCaffe2BackendEmbed_opset9_new_jit_API",
(common_utils.TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, embed_params=True),
)
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_pytorch_onnx_caffe2.py
|
import argparse
import glob
import os
import shutil
import traceback
import google.protobuf.text_format
import onnx.backend.test
import onnx_test_common
from test_caffe2_common import run_generated_test
from torch.testing._internal.common_device_type import get_all_device_types
_fail_test_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "fail", "generated"
)
_expect_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "expect")
def collect_generated_testcases(
root_dir=onnx_test_common.pytorch_converted_dir,
verbose=False,
fail_dir=None,
expect=True,
):
total_pass = 0
total_fail = 0
for d in os.listdir(root_dir):
dir_name = os.path.join(root_dir, d)
if os.path.isdir(dir_name):
failed = False
try:
model_file = os.path.join(dir_name, "model.onnx")
data_dir_pattern = os.path.join(dir_name, "test_data_set_*")
for data_dir in glob.glob(data_dir_pattern):
for device in get_all_device_types():
run_generated_test(model_file, data_dir, device)
if expect:
expect_file = os.path.join(
_expect_dir, f"PyTorch-generated-{d}.expect"
)
with open(expect_file, "w") as text_file:
model = onnx.load(model_file)
onnx.checker.check_model(model)
onnx.helper.strip_doc_string(model)
text_file.write(
google.protobuf.text_format.MessageToString(model)
)
total_pass += 1
except Exception as e:
if verbose:
print(f"The test case in {dir_name} failed!")
traceback.print_exc()
if fail_dir is None:
shutil.rmtree(dir_name)
else:
target_dir = os.path.join(fail_dir, d)
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
shutil.move(dir_name, target_dir)
total_fail += 1
print(f"Successfully generated/updated {total_pass} test cases from PyTorch.")
if expect:
print(f"Expected pbtxt files are generated in {_expect_dir}.")
print(f"Failed {total_fail} testcases are moved to {_fail_test_dir}.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Check and filter the failed test cases."
)
parser.add_argument("-v", action="store_true", default=False, help="verbose")
parser.add_argument(
"--delete", action="store_true", default=False, help="delete failed test cases"
)
parser.add_argument(
"--no-expect",
action="store_true",
default=False,
help="generate expect txt files",
)
args = parser.parse_args()
verbose = args.v
delete = args.delete
expect = not args.no_expect
fail_dir = _fail_test_dir
if delete:
fail_dir = None
if fail_dir:
if not os.path.exists(fail_dir):
os.makedirs(fail_dir)
collect_generated_testcases(verbose=verbose, fail_dir=fail_dir, expect=expect)
# We already generate the expect files for test_operators.py.
collect_generated_testcases(
root_dir=onnx_test_common.pytorch_operator_dir,
verbose=verbose,
fail_dir=fail_dir,
expect=False,
)
|
pytorch-master
|
test/onnx/export_onnx_tests_filter.py
|
# Owner(s): ["module: onnx"]
import unittest
import onnxruntime # noqa: F401
import torch
from pytorch_test_common import (
skipIfNoBFloat16Cuda,
skipIfNoCuda,
skipIfUnsupportedMinOpsetVersion,
skipScriptTest,
)
from test_pytorch_onnx_onnxruntime import TestONNXRuntime
from torch.cuda.amp import autocast
from torch.onnx._globals import GLOBALS
from torch.testing._internal import common_utils
class TestONNXRuntime_cuda(common_utils.TestCase):
opset_version = GLOBALS.export_onnx_opset_version
keep_initializers_as_inputs = True
onnx_shape_inference = True
@skipIfUnsupportedMinOpsetVersion(9)
@skipIfNoCuda
def test_gelu_fp16(self):
class GeluModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.gelu(x)
x = torch.randn(
2,
4,
5,
6,
requires_grad=True,
dtype=torch.float16,
device=torch.device("cuda"),
)
self.run_test(GeluModel(), x, rtol=1e-3, atol=1e-5)
@skipIfUnsupportedMinOpsetVersion(9)
@skipIfNoCuda
@skipScriptTest()
def test_layer_norm_fp16(self):
class LayerNormModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer_norm = torch.nn.LayerNorm([10, 10])
@autocast()
def forward(self, x):
return self.layer_norm(x)
x = torch.randn(
20,
5,
10,
10,
requires_grad=True,
dtype=torch.float16,
device=torch.device("cuda"),
)
self.run_test(LayerNormModel().cuda(), x, rtol=1e-3, atol=1e-5)
@skipIfUnsupportedMinOpsetVersion(12)
@skipIfNoCuda
@skipScriptTest()
def test_softmaxCrossEntropy_fusion_fp16(self):
class FusionModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.loss = torch.nn.NLLLoss(reduction="none")
self.m = torch.nn.LogSoftmax(dim=1)
@autocast()
def forward(self, input, target):
output = self.loss(self.m(2 * input), target)
return output
N, C = 5, 4
input = torch.randn(N, 16, dtype=torch.float16, device=torch.device("cuda"))
target = torch.empty(N, dtype=torch.long, device=torch.device("cuda")).random_(
0, C
)
# using test data containing default ignore_index=-100
target[target == 1] = -100
self.run_test(FusionModel(), (input, target))
@skipIfNoCuda
@skipScriptTest()
def test_apex_o2(self):
class LinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 5)
def forward(self, x):
return self.linear(x)
try:
from apex import amp
except Exception:
raise unittest.SkipTest("Apex is not available")
input = torch.randn(3, 3, device=torch.device("cuda"))
model = amp.initialize(LinearModel(), opt_level="O2")
self.run_test(model, input)
# ONNX supports bfloat16 for opsets >= 13
# Add, Sub and Mul ops don't support bfloat16 on CPU in onnxruntime.
@skipIfUnsupportedMinOpsetVersion(13)
@skipIfNoBFloat16Cuda
def test_arithmetic_bfp16(self):
class MyModule(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4, dtype=torch.bfloat16, device=torch.device("cuda"))
x = x.type_as(y)
return torch.mul(torch.add(x, y), torch.sub(x, y)).to(
dtype=torch.float16
)
x = torch.ones(
3, 4, requires_grad=True, dtype=torch.float16, device=torch.device("cuda")
)
self.run_test(MyModule(), x, rtol=1e-3, atol=1e-5)
@skipIfNoCuda
def test_deduplicate_initializers_diff_devices(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(
torch.ones(2, 3, device=torch.device("cpu"))
)
self.b = torch.nn.Parameter(torch.ones(3, device=torch.device("cuda")))
def forward(self, x, y):
return torch.matmul(self.w, x), y + self.b
x = torch.randn(3, 3, device=torch.device("cpu"))
y = torch.randn(3, 3, device=torch.device("cuda"))
self.run_test(Model(), (x, y))
TestONNXRuntime_cuda.setUp = TestONNXRuntime.setUp
TestONNXRuntime_cuda.run_test = TestONNXRuntime.run_test
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_pytorch_onnx_onnxruntime_cuda.py
|
# Owner(s): ["module: unknown"]
import io
import caffe2.python.onnx.backend as c2
import numpy as np
import onnx
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.onnx
from torch.testing._internal import common_utils
class TestQuantizedOps(common_utils.TestCase):
def generic_test(
self, model, sample_inputs, input_names=None, decimal=3, relaxed_check=False
):
torch.backends.quantized.engine = "qnnpack"
pt_inputs = tuple(torch.from_numpy(x) for x in sample_inputs)
model.qconfig = torch.ao.quantization.get_default_qconfig("qnnpack")
q_model = torch.ao.quantization.prepare(model, inplace=False)
q_model = torch.ao.quantization.convert(q_model, inplace=False)
traced_model = torch.jit.trace(q_model, pt_inputs)
buf = io.BytesIO()
torch.jit.save(traced_model, buf)
buf.seek(0)
q_model = torch.jit.load(buf)
q_model.eval()
output = q_model(*pt_inputs)
f = io.BytesIO()
torch.onnx.export(
q_model,
pt_inputs,
f,
input_names=input_names,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
# Caffe2 doesn't support newer opset versions
opset_version=9,
)
f.seek(0)
onnx_model = onnx.load(f)
caffe_res = c2.run_model(onnx_model, dict(zip(input_names, sample_inputs)))[0]
# Due to a change in requantization logic for certain ops such as conv and linear
# in pytorch's integration of qnnpack, numerics may have a mismatch with C2.
# This mismatch should not be off by more than 1.
# This flag helps us override the default behavior under certain circumstances.
if relaxed_check:
output_diff = np.absolute(np.squeeze(output.detach().numpy()) - caffe_res)
max_diff = np.amax(output_diff)
# This check had to be changed to account for changes in
# qnnpack's requant logic.
np.testing.assert_(
max_diff <= 1, "Maximum absolute difference must be less than 1"
)
else:
np.testing.assert_almost_equal(
output.detach().numpy(), caffe_res, decimal=decimal
)
def generic_unary_test(self, op):
class QModule(torch.nn.Module):
def __init__(self, op):
super().__init__()
self.quant1 = torch.ao.quantization.QuantStub()
self.op = op
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x):
res = self.op(self.quant1(x))
return self.dequant(res)
x = np.random.random((1, 2)).astype("float32")
self.generic_test(QModule(op), (x,), input_names=["x"])
def test_quantized_add(self):
class QAddModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant1 = torch.ao.quantization.QuantStub()
self.quant2 = torch.ao.quantization.QuantStub()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x, y):
res = torch.ops.quantized.add(self.quant1(x), self.quant2(y), 1.0, 0)
return self.dequant(res)
x = np.random.random(2).astype("float32")
y = np.random.random(2).astype("float32")
self.generic_test(QAddModule(), (x, y), input_names=["x", "y"])
def test_quantized_relu(self):
self.generic_unary_test(torch.nn.ReLU())
def export_to_onnx(self, model, input, input_names):
traced = torch.jit.trace(model, input)
buf = io.BytesIO()
torch.jit.save(traced, buf)
buf.seek(0)
model = torch.jit.load(buf)
f = io.BytesIO()
torch.onnx.export(
model,
input,
f,
input_names=input_names,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
# Caffe2 doesn't support newer opset versions
opset_version=9,
)
f.seek(0)
onnx_model = onnx.load(f)
return onnx_model
def test_qlinear_model(self):
class LinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.qconfig = torch.ao.quantization.default_qconfig
self.fc1 = torch.ao.quantization.QuantWrapper(
torch.nn.Linear(5, 10).to(dtype=torch.float)
)
def forward(self, x):
x = self.fc1(x)
return x
torch.backends.quantized.engine = "qnnpack"
qconfig = torch.ao.quantization.default_qconfig
model = LinearModel()
model.qconfig = qconfig
model = torch.ao.quantization.prepare(model)
model = torch.ao.quantization.convert(model)
x_numpy = np.random.rand(1, 2, 5).astype(np.float32)
x = torch.from_numpy(x_numpy).to(dtype=torch.float)
outputs = model(x)
input_names = ["x"]
onnx_model = self.export_to_onnx(model, x, input_names)
caffe_res = c2.run_model(onnx_model, dict(zip(input_names, x_numpy)))[0]
output_diff = np.absolute(np.squeeze(outputs.numpy()) - caffe_res)
max_diff = np.amax(output_diff)
# Permute pytorch output to NHWC
# This check had to be changed to account for changes in
# qnnpack's requant logic.
np.testing.assert_(
max_diff <= 1, "Maximum absolute difference must be less than 1"
)
def test_qconv_model(self):
class ConvModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.qconfig = torch.ao.quantization.default_qconfig
self.fc1 = torch.ao.quantization.QuantWrapper(
torch.nn.Conv2d(3, 5, 2, bias=True).to(dtype=torch.float)
)
def forward(self, x):
x = self.fc1(x)
return x
torch.backends.quantized.engine = "qnnpack"
qconfig = torch.ao.quantization.default_qconfig
model = ConvModel()
model.qconfig = qconfig
model = torch.ao.quantization.prepare(model)
model = torch.ao.quantization.convert(model)
x_numpy = np.random.rand(1, 3, 6, 6).astype(np.float32)
x = torch.from_numpy(x_numpy).to(dtype=torch.float)
outputs = model(x)
input_names = ["x"]
onnx_model = self.export_to_onnx(model, x, input_names)
y = np.expand_dims(x_numpy, axis=0)
caffe_res = c2.run_model(onnx_model, dict(zip(input_names, y)))[0]
output_diff = np.absolute(np.squeeze(outputs.numpy()) - caffe_res)
max_diff = np.amax(output_diff)
# Permute pytorch output to NHWC
# This check had to be changed to account for changes in
# qnnpack's requant logic.
np.testing.assert_(
max_diff <= 1, "Maximum absolute difference must be less than 1"
)
def test_upsample(self):
class QUpsampleModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant1 = torch.ao.quantization.QuantStub()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x):
res = torch.nn.quantized.functional.interpolate(
self.quant1(x), size=[6, 8], mode="nearest"
)
return self.dequant(res)
x = np.random.rand(1, 2, 3, 4).astype("float32")
self.generic_test(QUpsampleModule(), (x,), input_names=["x"], decimal=5)
def test_avg_pool2d(self):
class QAvgPool2dModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant1 = torch.ao.quantization.QuantStub()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x):
res = torch.nn.functional.avg_pool2d(
self.quant1(x), kernel_size=2, stride=1, padding=0
)
return self.dequant(res)
x = np.random.rand(1, 2, 8, 8).astype("float32")
self.generic_test(
QAvgPool2dModule(), (x,), input_names=["x"], relaxed_check=True
)
def test_reshape(self):
class QReshapeModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant1 = torch.ao.quantization.QuantStub()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x):
res = self.quant1(x).reshape((1, 2, 1, 12))
return self.dequant(res)
x = np.random.rand(1, 2, 3, 4).astype("float32")
self.generic_test(QReshapeModule(), (x,), input_names=["x"], decimal=5)
def test_slice(self):
class QSliceModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant1 = torch.ao.quantization.QuantStub()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x):
qx = self.quant1(x)
res = qx[:, 1:2]
return self.dequant(res)
x = np.random.rand(1, 2, 3, 4).astype("float32")
self.generic_test(QSliceModule(), (x,), input_names=["x"], decimal=5)
def test_cat(self):
class QConcatModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant1 = torch.ao.quantization.QuantStub()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x, y):
res = torch.ops.quantized.cat(
[self.quant1(x), self.quant1(y)], dim=1, scale=1.0, zero_point=0
)
return self.dequant(res)
x = np.random.rand(1, 2, 3, 4).astype("float32")
y = np.random.rand(1, 4, 3, 4).astype("float32")
self.generic_test(
QConcatModule(),
(
x,
y,
),
input_names=["x", "y"],
)
def test_max_pool2d(self):
class QMaxPool2dModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant1 = torch.ao.quantization.QuantStub()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x):
res = torch.nn.functional.max_pool2d(
self.quant1(x), kernel_size=2, stride=1, padding=0
)
return self.dequant(res)
x = np.random.rand(1, 2, 8, 8).astype("float32")
self.generic_test(QMaxPool2dModule(), (x,), input_names=["x"], decimal=5)
def test_quantized_sigmoid(self):
self.generic_unary_test(torch.nn.Sigmoid())
def test_small_model(self):
class SimpleModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = torch.ao.quantization.QuantStub()
self.dequant = torch.ao.quantization.DeQuantStub()
self.func_add = nnq.FloatFunctional()
self.conv1 = nn.Conv2d(3, 2, 5, bias=None).to(dtype=torch.float)
self.act1 = nn.Sigmoid()
self.conv2 = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float)
self.fc = nn.Linear(72, 10).to(dtype=torch.float)
self.fc.qconfig = None
def forward(self, x):
x = self.quant(x)
x = self.func_add.add(x, x)
x = self.conv1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.dequant(x)
x = x.reshape(-1, 72).contiguous()
x = self.fc(x)
return x
x = np.random.rand(2, 3, 10, 10).astype("float32")
self.generic_test(SimpleModel(), (x,), input_names=["x"], relaxed_check=True)
def test_sequential(self):
class ConvBNReLUModule(nn.Sequential):
def __init__(self):
super().__init__(
nn.Conv2d(3, 3, 1, 1, bias=False),
nn.BatchNorm2d(3),
nn.ReLU(inplace=False),
)
class ModelWithClassifierHead(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 3, 1)
self.relu1 = nn.ReLU(inplace=False)
layers = []
for i in range(3):
layers.append(ConvBNReLUModule())
self.features = nn.Sequential(*layers)
head = [nn.Linear(300, 10), nn.ReLU(inplace=False)]
self.classifier = nn.Sequential(*head)
self.seq = nn.Sequential()
self.quant = torch.ao.quantization.QuantStub()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv1(x)
x = self.relu1(x)
x = self.features(x)
x = torch.reshape(x, (-1, 3 * 10 * 10))
x = self.classifier(x)
x = self.seq(x)
x = self.dequant(x)
return x
model = ModelWithClassifierHead().eval()
torch.ao.quantization.fuse_modules(
model,
[
["conv1", "relu1"],
["features.0.0", "features.0.1", "features.0.2"],
["features.1.0", "features.1.1", "features.1.2"],
["features.2.0", "features.2.1", "features.2.2"],
],
inplace=True,
)
x = np.random.rand(1, 3, 10, 10).astype("float32")
self.generic_test(model, (x,), input_names=["x"], relaxed_check=True)
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_pytorch_onnx_caffe2_quantized.py
|
import sys
import caffe2.python.onnx.backend as c2
import onnx
import pytorch_test_common
import torch
import torch.jit
from torch.autograd import Variable
torch.set_default_tensor_type("torch.FloatTensor")
try:
import torch
except ImportError:
print("Cannot import torch, hence caffe2-torch test will not run.")
sys.exit(0)
def run_embed_params(proto, model, input, state_dict=None, use_gpu=True):
"""
This is only a helper debug function so we can test the embed_params=False
case as well on the pytorch front.
It should likely be removed from the release version of the code.
"""
device = "CPU"
if use_gpu:
device = "CUDA"
model_def = onnx.ModelProto.FromString(proto)
onnx.checker.check_model(model_def)
prepared = c2.prepare(model_def, device=device)
if state_dict:
parameters = []
# Passed in state_dict may have a different order. Make
# sure our order is consistent with the model's order.
# TODO: Even better: keyword arguments!
for k in model.state_dict():
if k in state_dict:
parameters.append(state_dict[k])
else:
parameters = list(model.state_dict().values())
W = {}
for k, v in zip(
model_def.graph.input, pytorch_test_common.flatten((input, parameters))
):
if isinstance(v, Variable):
W[k.name] = v.data.cpu().numpy()
else:
W[k.name] = v.cpu().numpy()
caffe2_out = prepared.run(inputs=W)
return caffe2_out
|
pytorch-master
|
test/onnx/debug_embed_params.py
|
# Owner(s): ["module: onnx"]
from __future__ import annotations
import os
import random
from typing import Any, Mapping, Type
import numpy as np
import onnxruntime
import torch
from torch.onnx import _constants, verification
from torch.testing._internal import common_utils
onnx_model_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
"repos",
"onnx",
"onnx",
"backend",
"test",
"data",
)
pytorch_converted_dir = os.path.join(onnx_model_dir, "pytorch-converted")
pytorch_operator_dir = os.path.join(onnx_model_dir, "pytorch-operator")
_ORT_PROVIDERS = ("CPUExecutionProvider",)
def run_model_test(test_suite: _TestONNXRuntime, *args, **kwargs):
kwargs["ort_providers"] = _ORT_PROVIDERS
kwargs["opset_version"] = test_suite.opset_version
kwargs["keep_initializers_as_inputs"] = test_suite.keep_initializers_as_inputs
if hasattr(test_suite, "check_shape"):
kwargs["check_shape"] = test_suite.check_shape
if hasattr(test_suite, "check_dtype"):
kwargs["check_dtype"] = test_suite.check_dtype
return verification.verify(*args, **kwargs)
def parameterize_class_name(cls: Type, idx: int, input_dicts: Mapping[Any, Any]):
"""Combine class name with the parameterized arguments.
This function is passed to `parameterized.parameterized_class` as the
`class_name_func` argument.
"""
suffix = "_".join(f"{k}_{v}" for k, v in input_dicts.items())
return f"{cls.__name__}_{suffix}"
def set_rng_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
class _TestONNXRuntime(common_utils.TestCase):
opset_version = _constants.onnx_default_opset
keep_initializers_as_inputs = True # For IR version 3 type export.
is_script = False
check_shape = True
check_dtype = True
def setUp(self):
set_rng_seed(0)
onnxruntime.set_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
os.environ["ALLOW_RELEASED_ONNX_OPSET_ONLY"] = "0"
self.is_script_test_enabled = True
# The exported ONNX model may have fewer inputs than the pytorch model because of const folding.
# This mostly happens in unit tests, where we widely use torch.size or torch.shape,
# so the output depends only on the input shape, not its value.
# remained_onnx_input_idx is used to indicate which pytorch model input indices remain in the
# ONNX model (see the illustrative sketch after run_test below).
def run_test(
self,
model,
input_args,
input_kwargs=None,
rtol=1e-3,
atol=1e-7,
do_constant_folding=True,
dynamic_axes=None,
additional_test_inputs=None,
input_names=None,
output_names=None,
fixed_batch_size=False,
training=torch.onnx.TrainingMode.EVAL,
remained_onnx_input_idx=None,
verbose=False,
):
def _run_test(m, remained_onnx_input_idx, flatten=True):
return run_model_test(
self,
m,
input_args=input_args,
input_kwargs=input_kwargs,
rtol=rtol,
atol=atol,
do_constant_folding=do_constant_folding,
dynamic_axes=dynamic_axes,
additional_test_inputs=additional_test_inputs,
input_names=input_names,
output_names=output_names,
fixed_batch_size=fixed_batch_size,
training=training,
remained_onnx_input_idx=remained_onnx_input_idx,
flatten=flatten,
verbose=verbose,
)
if isinstance(remained_onnx_input_idx, dict):
scripting_remained_onnx_input_idx = remained_onnx_input_idx["scripting"]
tracing_remained_onnx_input_idx = remained_onnx_input_idx["tracing"]
else:
scripting_remained_onnx_input_idx = remained_onnx_input_idx
tracing_remained_onnx_input_idx = remained_onnx_input_idx
is_model_script = isinstance(
model, (torch.jit.ScriptModule, torch.jit.ScriptFunction)
)
if self.is_script_test_enabled and self.is_script:
script_model = model if is_model_script else torch.jit.script(model)
_run_test(script_model, scripting_remained_onnx_input_idx, flatten=False)
if not is_model_script and not self.is_script:
_run_test(model, tracing_remained_onnx_input_idx)
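# Illustrative sketch (not part of this test suite): a model such as the one
# below consumes only the *shape* of its input, so after constant folding the
# exported ONNX graph may retain no inputs at all. Passing
# remained_onnx_input_idx=[] to run_test signals that none of the pytorch
# inputs survive in the ONNX model. The class name below is hypothetical.
#
#     class ShapeOnly(torch.nn.Module):
#         def forward(self, x):
#             return torch.arange(x.size(0))
#
#     # hypothetical usage inside a test case:
#     # self.run_test(ShapeOnly(), (torch.randn(4, 3),), remained_onnx_input_idx=[])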
|
pytorch-master
|
test/onnx/onnx_test_common.py
|
import io
import onnx
import torch.onnx
from caffe2.python.core import BlobReference, Net
from caffe2.python.onnx.backend import Caffe2Backend
_next_idx = 0
# Clone net takes a dict instead of a lambda.
# It should probably take a lambda, as that is more flexible.
# We fake a dict here.
class _FakeDict:
def __init__(self, fn):
self.fn = fn
def get(self, name, _):
return self.fn(name)
def PyTorchModule(helper, model, sample_arguments, caffe2_inputs, prefix_name=None):
"""
Embed an ONNX-exportable PyTorch Model into a Caffe2 model being built.
Args:
helper (caffe2.python.core.ModelHelper): the model helper where
this imported network should be inserted
model (torch.nn.Module): the model to be exported
sample_arguments (tuple of arguments): the inputs to
the model, e.g., such that ``model(*args)`` is a valid
invocation of the model. Any non-Variable arguments will
be hard-coded into the exported model; any Variable arguments
will become inputs of the exported model, in the order they
occur in args. If args is a Variable, this is equivalent
to having called it with a 1-ary tuple of that Variable.
(Note: passing keyword arguments to the model is not currently
supported. Give us a shout if you need it.)
caffe2_inputs (list of str or caffe2.python.core.BlobReference): the
caffe2 Blobs that should be inputs to this network. Must be
the same length as sample_arguments
prefix_name: prefix name to add to each member of the blob; if None then
a fresh prefix pytorch_import_N/ is used
Returns:
A tuple of caffe2.python.core.BlobReference objects referring to the
model's outputs, or a single BlobReference when the model returns a single
value.
"""
if prefix_name is None:
global _next_idx
prefix_name = "pytorch_import_" + str(_next_idx) + "/"
_next_idx += 1
# TODO: handle the case where model cannot be exported
# and embed as a Python op in Caffe2
f = io.BytesIO()
torch.onnx.export(model, sample_arguments, f, export_params=True)
onnx_model = onnx.load(io.BytesIO(f.getvalue()))
init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
initialized = {x.name for x in onnx_model.graph.initializer}
uninitialized_inputs = {
x.name: i
for i, x in enumerate(onnx_model.graph.input)
if x.name not in initialized
}
if len(uninitialized_inputs) != len(caffe2_inputs):
raise ValueError(
"Expected {} inputs but found {}".format(
len(uninitialized_inputs), len(caffe2_inputs)
)
)
def remap_blob_name(name):
if name in uninitialized_inputs:
idx = uninitialized_inputs[name]
return str(caffe2_inputs[idx])
return prefix_name + name
predict_net = Net(predict_net).Clone("anon", _FakeDict(remap_blob_name))
helper.net.AppendNet(predict_net)
init_net = Net(init_net).Clone("anon", _FakeDict(remap_blob_name))
helper.param_init_net.AppendNet(init_net)
results = tuple(
BlobReference(remap_blob_name(x.name), helper.net)
for x in onnx_model.graph.output
)
return results
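# Hedged usage sketch (assumptions: caffe2's ModelHelper API and a blob named
# "data" already fed by the surrounding Caffe2 pipeline; the helper name and
# blob name below are illustrative, not part of this module):
#
#     from caffe2.python import model_helper
#     helper = model_helper.ModelHelper(name="pytorch_embed_example")
#     data = helper.net.AddExternalInput("data")
#     (out,) = PyTorchModule(
#         helper, torch.nn.Linear(4, 2), (torch.randn(1, 4),), [data]
#     )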
|
pytorch-master
|
test/onnx/pytorch_helper.py
|
# Owner(s): ["module: onnx"]
import caffe2.python.onnx.backend as backend
import torch
from torch.autograd import Function
from torch.nn import Module, Parameter
from torch.testing._internal import common_utils
from verify import verify
class TestVerify(common_utils.TestCase):
maxDiff = None
def assertVerifyExpectFail(self, *args, **kwargs):
try:
verify(*args, **kwargs)
except AssertionError as e:
if str(e):
# substring a small piece of string because the exact message
# depends on system's formatting settings
# self.assertExpected(str(e)[:60])
# NB: why do we comment out the above check? Because numpy keeps
# changing the error format, and we would have to keep updating the
# expect files, so let's relax this constraint.
return
else:
raise
# Don't put this in the try block; the except clause above would catch its AssertionError
self.assertTrue(False, msg="verify() did not fail when expected to")
def test_result_different(self):
class BrokenAdd(Function):
@staticmethod
def symbolic(g, a, b):
return g.op("Add", a, b)
@staticmethod
def forward(ctx, a, b):
return a.sub(b) # yahaha! you found me!
class MyModel(Module):
def forward(self, x, y):
return BrokenAdd().apply(x, y)
x = torch.tensor([1, 2])
y = torch.tensor([3, 4])
self.assertVerifyExpectFail(MyModel(), (x, y), backend)
def test_jumbled_params(self):
class MyModel(Module):
def __init__(self):
super().__init__()
def forward(self, x):
y = x * x
self.param = Parameter(torch.tensor([2.0]))
return y
x = torch.tensor([1, 2])
with self.assertRaisesRegex(RuntimeError, "state_dict changed"):
verify(MyModel(), x, backend)
def test_dynamic_model_structure(self):
class MyModel(Module):
def __init__(self):
super().__init__()
self.iters = 0
def forward(self, x):
if self.iters % 2 == 0:
r = x * x
else:
r = x + x
self.iters += 1
return r
x = torch.tensor([1, 2])
self.assertVerifyExpectFail(MyModel(), x, backend)
def test_embedded_constant_difference(self):
class MyModel(Module):
def __init__(self):
super().__init__()
self.iters = 0
def forward(self, x):
r = x[self.iters % 2]
self.iters += 1
return r
x = torch.tensor([[1, 2], [3, 4]])
self.assertVerifyExpectFail(MyModel(), x, backend)
def test_explicit_test_args(self):
class MyModel(Module):
def forward(self, x):
if x.data.sum() == 1.0:
return x + x
else:
return x * x
x = torch.tensor([[6, 2]])
y = torch.tensor([[2, -1]])
self.assertVerifyExpectFail(MyModel(), x, backend, test_args=[(y,)])
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_verify.py
|
# Owner(s): ["module: onnx"]
import copy
import io
import onnx
import torch
import torch.onnx
import torch.utils.cpp_extension
import torchvision
from autograd_helper import CustomFunction as CustomFunction2
from pytorch_test_common import (
skipIfNoCuda,
skipIfUnsupportedMaxOpsetVersion,
skipIfUnsupportedMinOpsetVersion,
)
from torch.onnx import (
OperatorExportTypes,
register_custom_op_symbolic,
TrainingMode,
unregister_custom_op_symbolic,
utils,
)
from torch.onnx.symbolic_helper import (
_set_operator_export_type,
_set_opset_version,
_unpack_list,
parse_args,
)
from torch.testing._internal import common_utils
from torch.testing._internal.common_utils import skipIfNoCaffe2, skipIfNoLapack
from verify import verify
class _BaseTestCase(common_utils.TestCase):
def setUp(self):
super().setUp()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
def _model_to_graph(
self,
model,
input,
do_constant_folding=True,
training=TrainingMode.EVAL,
operator_export_type=OperatorExportTypes.ONNX,
input_names=None,
dynamic_axes=None,
):
if training == torch.onnx.TrainingMode.TRAINING:
model.train()
elif training == torch.onnx.TrainingMode.EVAL:
model.eval()
utils._validate_dynamic_axes(dynamic_axes, model, None, None)
graph, params_dict, torch_out = utils._model_to_graph(
model,
input,
do_constant_folding=do_constant_folding,
_disable_torch_constant_prop=True,
operator_export_type=operator_export_type,
training=training,
input_names=input_names,
dynamic_axes=dynamic_axes,
)
return graph, params_dict, torch_out
class TestUtilityFuns_opset_independent(_BaseTestCase):
def test_unconvertible_ops(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return torch.cumsum(x, dim=0)
model = MyModule()
x = torch.randn(2, 3, 4)
graph, unconvertible_ops = utils.unconvertible_ops(model, (x,), opset_version=9)
iter = graph.nodes()
self.assertEqual(next(iter).kind(), "onnx::Constant")
self.assertEqual(next(iter).kind(), "prim::Constant")
self.assertEqual(next(iter).kind(), "aten::cumsum")
self.assertEqual(len(unconvertible_ops), 1)
self.assertEqual(unconvertible_ops, ["aten::cumsum"])
class TestUtilityFuns_opset9(_BaseTestCase):
opset_version = 9
def test_is_in_onnx_export(self):
test_self = self
class MyModule(torch.nn.Module):
def forward(self, x):
test_self.assertTrue(torch.onnx.is_in_onnx_export())
raise ValueError
return x + 1
x = torch.randn(3, 4)
f = io.BytesIO()
try:
torch.onnx.export(MyModule(), x, f, opset_version=self.opset_version)
except ValueError:
self.assertFalse(torch.onnx.is_in_onnx_export())
def test_validate_dynamic_axes_invalid_input_output_name(self):
import warnings
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
utils._validate_dynamic_axes(
{"input1": {}, "output": {}, "invalid_name1": {}, "invalid_name2": {}},
None,
["input1", "input2"],
["output"],
)
messages = [str(warning.message) for warning in w]
self.assertIn(
"Provided key invalid_name1 for dynamic axes is not a valid input/output name",
messages,
)
self.assertIn(
"Provided key invalid_name2 for dynamic axes is not a valid input/output name",
messages,
)
self.assertEqual(len(messages), 2)
@skipIfUnsupportedMinOpsetVersion(11)
def test_split_to_slice(self):
class SplitModule(torch.nn.Module):
def forward(self, x, y, t):
splits = (x.size(1), y.size(1))
out, out2 = torch.split(t, splits, dim=1)
return out, out2
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.randn(2, 3)
y = torch.randn(2, 4)
t = torch.randn(2, 7)
graph, _, _ = self._model_to_graph(
SplitModule(),
(x, y, t),
input_names=["x", "y", "t"],
dynamic_axes={"x": [0, 1], "y": [0, 1], "t": [0, 1]},
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::SplitToSequence")
def test_constant_fold_transpose(self):
class TransposeModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = torch.transpose(a, 1, 0)
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(3, 2)
graph, _, __ = self._model_to_graph(
TransposeModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Transpose")
self.assertNotEqual(node.kind(), "onnx::Cast")
self.assertEqual(len(list(graph.nodes())), 2)
def test_constant_fold_reduceL2(self):
class ReduceModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = torch.norm(a, p=2, dim=-2, keepdim=False)
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(2, 3)
graph, _, __ = self._model_to_graph(
ReduceModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::ReduceL2")
self.assertEqual(len(list(graph.nodes())), 2)
def test_constant_fold_reduceL1(self):
class NormModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = torch.norm(a, p=1, dim=-2)
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(2, 3)
graph, _, __ = self._model_to_graph(
NormModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::ReduceL1")
self.assertEqual(len(list(graph.nodes())), 2)
def test_constant_fold_slice(self):
class NarrowModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = torch.narrow(a, 0, 0, 1)
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(1, 3)
graph, _, __ = self._model_to_graph(
NarrowModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Slice")
self.assertNotEqual(node.kind(), "onnx::Cast")
self.assertEqual(len(list(graph.nodes())), 2)
def test_constant_fold_slice_index_exceeds_dim(self):
class SliceIndexExceedsDimModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = a[1:10] # index exceeds dimension
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(1, 3)
graph, _, __ = self._model_to_graph(
SliceIndexExceedsDimModule(),
(x,),
input_names=["x"],
dynamic_axes={"x": [0, 1]},
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Slice")
self.assertNotEqual(node.kind(), "onnx::Cast")
self.assertEqual(len(list(graph.nodes())), 2)
def test_constant_fold_slice_negative_index(self):
class SliceNegativeIndexModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = a[0:-1] # index relative to the end
c = torch.select(a, dim=-1, index=-2)
d = torch.select(a, dim=1, index=0)
return b + x, c + d
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(1, 3)
graph, _, __ = self._model_to_graph(
SliceNegativeIndexModule(),
(x,),
input_names=["x"],
dynamic_axes={"x": [0, 1]},
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Slice")
self.assertNotEqual(node.kind(), "onnx::Cast")
def test_constant_fold_gather(self):
class GatherModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = torch.select(a, dim=1, index=-2)
c = torch.index_select(a, dim=-2, index=torch.tensor([0, 1]))
return b + 1, c + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(1, 3)
model = GatherModule()
model(x)
graph, _, __ = self._model_to_graph(
GatherModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Gather")
def test_constant_fold_unsqueeze(self):
class UnsqueezeModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = torch.unsqueeze(a, -2)
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(1, 2, 3)
graph, _, __ = self._model_to_graph(
UnsqueezeModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Unsqueeze")
self.assertNotEqual(node.kind(), "onnx::Cast")
self.assertEqual(len(list(graph.nodes())), 2)
def test_constant_fold_unsqueeze_multi_axies(self):
class PReluModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.prelu = torch.nn.PReLU()
def forward(self, x):
a = torch.randn(2, 3, 4, 5, 8, 7)
return self.prelu(x) + a
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.randn(2, 3, 4, 5, 8, 7)
graph, _, __ = self._model_to_graph(
PReluModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2, 3, 4, 5]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Unsqueeze")
self.assertNotEqual(node.kind(), "onnx::Cast")
self.assertEqual(len(list(graph.nodes())), 5)
def test_constant_fold_squeeze_without_axes(self):
class SqueezeModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]])
return torch.squeeze(a) + x + torch.squeeze(a)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(2, 3)
graph, _, __ = self._model_to_graph(
SqueezeModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Squeeze")
self.assertNotEqual(node.kind(), "onnx::Cast")
self.assertEqual(len(list(graph.nodes())), 4)
def test_constant_fold_squeeze_with_axes(self):
class SqueezeAxesModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]])
return torch.squeeze(a, dim=-3) + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(2, 3)
graph, _, __ = self._model_to_graph(
SqueezeAxesModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Squeeze")
self.assertNotEqual(node.kind(), "onnx::Cast")
self.assertEqual(len(list(graph.nodes())), 2)
def test_constant_fold_concat(self):
class ConcatModule(torch.nn.Module):
def forward(self, x):
# Why did I insert a Cast here? There appears to be intentional
# behavior in ONNX constant folding where constant tensors which
# are not attached to any known to be foldable onnx
# operations don't get extracted into the initializer graph. So
# without these casts, we will actually fail to pull out one of
# the constants, thus failing constant folding. I think the
# test is wrong but I don't have time to write a more correct
# test (I think the right way to go about the test is to setup
# a predicate for what invariant graphs should hold after
# constant folding, and then verify this predicate holds.
# I think the asserts below are an attempt at this predicate,
# but it is not right!)
#
# More commentary at
# https://github.com/pytorch/pytorch/pull/18698/files#r340107552
a = torch.tensor([[1.0, 2.0, 3.0]]).to(torch.float)
b = torch.tensor([[4.0, 5.0, 6.0]]).to(torch.float)
c = torch.cat((a, b), 0)
d = b + c
return x + d
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(2, 3)
graph, _, __ = self._model_to_graph(
ConcatModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Concat")
self.assertNotEqual(node.kind(), "onnx::Cast")
self.assertEqual(len(list(graph.nodes())), 2)
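# Hedged sketch of the predicate idea raised in the comment above: rather than
# enumerating forbidden node kinds per test, one could assert that no foldable
# ONNX op survives in the exported graph. The names below (FOLDABLE_KINDS,
# _assert_fully_folded) are hypothetical helpers, not utilities defined in
# this file.
#
#     FOLDABLE_KINDS = {"onnx::Concat", "onnx::Cast", "onnx::Transpose"}
#
#     def _assert_fully_folded(test_case, graph):
#         for node in graph.nodes():
#             test_case.assertNotIn(node.kind(), FOLDABLE_KINDS)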
def test_constant_fold_lstm(self):
class GruNet(torch.nn.Module):
def __init__(self):
super().__init__()
self.mygru = torch.nn.GRU(7, 3, 1, bidirectional=False)
def forward(self, input, initial_state):
return self.mygru(input, initial_state)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
input = torch.randn(5, 3, 7)
h0 = torch.randn(1, 3, 3)
graph, _, __ = self._model_to_graph(
GruNet(),
(input, h0),
input_names=["input", "h0"],
dynamic_axes={"input": [0, 1, 2], "h0": [0, 1, 2]},
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Slice")
self.assertNotEqual(node.kind(), "onnx::Concat")
self.assertNotEqual(node.kind(), "onnx::Unsqueeze")
if self.opset_version <= 12:
self.assertEqual(len(list(graph.nodes())), 3)
else:
# The Unsqueeze op takes its "axes" parameter as an input instead of as an attribute when opset version >= 13
self.assertEqual(len(list(graph.nodes())), 4)
def test_constant_fold_transpose_matmul(self):
class MatMulNet(torch.nn.Module):
def __init__(self):
super().__init__()
self.B = torch.nn.Parameter(torch.ones(5, 3))
def forward(self, A):
return torch.matmul(A, torch.transpose(self.B, -1, -2))
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
A = torch.randn(2, 3)
graph, _, __ = self._model_to_graph(
MatMulNet(), (A,), input_names=["A"], dynamic_axes={"A": [0, 1]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Transpose")
self.assertEqual(len(list(graph.nodes())), 1)
def test_constant_fold_reshape(self):
class ReshapeModule(torch.nn.Module):
def __init__(
self,
):
super().__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
b = self.weight.reshape(1, -1, 1, 1)
return x * b
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.randn(4, 5)
graph, _, __ = self._model_to_graph(
ReshapeModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Reshape")
self.assertEqual(len(list(graph.nodes())), 1)
def test_constant_fold_div(self):
class Module(torch.nn.Module):
def __init__(
self,
):
super().__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
div = self.weight.div(torch.tensor([1, 2, 3, 4, 5]))
return div * x
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = self._model_to_graph(
Module(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Div")
self.assertEqual(len(list(graph.nodes())), 1)
def test_constant_fold_mul(self):
class Module(torch.nn.Module):
def __init__(
self,
):
super().__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
mul = self.weight.mul(torch.tensor([1, 2, 3, 4, 5]))
return mul / x
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = self._model_to_graph(
Module(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Mul")
self.assertEqual(len(list(graph.nodes())), 1)
def test_constant_fold_add(self):
class Module(torch.nn.Module):
def __init__(
self,
):
super().__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
add = self.weight + torch.tensor([1, 2, 3, 4, 5])
return add - x
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, params_dict, __ = self._model_to_graph(
Module(),
(x,),
do_constant_folding=True,
operator_export_type=OperatorExportTypes.ONNX,
input_names=["x"],
dynamic_axes={"x": [0, 1]},
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Add")
self.assertEqual(len(list(graph.nodes())), 1)
params = list(params_dict.values())
self.assertEqual(len(params), 1)
weight = params[0]
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(weight, torch.tensor([2, 3, 4, 5, 6]))
def test_constant_fold_sub(self):
class Module(torch.nn.Module):
def __init__(
self,
):
super().__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
sub = self.weight - torch.tensor([1, 2, 3, 4, 5])
return sub + x
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, params_dict, __ = self._model_to_graph(
Module(),
(x,),
do_constant_folding=True,
operator_export_type=OperatorExportTypes.ONNX,
input_names=["x"],
dynamic_axes={"x": [0, 1]},
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Sub")
self.assertEqual(len(list(graph.nodes())), 1)
params = list(params_dict.values())
self.assertEqual(len(params), 1)
weight = params[0]
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(weight, torch.tensor([0, -1, -2, -3, -4]))
def test_constant_fold_sqrt(self):
class Module(torch.nn.Module):
def __init__(
self,
):
super().__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
sqrt = torch.sqrt(self.weight)
return sqrt / x
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = self._model_to_graph(
Module(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Sqrt")
self.assertEqual(len(list(graph.nodes())), 1)
def test_constant_fold_shape(self):
class ShapeModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
shape = self.weight.shape[0]
return x + shape
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = self._model_to_graph(
ShapeModule(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]}
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::Shape")
self.assertEqual(len(list(graph.nodes())), 2)
def test_constant_fold_upsample_scale_fold_as_constant(self):
# The upsample scale is a constant, not a model parameter,
# therefore it should not be added as an initializer after constant folding.
model = torch.nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True)
x = torch.randn(1, 32, 224, 224)
f = io.BytesIO()
torch.onnx.export(model, x, f)
onnx_model = onnx.load(io.BytesIO(f.getvalue()))
self.assertEqual(len(onnx_model.graph.initializer), 0)
def test_verbose(self):
class MyModule(torch.nn.Module):
def forward(self, input):
return torch.exp(input)
x = torch.randn(3, 4)
def is_model_stripped(f, verbose=None):
if verbose is None:
torch.onnx.export(MyModule(), x, f, opset_version=self.opset_version)
else:
torch.onnx.export(
MyModule(), x, f, verbose=verbose, opset_version=self.opset_version
)
model = onnx.load(io.BytesIO(f.getvalue()))
model_strip = copy.copy(model)
onnx.helper.strip_doc_string(model_strip)
return model == model_strip
# test verbose=False (default)
self.assertTrue(is_model_stripped(io.BytesIO()))
# test verbose=True
self.assertFalse(is_model_stripped(io.BytesIO(), True))
# NB: remove this test once DataParallel can be correctly handled
def test_error_on_data_parallel(self):
model = torch.nn.DataParallel(torch.nn.ReflectionPad2d((1, 2, 3, 4)))
x = torch.randn(1, 2, 3, 4)
f = io.BytesIO()
with self.assertRaisesRegex(
ValueError,
"torch.nn.DataParallel is not supported by ONNX "
"exporter, please use 'attribute' module to "
"unwrap model from torch.nn.DataParallel. Try ",
):
torch.onnx.export(model, x, f, opset_version=self.opset_version)
@skipIfUnsupportedMinOpsetVersion(11)
def test_sequence_dim(self):
class Module(torch.nn.Module):
def forward(self, x, y):
return [x, y]
model = Module()
# Export with scripting to keep output as Sequence type.
# Tracing unpacks the list.
script_model = torch.jit.script(model)
x = torch.randn(2, 3)
# Case 1: dynamic axis
f = io.BytesIO()
y = torch.randn(2, 3)
torch.onnx.export(
script_model,
(x, y),
f,
opset_version=self.opset_version,
input_names=["x", "y"],
dynamic_axes={"y": [1]},
)
onnx_model = onnx.load(io.BytesIO(f.getvalue()))
loop_output_value_info_proto = onnx_model.graph.output[0]
ref_value_info_proto = onnx.helper.make_tensor_sequence_value_info(
loop_output_value_info_proto.name, 1, [2, None]
)
self.assertEqual(loop_output_value_info_proto, ref_value_info_proto)
# Case 2: no dynamic axes.
f = io.BytesIO()
y = torch.randn(2, 3)
torch.onnx.export(script_model, (x, y), f, opset_version=self.opset_version)
onnx_model = onnx.load(io.BytesIO(f.getvalue()))
loop_output_value_info_proto = onnx_model.graph.output[0]
ref_value_info_proto = onnx.helper.make_tensor_sequence_value_info(
loop_output_value_info_proto.name, 1, [2, 3]
)
self.assertEqual(loop_output_value_info_proto, ref_value_info_proto)
def test_export_mode(self):
class MyModule(torch.nn.Module):
def forward(self, x):
y = x + 1
return y
model = MyModule()
x = torch.randn(10, 3, 128, 128)
f = io.BytesIO()
# set model to inference mode and export in training mode
model.eval()
old_state = model.training
torch.onnx.export(
model,
(x,),
f,
opset_version=self.opset_version,
training=torch.onnx.TrainingMode.TRAINING,
)
# verify that the model state is preserved
self.assertEqual(model.training, old_state)
# set model to training mode and export in inference mode
model.train()
old_state = model.training
torch.onnx.export(
model,
(x,),
f,
opset_version=self.opset_version,
training=torch.onnx.TrainingMode.EVAL,
)
# verify that the model state is preserved
self.assertEqual(model.training, old_state)
@skipIfUnsupportedMinOpsetVersion(15)
def test_local_function(self):
class N(torch.nn.Module):
def __init__(self, prob):
super().__init__()
self.dropout = torch.nn.Dropout(prob)
def forward(self, x):
return self.dropout(x)
class M(torch.nn.Module):
def __init__(self, num_layers):
super().__init__()
self.num_layers = num_layers
self.lns = torch.nn.ModuleList(
[torch.nn.LayerNorm(3, eps=i) for i in range(num_layers)]
)
self.celu1 = torch.nn.CELU(1.0)
self.celu2 = torch.nn.CELU(2.0)
self.dropout = N(0.5)
def forward(self, x, y, z):
res1 = self.celu1(x)
res2 = self.celu2(y)
for ln in self.lns:
z = ln(z)
return res1 + res2, self.dropout(z)
x = torch.randn(2, 3)
y = torch.randn(2, 3)
z = torch.randn(2, 3)
# Export the specified modules. Also test specifying modules that won't
# exist in the exported model.
# Exporting the model in inference mode removes the dropout node,
# so the dropout module no longer exists in the graph.
f = io.BytesIO()
torch.onnx.export(
M(3),
(x, y, z),
f,
opset_version=self.opset_version,
export_modules_as_functions={
torch.nn.CELU,
torch.nn.Dropout,
torch.nn.LayerNorm,
},
)
onnx_model = onnx.load(io.BytesIO(f.getvalue()))
# Check function definition
funcs = onnx_model.functions
celu_funcs = [f for f in funcs if f.name == "CELU"]
self.assertEqual(len(celu_funcs), 1)
self.assertEqual(celu_funcs[0].domain, "torch.nn.modules.activation")
self.assertEqual(len(celu_funcs[0].attribute), 3)
ln_funcs = [f for f in funcs if f.name == "LayerNorm"]
self.assertEqual(len(ln_funcs), 1)
self.assertEqual(ln_funcs[0].domain, "torch.nn.modules.normalization")
self.assertEqual(len(ln_funcs[0].attribute), 3)
# Check local function nodes
nodes = onnx_model.graph.node
celu_ns = [n for n in nodes if n.op_type == "CELU"]
ln_ns = [n for n in nodes if n.op_type == "LayerNorm"]
self.assertEqual(len(celu_ns), 2)
self.assertEqual(celu_ns[0].domain, "torch.nn.modules.activation")
self.assertEqual(len(celu_ns[0].attribute), 3)
self.assertEqual(len(ln_ns), 3)
self.assertEqual(ln_ns[0].domain, "torch.nn.modules.normalization")
self.assertEqual(len(ln_ns[0].attribute), 3)
# Export specified modules.
f = io.BytesIO()
torch.onnx.export(
M(3),
(x, y, z),
f,
opset_version=self.opset_version,
export_modules_as_functions={torch.nn.CELU},
)
onnx_model = onnx.load(io.BytesIO(f.getvalue()))
funcs = onnx_model.functions
self.assertEqual(len(funcs), 1)
self.assertEqual(funcs[0].name, "CELU")
# Export with empty specified modules. Normal export.
f = io.BytesIO()
torch.onnx.export(
M(3),
(x, y, z),
f,
opset_version=self.opset_version,
export_modules_as_functions=set(),
)
onnx_model = onnx.load(io.BytesIO(f.getvalue()))
funcs = onnx_model.functions
self.assertEqual(len(funcs), 0)
# Export all modules. Should contain {M, CELU, LayerNorm}.
f = io.BytesIO()
torch.onnx.export(
M(3),
(x, y, z),
f,
opset_version=self.opset_version,
export_modules_as_functions=True,
)
onnx_model = onnx.load(io.BytesIO(f.getvalue()))
funcs = onnx_model.functions
self.assertEqual(len(funcs), 3)
@skipIfUnsupportedMinOpsetVersion(15)
def test_local_function_overloads(self):
class NWithOverloads(torch.nn.Module):
def forward(self, x, y=None, z=None):
if y is None:
return x + 1
elif z is None:
return x + y
else:
return x + y, x + z
class M(torch.nn.Module):
def __init__(self, num_layers):
super().__init__()
self.n = NWithOverloads()
def forward(self, x, y, z):
return self.n(x), self.n(x, y), self.n(x, y, z)
x = torch.randn(2, 3)
y = torch.randn(2, 3)
z = torch.randn(2, 3)
f = io.BytesIO()
torch.onnx.export(
M(3),
(x, y, z),
f,
opset_version=self.opset_version,
export_modules_as_functions={NWithOverloads},
)
onnx_model = onnx.load(io.BytesIO(f.getvalue()))
funcs = onnx_model.functions
self.assertEqual(len(funcs), 3)
func_names = [f.name for f in funcs]
self.assertIn("NWithOverloads", func_names)
self.assertIn("NWithOverloads.1", func_names)
self.assertIn("NWithOverloads.2", func_names)
@skipIfUnsupportedMinOpsetVersion(15)
def test_local_function_infer_scopes(self):
class M(torch.nn.Module):
def forward(self, x):
# Concatenation of scalars inserts unscoped tensors into the IR graph.
new_tensor_shape = x.size()[:-1] + (1, 1, -1)
tensor = x.view(*new_tensor_shape)
return tensor
x = torch.randn(4, 5)
f = io.BytesIO()
torch.onnx.export(
M(),
(x,),
f,
export_modules_as_functions=True,
opset_version=self.opset_version,
do_constant_folding=False,
)
onnx_model = onnx.load(io.BytesIO(f.getvalue()))
funcs = onnx_model.functions
self.assertIn("M", [f.name for f in funcs])
@skipIfUnsupportedMinOpsetVersion(15)
def test_local_function_predefined_attributes(self):
class M(torch.nn.Module):
num_layers: int
def __init__(self, num_layers):
super().__init__()
self.num_layers = num_layers
self.lns = torch.nn.ModuleList(
[torch.nn.LayerNorm(3, eps=1e-4) for _ in range(num_layers)]
)
def forward(self, x):
for ln in self.lns:
x = ln(x)
return x
x = torch.randn(2, 3)
f = io.BytesIO()
model = M(3)
torch.onnx.export(
model,
(x,),
f,
export_modules_as_functions=True,
opset_version=self.opset_version,
)
onnx_model = onnx.load(io.BytesIO(f.getvalue()))
funcs = onnx_model.functions
m_funcs = [fn for fn in funcs if fn.name == "M"]
self.assertEqual(m_funcs[0].attribute, ["num_layers"])
ln_funcs = [fn for fn in funcs if fn.name == "LayerNorm"]
self.assertEqual(ln_funcs[0].attribute, ["eps", "elementwise_affine"])
from onnx import helper
m_node = [n for n in onnx_model.graph.node if n.op_type == "M"]
self.assertEqual(
m_node[0].attribute[0],
helper.make_attribute("num_layers", model.num_layers),
)
ln_nodes = [n for n in m_funcs[0].node if n.op_type == "LayerNorm"]
expected_ln_attrs = [
helper.make_attribute(
"elementwise_affine", model.lns[0].elementwise_affine
),
helper.make_attribute("eps", model.lns[0].eps),
]
for ln_node in ln_nodes:
self.assertIn(ln_node.attribute[0], expected_ln_attrs)
self.assertIn(ln_node.attribute[1], expected_ln_attrs)
def test_aten_fallthrough(self):
# Test aten export of op with no symbolic
class Module(torch.nn.Module):
def forward(self, x):
return torch.erfc(x)
x = torch.randn(2, 3, 4)
_set_opset_version(self.opset_version)
graph, _, __ = self._model_to_graph(
Module(),
(x,),
operator_export_type=OperatorExportTypes.ONNX_FALLTHROUGH,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2]},
)
iter = graph.nodes()
self.assertEqual(next(iter).kind(), "aten::erfc")
def test_custom_op_fallthrough(self):
# Test custom op
op_source = """
#include <torch/script.h>
torch::Tensor custom_add(torch::Tensor self, torch::Tensor other) {
return self + other;
}
static auto registry =
torch::RegisterOperators("custom_namespace::custom_op", &custom_add);
"""
torch.utils.cpp_extension.load_inline(
name="custom_add",
cpp_sources=op_source,
is_python_module=False,
verbose=True,
)
class FooModel(torch.nn.Module):
def forward(self, input, other):
# Calling custom op
return torch.ops.custom_namespace.custom_op(input, other)
x = torch.randn(2, 3, 4, requires_grad=False)
y = torch.randn(2, 3, 4, requires_grad=False)
model = FooModel()
graph, _, __ = self._model_to_graph(
model,
(x, y),
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH,
input_names=["x", "y"],
dynamic_axes={"x": [0, 1, 2], "y": [0, 1, 2]},
)
iter = graph.nodes()
self.assertEqual(next(iter).kind(), "custom_namespace::custom_op")
def test_custom_opsets_gelu(self):
self.addCleanup(unregister_custom_op_symbolic, "::gelu", 1)
def gelu(g, self, approximate):
return g.op("com.microsoft::Gelu", self).setType(self.type())
register_custom_op_symbolic("::gelu", gelu, 1)
model = torch.nn.GELU(approximate="none")
x = torch.randn(3, 3)
f = io.BytesIO()
torch.onnx.export(
model,
(x,),
f,
opset_version=self.opset_version,
custom_opsets={"com.microsoft": 1},
)
graph = onnx.load(io.BytesIO(f.getvalue()))
self.assertEqual(graph.graph.node[0].op_type, "Gelu")
self.assertEqual(graph.opset_import[0].version, self.opset_version)
self.assertEqual(graph.opset_import[1].domain, "com.microsoft")
self.assertEqual(graph.opset_import[1].version, 1)
def test_register_aten_custom_op_symbolic(self):
self.addCleanup(unregister_custom_op_symbolic, "aten::gelu", 1)
def gelu(g, self, approximate):
return g.op("com.microsoft::Gelu", self).setType(self.type())
register_custom_op_symbolic("aten::gelu", gelu, 1)
model = torch.nn.GELU(approximate="none")
x = torch.randn(3, 3)
f = io.BytesIO()
torch.onnx.export(model, (x,), f, opset_version=self.opset_version)
graph = onnx.load(io.BytesIO(f.getvalue()))
self.assertEqual(graph.graph.node[0].op_type, "Gelu")
self.assertEqual(graph.opset_import[1].domain, "com.microsoft")
@skipIfNoLapack
def test_custom_opsets_inverse(self):
class CustomInverse(torch.nn.Module):
def forward(self, x):
return torch.inverse(x) + x
def inverse(g, self):
return g.op("com.microsoft::Inverse", self).setType(self.type())
register_custom_op_symbolic("::inverse", inverse, 1)
model = CustomInverse()
x = torch.randn(2, 3, 3)
f = io.BytesIO()
torch.onnx.export(
model,
(x,),
f,
opset_version=self.opset_version,
custom_opsets={"com.microsoft": 1},
)
graph = onnx.load(io.BytesIO(f.getvalue()))
self.assertEqual(graph.graph.node[0].op_type, "Inverse")
self.assertEqual(graph.opset_import[0].version, self.opset_version)
self.assertEqual(graph.opset_import[1].domain, "com.microsoft")
self.assertEqual(graph.opset_import[1].version, 1)
def test_onnx_fallthrough(self):
# Test aten export of op with symbolic for aten
class Module(torch.nn.Module):
def forward(self, x):
return torch.digamma(x)
x = torch.randn(100, 128)
graph, _, __ = self._model_to_graph(
Module(),
(x,),
operator_export_type=OperatorExportTypes.ONNX_FALLTHROUGH,
input_names=["x"],
dynamic_axes={"x": [0, 1]},
)
iter = graph.nodes()
self.assertEqual(next(iter).kind(), "aten::digamma")
# prim::ListConstruct is exported as onnx::SequenceConstruct for opset >= 11
@skipIfUnsupportedMaxOpsetVersion(10)
def test_prim_fallthrough(self):
# Test prim op
class PrimModule(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
if isinstance(x, list):
y = x
else:
y = [x]
return y
x = torch.tensor([2])
model = PrimModule()
model.eval()
graph, _, __ = self._model_to_graph(
model,
(x,),
operator_export_type=OperatorExportTypes.ONNX_FALLTHROUGH,
input_names=["x"],
dynamic_axes={"x": [0]},
)
iter = graph.nodes()
self.assertEqual(next(iter).kind(), "prim::ListConstruct")
def test_custom_layer_tuple(self):
class CustomFunction(torch.autograd.Function):
@staticmethod
def symbolic(g, input):
return g.op("CustomNamespace::Custom", input, outputs=2)
@staticmethod
def forward(ctx, input):
return input, input
class Custom(torch.nn.Module):
def forward(self, input):
return CustomFunction.apply(input)
model = Custom()
batch = torch.FloatTensor(1, 3)
graph, _, _ = self._model_to_graph(
model, batch, input_names=["batch"], dynamic_axes={"batch": [0, 1]}
)
iter = graph.nodes()
self.assertEqual(next(iter).kind(), "CustomNamespace::Custom")
def test_autograd_onnx_fallthrough(self):
class CustomFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.clamp(min=0)
@staticmethod
def backward(ctx, grad_output):
(input,) = ctx.saved_tensors
grad_input = grad_output.clone()
grad_input[input < 0] = 0
return grad_input
class Custom(torch.nn.Module):
def forward(self, input):
return CustomFunction.apply(input)
model = Custom()
batch = torch.FloatTensor(1, 3)
graph, _, _ = self._model_to_graph(
model,
batch,
operator_export_type=OperatorExportTypes.ONNX_FALLTHROUGH,
input_names=["batch"],
dynamic_axes={"batch": [0, 1]},
)
iter = graph.nodes()
self.assertEqual(next(iter).kind(), "prim::PythonOp")
def test_autograd_module_name(self):
class CustomFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.clamp(min=0)
@staticmethod
def backward(ctx, grad_output):
(input,) = ctx.saved_tensors
grad_input = grad_output.clone()
grad_input[input < 0] = 0
return grad_input
class Custom(torch.nn.Module):
def forward(self, input):
return CustomFunction.apply(input) + CustomFunction2.apply(input)
model = Custom()
batch = torch.FloatTensor(1, 3)
graph, _, _ = self._model_to_graph(
model,
batch,
operator_export_type=OperatorExportTypes.ONNX_FALLTHROUGH,
input_names=["batch"],
dynamic_axes={"batch": [0, 1]},
)
iter = graph.nodes()
autograd1 = next(iter)
autograd2 = next(iter)
self.assertEqual(autograd1.kind(), "prim::PythonOp")
self.assertEqual(autograd2.kind(), "prim::PythonOp")
self.assertNotEqual(autograd1.s("module"), autograd2.s("module"))
def test_unused_initializers(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv2 = torch.nn.ConvTranspose2d(
16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(1, 1)
)
self.k_proj = torch.nn.Linear(5, 5, bias=True)
def forward(self, x):
x = self.conv2(x)
return x
x = torch.randn(20, 16, 50, 100)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
_, params_dict, __ = self._model_to_graph(
Model(),
(x,),
do_constant_folding=False,
operator_export_type=OperatorExportTypes.ONNX,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2, 3]},
)
self.assertEqual(len(params_dict), 2)
def test_scripting_param(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(
3, 16, kernel_size=1, stride=2, padding=3, bias=True
)
self.bn = torch.nn.BatchNorm2d(16, affine=True)
def forward(self, x):
x = self.conv(x)
bn = self.bn(x)
return bn
model = torch.jit.script(MyModule())
x = torch.randn(10, 3, 128, 128)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = self._model_to_graph(
model,
(x,),
do_constant_folding=True,
operator_export_type=OperatorExportTypes.ONNX,
training=torch.onnx.TrainingMode.TRAINING,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2, 3]},
)
graph_input_params = [param.debugName() for param in graph.inputs()]
for item in dict(model.named_parameters()):
self.assertIn(
item,
graph_input_params,
"Graph parameter names does not match model parameters.",
)
@skipIfNoCaffe2
def test_modifying_params(self):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.tensor([2.0]))
def forward(self, x):
y = x * x
self.param.data.add_(1.0)
return y
x = torch.tensor([1, 2])
# Import locally, as the caffe2 backend requires an additional build flag
# and is only used in this test case.
import caffe2.python.onnx.backend as backend
verify(MyModel(), x, backend, do_constant_folding=False)
def test_fuse_conv_bn(self):
class Fuse(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(
3, 2, kernel_size=1, stride=2, padding=3, bias=True
)
self.bn = torch.nn.BatchNorm2d(2)
def forward(self, x):
out = self.conv(x)
return self.bn(out)
x = torch.randn(2, 3, 2, 2, requires_grad=True)
graph, _, __ = self._model_to_graph(
Fuse(),
(x,),
training=TrainingMode.EVAL,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2, 3]},
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::BatchNormalization")
self.assertEqual(node.kind(), "onnx::Conv")
self.assertEqual(len(list(graph.nodes())), 1)
def test_fuse_resnet18(self):
model = torchvision.models.resnet18(pretrained=False)
x = torch.randn(2, 3, 224, 224, requires_grad=True)
graph, _, __ = self._model_to_graph(
model,
(x,),
training=TrainingMode.EVAL,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2, 3]},
)
for node in graph.nodes():
self.assertNotEqual(node.kind(), "onnx::BatchNormalization")
def test_onnx_function_substitution_pass(self):
@torch.jit.script
def f(x: torch.Tensor, y: torch.Tensor):
z = x - y
return x + z
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return f(x, y)
input_1 = torch.tensor([11])
input_2 = torch.tensor([12])
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = self._model_to_graph(
MyModule(),
(input_1, input_2),
do_constant_folding=True,
operator_export_type=OperatorExportTypes.ONNX,
input_names=["input_1", "input_2"],
dynamic_axes={"input_1": [0], "input_2": [0]},
)
# Check that the prim::Constant node in the graph representing the
# scripted function `f` is removed and the following prim::CallFunction
# is replaced by the inlined graph, with onnx::Sub and onnx::Add nodes.
for node in graph.nodes():
self.assertNotEqual(node.kind(), "prim::Constant")
self.assertEqual(
len(list(graph.nodes())), 2
) # onnx::Sub and onnx::Add nodes only.
def test_onnx_value_name(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.in_weight = torch.nn.Parameter(torch.Tensor(3, 3))
self.in_bias = torch.nn.Parameter(torch.Tensor(3))
def forward(self, x):
start = 0
end = None
weight = self.in_weight
bias = self.in_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return torch.nn.functional.linear(x, weight, bias)
model = MyModule()
x = torch.randn(3, 3)
f = io.BytesIO()
model.eval()
torch.onnx.export(
model,
(x,),
f,
opset_version=self.opset_version,
keep_initializers_as_inputs=True,
)
graph = onnx.load(io.BytesIO(f.getvalue()))
self.assertEqual(graph.graph.input[1].name, "in_weight")
self.assertEqual(graph.graph.input[2].name, "in_bias")
def test_onnx_intermediate_renaming(self):
class RenamedIntermediateModule(torch.nn.Module):
def __init__(self):
super().__init__()
self._module_1 = torch.nn.Linear(10, 10)
self._module_2 = torch.nn.Linear(10, 10)
self._module_3 = torch.nn.Linear(10, 10)
self._module_4 = torch.nn.Linear(10, 10)
def forward(self, x):
y = self._module_1(x)
z = self._module_2(y)
z = self._module_3(y * z)
z = self._module_4(y * z)
return z
module = RenamedIntermediateModule()
g, p, o = utils._model_to_graph(module, torch.ones(1, 10), output_names=["y"])
renamed_intermediate = 0
for n in g.nodes():
for v in n.inputs():
if v.debugName().startswith("onnx::Mul_"):
renamed_intermediate += 1
self.assertEqual(renamed_intermediate, 2)
def _test_deduplicate_initializers(self, torchscript=False):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer1 = torch.nn.Linear(3, 3)
self.layer2 = torch.nn.Linear(3, 3)
# Reusing layers.
self.layer3 = self.layer1
# Reusing parameters.
self.layer2.weight = self.layer1.weight
self.layer1.bias = self.layer2.bias
# Parameters backed by different tensors that are equal in value.
self.param1 = torch.nn.Parameter(torch.tensor([1.0, 2.0, 3.0]))
self.param2 = torch.nn.Parameter(torch.tensor([1.0, 2.0, 3.0]))
def forward(self, x):
return (
self.layer3(self.layer2(self.layer1(x))) + self.param1 + self.param2
)
model = torch.jit.script(MyModule()) if torchscript else MyModule()
x = torch.randn(3, 3)
param_name_set = {k for k, _ in model.named_parameters()}
# Test training mode.
model.train()
f = io.BytesIO()
torch.onnx.export(
model,
(x,),
f,
training=TrainingMode.TRAINING,
opset_version=self.opset_version,
)
graph = onnx.load(io.BytesIO(f.getvalue()))
self.assertSetEqual({i.name for i in graph.graph.initializer}, param_name_set)
model.train()
f = io.BytesIO()
torch.onnx.export(
model,
(x,),
f,
training=TrainingMode.PRESERVE,
opset_version=self.opset_version,
)
graph = onnx.load(io.BytesIO(f.getvalue()))
self.assertSetEqual({i.name for i in graph.graph.initializer}, param_name_set)
# Test eval mode.
model.eval()
f = io.BytesIO()
torch.onnx.export(model, (x,), f, opset_version=self.opset_version)
graph = onnx.load(io.BytesIO(f.getvalue()))
param_name_set.remove("param2")
self.assertSetEqual({i.name for i in graph.graph.initializer}, param_name_set)
def test_deduplicate_initializers(self):
self._test_deduplicate_initializers(torchscript=False)
def test_deduplicate_initializers_torchscript(self):
self._test_deduplicate_initializers(torchscript=True)
@skipIfNoCuda
def test_deduplicate_initializers_diff_devices(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.w_cpu = torch.nn.Parameter(
torch.ones(3, device=torch.device("cpu"))
)
self.w_cuda = torch.nn.Parameter(
torch.ones(3, device=torch.device("cuda"))
)
def forward(self, x, y):
return x + self.w_cpu, y + self.w_cuda
x = torch.randn(3, 3, device=torch.device("cpu"))
y = torch.randn(3, 3, device=torch.device("cuda"))
f = io.BytesIO()
torch.onnx.export(Model(), (x, y), f, opset_version=self.opset_version)
graph = onnx.load(io.BytesIO(f.getvalue()))
self.assertSetEqual({i.name for i in graph.graph.initializer}, {"w_cpu"})
def test_duplicated_output_node(self):
class DuplicatedOutputNet(torch.nn.Module):
def __init__(self, input_size, num_classes):
super().__init__()
self.fc1 = torch.nn.Linear(input_size, num_classes)
def forward(self, input0, input1):
out1 = self.fc1(input0)
out2 = self.fc1(input1)
return out1, out1, out2, out1, out2
N, D_in, H, D_out = 64, 784, 500, 10
pt_model = DuplicatedOutputNet(D_in, D_out)
f = io.BytesIO()
x = torch.randn(N, D_in)
dynamic_axes = {
"input0": {0: "input0_dim0", 1: "input0_dim1"},
"input1": {0: "input1_dim0", 1: "input1_dim1"},
"output-0": {0: "output-0_dim0", 1: "output-0_dim1"},
"output-1": {0: "output-1_dim0", 1: "output-1_dim1"},
"output-2": {0: "output-2_dim0", 1: "output-2_dim1"},
"output-3": {0: "output-3_dim0", 1: "output-3_dim1"},
"output-4": {0: "output-4_dim0", 1: "output-4_dim1"},
}
torch.onnx.export(
pt_model,
(x, x),
f,
input_names=["input0", "input1"],
output_names=["output-0", "output-1", "output-2", "output-3", "output-4"],
do_constant_folding=False,
training=torch.onnx.TrainingMode.TRAINING,
dynamic_axes=dynamic_axes,
verbose=True,
keep_initializers_as_inputs=True,
)
graph = onnx.load(io.BytesIO(f.getvalue()))
self.assertEqual(graph.graph.input[0].name, "input0")
self.assertEqual(graph.graph.input[1].name, "input1")
for i in range(5):
self.assertEqual(graph.graph.output[i].name, f"output-{i}")
self.assertEqual(graph.graph.node[0].op_type, "Gemm")
self.assertEqual(graph.graph.node[1].op_type, "Identity")
self.assertEqual(graph.graph.node[2].op_type, "Identity")
self.assertEqual(graph.graph.node[3].op_type, "Gemm")
self.assertEqual(graph.graph.node[4].op_type, "Identity")
def test_deduplicate_ignore_upsample_scale(self):
# The upsample scale is a constant, not a model parameter,
# therefore it should be ignored by shared-weight deduplication.
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.upsample_1 = torch.nn.Upsample(scale_factor=2)
self.upsample_2 = torch.nn.Upsample(scale_factor=2)
def forward(self, x):
return self.upsample_1(x), self.upsample_2(x)
f = io.BytesIO()
x = torch.randn(1, 32, 224, 224)
torch.onnx.export(Model(), x, f)
onnx_model = onnx.load(io.BytesIO(f.getvalue()))
# aten::upsample is exported as onnx::Resize
resize_nodes = [n for n in onnx_model.graph.node if n.op_type == "Resize"]
self.assertEqual(len(resize_nodes), 2)
for resize_node in resize_nodes:
scale_node = [
n for n in onnx_model.graph.node if n.output[0] == resize_node.input[2]
]
self.assertEqual(len(scale_node), 1)
self.assertEqual(scale_node[0].op_type, "Constant")
def test_bad_symbolic_registration(self):
_onnx_opset_version = 9
@parse_args("v")
def cat(g, tensor_list, dim):
tensors = _unpack_list(tensor_list)
return g.op("Concat", *tensors, axis_i=dim)
register_custom_op_symbolic("::cat", cat, _onnx_opset_version)
class CatModel(torch.nn.Module):
def forward(self, x):
return torch.cat((x, x, x), 0)
model = CatModel()
x = torch.randn(2, 3)
f = io.BytesIO()
self.assertExpectedRaisesInline(
AssertionError,
lambda: torch.onnx.export(
model, (x,), f, opset_version=_onnx_opset_version
),
(
"A mismatch between the number of arguments (2) and their descriptors (1) was found at symbolic function "
"'cat'. If you believe this is not due to custom symbolic implementation within your code or an external "
"library, please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml to "
"report this bug."
),
)
unregister_custom_op_symbolic("::cat", _onnx_opset_version)
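# For reference, a registration that matches the signature would declare one
# descriptor per symbolic argument. This corrected form is only a sketch and
# is not exercised by the test above:
#
#   @parse_args("v", "i")
#   def cat(g, tensor_list, dim):
#       tensors = _unpack_list(tensor_list)
#       return g.op("Concat", *tensors, axis_i=dim)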
class TestUtilityFuns_opset10(TestUtilityFuns_opset9):
opset_version = 10
class TestUtilityFuns_opset11(TestUtilityFuns_opset9):
opset_version = 11
class TestUtilityFuns_opset12(TestUtilityFuns_opset9):
opset_version = 12
class TestUtilityFuns_opset13(TestUtilityFuns_opset9):
opset_version = 13
class TestUtilityFuns_opset14(TestUtilityFuns_opset9):
opset_version = 14
class TestUtilityFuns_opset15(TestUtilityFuns_opset9):
opset_version = 15
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_utility_funs.py
|
# Owner(s): ["module: onnx"]
import glob
import inspect
import io
import itertools
import os
import shutil
import tempfile
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
from pytorch_test_common import (
BATCH_SIZE,
flatten,
RNN_HIDDEN_SIZE,
RNN_INPUT_SIZE,
RNN_SEQUENCE_LENGTH,
)
from torch.autograd import Function, Variable
from torch.nn import functional, Module
from torch.onnx.symbolic_helper import (
_get_tensor_dim_size,
_get_tensor_sizes,
parse_args,
)
from torch.testing._internal import common_utils
from torch.testing._internal.common_utils import skipIfCaffe2, skipIfNoLapack
"""Usage: python test/onnx/test_operators.py [--no-onnx] [--produce-onnx-test-data]
--no-onnx: no onnx python dependence
--produce-onnx-test-data: generate onnx test data
--accept: accept onnx updates and overwrite models
"""
# Full diff for expect files
import unittest
unittest.TestCase.maxDiff = None
_onnx_test = False # flag to produce onnx test cases.
_onnx_dep = True # flag to import onnx package.
def export_to_pbtxt(model, inputs, *args, **kwargs):
return torch.onnx.export_to_pretty_string(
model, inputs, google_printer=True, *args, **kwargs
)
def export_to_pb(model, inputs, *args, **kwargs):
f = io.BytesIO()
with torch.no_grad():
torch.onnx.export(model, inputs, f, *args, **kwargs)
return f.getvalue()
class FuncModule(Module):
def __init__(self, f, params=None):
if params is None:
params = ()
super().__init__()
self.f = f
self.params = nn.ParameterList(list(params))
def forward(self, *args):
return self.f(*itertools.chain(args, self.params))
class TestOperators(common_utils.TestCase):
def assertONNX(self, f, args, params=None, **kwargs):
if params is None:
params = ()
if isinstance(f, nn.Module):
m = f
else:
m = FuncModule(f, params)
m.eval()
onnx_model_pbtxt = export_to_pbtxt(m, args, **kwargs)
subname = kwargs.pop("subname", None)
self.assertExpected(onnx_model_pbtxt, subname)
if _onnx_dep:
onnx_model_pb = export_to_pb(m, args, **kwargs)
import onnx
import onnx.checker
import onnx.numpy_helper
import onnx_test_common
model_def = onnx.ModelProto.FromString(onnx_model_pb)
onnx.checker.check_model(model_def)
if _onnx_test:
test_function = inspect.stack()[1][0].f_code.co_name
test_name = test_function[0:4] + "_operator" + test_function[4:]
output_dir = os.path.join(
onnx_test_common.pytorch_operator_dir, test_name
)
# Assume:
# 1) the old test output directory should be deleted before the test runs.
# 2) there is only one assertONNX per test, otherwise the data will be overwritten.
assert not os.path.exists(output_dir), "{} should not exist!".format(
output_dir
)
os.makedirs(output_dir)
with open(os.path.join(output_dir, "model.onnx"), "wb") as file:
file.write(model_def.SerializeToString())
data_dir = os.path.join(output_dir, "test_data_set_0")
os.makedirs(data_dir)
if isinstance(args, Variable):
args = (args,)
for index, var in enumerate(flatten(args)):
tensor = onnx.numpy_helper.from_array(var.data.numpy())
with open(
os.path.join(data_dir, f"input_{index}.pb"), "wb"
) as file:
file.write(tensor.SerializeToString())
outputs = m(*args)
if isinstance(outputs, Variable):
outputs = (outputs,)
for index, var in enumerate(flatten(outputs)):
tensor = onnx.numpy_helper.from_array(var.data.numpy())
with open(
os.path.join(data_dir, f"output_{index}.pb"), "wb"
) as file:
file.write(tensor.SerializeToString())
def assertONNXRaises(self, err, f, args, params=None, **kwargs):
if params is None:
params = ()
if isinstance(f, nn.Module):
m = f
else:
m = FuncModule(f, params)
self.assertExpectedRaises(err, lambda: export_to_pbtxt(m, args, **kwargs))
def assertONNXRaisesRegex(self, err, reg, f, args, params=None, **kwargs):
if params is None:
params = ()
if isinstance(f, nn.Module):
m = f
else:
m = FuncModule(f, params)
with self.assertRaisesRegex(err, reg):
export_to_pbtxt(m, args, **kwargs)
def test_basic(self):
x = torch.tensor([0.4], requires_grad=True)
y = torch.tensor([0.7], requires_grad=True)
self.assertONNX(lambda x, y: -torch.sigmoid(torch.tanh(x * (x + y))), (x, y))
def test_view(self):
x = torch.tensor([0.0], requires_grad=True)
self.assertONNX(lambda x: x.view(1, 1), x)
def test_index(self):
x = torch.tensor([[0.0]], requires_grad=True)
self.assertONNX(lambda x: x[0], x)
def test_type_as(self):
x = torch.tensor([0.0], requires_grad=True)
self.assertONNX(lambda x: x.type_as(x), x)
def test_addconstant(self):
x = torch.randn(2, 3, requires_grad=True).double()
self.assertONNX(lambda x: x + 1, x)
def test_add_broadcast(self):
x = torch.randn(2, 3, requires_grad=True).double()
y = torch.randn(3, requires_grad=True).double()
self.assertONNX(lambda x, y: x + y, (x, y))
def test_add_left_broadcast(self):
x = torch.randn(3, requires_grad=True).double()
y = torch.randn(2, 3, requires_grad=True).double()
self.assertONNX(lambda x, y: x + y, (x, y))
def test_add_size1_broadcast(self):
x = torch.randn(2, 3, requires_grad=True).double()
y = torch.randn(2, 1, requires_grad=True).double()
self.assertONNX(lambda x, y: x + y, (x, y))
def test_add_size1_right_broadcast(self):
x = torch.randn(2, 3, requires_grad=True).double()
y = torch.randn(3, requires_grad=True).double()
self.assertONNX(lambda x, y: x + y, (x, y))
def test_add_size1_singleton_broadcast(self):
x = torch.randn(2, 3, requires_grad=True).double()
y = torch.randn(1, 3, requires_grad=True).double()
self.assertONNX(lambda x, y: x + y, (x, y))
def test_rsub(self):
x = torch.randn(2, 3, requires_grad=True).double()
self.assertONNX(lambda x: 1 - x, (x,))
def test_mul_bool(self):
x = torch.tensor([True, False, True, False])
y = torch.tensor([True, True, False, False])
self.assertONNX(lambda x, y: torch.mul(x, y), (x, y))
def test_mul_fp_bool(self):
x = torch.tensor([9.4, 1.7, 3.6])
y = torch.tensor([True, True, False])
self.assertONNX(lambda x, y: torch.mul(x, y), (x, y))
def test_transpose(self):
x = torch.tensor([[0.0, 1.0], [2.0, 3.0]], requires_grad=True)
self.assertONNX(lambda x: x.transpose(0, 1).transpose(1, 0), x)
def test_chunk(self):
x = torch.tensor([0.0, 1.0, 2.0], requires_grad=True)
self.assertONNX(lambda x: x.chunk(2), x)
def test_split(self):
x = torch.tensor(
[[0.0, 1.0, 1.0, 0.0, 2.0, 2.0], [2.0, 3.0, 3.0, 2.0, 1.0, 1.0]]
)
self.assertONNX(lambda x: torch.split(x, 2, 1), x)
def test_split_with_sizes(self):
x = torch.tensor(
[[0.0, 1.0, 1.0, 0.0, 2.0, 2.0], [2.0, 3.0, 3.0, 2.0, 1.0, 1.0]]
)
self.assertONNX(lambda x: torch.split(x, [2, 1, 3], 1), x)
def test_concat2(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
self.assertONNX(lambda inputs: torch.cat(inputs, 1), ((x, y),))
def test_mm(self):
m1 = torch.randn(2, 3, requires_grad=True)
m2 = torch.randn(3, 4, requires_grad=True)
self.assertONNX(torch.mm, (m1, m2))
def test_addmm(self):
m1 = torch.randn(2, 3, requires_grad=True)
m2 = torch.randn(3, 4, requires_grad=True)
m3 = torch.randn(4, requires_grad=True)
self.assertONNX(
lambda x, y, z: torch.addmm(torch.addmm(z, x, y), x, y), (m1, m2, m3)
)
def test_permute2(self):
x = torch.tensor([[[[[[0.0]]]]]], requires_grad=True)
self.assertONNX(lambda x: x.permute(0, 1, 4, 2, 5, 3), x)
def test_pad(self):
x = torch.tensor(
[[[[0.0, 1.0, 1.0, 1.0], [2.0, 3.0, 7.0, 7.0]]]], requires_grad=True
)
self.assertONNX(nn.ReflectionPad2d((2, 3, 0, 1)), x)
def test_params(self):
x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True)
y = nn.Parameter(torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True))
self.assertONNX(
lambda x, y: -torch.sigmoid(torch.tanh(x * (x + y))),
x,
params=(y,),
keep_initializers_as_inputs=True,
)
def test_params_onnx_irv4(self):
x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True)
y = nn.Parameter(torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True))
self.assertONNX(
lambda x, y: -torch.sigmoid(torch.tanh(x * (x + y))),
x,
params=(y,),
keep_initializers_as_inputs=False,
)
def test_symbolic_mismatch(self):
class MyFun(Function):
@staticmethod
def symbolic(g, x):
# The inside of this function should never be invoked, because
# we will fail due to an argument mismatch first.
raise AssertionError()
@staticmethod
def forward(ctx, x, y):
return x + y
x = torch.ones(2, 2)
y = torch.ones(2, 2)
# NB: Don't use an expect test here; the type error message wobbles depending
# on the Python version
with self.assertRaisesRegex(TypeError, "occurred when translating MyFun"):
export_to_pbtxt(FuncModule(MyFun().apply), (x, y))
# TODO: Do an nn style test for these
def test_batchnorm(self):
x = torch.ones(2, 2, 2, 2, requires_grad=True)
self.assertONNX(nn.BatchNorm2d(2), x, keep_initializers_as_inputs=True)
def test_batchnorm_onnx_irv4(self):
x = torch.ones(2, 2, 2, 2, requires_grad=True)
self.assertONNX(nn.BatchNorm2d(2), x)
def test_batchnorm_1d(self):
x = torch.ones(2, 2, requires_grad=True)
self.assertONNX(nn.BatchNorm1d(2), x, keep_initializers_as_inputs=True)
def test_batchnorm_training(self):
x = torch.ones(2, 2, 2, 2, requires_grad=True)
self.assertONNX(
nn.BatchNorm2d(2),
x,
training=torch.onnx.TrainingMode.TRAINING,
keep_initializers_as_inputs=True,
)
def test_conv(self):
x = torch.ones(20, 16, 50, 40, requires_grad=True)
self.assertONNX(
nn.Conv2d(16, 13, 3, bias=False), x, keep_initializers_as_inputs=True
)
def test_conv_onnx_irv4(self):
x = torch.ones(20, 16, 50, 40, requires_grad=True)
self.assertONNX(nn.Conv2d(16, 13, 3, bias=False), x)
def test_conv_onnx_irv4_opset8(self):
# This test checks that for opset 8 (or lower), even if
# keep_initializers_as_inputs is set to False, it is ignored,
# and initializers are listed as ONNX graph inputs, in accordance
# with ONNX IR v3 semantics (which apply to opset version <= 8).
x = torch.ones(1, 2, 5, 7, requires_grad=True)
conv_node = nn.Conv2d(2, 4, 3, bias=False)
conv_node.weight.data.fill_(1.0)
self.assertONNX(
conv_node, x, opset_version=8, keep_initializers_as_inputs=False
)
def test_conv_variable_length(self):
x = torch.ones(5, 3, 6, 6, requires_grad=True)
model = torch.nn.Conv2d(3, 2, 3)
dynamic_axes = {
"input_1": [0, 2, 3],
"output_1": {0: "output_1_variable_dim_0", 1: "output_1_variable_dim_1"},
}
model_proto_file = tempfile.NamedTemporaryFile()
torch.onnx.export(
model,
x,
model_proto_file.name,
verbose=True,
input_names=["input_1"],
output_names=["output_1"],
dynamic_axes=dynamic_axes,
)
import onnx
onnx_model = onnx.load(model_proto_file.name)
onnx.checker.check_model(onnx_model)
# Assert that default dynamic axis names are generated when custom names are not provided
assert (
onnx_model.graph.input[0].type.tensor_type.shape.dim[0].dim_param
== "input_1_dynamic_axes_1"
)
assert (
onnx_model.graph.input[0].type.tensor_type.shape.dim[2].dim_param
== "input_1_dynamic_axes_2"
)
assert (
onnx_model.graph.input[0].type.tensor_type.shape.dim[3].dim_param
== "input_1_dynamic_axes_3"
)
# Assert that custom names are applied when provided
assert (
onnx_model.graph.output[0].type.tensor_type.shape.dim[0].dim_param
== "output_1_variable_dim_0"
)
assert (
onnx_model.graph.output[0].type.tensor_type.shape.dim[1].dim_param
== "output_1_variable_dim_1"
)
def test_convtranspose(self):
x = torch.ones(2, 3, 4, 5, requires_grad=True)
self.assertONNX(
nn.ConvTranspose2d(
3, 3, 3, stride=3, bias=False, padding=1, output_padding=2
),
x,
keep_initializers_as_inputs=True,
)
def test_maxpool(self):
x = torch.randn(20, 16, 50)
self.assertONNX(nn.MaxPool1d(3, stride=2), x)
def test_maxpool_dilations(self):
x = torch.randn(20, 16, 50)
self.assertONNX(nn.MaxPool1d(2, stride=1, dilation=2), x, opset_version=10)
def test_avg_pool2d(self):
x = torch.randn(20, 16, 50, 32)
self.assertONNX(nn.AvgPool2d(3, stride=2), x)
def test_maxpool_indices(self):
x = torch.randn(20, 16, 50)
self.assertONNX(nn.MaxPool1d(3, stride=2, return_indices=True), x)
@skipIfCaffe2
def test_at_op(self):
x = torch.randn(3, 4)
class MyFun(Function):
@staticmethod
def symbolic(g, x):
return g.at("add", x, x)
@staticmethod
def forward(ctx, x):
return x + x
class MyModule(Module):
def forward(self, x):
return MyFun.apply(x)
self.assertONNX(
MyModule(),
x,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
)
def test_clip(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.clamp(x, min=-0.5, max=0.5), x)
def test_clip_min(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: x.clamp(min=-0.1), x)
def test_clip_max(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: x.clamp(max=0.1), x)
def test_hardtanh(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.nn.Hardtanh(-0.5, 0.5)(x), x)
def test_full(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.full(x.shape, 2.0), x)
def test_full_like(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.full_like(x, 2), x)
def test_max(self):
x = torch.randn(3, 4, requires_grad=True)
y = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x, y: torch.max(x, y), (x, y))
def test_min(self):
x = torch.randn(3, 4, requires_grad=True)
y = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x, y: torch.min(x, y), (x, y))
def test_mean(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.mean(x), x)
def test_reduced_mean(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.mean(x, dim=2), x)
def test_reduced_mean_keepdim(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.mean(x, dim=(2, 3), keepdim=True), x)
def test_mean_dtype(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.mean(x, dtype=torch.double), x)
def test_reduced_mean_dtype(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.mean(x, dim=0, dtype=torch.double), x)
def test_sum(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.sum(x), x)
def test_sum_dtype(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.sum(x, dtype=torch.double), x)
def test_reduced_sum_dtype(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.sum(x, dim=0, dtype=torch.double), x)
def test_reduced_sum(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.sum(x, dim=(1, 2)), x)
def test_reduced_sum_keepdim(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.sum(x, dim=2, keepdim=True), x)
def test_prod(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.prod(x), x)
def test_reduced_prod(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.prod(x, dim=2), x)
def test_reduced_prod_keepdim(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.prod(x, dim=2, keepdim=True), x)
def test_prod_dtype(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.prod(x, dtype=torch.double), x)
def test_reduced_prod_dtype(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.prod(x, dim=0, dtype=torch.double), x)
def test_sqrt(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.sqrt(x), x)
def test_rsqrt(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.rsqrt(x), x)
def test_equal(self):
x = torch.randn(1, 2, 3, 1, requires_grad=False).int()
y = torch.randn(1, 4, requires_grad=False).int()
self.assertONNX(lambda x, y: x == y, (x, y))
def test_lt(self):
x = torch.randn(1, 2, 3, 1, requires_grad=False).int()
y = torch.randn(1, 4, requires_grad=False).int()
self.assertONNX(lambda x, y: x < y, (x, y))
def test_gt(self):
x = torch.randn(1, 2, 3, 1, requires_grad=False).int()
y = torch.randn(1, 4, requires_grad=False).int()
self.assertONNX(lambda x, y: x > y, (x, y))
def test_le(self):
x = torch.randn(3, 4, requires_grad=False).int()
y = torch.randn(3, 4, requires_grad=False).int()
self.assertONNX(lambda x, y: x <= y, (x, y))
def test_ge(self):
x = torch.randn(3, 4, requires_grad=False).int()
y = torch.randn(3, 4, requires_grad=False).int()
self.assertONNX(lambda x, y: x >= y, (x, y))
def test_exp(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x: x.exp(), x)
def test_sin(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x: x.sin(), x)
def test_cos(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x: x.cos(), x)
def test_tan(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x: x.tan(), x)
def test_asin(self):
x = torch.rand(3, 4, requires_grad=True)
self.assertONNX(lambda x: x.asin(), x)
def test_acos(self):
x = torch.rand(3, 4, requires_grad=True)
self.assertONNX(lambda x: x.acos(), x)
def test_slice(self):
x = torch.rand(3, 4, requires_grad=True)
self.assertONNX(lambda x: x[:, 1:2], x)
def test_slice_dynamic(self):
x = torch.rand(3, 4, requires_grad=True)
self.assertONNX(lambda x: x[x.size(0) :, x.size(1) - 3], x, opset_version=10)
def test_sign(self):
x = torch.rand(3, 4, requires_grad=True)
self.assertONNX(lambda x: x.sign(), x)
def test_narrow(self):
x = torch.randn(3, 3, requires_grad=True)
self.assertONNX(lambda x: torch.narrow(x, 0, 0, 2), x)
def test_atan(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x: x.atan(), x)
def test_view_flatten(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: x.view(x.size()[0], x.numel() // x.size()[0]), x)
def test_flatten(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.flatten(x), x)
def test_flatten2D(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.flatten(x, 1), x)
def test_isnan(self):
x = torch.tensor([1, float("nan"), 2])
self.assertONNX(lambda x: torch.isnan(x), x)
def test_argmax(self):
x = torch.randn(4, 4, requires_grad=True)
self.assertONNX(lambda x: torch.argmax(x, dim=1), x)
def test_logsoftmax(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(nn.LogSoftmax(dim=3), x)
def test_pow(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
y = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x, y: x.pow(y), (x, y))
def test_elu(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(nn.ELU(), x)
def test_selu(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(nn.SELU(), x)
def test_repeat(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: x.repeat(1, 2, 3, 4), x)
def test_repeat_dim_overflow(self):
x = torch.randn(1, 2, requires_grad=True)
self.assertONNX(lambda x: x.repeat(1, 2, 3, 4), x)
def test_norm_p1(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: x.norm(p=1, dim=2), (x))
def test_norm_p2(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: x.norm(p=2, dim=2), (x))
def test_upsample_nearest_scale(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(
lambda x: nn.functional.interpolate(
x, scale_factor=2.0, mode="nearest", recompute_scale_factor=False
),
x,
)
def test_upsample_nearest_scale_default_scale_factor(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(
lambda x: nn.functional.interpolate(x, scale_factor=2.0, mode="nearest"), x
)
def test_upsample_nearest_size(self):
x = torch.randn(1, 2, 3, 4, requires_grad=True)
self.assertONNX(
lambda x: nn.functional.interpolate(x, size=16, mode="nearest"), x
)
def test_unsqueeze(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x: x.unsqueeze(len(x.shape)), x)
def test_batchnorm_noaffine(self):
x = torch.randn(128, 128, 1, 1, requires_grad=True)
self.assertONNX(
nn.BatchNorm2d(128, affine=False, momentum=0.3),
x,
keep_initializers_as_inputs=True,
)
@skipIfCaffe2
def test_embedding_bags(self):
emb_bag = nn.EmbeddingBag(10, 8)
input = torch.tensor([1, 2, 3, 4]).long()
offset = torch.tensor([0]).long()
self.assertONNX(
emb_bag,
(input, offset),
keep_initializers_as_inputs=True,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
)
def test_implicit_expand(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x: x + 1, x)
def test_reduce_sum_negative_indices(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x: x.sum(-1), x)
def test_randn(self):
x = torch.randn(1, 2, 3, 4)
self.assertONNX(lambda x: torch.randn(1, 2, 3, 4) + x, x)
def test_rand(self):
x = torch.rand(1, 2, 3, 4)
self.assertONNX(lambda x: torch.rand(1, 2, 3, 4) + x, x)
def test_rrelu(self):
x = torch.randn(1, 2, 3, 4)
self.assertONNX(torch.nn.RReLU(), x)
def test_prelu(self):
x = torch.randn(1, 2, 3, 4)
self.assertONNX(torch.nn.PReLU(2), x, keep_initializers_as_inputs=True)
def test_log_sigmoid(self):
x = torch.randn(1, 2, 3, 4)
self.assertONNX(torch.nn.LogSigmoid(), x)
def test_linear(self):
x = torch.randn(3, 4)
self.assertONNX(
torch.nn.Linear(4, 5, bias=True), x, keep_initializers_as_inputs=True
)
def test_empty_like(self):
x = torch.randn(5, 8, requires_grad=True)
self.assertONNX(lambda x: torch.empty_like(x), x)
def test_zeros_like(self):
x = torch.randn(5, 8, requires_grad=True)
self.assertONNX(lambda x: torch.zeros_like(x), x)
def test_ones_like(self):
x = torch.randn(6, 10, requires_grad=True)
self.assertONNX(lambda x: torch.ones_like(x), x)
def test_expand(self):
x = torch.randn(6, 1, requires_grad=True)
self.assertONNX(lambda x: x.expand(4, 6, 2), x)
def test_ne(self):
x = torch.randn(1, 2, 3, 1, requires_grad=False).int()
y = torch.randn(1, 4, requires_grad=False).int()
self.assertONNX(lambda x, y: torch.ne(x, y), (x, y))
def test_reducemax(self):
x = torch.randn(1, 2, 3, 4)
self.assertONNX(lambda x: torch.max(x), x)
def test_reducemin(self):
x = torch.randn(1, 2, 3, 4)
self.assertONNX(lambda x: torch.min(x), x)
def test_erf(self):
x = torch.randn(1, 2, 3, 4)
self.assertONNX(lambda x: x.erf(), x)
def test_dropout(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.max(functional.dropout(x, training=False)), x)
def test_dropout_default(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(
lambda x: torch.max(
functional.dropout(
x,
)
),
x,
)
def test_dropout_training(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(
lambda x: torch.max(functional.dropout(x)),
x,
training=torch.onnx.TrainingMode.TRAINING,
)
def test_dropout_opset12(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(
lambda x: torch.max(functional.dropout(x, training=False)),
x,
opset_version=12,
)
def test_dropout_training_opset12(self):
x = torch.randn(3, 4, requires_grad=True)
self.assertONNX(
lambda x: torch.max(functional.dropout(x)),
x,
opset_version=12,
training=torch.onnx.TrainingMode.TRAINING,
)
def test_nonzero(self):
x = torch.tensor(
[[[2.0, 2.0], [1.0, 0.0]], [[0.0, 0.0], [1.0, 1.0]]], requires_grad=True
)
self.assertONNX(lambda x: torch.nonzero(x), x)
def test_gather(self):
data = torch.randn(3, 4, 3, requires_grad=True)
index = torch.tensor([2, 0]).view(1, 2, 1).expand(3, 2, 3)
self.assertONNX(lambda data, index: data.gather(1, index), (data, index))
def test_gather_opset11(self):
data = torch.randn(3, 4, 3, requires_grad=True)
index = torch.tensor([2, 0]).view(1, 2, 1).expand(3, 2, 3)
self.assertONNX(
lambda data, index: data.gather(1, index), (data, index), opset_version=11
)
def test_scatter_add(self):
data = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
self.assertONNX(
lambda data, index: data.scatter_add(1, indices, values),
(data, (indices, values)),
)
def test_scatter_add_opset11(self):
data = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
self.assertONNX(
lambda data, index: data.scatter_add(1, indices, values),
(data, (indices, values)),
opset_version=11,
)
def test_scatter_add_opset16(self):
data = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
indices = torch.tensor([[0, 0], [1, 1], [0, 1]], dtype=torch.int64)
values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
self.assertONNX(
lambda data, index: data.scatter_add(1, indices, values),
(data, (indices, values)),
opset_version=16,
)
def test_master_opset(self):
x = torch.randn(2, 3).float()
y = torch.randn(2, 3).float()
self.assertONNX(lambda x, y: x + y, (x, y), opset_version=10)
def test_std(self):
x = torch.randn(2, 3, 4).float()
self.assertONNX(
lambda x: torch.std(x, dim=(0, 1), unbiased=True, keepdim=True), x
)
def test_cumsum(self):
x = torch.randn(2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: torch.cumsum(x, dim=1), x, opset_version=11)
# Github Issue: https://github.com/pytorch/pytorch/issues/71095
# def test_c2_op(self):
# class MyModel(torch.nn.Module):
# def __init__(self):
# super(MyModel, self).__init__()
#
# def forward(self, scores, bbox_deltas, im_info, anchors):
# a, b = torch.ops._caffe2.GenerateProposals(
# (scores), (bbox_deltas), (im_info), (anchors),
# 2.0, 6000, 300, 0.7, 16, True, -90, 90, 1.0, True,
# )
# return a, b
#
# model = MyModel()
# A = 4
# H = 10
# W = 8
# img_count = 3
# scores = torch.ones(img_count, A, H, W, dtype=torch.float32)
# bbox_deltas = torch.linspace(0, 10, steps=img_count * 4 * A * H * W,
# dtype=torch.float32)
# bbox_deltas = bbox_deltas.view(img_count, 4 * A, H, W)
# im_info = torch.ones(img_count, 3, dtype=torch.float32)
# anchors = torch.ones(A, 4, dtype=torch.float32)
# inputs = (scores, bbox_deltas, im_info, anchors)
# self.assertONNX(model, inputs, custom_opsets={"org.pytorch._caffe2": 0})
def test_dict(self):
class MyModel(torch.nn.Module):
def forward(self, x_in):
x_out = {}
x_out["test_key_out"] = torch.add(
x_in[list(x_in.keys())[0]], list(x_in.keys())[0]
)
return x_out
x = {torch.tensor(1.0): torch.randn(1, 2, 3)}
self.assertONNX(MyModel(), (x, {}))
def test_dict_str(self):
class MyModel(torch.nn.Module):
def forward(self, x_in):
x_out = {}
x_out["test_key_out"] = torch.add(x_in["test_key_in"], 2.0)
return x_out
x = {"test_key_in": torch.randn(1, 2, 3)}
self.assertONNX(MyModel(), (x, {}))
def test_arange_dynamic(self):
class TestModel(torch.nn.Module):
def forward(self, input):
return torch.arange(input.shape[0], input.shape[0] + 5, 0.5)
input = torch.randn(5, 3, 2)
self.assertONNX(TestModel(), input, opset_version=11)
def test_bitshift(self):
class BitshiftModel(torch.nn.Module):
def forward(self, input):
return input >> 1, input >> 2
input = torch.arange(24, dtype=torch.uint8).reshape(3, 4, 2)
self.assertONNX(BitshiftModel(), input, opset_version=11)
@skipIfCaffe2
def test_layer_norm_aten(self):
model = torch.nn.LayerNorm([10, 10])
x = torch.randn(20, 5, 10, 10)
self.assertONNX(
model,
x,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
)
def test_pixel_shuffle(self):
x = torch.randn(2, 8, 3, 4).float()
self.assertONNX(
lambda x: torch.pixel_shuffle(x, upscale_factor=2), x, opset_version=11
)
def test_frobenius_norm(self):
x = torch.randn(2, 3, 4).float()
self.assertONNX(lambda x: torch.norm(x, p="fro", dim=(0, 1), keepdim=True), x)
def test_unfold(self):
x = torch.randn(2, 3, 4, requires_grad=True)
self.assertONNX(lambda x: x.unfold(dimension=2, size=2, step=2), x)
def test_remainder(self):
x = torch.randn(2, 3, 4)
y = torch.randn(2, 1, 4)
self.assertONNX(lambda x, y: torch.remainder(x, y), (x, y))
def test_fmod(self):
x = torch.randn(2, 3, 4)
y = torch.randn(2, 1, 4)
self.assertONNX(lambda x, y: torch.fmod(x, y), (x, y), opset_version=10)
def test_gelu(self):
x = torch.randn(2, 3, 4, 5, requires_grad=True)
self.assertONNX(lambda x: torch.nn.functional.gelu(x), x)
def test_unique(self):
x = torch.randint(3, (2, 3, 4, 5)).float()
self.assertONNX(
lambda x: torch.unique(
x, dim=0, sorted=True, return_inverse=False, return_counts=True
),
x,
opset_version=11,
)
def test_meshgrid(self):
x = torch.ones(3, requires_grad=True)
y = torch.zeros(4, requires_grad=True)
z = torch.ones(5, requires_grad=True)
self.assertONNX(lambda x, y, z: torch.meshgrid(x, y, z), (x, y, z))
def test_topk(self):
x = torch.arange(1.0, 6.0, requires_grad=True)
k = torch.tensor(3)
self.assertONNX(lambda x, k: torch.topk(x, k), (x, k), opset_version=10)
def test_topk_smallest_unsorted(self):
x = torch.arange(1.0, 6.0, requires_grad=True)
k = torch.tensor(3)
self.assertONNX(
lambda x, k: torch.topk(x, k, largest=False, sorted=False),
(x, k),
opset_version=11,
)
def test_baddbmm(self):
x = torch.randn(10, 3, 5)
b1 = torch.randn(10, 3, 4)
b2 = torch.randn(10, 4, 5)
self.assertONNX(lambda x, b1, b2: torch.baddbmm(x, b1, b2), (x, b1, b2))
def test_round(self):
x = torch.tensor([0.9920, -1.0362, -1.5000, 2.5000], requires_grad=True)
self.assertONNX(lambda x: torch.round(x), x, opset_version=11)
def test_dim(self):
x = torch.ones((2, 2), requires_grad=True)
self.assertONNX(lambda x: torch.scalar_tensor(x.dim()), x)
@skipIfNoLapack
def test_det(self):
x = torch.randn(2, 3, 5, 5, device=torch.device("cpu"))
self.assertONNX(lambda x: torch.det(x), x, opset_version=11)
self.assertONNX(lambda x: torch.linalg.det(x), x, opset_version=11)
def test_softmaxcrossentropy(self):
x = torch.randn(3, 5)
y = torch.empty(3, dtype=torch.long).random_(5)
self.assertONNX(torch.nn.CrossEntropyLoss(), (x, y), opset_version=12)
def test_softmaxcrossentropy_ignore_index(self):
x = torch.randn(3, 5)
y = torch.empty(3, dtype=torch.long).random_(5)
self.assertONNX(
torch.nn.CrossEntropyLoss(ignore_index=1), (x, y), opset_version=12
)
def test_softmaxcrossentropy_weights(self):
x = torch.randn(3, 5)
y = torch.empty(3, dtype=torch.long).random_(5)
self.assertONNX(
torch.nn.CrossEntropyLoss(weight=torch.randn(5)), (x, y), opset_version=12
)
def test_softmaxcrossentropy_3d(self):
x = torch.randn(3, 5, 2)
y = torch.empty(3, 2, dtype=torch.long).random_(5)
self.assertONNX(torch.nn.CrossEntropyLoss(), (x, y), opset_version=12)
def test_softmaxcrossentropy_3d_none(self):
x = torch.randn(3, 5, 2)
y = torch.empty(3, 2, dtype=torch.long).random_(5)
self.assertONNX(
torch.nn.CrossEntropyLoss(reduction="none"), (x, y), opset_version=12
)
def test_softmaxcrossentropy_4d(self):
x = torch.randn(3, 5, 2, 1)
y = torch.empty(3, 2, 1, dtype=torch.long).random_(5)
self.assertONNX(torch.nn.CrossEntropyLoss(), (x, y), opset_version=12)
def test_lstm_none_sequence_lens(self):
"""Test symbolic shape inference for LSTM when the input sequence_lens = None."""
input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
h0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)
c0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)
class LSTMModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.rnn = torch.nn.LSTM(
RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False
)
def forward(self, x, h0, c0):
a, b = self.rnn(x, (h0, c0))
return torch.ones(b[0].shape)
self.assertONNX(
LSTMModel(),
(input, h0, c0),
input_names=["x", "y"],
dynamic_axes={"x": {0: "batch"}},
opset_version=12,
)
def test_dynamic_axes_add(self):
m1 = torch.randn(2, 3, requires_grad=True)
m2 = torch.randn(2, 1, requires_grad=True)
self.assertONNX(
lambda x, y: torch.add(x, y),
(m1, m2),
input_names=["input_1", "input_2"],
dynamic_axes={"input_1": {1: "dim_1"}, "input_2": {1: "dim_2"}},
opset_version=12,
)
def test_dynamic_axes_add_inputs_same_symbolic_shape(self):
m1 = torch.randn(2, 3, requires_grad=True)
self.assertONNX(
lambda x: torch.add(x, x),
(m1,),
input_names=["input_1"],
dynamic_axes={"input_1": {1: "dim_1"}},
opset_version=12,
)
def test_dynamic_axes_matmul(self):
m1 = torch.randn(2, 2, 4, requires_grad=True)
m2 = torch.randn(2, 4, 3, requires_grad=True)
self.assertONNX(
lambda x, y: torch.matmul(x, y),
(m1, m2),
input_names=["input_1", "input_2"],
dynamic_axes={"input_1": {1: "dim_0"}, "input_2": {2: "dim_1"}},
opset_version=12,
)
def test_dynamic_axes_reduce_mean(self):
m1 = torch.randn(2, 3, 4, requires_grad=True)
self.assertONNX(
lambda x: torch.mean(x, dim=1),
(m1),
input_names=["input"],
dynamic_axes={"input": {1: "dim_1", 2: "dim_2"}},
opset_version=12,
)
def test_dynamic_axes_unchange(self):
"""Test ProcessUnchangeNode in symbolic shape inference."""
m1 = torch.randn(2, 3, requires_grad=True)
self.assertONNX(
lambda x: torch.softmax(x, dim=0),
(m1,),
input_names=["input"],
dynamic_axes={"input": {1: "dim_1"}},
opset_version=12,
)
def test_aten_embedding_1(self):
_onnx_opset_version = 12
@parse_args("v", "v", "i", "b", "b")
def embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse):
custom_attributes_json = (
"{"
f'"padding_idx":{str(padding_idx)},'
f'"scale_grad_by_freq":{str(scale_grad_by_freq).lower()},'
f'"sparse":{str(sparse).lower()}'
"}"
)
output = g.at(
"embedding",
weight,
indices,
custom_attributes_json_s=custom_attributes_json,
)
return output
torch.onnx.register_custom_op_symbolic(
"::embedding", embedding, _onnx_opset_version
)
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.Embedding(4, 8)
def forward(self, x, y):
res = self.emb(x)
res = res + y
return torch.ones(res.shape[0])
model = Model()
x = torch.ones(32, dtype=torch.long)
y = torch.randn(1, 8)
self.assertONNX(model, (x, y), opset_version=_onnx_opset_version)
torch.onnx.unregister_custom_op_symbolic("::embedding", _onnx_opset_version)
# This is test_aten_embedding_1 with shape inference on custom symbolic aten::embedding.
@skipIfCaffe2
def test_aten_embedding_2(self):
_onnx_opset_version = 12
@parse_args("v", "v", "i", "b", "b")
def embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse):
custom_attributes_json = (
"{"
f'"padding_idx":{str(padding_idx)},'
f'"scale_grad_by_freq":{str(scale_grad_by_freq).lower()},'
f'"sparse":{str(sparse).lower()}'
"}"
)
output = g.at(
"embedding",
weight,
indices,
custom_attributes_json_s=custom_attributes_json,
)
# do shape inference and set it via setType
indices_shape = _get_tensor_sizes(indices)
if indices_shape is not None and hasattr(weight.type(), "with_sizes"):
output_type = weight.type().with_sizes(
indices_shape + [_get_tensor_dim_size(weight, 1)]
)
output.setType(output_type)
return output
torch.onnx.register_custom_op_symbolic(
"::embedding", embedding, _onnx_opset_version
)
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.Embedding(4, 8)
def forward(self, x, y):
res = self.emb(x)
res = res + y
return torch.ones(res.shape[0])
model = Model()
x = torch.ones(32, dtype=torch.long)
y = torch.randn(1, 8)
self.assertONNX(
model,
(x, y),
opset_version=_onnx_opset_version,
input_names=["input_1", "input_2"],
dynamic_axes={"input_1": {0: "dim_0"}, "input_2": {0: "dim_1", 1: "dim_2"}},
keep_initializers_as_inputs=False,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
)
torch.onnx.unregister_custom_op_symbolic("::embedding", _onnx_opset_version)
# Without shapeValueMap, the onnx graph looks like:
# graph(%0 : Float(*, 1, 128, 1, strides=[128, 128, 1, 1], requires_grad=0, device=cpu)):
# %2 : Long(4, strides=[1], device=cpu) = onnx::Shape(%0)
# %4 : Long(device=cpu) = onnx::Constant[value={0}]()
# %5 : Long(device=cpu) = onnx::Gather[axis=0](%2, %4)
# %6 : Long(device=cpu) = onnx::Constant[value={1}]()
# %7 : Long(device=cpu) = onnx::Constant[value={2}]()
# %8 : Long(device=cpu) = onnx::Constant[value={-1}]()
# %9 : int[] = prim::ListConstruct(%5, %6, %7, %8)
# %10 : Float(*, *, *, *, strides=[128, 128, 64, 1], requires_grad=0, device=cpu) = onnx::Reshape(%0, %9)
# ...
# With shapeValueMap, it becomes:
# ...
# %10 : Float(*, 1, 2, 64, strides=[128, 128, 64, 1], requires_grad=0, device=cpu) = onnx::Reshape(%0, %9)
# ...
def test_shape_value_map(self):
class RSoftMax(torch.nn.Module):
def __init__(self, radix, cardinality):
super().__init__()
self.radix = radix
self.cardinality = cardinality
def forward(self, x):
batch = x.size(0)
x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
x = F.softmax(x, dim=1)
x = x.reshape(batch, -1)
return x
radix = 2
cardinality = 1
x = torch.randn(10, 1, 128, 1)
self.assertONNX(
RSoftMax(radix, cardinality),
(x,),
input_names=["x"],
dynamic_axes={"x": {0: "dim_0"}},
)
if __name__ == "__main__":
no_onnx_dep_flag = "--no-onnx"
_onnx_dep = no_onnx_dep_flag not in common_utils.UNITTEST_ARGS
if no_onnx_dep_flag in common_utils.UNITTEST_ARGS:
common_utils.UNITTEST_ARGS.remove(no_onnx_dep_flag)
onnx_test_flag = "--produce-onnx-test-data"
_onnx_test = onnx_test_flag in common_utils.UNITTEST_ARGS
if onnx_test_flag in common_utils.UNITTEST_ARGS:
common_utils.UNITTEST_ARGS.remove(onnx_test_flag)
if _onnx_test:
_onnx_dep = True
import onnx_test_common
for d in glob.glob(
os.path.join(onnx_test_common.pytorch_operator_dir, "test_operator_*")
):
shutil.rmtree(d)
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_operators.py
|
# Owner(s): ["module: onnx"]
import unittest
import torch
from onnx_test_common import run_model_test
from torch.onnx import OperatorExportTypes
from torch.onnx._globals import GLOBALS
from torch.onnx.utils import _model_to_graph
class TestAutogradFuns(unittest.TestCase):
opset_version = GLOBALS.export_onnx_opset_version
keep_initializers_as_inputs = False
onnx_shape_inference = True
def test_single_output(self):
class SingleOut(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i.exp()
result = result.log()
ctx.save_for_backward(result)
return result
@staticmethod
def backward(ctx, grad_output):
(result,) = ctx.saved_tensors
return grad_output * result
class Caller(torch.nn.Module):
def forward(self, input):
result = input + 5
return SingleOut.apply(result) + 3
model = Caller()
input = torch.ones(1)
run_model_test(self, model, input_args=(input,))
def test_multi_output(self):
class MultiOut(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result_exp = i.exp()
result_log = result_exp.log()
ctx.save_for_backward(result_exp, result_log)
return result_exp, result_log
@staticmethod
def backward(ctx, grad_output):
(result,) = ctx.saved_tensors
return grad_output * result
class Caller(torch.nn.Module):
def forward(self, input):
return MultiOut.apply(input)
model = Caller()
input = torch.ones(1, 5)
run_model_test(self, model, input_args=(input,))
def test_partial_output(self):
class PartialOut(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
values, indices = torch.topk(input, 3)
return values
class Caller(torch.nn.Module):
def forward(self, input):
return PartialOut.apply(input)
model = Caller()
input = torch.ones(1, 5)
run_model_test(self, model, input_args=(input,))
def test_nested_autograd(self):
class Child(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i.log()
result_log = result.log()
ctx.save_for_backward(result_log)
return result_log
@staticmethod
def backward(ctx, grad_output):
(result,) = ctx.saved_tensors
return grad_output * result
class Parent(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result_exp = i.exp()
result_log = Child.apply(result_exp)
ctx.save_for_backward(result_exp, result_log)
return result_exp, result_log
@staticmethod
def backward(ctx, grad_output):
(result,) = ctx.saved_tensors
return grad_output * result
class Caller(torch.nn.Module):
def forward(self, input):
return Parent.apply(input)
model = Caller()
input = torch.ones(1, 5)
run_model_test(self, model, input_args=(input,))
# Run export in ONNX_FALLTHROUGH mode as torch.erf() is not supported
def test_aten_unsupported(self):
class Erf(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
erf_out = torch.special.erf(x)
ctx.save_for_backward(erf_out)
return erf_out
@staticmethod
def backward(ctx, grad_output):
                (result,) = ctx.saved_tensors
                return torch.special.erfinv(result)
class Caller(torch.nn.Module):
def forward(self, input):
return Erf.apply(input)
model = Caller()
input = torch.ones(1, 5)
# Test ONNX_FALLTHROUGH_MODE
graph, _, _ = _model_to_graph(
model,
(input,),
operator_export_type=OperatorExportTypes.ONNX_FALLTHROUGH,
)
iter = graph.nodes()
self.assertEqual(next(iter).kind(), "prim::PythonOp")
# Test ATEN_FALLBACK_MODE
graph, _, _ = _model_to_graph(
model,
(input,),
operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
)
iter = graph.nodes()
self.assertEqual(next(iter).kind(), "prim::PythonOp")
def test_inline_and_symbolic(self):
class Exp(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
                ctx.save_for_backward(i)
return i.exp()
@staticmethod
def symbolic(g, input):
return g.op("Exp", input)
class LogLog(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
                ctx.save_for_backward(i)
return i.log().log()
class Caller(torch.nn.Module):
def forward(self, input):
exp_result = Exp.apply(input)
return LogLog.apply(exp_result)
model = Caller()
input = torch.ones(1)
run_model_test(self, model, input_args=(input,))
def test_inline_with_scoped_tracing(self):
class Exp(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
                ctx.save_for_backward(i)
return i.exp()
@staticmethod
def symbolic(g, input):
return g.op("Exp", input)
class LogLog(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
                ctx.save_for_backward(i)
return i.log().log()
class Caller(torch.nn.Module):
def forward(self, input):
exp_result = Exp.apply(input)
return LogLog.apply(exp_result)
model = Caller()
input = torch.ones(1)
torch.jit._trace._trace_module_map = {
_m: torch.typename(type(_m)) for _m in model.modules()
}
run_model_test(self, model, input_args=(input,))
torch.jit._trace._trace_module_map = None
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
test/onnx/test_autograd_funs.py
|
# Owner(s): ["module: onnx"]
import caffe2.python.onnx.backend as c2
import numpy as np
import onnx
import onnx_test_common
import torch
import torch.utils.cpp_extension
from test_pytorch_onnx_caffe2 import do_export
from torch.onnx import symbolic_helper
from torch.testing._internal import common_utils
class TestCustomOps(common_utils.TestCase):
def test_custom_add(self):
op_source = """
#include <torch/script.h>
torch::Tensor custom_add(torch::Tensor self, torch::Tensor other) {
return self + other;
}
static auto registry =
torch::RegisterOperators("custom_namespace::custom_add", &custom_add);
"""
torch.utils.cpp_extension.load_inline(
name="custom_add",
cpp_sources=op_source,
is_python_module=False,
verbose=True,
)
class CustomAddModel(torch.nn.Module):
def forward(self, a, b):
return torch.ops.custom_namespace.custom_add(a, b)
def symbolic_custom_add(g, self, other):
return g.op("Add", self, other)
from torch.onnx import register_custom_op_symbolic
register_custom_op_symbolic(
"custom_namespace::custom_add", symbolic_custom_add, 9
)
x = torch.randn(2, 3, 4, requires_grad=False)
y = torch.randn(2, 3, 4, requires_grad=False)
model = CustomAddModel()
onnxir, _ = do_export(model, (x, y), opset_version=11)
onnx_model = onnx.ModelProto.FromString(onnxir)
prepared = c2.prepare(onnx_model)
caffe2_out = prepared.run(inputs=[x.cpu().numpy(), y.cpu().numpy()])
np.testing.assert_array_equal(caffe2_out[0], model(x, y).cpu().numpy())
class TestCustomAutogradFunction(common_utils.TestCase):
opset_version = 9
keep_initializers_as_inputs = False
onnx_shape_inference = True
def test_symbolic(self):
class MyClip(torch.autograd.Function):
@staticmethod
def forward(ctx, input, scalar):
ctx.save_for_backward(input)
return input.clamp(min=scalar)
@staticmethod
def symbolic(g, input, scalar):
return g.op("Clip", input, min_f=scalar)
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.clip = MyClip.apply
def forward(self, x):
h = self.clip(x, 2)
return h
x = torch.randn(2, 3, 4, requires_grad=True)
model = MyModule()
onnx_test_common.run_model_test(self, model, input_args=(x,))
def test_register_custom_op(self):
class MyClip(torch.autograd.Function):
@staticmethod
def forward(ctx, input, scalar):
ctx.save_for_backward(input)
return input.clamp(min=scalar)
class MyRelu(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.clamp(min=0)
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.clip = MyClip.apply
self.relu = MyRelu.apply
def forward(self, x):
h = self.clip(x, 2)
h = self.relu(h)
return h
def symbolic_pythonop(ctx: torch.onnx.SymbolicContext, g, *args, **kwargs):
n = ctx.cur_node
name = kwargs["name"]
if name == "MyClip":
return g.op("Clip", args[0], min_f=args[1], outputs=n.outputsSize())
elif name == "MyRelu":
return g.op("Relu", args[0], outputs=n.outputsSize())
else:
return symbolic_helper._unimplemented(
"prim::PythonOp", "unknown node kind: " + name
)
from torch.onnx import register_custom_op_symbolic
register_custom_op_symbolic("prim::PythonOp", symbolic_pythonop, 1)
x = torch.randn(2, 3, 4, requires_grad=True)
model = MyModule()
onnx_test_common.run_model_test(self, model, input_args=(x,))
class TestExportAsContribOps(common_utils.TestCase):
opset_version = 14
keep_initializers_as_inputs = False
onnx_shape_inference = True
def test_contrib_op_with_loop(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.gelu = torch.nn.GELU(approximate="none")
def forward(self, x):
res = []
res2 = []
for i in range(x.size(0)):
if len(res) > 0:
res2.append(res[0])
else:
res2.append(self.gelu(x[0]))
res.append(x[0])
return torch.stack(res), torch.stack(res2)
def symbolic_custom_gelu(g, input, approximate):
return g.op("com.microsoft::Gelu", input).setType(input.type())
from torch.onnx import register_custom_op_symbolic
register_custom_op_symbolic("::gelu", symbolic_custom_gelu, 1)
x = torch.randn(3, 3, 4, requires_grad=True)
model = torch.jit.script(M())
onnx_test_common.run_model_test(self, model, input_args=(x,))
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_custom_ops.py
|
# Owner(s): ["module: onnx"]
import numpy as np
import torch
from pytorch_test_common import skipIfUnsupportedMinOpsetVersion
from torch.onnx import _constants, symbolic_helper
from torch.testing._internal import common_utils
def expect_tensor(scalar_type, shape=None):
def verify(actual_type):
np.testing.assert_equal(actual_type.scalarType(), scalar_type)
# if shape is not None:
# np.testing.assert_equal(actual_type.sizes(), shape)
if shape is not None:
np.testing.assert_equal(actual_type.varyingSizes(), shape)
return verify
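# Hedged note: expect_tensor("Float", shape=(2, 3)) returns a callable that asserts a
# graph output's scalar type and, when a shape is given, its varying sizes; the tests
# below pass one such callable per graph output to run_test.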
class TestONNXShapeInference(common_utils.TestCase):
def setUp(self):
self.opset_version = _constants.onnx_main_opset
symbolic_helper._set_onnx_shape_inference(True)
symbolic_helper._set_opset_version(self.opset_version)
def run_test(self, g, n, type_assertion_funcs):
if not isinstance(type_assertion_funcs, list):
type_assertion_funcs = [type_assertion_funcs]
torch._C._jit_pass_onnx_graph_shape_type_inference(g, {}, self.opset_version)
for out, type_assertion_func in zip(n.outputs(), type_assertion_funcs):
type_assertion_func(out.type())
def create_empty_graph(self):
g = torch._C.Graph()
# kick off initialization for ConstantMap.
torch._C._jit_pass_onnx_graph_shape_type_inference(g, {}, self.opset_version)
return g
def insert_tensor_constant(self, g, tensor):
return g.op("Constant", value_t=tensor)
def test_cast(self):
# Test cast with input of unknown scalar type.
g = self.create_empty_graph()
input = g.addInput()
cast_out = g.op("Cast", input, to_i=1)
self.run_test(g, cast_out.node(), expect_tensor("Float"))
def test_constant_of_shape(self):
# Test ConstantOfShape with input of onnx::Shape node.
g = self.create_empty_graph()
constant = self.insert_tensor_constant(g, torch.ones(1, 2, 3, 4))
shape = g.op("Shape", constant)
constant_of_shape = g.op("ConstantOfShape", shape, value_t=torch.tensor([2.0]))
self.run_test(
g, constant_of_shape.node(), expect_tensor("Float", shape=(1, 2, 3, 4))
)
def test_constant_of_shape_static(self):
# Test ConstantOfShape with input of prim::ListConstruct of static tensor
rank = 4
g = self.create_empty_graph()
constants = [
self.insert_tensor_constant(g, torch.tensor(i + 1)) for i in range(rank)
]
shape = g.op("prim::ListConstruct", *constants)
shape.setType(torch._C.ListType.ofInts())
constant_of_shape = g.op("ConstantOfShape", shape, value_t=torch.tensor([2.0]))
self.run_test(
g, constant_of_shape.node(), expect_tensor("Float", shape=(1, 2, 3, 4))
)
def test_constant_of_shape_dynamic(self):
# Test ConstantOfShape with input of prim::ListConstruct of dynamic tensor
rank = 4
g = self.create_empty_graph()
inputs = [g.addInput() for i in range(rank)]
shape = g.op("prim::ListConstruct", *inputs)
shape.setType(torch._C.ListType.ofInts())
constant_of_shape = g.op("ConstantOfShape", shape, value_t=torch.tensor([2.0]))
self.run_test(
g,
constant_of_shape.node(),
expect_tensor("Float", shape=(None, None, None, None)),
)
def test_gather_dynamic_index(self):
g = self.create_empty_graph()
input = g.addInput()
input.setType(
input.type().with_dtype(torch.float).with_sizes([None, 3, 16, 16])
)
indices = g.addInput()
indices.setType(indices.type().with_dtype(torch.int64).with_sizes([None]))
output = g.op("Gather", input, indices, axis_i=1)
self.run_test(
g, output.node(), expect_tensor("Float", shape=([None, None, 16, 16]))
)
def test_gather_scalar_index(self):
g = self.create_empty_graph()
input = g.addInput()
input.setType(
input.type().with_dtype(torch.float).with_sizes([None, 3, 16, 16])
)
indices = self.insert_tensor_constant(g, torch.tensor(1))
output = g.op("Gather", input, indices, axis_i=1)
self.run_test(g, output.node(), expect_tensor("Float", shape=([None, 16, 16])))
def test_reshape(self):
g = self.create_empty_graph()
constant = self.insert_tensor_constant(g, torch.ones(2, 16, 5, 5))
constant_2 = self.insert_tensor_constant(g, torch.tensor([2, 0, -1]))
shape = g.op("Reshape", constant, constant_2)
self.run_test(g, shape.node(), expect_tensor("Float", shape=(2, 16, 25)))
g = self.create_empty_graph()
constant = self.insert_tensor_constant(g, torch.ones(2, 16, 5, 4))
constant_2 = self.insert_tensor_constant(g, torch.tensor([-1, 0, 4]))
shape = g.op("Reshape", constant, constant_2)
self.run_test(g, shape.node(), expect_tensor("Float", shape=(10, 16, 4)))
g = self.create_empty_graph()
constant = self.insert_tensor_constant(g, torch.ones(2, 16, 5, 4))
constant_2 = self.insert_tensor_constant(g, torch.tensor([-1, 0, 0]))
shape = g.op("Reshape", constant, constant_2)
self.run_test(g, shape.node(), expect_tensor("Float", shape=(8, 16, 5)))
def test_reshape_symbolic(self):
g = self.create_empty_graph()
input = g.addInput()
input.setType(input.type().with_sizes([None, None, 2, 8]))
constant = self.insert_tensor_constant(g, torch.tensor([0, 0, -1]))
output = g.op("Reshape", input, constant)
self.run_test(g, output.node(), expect_tensor(None, shape=(None, None, 16)))
@skipIfUnsupportedMinOpsetVersion(14)
def test_reshape_allowzero(self):
g = self.create_empty_graph()
input = g.addInput()
input.setType(input.type().with_sizes([3, 4, 0]))
constant = self.insert_tensor_constant(g, torch.tensor([0, 4, 3]))
output = g.op("Reshape", input, constant, allowzero_i=1)
self.run_test(g, output.node(), expect_tensor(None, shape=(0, 4, 3)))
def test_slice(self):
g = self.create_empty_graph()
input = g.addInput()
input.setType(input.type().with_sizes([None, None]))
start_input = g.addInput()
start_input.setType(start_input.type().with_sizes([None]))
end = self.insert_tensor_constant(g, torch.tensor([3]))
axis = self.insert_tensor_constant(g, torch.tensor([0]))
step = self.insert_tensor_constant(g, torch.tensor([1]))
slice = g.op("Slice", input, start_input, end, axis, step)
self.run_test(g, slice.node(), expect_tensor(None, shape=(None, None)))
def test_broadcast_matmul(self):
g = self.create_empty_graph()
constant = self.insert_tensor_constant(g, torch.ones(5, 1, 2))
constant_2 = self.insert_tensor_constant(g, torch.ones(3, 1, 2, 1))
shape = g.op("MatMul", constant, constant_2)
self.run_test(g, shape.node(), expect_tensor("Float", shape=(3, 5, 1, 1)))
# test when first input is of rank 1
g = self.create_empty_graph()
constant = self.insert_tensor_constant(g, torch.ones(2))
constant_2 = self.insert_tensor_constant(g, torch.ones(3, 1, 2, 1))
shape = g.op("MatMul", constant, constant_2)
self.run_test(g, shape.node(), expect_tensor("Float", shape=(3, 1, 1)))
# test when second input is of rank 1
g = self.create_empty_graph()
constant = self.insert_tensor_constant(g, torch.ones(5, 1, 2))
constant_2 = self.insert_tensor_constant(g, torch.ones(2))
shape = g.op("MatMul", constant, constant_2)
self.run_test(g, shape.node(), expect_tensor("Float", shape=(5, 1)))
# test when both inputs are of rank 1
g = self.create_empty_graph()
constant = self.insert_tensor_constant(g, torch.ones(2))
constant_2 = self.insert_tensor_constant(g, torch.ones(2))
shape = g.op("MatMul", constant, constant_2)
self.run_test(g, shape.node(), expect_tensor("Float", shape=()))
def test_expand(self):
g = self.create_empty_graph()
input = g.addInput()
constant = self.insert_tensor_constant(g, torch.ones(2, 4))
input.setType(constant.type().with_sizes([None, None]))
shape = g.op("Shape", input)
expand = g.op("Expand", constant, shape)
self.run_test(g, expand.node(), expect_tensor("Float", shape=(None, None)))
def test_pad(self):
g = self.create_empty_graph()
input = g.addInput()
input.setType(input.type().with_dtype(torch.float).with_sizes([3, 320, 100]))
constant = self.insert_tensor_constant(g, torch.ones(6, dtype=torch.long))
none = g.op("prim::Constant").setType(torch.NoneType.get())
pad = g.op("Pad", input, constant, none, mode_s="constant")
self.run_test(g, pad.node(), expect_tensor("Float", shape=(5, 322, 102)))
def test_pad_with_dynamic_input_shape(self):
g = self.create_empty_graph()
input = g.addInput()
input.setType(input.type().with_dtype(torch.float).with_sizes([3, None, None]))
constant = self.insert_tensor_constant(g, torch.ones(6, dtype=torch.long))
none = g.op("prim::Constant").setType(torch.NoneType.get())
pad = g.op("Pad", input, constant, none, mode_s="constant")
self.run_test(g, pad.node(), expect_tensor("Float", shape=(5, None, None)))
def test_pad_with_dynamic_pad_size(self):
g = self.create_empty_graph()
input = g.addInput()
input.setType(input.type().with_dtype(torch.float).with_sizes([3, 320, 100]))
pad_size = g.addInput()
pad_size.setType(pad_size.type().with_dtype(torch.long).with_sizes([6]))
none = g.op("prim::Constant").setType(torch.NoneType.get())
pad = g.op("Pad", input, pad_size, none, mode_s="constant")
self.run_test(g, pad.node(), expect_tensor("Float", shape=(None, None, None)))
def test_resize(self):
g = self.create_empty_graph()
input = g.addInput()
input.setType(input.type().with_dtype(torch.float).with_sizes([4, 32, 64, 64]))
none = g.op("prim::Constant").setType(torch.NoneType.get())
scales = self.insert_tensor_constant(
g, torch.tensor([1, 1, 2, 2], dtype=torch.float)
)
resize = g.op(
"Resize",
input,
none,
scales,
coordinate_transformation_mode_s="align_corners",
cubic_coeff_a_f=-0.75,
mode_s="linear",
nearest_mode_s="floor",
)
self.run_test(g, resize.node(), expect_tensor("Float", shape=(4, 32, 128, 128)))
def test_resize_after_concat(self):
g = self.create_empty_graph()
input = g.addInput()
input.setType(input.type().with_dtype(torch.float).with_sizes([4, 32, 64, 64]))
none = g.op("prim::Constant").setType(torch.NoneType.get())
scale_1 = self.insert_tensor_constant(
g, torch.tensor([1, 1], dtype=torch.float)
)
scale_2 = self.insert_tensor_constant(
g, torch.tensor([2, 2], dtype=torch.float)
)
# `scales` values should be statically known due to constant folding in shape inference.
scales = g.op("Concat", scale_1, scale_2, axis_i=0)
resize = g.op(
"Resize",
input,
none,
scales,
coordinate_transformation_mode_s="align_corners",
cubic_coeff_a_f=-0.75,
mode_s="linear",
nearest_mode_s="floor",
)
self.run_test(g, resize.node(), expect_tensor("Float", shape=(4, 32, 128, 128)))
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_pytorch_onnx_shape_inference.py
|
# Owner(s): ["module: onnx"]
import functools
import os
import sys
import unittest
import torch
from torch.autograd import function
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(-1, pytorch_test_dir)
torch.set_default_tensor_type("torch.FloatTensor")
BATCH_SIZE = 2
RNN_BATCH_SIZE = 7
RNN_SEQUENCE_LENGTH = 11
RNN_INPUT_SIZE = 5
RNN_HIDDEN_SIZE = 3
def _skipper(condition, reason):
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
if condition():
raise unittest.SkipTest(reason)
return f(*args, **kwargs)
return wrapper
return decorator
skipIfNoCuda = _skipper(lambda: not torch.cuda.is_available(), "CUDA is not available")
skipIfTravis = _skipper(lambda: os.getenv("TRAVIS"), "Skip In Travis")
skipIfNoBFloat16Cuda = _skipper(
lambda: not torch.cuda.is_bf16_supported(), "BFloat16 CUDA is not available"
)
# skips tests for all opset versions below min_opset_version.
# if exporting the op is only supported at or after a specific opset version,
# add this wrapper so the test does not run when the currently tested
# opset_version is smaller than that minimum.
def skipIfUnsupportedMinOpsetVersion(min_opset_version):
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.opset_version < min_opset_version:
raise unittest.SkipTest(
f"Unsupported opset_version: {self.opset_version} < {min_opset_version}"
)
return func(self, *args, **kwargs)
return wrapper
return skip_dec
# skips tests for all versions above max_opset_version.
def skipIfUnsupportedMaxOpsetVersion(max_opset_version):
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.opset_version > max_opset_version:
raise unittest.SkipTest(
f"Unsupported opset_version: {self.opset_version} > {max_opset_version}"
)
return func(self, *args, **kwargs)
return wrapper
return skip_dec
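# Hedged usage sketch (hypothetical test class, not part of this module): both
# decorators above read `self.opset_version`, which the ONNX test classes set
# per parameterized run.
#
#     class _ExampleOpsetTests(unittest.TestCase):
#         opset_version = 9
#
#         @skipIfUnsupportedMinOpsetVersion(11)
#         def test_new_op(self):      # skipped here because 9 < 11
#             ...
#
#         @skipIfUnsupportedMaxOpsetVersion(13)
#         def test_legacy_op(self):   # runs because 9 <= 13
#             ...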
# skips tests for all opset versions.
def skipForAllOpsetVersions():
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.opset_version:
raise unittest.SkipTest(
"Skip verify test for unsupported opset_version"
)
return func(self, *args, **kwargs)
return wrapper
return skip_dec
def skipTraceTest(min_opset_version=float("inf")):
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self.is_trace_test_enabled = self.opset_version >= min_opset_version
if not self.is_trace_test_enabled and not self.is_script:
raise unittest.SkipTest("Skip verify test for torch trace")
return func(self, *args, **kwargs)
return wrapper
return skip_dec
def skipScriptTest(min_opset_version=float("inf")):
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self.is_script_test_enabled = self.opset_version >= min_opset_version
if not self.is_script_test_enabled and self.is_script:
raise unittest.SkipTest("Skip verify test for TorchScript")
return func(self, *args, **kwargs)
return wrapper
return skip_dec
# skips tests for opset_versions listed in unsupported_opset_versions.
# if the caffe2 test cannot be run for a specific version, add this wrapper
# (for example, an op was modified but the change is not supported in caffe2)
def skipIfUnsupportedOpsetVersion(unsupported_opset_versions):
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.opset_version in unsupported_opset_versions:
raise unittest.SkipTest(
"Skip verify test for unsupported opset_version"
)
return func(self, *args, **kwargs)
return wrapper
return skip_dec
def skipShapeChecking(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self.check_shape = False
return func(self, *args, **kwargs)
return wrapper
def skipDtypeChecking(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self.check_dtype = False
return func(self, *args, **kwargs)
return wrapper
def flatten(x):
return tuple(function._iter_filter(lambda o: isinstance(o, torch.Tensor))(x))
|
pytorch-master
|
test/onnx/pytorch_test_common.py
|
# Owner(s): ["module: onnx"]
import os
import unittest
import onnx_test_common
import parameterized
import PIL
import torch
import torchvision
from torch import nn
def _get_test_image_tensor():
data_dir = os.path.join(os.path.dirname(__file__), "assets")
img_path = os.path.join(data_dir, "grace_hopper_517x606.jpg")
input_image = PIL.Image.open(img_path)
# Based on example from https://pytorch.org/hub/pytorch_vision_resnet/
preprocess = torchvision.transforms.Compose(
[
torchvision.transforms.Resize(256),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
return preprocess(input_image).unsqueeze(0)
# Due to precision error from quantization, check only that the top prediction matches.
class _TopPredictor(nn.Module):
def __init__(self, base_model):
super().__init__()
self.base_model = base_model
def forward(self, x):
x = self.base_model(x)
_, topk_id = torch.topk(x[0], 1)
return topk_id
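# Hedged usage sketch (hypothetical; run_test below applies the same wrapping):
#
#     base = torchvision.models.quantization.resnet18(pretrained=True, quantize=True)
#     topk_id = _TopPredictor(base)(_get_test_image_tensor())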
# TODO: All torchvision quantized model tests can be written as a single parameterized test case,
# once per-parameter test decoration is supported via #79979, or once they are all enabled,
# whichever comes first.
@parameterized.parameterized_class(
("is_script",),
[(True,), (False,)],
class_name_func=onnx_test_common.parameterize_class_name,
)
class TestQuantizedModelsONNXRuntime(onnx_test_common._TestONNXRuntime):
def run_test(self, model, inputs, *args, **kwargs):
model = _TopPredictor(model)
return super().run_test(model, inputs, *args, **kwargs)
def test_mobilenet_v3(self):
model = torchvision.models.quantization.mobilenet_v3_large(
pretrained=True, quantize=True
)
self.run_test(model, _get_test_image_tensor())
@unittest.skip("quantized::cat not supported")
def test_inception_v3(self):
model = torchvision.models.quantization.inception_v3(
pretrained=True, quantize=True
)
self.run_test(model, _get_test_image_tensor())
@unittest.skip("quantized::cat not supported")
def test_googlenet(self):
model = torchvision.models.quantization.googlenet(
pretrained=True, quantize=True
)
self.run_test(model, _get_test_image_tensor())
@unittest.skip("quantized::cat not supported")
def test_shufflenet_v2_x0_5(self):
model = torchvision.models.quantization.shufflenet_v2_x0_5(
pretrained=True, quantize=True
)
self.run_test(model, _get_test_image_tensor())
def test_resnet18(self):
model = torchvision.models.quantization.resnet18(pretrained=True, quantize=True)
self.run_test(model, _get_test_image_tensor())
def test_resnet50(self):
model = torchvision.models.quantization.resnet50(pretrained=True, quantize=True)
self.run_test(model, _get_test_image_tensor())
def test_resnext101_32x8d(self):
model = torchvision.models.quantization.resnext101_32x8d(
pretrained=True, quantize=True
)
self.run_test(model, _get_test_image_tensor())
|
pytorch-master
|
test/onnx/test_models_quantized_onnxruntime.py
|
import io
import os
import shutil
import traceback
import onnx
import onnx_test_common
import torch
from onnx import numpy_helper
from test_nn import new_module_tests
from torch.autograd import Variable
from torch.testing._internal.common_nn import module_tests
# Take a test case (a dict) as input, return the test name.
def get_test_name(testcase):
if "fullname" in testcase:
return "test_" + testcase["fullname"]
test_name = "test_" + testcase["constructor"].__name__
if "desc" in testcase:
test_name += "_" + testcase["desc"]
return test_name
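# Hedged illustration (these particular test-case dicts are hypothetical): a case such as
# {"constructor": torch.nn.ReLU, "desc": "inplace"} maps to "test_ReLU_inplace", while one
# carrying {"fullname": "Linear_no_bias"} maps to "test_Linear_no_bias".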
# Take a test case (a dict) as input, return the input for the module.
def gen_input(testcase):
if "input_size" in testcase:
if (
testcase["input_size"] == ()
and "desc" in testcase
and testcase["desc"][-6:] == "scalar"
):
testcase["input_size"] = (1,)
return Variable(torch.randn(*testcase["input_size"]))
elif "input_fn" in testcase:
input = testcase["input_fn"]()
if isinstance(input, Variable):
return input
return Variable(testcase["input_fn"]())
def gen_module(testcase):
if "constructor_args" in testcase:
args = testcase["constructor_args"]
module = testcase["constructor"](*args)
module.train(False)
return module
module = testcase["constructor"]()
module.train(False)
return module
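# Note on the status encoding used below (inferred from convert_tests/print_stats):
# nn_module maps each module name to a small bitmask -- bit 0 is set when at least
# one test case for that module exported successfully, bit 1 when at least one
# failed. Hence 1 = fully supported, 2 = unsupported, 3 = partially supported.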
def print_stats(FunctionalModule_nums, nn_module):
print(f"{FunctionalModule_nums} functional modules detected.")
supported = []
unsupported = []
not_fully_supported = []
for key, value in nn_module.items():
if value == 1:
supported.append(key)
elif value == 2:
unsupported.append(key)
elif value == 3:
not_fully_supported.append(key)
def fun(info, l):
print(info)
for v in l:
print(v)
    # Fully Supported Ops: all of the related test cases of these ops have been exported
    # Semi-Supported Ops: only part of the related test cases of these ops have been exported
    # Unsupported Ops: none of the related test cases of these ops have been exported
for info, l in [
[f"{len(supported)} Fully Supported Operators:", supported],
[
f"{len(not_fully_supported)} Semi-Supported Operators:",
not_fully_supported,
],
[f"{len(unsupported)} Unsupported Operators:", unsupported],
]:
fun(info, l)
def convert_tests(testcases, sets=1):
print(f"Collect {len(testcases)} test cases from PyTorch.")
failed = 0
FunctionalModule_nums = 0
nn_module = {}
for t in testcases:
test_name = get_test_name(t)
module = gen_module(t)
module_name = str(module).split("(")[0]
if module_name == "FunctionalModule":
FunctionalModule_nums += 1
else:
if module_name not in nn_module:
nn_module[module_name] = 0
try:
input = gen_input(t)
f = io.BytesIO()
torch.onnx._export(
module,
input,
f,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
)
onnx_model = onnx.load_from_string(f.getvalue())
onnx.checker.check_model(onnx_model)
onnx.helper.strip_doc_string(onnx_model)
output_dir = os.path.join(onnx_test_common.pytorch_converted_dir, test_name)
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
with open(os.path.join(output_dir, "model.onnx"), "wb") as file:
file.write(onnx_model.SerializeToString())
for i in range(sets):
output = module(input)
data_dir = os.path.join(output_dir, f"test_data_set_{i}")
os.makedirs(data_dir)
for index, var in enumerate([input]):
tensor = numpy_helper.from_array(var.data.numpy())
with open(
os.path.join(data_dir, f"input_{index}.pb"), "wb"
) as file:
file.write(tensor.SerializeToString())
for index, var in enumerate([output]):
tensor = numpy_helper.from_array(var.data.numpy())
with open(
os.path.join(data_dir, f"output_{index}.pb"), "wb"
) as file:
file.write(tensor.SerializeToString())
input = gen_input(t)
if module_name != "FunctionalModule":
nn_module[module_name] |= 1
except: # noqa: E722,B001
traceback.print_exc()
if module_name != "FunctionalModule":
nn_module[module_name] |= 2
failed += 1
print(
"Collect {} test cases from PyTorch repo, failed to export {} cases.".format(
len(testcases), failed
)
)
print(
"PyTorch converted cases are stored in {}.".format(
onnx_test_common.pytorch_converted_dir
)
)
print_stats(FunctionalModule_nums, nn_module)
if __name__ == "__main__":
testcases = module_tests + new_module_tests
convert_tests(testcases)
|
pytorch-master
|
test/onnx/export_onnx_tests_generator.py
|
# Owner(s): ["module: onnx"]
import os
import unittest
from collections import OrderedDict
from typing import List, Mapping, Tuple
import onnx_test_common
import parameterized
import PIL
import test_models
import torch
import torchvision
from pytorch_test_common import skipIfUnsupportedMinOpsetVersion, skipScriptTest
from torch import nn
from torch.testing._internal import common_utils
from torchvision import ops
from torchvision.models.detection import (
faster_rcnn,
image_list,
keypoint_rcnn,
mask_rcnn,
roi_heads,
rpn,
transform,
)
def exportTest(
self,
model,
inputs,
rtol=1e-2,
atol=1e-7,
opset_versions=None,
acceptable_error_percentage=None,
):
opset_versions = opset_versions if opset_versions else [7, 8, 9, 10, 11, 12, 13, 14]
for opset_version in opset_versions:
self.opset_version = opset_version
self.onnx_shape_inference = True
onnx_test_common.run_model_test(
self,
model,
input_args=inputs,
rtol=rtol,
atol=atol,
acceptable_error_percentage=acceptable_error_percentage,
)
if self.is_script_test_enabled and opset_version > 11:
script_model = torch.jit.script(model)
onnx_test_common.run_model_test(
self,
script_model,
input_args=inputs,
rtol=rtol,
atol=atol,
acceptable_error_percentage=acceptable_error_percentage,
)
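# Hedged note: exportTest above runs the model against every opset in opset_versions
# (7-14 by default) and, when scripting is enabled for the class, additionally exports
# a torch.jit.script(model) variant for opsets greater than 11.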
TestModels = type(
"TestModels",
(common_utils.TestCase,),
dict(
test_models.TestModels.__dict__,
is_script_test_enabled=False,
is_script=False,
exportTest=exportTest,
),
)
# model tests for scripting with new JIT APIs and shape inference
TestModels_new_jit_API = type(
"TestModels_new_jit_API",
(common_utils.TestCase,),
dict(
TestModels.__dict__,
exportTest=exportTest,
is_script_test_enabled=True,
is_script=True,
onnx_shape_inference=True,
),
)
def _get_image(rel_path: str, size: Tuple[int, int]) -> torch.Tensor:
data_dir = os.path.join(os.path.dirname(__file__), "assets")
path = os.path.join(data_dir, *rel_path.split("/"))
image = PIL.Image.open(path).convert("RGB").resize(size, PIL.Image.BILINEAR)
return torchvision.transforms.ToTensor()(image)
def _get_test_images() -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
return (
[_get_image("grace_hopper_517x606.jpg", (100, 320))],
[_get_image("rgb_pytorch.png", (250, 380))],
)
def _get_features(images):
s0, s1 = images.shape[-2:]
features = [
("0", torch.rand(2, 256, s0 // 4, s1 // 4)),
("1", torch.rand(2, 256, s0 // 8, s1 // 8)),
("2", torch.rand(2, 256, s0 // 16, s1 // 16)),
("3", torch.rand(2, 256, s0 // 32, s1 // 32)),
("4", torch.rand(2, 256, s0 // 64, s1 // 64)),
]
features = OrderedDict(features)
return features
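# Hedged note: the five feature maps above mimic an FPN-style pyramid, halving the
# spatial resolution at each level (strides 4, 8, 16, 32 and 64 relative to the image).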
def _init_test_generalized_rcnn_transform():
min_size = 100
max_size = 200
image_mean = [0.485, 0.456, 0.406]
image_std = [0.229, 0.224, 0.225]
return transform.GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)
def _init_test_rpn():
anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
rpn_anchor_generator = rpn.AnchorGenerator(anchor_sizes, aspect_ratios)
out_channels = 256
rpn_head = rpn.RPNHead(
out_channels, rpn_anchor_generator.num_anchors_per_location()[0]
)
rpn_fg_iou_thresh = 0.7
rpn_bg_iou_thresh = 0.3
rpn_batch_size_per_image = 256
rpn_positive_fraction = 0.5
rpn_pre_nms_top_n = dict(training=2000, testing=1000)
rpn_post_nms_top_n = dict(training=2000, testing=1000)
rpn_nms_thresh = 0.7
rpn_score_thresh = 0.0
return rpn.RegionProposalNetwork(
rpn_anchor_generator,
rpn_head,
rpn_fg_iou_thresh,
rpn_bg_iou_thresh,
rpn_batch_size_per_image,
rpn_positive_fraction,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_nms_thresh,
score_thresh=rpn_score_thresh,
)
def _init_test_roi_heads_faster_rcnn():
out_channels = 256
num_classes = 91
box_fg_iou_thresh = 0.5
box_bg_iou_thresh = 0.5
box_batch_size_per_image = 512
box_positive_fraction = 0.25
bbox_reg_weights = None
box_score_thresh = 0.05
box_nms_thresh = 0.5
box_detections_per_img = 100
box_roi_pool = ops.MultiScaleRoIAlign(
featmap_names=["0", "1", "2", "3"], output_size=7, sampling_ratio=2
)
resolution = box_roi_pool.output_size[0]
representation_size = 1024
box_head = faster_rcnn.TwoMLPHead(
out_channels * resolution**2, representation_size
)
representation_size = 1024
box_predictor = faster_rcnn.FastRCNNPredictor(representation_size, num_classes)
return roi_heads.RoIHeads(
box_roi_pool,
box_head,
box_predictor,
box_fg_iou_thresh,
box_bg_iou_thresh,
box_batch_size_per_image,
box_positive_fraction,
bbox_reg_weights,
box_score_thresh,
box_nms_thresh,
box_detections_per_img,
)
@parameterized.parameterized_class(
("is_script",),
[(True,), (False,)],
class_name_func=onnx_test_common.parameterize_class_name,
)
class TestModelsONNXRuntime(onnx_test_common._TestONNXRuntime):
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest() # Faster RCNN model is not scriptable
def test_faster_rcnn(self):
model = faster_rcnn.fasterrcnn_resnet50_fpn(
pretrained=False, pretrained_backbone=True, min_size=200, max_size=300
)
model.eval()
x1 = torch.randn(3, 200, 300, requires_grad=True)
x2 = torch.randn(3, 200, 300, requires_grad=True)
self.run_test(model, ([x1, x2],), rtol=1e-3, atol=1e-5)
self.run_test(
model,
([x1, x2],),
input_names=["images_tensors"],
output_names=["outputs"],
dynamic_axes={"images_tensors": [0, 1, 2], "outputs": [0, 1, 2]},
rtol=1e-3,
atol=1e-5,
)
dummy_image = [torch.ones(3, 100, 100) * 0.3]
images, test_images = _get_test_images()
self.run_test(
model,
(images,),
additional_test_inputs=[(images,), (test_images,), (dummy_image,)],
input_names=["images_tensors"],
output_names=["outputs"],
dynamic_axes={"images_tensors": [0, 1, 2], "outputs": [0, 1, 2]},
rtol=1e-3,
atol=1e-5,
)
self.run_test(
model,
(dummy_image,),
additional_test_inputs=[(dummy_image,), (images,)],
input_names=["images_tensors"],
output_names=["outputs"],
dynamic_axes={"images_tensors": [0, 1, 2], "outputs": [0, 1, 2]},
rtol=1e-3,
atol=1e-5,
)
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest()
def test_mask_rcnn(self):
model = mask_rcnn.maskrcnn_resnet50_fpn(
pretrained=False, pretrained_backbone=True, min_size=200, max_size=300
)
images, test_images = _get_test_images()
self.run_test(model, (images,), rtol=1e-3, atol=1e-5)
self.run_test(
model,
(images,),
input_names=["images_tensors"],
output_names=["boxes", "labels", "scores", "masks"],
dynamic_axes={
"images_tensors": [0, 1, 2],
"boxes": [0, 1],
"labels": [0],
"scores": [0],
"masks": [0, 1, 2],
},
rtol=1e-3,
atol=1e-5,
)
dummy_image = [torch.ones(3, 100, 100) * 0.3]
self.run_test(
model,
(images,),
additional_test_inputs=[(images,), (test_images,), (dummy_image,)],
input_names=["images_tensors"],
output_names=["boxes", "labels", "scores", "masks"],
dynamic_axes={
"images_tensors": [0, 1, 2],
"boxes": [0, 1],
"labels": [0],
"scores": [0],
"masks": [0, 1, 2],
},
rtol=1e-3,
atol=1e-5,
)
self.run_test(
model,
(dummy_image,),
additional_test_inputs=[(dummy_image,), (images,)],
input_names=["images_tensors"],
output_names=["boxes", "labels", "scores", "masks"],
dynamic_axes={
"images_tensors": [0, 1, 2],
"boxes": [0, 1],
"labels": [0],
"scores": [0],
"masks": [0, 1, 2],
},
rtol=1e-3,
atol=1e-5,
)
@unittest.skip("Failing, see https://github.com/pytorch/pytorch/issues/66528")
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest()
def test_keypoint_rcnn(self):
model = keypoint_rcnn.keypointrcnn_resnet50_fpn(
pretrained=False, pretrained_backbone=False, min_size=200, max_size=300
)
images, test_images = _get_test_images()
self.run_test(model, (images,), rtol=1e-3, atol=1e-5)
self.run_test(
model,
(images,),
input_names=["images_tensors"],
output_names=["outputs1", "outputs2", "outputs3", "outputs4"],
dynamic_axes={"images_tensors": [0, 1, 2]},
rtol=1e-3,
atol=1e-5,
)
dummy_images = [torch.ones(3, 100, 100) * 0.3]
self.run_test(
model,
(images,),
additional_test_inputs=[(images,), (test_images,), (dummy_images,)],
input_names=["images_tensors"],
output_names=["outputs1", "outputs2", "outputs3", "outputs4"],
dynamic_axes={"images_tensors": [0, 1, 2]},
rtol=5e-3,
atol=1e-5,
)
self.run_test(
model,
(dummy_images,),
additional_test_inputs=[(dummy_images,), (test_images,)],
input_names=["images_tensors"],
output_names=["outputs1", "outputs2", "outputs3", "outputs4"],
dynamic_axes={"images_tensors": [0, 1, 2]},
rtol=5e-3,
atol=1e-5,
)
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest()
def test_roi_heads(self):
class RoIHeadsModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.transform = _init_test_generalized_rcnn_transform()
self.rpn = _init_test_rpn()
self.roi_heads = _init_test_roi_heads_faster_rcnn()
def forward(self, images, features: Mapping[str, torch.Tensor]):
original_image_sizes = [
(img.shape[-1], img.shape[-2]) for img in images
]
images_m = image_list.ImageList(
images, [(i.shape[-1], i.shape[-2]) for i in images]
)
proposals, _ = self.rpn(images_m, features)
detections, _ = self.roi_heads(
features, proposals, images_m.image_sizes
)
detections = self.transform.postprocess(
detections, images_m.image_sizes, original_image_sizes
)
return detections
images = torch.rand(2, 3, 100, 100)
features = _get_features(images)
images2 = torch.rand(2, 3, 150, 150)
test_features = _get_features(images2)
model = RoIHeadsModule()
model.eval()
model(images, features)
self.run_test(
model,
(images, features),
input_names=["input1", "input2", "input3", "input4", "input5", "input6"],
dynamic_axes={
"input1": [0, 1, 2, 3],
"input2": [0, 1, 2, 3],
"input3": [0, 1, 2, 3],
"input4": [0, 1, 2, 3],
"input5": [0, 1, 2, 3],
"input6": [0, 1, 2, 3],
},
additional_test_inputs=[(images, features), (images2, test_features)],
)
@skipScriptTest() # TODO: #75625
def test_transformer_encoder(self):
class MyModule(torch.nn.Module):
def __init__(self, ninp, nhead, nhid, dropout, nlayers):
super().__init__()
encoder_layers = nn.TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = nn.TransformerEncoder(
encoder_layers, nlayers
)
def forward(self, input):
return self.transformer_encoder(input)
x = torch.rand(10, 32, 512)
self.run_test(MyModule(512, 8, 2048, 0.0, 3), (x,), atol=1e-5)
@skipScriptTest()
def test_mobilenet_v3(self):
model = torchvision.models.quantization.mobilenet_v3_large(pretrained=False)
dummy_input = torch.randn(1, 3, 224, 224)
self.run_test(model, (dummy_input,))
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest()
def test_shufflenet_v2_dynamic_axes(self):
model = torchvision.models.shufflenet_v2_x0_5(pretrained=False)
dummy_input = torch.randn(1, 3, 224, 224, requires_grad=True)
test_inputs = torch.randn(3, 3, 224, 224, requires_grad=True)
self.run_test(
model,
(dummy_input,),
additional_test_inputs=[(dummy_input,), (test_inputs,)],
input_names=["input_images"],
output_names=["outputs"],
dynamic_axes={
"input_images": {0: "batch_size"},
"output": {0: "batch_size"},
},
rtol=1e-3,
atol=1e-5,
)
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_models_onnxruntime.py
|
# Owner(s): ["module: onnx"]
import unittest
import torch
from model_defs.dcgan import _netD, _netG, bsz, imgsz, nz, weights_init
from model_defs.emb_seq import EmbeddingNetwork1, EmbeddingNetwork2
from model_defs.mnist import MNIST
from model_defs.op_test import ConcatNet, DummyNet, FakeQuantNet, PermuteNet, PReluNet
from model_defs.squeezenet import SqueezeNet
from model_defs.srresnet import SRResNet
from model_defs.super_resolution import SuperResolutionNet
from pytorch_test_common import skipIfUnsupportedMinOpsetVersion, skipScriptTest
from torch import quantization
from torch.autograd import Variable
from torch.onnx import OperatorExportTypes
from torch.testing._internal import common_utils
from torch.testing._internal.common_utils import skipIfNoLapack
from torchvision.models import shufflenet_v2_x1_0
from torchvision.models.alexnet import alexnet
from torchvision.models.densenet import densenet121
from torchvision.models.googlenet import googlenet
from torchvision.models.inception import inception_v3
from torchvision.models.mnasnet import mnasnet1_0
from torchvision.models.mobilenet import mobilenet_v2
from torchvision.models.resnet import resnet50
from torchvision.models.segmentation import deeplabv3_resnet101, fcn_resnet101
from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
from torchvision.models.video import mc3_18, r2plus1d_18, r3d_18
from verify import verify
if torch.cuda.is_available():
def toC(x):
return x.cuda()
else:
def toC(x):
return x
BATCH_SIZE = 2
class TestModels(common_utils.TestCase):
opset_version = 9 # Caffe2 doesn't support the default.
keep_initializers_as_inputs = False
def exportTest(self, model, inputs, rtol=1e-2, atol=1e-7, **kwargs):
import caffe2.python.onnx.backend as backend
with torch.onnx.select_model_mode_for_export(
model, torch.onnx.TrainingMode.EVAL
):
graph = torch.onnx.utils._trace(model, inputs, OperatorExportTypes.ONNX)
torch._C._jit_pass_lint(graph)
verify(
model,
inputs,
backend,
rtol=rtol,
atol=atol,
opset_version=self.opset_version,
)
def test_ops(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(DummyNet()), toC(x))
def test_prelu(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(PReluNet(), x)
@skipScriptTest()
def test_concat(self):
input_a = Variable(torch.randn(BATCH_SIZE, 3))
input_b = Variable(torch.randn(BATCH_SIZE, 3))
inputs = ((toC(input_a), toC(input_b)),)
self.exportTest(toC(ConcatNet()), inputs)
def test_permute(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 10, 12))
self.exportTest(PermuteNet(), x)
@skipScriptTest()
def test_embedding_sequential_1(self):
x = Variable(torch.randint(0, 10, (BATCH_SIZE, 3)))
self.exportTest(EmbeddingNetwork1(), x)
@skipScriptTest()
def test_embedding_sequential_2(self):
x = Variable(torch.randint(0, 10, (BATCH_SIZE, 3)))
self.exportTest(EmbeddingNetwork2(), x)
@unittest.skip("This model takes too much memory")
def test_srresnet(self):
x = Variable(torch.randn(1, 3, 224, 224).fill_(1.0))
self.exportTest(
toC(SRResNet(rescale_factor=4, n_filters=64, n_blocks=8)), toC(x)
)
@skipIfNoLapack
def test_super_resolution(self):
x = Variable(torch.randn(BATCH_SIZE, 1, 224, 224).fill_(1.0))
self.exportTest(toC(SuperResolutionNet(upscale_factor=3)), toC(x), atol=1e-6)
def test_alexnet(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(alexnet()), toC(x))
def test_mnist(self):
x = Variable(torch.randn(BATCH_SIZE, 1, 28, 28).fill_(1.0))
self.exportTest(toC(MNIST()), toC(x))
@unittest.skip("This model takes too much memory")
def test_vgg16(self):
# VGG 16-layer model (configuration "D")
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(vgg16()), toC(x))
@unittest.skip("This model takes too much memory")
def test_vgg16_bn(self):
# VGG 16-layer model (configuration "D") with batch normalization
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(vgg16_bn()), toC(x))
@unittest.skip("This model takes too much memory")
def test_vgg19(self):
# VGG 19-layer model (configuration "E")
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(vgg19()), toC(x))
@unittest.skip("This model takes too much memory")
def test_vgg19_bn(self):
# VGG 19-layer model (configuration "E") with batch normalization
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(vgg19_bn()), toC(x))
def test_resnet(self):
# ResNet50 model
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(resnet50()), toC(x), atol=1e-6)
@skipScriptTest(min_opset_version=15) # None type in outputs
# This test is numerically unstable: sporadic single-element mismatches can occur.
def test_inception(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 299, 299))
self.exportTest(toC(inception_v3()), toC(x), acceptable_error_percentage=0.01)
def test_squeezenet(self):
# SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and
# <0.5MB model size
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
sqnet_v1_0 = SqueezeNet(version=1.0)
self.exportTest(toC(sqnet_v1_0), toC(x))
# SqueezeNet 1.1 has 2.4x less computation and slightly fewer params
# than SqueezeNet 1.0, without sacrificing accuracy.
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
sqnet_v1_1 = SqueezeNet(version=1.1)
self.exportTest(toC(sqnet_v1_1), toC(x))
def test_densenet(self):
# Densenet-121 model
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(densenet121()), toC(x), rtol=1e-2, atol=1e-5)
@skipScriptTest()
def test_dcgan_netD(self):
netD = _netD(1)
netD.apply(weights_init)
input = Variable(torch.empty(bsz, 3, imgsz, imgsz).normal_(0, 1))
self.exportTest(toC(netD), toC(input))
@skipScriptTest()
def test_dcgan_netG(self):
netG = _netG(1)
netG.apply(weights_init)
input = Variable(torch.empty(bsz, nz, 1, 1).normal_(0, 1))
self.exportTest(toC(netG), toC(input))
@skipIfUnsupportedMinOpsetVersion(10)
def test_fake_quant(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(FakeQuantNet()), toC(x))
@skipIfUnsupportedMinOpsetVersion(10)
def test_qat_resnet_pertensor(self):
# Quantize ResNet50 model
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
qat_resnet50 = resnet50()
# Use per tensor for weight. Per channel support will come with opset 13
qat_resnet50.qconfig = quantization.QConfig(
activation=quantization.default_fake_quant,
weight=quantization.default_fake_quant,
)
quantization.prepare_qat(qat_resnet50, inplace=True)
qat_resnet50.apply(torch.ao.quantization.enable_observer)
qat_resnet50.apply(torch.ao.quantization.enable_fake_quant)
_ = qat_resnet50(x)
for module in qat_resnet50.modules():
if isinstance(module, quantization.FakeQuantize):
module.calculate_qparams()
qat_resnet50.apply(torch.ao.quantization.disable_observer)
self.exportTest(toC(qat_resnet50), toC(x))
@skipIfUnsupportedMinOpsetVersion(13)
def test_qat_resnet_per_channel(self):
# Quantize ResNet50 model
x = torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0)
qat_resnet50 = resnet50()
qat_resnet50.qconfig = quantization.QConfig(
activation=quantization.default_fake_quant,
weight=quantization.default_per_channel_weight_fake_quant,
)
quantization.prepare_qat(qat_resnet50, inplace=True)
qat_resnet50.apply(torch.ao.quantization.enable_observer)
qat_resnet50.apply(torch.ao.quantization.enable_fake_quant)
_ = qat_resnet50(x)
for module in qat_resnet50.modules():
if isinstance(module, quantization.FakeQuantize):
module.calculate_qparams()
qat_resnet50.apply(torch.ao.quantization.disable_observer)
self.exportTest(toC(qat_resnet50), toC(x))
@skipScriptTest(min_opset_version=15) # None type in outputs
def test_googlenet(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(googlenet()), toC(x), rtol=1e-3, atol=1e-5)
def test_mnasnet(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(mnasnet1_0()), toC(x), rtol=1e-3, atol=1e-5)
def test_mobilenet(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(mobilenet_v2()), toC(x), rtol=1e-3, atol=1e-5)
@skipScriptTest() # prim_data
def test_shufflenet(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(shufflenet_v2_x1_0()), toC(x), rtol=1e-3, atol=1e-5)
@skipIfUnsupportedMinOpsetVersion(11)
def test_fcn(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(
toC(fcn_resnet101(pretrained=False, pretrained_backbone=False)),
toC(x),
rtol=1e-3,
atol=1e-5,
)
@skipIfUnsupportedMinOpsetVersion(11)
def test_deeplab(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(
toC(deeplabv3_resnet101(pretrained=False, pretrained_backbone=False)),
toC(x),
rtol=1e-3,
atol=1e-5,
)
def test_r3d_18_video(self):
x = Variable(torch.randn(1, 3, 4, 112, 112).fill_(1.0))
self.exportTest(toC(r3d_18()), toC(x), rtol=1e-3, atol=1e-5)
def test_mc3_18_video(self):
x = Variable(torch.randn(1, 3, 4, 112, 112).fill_(1.0))
self.exportTest(toC(mc3_18()), toC(x), rtol=1e-3, atol=1e-5)
def test_r2plus1d_18_video(self):
x = Variable(torch.randn(1, 3, 4, 112, 112).fill_(1.0))
self.exportTest(toC(r2plus1d_18()), toC(x), rtol=1e-3, atol=1e-5)
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_models.py
|
# Owner(s): ["module: onnx"]
from __future__ import annotations
import io
import itertools
import os
import unittest
from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import onnx_test_common
import parameterized
import torch
import torchvision
from model_defs import (
lstm_flattening_result,
rnn_model_with_packed_sequence,
word_language_model,
)
from pytorch_test_common import (
BATCH_SIZE,
RNN_BATCH_SIZE,
RNN_HIDDEN_SIZE,
RNN_INPUT_SIZE,
RNN_SEQUENCE_LENGTH,
skipDtypeChecking,
skipIfUnsupportedMaxOpsetVersion,
skipIfUnsupportedMinOpsetVersion,
skipIfUnsupportedOpsetVersion,
skipScriptTest,
skipShapeChecking,
skipTraceTest,
)
from torch import Tensor
from torch.nn.utils import rnn as rnn_utils
from torch.onnx import verification
from torch.testing._internal import common_utils
from torch.testing._internal.common_utils import skipIfNoLapack
def _init_test_generalized_rcnn_transform():
min_size = 100
max_size = 200
image_mean = [0.485, 0.456, 0.406]
image_std = [0.229, 0.224, 0.225]
transform = torchvision.models.detection.transform.GeneralizedRCNNTransform(
min_size, max_size, image_mean, image_std
)
return transform
def _init_test_rpn():
anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
rpn_anchor_generator = torchvision.models.detection.rpn.AnchorGenerator(
anchor_sizes, aspect_ratios
)
out_channels = 256
rpn_head = torchvision.models.detection.rpn.RPNHead(
out_channels, rpn_anchor_generator.num_anchors_per_location()[0]
)
rpn_fg_iou_thresh = 0.7
rpn_bg_iou_thresh = 0.3
rpn_batch_size_per_image = 256
rpn_positive_fraction = 0.5
rpn_pre_nms_top_n = dict(training=2000, testing=1000)
rpn_post_nms_top_n = dict(training=2000, testing=1000)
rpn_nms_thresh = 0.7
rpn_score_thresh = 0.0
rpn = torchvision.models.detection.rpn.RegionProposalNetwork(
rpn_anchor_generator,
rpn_head,
rpn_fg_iou_thresh,
rpn_bg_iou_thresh,
rpn_batch_size_per_image,
rpn_positive_fraction,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_nms_thresh,
score_thresh=rpn_score_thresh,
)
return rpn
def _construct_tensor_for_quantization_test(
shape: Tuple[int, ...],
offset: Optional[Union[int, float]] = None,
max_val: Optional[Union[int, float]] = None,
) -> Tensor:
"""Helper function to generate weights and test inputs in a deterministic way.
Due to differences in implementation details between PyTorch and ONNXRuntime, randomly generated
test data for quantization tests can be flaky. To help stabilize the tests, this helper function is
used to generate weights and test inputs in a deterministic way.
Args:
shape (Tuple[int]): Shape for tensor to construct.
offset (Optional[Union[int, float]]): Offset to be added to the generated tensor.
max_val (Optional[Union[int, float]]): If any element within tensor has a larger absolute value than
max_val, the tensor will be scaled by max_val / tensor.abs().max(). This step is done after
applying offset.
"""
tensor = torch.arange(np.prod(shape), dtype=torch.float).view(shape)
if offset is not None:
tensor = tensor + offset
if max_val is not None and tensor.abs().max() > max_val:
tensor = tensor * max_val / tensor.abs().max()
return tensor
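# A minimal usage sketch of the helper above (illustrative only, not part of the original
# test suite): values are arange(prod(shape)) reshaped to `shape`, shifted by `offset`,
# then rescaled so the largest magnitude does not exceed `max_val`, e.g.
#
#   weight = _construct_tensor_for_quantization_test((2, 3), offset=-2, max_val=2.0)
#   assert weight.shape == (2, 3) and weight.abs().max() <= 2.0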
def _parameterized_class_attrs_and_values():
attrs = ("opset_version", "is_script", "keep_initializers_as_inputs")
input_values = []
input_values.extend(itertools.product((7, 8), (True, False), (True,)))
# Valid opset versions are defined in torch/onnx/_constants.py.
# Versions are intentionally set statically, so they are not affected by changes elsewhere.
input_values.extend(itertools.product(range(9, 17), (True, False), (True, False)))
return {"attrs": attrs, "input_values": input_values}
def _parametrize_rnn_args(arg_name):
options = {
"layers": {1: "unilayer", 3: "trilayer"},
"bidirectional": {True: "bidirectional", False: "forward"},
"initial_state": {True: "with_initial_state", False: "no_initial_state"},
"packed_sequence": {
0: "without_sequence_lengths",
1: "with_variable_length_sequences",
2: "with_batch_first_sequence_lengths",
},
"dropout": {0.2: "with_dropout", 0.0: "without_dropout"},
}
return {
"arg_str": arg_name,
"arg_values": options[arg_name].keys(),
"name_fn": lambda val: options[arg_name][val],
}
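# Minimal usage sketch (illustrative; the actual call sites are the RNN tests further
# down in this file): the returned dict is meant to be splatted into
# common_utils.parametrize so that each option value becomes a named test case, e.g.
#
#   @common_utils.parametrize(**_parametrize_rnn_args("bidirectional"))
#   def test_rnn(self, bidirectional):
#       ...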
@parameterized.parameterized_class(
**_parameterized_class_attrs_and_values(),
class_name_func=onnx_test_common.parameterize_class_name,
)
@common_utils.instantiate_parametrized_tests
class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
def test_fuse_conv_bn1d(self):
class Fuse(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv1d(16, 33, 3, stride=2)
self.bn = torch.nn.BatchNorm1d(33)
def forward(self, x):
out = self.conv(x)
return self.bn(out)
model = Fuse()
x = torch.randn(20, 16, 50, requires_grad=True)
self.run_test(model, (x,))
def test_fuse_conv_bn2d(self):
class Fuse(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(
3, 2, kernel_size=1, stride=2, padding=3, bias=False
)
self.bn = torch.nn.BatchNorm2d(2)
def forward(self, x):
out = self.conv(x)
return self.bn(out)
model = Fuse()
x = torch.randn(2, 3, 2, 2, requires_grad=True)
self.run_test(model, (x,))
def test_fuse_conv_bn3d(self):
class Fuse(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv3d(
3, 2, (3, 5, 2), stride=(2, 1, 1), padding=(3, 2, 0), bias=False
)
self.bn = torch.nn.BatchNorm3d(2)
def forward(self, x):
out = self.conv(x)
return self.bn(out)
model = Fuse()
x = torch.randn(2, 3, 10, 50, 100, requires_grad=True)
self.run_test(model, (x,), rtol=1e-3, atol=1e-6)
def test_fuse_conv_in_block(self):
class Fuse(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv1d(
in_channels=5,
out_channels=5,
kernel_size=3,
stride=1,
padding=2,
dilation=1,
)
self.bn = torch.nn.BatchNorm1d(5)
def forward(self, x):
results_available = True
if x.sum() > -1:
results_available = False
if results_available:
x = self.conv(x)
x = self.bn(x)
return x
model = Fuse()
x = torch.randn(2, 5, 9, requires_grad=True)
self.run_test(
torch.jit.script(model),
(x,),
input_names=["x"],
dynamic_axes={"x": [0, 2]},
rtol=1e-3,
atol=1e-6,
)
def test_conv_tbc(self):
from torch.nn.modules.utils import _single
class ConvTBC(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, padding=0):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _single(kernel_size)
self.padding = _single(padding)
self.weight = torch.nn.Parameter(
Tensor(self.kernel_size[0], in_channels, out_channels)
)
self.bias = torch.nn.Parameter(Tensor(out_channels))
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_normal_(self.weight)
torch.nn.init.zeros_(self.bias)
def conv_tbc(self, input):
return torch.conv_tbc(
input.contiguous(), self.weight, self.bias, self.padding[0]
)
def forward(self, input):
return self.conv_tbc(input)
in_channels = 3
out_channels = 5
kernel_size = 5
model = ConvTBC(in_channels, out_channels, kernel_size, padding=0)
x = torch.randn(10, 7, in_channels, requires_grad=True)
self.run_test(model, (x,), atol=1e-5)
def test_reshape_constant_fold(self):
class Reshape(torch.nn.Module):
def __init__(
self,
):
super().__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
scale_1 = self.weight.reshape(1, -1, 1, 1)
return x * scale_1
x = torch.randn(4, 5)
self.run_test(Reshape(), (x,), rtol=1e-3, atol=1e-5)
def run_word_language_model(self, model_name):
ntokens = 50
emsize = 5
nhid = 5
nlayers = 5
dropout = 0.2
tied = False
batchsize = 5
if model_name == "GRU":
model = word_language_model.RNNModelWithTensorHidden(
model_name, ntokens, emsize, nhid, nlayers, dropout, tied, batchsize
)
elif model_name == "LSTM":
model = word_language_model.RNNModelWithTupleHidden(
model_name, ntokens, emsize, nhid, nlayers, dropout, tied, batchsize
)
else:
model = word_language_model.RNNModel(
model_name, ntokens, emsize, nhid, nlayers, dropout, tied, batchsize
)
x = torch.arange(0, ntokens).long().view(-1, batchsize)
# Only the CPU version is supported, since the tracer does not work with GPU RNNs.
self.run_test(model, (x, model.hidden))
def get_image(self, rel_path: str, size: Tuple[int, int]) -> Tensor:
from PIL import Image
from torchvision import transforms
data_dir = os.path.join(os.path.dirname(__file__), "assets")
path = os.path.join(data_dir, *rel_path.split("/"))
image = Image.open(path).convert("RGB").resize(size, Image.BILINEAR)
return transforms.ToTensor()(image)
def get_test_images(self) -> Tuple[List[Tensor], List[Tensor]]:
return (
[self.get_image("grace_hopper_517x606.jpg", (100, 320))],
[self.get_image("rgb_pytorch.png", (250, 380))],
)
def test_paste_mask_in_image(self):
masks = torch.rand(10, 1, 26, 26)
boxes = torch.rand(10, 4)
boxes[:, 2:] += torch.rand(10, 2)
boxes *= 50
o_im_s = (100, 100)
from torchvision.models.detection.roi_heads import paste_masks_in_image
out = paste_masks_in_image(masks, boxes, o_im_s)
jit_trace = torch.jit.trace(
paste_masks_in_image,
(masks, boxes, [torch.tensor(o_im_s[0]), torch.tensor(o_im_s[1])]),
)
out_trace = jit_trace(
masks, boxes, [torch.tensor(o_im_s[0]), torch.tensor(o_im_s[1])]
)
assert torch.all(out.eq(out_trace))
masks2 = torch.rand(20, 1, 26, 26)
boxes2 = torch.rand(20, 4)
boxes2[:, 2:] += torch.rand(20, 2)
boxes2 *= 100
o_im_s2 = (200, 200)
from torchvision.models.detection.roi_heads import paste_masks_in_image
out2 = paste_masks_in_image(masks2, boxes2, o_im_s2)
out_trace2 = jit_trace(
masks2, boxes2, [torch.tensor(o_im_s2[0]), torch.tensor(o_im_s2[1])]
)
assert torch.all(out2.eq(out_trace2))
def test_heatmaps_to_keypoints(self):
maps = torch.rand(10, 1, 26, 26)
rois = torch.rand(10, 4)
from torchvision.models.detection.roi_heads import heatmaps_to_keypoints
out = heatmaps_to_keypoints(maps, rois)
jit_trace = torch.jit.trace(heatmaps_to_keypoints, (maps, rois))
out_trace = jit_trace(maps, rois)
assert torch.all(out[0].eq(out_trace[0]))
assert torch.all(out[1].eq(out_trace[1]))
maps2 = torch.rand(20, 2, 21, 21)
rois2 = torch.rand(20, 4)
from torchvision.models.detection.roi_heads import heatmaps_to_keypoints
out2 = heatmaps_to_keypoints(maps2, rois2)
out_trace2 = jit_trace(maps2, rois2)
assert torch.all(out2[0].eq(out_trace2[0]))
assert torch.all(out2[1].eq(out_trace2[1]))
def test_word_language_model_RNN_TANH(self):
self.run_word_language_model("RNN_TANH")
def test_word_language_model_RNN_RELU(self):
self.run_word_language_model("RNN_RELU")
@skipScriptTest() # scripting prim::unchecked_cast prim::setattr
def test_word_language_model_LSTM(self):
self.run_word_language_model("LSTM")
def test_word_language_model_GRU(self):
self.run_word_language_model("GRU")
def test_index_1d(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[0]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), m1)
def test_index_2d_1dimslice(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[0:1, :]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), m1)
def test_index_2d_sliceint(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[1, :]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), m1)
def test_index_2d_neg_slice(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[0:-1, :]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), m1)
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_mask(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[torch.tensor([0, 1, 0], dtype=torch.uint8)]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), m1)
class MyModel(torch.nn.Module):
def forward(self, input):
return input[torch.tensor([0, 1, 0], dtype=torch.bool)]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), m1)
@skipIfUnsupportedMinOpsetVersion(9)
def test_data(self):
class Data(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x.new_zeros(x.data.size())
x = torch.randn(3, 4)
self.run_test(Data(), x, input_names=["x"], dynamic_axes={"x": [0, 1]})
self.run_test(Data(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_mask_nd(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[input > 0]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), m1)
@skipScriptTest()
def test_dict(self):
class MyModel(torch.nn.Module):
def forward(self, x_in):
x_out = {}
x_out["test_key_out"] = torch.add(
x_in[list(x_in.keys())[0]], list(x_in.keys())[0]
)
return x_out
x = {torch.tensor(1.0): torch.randn(1, 2, 3)}
self.run_test(MyModel(), (x,))
@skipScriptTest()
def test_dict_str(self):
class MyModel(torch.nn.Module):
def forward(self, x_in):
x_out = {}
x_out["test_key_out"] = torch.add(x_in["test_key_in"], 2.0)
return x_out
x = {"test_key_in": torch.randn(1, 2, 3)}
self.run_test(MyModel(), (x,))
@skipScriptTest() # User-defined class not supported
def test_dict_output(self):
class DictModelOutput(OrderedDict):
tensor_out: Tensor
tuple_out: Optional[Tuple[Tensor]] = None
list_out: Optional[List[Tensor]] = None
class MyModel(torch.nn.Module):
def forward(self, a, b, c, d):
return DictModelOutput(
tensor_out=a,
tuple_out=(b, c),
list_out=[d],
)
a = torch.randn(2, 3)
b = torch.randn(2, 3)
c = torch.randn(2, 3)
d = torch.randn(2, 3)
self.run_test(MyModel(), (a, b, c, d))
def test_tuple_output(self):
class MyModel(torch.nn.Module):
def forward(self, a, b, c, d):
return a, (b, c), d
a = torch.randn(2, 3)
b = torch.randn(2, 3)
c = torch.randn(2, 3)
d = torch.randn(2, 3)
self.run_test(MyModel(), (a, b, c, d))
def test_nested_tuple_output(self):
class MyModel(torch.nn.Module):
def forward(self, a, b, c, d):
return a, ((b,), (c, d))
a = torch.randn(2, 3)
b = torch.randn(2, 3)
c = torch.randn(2, 3)
d = torch.randn(2, 3)
self.run_test(MyModel(), (a, b, c, d))
def test_tuple_input(self):
class TupleModel(torch.nn.Module):
def forward(self, a: Tuple[Tensor, Tensor]):
return a
x = (torch.randn(3, 4), torch.randn(4, 3))
self.run_test(TupleModel(), input_args=(x,))
def test_tuple_primitive_input(self):
class TupleModel(torch.nn.Module):
def forward(self, a: Tuple[int, Tensor], b):
return a[0], a[1] + b
x = (3, torch.randn(4, 3))
y = torch.randn(4, 3)
self.run_test(TupleModel(), input_args=(x, y))
def test_nested_tuple_input(self):
class NestedTupleModel(torch.nn.Module):
def forward(self, a, b: Tuple[Tensor, Tuple[Tensor, Tensor]]):
return a + b[0] + b[1][0] + b[1][1]
x = torch.randn(4, 5)
y = (torch.randn(4, 5), (torch.randn(1, 5), torch.randn(4, 1)))
self.run_test(NestedTupleModel(), input_args=(x, y))
@skipScriptTest() # Needs https://github.com/pytorch/rfcs/pull/21
@skipIfUnsupportedMinOpsetVersion(15)
def test_mixed_optional_default_none(self):
class Model(torch.nn.Module):
def forward(
self,
x,
y: Optional[Tensor] = None,
z: Optional[Tensor] = None,
):
if y is not None:
return x + y
if z is not None:
return x + z
return x
x = torch.randn(2, 3)
y = torch.randn(2, 3)
z = torch.randn(2, 3)
model = Model()
# Without kwargs dict.
self.run_test(model, (x, y, None))
self.run_test(model, (x, None, z))
# With kwargs dict.
self.run_test(model, (x,), {"y": y, "z": None})
self.run_test(model, (x,), {"y": None, "z": z})
self.run_test(model, (x,), {"z": z})
self.run_test(model, (x,), {"y": y})
@skipScriptTest() # tracing eliminates None inputs so it works differently. See _script version below.
@skipIfUnsupportedMinOpsetVersion(15)
def test_mixed_optional_default_tensor(self):
class Model(torch.nn.Module):
def forward(
self,
x,
y: Optional[Tensor] = torch.ones(2, 3),
z: Optional[Tensor] = torch.zeros(2, 3),
):
if y is not None:
return x + y
if z is not None:
return x + z
return x
x = torch.randn(2, 3)
y = torch.randn(2, 3)
z = torch.randn(2, 3)
model = Model()
self.run_test(model, (x, y, None))
self.run_test(model, (x, None, z))
@skipTraceTest() # tracing is verified with different set of inputs. See above.
@skipIfUnsupportedMinOpsetVersion(15)
def test_mixed_optional_default_tensor_script(self):
class Model(torch.nn.Module):
def forward(
self,
x,
y: Optional[Tensor] = torch.ones(2, 3),
z: Optional[Tensor] = torch.zeros(2, 3),
):
if y is not None:
return x + y
if z is not None:
return x + z
return x
x = torch.randn(2, 3)
y = torch.randn(2, 3)
z = torch.randn(2, 3)
model = torch.jit.script(Model())
self.run_test(model, (x, y, z), input_names=("x", "y", "z"))
self.run_test(model, (x,), {"y": y, "z": z}, input_names=("x", "y", "z"))
# Requires input_names to be set so that we can feed the inputs properly into ORT.
# TODO: Export default values as ONNX initializers, then this should not raise.
# https://msdata.visualstudio.com/Vienna/_workitems/edit/969268
# Default values are accessible via FunctionSchema.
with self.assertRaisesRegex(
ValueError, "Model requires 3 inputs. Input Feed contains 2"
):
self.run_test(model, (x,), {"y": y}, input_names=("x", "y"))
for example_inputs, example_kwargs in (
((x, y, None), {}),
((x, None, z), {}),
((x,), {"y": y, "z": None}),
((x,), {"y": None, "z": z}),
):
with self.assertRaisesRegex(
ValueError, "args contained 1 None's after flattening."
):
self.run_test(
model, example_inputs, example_kwargs, input_names=("x", "y", "z")
)
@skipScriptTest() # Needs https://github.com/pytorch/rfcs/pull/21
@skipIfUnsupportedMinOpsetVersion(15)
def test_all_optional_default_none(self):
class Model(torch.nn.Module):
def forward(self, x: Optional[Tensor] = None, y: Optional[Tensor] = None):
if x is not None:
return x
if y is not None:
return y
else:
return torch.tensor(-1.0)
x = torch.randn(2, 3)
model = Model()
self.run_test(model, (x, None))
self.run_test(
model,
(),
{"x": x, "y": None},
# y disappears in tracing.
input_names=("x",),
)
@skipScriptTest() # tracing eliminates None inputs so it works differently. See _script version below.
@skipIfUnsupportedMinOpsetVersion(15)
def test_all_optional_default_tensor(self):
class Model(torch.nn.Module):
def forward(
self,
x: Optional[Tensor] = torch.ones(2, 3),
y: Optional[Tensor] = torch.zeros(2, 3),
):
if x is not None:
return x
elif y is not None:
return y
else:
return torch.tensor(-1.0)
x = torch.randn(2, 3)
y = torch.randn(2, 3)
model = Model()
self.run_test(model, (x, None))
self.run_test(model, (None, y))
# tracing means y is never used so it's removed from the exported model inputs,
# and we fail when trying to run ORT.
with self.assertRaisesRegex(ValueError, "got too many positional inputs"):
self.run_test(model, (x, y))
@skipTraceTest() # tracing is verified with different set of inputs. See above.
@skipIfUnsupportedMinOpsetVersion(15)
def test_all_optional_default_tensor_script(self):
class Model(torch.nn.Module):
def forward(
self,
x: Optional[Tensor] = torch.ones(2, 3),
y: Optional[Tensor] = torch.zeros(2, 3),
):
if x is not None:
return x
elif y is not None:
return y
else:
return torch.tensor(-1.0)
x = torch.randn(2, 3)
y = torch.randn(2, 3)
model = torch.jit.script(Model())
# TODO: Export default values as ONNX initializers, then this should not raise.
# https://msdata.visualstudio.com/Vienna/_workitems/edit/969268
# Default values are accessible via FunctionSchema.
with self.assertRaisesRegex(
ValueError, "Model requires 2 inputs. Input Feed contains 1"
):
self.run_test(model, (x,))
self.run_test(model, (), {"y": y})
self.run_test(model, (x, y))
self.run_test(model, (), {"x": x, "y": y}, input_names=("x", "y"))
@skipScriptTest() # Needs https://github.com/pytorch/rfcs/pull/21
@skipIfUnsupportedMinOpsetVersion(15)
def test_mixed_optional(self):
class Model(torch.nn.Module):
def forward(self, x, y: Optional[Tensor]):
if y is not None:
return x + y
return x
x = torch.randn(2, 3)
model = Model()
self.run_test(model, (x, None))
self.run_test(model, (x, x))
@skipScriptTest() # Needs https://github.com/pytorch/rfcs/pull/21
@skipIfUnsupportedMinOpsetVersion(15)
def test_tuple_of_optional(self):
class Model(torch.nn.Module):
def forward(self, x, y: Tuple[Optional[Tensor], Optional[Tensor]]):
if y[0] is not None:
return x + y[0]
if y[1] is not None:
return x + y[1]
return x
x = torch.randn(2, 3)
y1 = torch.randn(2, 3)
self.run_test(Model(), (x, (None, y1)))
@skipScriptTest() # tracing eliminates None inputs so it works differently. See _script version below.
@skipIfUnsupportedMinOpsetVersion(15)
def test_tuple_of_optional_default_tensor(self):
class Model(torch.nn.Module):
def forward(
self,
x,
y: Tuple[Optional[Tensor], Optional[Tensor]] = (
torch.zeros(2, 3),
torch.zeros(2, 3),
),
):
y0, y1 = y
if y0 is not None:
return x + y0
if y1 is not None:
return x + y1
return x
x = torch.randn(2, 3)
y1 = torch.randn(2, 3)
self.run_test(Model(), (x, (None, y1)))
@skipTraceTest() # tracing is verified with different set of inputs. See above.
@skipIfUnsupportedMinOpsetVersion(15)
def test_tuple_of_optional_default_tensor_script(self):
class Model(torch.nn.Module):
def forward(
self,
x,
y: Tuple[Optional[Tensor], Optional[Tensor]] = (
torch.zeros(2, 3),
torch.zeros(2, 3),
),
):
y0, y1 = y
if y0 is not None:
return x + y0
if y1 is not None:
return x + y1
return x
x = torch.randn(2, 3)
y0 = torch.randn(2, 3)
y1 = torch.randn(2, 3)
model = torch.jit.script(Model())
with self.assertRaisesRegex(
ValueError, "args contained 1 None's after flattening."
):
self.run_test(model, (x, (None, y1)))
self.run_test(model, (x, (y0, y1)))
# export succeeds, but running ORT through run_test would fail because the exported model
# has the inputs flattened into 3 inputs.
torch.onnx.export(
model, (x, {"y": (y0, y1)}), io.BytesIO(), opset_version=self.opset_version
)
def test_primitive_input_integer(self):
class Model(torch.nn.Module):
def forward(self, x: int, y):
return x + y
x = 3
y = torch.randint(10, (2, 3, 4))
self.run_test(Model(), (x, y))
@skipDtypeChecking
def test_primitive_input_floating(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x: float, y):
return x + y
x = 3.0
y = torch.randn(2, 3, 4)
self.run_test(Model(), (x, y))
def test_primitive_input_bool(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, flag: bool, x, y):
if flag:
return x
else:
return y
flag = True
x = torch.randn(2, 3, 4)
y = torch.randn(2, 3, 4)
self.run_test(torch.jit.script(Model()), (flag, x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_cste_script(self):
class MyModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.zeros(x.size(0)), torch.ones(
(x.size(1), x.size(0)), dtype=torch.int64
)
x = torch.randn(3, 4)
self.run_test(MyModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1]})
self.run_test(MyModel(), x, remained_onnx_input_idx=[])
def test_scalar_tensor(self):
class test(torch.nn.Module):
def forward(self, input):
return torch.scalar_tensor(input.size(0)), torch.scalar_tensor(
input.size(1), dtype=torch.int64
)
x = torch.randn(2, 3, 4)
y = torch.randn(7, 8, 9)
model = test()
self.run_test(
model,
x,
additional_test_inputs=[y],
input_names=["input_1"],
dynamic_axes={"input_1": [0, 1, 2]},
)
def test_tensor(self):
class ScalarInputModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return torch.tensor(input.shape[1])
x = torch.randn(3, 4)
self.run_test(
ScalarInputModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1]}
)
self.run_test(ScalarInputModel(), x, remained_onnx_input_idx=[])
class TensorInputModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return torch.tensor([input.shape[0], input.shape[1]])
x = torch.randn(3, 4)
self.run_test(
TensorInputModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1]}
)
self.run_test(TensorInputModel(), x, remained_onnx_input_idx=[])
class FloatInputModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return torch.tensor([float(input)])
x = torch.randn(1)
self.run_test(FloatInputModel(), x)
class InputWithDtypeModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return torch.tensor(input.shape[1], dtype=torch.long)
x = torch.randn(3, 4)
self.run_test(
InputWithDtypeModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1]}
)
self.run_test(InputWithDtypeModel(), x, remained_onnx_input_idx=[])
class MixedInputModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return torch.tensor([input.shape[0], int(input)])
x = torch.randn(1)
self.run_test(MixedInputModel(), x)
def test_hardtanh(self):
model = torch.nn.Hardtanh(-1.5, 2.5)
x = torch.arange(-5, 5).to(dtype=torch.float32)
self.run_test(model, x)
def test_hardtanh_script_with_default_values(self):
class MyModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.nn.functional.hardtanh(x)
x = torch.arange(-5, 5).to(dtype=torch.float32)
self.run_test(MyModel(), x)
def test_hardswish(self):
model = torch.nn.Hardswish()
x = torch.rand(3, 3).to(dtype=torch.float32)
self.run_test(model, x)
# Testing edge cases
x = torch.tensor(3).to(dtype=torch.float32)
self.run_test(model, x)
x = torch.tensor(-3).to(dtype=torch.float32)
self.run_test(model, x)
def test_hardswish_script(self):
class MyModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.nn.functional.hardswish(x)
x = torch.rand(3, 3).to(dtype=torch.float32)
self.run_test(MyModel(), x)
def test_hardsigmoid(self):
model = torch.nn.Hardsigmoid()
x = torch.rand(3, 3).to(dtype=torch.float32)
self.run_test(model, x)
# corner cases
x = torch.tensor(3).to(dtype=torch.float32)
self.run_test(model, x)
x = torch.tensor(-3).to(dtype=torch.float32)
self.run_test(model, x)
def test_tanhshrink(self):
model = torch.nn.Tanhshrink()
x = torch.rand(3, 3).to(dtype=torch.float32)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_hardshrink(self):
model = torch.nn.Hardshrink()
x = torch.rand(3, 3).to(dtype=torch.float32)
self.run_test(model, x)
# Testing edge cases
x = torch.tensor(0.5).to(dtype=torch.float32)
self.run_test(model, x)
x = torch.tensor(-0.5).to(dtype=torch.float32)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_hardshrink_dtype(self):
x = torch.rand(3, 3).to(dtype=torch.float64)
self.run_test(torch.nn.Hardshrink(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_softshrink(self):
model = torch.nn.Softshrink()
x = torch.rand(3, 3).to(dtype=torch.float32)
self.run_test(model, x)
# Testing edge cases
x = torch.tensor(0.5).to(dtype=torch.float32)
self.run_test(model, x)
x = torch.tensor(-0.5).to(dtype=torch.float32)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_softshrink_dtype(self):
x = torch.rand(3, 3).to(dtype=torch.float64)
self.run_test(torch.nn.Softshrink(), x)
def test_clamp(self):
class ClampModel(torch.nn.Module):
def forward(self, x):
return x.clamp(-0.5, 0.5)
x = torch.randn(3, 4)
self.run_test(ClampModel(), x)
class ClampMinModel(torch.nn.Module):
def forward(self, x):
return x.clamp(min=-0.5)
x = torch.randn(3, 4)
self.run_test(ClampMinModel(), x)
class ClampMaxModel(torch.nn.Module):
def forward(self, x):
return x.clamp(max=0.5)
x = torch.randn(3, 4)
self.run_test(ClampMaxModel(), x)
@skipIfUnsupportedMinOpsetVersion(8)
def test_clamp_dyn(self):
class ClampMaxModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x.clamp(None, x.size(0))
x = torch.arange(16).view(4, 4).float()
self.run_test(ClampMaxModel(), x)
class ClampMinModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x.clamp(x.size(0), None)
x = torch.arange(16).view(4, 4).float()
self.run_test(ClampMinModel(), x)
class ClampMinMaxModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x.clamp(x.size(0), x.size(1))
x = torch.arange(16).view(2, 8).float()
self.run_test(ClampMinMaxModel(), x)
class ClampTensorModel(torch.nn.Module):
def forward(self, x, min, max):
return x.clamp(min, max)
x = torch.randn(3, 4)
y = torch.randn(3, 4)
z = torch.randn(3, 4)
self.run_test(ClampTensorModel(), (x, y, z))
class ClampTensorMinModel(torch.nn.Module):
def forward(self, x, min):
return x.clamp(min=min)
self.run_test(ClampTensorMinModel(), (x, y))
class ClampTensorMaxModel(torch.nn.Module):
def forward(self, x, max):
return x.clamp(max=max)
self.run_test(ClampTensorMaxModel(), (x, z))
@skipIfUnsupportedMinOpsetVersion(9)
def test_full_trace(self):
class FullModel(torch.nn.Module):
def forward(self, x):
return torch.full((3, 4), x, dtype=torch.long)
x = torch.tensor(12)
self.run_test(FullModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_full_script(self):
class FullModelScripting(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.full((3, 4), x, dtype=torch.long)
x = torch.tensor(12)
self.run_test(FullModelScripting(), x)
def test_fuse_addmm(self):
class AddmmModel(torch.nn.Module):
def forward(self, x):
return torch.mm(x, x) + x
x = torch.ones(3, 3)
self.run_test(AddmmModel(), x)
def test_maxpool(self):
model = torch.nn.MaxPool1d(2, stride=1)
x = torch.randn(20, 16, 50)
self.run_test(model, x)
def test_conv(self):
class TraceModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv1d(16, 33, 3, stride=2)
self.conv2 = torch.nn.Conv2d(
16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1)
)
self.conv3 = torch.nn.Conv3d(
16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0)
)
def forward(self, input1, input2, input3):
return self.conv1(input1), self.conv2(input2), self.conv3(input3)
x1 = torch.randn(20, 16, 50)
x2 = torch.randn(20, 16, 50, 50)
x3 = torch.randn(20, 16, 10, 50, 50)
self.run_test(TraceModel(), (x1, x2, x3), atol=10e-5)
def test_conv_shape_inference(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv2 = torch.nn.Conv2d(
16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1)
)
def forward(self, input):
return self.conv2(input) + 2
x = torch.randn(20, 16, 50, 100)
self.run_test(
Model(), x, atol=10e-5, input_names=["x"], dynamic_axes={"x": [0]}
)
def test_conv_transpose(self):
class TraceModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.ConvTranspose1d(16, 33, 3, stride=2)
self.conv2 = torch.nn.ConvTranspose2d(
16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1)
)
self.conv3 = torch.nn.ConvTranspose3d(
16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0)
)
def forward(self, input1, input2, input3):
return self.conv1(input1), self.conv2(input2), self.conv3(input3)
x1 = torch.randn(20, 16, 10)
x2 = torch.randn(20, 16, 10, 10)
x3 = torch.randn(20, 16, 10, 10, 10)
self.run_test(TraceModel(), (x1, x2, x3), atol=10e-5)
def test_numpy_T(self):
class NumpyTranspose(torch.nn.Module):
def forward(self, x):
return x.T
self.run_test(NumpyTranspose(), torch.randn(4, 7))
self.run_test(NumpyTranspose(), torch.tensor(-42.0))
# Conversion of Transpose depends on the input shape being known.
# The following test only works when onnx shape inference is enabled.
def test_transpose_infer_shape(self):
class TransposeModule(torch.jit.ScriptModule):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 1, 3, stride=2)
@torch.jit.script_method
def forward(self, x):
x = self.conv(x)
return x.transpose(0, 1)
x = torch.randn(32, 3, 64, 64)
y = torch.randn(16, 3, 8, 64)
self.run_test(
TransposeModule(),
x,
input_names=["x"],
dynamic_axes={"x": [0, 2]},
additional_test_inputs=[y],
)
def squeeze_model_tests(self, d, x1, x2):
class Squeeze(torch.nn.Module):
def __init__(self, d):
super().__init__()
self.d = d
def forward(self, x):
if self.d is not None:
return torch.squeeze(x, dim=self.d)
else:
return torch.squeeze(x)
x2 = [] if x2 is None else [x2]
if len(x2) > 0:
self.run_test(
Squeeze(d),
x1,
input_names=["input"],
dynamic_axes={"input": {0: "0", 1: "1", 2: "2"}},
additional_test_inputs=x2,
)
else:
self.run_test(Squeeze(d), x1)
def test_squeeze_without_no_op(self):
x = torch.randn(2, 1, 4)
self.squeeze_model_tests(1, x, None)
@skipIfUnsupportedMinOpsetVersion(11)
def test_squeeze_dynamic(self):
x_squeeze = torch.randn(2, 1, 4)
x_noop = torch.randn(2, 2, 3)
self.squeeze_model_tests(1, x_squeeze, x_noop)
def test_squeeze_neg_without_no_op(self):
x = torch.randn(2, 1, 4)
self.squeeze_model_tests(-2, x, None)
@skipIfUnsupportedMinOpsetVersion(11)
def test_squeeze_neg(self):
x_squeeze = torch.randn(2, 1, 4)
x_noop = torch.randn(2, 2, 3)
self.squeeze_model_tests(-2, x_squeeze, x_noop)
def test_squeeze_all_dims(self):
x_squeeze = torch.randn(2, 1, 4)
x_noop = torch.randn(2, 2, 3)
self.squeeze_model_tests(None, x_squeeze, x_noop)
@skipIfUnsupportedMinOpsetVersion(11)
def test_squeeze_no_op(self):
x_noop = torch.randn(2, 1, 4)
x_squeeze = torch.randn(2, 2, 1)
self.squeeze_model_tests(2, x_noop, x_squeeze)
@skipIfUnsupportedMinOpsetVersion(11)
def test_squeeze_runtime_dim(self):
class Squeeze(torch.nn.Module):
def forward(self, d1, d2):
t = torch.zeros(d1[0], d2[0])
return t.squeeze(0)
d1 = torch.tensor([1])
d3 = torch.tensor([3])
d4 = torch.tensor([4])
self.run_test(Squeeze(), (d1, d4), additional_test_inputs=[(d3, d4)])
self.run_test(Squeeze(), (d3, d4), additional_test_inputs=[(d1, d3)])
def test_squeeze(self):
class Squeeze(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x, dim=-2)
x = torch.randn(2, 1, 4)
self.run_test(Squeeze(), x)
@skipIfUnsupportedMinOpsetVersion(13)
def test_squeeze_dynamic_dim(self):
class Squeeze(torch.nn.Module):
def forward(self, x, dim: int):
return torch.squeeze(x, dim)
x = torch.randn(2, 1, 4)
dim = 1
self.run_test(Squeeze(), (x, dim))
def test_unsqueeze(self):
class Unsqueeze(torch.nn.Module):
def forward(self, x):
return torch.unsqueeze(x, dim=-2)
x = torch.randn(2, 3, 4)
self.run_test(Unsqueeze(), x)
@skipIfUnsupportedMinOpsetVersion(13)
def test_unsqueeze_dynamic_dim(self):
class Unsqueeze(torch.nn.Module):
def forward(self, x, dim: int):
return torch.unsqueeze(x, dim)
x = torch.randn(2, 1, 4)
dim = -1
self.run_test(Unsqueeze(), (x, dim))
def test_maxpool_default_stride(self):
class MaxPoolModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.max_pool2d(x, 2)
model = MaxPoolModel()
x = torch.randn(10, 20, 16, 50)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(8)
def test_maxpool_adaptive(self):
model = torch.nn.AdaptiveMaxPool1d((5), return_indices=False)
x = torch.randn(20, 16, 50, requires_grad=True)
y = torch.randn(32, 16, 50, requires_grad=True)
self.run_test(
model,
x,
input_names=["x"],
dynamic_axes={"x": [0]},
additional_test_inputs=[y],
)
def test_maxpool_2d(self):
model = torch.nn.MaxPool2d(5, padding=(1, 2))
x = torch.randn(1, 20, 16, 50, requires_grad=True)
self.run_test(model, x)
def test_maxpool_1d_ceil(self):
model = torch.nn.MaxPool1d(3, 2, ceil_mode=True)
x = torch.randn(20, 16, 50)
self.run_test(model, x)
def test_maxpool_2d_ceil(self):
model = torch.nn.MaxPool2d(3, 2, ceil_mode=True)
x = torch.randn(20, 16, 50, 32)
self.run_test(model, x)
def test_maxpool_3d_ceil(self):
model = torch.nn.MaxPool3d(3, 2, ceil_mode=True)
x = torch.randn(20, 16, 50, 44, 31)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(8)
def test_maxpool_with_indices(self):
model = torch.nn.MaxPool1d(2, stride=1, return_indices=True)
x = torch.randn(20, 16, 50)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(10)
def test_maxpool_dilation(self):
model = torch.nn.MaxPool1d(2, stride=1, dilation=2)
x = torch.randn(20, 16, 50)
self.run_test(model, x)
def test_avgpool_default_stride(self):
class AvgPoolModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.avg_pool2d(x, 2)
model = AvgPoolModel()
x = torch.randn(10, 20, 16, 50)
self.run_test(model, x)
def test_avgpool(self):
model = torch.nn.AvgPool1d(2, stride=1)
x = torch.randn(20, 16, 50)
self.run_test(model, x)
def test_avgpool_1d_ceil(self):
model = torch.nn.AvgPool1d(3, 2, ceil_mode=True)
x = torch.randn(1, 1, 7)
self.run_test(model, x)
def test_avgpool_2d_ceil(self):
model = torch.nn.AvgPool2d(3, 2, ceil_mode=True)
x = torch.randn(20, 16, 50, 32)
self.run_test(model, x)
def test_avgpool_3d_ceil(self):
model = torch.nn.AvgPool3d(3, 2, ceil_mode=True)
x = torch.randn(20, 16, 50, 44, 31)
y = torch.randn(32, 8, 50, 44, 31)
self.run_test(
model,
x,
input_names=["x"],
dynamic_axes={"x": [0, 1]},
additional_test_inputs=[y],
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_floating_point(self):
class FloatingPoint(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
if x.is_floating_point():
return x.new_zeros(x.shape)
return x.new_zeros(x.shape)
x = torch.randn(2, 3, 4)
self.run_test(
FloatingPoint(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
)
self.run_test(FloatingPoint(), x, remained_onnx_input_idx=[])
class FloatingPoint(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
if x.size(0) > 1:
a = x + 2
if a.is_floating_point():
return x + 1
return x + 1
return x
x = torch.randn(2, 3, 4)
self.run_test(FloatingPoint(), x)
# Operator rank mismatch between outputs of two branches for opsets below 11.
@skipIfUnsupportedMinOpsetVersion(11)
def test_floating_point_infer_dtype(self):
class FloatingPoint(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
if x.size(0) > 1:
a = x + 2
if a.is_floating_point():
return x.new_zeros(x.shape[1:])
return x.new_zeros(x.shape)
return x
x = torch.randn(2, 3, 4)
self.run_test(
FloatingPoint(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
)
self.run_test(FloatingPoint(), x, remained_onnx_input_idx=[])
class FloatingPoint(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
if x.size(0) > 1:
a = x + 2
if a.is_floating_point():
return x + 1
return x
return x
x = torch.randn(2, 3, 4).to(torch.int32)
self.run_test(FloatingPoint(), x)
@skipIfUnsupportedMinOpsetVersion(12)
def test_prim_min(self):
@torch.jit.script
def list_append(boxes: List[Tensor]):
temp = []
for i, b in enumerate(
boxes
): # enumerate is creating a prim::min op in torch graph
temp.append(torch.full_like(b[:, 1], i))
return temp[0]
class Min(torch.nn.Module):
def forward(self, x):
boxes = [x for _ in range(3)]
return list_append(boxes)
x = torch.rand(5, 5)
self.run_test(Min(), (x,))
class M(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
i = 3
return min(x[i], i)
x = torch.arange(6, dtype=torch.int64)
self.run_test(M(), (x,))
def test_arithmetic(self):
class ArithmeticModule(torch.nn.Module):
def forward(self, x):
x = x + 2
x = x - 4
x = x * 6
x = x / 8
return x
x = torch.randn(2, 3, 4)
self.run_test(ArithmeticModule(), x)
def test_arithmetic_prim_long(self):
class ArithmeticModule(torch.nn.Module):
def forward(self, x, y: int):
x = x + y
x = x - y
x = x * (y * 3)
x = x / (y * 4)
return x
x = torch.randn(2, 3, 4)
y = 2
self.run_test(ArithmeticModule(), (x, y))
class ArithmeticModule(torch.nn.Module):
def forward(self, x):
x = x + 2
x = x - 3
return x.shape[0]
x = torch.randn(2, 3, 4)
self.run_test(ArithmeticModule(), x, remained_onnx_input_idx=[])
@skipDtypeChecking
def test_arithmetic_prim_float(self):
class ArithmeticModule(torch.nn.Module):
def forward(self, x, y: float):
x = x + y
x = x - y
x = x * (y * 3)
x = x / (y * 4)
return x
x = torch.randn(2, 3, 4)
y = 2.5
self.run_test(ArithmeticModule(), (x, y))
class ArithmeticModule(torch.nn.Module):
def forward(self, x):
x = x + 2
x = x - 3
return x.shape[1] / 2
x = torch.randn(2, 3, 4)
self.run_test(ArithmeticModule(), x, remained_onnx_input_idx=[])
@skipDtypeChecking
def test_arithmetic_prim_bool(self):
class ArithmeticModule(torch.nn.Module):
def forward(self, x, y: int, z: bool, t: float):
x = x + y
x = x - y
if z:
x = x * (y * 3)
x = x / (y * 4)
return x / t, z
x = torch.randn(2, 3, 4)
y = 2
z = False
t = 2.5
self.run_test(ArithmeticModule(), (x, y, z, t))
class ArithmeticModule(torch.nn.Module):
def forward(self, x: int, y: int):
return x == y
x = 3
y = 2
self.run_test(ArithmeticModule(), (x, y))
# In tracing, None outputs are removed. In scripting they're kept but
# we don't know Optional.elem_type, so we can't construct a valid Optional.
# Tests for Optional outputs (control flow with None in one branch,
# not-None in another) are in test_pytorch_onnx_no_runtime.py.
@skipScriptTest()
def test_tuple_with_none_outputs(self):
class TupleModel(torch.nn.Module):
def forward(self, x):
return (x, (x, None, (x, None)))
x = torch.randn(3, 4)
self.run_test(TupleModel(), (x,))
# In scripting, the first transpose node does not carry shape and dtype info.
# The following test only works when onnx shape inference is enabled.
def test_arithmetic_infer_dtype(self):
class ArithmeticModule(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
x = x.t()
x = x + 2
x = x - 4
x = x * 6
x = x / 8
return x
x = torch.randn(2, 3)
self.run_test(ArithmeticModule(), x)
@unittest.skip("Floor division on ONNX is inconsistent with eager (see #78411)")
def test_floor_div(self):
class FloorDivModule(torch.nn.Module):
def forward(self, x, y):
return (
x // 3,
x // 2.0,
x.to(dtype=torch.float64) // 3,
x.to(dtype=torch.float64) // 2.0,
x.to(dtype=torch.int64) // 3,
x.to(dtype=torch.int64) // 2.0,
x // (y + 1.0).to(dtype=torch.int64),
x // y,
x.to(dtype=torch.float64) // y.to(dtype=torch.int64),
x.to(dtype=torch.float64) // y.to(dtype=torch.float64),
x.to(dtype=torch.int64) // y.to(dtype=torch.int64),
x.to(dtype=torch.int64) // y,
)
x = torch.arange(-2, 4).reshape(2, 3, 1)
y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4)
self.run_test(FloorDivModule(), (x, y))
@unittest.skip("Floor division on ONNX is inconsistent with eager (see #78411)")
def test_floor_div_script(self):
class FloorDivModule(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, y):
return x // 3, x // 2.0, x // y
x = torch.arange(-2, 4).reshape(2, 3, 1)
y = torch.randn(2, 3, 4)
self.run_test(FloorDivModule(), (x, y))
@unittest.skip("Floor division on ONNX is inconsistent with eager (see #78411)")
@skipIfUnsupportedMinOpsetVersion(9)
def test_floordiv(self):
class FloordivModule(torch.nn.Module):
def forward(self, x):
return x.new_zeros(x.size(2) // x.size(1))
x = torch.randn(2, 3, 4)
self.run_test(
FloordivModule(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
)
self.run_test(FloordivModule(), (x,), remained_onnx_input_idx=[])
def test_div(self):
class DivModule(torch.nn.Module):
def forward(self, x, y):
return x / y, torch.true_divide(x, y)
x = torch.randn(2, 3, 4).to(torch.int)
y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)
self.run_test(DivModule(), (x, y))
self.run_test(DivModule(), (x.float(), y.float()))
# Note: div cannot (generally) be exported via scripting
# since its type promotion logic is dependent on knowing the scalar types
# of the input tensors. That is, the ONNX graph is dependent on the
# data type of the inputs. This makes it appropriate for tracing only.
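# For example (a sketch of the behavior the next test exercises): tracing DivModule on
# int tensors under a torch.float default dtype bakes a cast-to-float into the exported
# graph, while tracing the same module under a torch.double default bakes in a cast-to-double.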
def test_div_promotion_trace(self):
class DivModule(torch.nn.Module):
def forward(self, x, y):
return x / y, torch.true_divide(x, y)
x = torch.randn(2, 3, 4).to(torch.int)
y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)
prev_default = torch.get_default_dtype()
torch.set_default_dtype(torch.float)
self.run_test(torch.jit.trace(DivModule(), (x, y)), (x, y))
torch.set_default_dtype(torch.double)
self.run_test(torch.jit.trace(DivModule(), (x, y)), (x, y))
torch.set_default_dtype(prev_default)
# In scripting x, y do not carry shape and dtype info.
# The following test only works when onnx shape inference is enabled.
def test_div_promotion_script(self):
class DivModule(torch.nn.Module):
def forward(self, x, y):
# Add transpose to hide shape/type information
# Otherwise shape and type are still available from the input.
x = x.transpose(1, 2)
y = y.transpose(1, 2)
return x / y, torch.true_divide(x, y)
x = torch.randn(2, 3, 4).to(torch.int)
y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)
prev_default = torch.get_default_dtype()
# 1. x,y are int, and output is float.
# This can be handled by the default case, where both are cast to float.
# It works even if the types of x and y are unknown.
torch.set_default_dtype(torch.float)
self.run_test(torch.jit.script(DivModule()), (x, y))
# 2. x,y are int, and output is double.
# This can be handled by the default case, where both are cast to double.
# It works even if the types of x and y are unknown.
torch.set_default_dtype(torch.double)
self.run_test(torch.jit.script(DivModule()), (x, y))
# 3. x is int, y is double, and output is double.
# This can only be handled when the types of both x and y are known.
torch.set_default_dtype(prev_default)
x = torch.randn(2, 3, 4).to(torch.int)
y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.double)
self.run_test(torch.jit.script(DivModule()), (x, y))
@skipDtypeChecking
def test_div_rounding_mode(self):
class TrueDivModule(torch.nn.Module):
def forward(self, x, y):
return (
x.div(y, rounding_mode=None),
torch.div(x, y, rounding_mode=None),
)
class TruncDivModule(torch.nn.Module):
def forward(self, x, y):
return (
x.div(y, rounding_mode="trunc"),
torch.div(x, y, rounding_mode="trunc"),
)
class FloorDivModule(torch.nn.Module):
def forward(self, x, y):
return (
x.div(y, rounding_mode="floor"),
torch.div(x, y, rounding_mode="floor"),
)
modules = [TrueDivModule(), TruncDivModule(), FloorDivModule()]
x = (torch.randn(2, 3, 4) * 100).to(torch.int)
y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)
for module in modules:
self.run_test(module, (x, y))
self.run_test(torch.jit.trace(module, (x, y)), (x, y))
self.run_test(torch.jit.script(module), (x, y))
x = torch.randn(2, 3, 4)
y = torch.rand(2, 3, 4) * 10.0 + 0.1
for module in modules:
self.run_test(module, (x, y))
self.run_test(torch.jit.trace(module, (x, y)), (x, y))
self.run_test(torch.jit.script(module), (x, y))
def test_slice_trace(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x[0:1]
x = torch.randn(3)
self.run_test(MyModule(), x)
def test_slice_neg(self):
class NegSlice(torch.nn.Module):
def forward(self, x):
return x[-1:]
x = torch.randn(3, 4, 5)
self.run_test(NegSlice(), x)
def test_slice_neg_large(self):
class NegSlice(torch.nn.Module):
def forward(self, x):
return x[:, :, -3:-1, :, -1]
x = torch.randn(3, 4, 5, 6, 7)
self.run_test(NegSlice(), x)
def test_slice_neg_large_negone(self):
class NegSlice(torch.nn.Module):
def forward(self, x):
return x[:, :, :, :, -1]
x = torch.randn(3, 4, 5, 6, 7)
self.run_test(NegSlice(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_slice_with_input_index(self):
class InputIndexSlice(torch.nn.Module):
def forward(self, x, y):
x[: y.size(0), 0, :] = y
return x
x = torch.zeros((56, 6, 256))
y = torch.rand((22, 256))
self.run_test(InputIndexSlice(), (x, y))
@skipIfUnsupportedMinOpsetVersion(10)
@skipScriptTest() # scripting tuple/list append
def test_slice_dynamic(self):
class DynamicSliceExportMod(torch.nn.Module):
def forward(self, x):
results = []
for i in range(4):
results.append(x[: x.size(0) - i, i : x.size(2), i:3])
return tuple(results)
x = torch.rand(5, 5, 5)
y = torch.randn(6, 7, 8)
self.run_test(
DynamicSliceExportMod(),
x,
additional_test_inputs=[y],
input_names=["input_1"],
output_names=["output_1"],
dynamic_axes={"input_1": [0, 1, 2], "output_1": [0, 1, 2]},
)
@skipIfUnsupportedMinOpsetVersion(10)
def test_slice_dynamic_script(self):
class DynamicSliceModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x[1 : x.size(1)]
x = torch.rand(1, 2)
self.run_test(DynamicSliceModel(), x)
@skipIfUnsupportedMinOpsetVersion(10)
def test_slice_dynamic_shape_script(self):
class DynamicSliceModel(torch.nn.Module):
def forward(self, x):
return x.new_zeros(x.shape[1 : x.size(2)])
x = torch.rand(1, 2, 3, 4)
self.run_test(
DynamicSliceModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2, 3]}
)
self.run_test(DynamicSliceModel(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(10)
@skipScriptTest() # scripting tuple/list append
def test_slice_dynamic_to_end(self):
class DynamicSliceExportMod(torch.nn.Module):
def forward(self, x):
results = []
for i in range(4):
results.append(x[:, i:, x.size(2) - 5])
return tuple(results)
x = torch.rand(5, 5, 5)
self.run_test(
DynamicSliceExportMod(),
x,
dynamic_axes={"input_1": [0, 1, 2], "output_1": [0, 1, 2]},
)
def test_square(self):
class Square(torch.nn.Module):
def forward(self, x):
return torch.square(x)
x = torch.randn(2, 3, 4)
self.run_test(Square(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_dynamic(self):
class ArangeModel(torch.nn.Module):
def forward(self, input):
return (
torch.arange(input.shape[0]),
torch.arange(12),
torch.arange(start=input.shape[0], end=input.shape[0] + 5),
)
x = torch.randn(5, 3, 2)
y = torch.randn(8, 3, 2)
self.run_test(
ArangeModel(),
x,
additional_test_inputs=[y],
input_names=["input_1"],
output_names=["output_1", "output_2", "output_3"],
dynamic_axes={"input_1": [0], "output_1": [0]},
)
self.run_test(
torch.jit.script(ArangeModel()),
x,
additional_test_inputs=[y],
input_names=["input_1"],
output_names=["output_1", "output_2", "output_3"],
dynamic_axes={"input_1": [0], "output_1": [0]},
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_dynamic_arange_out(self):
class ArangeOutModel(torch.nn.Module):
def forward(self, end):
out_t = torch.tensor([1], dtype=torch.int64)
return torch.arange(end, out=out_t)
x = torch.tensor(8)
self.run_test(ArangeOutModel(), (x))
@skipIfUnsupportedMinOpsetVersion(9)
def test_dynamic_arange_start_out(self):
class ArangeStartOutModel(torch.nn.Module):
def forward(self, start, end):
out_t = torch.tensor([1], dtype=torch.int64)
return torch.arange(start.size(0), end, out=out_t)
x = torch.randn(2, 3, 4)
y = torch.tensor(8)
self.run_test(
ArangeStartOutModel(),
(x, y),
input_names=["x", "y"],
dynamic_axes={"x": [0, 1, 2]},
)
self.run_test(ArangeStartOutModel(), (x, y), remained_onnx_input_idx=[1])
@skipIfUnsupportedMinOpsetVersion(9)
def test_linspace(self):
class LinspaceModel(torch.nn.Module):
def forward(self, start, end, steps):
return torch.linspace(start, end, steps)
x = torch.tensor(3, dtype=torch.float)
y = torch.tensor(10, dtype=torch.float)
z = torch.tensor(5, dtype=torch.int)
self.run_test(LinspaceModel(), (x, y, z))
@skipIfUnsupportedMinOpsetVersion(9)
def test_linspace_negative_start(self):
class LinspaceModel(torch.nn.Module):
def forward(self, start, end, steps):
return torch.linspace(start, end, steps)
x = torch.tensor(-1, dtype=torch.float)
y = torch.tensor(1, dtype=torch.float)
z = torch.tensor(6, dtype=torch.int)
self.run_test(LinspaceModel(), (x, y, z))
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_with_floats_out(self):
class ArangeModelEnd(torch.nn.Module):
def forward(self, end):
out_t = torch.tensor([1], dtype=torch.float)
return torch.arange(end, out=out_t)
y = torch.tensor(8.5, dtype=torch.float)
self.run_test(ArangeModelEnd(), (y))
class ArangeModelStep(torch.nn.Module):
def forward(self, start, end):
out_t = torch.tensor([1], dtype=torch.float)
return torch.arange(start.size(0), end, 1.5, out=out_t)
x = torch.randn(2, 3, 4)
y = torch.tensor(8.5, dtype=torch.float)
self.run_test(
ArangeModelStep(),
(x, y),
input_names=["x", "y"],
dynamic_axes={"x": [0, 1, 2]},
)
self.run_test(ArangeModelStep(), (x, y), remained_onnx_input_idx=[1])
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_with_floats(self):
class ArangeModelEnd(torch.nn.Module):
def forward(self, end):
return torch.arange(end)
y = torch.tensor(8.5, dtype=torch.float)
self.run_test(ArangeModelEnd(), (y))
class ArangeModelStep(torch.nn.Module):
def forward(self, start, end):
return torch.arange(start.size(0), end, 1.5)
x = torch.randn(2, 3, 4)
y = torch.tensor(8.5, dtype=torch.float)
self.run_test(
ArangeModelStep(),
(x, y),
input_names=["x", "y"],
dynamic_axes={"x": [0, 1, 2]},
)
self.run_test(ArangeModelStep(), (x, y), remained_onnx_input_idx=[1])
class ArangeModelStepNeg(torch.nn.Module):
def forward(self, start, end):
return torch.arange(end, start.size(0), -1.5)
x = torch.randn(2, 3, 4)
y = torch.tensor(8.5, dtype=torch.float)
self.run_test(
ArangeModelStepNeg(),
(x, y),
input_names=["x", "y"],
dynamic_axes={"x": [0, 1, 2]},
)
self.run_test(ArangeModelStepNeg(), (x, y), remained_onnx_input_idx=[1])
class ArangeModelStart(torch.nn.Module):
def forward(self, start, end):
return torch.arange(start.size(0), end)
x = torch.randn(2, 3, 4)
y = torch.tensor(8.5, dtype=torch.float)
self.run_test(
ArangeModelStart(),
(x, y),
input_names=["x", "y"],
dynamic_axes={"x": [0, 1, 2]},
)
self.run_test(ArangeModelStart(), (x, y), remained_onnx_input_idx=[1])
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_with_floats_override(self):
class ArangeModelEnd(torch.nn.Module):
def forward(self, end):
return torch.arange(end, dtype=torch.int64)
y = torch.tensor(8.5, dtype=torch.float)
self.run_test(ArangeModelEnd(), (y))
class ArangeModelStep(torch.nn.Module):
def forward(self, start, end):
return torch.arange(start.size(0), end, 1.5, dtype=torch.int64)
x = torch.randn(2, 3, 4)
y = torch.tensor(8.5, dtype=torch.float)
self.run_test(
ArangeModelStep(),
(x, y),
input_names=["x", "y"],
dynamic_axes={"x": [0, 1, 2]},
)
self.run_test(ArangeModelStep(), (x, y), remained_onnx_input_idx=[1])
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_out(self):
class ArangeOutModel(torch.nn.Module):
def forward(self, end):
out_t = torch.tensor([1], dtype=torch.float)
return torch.arange(end, out=out_t)
x = torch.tensor(8.5, dtype=torch.float)
self.run_test(ArangeOutModel(), (x))
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_start_out(self):
class ArangeStartOutModel(torch.nn.Module):
def forward(self, start, end):
out_t = torch.tensor([1], dtype=torch.float)
return torch.arange(start.size(0), end, out=out_t)
x = torch.randn(2, 3, 4)
y = torch.tensor(8.5, dtype=torch.float)
self.run_test(
ArangeStartOutModel(),
(x, y),
input_names=["x", "y"],
dynamic_axes={"x": [0, 1, 2]},
)
self.run_test(ArangeStartOutModel(), (x, y), remained_onnx_input_idx=[1])
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_no_type(self):
class ArangeModel(torch.nn.Module):
def forward(self, end):
return torch.arange(end), torch.arange(0, end)
x = torch.tensor(6.2, dtype=torch.float)
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_size(self):
class SizeModel(torch.nn.Module):
def forward(self, input):
return (
torch.arange(input.size(0)),
torch.arange(input.size(-1)),
torch.ones(input.shape),
)
x = torch.randn(5, 3, 2)
self.run_test(SizeModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]})
self.run_test(SizeModel(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
@skipScriptTest() # x.stride() not scriptable
def test_as_strided(self):
class Model(torch.nn.Module):
def forward(self, x):
chunk_size = list(x.size())
chunk_size[1] = chunk_size[1] * 2 - 1
chunk_stride = list(x.stride())
chunk_stride[1] = chunk_stride[1] // 2
return x.as_strided(
(3, 3, 3), (1, 4, 2), storage_offset=2
), x.as_strided(chunk_size, chunk_stride)
x = torch.randn(5, 8, 7)
self.run_test(Model(), x)
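    # For reference, as_strided maps output index (i, j, k) to
    # storage[storage_offset + i * stride[0] + j * stride[1] + k * stride[2]]; e.g.
    # x.as_strided((3, 3, 3), (1, 4, 2), storage_offset=2) reads output element (0, 1, 2)
    # from storage position 2 + 0 * 1 + 1 * 4 + 2 * 2 = 10.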
@skipScriptTest() # Ellipses followed by tensor indexing not scriptable
def test_tensor_index_advanced_indexing_ellipsis(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[..., torch.tensor([2, 1]), torch.tensor([0, 3])]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), (m1,))
def test_tensor_index_advanced_indexing(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[
:,
torch.tensor([[0, 2], [1, 1]]),
:,
torch.tensor([2, 1]),
torch.tensor([0, 3]),
]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), (m1,))
class MyModel(torch.nn.Module):
def forward(self, input):
return input[
:, torch.tensor([0, 2]), None, 2:4, torch.tensor([[1, 3], [4, 0]])
]
self.run_test(MyModel(), (m1,))
class MyModel(torch.nn.Module):
def forward(self, input):
return input[
:,
torch.tensor([0, 2]),
torch.tensor([1]),
2:4,
torch.tensor([[1], [4]]),
]
self.run_test(MyModel(), (m1,))
def test_tensor_index_advanced_indexing_consecutive(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[
:, torch.tensor([0, 2]), torch.tensor([[1, 3], [4, 0]]), None
]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), (m1,))
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put(self):
class IndexPutModel(torch.nn.Module):
def forward(self, x, ind, update):
x[ind] = update
return x
x = torch.randn(3, 4)
ind = torch.tensor([1], dtype=torch.long)
update = torch.ones(4)
self.run_test(IndexPutModel(), (x, ind, update))
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_singular(self):
class IndexPutBoolModel(torch.nn.Module):
def forward(self, mask, indices):
mask[indices] = True
return mask
mask = torch.zeros(100, dtype=torch.bool)
indices = (torch.rand(25) * mask.shape[0]).to(torch.int64)
self.run_test(IndexPutBoolModel(), (mask, indices))
class IndexPutFloatModel(torch.nn.Module):
def forward(self, mask, indices):
mask[indices] = torch.tensor(5.5)
return mask
mask = torch.rand(100, dtype=torch.float)
indices = (torch.rand(50) * mask.shape[0]).to(torch.int64)
self.run_test(IndexPutFloatModel(), (mask, indices))
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_accumulate(self):
class IndexPutModel(torch.nn.Module):
def forward(self, x, ind, update):
return x.index_put((ind,), update, accumulate=True)
x = torch.randn(3, 4)
ind = torch.tensor([2], dtype=torch.long)
update = torch.ones(4)
self.run_test(IndexPutModel(), (x, ind, update))
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_slice_index(self):
class IndexPutModel(torch.nn.Module):
def forward(self, x, update):
x[1:2, 1:3, torch.tensor([1])] += update
return x
x = torch.randn(3, 4, 5)
update = torch.tensor([10, 15]).view(1, 2, 1)
self.run_test(IndexPutModel(), (x, update))
class IndexPutModel2(torch.nn.Module):
def forward(self, x, update):
x[torch.tensor([0, 2]), torch.tensor([1, 2])] += update
return x
x = torch.randn(3, 4, 5)
update = torch.randn(2, 5)
self.run_test(IndexPutModel2(), (x, update))
class IndexPutModel3(torch.nn.Module):
def forward(self, x, update):
x[torch.tensor([0, 2]), 1:2] += update
return x
x = torch.randn(3, 4, 5)
update = torch.tensor([10, 15]).view(2, 1, 1)
self.run_test(IndexPutModel3(), (x, update))
class IndexPutModel4(torch.nn.Module):
def forward(self, x, update):
x[torch.tensor([0, 2]), 2] += update
return x
x = torch.randn(3, 4, 5)
update = torch.tensor([10, 15]).view(2, 1)
self.run_test(IndexPutModel4(), (x, update))
class IndexPutModel5(torch.nn.Module):
def forward(self, x, update):
x[1:3, torch.tensor([0, 2]), 2] += update
return x
x = torch.randn(3, 4, 5)
update = torch.tensor([10, 15]).view(2, 1)
self.run_test(IndexPutModel5(), (x, update))
class IndexPutModel6(torch.nn.Module):
def forward(self, x, update):
x[1:3, 0] = update
return x
x = torch.randn(3, 4, 5)
update = torch.arange(2 * 5).to(torch.float).view(2, 5)
self.run_test(IndexPutModel6(), (x, update))
class IndexPutModel7(torch.nn.Module):
def forward(self, x, update):
x[1:, 0] = update
return x
x = torch.randn(3, 4, 5)
update = torch.arange(2 * 5).to(torch.float).view(2, 5)
self.run_test(IndexPutModel7(), (x, update))
class IndexPutModel8(torch.nn.Module):
def forward(self, x, update):
x[:3, 0] = update
return x
x = torch.randn(3, 4, 5)
update = torch.arange(3 * 5).to(torch.float).view(3, 5)
self.run_test(IndexPutModel8(), (x, update))
class IndexPutModel9(torch.nn.Module):
def forward(self, poses):
w = 32
x = poses[:, :, 0] - (w - 1) // 2
boxes = torch.zeros([poses.shape[0], 17, 4])
boxes[:, :, 0] = x
return boxes
x = torch.zeros([2, 17, 3], dtype=torch.int64)
self.run_test(IndexPutModel9(), (x,))
class IndexPutModel10(torch.nn.Module):
def forward(self, x, ind, update):
x[ind, 1:3] = update.view(1, 1, 1, 5).expand(2, 2, 2, 5)
return x
x = torch.randn(3, 4, 5)
ind = torch.tensor([[0, 2], [1, 1]])
update = torch.randn(5)
self.run_test(IndexPutModel10(), (x, ind, update))
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest() # Ellipses followed by tensor indexing not scriptable
def test_index_put_ellipsis(self):
class IndexPutModel(torch.nn.Module):
def forward(self, x, update):
x[..., torch.tensor([2, 1, 3]), 2:4] += update
return x
x = torch.randn(3, 4, 5, 6, 7)
update = torch.randn(3, 1, 1, 3, 2)
self.run_test(IndexPutModel(), (x, update))
class IndexPutModel2(torch.nn.Module):
def forward(self, x, update):
x[2, ..., torch.tensor([2, 1, 3]), 2:4] += update
return x
x = torch.randn(3, 4, 5, 6, 7)
update = torch.randn(4, 1, 3, 2)
self.run_test(IndexPutModel2(), (x, update))
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_loop(self):
@torch.jit.script
def ngram_attention_bias(
sequence_length: int, ngram: int, device: torch.device, dtype: torch.dtype
):
bias = torch.ones(
(ngram, sequence_length), device=device, dtype=dtype
) * float("-inf")
for stream_idx in range(ngram):
for i in range(sequence_length):
bias = bias * 2
bias[stream_idx, i] = 5
bias = bias * 5
bias[0, 0] = 5
for stream_idx in range(ngram):
for i in range(sequence_length):
bias[stream_idx, i] = 5
bias[0, i] = 5
return bias
class ScriptModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.ngram = 2
self.max_target_positions = 512
def forward(self, hidden_states):
seq_length, batch_size = hidden_states.shape[:2]
predict_causal_mask = ngram_attention_bias(
self.max_target_positions,
self.ngram,
hidden_states.device,
hidden_states.dtype,
)
predict_causal_mask = predict_causal_mask[:, :seq_length]
return predict_causal_mask
x = torch.randn(6, 2)
y = torch.randn(4, 1)
self.run_test(
ScriptModel(),
x,
input_names=["x"],
dynamic_axes={"x": {0: "seq_length", 1: "batch_size"}},
additional_test_inputs=[y],
)
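    # The scripted double loops above mutate `bias` element by element, so this mainly
    # exercises index_put lowered from inside TorchScript loops; the exact ONNX ops emitted
    # (e.g. Loop with a scatter in the body) depend on the exporter and are only assumed here.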
@skipIfUnsupportedMinOpsetVersion(11)
def test_copy_(self):
class CopyModel(torch.nn.Module):
def forward(self, x, data):
x[1:3] = data
return x
x = torch.randn(3, 4)
update = torch.randn(2, 4)
self.run_test(CopyModel(), (x, update))
# mixed slice and select
class CopyModel2(torch.nn.Module):
def forward(self, x, data):
x[1:3, 0] = data
return x
x = torch.randn(3, 4)
update = torch.tensor([0], dtype=torch.float32)
self.run_test(CopyModel2(), (x, update))
update = torch.tensor([2, 3], dtype=torch.float32)
self.run_test(CopyModel2(), (x, update))
update = torch.randn(2)
self.run_test(CopyModel2(), (x, update))
class CopyModel3(torch.nn.Module):
def forward(self, x, data):
x[1, 1:3] = data
return x
x = torch.randn(3, 4)
update = torch.tensor([0], dtype=torch.float32)
self.run_test(CopyModel3(), (x, update))
update = torch.tensor([2, 3], dtype=torch.float32)
self.run_test(CopyModel3(), (x, update))
update = torch.randn(2)
self.run_test(CopyModel3(), (x, update))
class CopyModel4(torch.nn.Module):
def forward(self, x, ind, data):
x[ind] = data
return x
x = torch.randn(3, 4)
ind = torch.tensor(2)
data = torch.randn(4)
self.run_test(CopyModel4(), (x, ind, data))
class CopyModel5(torch.nn.Module):
def forward(self, x, mask):
if mask is not None:
x.copy_(mask)
return x
x = torch.randn(3, 4)
mask = torch.randn(3, 1)
self.run_test(CopyModel5(), (x, mask))
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest() # Model not scriptable (output with shape doesn't match the broadcast shape)
def test_copy_tracing(self):
class CopyModel(torch.nn.Module):
def forward(self, x, data):
x[1, 1:3] = data
return x
x = torch.randn(3, 4)
update = torch.randn(1, 2)
self.run_test(CopyModel(), (x, update))
@skipIfUnsupportedMinOpsetVersion(11)
def test_copy_ellipsis(self):
class CopyModel(torch.nn.Module):
def forward(self, x, update):
x[..., 1] = update
return x
x = torch.randn(2, 3, 4)
update = torch.ones(1)
self.run_test(CopyModel(), (x, update))
x = torch.randn(2, 3, 4, 5, 6)
update = torch.ones(1)
self.run_test(CopyModel(), (x, update))
@skipIfUnsupportedMinOpsetVersion(11)
def test_copy_ellipsis_script(self):
class CopyModel(torch.nn.Module):
def forward(self, x, update):
# Insert reshape node to ensure no shape/type info for
# x in scripting, without onnx shape inference.
x = x.reshape(4, 3, 5, 6)
x[2, ..., 1:3] = update
return x
x = torch.randn(3, 4, 5, 6)
update = torch.ones(1)
self.run_test(CopyModel(), (x, update))
@skipIfUnsupportedMinOpsetVersion(10)
def test_flip(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return torch.flip(x, dims=[0])
x = torch.tensor(np.arange(6.0).reshape(2, 3))
self.run_test(MyModule(), x)
def test_randn(self):
class RandN(torch.nn.Module):
def forward(self, x):
return torch.mul(x, (torch.randn(2, 3, 4) + x).size(0))
x = torch.randn(2, 3, 4)
self.run_test(RandN(), x)
def test_rand(self):
class Rand(torch.nn.Module):
def forward(self, x):
return torch.mul(x, (torch.rand(2, 3, 4) + x).size(0))
x = torch.randn(2, 3, 4)
self.run_test(Rand(), x)
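    # torch.randn / torch.rand (and the *_like and bernoulli variants below) produce values that
    # cannot be compared bit-for-bit against ORT, so these tests only consume a deterministic
    # quantity derived from the random tensor, e.g. (torch.randn(2, 3, 4) + x).size(0) == 2,
    # keeping the final output reproducible.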
def test_randn_dtype(self):
class RandN(torch.nn.Module):
def forward(self, x):
# The resulting node's dtype should be double.
return (
x.to(torch.float32)
* torch.randn(2, 3, 4, dtype=torch.double)
* torch.tensor(0, dtype=torch.float32)
)
x = torch.randn(2, 3, 4)
self.run_test(RandN(), x)
def test_rand_dtype(self):
class Rand(torch.nn.Module):
def forward(self, x):
# The resulting node's dtype should be double.
return (
x.to(torch.float32)
* torch.rand(2, 3, 4, dtype=torch.double)
* torch.tensor(0, dtype=torch.float32)
)
x = torch.randn(2, 3, 4)
self.run_test(Rand(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_randn_dynamic_size(self):
class RandN(torch.nn.Module):
def forward(self, x):
return torch.mul(x, torch.randn(x.size()).size(1))
x = torch.randn(2, 3, 4)
self.run_test(RandN(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_rand_dynamic_size(self):
class Rand(torch.nn.Module):
def forward(self, x):
return torch.mul(x, torch.rand(x.size()).size(1))
x = torch.randn(2, 3, 4)
self.run_test(Rand(), x)
def test_randn_like(self):
class RandNLike(torch.nn.Module):
def forward(self, x):
return torch.mul(x, torch.randn_like(x).size(0))
x = torch.randn(2, 3, 4)
self.run_test(RandNLike(), x)
self.run_test(torch.jit.script(RandNLike()), x)
def test_rand_like(self):
class RandLike(torch.nn.Module):
def forward(self, x):
return torch.mul(x, torch.rand_like(x).size(0))
x = torch.randn(2, 3, 4)
self.run_test(RandLike(), x)
self.run_test(torch.jit.script(RandLike()), x)
def test_randn_like_dtype(self):
class RandNLike(torch.nn.Module):
def forward(self, x):
# The resulting node's dtype should be double.
return (
x.to(torch.float32)
* torch.randn_like(x, dtype=torch.double)
* torch.tensor(0, dtype=torch.float32)
)
x = torch.randn(2, 3, 4)
self.run_test(RandNLike(), x)
def test_rand_like_dtype(self):
class RandLike(torch.nn.Module):
def forward(self, x):
# The resulting node's dtype should be double.
return (
x.to(torch.float32)
* torch.rand_like(x, dtype=torch.double)
* torch.tensor(0, dtype=torch.float32)
)
x = torch.randn(2, 3, 4)
self.run_test(RandLike(), x)
def test_bernoulli(self):
class Bernoulli(torch.nn.Module):
def forward(self, x):
return torch.mul(x, torch.bernoulli(x).size(0))
x = torch.empty(3, 3).uniform_(0, 1)
self.run_test(Bernoulli(), x)
x = torch.empty(2, 3, 3, dtype=torch.double).uniform_(0, 1)
self.run_test(Bernoulli(), x)
@unittest.skip("Bug in ORT, skip test until rel-1.11.")
@skipIfUnsupportedMinOpsetVersion(14)
def test_reshape_allowzero(self):
class ReshapeModel(torch.nn.Module):
def forward(self, x):
x = x.reshape(3, 4, 0)
return x
x = torch.randn(0, 3, 4)
self.run_test(ReshapeModel(), x)
def test_reshape_different_rank(self):
class ReshapeModel(torch.nn.Module):
def forward(self, x):
x = x.reshape(-1, 2, 4, 4, 5, 5)
return x
x = torch.randn(1, 32, 5, 5)
self.run_test(ReshapeModel(), x)
def _interpolate(self, x, mode, use_size, is_upsample, align_corners=False):
class MyModel(torch.nn.Module):
__constants__ = [
"mode",
"use_size",
"is_upsample",
"size",
"scale",
"size_array",
"scale_array",
"align_corners",
]
def __init__(self, mode, use_size, is_upsample, align_corners):
super().__init__()
self.mode = mode
self.use_size = use_size
self.is_upsample = is_upsample
self.align_corners = align_corners
self.scale = 2.0 if self.is_upsample else 0.5
self.size = 24 if self.is_upsample else 2
if x.dim() == 3:
self.scale_array = [2.3]
self.size_array = [16]
elif x.dim() == 4:
self.scale_array = [2.3, 3.1]
self.size_array = [16, 32]
else:
self.scale_array = [2.3, 3.1, 4.6]
self.size_array = [16, 32, 64]
def forward(self, x):
if self.use_size:
if self.align_corners:
return torch.nn.functional.interpolate(
x, mode=self.mode, size=self.size, align_corners=True
), torch.nn.functional.interpolate(
x, mode=self.mode, size=self.size_array, align_corners=True
)
return torch.nn.functional.interpolate(
x, mode=self.mode, size=self.size
), torch.nn.functional.interpolate(
x, mode=self.mode, size=self.size_array
)
if self.align_corners:
return torch.nn.functional.interpolate(
x,
mode=self.mode,
scale_factor=self.scale,
recompute_scale_factor=False,
), torch.nn.functional.interpolate(
x,
mode=self.mode,
scale_factor=self.scale_array,
recompute_scale_factor=False,
)
return torch.nn.functional.interpolate(
x,
mode=self.mode,
scale_factor=self.scale,
recompute_scale_factor=False,
), torch.nn.functional.interpolate(
x,
mode=self.mode,
scale_factor=self.scale_array,
recompute_scale_factor=False,
)
model = MyModel(mode, use_size, is_upsample, align_corners)
self.run_test(model, x, atol=1e-6)
def _interpolate_tests(self, is_upsample):
# - cubic mode is not supported for opsets below 11;
# - linear mode does not match for opsets below 11;
modes = ["nearest", "linear", "bicubic"]
if self.opset_version < 11:
modes = ["nearest"]
x = [
torch.randn(1, 2, 6, requires_grad=True),
torch.randn(1, 2, 4, 6, requires_grad=True),
torch.randn(1, 2, 4, 4, 6, requires_grad=True),
]
for mode in modes:
for xi in x:
mode_i = mode
# TODO: enable bicubic downsample when ORT precision loss fixed
if mode == "bicubic" and xi.dim() != 4:
continue
elif mode == "linear":
if xi.dim() == 3:
# TODO : enable when linear mode is implemented for 1d inputs in ORT
continue
elif xi.dim() == 4:
mode_i = "bilinear"
elif xi.dim() == 5:
# TODO : enable when linear mode is implemented for 3d inputs in ORT
mode_i = "trilinear"
continue
self._interpolate(xi, mode_i, True, is_upsample)
# test with align_corners if supported
if mode != "nearest":
self._interpolate(xi, mode_i, True, is_upsample, True)
                # The following cases require dynamic sizes/scales,
                # which are not supported for opset_version < 9
if self.opset_version >= 9:
self._interpolate(xi, mode_i, True, is_upsample)
# test with align_corners if supported
if mode != "nearest":
self._interpolate(xi, mode_i, False, is_upsample, True)
self._interpolate(xi, mode_i, False, is_upsample)
    # ONNX export fails for scripted interpolate because dynamic sizes are not supported for opsets below 9.
@skipIfUnsupportedMinOpsetVersion(9)
def test_interpolate_upsample(self):
self._interpolate_tests(True)
@skipIfUnsupportedMaxOpsetVersion(8)
@skipScriptTest() # Scripting supported for opsets > 8. See test_interpolate_upsample
def test_interpolate_upsample_trace(self):
self._interpolate_tests(True)
@skipIfUnsupportedMinOpsetVersion(9)
def test_interpolate_function_substitution(self):
class ScriptModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.nn.functional.interpolate(
x, mode="nearest", scale_factor=2.0
)
class ScriptModule(torch.jit.ScriptModule):
def __init__(self):
super().__init__()
self.submodule = ScriptModel()
@torch.jit.script_method
def forward(self, input):
return self.submodule(input)
x = torch.randn(1, 2, 4, 4, 6)
self.run_test(ScriptModule(), (x,))
@torch.jit.script
def script_method(x):
return torch.nn.functional.interpolate(x, mode="nearest", scale_factor=2.0)
class TracingModule(torch.nn.Module):
def forward(self, x):
return script_method(x)
self.run_test(TracingModule(), (x,))
@skipIfUnsupportedMinOpsetVersion(10)
def test_interpolate_downsample(self):
self._interpolate_tests(False)
@skipIfUnsupportedMinOpsetVersion(11)
def test_interpolate_half_pixel(self):
# testing whether it uses "half_pixel" or "pytorch_half_pixel"
# see https://github.com/onnx/onnx/blob/main/docs/Operators.md#Resize
class MyModel(torch.nn.Module):
def __init__(self, mode, size):
super().__init__()
self.mode = mode
self.size = size
def forward(self, x):
return torch.nn.functional.interpolate(
x, mode=self.mode, size=self.size
)
modes = ["linear", "bicubic"]
x = [
torch.randn(1, 2, 6, requires_grad=True),
torch.randn(1, 2, 4, 6, requires_grad=True),
torch.randn(1, 2, 4, 4, 6, requires_grad=True),
]
for mode in modes:
for xi in x:
mode_i = mode
if mode == "bicubic" and xi.dim() != 4:
continue
elif mode == "linear":
if xi.dim() == 4:
mode_i = "bilinear"
elif xi.dim() == 5:
mode_i = "trilinear"
for i in range(xi.dim() - 2):
size = list(xi.shape[2:])
size[i] = 1
self.run_test(MyModel(mode_i, size), xi)
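    # Forcing one output size to 1 is what separates the two coordinate transformation modes:
    # per the ONNX Resize spec, "pytorch_half_pixel" maps an output axis of length 1 to source
    # coordinate 0, while "half_pixel" still applies (x_out + 0.5) / scale - 0.5, so results
    # only differ on length-1 output axes.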
@skipIfUnsupportedMinOpsetVersion(11)
def test_interpolate_no_shape(self):
class MyModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, y):
x = torch.add(x, x)
out1 = torch.nn.functional.interpolate(
x, mode="bilinear", size=(16, 16), align_corners=False
)
out2 = torch.nn.functional.interpolate(
x, mode="nearest", size=(int(y.size(0)), int(y.size(1)))
)
return out1, out2
x = torch.randn(1, 2, 4, 4, requires_grad=True)
y = torch.randn(16, 16, requires_grad=True)
self.run_test(
MyModel(),
(x, y),
input_names=["x", "y"],
dynamic_axes={"x": [0, 1, 2, 3], "y": [0, 1]},
)
self.run_test(MyModel(), (x, y), remained_onnx_input_idx=[0])
@skipScriptTest() # scripting raises OnnxRuntimeError
def test_interpolate_adaptive_pooling_error(self):
x = torch.randn(1, 2, 6, requires_grad=True)
with self.assertRaises(RuntimeError) as cm:
self._interpolate(x, "area", True, True)
with self.assertRaises(RuntimeError) as cm:
self._interpolate(x, "area", False, True)
def test_groupnorm(self):
model = torch.nn.GroupNorm(3, 6, 0.002)
x = torch.randn(4, 6, 36, 36, 18)
self.run_test(model, x)
model = torch.nn.GroupNorm(1, 6, 0.002)
x = torch.randn(4, 6, 180, 180)
self.run_test(model, x)
model = torch.nn.GroupNorm(6, 6, 0.002)
x = torch.randn(4, 6, 180, 180)
self.run_test(model, x)
def test_groupnorm_noaffine(self):
model = torch.nn.GroupNorm(4, 8, 0.002, affine=False)
x = torch.randn(3, 8, 224, 224)
self.run_test(model, x)
model = torch.nn.GroupNorm(1, 6, 0.002, affine=False)
x = torch.randn(4, 6, 180, 180)
self.run_test(model, x)
model = torch.nn.GroupNorm(6, 6, 0.002, affine=False)
x = torch.randn(4, 6, 180, 180)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_list_unpack_scripted(self):
class ListUnpack(torch.nn.Module):
def forward(self, x):
a, b = x.shape
return x.new_zeros((a, b))
x = torch.randn(2, 3)
self.run_test(
torch.jit.script(ListUnpack()),
x,
input_names=["x"],
dynamic_axes={"x": [0, 1]},
)
self.run_test(torch.jit.script(ListUnpack()), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_list_unpack_scripted_runs_without_error_with_constructed_list_as_input(
self,
):
class PackUnpack(torch.nn.Module):
"""Create and unpack a list of tensors.
When scripted, it should produce a graph similar to
```
graph(%self : __torch__.PackUnpack,
%a.1 : Tensor,
%b.1 : Tensor):
%packed.1 : Tensor[] = prim::ListConstruct(%a.1, %b.1)
%c.1 : Tensor, %8 : Tensor = prim::ListUnpack(%packed.1)
return (%c.1)
```
"""
def forward(self, a, b):
packed = [a, b]
c, _ = packed
return c
self.run_test(
torch.jit.script(PackUnpack()),
(torch.tensor(0), torch.tensor([42])),
remained_onnx_input_idx=[0],
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_list_unpack_slice_scripted(self):
class ListUnpackSlice(torch.nn.Module):
def forward(self, x):
a, b = x.shape[2:]
return x.new_zeros((a, b))
x = torch.randn(2, 3, 4, 5)
self.run_test(
torch.jit.script(ListUnpackSlice()),
x,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2, 3]},
)
self.run_test(
torch.jit.script(ListUnpackSlice()), x, remained_onnx_input_idx=[]
)
@skipDtypeChecking
def test_pow(self):
class PowModule(torch.nn.Module):
def forward(self, x, y):
return x.pow(y)
x = torch.randn(2, 3, 4)
y = torch.randn(2, 3, 4)
self.run_test(PowModule(), (x, y))
x = torch.randint(10, (2, 3, 4))
y = torch.randint(10, (2, 3, 4)).to(dtype=torch.int32)
self.run_test(PowModule(), (x, y))
x = torch.randint(10, (2, 3, 4))
y = torch.randint(10, (2, 3, 4))
self.run_test(PowModule(), (x, y))
x = torch.randn(2, 3, 4).to(dtype=torch.float64)
y = torch.randint(10, (2, 3, 4))
self.run_test(PowModule(), (x, y))
class PowModule2(torch.nn.Module):
def forward(self, x):
return torch.pow(2, x)
x = torch.randn(1, 10)
self.run_test(PowModule2(), (x,))
x = torch.randint(10, (2, 3, 4))
self.run_test(PowModule2(), (x,))
x = torch.randn(1, 10).to(dtype=torch.float64)
self.run_test(PowModule2(), (x,))
class PowModule3(torch.nn.Module):
def forward(self, x, y):
return y[torch.pow(2, x)]
x = torch.randint(5, (2, 3, 4))
y = torch.rand(100)
self.run_test(PowModule3(), (x, y))
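    # test_pow is decorated with @skipDtypeChecking, presumably because mixed integer/float
    # base and exponent combinations like the ones above can legitimately yield a different
    # result dtype in the exported ONNX graph than PyTorch's type-promotion rules produce.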
    # Arithmetic ops (Add/Sub/Mul/Div/Gemm/Pow/Mod) with low-precision inputs such as uint8 fail in ORT.
    # to(dtype=torch.long) is added so the ORT output type matches the expected type.
    # Expected to be fixed in ONNX opset 14.
@skipIfUnsupportedMaxOpsetVersion(13)
@skipDtypeChecking
def test_arithmeticOps_with_low_precision(self):
class AddModule(torch.nn.Module):
def forward(self, x, y):
return x + y
class SubModule(torch.nn.Module):
def forward(self, x, y):
return x - y
class MulModule(torch.nn.Module):
def forward(self, x, y):
return x * y
class DivModule(torch.nn.Module):
def forward(self, x, y):
return x / y
class PowModule(torch.nn.Module):
def forward(self, x, y):
return x.pow(y)
x = torch.tensor([2, 3, 5], dtype=torch.uint8)
y = torch.tensor([2, 3, 5], dtype=torch.uint8)
z = torch.tensor([1], dtype=torch.uint8)
self.run_test(AddModule(), (x, y))
self.run_test(SubModule(), (x, y))
self.run_test(MulModule(), (x, y))
self.run_test(DivModule(), (x, y))
self.run_test(PowModule(), (x, z))
x = torch.tensor([2, 3, 5], dtype=torch.int8)
y = torch.tensor([2, 3, 5], dtype=torch.int8)
z = torch.tensor([1], dtype=torch.int8)
self.run_test(AddModule(), (x, y))
self.run_test(SubModule(), (x, y))
self.run_test(MulModule(), (x, y))
self.run_test(DivModule(), (x, y))
self.run_test(PowModule(), (x, z))
x = torch.tensor([2, 3, 5], dtype=torch.int16)
y = torch.tensor([2, 3, 5], dtype=torch.int16)
z = torch.tensor([1], dtype=torch.int16)
self.run_test(AddModule(), (x, y))
self.run_test(SubModule(), (x, y))
self.run_test(MulModule(), (x, y))
self.run_test(DivModule(), (x, y))
self.run_test(PowModule(), (x, z))
x = torch.tensor([2, 3, 5], dtype=torch.uint8)
y = torch.tensor([2, 3, 5], dtype=torch.float32)
z = torch.tensor([1], dtype=torch.float64)
self.run_test(AddModule(), (x, y))
self.run_test(SubModule(), (x, y))
self.run_test(MulModule(), (x, y))
self.run_test(DivModule(), (x, y))
self.run_test(PowModule(), (x, z))
x = torch.tensor([2, 3, 5], dtype=torch.uint8)
y = torch.tensor([2, 3, 5], dtype=torch.int64)
z = torch.tensor([1], dtype=torch.int32)
self.run_test(AddModule(), (x, y))
self.run_test(SubModule(), (x, y))
self.run_test(MulModule(), (x, y))
self.run_test(DivModule(), (x, y))
self.run_test(PowModule(), (x, z))
def test_mul_bool(self):
class MyModel(torch.nn.Module):
def forward(self, x, y):
return torch.mul(x, y)
x_t = torch.tensor([True, False, True, False])
y_t = torch.tensor([True, True, False, False])
z_t = torch.tensor([1.0, 2.0, 3.0, 0.0])
self.run_test(MyModel(), (x_t, y_t))
self.run_test(MyModel(), (x_t, z_t))
self.run_test(MyModel(), (z_t, y_t))
# fmod was added in version 10
@skipIfUnsupportedMinOpsetVersion(10)
@skipIfUnsupportedMaxOpsetVersion(13)
def test_mod_with_low_precision(self):
class ModModule(torch.nn.Module):
def forward(self, x, y):
return torch.fmod(x, y).to(dtype=torch.long)
x = torch.tensor([2, 3, 5], dtype=torch.uint8)
y = torch.tensor([2, 3, 5], dtype=torch.uint8)
self.run_test(ModModule(), (x, y))
x = torch.tensor([2, 3, 5], dtype=torch.int8)
y = torch.tensor([2, 3, 5], dtype=torch.int8)
self.run_test(ModModule(), (x, y))
x = torch.tensor([2, 3, 5], dtype=torch.int16)
y = torch.tensor([2, 3, 5], dtype=torch.int16)
self.run_test(ModModule(), (x, y))
x = torch.tensor([2, 3, 5], dtype=torch.uint8)
y = torch.tensor([2, 3, 5], dtype=torch.int32)
self.run_test(ModModule(), (x, y))
x = torch.tensor([2, 3, 5], dtype=torch.uint8)
y = torch.tensor([2, 3, 5], dtype=torch.float64)
self.run_test(ModModule(), (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_empty_constant_shape(self):
class Zeros(torch.nn.Module):
def forward(self, x):
y = torch.zeros(())
y += x
return y
x = torch.tensor(42.0)
self.run_test(Zeros(), x)
class Ones(torch.nn.Module):
def forward(self, x):
y = torch.ones(())
y += x
return y
x = torch.tensor(42.0)
self.run_test(Ones(), x)
class Full(torch.nn.Module):
def forward(self, x):
y = torch.full((), 1.0)
y += x
return y
x = torch.tensor(42.0)
self.run_test(Full(), x)
class Empty(torch.nn.Module):
def forward(self, x):
y = torch.empty(()).fill_(0)
y += x
return y
x = torch.tensor(42.0)
self.run_test(Empty(), x)
def test_std(self):
class StandardDeviation(torch.nn.Module):
def forward(self, input):
return torch.std(input, unbiased=False)
x = torch.randn(2, 3, 4)
model = StandardDeviation()
self.run_test(model, x)
class StandardDeviationUnbiased(torch.nn.Module):
def forward(self, input):
return torch.std(input, unbiased=True)
model = StandardDeviationUnbiased()
self.run_test(model, x)
def test_std_along_dims(self):
class StandardDeviation(torch.nn.Module):
def forward(self, input):
return torch.std(input, dim=(0, 1), unbiased=False)
x = torch.randn(2, 3, 4)
model = StandardDeviation()
self.run_test(model, x)
class StandardDeviationUnbiased(torch.nn.Module):
def forward(self, input):
return torch.std(input, dim=(0, 1), unbiased=True)
x = torch.randn(2, 3, 4)
model = StandardDeviationUnbiased()
self.run_test(model, x)
def test_std_keepdim(self):
class StandardDeviation(torch.nn.Module):
def forward(self, input):
return torch.std(input, dim=(0, 1), unbiased=False, keepdim=True)
x = torch.randn(2, 3, 4)
model = StandardDeviation()
self.run_test(model, x)
class StandardDeviationUnbiased(torch.nn.Module):
def forward(self, input):
return torch.std(input, dim=(0, 1), unbiased=True, keepdim=True)
x = torch.randn(2, 3, 4)
model = StandardDeviationUnbiased()
self.run_test(model, x)
def test_std_correction(self):
class StandardDeviation(torch.nn.Module):
def forward(self, input):
return torch.std(input, dim=(0, 1), correction=3, keepdim=True)
x = torch.randn(2, 3, 4)
model = StandardDeviation()
self.run_test(model, x)
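    # The correction tests rely on var = sum((x - mean) ** 2) / (N - correction) and
    # std = sqrt(var), where N is the number of reduced elements; unbiased=False corresponds
    # to correction=0 and unbiased=True to correction=1.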
def test_var(self):
class Variance(torch.nn.Module):
def forward(self, input):
return torch.var(input, unbiased=False)
x = torch.randn(2, 3, 4)
model = Variance()
self.run_test(model, x)
class VarianceUnbiased(torch.nn.Module):
def forward(self, input):
return torch.var(input, unbiased=True)
model = VarianceUnbiased()
self.run_test(model, x)
class VarianceSqrt(torch.nn.Module):
def forward(self, input):
y = torch.var(input, 1)
return torch.sqrt(y + 1e-8)
x = torch.randn(1, 2, 3, 300, 300)
model = VarianceSqrt()
self.run_test(model, x)
def test_var_along_dims(self):
class Variance(torch.nn.Module):
def forward(self, input):
return torch.var(input, dim=(0, 1), unbiased=False)
x = torch.randn(2, 3, 4)
model = Variance()
self.run_test(model, x)
class VarianceUnbiased(torch.nn.Module):
def forward(self, input):
return torch.var(input, dim=(0, 1), unbiased=True)
x = torch.randn(2, 3, 4)
model = VarianceUnbiased()
self.run_test(model, x)
def test_var_keepdim(self):
class Variance(torch.nn.Module):
def forward(self, input):
return torch.var(input, dim=(0, 1), unbiased=False, keepdim=True)
x = torch.randn(2, 3, 4)
model = Variance()
self.run_test(model, x)
class VarianceUnbiased(torch.nn.Module):
def forward(self, input):
return torch.var(input, dim=(0, 1), unbiased=True, keepdim=True)
x = torch.randn(2, 3, 4)
model = VarianceUnbiased()
self.run_test(model, x)
def test_var_correction(self):
class Variance(torch.nn.Module):
def forward(self, input):
return torch.var(input, dim=(0, 1), correction=3, keepdim=True)
x = torch.randn(2, 3, 4)
model = Variance()
self.run_test(model, x)
def test_var_mean(self):
class Variance(torch.nn.Module):
def forward(self, input):
return torch.var_mean(input, unbiased=False)
x = torch.randn(2, 3, 4)
model = Variance()
self.run_test(model, x)
class VarianceUnbiased(torch.nn.Module):
def forward(self, input):
return torch.var_mean(input, unbiased=True)
model = VarianceUnbiased()
self.run_test(model, x)
def test_var_mean_along_dims(self):
class Variance(torch.nn.Module):
def forward(self, input):
return torch.var_mean(input, dim=(0, 1), unbiased=False)
x = torch.randn(2, 3, 4)
model = Variance()
self.run_test(model, x)
class VarianceUnbiased(torch.nn.Module):
def forward(self, input):
return torch.var_mean(input, dim=(0, 1), unbiased=True)
x = torch.randn(2, 3, 4)
model = VarianceUnbiased()
self.run_test(model, x)
def test_var_mean_mixed_dims(self):
class ReverseDims(torch.nn.Module):
def forward(self, input):
return torch.var_mean(input, dim=(2, 1), unbiased=False)
x = torch.randn(2, 3, 4)
model = ReverseDims()
self.run_test(model, x)
class SkipDims(torch.nn.Module):
def forward(self, input):
return torch.var_mean(input, dim=(0, 2), unbiased=False)
x = torch.randn(2, 3, 4)
model = SkipDims()
self.run_test(model, x)
class NonZeroDims(torch.nn.Module):
def forward(self, input):
return torch.var_mean(input, dim=(1, 2), unbiased=False)
x = torch.randn(2, 3, 4)
model = NonZeroDims()
self.run_test(model, x)
def test_var_mean_keepdim(self):
class Variance(torch.nn.Module):
def forward(self, input):
return torch.var_mean(input, dim=(0, 1), unbiased=False, keepdim=True)
x = torch.randn(2, 3, 4)
model = Variance()
self.run_test(model, x)
class VarianceUnbiased(torch.nn.Module):
def forward(self, input):
return torch.var_mean(input, dim=(0, 1), unbiased=True, keepdim=True)
x = torch.randn(2, 3, 4)
model = VarianceUnbiased()
self.run_test(model, x)
def test_var_mean_correction(self):
class Variance(torch.nn.Module):
def forward(self, input):
return torch.var_mean(input, dim=(0, 1), correction=3, keepdim=True)
x = torch.randn(2, 3, 4)
model = Variance()
self.run_test(model, x)
def test_std_mean(self):
class StandardDeviation(torch.nn.Module):
def forward(self, input):
return torch.std_mean(input, unbiased=False)
x = torch.randn(2, 3, 4)
model = StandardDeviation()
self.run_test(model, x)
class StandardDeviationUnbiased(torch.nn.Module):
def forward(self, input):
return torch.std_mean(input, unbiased=True)
model = StandardDeviationUnbiased()
self.run_test(model, x)
def test_std_mean_along_dims(self):
class StandardDeviation(torch.nn.Module):
def forward(self, input):
return torch.std_mean(input, dim=(0, 1), unbiased=False)
x = torch.randn(2, 3, 4)
model = StandardDeviation()
self.run_test(model, x)
class VarianceUnbiased(torch.nn.Module):
def forward(self, input):
return torch.std_mean(input, dim=(0, 1), unbiased=True)
x = torch.randn(2, 3, 4)
model = VarianceUnbiased()
self.run_test(model, x)
def test_std_mean_keepdim(self):
class StandardDeviation(torch.nn.Module):
def forward(self, input):
return torch.std_mean(input, dim=(0, 1), unbiased=False, keepdim=True)
x = torch.randn(2, 3, 4)
model = StandardDeviation()
self.run_test(model, x)
class StandardDeviationUnbiased(torch.nn.Module):
def forward(self, input):
return torch.std_mean(input, dim=(0, 1), unbiased=True, keepdim=True)
x = torch.randn(2, 3, 4)
model = StandardDeviationUnbiased()
self.run_test(model, x)
def test_std_mean_correction(self):
class StandardDeviation(torch.nn.Module):
def forward(self, input):
return torch.var_mean(input, dim=(0, 1), correction=3, keepdim=True)
x = torch.randn(2, 3, 4)
model = StandardDeviation()
self.run_test(model, x)
def test_bitshift(self):
class BitshiftModel(torch.nn.Module):
def forward(self, input):
return (
input >> 1,
input << 3,
input >> torch.tensor([1, 2]),
input << 4,
)
input = torch.arange(24, dtype=torch.int64).reshape(3, 4, 2)
self.run_test(BitshiftModel(), input)
    # uint8 is not implemented in ORT for the Mul op used when
    # exporting bitshift for opset_version < 10
@skipIfUnsupportedMinOpsetVersion(11)
def test_bitshift_uint8(self):
class BitshiftModel(torch.nn.Module):
def forward(self, input, input2):
return (
input >> 1,
input << 3,
input2 >> torch.tensor([1, 2], dtype=torch.uint8),
input2 << 4,
)
input = torch.arange(24, dtype=torch.uint8).reshape(3, 4, 2)
input2 = torch.arange(24, dtype=torch.uint8).reshape(3, 4, 2)
self.run_test(BitshiftModel(), (input, input2))
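    # These bitshift tests rest on the identities x << n == x * 2**n and x >> n == x // 2**n
    # for non-negative integers, which is why a Mul-based lowering (see the note above about
    # opsets without uint8 support) is equivalent for these inputs.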
def test_narrow(self):
class NarrowModel(torch.nn.Module):
def forward(self, input):
return torch.narrow(input, 0, 0, 2)
x = torch.randn(3, 3, requires_grad=True)
self.run_test(NarrowModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_narrow_dynamic(self):
class NarrowModel(torch.nn.Module):
def forward(self, input):
return torch.narrow(input, 0, 0, input.shape[0] - 1)
x = torch.randn(3, 3, requires_grad=True)
self.run_test(NarrowModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_fill(self):
class IndexFillModel(torch.nn.Module):
def forward(self, input):
index = torch.tensor([2, 0])
return input.index_fill(2, index, -1)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(IndexFillModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_copy(self):
class IndexCopyModel(torch.nn.Module):
def forward(self, input):
index = torch.tensor([2, 0])
source = torch.ones(3, 2, 5)
return input.index_copy(1, index, source)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(IndexCopyModel(), x)
def test_select(self):
class Select(torch.nn.Module):
def forward(self, x):
return x[:, 1]
x = torch.randn(3, 4)
self.run_test(Select(), x)
def test_select_negative_index(self):
class Select(torch.nn.Module):
def forward(self, x):
return x[:, -1]
x = torch.randn(3, 4)
self.run_test(Select(), x)
def test_index_select_constant_scaler_index(self):
class IndexSelectScalerIndexModel(torch.nn.Module):
def forward(self, x):
index = 2
return torch.index_select(x, 1, torch.tensor(index))
x = torch.randn(3, 4)
self.run_test(IndexSelectScalerIndexModel(), x)
def test_index_select_scaler_index(self):
class IndexSelectScalerIndexModel(torch.nn.Module):
def __init__(self, index_base):
super().__init__()
self.index_base = torch.tensor(index_base)
def forward(self, x, index_offset):
index = self.index_base + index_offset
return torch.index_select(x, 1, index)
x = torch.randn(3, 4)
offset = 2
index_offset = torch.tensor(offset)
base = 1
self.run_test(IndexSelectScalerIndexModel(base), (x, index_offset))
def test_take(self):
class TakeModel(torch.nn.Module):
def forward(self, x, y):
return torch.take(x, y)
x = torch.randn(6, 4, 3, 3)
y = torch.tensor([4, 1, 7, 15, 63])
self.run_test(TakeModel(), (x, y))
def test_topk(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return torch.topk(x, 3)
x = torch.arange(1.0, 6.0, requires_grad=True)
self.run_test(MyModule(), x)
@skipIfUnsupportedMinOpsetVersion(10)
def test_topk_int32_k(self):
class Model(torch.nn.Module):
def forward(self, x, k):
return torch.topk(x, k)
x = torch.arange(1.0, 6.0)
k = torch.tensor(3, dtype=torch.int32)
self.run_test(Model(), (x, k))
@skipIfUnsupportedMinOpsetVersion(11)
def test_topk_smallest_unsorted(self):
class MyModule(torch.nn.Module):
def forward(self, x, k):
                # When sorted=False, the order of elements in the output tensors
                # is not expected to match between PyTorch and ORT
topk_unsorted = torch.topk(x, k, largest=False, sorted=False)
topk_sorted = torch.topk(x, k, largest=False, sorted=True)
return topk_sorted, torch.sort(topk_unsorted.values).values
x = torch.arange(1.0, 6.0, requires_grad=True)
k = torch.tensor(3)
self.run_test(MyModule(), (x, k))
@skipIfUnsupportedMinOpsetVersion(10)
def test_topk_script(self):
class MyModuleDynamic(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, k):
return torch.topk(x, k)
x = torch.arange(1.0, 6.0, requires_grad=True)
k = torch.tensor(3)
self.run_test(MyModuleDynamic(), [x, k])
@skipScriptTest() # Python builtin apply of FunctionMeta object is currently not supported in Torchscript.
@skipIfUnsupportedMinOpsetVersion(11) # Clip op min is an input since opset 11.
def test_auto_grad(self):
class MyClip(torch.autograd.Function):
@staticmethod
def forward(ctx, input, scalar):
ctx.save_for_backward(input)
return input.clamp(min=scalar)
class MyRelu(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.clamp(min=0)
def symbolic_python_op(
ctx: torch.onnx.SymbolicContext, g: torch._C.Graph, *args, **kwargs
):
n = ctx.cur_node
name = kwargs["name"]
if name == "MyClip":
return g.op("Clip", args[0], args[1], outputs=n.outputsSize())
elif name == "MyRelu":
return g.op("Relu", args[0], outputs=n.outputsSize())
else:
# TODO(justinchuby): Remove reference to internal names in symbolic_helper
return torch.onnx.symbolic_helper._unimplemented(
"prim::PythonOp", "unknown node kind: " + name
)
torch.onnx.register_custom_op_symbolic("prim::PythonOp", symbolic_python_op, 1)
self.addCleanup(torch.onnx.unregister_custom_op_symbolic, "prim::PythonOp", 1)
class MyClipModule(torch.nn.Module):
def forward(self, x, min):
return MyClip.apply(x, min)
x = torch.randn(3, 3)
min = torch.tensor([0.0])
self.run_test(MyClipModule(), (x, min))
class MyReluModule(torch.nn.Module):
def forward(self, x):
return MyRelu.apply(x)
x = torch.randn(3, 3)
self.run_test(MyReluModule(), x)
def test_clip_int(self):
class MyClipInt(torch.nn.Module):
def forward(self, x):
return torch.clamp(x, 0, 1)
self.run_test(MyClipInt(), torch.randn(3, 3).to(torch.int64))
def test_relu_int(self):
self.run_test(torch.nn.ReLU(), torch.randn(3, 3).to(torch.int32))
def test_pad_int(self):
class MyPadInt(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.pad(x, (1, 1))
self.run_test(MyPadInt(), torch.randn(3, 3).to(torch.int32))
def test_min_int(self):
class MyMinInt(torch.nn.Module):
def forward(self, x):
return torch.min(x, x + 1)
self.run_test(MyMinInt(), torch.randn(3, 3).to(torch.int32))
def test_max_int(self):
class MyMaxnInt(torch.nn.Module):
def forward(self, x):
return torch.max(x, x + 1)
self.run_test(MyMaxnInt(), torch.randn(3, 3).to(torch.int32))
@skipIfUnsupportedOpsetVersion([7])
def test_normalize(self):
class Model(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.normalize(x)
x = torch.randn(3, 3)
self.run_test(Model(), x)
def test_layer_norm(self):
model = torch.nn.LayerNorm([10, 10])
x = torch.randn(20, 5, 10, 10)
self.run_test(model, x)
def test_batchnorm1d(self):
x = torch.randn(10, 10)
model = torch.nn.BatchNorm1d(10, affine=True)
self.run_test(model, x)
x = torch.randn(10, 10, 128)
self.run_test(model, x)
def test_batchnorm1d_noaffine(self):
x = torch.randn(10, 10)
model = torch.nn.BatchNorm1d(10, affine=False)
self.run_test(model, x)
x = torch.randn(10, 10, 128)
self.run_test(model, x)
def test_batchnorm1d_norunningstats(self):
x = torch.randn(10, 10)
model = torch.nn.BatchNorm1d(10, track_running_stats=False)
self.run_test(model, x)
x = torch.randn(10, 10, 128)
self.run_test(model, x)
def test_batchnorm2d(self):
x = torch.randn(10, 3, 128, 128)
model = torch.nn.BatchNorm2d(3, affine=True)
self.run_test(model, x)
def test_batchnorm2d_noaffine(self):
x = torch.randn(10, 3, 128, 128)
model = torch.nn.BatchNorm2d(3, affine=False)
self.run_test(model, x)
def test_batchnorm2d_norunningstats(self):
x = torch.randn(10, 3, 128, 128)
model = torch.nn.BatchNorm2d(3, track_running_stats=False)
self.run_test(model, x)
def test_batchnorm3d(self):
x = torch.randn(10, 3, 64, 64, 64)
model = torch.nn.BatchNorm3d(3, affine=True)
self.run_test(model, x)
def test_batchnorm3d_noaffine(self):
x = torch.randn(10, 3, 64, 64, 64)
model = torch.nn.BatchNorm3d(3, affine=False)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(
9
) # Because ConstantOfShape op is not supported for opset < 9
def test_instancenorm1d_runningstats(self):
x = torch.randn(10, 5, 128)
model = torch.nn.InstanceNorm1d(5, affine=True, track_running_stats=True)
self.run_test(model, x)
model = torch.nn.InstanceNorm1d(5, affine=False, track_running_stats=True)
self.run_test(model, x)
def test_instancenorm1d_norunningstats(self):
x = torch.randn(10, 5, 128)
model = torch.nn.InstanceNorm1d(5, affine=True, track_running_stats=False)
self.run_test(model, x)
model = torch.nn.InstanceNorm1d(5, affine=False, track_running_stats=False)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(
9
) # Because ConstantOfShape op is not supported for opset < 9
def test_instancenorm2d_runningstats(self):
x = torch.randn(10, 3, 128, 128)
model = torch.nn.InstanceNorm2d(3, affine=True, track_running_stats=True)
self.run_test(model, x)
model = torch.nn.InstanceNorm2d(3, affine=False, track_running_stats=True)
self.run_test(model, x)
def test_instancenorm2d_norunningstats(self):
x = torch.randn(10, 3, 128, 128)
model = torch.nn.InstanceNorm2d(3, affine=True, track_running_stats=False)
self.run_test(model, x)
model = torch.nn.InstanceNorm2d(3, affine=False, track_running_stats=False)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(
9
) # Because ConstantOfShape op is not supported for opset < 9
def test_instancenorm3d_runningstats(self):
x = torch.randn(10, 3, 64, 64, 64)
model = torch.nn.InstanceNorm3d(3, affine=True, track_running_stats=True)
self.run_test(model, x)
model = torch.nn.InstanceNorm3d(3, affine=False, track_running_stats=True)
self.run_test(model, x)
def test_instancenorm3d_norunningstats(self):
x = torch.randn(10, 3, 64, 64, 64)
model = torch.nn.InstanceNorm3d(3, affine=True, track_running_stats=False)
self.run_test(model, x)
model = torch.nn.InstanceNorm3d(3, affine=False, track_running_stats=False)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter_with_scalar(self):
class ScatterModel(torch.nn.Module):
def forward(self, input, indices):
values = 1.0
return input.scatter(1, indices, values)
input = torch.tensor(
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float64
)
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
self.run_test(ScatterModel(), input_args=(input, indices))
@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter_with_scalar_different_types(self):
# Tests the case when scalar src (updates values) type is different
# from self type. Happens only with scalar src - PyTorch does not
# allow this when src is a tensor.
class ScatterModel(torch.nn.Module):
def forward(self, input, indices):
values = 1.0
return input.scatter(1, indices, values)
input = torch.tensor(
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float32
)
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
self.run_test(ScatterModel(), input_args=(input, indices))
@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter(self):
class ScatterModel(torch.nn.Module):
def forward(self, input, indices, values):
return input.scatter(1, indices, values)
input = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
self.run_test(ScatterModel(), input_args=(input, indices, values))
input = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
indices = torch.tensor([[1, 0], [0, 2], [0, 1]], dtype=torch.int64)
values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
self.run_test(ScatterModel(), (input, indices, values))
input = torch.zeros(3, 4, 5, 6)
indices = torch.tensor([[1, 0], [0, 2], [0, 1]], dtype=torch.int64)
indices = indices.view(3, 2, 1, 1).expand(3, 2, 5, 6)
values = torch.arange(3 * 2 * 5 * 6, dtype=torch.float32).view(3, 2, 5, 6)
self.run_test(ScatterModel(), (input, indices, values))
input = torch.zeros(3, 4, 2)
indices = torch.tensor([[[1, 0], [0, 2]], [[1, 1], [0, 1]], [[2, 1], [2, 2]]])
values = torch.arange(3 * 2 * 2, dtype=torch.float32).view(3, 2, 2)
self.run_test(ScatterModel(), (input, indices, values))
@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter_add(self):
class ScatterModel(torch.nn.Module):
def forward(self, input, indices, values):
return input.scatter_add(1, indices, values)
input = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
self.run_test(ScatterModel(), input_args=(input, indices, values))
@torch.jit.script
def scatter_sum(src: Tensor, index: Tensor):
size = src.size()
out = torch.zeros(size, dtype=src.dtype)
return out.scatter_add_(1, index, src)
class ScatterModel(torch.nn.Module):
def forward(self, src, index):
return scatter_sum(src, index)
src = torch.rand(3, 2)
index = torch.tensor([[0, 1], [0, 1], [0, 1]], dtype=torch.int64)
self.run_test(ScatterModel(), (src, index))
@skipIfUnsupportedMinOpsetVersion(16)
def test_scatter_add_index_not_unique(self):
class ScatterModel(torch.nn.Module):
def forward(self, input, indices, values):
return input.scatter_add(1, indices, values)
input = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
indices = torch.tensor([[0, 0], [1, 1], [2, 2]], dtype=torch.int64)
values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
self.run_test(ScatterModel(), input_args=(input, indices, values))
@torch.jit.script
def scatter_sum(src: Tensor, index: Tensor):
size = src.size()
out = torch.zeros(size, dtype=src.dtype)
return out.scatter_add_(1, index, src)
class ScatterModel(torch.nn.Module):
def forward(self, src, index):
return scatter_sum(src, index)
src = torch.rand(3, 2)
index = torch.tensor([[0, 0], [1, 1], [0, 1]], dtype=torch.int64)
self.run_test(ScatterModel(), (src, index))
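    # The opset-16 requirement presumably reflects ONNX ScatterElements only gaining a
    # reduction="add" attribute in opset 16; with duplicate indices the updates must accumulate,
    # e.g. scattering values [1.0, 1.1] into row [0, 0, 0] at indices [0, 0] yields [2.1, 0, 0].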
@skipIfUnsupportedMinOpsetVersion(9)
def test_bucketize(self):
class BucketModel(torch.nn.Module):
def forward(self, input, boundaries):
return torch.bucketize(input, boundaries), torch.bucketize(
input, boundaries, right=True
)
input = torch.tensor([[2, 5, 10], [6, 8, 3]])
boundaries = torch.tensor([1, 5, 7, 8, 10])
self.run_test(BucketModel(), (input, boundaries))
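    # For reference: with boundaries [1, 5, 7, 8, 10], torch.bucketize(5, boundaries) returns 1
    # (right=False puts values equal to a boundary into the lower bucket) while right=True
    # returns 2, which is exactly what the two outputs above compare against ORT.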
@skipIfUnsupportedMinOpsetVersion(9)
def test_one_hot(self):
class OneHot(torch.nn.Module):
def __init__(self, num_classes):
super().__init__()
self.num_classes = num_classes
def forward(self, x):
return torch.nn.functional.one_hot(x, self.num_classes)
x = torch.arange(10)
self.run_test(OneHot(15), (x))
class OneHot(torch.nn.Module):
def forward(self, x, num_classes):
num_classes = num_classes.to(torch.int32)
return torch.nn.functional.one_hot(x, num_classes[0])
x = torch.arange(10)
num_classes = 15 * torch.ones(1)
self.run_test(OneHot(), (x, num_classes))
@skipIfUnsupportedMinOpsetVersion(9)
def test_gather(self):
class GatherModel(torch.nn.Module):
def forward(self, input, indices):
return input.gather(1, indices)
input = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
self.run_test(GatherModel(), input_args=(input, indices))
@skipScriptTest() # Scripting error: Cannot instantiate nn module
def test_gather_constant_fold(self):
class GatherModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("weight", torch.ones(5))
# torch.nn.Embedding is converted to ONNX::Gather.
                # Constant folding will be triggered for constant inputs.
# This pattern is common for constant mask inputs in transformer models.
self.embed = torch.nn.Embedding(8, 3)
def forward(self, x):
# shape is of rank 0
shape = self.weight.shape[0]
m = 5 - shape
y = torch.ones(1, 4, dtype=torch.long)
return x.clamp(min=m), self.embed(y)
x = torch.randn(1)
self.run_test(GatherModule(), (x,))
class GatherModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("weight", torch.ones(2))
def forward(self, x):
# shape is of rank 0
shape = self.weight.shape[0]
pad = [1, shape, shape, shape]
zero_pad = torch.nn.ZeroPad2d(pad)
return zero_pad(x)
x = torch.randn(1, 3, 2)
self.run_test(GatherModule(), (x,))
class GatherModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("rb", torch.randn(1, 1, 3, 1, 1))
def forward(self, x):
x += self.rb[0]
return x
x = torch.randn(1, 3, 224, 224)
self.run_test(
GatherModule(),
(x,),
dynamic_axes={
"input": {0: "batch", 2: "height", 3: "width"},
"output": {0: "batch", 1: "class", 2: "height", 3: "width"},
},
input_names=["input"],
output_names=["output"],
)
@skipIfUnsupportedOpsetVersion([13])
@skipIfUnsupportedMinOpsetVersion(9)
def test_expand(self):
class ExpandModel(torch.nn.Module):
def forward(self, input):
return input.expand(2, 3, -1)
input = torch.randn(2, 1, 4)
self.run_test(ExpandModel(), input_args=(input))
class ExpandInferDimModel(torch.nn.Module):
def forward(self, input):
return input.expand(-1, input.size(0))
input = torch.randn(3, 1)
self.run_test(ExpandInferDimModel(), input_args=(input))
class ExpandTensorSizeModel(torch.nn.Module):
def forward(self, input, size):
return input.expand(size)
input = torch.randn(
3,
)
size = torch.tensor(-1)
self.run_test(ExpandTensorSizeModel(), input_args=(input, size))
@skipIfUnsupportedMinOpsetVersion(11) # index_put is supported in opsets >= 11
def test_dynamic_expand_as(self):
class Model(torch.nn.Module):
def forward(self, x):
x[:, x.size(0) :] = 0
return x
x = torch.ones(2, 5)
x2 = torch.randn(3, 4)
self.run_test(
Model(),
(x,),
input_names=["x"],
dynamic_axes={"x": [0, 1]},
additional_test_inputs=[x2],
)
class Model(torch.nn.Module):
def forward(self, x):
x[:, x.size(0) :] = torch.tensor([1, 2, 3])
return x
x = torch.ones(2, 5, 3)
x2 = torch.randn(3, 4, 3)
self.run_test(
Model(),
(x,),
input_names=["x"],
dynamic_axes={"x": [0, 1, 2]},
additional_test_inputs=[x2],
)
def test_multinomial(self):
class Multinomial(torch.nn.Module):
def forward(self, weight):
return torch.multinomial(weight, 3, replacement=True)
class MultinomialNoReplacement(torch.nn.Module):
def forward(self, weight):
return torch.multinomial(weight, 1)
weight = torch.tensor([[0, 10, 0, 0], [0, 0, 100, 0]], dtype=torch.float)
self.run_test(Multinomial(), (weight,))
self.run_test(MultinomialNoReplacement(), (weight,))
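    # multinomial is a random op, but the weights make the draw deterministic: [0, 10, 0, 0]
    # can only ever sample index 1 and [0, 0, 100, 0] only index 2, so PyTorch and ORT outputs
    # remain directly comparable.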
def _test_reduced_ops(self, op):
class ReducedOpModule(torch.nn.Module):
def forward(self, input):
return op(input, dim=-1)
if op != torch.mean: # torch.mean only supports float types
x = torch.randint(10, (4, 4), dtype=torch.uint8)
self.run_test(ReducedOpModule(), x)
x = torch.randint(10, (4, 4), dtype=torch.int8)
self.run_test(ReducedOpModule(), x)
x = torch.randint(10, (4, 4), dtype=torch.int16)
self.run_test(ReducedOpModule(), x)
x = torch.randint(10, (4, 4), dtype=torch.int32)
self.run_test(ReducedOpModule(), x)
x = torch.randint(10, (4, 4), dtype=torch.int64)
self.run_test(ReducedOpModule(), x)
# torch.mean only supports float types
        # ORT does not support ReduceProd for double
if op != torch.prod and op != torch.mean:
x = torch.randn(4, 5, dtype=torch.double)
self.run_test(ReducedOpModule(), x)
if op != torch.prod: # torch.prod not implemented for Half
x = torch.randn(4, 4, dtype=torch.half)
self.run_test(ReducedOpModule(), x)
x = torch.randn(4, 5, dtype=torch.float)
self.run_test(ReducedOpModule(), x)
def test_reduced_sum(self):
return self._test_reduced_ops(op=torch.sum)
def test_reduced_mean(self):
return self._test_reduced_ops(op=torch.mean)
def test_reduced_prod(self):
return self._test_reduced_ops(op=torch.prod)
def test_reduced_sum_dtypes(self):
class NoDimModel(torch.nn.Module):
def forward(self, input):
return input.sum(dtype=torch.float)
class DimModel(torch.nn.Module):
def forward(self, input):
return input.sum(dim=-1, dtype=torch.float)
input = torch.randn((4, 4), dtype=torch.half)
self.run_test(NoDimModel(), input)
self.run_test(DimModel(), input)
def test_reduced_min_max(self):
class ReducedMinMaxModule(torch.nn.Module):
def forward(self, input):
return torch.min(input, dim=-1)[0], torch.max(input, dim=0)[0]
x = torch.randint(10, (4, 4), dtype=torch.int32)
self.run_test(ReducedMinMaxModule(), x)
x = torch.randint(10, (4, 4), dtype=torch.int64)
self.run_test(ReducedMinMaxModule(), x)
x = torch.randn(4, 5, dtype=torch.float)
self.run_test(ReducedMinMaxModule(), x)
def test_reduce_log_sum_exp(self):
class ReduceLogSumExpModel(torch.nn.Module):
def forward(self, input):
a = torch.logsumexp(input, dim=0)
b = torch.logsumexp(input, dim=(0, 1))
return a + b
x = torch.randn(4, 4, requires_grad=True)
self.run_test(ReduceLogSumExpModel(), x)
def test_softmax(self):
for i in range(-4, 3):
model = torch.nn.Softmax(dim=i)
input = torch.randn(3, 4, 5, 6)
self.run_test(model, input)
class SoftmaxUnknownRank(torch.nn.Module):
def __init__(self, i):
super().__init__()
self.softmax = torch.nn.Softmax(dim=i)
def forward(self, x):
return self.softmax(x.reshape(3, 4, 5, 6))
model = torch.jit.script(SoftmaxUnknownRank(i))
self.run_test(model, input)
def test_softmax_large_values(self):
input = torch.tensor(
[[-1e12, -1e12, -1e12], [1e12, 0.0, -5.0], [3.0, 4.0, 5.0]]
)
for i in range(-2, 1):
model = torch.nn.Softmax(dim=i)
self.run_test(model, input)
class SoftmaxUnknownRank(torch.nn.Module):
def __init__(self, i):
super().__init__()
self.softmax = torch.nn.Softmax(dim=i)
def forward(self, x):
return self.softmax(x.reshape(3, 3))
model = torch.jit.script(SoftmaxUnknownRank(i))
self.run_test(model, input)
def test_logsoftmax(self):
        for i in range(2, 7):
model = torch.nn.LogSoftmax(dim=i - 1)
dims = [2] * (i - 2) + [3, 4]
input = torch.ones(*dims, requires_grad=True)
self.run_test(model, input)
def test_logsoftmax_dim(self):
for i in range(-4, 3):
model = torch.nn.LogSoftmax(dim=i)
input = torch.randn(3, 4, 5, 6)
self.run_test(model, input)
def test_logsoftmax_dtype(self):
class Model(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.log_softmax(x, dim=1, dtype=torch.float64)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(Model(), x)
def test_softplus(self):
class BetaOneModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.softplus(x)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(BetaOneModel(), x)
class BetaModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.softplus(x, beta=2)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(BetaModel(), x)
class BetaFloatModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.softplus(x, beta=1.7)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(BetaFloatModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_no_hidden(self):
class LSTMModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.rnn = torch.nn.LSTM(input_size=16, hidden_size=16)
def forward(self, x):
return self.rnn(x)
input = torch.randn((10, 16, 16))
self.run_test(LSTMModel(), (input,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_proj_no_hidden(self):
class LSTMModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.rnn = torch.nn.LSTM(input_size=16, hidden_size=16, proj_size=8)
def forward(self, x):
return self.rnn(x)
input = torch.randn((10, 16, 16))
with self.assertRaises(RuntimeError):
self.run_test(LSTMModel(), (input,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm(self):
class LSTMModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.rnn = torch.nn.LSTM(
RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False
)
def forward(self, x, h0, c0):
return self.rnn(x, (h0, c0))
input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
h0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)
c0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)
self.run_test(LSTMModel(), (input, h0, c0))
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_cell(self):
class LSTMCellModel(torch.nn.Module):
def __init__(self, bias):
super().__init__()
self.lstm_cell = torch.nn.LSTMCell(
RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, bias=bias
)
def forward(self, x, h0, c0):
return self.lstm_cell(x, (h0, c0))
input = torch.randn(BATCH_SIZE, RNN_INPUT_SIZE)
h0 = torch.randn(BATCH_SIZE, RNN_HIDDEN_SIZE)
c0 = torch.randn(BATCH_SIZE, RNN_HIDDEN_SIZE)
for bias in [True, False]:
self.run_test(LSTMCellModel(bias), (input, h0, c0))
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_default_init_state(self):
class LSTMModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.rnn = torch.nn.LSTM(
RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False
)
def forward(self, x):
return self.rnn(x)
input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
self.run_test(LSTMModel(), input)
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_fixed_batch_size(self):
class LSTMModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.lstm = torch.nn.LSTM(
RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False
)
self.RNN_HIDDEN_SIZE = RNN_HIDDEN_SIZE
def forward(self, input):
batch_size = input.size()[1]
h0 = torch.ones([1, batch_size, self.RNN_HIDDEN_SIZE])
c0 = torch.ones([1, batch_size, self.RNN_HIDDEN_SIZE])
return self.lstm(input, (h0, c0))
input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
# verify with different input of same batch size
input2 = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
self.run_test(
LSTMModel(), input, fixed_batch_size=True, additional_test_inputs=[input2]
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_post_fix_init_state(self):
class LSTMModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.lstm = torch.nn.LSTM(
RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False
)
self.RNN_HIDDEN_SIZE = RNN_HIDDEN_SIZE
def forward(self, input):
batch_size = input.size()[1]
h0 = torch.ones([1, batch_size, self.RNN_HIDDEN_SIZE])
c0 = torch.ones([1, batch_size, self.RNN_HIDDEN_SIZE])
return self.lstm(input, (h0, c0))
model = LSTMModel()
input = torch.randn(RNN_SEQUENCE_LENGTH, 1, RNN_INPUT_SIZE)
# verify with different input of different batch size
input2 = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
self.run_test(
model,
input,
input_names=["input.1"],
dynamic_axes={"input.1": {0: "seq", 1: "batch"}},
additional_test_inputs=[input2],
)
def test_lstm_constant_folding(self):
class LstmNet(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_layers, bidirectional):
super().__init__()
self.lstm = torch.nn.LSTM(
input_size, hidden_size, num_layers, bidirectional=bidirectional
)
def forward(self, input, initial_state: Tuple[Tensor, Tensor]):
return self.lstm(input, initial_state)
def get_LstmNet_model_and_inputs(
input_size, hidden_size, num_layers, batch_size, seq_len, bidirectional
):
num_directions = 2 if bidirectional else 1
model = LstmNet(input_size, hidden_size, num_layers, bidirectional)
input = torch.randn(seq_len, batch_size, input_size)
h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
c0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
return model, (input, (h0, c0))
batch_size1 = 3
model1, input1 = get_LstmNet_model_and_inputs(7, 3, 2, batch_size1, 5, True)
self.run_test(model1, input1, do_constant_folding=True)
batch_size2 = 4
model2, input2 = get_LstmNet_model_and_inputs(5, 4, 3, batch_size2, 7, False)
self.run_test(model2, input2, do_constant_folding=True)
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_no_bias(self):
class LstmNet(torch.nn.Module):
def __init__(self, num_layers, bidirectional):
super().__init__()
self.lstm = torch.nn.LSTM(
RNN_INPUT_SIZE,
RNN_HIDDEN_SIZE,
num_layers,
bias=False,
bidirectional=bidirectional,
)
def forward(self, input, initial_state: Tuple[Tensor, Tensor]):
return self.lstm(input, initial_state)
def get_LstmNet_model_and_inputs(num_layers, bidirectional):
input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
num_directions = 2 if bidirectional else 1
model = LstmNet(num_layers, bidirectional)
h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, RNN_HIDDEN_SIZE)
c0 = torch.randn(num_layers * num_directions, BATCH_SIZE, RNN_HIDDEN_SIZE)
return model, (input, (h0, c0))
num_layers = [1, 1, 2, 3]
bidirectional = [True, False, True, False]
models_and_inputs = [
get_LstmNet_model_and_inputs(n, b)
for n, b in zip(num_layers, bidirectional)
]
for model, input in models_and_inputs:
self.run_test(model, input)
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_sequence(self):
class LstmNet(torch.nn.Module):
def __init__(self):
super().__init__()
self.rnn1 = torch.nn.LSTM(8, 8, bidirectional=True, batch_first=True)
self.linear1 = torch.nn.Linear(8 * 2, 8)
self.rnn2 = torch.nn.LSTM(8, 8, bidirectional=True, batch_first=True)
self.linear2 = torch.nn.Linear(8 * 2, 8)
def forward(self, input):
rnn_output1, _ = self.rnn1(input)
linear_output1 = self.linear1(rnn_output1)
rnn_output2, _ = self.rnn2(linear_output1)
linear_output2 = self.linear2(rnn_output2)
return linear_output2
input = torch.zeros((1, 100, 8), dtype=torch.float32)
self.run_test(
LstmNet(),
input,
input_names=["input"],
output_names=["output"],
dynamic_axes={
"input": {0: "batch_size", 1: "w", 2: "h"},
"output": {0: "batch_size", 1: "w", 2: "h"},
},
)
@skipScriptTest()
def test_rnn_no_bias(self):
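        # packed_sequence encoding used below: 0 = plain padded tensor input,
        # 1 = PackedSequence (sequence-major), 2 = PackedSequence with batch_first.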
def make_model(layers, packed_sequence):
            batch_first = packed_sequence == 2
model = torch.nn.RNN(
RNN_INPUT_SIZE,
RNN_HIDDEN_SIZE,
layers,
bidirectional=False,
batch_first=batch_first,
bias=False,
)
if packed_sequence == 1:
model = rnn_model_with_packed_sequence.RnnModelWithPackedSequence(
model, False
)
if packed_sequence == 2:
model = rnn_model_with_packed_sequence.RnnModelWithPackedSequence(
model, True
)
return model
def make_input(batch_size, layers, packed_sequence):
            batch_first = packed_sequence == 2
seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
            seq_lengths = sorted(map(int, seq_lengths), reverse=True)
inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
inputs = [inputs]
h0 = torch.randn(layers, batch_size, RNN_HIDDEN_SIZE)
inputs.append(h0)
if packed_sequence != 0:
inputs.append(torch.IntTensor(seq_lengths))
if len(inputs) == 1:
input = inputs[0]
else:
input = tuple(inputs)
return input
layers = [1, 3, 1, 3, 1, 3]
packed_sequence = [0, 0, 1, 1, 2, 2]
models = [make_model(l, p) for l, p in zip(layers, packed_sequence)]
inputs = [
make_input(RNN_BATCH_SIZE, l, p) for l, p in zip(layers, packed_sequence)
]
for model, input in zip(models, inputs):
self.run_test(model, input)
def test_gru_no_bias(self):
class GruNet(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_layers, bidirectional):
super().__init__()
self.mygru = torch.nn.GRU(
input_size,
hidden_size,
num_layers,
bidirectional=bidirectional,
bias=False,
)
def forward(self, input, initial_state):
out = self.mygru(input, initial_state)
return out
def get_GruNet_model_and_inputs(
input_size, hidden_size, num_layers, batch_size, seq_len, bidirectional
):
num_directions = 2 if bidirectional else 1
model = GruNet(input_size, hidden_size, num_layers, bidirectional)
input = torch.randn(seq_len, batch_size, input_size)
h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
return model, (input, h0)
input_size = [7, 5]
hidden_size = [3, 4]
num_layers = [2, 3]
batch_size = [3, 4]
seq_len = [5, 7]
bidirectional = [True, False]
models_and_inputs = [
get_GruNet_model_and_inputs(i, h, n, b, s, bi)
for i, h, n, b, s, bi in zip(
input_size, hidden_size, num_layers, batch_size, seq_len, bidirectional
)
]
for model, input in models_and_inputs:
self.run_test(model, input, do_constant_folding=True)
def test_gru_constant_folding(self):
class GruNet(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_layers, bidirectional):
super().__init__()
self.mygru = torch.nn.GRU(
input_size, hidden_size, num_layers, bidirectional=bidirectional
)
def forward(self, input, initial_state):
out = self.mygru(input, initial_state)
return out
def get_GruNet_model_and_inputs(
input_size, hidden_size, num_layers, batch_size, seq_len, bidirectional
):
num_directions = 2 if bidirectional else 1
model = GruNet(input_size, hidden_size, num_layers, bidirectional)
input = torch.randn(seq_len, batch_size, input_size)
h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
return model, (input, h0)
batch_size1 = 3
model1, input1 = get_GruNet_model_and_inputs(7, 3, 2, batch_size1, 5, True)
self.run_test(model1, input1, do_constant_folding=True)
batch_size2 = 4
model2, input2 = get_GruNet_model_and_inputs(5, 4, 3, batch_size2, 7, False)
self.run_test(model2, input2, do_constant_folding=True)
@skipIfUnsupportedMinOpsetVersion(8)
def test_max_tensors(self):
class MaxModel(torch.nn.Module):
def forward(self, input, other):
return torch.max(input, other)
model = MaxModel()
x = torch.randn(4, 4, requires_grad=True)
y = torch.randn(4, 1, requires_grad=True)
self.run_test(model, (x, y))
def test_amax_amin(self):
class Model(torch.nn.Module):
def forward(self, x):
return torch.amax(x, dim=0, keepdim=True), torch.amin(
x, dim=[0, 1], keepdim=False
)
model = Model()
x = torch.randn(4, 4)
self.run_test(model, x)
def test_aminmax(self):
class Model(torch.nn.Module):
def forward(self, x):
return torch.aminmax(x, dim=1, keepdim=True), torch.aminmax(
x, keepdim=False
)
model = Model()
x = torch.randn(3, 4)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_end(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(a.size(0), dtype=torch.float).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
outputs = ArangeScript()(x)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(a.size(0), dtype=torch.float).view(-1, 1) + a
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_end_notype(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(a.size(0))
x = torch.randn(3, 4, requires_grad=True)
outputs = ArangeScript()(x)
self.run_test(ArangeScript(), x, input_names=["x"], dynamic_axes={"x": [0, 1]})
self.run_test(ArangeScript(), x, remained_onnx_input_idx=[])
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(a.size(0))
self.run_test(ArangeModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1]})
self.run_test(ArangeModel(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_start_end(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(2, a.size(0) + 2, dtype=torch.float).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(2, a.size(0) + 2, dtype=torch.float).view(-1, 1) + a
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_start_end_notype(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(2.7, a.size(0) + 2).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(2.7, a.size(0) + 2).view(-1, 1) + a
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_start_end_step(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return (
torch.arange(
2, a.size(0) * a.size(1) + 2, a.size(1), dtype=torch.float
).view(-1, 1)
+ a
)
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return (
torch.arange(
2, a.size(0) * a.size(1) + 2, a.size(1), dtype=torch.float
).view(-1, 1)
+ a
)
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_start_end_step_notype(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return (
torch.arange(2.7, a.size(0) * a.size(1) + 2, a.size(1)).view(-1, 1)
+ a
)
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return (
torch.arange(2.7, a.size(0) * a.size(1) + 2, a.size(1)).view(-1, 1)
+ a
)
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test__dim_arange(self):
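        # torch._dim_arange(t, d) is shorthand for torch.arange(t.size(d)).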
class DimArange(torch.nn.Module):
def forward(self, input):
return torch._dim_arange(input, 1)
x = torch.ones(5, 6)
self.run_test(DimArange(), x, input_names=["x"], dynamic_axes={"x": [0, 1]})
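        # remained_onnx_input_idx gives the PyTorch inputs expected to survive as ONNX
        # graph inputs; for opset >= 11 the arange is expected to fold away, leaving none.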
remained_onnx_input_idx = None if self.opset_version < 11 else []
self.run_test(DimArange(), x, remained_onnx_input_idx=remained_onnx_input_idx)
def _test_compare_ops(self, model, num_inputs):
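        # Shared helper: exercises the comparison with float and int32 inputs and, for
        # binary models, with mixed-dtype operand pairs as well.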
x_float = torch.randn(1, 2, 3, 4, requires_grad=True)
x_int = torch.randint(10, (3, 4), dtype=torch.int32)
if num_inputs > 1:
y_float = torch.randn(1, 2, 3, 4, requires_grad=True)
y_int = torch.randint(10, (3, 4), dtype=torch.int32)
self.run_test(model, (x_float, y_float))
self.run_test(model, (x_float, y_int))
self.run_test(model, (x_int, y_float))
self.run_test(model, (x_int, y_int))
else:
self.run_test(model, x_float)
self.run_test(model, x_int)
@skipIfUnsupportedMinOpsetVersion(9)
def test_and_or_xor(self):
class MyModel(torch.nn.Module):
def forward(self, x, y):
return x ^ y, x | y, x & y, ~x
x = torch.randint(0, 2, (5, 5), dtype=torch.bool)
y = torch.randint(0, 2, (5, 5), dtype=torch.bool)
self.run_test(MyModel(), input_args=(x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_logical_and(self):
class AndModel(torch.nn.Module):
def forward(self, x, y):
return torch.logical_and(x, y)
x = torch.randint(0, 2, (5, 5), dtype=torch.bool)
y = torch.randint(0, 2, (5, 5), dtype=torch.bool)
self.run_test(AndModel(), input_args=(x, y))
x = torch.randint(10, (5, 5), dtype=torch.int32)
y = torch.randint(10, (5, 5), dtype=torch.int32)
self.run_test(AndModel(), input_args=(x, y))
x = torch.randint(10, (5, 5), dtype=torch.double)
y = torch.randint(10, (5, 5), dtype=torch.double)
self.run_test(AndModel(), input_args=(x, y))
x = torch.randint(10, (2, 3, 5), dtype=torch.float32)
y = torch.randint(10, (2, 3, 5), dtype=torch.long)
self.run_test(AndModel(), input_args=(x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_logical_or(self):
class OrModel(torch.nn.Module):
def forward(self, x, y):
return torch.logical_or(x, y)
x = torch.randint(0, 2, (5, 5), dtype=torch.bool)
y = torch.randint(0, 2, (5, 5), dtype=torch.bool)
self.run_test(OrModel(), input_args=(x, y))
x = torch.randint(10, (5, 5), dtype=torch.int32)
y = torch.randint(10, (5, 5), dtype=torch.int32)
self.run_test(OrModel(), input_args=(x, y))
x = torch.randint(10, (5, 5), dtype=torch.double)
y = torch.randint(10, (5, 5), dtype=torch.double)
self.run_test(OrModel(), input_args=(x, y))
x = torch.randint(10, (2, 3, 5), dtype=torch.float32)
y = torch.randint(10, (2, 3, 5), dtype=torch.long)
self.run_test(OrModel(), input_args=(x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_logical_xor(self):
class XorModel(torch.nn.Module):
def forward(self, x, y):
return torch.logical_xor(x, y)
x = torch.randint(0, 2, (5, 5), dtype=torch.bool)
y = torch.randint(0, 2, (5, 5), dtype=torch.bool)
self.run_test(XorModel(), input_args=(x, y))
x = torch.randint(10, (5, 5), dtype=torch.int32)
y = torch.randint(10, (5, 5), dtype=torch.int32)
self.run_test(XorModel(), input_args=(x, y))
x = torch.randint(10, (5, 5), dtype=torch.double)
y = torch.randint(10, (5, 5), dtype=torch.double)
self.run_test(XorModel(), input_args=(x, y))
x = torch.randint(10, (2, 3, 5), dtype=torch.float32)
y = torch.randint(10, (2, 3, 5), dtype=torch.long)
self.run_test(XorModel(), input_args=(x, y))
    @skipIfUnsupportedMinOpsetVersion(11)  # Equal on float inputs is supported from opset 11
def test_eq(self):
class EqualModel(torch.nn.Module):
def forward(self, input, other):
return input == other
self._test_compare_ops(EqualModel(), 2)
def test_gt(self):
class GreaterModel(torch.nn.Module):
def forward(self, input, other):
return input > other
self._test_compare_ops(GreaterModel(), 2)
@skipIfUnsupportedMinOpsetVersion(9)
def test_ge(self):
class GreaterOrEqualModel(torch.nn.Module):
def forward(self, input, other):
return input >= other
self._test_compare_ops(GreaterOrEqualModel(), 2)
def test_gt_scalar(self):
class GreaterModel(torch.nn.Module):
def forward(self, input):
return input > 1
self._test_compare_ops(GreaterModel(), 1)
def test_gt_primitive(self):
class GreaterModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.y: int = 2
def forward(self, x: int):
return self.y > x
x = 3
self.run_test(GreaterModel(), (x,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_ge_scalar(self):
class GreaterOrEqualModel(torch.nn.Module):
def forward(self, input):
return input >= 1
self._test_compare_ops(GreaterOrEqualModel(), 1)
def test_lt(self):
class LessModel(torch.nn.Module):
def forward(self, input, other):
                return input < other
self._test_compare_ops(LessModel(), 2)
@skipIfUnsupportedMinOpsetVersion(9)
def test_le(self):
class LessOrEqualModel(torch.nn.Module):
def forward(self, input, other):
return input <= other
self._test_compare_ops(LessOrEqualModel(), 2)
def test_lt_scalar(self):
class LessModel(torch.nn.Module):
def forward(self, input):
return input < 1
self._test_compare_ops(LessModel(), 1)
@skipIfUnsupportedMinOpsetVersion(9)
def test_le_scalar(self):
class LessOrEqualModel(torch.nn.Module):
def forward(self, input):
return input <= 1
self._test_compare_ops(LessOrEqualModel(), 1)
def test_matmul(self):
class MatmulModel(torch.nn.Module):
def forward(self, input, other):
return torch.matmul(input, other)
x = torch.randn(3, 4, requires_grad=True)
y = torch.randn(4, 5, requires_grad=True)
self.run_test(MatmulModel(), (x, y))
x = torch.randint(10, (3, 4))
y = torch.randint(10, (4, 5))
self.run_test(MatmulModel(), (x, y))
def test_matmul_batch(self):
class MatmulModel(torch.nn.Module):
def forward(self, input, other):
return torch.matmul(input, other)
x = torch.randn(2, 3, 4, requires_grad=True)
y = torch.randn(2, 4, 5, requires_grad=True)
self.run_test(MatmulModel(), (x, y))
x = torch.randint(10, (2, 3, 4))
y = torch.randint(10, (2, 4, 5))
self.run_test(MatmulModel(), (x, y))
def _argmin_argmax_model(self, input):
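        # Covers argmin/argmax over the flattened tensor, with keepdim, and with an
        # explicit dim.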
class ArgminArgmaxModel(torch.nn.Module):
def forward(self, input):
return (
torch.argmin(input),
torch.argmax(input),
torch.argmin(input, keepdim=True),
torch.argmax(input, keepdim=True),
torch.argmin(input, dim=0, keepdim=True),
torch.argmax(input, dim=1, keepdim=True),
)
self.run_test(ArgminArgmaxModel(), input)
@skipIfUnsupportedMinOpsetVersion(9)
def test_argmin_argmax(self):
input = torch.randn(7, 3, 5)
self._argmin_argmax_model(input)
    # Argmin and Argmax with "select_last_index" are not supported before opset 12.
    # "select_last_index" was added in opset 12 to handle the corner case where the
    # same value appears multiple times in the tensor.
@skipIfUnsupportedMinOpsetVersion(12)
def test_argmin_argmax_select_last_index(self):
input = torch.tensor([[1.0, 2.0, 3.0], [1.0, 1.0, 2.0]])
self._argmin_argmax_model(input)
input = torch.ones(7, 3, 5)
self._argmin_argmax_model(input)
def test_repeat(self):
class RepeatModel(torch.nn.Module):
def forward(self, x, y):
x2 = x.repeat(y.shape[0], 1)
y1 = y.view(-1, 1)
return x2 + y1
x = torch.tensor([1, 2, 3])
y = torch.tensor([4, 5, 8, 9])
self.run_test(RepeatModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_repeat_interleave(self):
class FlattenModel(torch.nn.Module):
def forward(self, x):
return x.repeat_interleave(2)
x = torch.tensor([1, 2, 3])
self.run_test(FlattenModel(), (x,))
class DimsModel(torch.nn.Module):
def forward(self, x):
return x.repeat_interleave(4, dim=1)
x = torch.tensor([[1, 2], [3, 4]])
self.run_test(DimsModel(), (x,))
class DimsModel2(torch.nn.Module):
def forward(self, x):
repeats = torch.tensor([4])
return torch.repeat_interleave(x, repeats, dim=1)
x = torch.tensor([[1, 2], [3, 4]])
self.run_test(DimsModel2(), (x,))
class RepeatsDimsModel(torch.nn.Module):
def forward(self, x):
repeats = torch.tensor([1, 2])
return torch.repeat_interleave(x, repeats, dim=0)
x = torch.tensor([[1, 2], [3, 4]])
self.run_test(RepeatsDimsModel(), (x,))
class RepeatsDimsModel2(torch.nn.Module):
def forward(self, x):
repeats = torch.tensor([1, 2])
return torch.repeat_interleave(x, repeats, dim=1)
x = torch.tensor([[1, 2], [3, 4]])
self.run_test(RepeatsDimsModel2(), (x,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_repeat_interleave_noop(self):
class Model(torch.nn.Module):
def forward(self, x):
return x.repeat_interleave(1, dim=1)
x = torch.randn(4, 1, 8)
self.run_test(Model(), (x,))
@skipIfUnsupportedMinOpsetVersion(13)
def test_dynamic_repeat_interleave(self):
class SingleDynamicModel(torch.nn.Module):
def forward(self, x):
repeats = torch.tensor(4)
return torch.repeat_interleave(x, repeats, dim=1)
x = torch.tensor([[1, 2, 4], [3, 4, 7]])
another_x = torch.tensor([[7, 8], [5, 6]])
self.run_test(
SingleDynamicModel(),
x,
additional_test_inputs=[another_x],
input_names=["input_1"],
dynamic_axes={"input_1": {1: "w"}},
)
class NegDynamicModel(torch.nn.Module):
def forward(self, x):
repeats = torch.tensor(4)
return torch.repeat_interleave(x, repeats, dim=-1)
x = torch.tensor([[1, 2, 4], [3, 4, 7]])
another_x = torch.tensor([[7, 8], [5, 6]])
self.run_test(
NegDynamicModel(),
x,
additional_test_inputs=[another_x],
input_names=["input_1"],
dynamic_axes={"input_1": {1: "w"}},
)
class SingleDynamicModelFloat(torch.nn.Module):
def forward(self, x):
repeats = torch.tensor([4])
return torch.repeat_interleave(x, repeats, dim=0)
x = torch.tensor([[1.1, 2.1], [3.1, 4.1]])
another_x = torch.tensor([[7.1, 8.1], [5.1, 6.1]])
self.run_test(
SingleDynamicModelFloat(),
x,
additional_test_inputs=[another_x],
input_names=["input_1"],
dynamic_axes={"input_1": {0: "h"}},
)
class DynamicRepeatsModel(torch.nn.Module):
def forward(self, x, repeats):
return torch.repeat_interleave(x, repeats, dim=1)
x = torch.tensor([[1, 2, 4], [3, 4, 7]])
another_x = torch.tensor([[7, 8], [5, 6]])
repeats = torch.tensor([2])
another_repeats = torch.tensor([4])
self.run_test(
DynamicRepeatsModel(),
(x, repeats),
additional_test_inputs=[(another_x, another_repeats)],
input_names=["input_1", "repeats_1"],
dynamic_axes={"input_1": {1: "w"}, "repeats_1": {0: "r"}},
)
class DynamicRepeatsModel2(torch.nn.Module):
def forward(self, x, repeats):
return torch.repeat_interleave(x, repeats, dim=1)
x = torch.tensor([[1, 2, 4], [3, 4, 7]])
repeats = torch.tensor([2])
another_repeats = torch.tensor([4])
self.run_test(
DynamicRepeatsModel2(),
(x, repeats),
additional_test_inputs=[(x, another_repeats)],
input_names=["input_1", "repeats_1"],
dynamic_axes={"repeats_1": {0: "r"}},
)
@skipIfUnsupportedMinOpsetVersion(13)
def test_multiple_dynamic_repeat_interleave(self):
class DynamicRepeatsModel(torch.nn.Module):
def forward(self, x, repeats):
return torch.repeat_interleave(x, repeats, dim=1)
x = torch.tensor([[1, 2, 4], [3, 4, 7]])
repeats = torch.tensor([2, 3, 4])
another_repeats = torch.tensor([4, 3, 2])
self.run_test(
DynamicRepeatsModel(),
(x, repeats),
additional_test_inputs=[(x, another_repeats)],
input_names=["input_1", "repeats_1"],
dynamic_axes={"repeats_1": {0: "r"}},
)
class DynamicRepeatsModel2(torch.nn.Module):
def forward(self, x, repeats):
return torch.repeat_interleave(x, repeats, dim=0)
x = torch.tensor([[1, 2, 4], [3, 4, 7]])
repeats = torch.tensor([2, 3])
another_repeats = torch.tensor([4, 3])
self.run_test(
DynamicRepeatsModel2(),
(x, repeats),
additional_test_inputs=[(x, another_repeats)],
input_names=["input_1", "repeats_1"],
dynamic_axes={"repeats_1": {0: "r"}},
)
def test_view(self):
class ViewModel(torch.nn.Module):
def forward(self, input):
return input.view(4, 24)
x = torch.randint(10, (4, 2, 3, 4), dtype=torch.int32)
self.run_test(ViewModel(), x)
def test_view_dynamic(self):
class ViewModel(torch.nn.Module):
def forward(self, input, other):
return input.view(other.shape)
x = torch.randn(2, 3, 4)
shape = torch.randn(6, 4)
self.run_test(
ViewModel(),
(x, shape),
input_names=["x", "shape"],
dynamic_axes={"x": [0, 1, 2], "shape": [0, 1]},
)
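        # With no dynamic axes, the target shape folds into the graph, so only the data
        # tensor (input index 0) is expected to remain an ONNX input.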
self.run_test(ViewModel(), (x, shape), remained_onnx_input_idx=[0])
def test_view_dynamic_zero_dim(self):
class ViewModel(torch.nn.Module):
def forward(self, input):
input = input.view(-1, 2)
return input.view(1, -1)
x = torch.ones(2)
another_x = torch.empty((0,))
self.run_test(
ViewModel(),
x,
additional_test_inputs=[another_x],
input_names=["input_1"],
dynamic_axes={
"input_1": [
0,
]
},
)
def test_view_as(self):
class ViewModel(torch.nn.Module):
def forward(self, input, other):
return input.view_as(other)
x = torch.randn(2, 3, 4)
y = torch.randn(6, 4)
self.run_test(ViewModel(), (x, y))
def test_linear(self):
class LinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc = torch.nn.Linear(16, 16)
def forward(self, x):
out = self.fc(x)
out = self.fc(out)
return out
x = torch.randn(3, 16)
self.run_test(LinearModel(), (x,))
class LinearModel(torch.nn.Module):
def forward(self, input, weight, bias):
return torch.nn.functional.linear(input, weight, bias)
# input of rank 2
x = torch.randn(2, 2)
y = torch.randn(2, 2)
z = torch.randn(1)
self.run_test(LinearModel(), (x, y, z))
# input of rank 3
x = torch.randn(3, 3, 3)
y = torch.randn(3, 3)
z = torch.randn(1)
self.run_test(LinearModel(), (x, y, z))
@skipScriptTest()
def test_weight_norm(self):
# addmm for 3-d inputs converts to onnx::MatMul
model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=1)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(model, x)
# addmm for 2-d inputs converts to onnx::Gemm
model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=1)
x = torch.randn(4, 5, requires_grad=True)
self.run_test(model, x)
model = torch.nn.utils.weight_norm(torch.nn.Conv1d(1, 1, 3))
x = torch.randn(1, 1, 5, requires_grad=True)
self.run_test(model, x)
model = torch.nn.utils.weight_norm(torch.nn.Conv1d(1, 1, 3), dim=-2)
x = torch.randn(1, 1, 5, requires_grad=True)
self.run_test(model, x)
model = torch.nn.utils.weight_norm(torch.nn.Conv1d(3, 6, 3), name="weight")
x = torch.randn(3, 3, 5, requires_grad=True)
self.run_test(model, x)
@skipScriptTest()
def test_weight_norm_nodim(self):
# addmm for 3-d inputs converts to onnx::MatMul
model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=None)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(model, x)
# addmm for 2-d inputs converts to onnx::Gemm
model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=None)
x = torch.randn(4, 5, requires_grad=True)
self.run_test(model, x)
def test_flatten(self):
class FlattenModel(torch.nn.Module):
def forward(self, input):
return torch.flatten(input)
x = torch.randint(10, (1, 2, 3, 4))
self.run_test(FlattenModel(), x)
x = torch.randn(4)
self.run_test(FlattenModel(), x)
def test_flatten2d(self):
class FlattenModel(torch.nn.Module):
def forward(self, input):
return torch.flatten(input, 1)
x = torch.randint(10, (1, 2, 3, 4))
self.run_test(FlattenModel(), x)
def test_flatten2d_neg(self):
class FlattenModel(torch.nn.Module):
def forward(self, x):
return (
torch.flatten(x, 1, -1),
torch.flatten(x, 0, -2),
torch.flatten(x, 1, -2),
)
x = torch.randint(10, (1, 2, 3, 4))
self.run_test(FlattenModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_flatten_dynamic_axes(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return torch.flatten(x, start_dim=2, end_dim=3)
batch_size = 3
x = torch.randn(batch_size, 5, 4, 5)
y = torch.randn(5, 5, 4, 5)
model = MyModule()
self.run_test(
model,
x,
additional_test_inputs=[y],
input_names=["input"],
output_names=["output"],
dynamic_axes={"input": {0: "batch_size"}, "output": {0: "batch_size"}},
)
@skipIfUnsupportedMinOpsetVersion(11)
def test_getitem(self):
class GetItemModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, y, z, ind):
# this will create prim::ListConstruct(x, y, z) + aten::__getitem__
arr = [x, y, z]
return arr[ind]
x = torch.randn(3, 4, 5)
y = torch.randn(1, 4, 5)
z = torch.randn(2, 4, 5)
ind = torch.tensor(1, dtype=torch.long)
self.run_test(GetItemModel(), (x, y, z, ind))
ind = torch.tensor(-2, dtype=torch.long)
self.run_test(GetItemModel(), (x, y, z, ind))
@skipDtypeChecking
def test_item(self):
class M(torch.nn.Module):
def forward(self, x, y, i: int):
return int(x[y[i]].item())
x = torch.arange(6, dtype=torch.float)
y = torch.tensor([0, 1, 2, 3, 4], dtype=torch.long)
i = 3
self.run_test(torch.jit.script(M()), (x, y, i))
@skipScriptTest() # torch.nonzero(x, as_tuple=True) is not scriptable.
@skipIfUnsupportedMinOpsetVersion(9)
def test_nonzero(self):
class NonzeroModel(torch.nn.Module):
def forward(self, x):
return x.nonzero(), x.nonzero(as_tuple=True)
x = torch.randn(60).index_fill_(0, torch.randint(0, 60, (20,)), 0).view(3, 4, 5)
self.run_test(NonzeroModel(), (x,))
def test_unbind(self):
class UnbindModel(torch.nn.Module):
def forward(self, input):
_, out, _ = input.unbind()
return out
x = torch.randn(3, 4, 5)
self.run_test(UnbindModel(), x)
class UnbindModel2(torch.nn.Module):
def forward(self, input):
_, out, _, _ = input.unbind(1)
return out
x = torch.randn(3, 4, 5)
self.run_test(UnbindModel2(), x)
class UnbindModel3(torch.nn.Module):
def forward(self, input):
_, out, _, _ = input.unbind(-2)
return out
x = torch.randn(3, 4, 5)
self.run_test(UnbindModel3(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_len(self):
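        # len(input.unbind()) equals input.size(0), so the exported graph must track the
        # dynamic first dimension rather than bake in a constant.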
class LenModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return len(input.unbind()) + input
x = torch.randn(4, 5)
self.run_test(
LenModel(),
x,
input_names=["input"],
dynamic_axes={"input": {0: "seq"}},
additional_test_inputs=(torch.randn(5, 5),),
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_len_list(self):
class LenListModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return torch.ones(len(input.shape))
x = torch.randn(4, 5)
self.run_test(LenListModel(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(11)
def test_unbind_dynamic(self):
class UnbindModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return input.unbind()[1]
x = torch.randn(3, 4, 5)
self.run_test(UnbindModel(), x)
class UnbindModel2(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return input.unbind(-1)[1]
x = torch.randn(3, 4, 5)
self.run_test(UnbindModel2(), x)
@skipScriptTest() # scripting tests run for opsets > 11. See: test_split_script
def test_split(self):
class SplitModel(torch.nn.Module):
def forward(self, input):
return input.split([2, 1, 2]), input.split([3, 2])[0]
x = torch.randn(5, 4, 3)
self.run_test(SplitModel(), x)
class SplitModel2(torch.nn.Module):
def forward(self, input):
return input.split([2, 1, 1], -2), input.split([2, 2], -2)[-1]
x = torch.randn(5, 4, 3)
self.run_test(SplitModel2(), x)
class SplitModel3(torch.nn.Module):
def forward(self, input):
return input.split([2, 1, 2])
x = torch.randn(5, 4, 3)
self.run_test(SplitModel3(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_split_script(self):
class SplitModel(torch.nn.Module):
def forward(self, input):
return input.split([2, 1, 2]), input.split([3, 2])[0]
x = torch.randn(5, 4, 3)
self.run_test(SplitModel(), x)
class SplitModel2(torch.nn.Module):
def forward(self, input):
return input.split([2, 1, 1], -2), input.split([2, 2], -2)[-1]
x = torch.randn(5, 4, 3)
self.run_test(SplitModel2(), x)
class SplitModel3(torch.nn.Module):
def forward(self, input):
return input.split([2, 1, 2])
x = torch.randn(5, 4, 3)
self.run_test(SplitModel3(), x)
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest()
def test_split_size_as_list(self):
class SplitModel(torch.nn.Module):
def forward(self, input, split_sizes: List[int]):
out = []
split_list: List[Tensor] = input.split(split_sizes)
for ob in split_list:
out.append(ob)
return torch.cat(out, dim=0)
x = torch.randn(6, 4, 3)
split_sizes = [torch.tensor(2), torch.tensor(4)]
self.run_test(SplitModel(), (x, split_sizes))
@skipIfUnsupportedMinOpsetVersion(11)
def test_split_size_with_slice(self):
class SplitModule(torch.nn.Module):
def forward(self, x, y, t):
splits = (x.size(1), y.size(1))
out, out2 = torch.split(t, splits, dim=1)
return out, out2
x = torch.randn(2, 3)
y = torch.randn(2, 4)
t = torch.randn(2, 7)
self.run_test(
SplitModule(),
(x, y, t),
input_names=["x", "y", "t"],
dynamic_axes={"x": [0, 1], "y": [0, 1], "t": [0, 1]},
)
self.run_test(SplitModule(), (x, y, t), remained_onnx_input_idx=[2])
@skipIfUnsupportedMinOpsetVersion(11)
def test_split_dynamic(self):
class SplitModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return input.split(2)[1]
x = torch.randn(5, 4, 3)
self.run_test(SplitModel(), x)
class SplitModel2(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return input.split(2, -3)[1]
x = torch.randn(5, 4, 3)
self.run_test(SplitModel2(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_split_dynamic_axes(self):
class Split(torch.nn.Module):
def forward(self, x):
return x.split(1, dim=-1)
x = torch.randn(4, 384, 2)
input_names = ["logits"]
self.run_test(
Split(),
x,
input_names=input_names,
dynamic_axes={input_names[0]: {0: "batch"}},
)
@skipIfUnsupportedMinOpsetVersion(11)
def test_chunk(self):
class ChunkModel(torch.nn.Module):
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, x):
return torch.chunk(x, 3, dim=self.dim)
model = ChunkModel()
model.eval()
model_neg_dim = ChunkModel(-1)
model_neg_dim.eval()
x = torch.randn(1, 18)
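        # Widths 13-15 are not multiples of 3, so the last chunk comes out smaller and
        # exercises uneven chunking along the dynamic dimension.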
for dim_size_ in range(13, 16):
y = torch.randn(1, dim_size_)
self.run_test(
model,
x,
additional_test_inputs=[y],
input_names=["x"],
dynamic_axes={"x": {0: "batch_size", 1: "dims"}},
)
self.run_test(
model_neg_dim,
x,
additional_test_inputs=[y],
input_names=["x"],
dynamic_axes={"x": {0: "batch_size", 1: "dims"}},
)
@skipIfUnsupportedMinOpsetVersion(11)
def test_dynamic_chunk(self):
class ChunkModel(torch.nn.Module):
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, x):
return torch.chunk(x, x.size(0), dim=self.dim)
model = ChunkModel()
model.eval()
model_neg_dim = ChunkModel(-1)
model_neg_dim.eval()
x = torch.randn(3, 18)
for dim_size_ in range(13, 16):
y = torch.randn(3, dim_size_)
self.run_test(
model,
x,
additional_test_inputs=[y],
input_names=["x"],
dynamic_axes={"x": {0: "batch_size", 1: "dims"}},
)
self.run_test(
model_neg_dim,
x,
additional_test_inputs=[y],
input_names=["x"],
dynamic_axes={"x": {0: "batch_size", 1: "dims"}},
)
def test_concat(self):
class ConcatModel(torch.nn.Module):
def forward(self, x, y, z):
return torch.cat((x, y, z))
x = torch.randn(3, 4, 5)
y = torch.randn(1, 4, 5)
z = torch.randn(2, 4, 5)
self.run_test(ConcatModel(), (x, y, z))
@skipIfUnsupportedMinOpsetVersion(11)
def test_concat_dynamic(self):
class ConcatDynamicModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.cat(x.unbind())
x = torch.randn(4, 5, 6)
self.run_test(ConcatDynamicModel(), x)
def test_stack(self):
class StackModel(torch.nn.Module):
def forward(self, x, y, z):
return torch.stack((x, y, z), 1)
x = torch.randn(3, 4, 5)
y = torch.randn(3, 4, 5)
z = torch.randn(3, 4, 5)
self.run_test(StackModel(), (x, y, z))
@skipIfUnsupportedMinOpsetVersion(11)
def test_stack_dynamic(self):
class StackDynamicModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.stack(x.unbind(), 1)
x = torch.randn(4, 5, 6)
self.run_test(StackDynamicModel(), x)
def test_loop_dynamic(self):
class LoopModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
for i in range(x.size(2)):
x = x + i
return x
model = LoopModel()
inputs = torch.zeros(1, 2, 3, dtype=torch.long)
self.run_test(model, inputs)
@skipIfUnsupportedMinOpsetVersion(9)
def test_loop_nested(self):
class NestedLoopsModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
for i in range(5):
a = 0
while a < 4:
a += 1
x = x + a
return x
model = NestedLoopsModel()
inputs = torch.zeros(1, 2, 3, dtype=torch.long)
self.run_test(model, inputs)
@skipIfUnsupportedMinOpsetVersion(11)
def test_loop_with_list(self):
class ListLoopModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
res = []
res1 = []
arr = x.split([3, 4, 1, 1, 2, 3, 2], 0)
res2 = torch.zeros(3, 4, dtype=torch.long)
res3 = []
res4 = []
for i in range(len(arr)):
res.append(arr[i].sum(0, False))
res1.append(arr[-1 - i].sum(0, False))
res2 += 1
res3 = res3 + [arr[i].sum(0, False)]
res4 += [arr[-1 - i].sum(0, False)]
return res, res1, res2, torch.stack(res3), torch.stack(res4)
model = ListLoopModel()
inputs = torch.randn(16)
self.run_test(model, inputs)
@skipIfUnsupportedMinOpsetVersion(11)
def test_loop_transpose(self):
class LoopModel(torch.nn.Module):
def forward(self, x):
res = torch.zeros_like(x[0])
for i in range(x.size(0)):
res += x[0].transpose(0, 1)
return res
model = torch.jit.script(LoopModel())
x = torch.randn(5, 3, 3)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_loop_multi_dim(self):
class LoopMultiDimModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, y):
for x_ in torch.flip(x.narrow(0, 0, 7), [0]):
y = x_[0][y]
return y
model = LoopMultiDimModel()
x = torch.randint(0, 5, (8, 1, 17), dtype=torch.long)
y = torch.ones(1, dtype=torch.long)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(11)
def test_list(self):
class ListModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
tensors = x.unbind()
res = []
res.append(tensors[0])
res.append(tensors[1])
res.pop(1)
res.insert(0, tensors[1])
res.append(tensors[2])
res += [tensors[3], tensors[4]]
res = res + [tensors[5]]
return torch.ones(len(res))
model = ListModel()
inputs = torch.randn(16, 1)
self.run_test(model, inputs)
@skipIfUnsupportedMinOpsetVersion(11)
def test_list_append(self):
class ListModel(torch.nn.Module):
def forward(self, x, y):
res = []
for i in range(x.size(0)):
res += [torch.matmul(x[i], y)]
return res
model = torch.jit.script(ListModel())
x = torch.randn(16, 3, 4)
y = torch.randn(4, 5)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(13)
def test_list_append_nested(self):
class ListModel(torch.nn.Module):
def forward(self, x, y):
res = []
for i in range(x.size(0)):
for j in range(x.size(1)):
res += [torch.matmul(x[i][j], y)]
return res
model = torch.jit.script(ListModel())
x = torch.randn(4, 4, 3, 4)
y = torch.randn(4, 5)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(14) # Need onnx::Identity of sequence in opset 14
def test_list_append_nested_2(self):
class ListModel(torch.nn.Module):
def forward(self, x):
res = []
res_replicate = []
for i in range(x.size(0)):
if len(res) > 2:
for j in range(x.size(1)):
res.append(x[i][j])
res_replicate.append(res[-1])
res.append(res_replicate[-1])
return res, res_replicate
model = torch.jit.script(ListModel())
x = torch.randn(4, 4, 3, 4)
self.run_test(model, (x,))
@skipIfUnsupportedMinOpsetVersion(13)
def test_list_append_nested_mixed_dtype(self):
class ListModel(torch.nn.Module):
def forward(self, x, y):
res = []
for i in range(x.size(0)):
for j in range(x.size(1)):
if i == j:
res.append(x == y)
else:
res.append(x != y)
return res
model = torch.jit.script(ListModel())
x = torch.randn(4, 4, 3, 4)
y = torch.randn(3, 4)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(11)
def test_list_pop(self):
class ListModel(torch.nn.Module):
def forward(self, x, y):
res = []
for i in range(x.size(0)):
res += [torch.matmul(x[i], y)]
res.pop()
return res
model = torch.jit.script(ListModel())
x = torch.randn(16, 3, 4)
y = torch.randn(4, 5)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(13)
def test_list_pop_nested(self):
class ListModel(torch.nn.Module):
def forward(self, x, y):
res = []
for i in range(x.size(0)):
for j in range(x.size(1)):
res += [torch.matmul(x[i][j], y)]
res.pop()
res += [torch.matmul(x[i][0], y)]
return res
model = torch.jit.script(ListModel())
x = torch.randn(4, 4, 3, 4)
y = torch.randn(4, 5)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(11)
def test_list_del(self):
class ListModel(torch.nn.Module):
def forward(self, x, y):
res = []
for i in range(x.size(0)):
res += [torch.matmul(x[i], y)]
del res[2]
return res
model = torch.jit.script(ListModel())
x = torch.randn(16, 3, 4)
y = torch.randn(4, 5)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(13)
def test_list_del_nested(self):
class ListModel(torch.nn.Module):
def forward(self, x, y):
res = []
for i in range(x.size(0)):
for j in range(x.size(1)):
res += [torch.matmul(x[i][j], y)]
del res[i]
res += [torch.matmul(x[i][0], y)]
return res
model = torch.jit.script(ListModel())
x = torch.randn(4, 4, 3, 4)
y = torch.randn(4, 5)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(11)
def test_list_set(self):
class ListModel(torch.nn.Module):
def forward(self, x, y):
res = []
for i in range(x.size(0)):
res.append(x[i])
res[y] = x[y]
return res
model = torch.jit.script(ListModel())
x = torch.randn(12, 4)
y = torch.tensor(2, dtype=torch.long)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(13)
def test_list_idx_sum(self):
class ListModel(torch.nn.Module):
def forward(self, x, y):
indices = torch.arange(x.size(0))
res = []
for i in range(x.size(0)):
res.append(x[i])
return res[torch.sum(indices[:y])]
model = torch.jit.script(ListModel())
x = torch.randn(12, 4)
y = torch.tensor(2, dtype=torch.long)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_tensor_factories(self):
class TensorFactory(torch.nn.Module):
def forward(self, x):
return torch.zeros(x.size()) + torch.ones(x.size())
x = torch.randn(2, 3, 4)
self.run_test(
TensorFactory(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
)
self.run_test(TensorFactory(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_tensor_factories_script(self):
class TensorFactory(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.zeros(x.shape, dtype=torch.float) + torch.ones(
x.shape, dtype=torch.float
)
x = torch.randn(2, 3, 4)
self.run_test(
TensorFactory(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
)
self.run_test(TensorFactory(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_tensor_like_factories_script(self):
class TensorFactory(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
zeros = torch.zeros_like(
x,
dtype=torch.float,
layout=torch.strided,
device=torch.device("cpu"),
)
ones = torch.ones_like(
x,
dtype=torch.float,
layout=torch.strided,
device=torch.device("cpu"),
)
return zeros + ones
x = torch.randn(2, 3, 4)
self.run_test(
TensorFactory(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
)
self.run_test(TensorFactory(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(13)
def test_tensor_split(self):
class TensorSplitModel(torch.nn.Module):
def forward(self, input):
return (
input.tensor_split([1, 3]),
# test with output indexing.
input.tensor_split([2, 4])[0],
# test split on specific dim.
input.tensor_split([1, 3, 4], dim=-2),
# test split on specific dim and output indexing.
input.tensor_split([0, 2], dim=-2)[-1],
# test with out of bound end index (5).
input.tensor_split([2, 3, 5]),
)
self.run_test(TensorSplitModel(), torch.randn(5, 4, 3))
@skipIfUnsupportedMinOpsetVersion(13)
def test_tensor_split_scalar(self):
class TensorSplitModel(torch.nn.Module):
def forward(self, x):
return torch.tensor_split(x, x.size(1))
self.run_test(TensorSplitModel(), torch.randn(1, 2, 3))
@skipIfUnsupportedMinOpsetVersion(13)
def test_tensor_split_dynamic_axes(self):
class TensorSplitModel(torch.nn.Module):
def forward(self, x):
return x.tensor_split(1, dim=-1)
x = torch.randn(4, 384, 2)
input_names = ["logits"]
self.run_test(
TensorSplitModel(),
x,
input_names=input_names,
dynamic_axes={input_names[0]: {0: "batch"}},
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_eye(self):
class TensorFactory(torch.nn.Module):
def forward(self, x):
return (
torch.eye(x.size()[1], 3),
torch.eye(4, 4, dtype=torch.long),
torch.eye(x.size()[1], 2, dtype=torch.long),
torch.eye(x.shape[0]),
torch.eye(x.shape[0], dtype=torch.float64),
)
x = torch.randn(2, 3, 4)
another_x = torch.randn(5, 6, 7)
self.run_test(
TensorFactory(),
x,
additional_test_inputs=[another_x],
input_names=["input_1"],
dynamic_axes={"input_1": [0, 1, 2]},
)
@skipIfUnsupportedMinOpsetVersion(13)
def test_diagonal(self):
class DiagonalModel(torch.nn.Module):
def forward(self, x):
return torch.diagonal(x)
x = torch.randn(2, 4, 5, 2)
# Other test inputs to test dynamic behavior
another_x = torch.randn(5, 6, 7, 8)
self.run_test(
DiagonalModel(),
x,
additional_test_inputs=[another_x],
input_names=["input_1"],
dynamic_axes={"input_1": [0, 1, 2, 3]},
)
class DiagonalModelNegOffset(torch.nn.Module):
def forward(self, x):
return torch.diagonal(x, offset=-1)
x = torch.randn(2, 4, 5, 2)
# Other test inputs to test dynamic behavior
another_x = torch.randn(5, 6, 7, 8)
self.run_test(
DiagonalModelNegOffset(),
x,
additional_test_inputs=[another_x],
input_names=["input_1"],
dynamic_axes={"input_1": [0, 1, 2, 3]},
)
class DiagonalModelPosOffset(torch.nn.Module):
def forward(self, x):
return torch.diagonal(x, offset=1)
x = torch.randn(2, 4, 5, 2)
# Other test inputs to test dynamic behavior
another_x = torch.randn(5, 6, 7, 8)
self.run_test(
DiagonalModelPosOffset(),
x,
additional_test_inputs=[another_x],
input_names=["input_1"],
dynamic_axes={"input_1": [0, 1, 2, 3]},
)
class DiagonalModelWithDims(torch.nn.Module):
def forward(self, x):
return torch.diagonal(x, offset=-1, dim1=1, dim2=2)
x = torch.randn(2, 4, 5, 2)
# Other test inputs to test dynamic behavior
another_x = torch.randn(5, 6, 7, 8)
self.run_test(
DiagonalModelWithDims(),
x,
additional_test_inputs=[another_x],
input_names=["input_1"],
dynamic_axes={"input_1": [0, 1, 2, 3]},
)
class DiagonalModelOffsetOverrun(torch.nn.Module):
def forward(self, x):
return torch.diagonal(x, offset=-2), torch.diagonal(x, offset=5)
x = torch.randn(2, 4, 5, 2)
# Other test inputs to test dynamic behavior
another_x = torch.randn(5, 6, 7, 8)
self.run_test(
DiagonalModelOffsetOverrun(),
x,
additional_test_inputs=[another_x],
input_names=["input_1"],
dynamic_axes={"input_1": [0, 1, 2, 3]},
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_zero(self):
class Zero_(torch.nn.Module):
def forward(self, x):
return x.zero_(), x
x = torch.randn(2, 3, 4)
self.run_test(Zero_(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]})
self.run_test(Zero_(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_new_zeros(self):
class Zero_(torch.nn.Module):
def forward(self, x):
return x.new_zeros(x.shape[1:2]), x.new_zeros(
x.shape[2:], dtype=torch.long
)
x = torch.randn(2, 3, 4)
self.run_test(Zero_(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]})
self.run_test(Zero_(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_new_ones(self):
class OnesModel(torch.nn.Module):
def forward(self, x):
return x.new_ones(x.shape[1:2]), x.new_ones(
x.shape[2:], dtype=torch.long
)
x = torch.randn(2, 3, 4)
self.run_test(OnesModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]})
self.run_test(OnesModel(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
@skipScriptTest() # torch.zeros/torch.ones with size tensor of dim != 0 not scriptable.
def test_zeros_ones_with_tensor_input(self):
class ZeroAndOnes(torch.nn.Module):
def forward(self, x):
return torch.zeros(x, 1), torch.ones(x, 1)
x = torch.tensor([2])
self.run_test(ZeroAndOnes(), (x,))
@skipIfUnsupportedMinOpsetVersion(9)
@skipShapeChecking
def test_tolist(self):
class List(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
res: List[int] = input.tolist()
return res
self.run_test(List(), (torch.randint(100, (1,)),))
@skipIfUnsupportedMinOpsetVersion(9)
def test_list_pass(self):
class Slice(torch.nn.Module):
def forward(self, x, y):
return x.new_zeros(x.shape[2:] + y.shape[1:])
x = torch.randn(2, 3, 4, 5)
y = torch.randn(1, 2, 3, 4)
self.run_test(
Slice(),
(x, y),
input_names=["x", "y"],
dynamic_axes={"x": [0, 1, 2, 3], "y": [0, 1, 2, 3]},
)
self.run_test(Slice(), (x, y), remained_onnx_input_idx=[])
class Size(torch.nn.Module):
def forward(self, x, y):
return x.new_zeros(x.shape + y.shape)
x = torch.randn(2, 3, 4)
y = torch.randn(1, 2, 3)
self.run_test(
Size(),
(x, y),
input_names=["x", "y"],
dynamic_axes={"x": [0, 1, 2], "y": [0, 1, 2]},
)
self.run_test(Size(), (x, y), remained_onnx_input_idx=[])
class Array(torch.nn.Module):
def forward(self, x, y):
arr1 = [x.shape[0], x.shape[1], 2]
arr2 = [y.shape[0], y.shape[1]]
return x.new_zeros(arr1 + arr2)
x = torch.randn(2, 3, 4)
y = torch.randn(1, 2, 3)
self.run_test(
Array(),
(x, y),
input_names=["x", "y"],
dynamic_axes={"x": [0, 1, 2], "y": [0, 1, 2]},
)
self.run_test(Array(), (x, y), remained_onnx_input_idx=[])
class List(torch.nn.Module):
def forward(self, x, y):
l1 = list(x.shape)
l2 = list(y.shape)
return x.new_zeros(l1 + l2)
x = torch.randn(2, 3, 4)
y = torch.randn(1, 2, 3)
self.run_test(
List(),
(x, y),
input_names=["x", "y"],
dynamic_axes={"x": [0, 1, 2], "y": [0, 1, 2]},
)
self.run_test(List(), (x, y), remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_new_empty(self):
        class Empty(torch.nn.Module):
def forward(self, x):
return (
x.new_empty(x.shape[0]).fill_(0),
x.new_empty(x.shape[0], dtype=torch.long) * 0,
)
x = torch.randn(2, 3, 4)
        self.run_test(Empty(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]})
        self.run_test(Empty(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_new_full(self):
class Full(torch.nn.Module):
def forward(self, x):
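                # new_full with dtype=torch.long truncates the float fill value 1.3 to 1.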
return x.new_full(x.shape[1:2], 5), x.new_full(
x.shape[0:1], 1.3, dtype=torch.long
)
x = torch.randn(2, 3, 4)
self.run_test(Full(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]})
self.run_test(Full(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_list(self):
class Arithmetic(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, y):
return torch.cat([x.add_(3), y.fill_(0)])
x = torch.randn(2, 3)
y = torch.randn(2, 3)
self.run_test(
Arithmetic(),
(x, y),
input_names=["x", "y"],
dynamic_axes={"x": [0, 1], "y": [0, 1]},
)
self.run_test(Arithmetic(), (x, y), remained_onnx_input_idx=[0])
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_fill(self):
class Fill_(torch.nn.Module):
def forward(self, x):
return x.fill_(3), x
x = torch.randn(2, 3, 4)
self.run_test(Fill_(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]})
self.run_test(Fill_(), x, remained_onnx_input_idx=[])
def test_inplace_arithmetic(self):
class Arithmetic(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, y):
x.add_(3)
y.mul_(x)
return x, y
x = torch.randn(2, 3, 4)
y = torch.randn(2, 3, 4)
self.run_test(Arithmetic(), (x, y))
def test_inplace_arithmetic_half(self):
class InplaceAddModel(torch.nn.Module):
def forward(self, x, y):
return x.add_(y)
class InplaceMulModel(torch.nn.Module):
def forward(self, x, y):
return x.mul_(y)
x = torch.randn(2, 2, dtype=torch.half)
y = torch.randn(2, 2, dtype=torch.float)
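        # Mixed half/float in-place arithmetic loses precision, hence the relaxed
        # tolerances below.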
self.run_test(InplaceAddModel(), (x, y), rtol=1e-2, atol=1e-2)
self.run_test(InplaceMulModel(), (x, y), rtol=1e-2, atol=1e-2)
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_with_loop(self):
class M(torch.nn.Module):
def forward(self, x):
a = torch.ones(
12,
)
for i in range(10):
a.add_(
torch.ones(
12,
)
)
return a + x
m = M()
x = torch.randn(
12,
)
self.run_test(torch.jit.script(M()), (x))
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_with_loop_2(self):
class M(torch.nn.Module):
def forward(self, x):
_bias = torch.ones(
12,
)
a = torch.ones(
12,
) # used in loop, altered.
a_ref = a # not used in loop, should be altered.
                b = x.clone()  # used in loop, not altered.
b_ref = b # not used in loop, should not be altered.
for i in range(10):
if i == 3:
for j in range(5):
a += _bias
_bias.add_(
torch.ones(
12,
)
)
b = b + torch.ones(
12,
)
_bias.add_(
torch.ones(
12,
)
)
a += _bias
# TODO: value for a_ref is incorrect.
# a_ref += torch.ones(12,)
b_ref += torch.ones(
12,
)
return _bias + x, a, b, b_ref
m = M()
x = torch.zeros(
12,
)
self.run_test(torch.jit.script(M()), (x))
@skipIfUnsupportedMinOpsetVersion(11)
def test_inplace_attr_with_loop(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self._bias = torch.arange(
12,
)
def forward(self, x):
self._bias = torch.arange(
12,
)
for i in range(10):
if i == 3:
for j in range(5):
self._bias += torch.arange(
12,
)
return self._bias + x
m = M()
x = torch.zeros(
12,
)
self.run_test(torch.jit.script(M()), (x))
@skipIfUnsupportedMinOpsetVersion(11)
def test_inplace_attr_copy_with_loop(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self._bias = torch.arange(
12,
)
def forward(self, x):
self._bias = torch.arange(
12,
)
for i in range(10):
if i == 3:
for j in range(5):
self._bias.copy_(
torch.arange(
12,
)
)
self._bias.copy_(
self._bias
+ torch.arange(
12,
)
)
self._bias.copy_(
self._bias
+ torch.arange(
12,
)
)
return self._bias + x
m = M()
x = torch.zeros(
12,
)
self.run_test(torch.jit.script(M()), (x))
@skipIfUnsupportedMinOpsetVersion(14) # Need onnx::Identity of sequence in opset 14
def test_inplace_sequence_with_loop(self):
class M(torch.nn.Module):
def process(self, beam_hyps: List[Tensor], done: Tensor, x):
batch_size = x.shape[0]
for i in range(batch_size):
if done[i]:
continue
beam_idx = 0
for _, token in enumerate(x[i]):
beam_hyps.append(token)
beam_idx += 1
if beam_idx == 6:
break
done[i] = len(beam_hyps) > 4
return beam_hyps, done
def forward(self, x):
beam_hyps: List[Tensor] = []
batch_size = x.shape[0]
cur_len = 0
max_len = x.shape[1]
done = torch.zeros(batch_size, dtype=torch.bool)
while cur_len < max_len:
beam_hyps, done = self.process(beam_hyps, done, x[:, 0, :])
cur_len = cur_len + 1
return beam_hyps
m = torch.jit.script(M())
x = torch.randn(8, 4, 3)
        self.run_test(m, (x,))
@skipScriptTest() # Sort with dynamic dim not supported in ONNX
def test_sort(self):
class SortModel(torch.nn.Module):
def forward(self, x):
out = []
for i in range(-2, 2):
out.append(torch.sort(x, dim=i, descending=True))
return out
x = torch.randn(3, 4)
self.run_test(SortModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest() # Sort with dynamic dim not supported in ONNX
def test_sort_ascending(self):
class SortModel(torch.nn.Module):
def forward(self, x):
out = []
for i in range(-2, 2):
out.append(torch.sort(x, dim=i, descending=False))
return out
x = torch.randn(3, 4)
self.run_test(SortModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_argsort(self):
class ArgSortModel(torch.nn.Module):
def forward(self, x):
return torch.argsort(x, dim=1, descending=False)
x = torch.randn(3, 4)
self.run_test(ArgSortModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_masked_fill(self):
class MaskedFillModel(torch.nn.Module):
def forward(self, x):
mask = torch.tensor([[0, 0, 1], [1, 1, 0]], dtype=torch.uint8)
return x.masked_fill(mask, 2)
x = torch.zeros(4, 2, 3, requires_grad=True)
self.run_test(MaskedFillModel(), x)
class MaskedFillModel2(torch.nn.Module):
def forward(self, x):
return x.masked_fill(x > 3, -1)
x = torch.arange(16).view(2, 2, 4).to(torch.float32)
self.run_test(MaskedFillModel2(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_masked_fill_inplace(self):
class MaskedFillModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
mask = torch.tensor([[0, 0, 1], [1, 1, 0]], dtype=torch.uint8)
x.masked_fill_(mask, 2)
return x
x = torch.zeros(4, 2, 3, requires_grad=True)
self.run_test(MaskedFillModel(), x)
class MaskedFillModel2(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
x.masked_fill_(x > 3, -1)
return x
x = torch.arange(16).view(2, 2, 4).to(torch.float32)
self.run_test(MaskedFillModel2(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_masked_scatter(self):
class MaskedScatterModel(torch.nn.Module):
def forward(self, x):
return torch.masked_scatter(x, x.ge(0.5), torch.ones(100, 100) * 5)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(MaskedScatterModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_masked_select(self):
class MaskedSelectModel(torch.nn.Module):
def forward(self, x):
return torch.masked_select(x, x.ge(0.5))
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(MaskedSelectModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_to_masked_fill(self):
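        # Boolean-mask assignment lowers to aten::index_put_, which the exporter is
        # expected to rewrite as a masked_fill.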
class MaskedFillModel(torch.nn.Module):
def forward(self, input_mask, some_const):
mask = input_mask.clone()
mask[mask != some_const] = 1
mask[mask == some_const] = 0
return mask
mask = torch.randn(2, 2, 2, requires_grad=True)
constant = torch.tensor(5, dtype=torch.float)
self.run_test(MaskedFillModel(), (mask, constant))
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_to_masked_scatter(self):
class MaskedScatterModel(torch.nn.Module):
def forward(self, input_mask, some_const):
mask = input_mask.clone()
mask[mask != some_const] = torch.ones(8)
return mask
mask = torch.randn(2, 2, 2, requires_grad=True)
constant = torch.tensor(5, dtype=torch.float)
self.run_test(MaskedScatterModel(), (mask, constant))
@skipIfUnsupportedMinOpsetVersion(9)
def test_pixel_shuffle(self):
class PixelShuffle(torch.nn.Module):
def forward(self, x):
return torch.pixel_shuffle(x, upscale_factor=2)
x = torch.randn(2, 16, 4, 3, requires_grad=True)
y = torch.randn(4, 32, 8, 4, requires_grad=True)
self.run_test(PixelShuffle(), x)
self.run_test(
PixelShuffle(),
x,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2, 3]},
additional_test_inputs=[y],
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_pixel_unshuffle(self):
class PixelUnshuffle(torch.nn.Module):
def forward(self, x):
return torch.pixel_unshuffle(x, downscale_factor=2)
x = torch.randn(2, 16, 4, 6, requires_grad=True)
y = torch.randn(4, 32, 8, 4, requires_grad=True)
self.run_test(PixelUnshuffle(), x)
self.run_test(
PixelUnshuffle(),
x,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2, 3]},
additional_test_inputs=[y],
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_reciprocal(self):
class ReciprocalModel(torch.nn.Module):
def forward(self, x):
return torch.reciprocal(x)
model = ReciprocalModel()
x = torch.tensor([2, 4])
self.run_test(model, x.to(torch.long))
self.run_test(model, x.to(torch.float))
self.run_test(model, x.to(torch.double))
@skipIfUnsupportedMinOpsetVersion(9)
def test_scalar_type(self):
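        # These models mix tensors with Python scalars and tensors of different dtypes
        # to exercise implicit scalar type promotion during export.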
class ArithmeticModel(torch.nn.Module):
def forward(self, x):
return x.size(0) * 2 * x, 2 - x
x = torch.ones(2, 3, dtype=torch.float32)
self.run_test(ArithmeticModel(), x)
class ComparisonModel(torch.nn.Module):
def forward(self, x, y):
a = torch.tensor([12.0])
return x.lt(1.5) & y.le(2) & x.le(1), x.gt(y), x.lt(y), a.ge(x.size(0))
x = torch.ones(2, 3, dtype=torch.int32)
y = torch.ones(2, 3, dtype=torch.float32)
self.run_test(ComparisonModel(), (x, y))
class MatMulModel(torch.nn.Module):
def forward(self, x):
return torch.mm(x, x) + x + torch.mm(x, x) + x
x = torch.ones(3, 3)
self.run_test(MatMulModel(), x)
class AddMMModel(torch.nn.Module):
def forward(self, x):
return torch.mm(x, x) + x
x = torch.ones(3, 3)
self.run_test(AddMMModel(), x)
class FullModel(torch.nn.Module):
            # an Add op is used when exporting full with a tensor fill value
def forward(self, x):
return torch.full((3, 4), x)
x = torch.tensor(12.0)
self.run_test(FullModel(), x)
class CatModel(torch.nn.Module):
def forward(self, fp16, fp32):
return torch.cat([fp16, fp32])
fp16 = Tensor([0.5])
fp16 = fp16.half()
fp32 = Tensor([1.5])
self.run_test(CatModel(), (fp16, fp32))
@skipIfUnsupportedMinOpsetVersion(9)
def test_full_like(self):
class FullLikeModel(torch.nn.Module):
def forward(self, x):
return torch.full_like(x, 1.3, dtype=torch.int)
x = torch.tensor(12)
self.run_test(FullLikeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
@skipDtypeChecking
def test_full_like_value(self):
class FullLikeModel(torch.nn.Module):
def forward(self, x, y):
out = y + 2
return torch.full_like(x, out)
x = torch.tensor(12)
y = torch.tensor(2)
self.run_test(FullLikeModel(), (x, y))
def test_l1_norm(self):
class NormModel(torch.nn.Module):
def forward(self, x):
return torch.norm(x, p=1, dim=-1, keepdim=False)
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(NormModel(), x)
def test_l2_norm(self):
class NormModel(torch.nn.Module):
def forward(self, x):
return torch.norm(x, p=2, dim=-2, keepdim=False)
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(NormModel(), x)
def test_frobenius_norm(self):
class NormModel(torch.nn.Module):
def forward(self, x):
return torch.norm(x, p="fro", dim=0, keepdim=False)
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(NormModel(), x)
def test_frobenius_norm_keepdim(self):
class NormModel(torch.nn.Module):
def forward(self, x):
return torch.norm(x, p="fro", dim=(0, 1), keepdim=True)
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(NormModel(), x)
def test_unfold(self):
class UnfoldModel(torch.nn.Module):
def forward(self, x):
return x.unfold(dimension=2, size=2, step=2)
x = torch.randn(4, 2, 3, requires_grad=True)
y = torch.randn(2, 1, 3, requires_grad=True)
self.run_test(
UnfoldModel(),
x,
dynamic_axes={"x": [0, 1]},
input_names=["x"],
additional_test_inputs=[y],
)
def test_unfold_infer_shape(self):
class UnfoldModule(torch.jit.ScriptModule):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv1d(3, 1, 3, stride=2)
@torch.jit.script_method
def forward(self, x):
x = self.conv(x)
return x.unfold(dimension=2, size=2, step=2)
x = torch.randn(32, 3, 64)
self.run_test(UnfoldModule(), x)
@skipIfUnsupportedMinOpsetVersion(12)
def test_unfold_dynamic_inputs(self):
class UnfoldModel(torch.nn.Module):
def forward(self, x):
return x.unfold(dimension=2, size=x.shape[1], step=x.shape[1] - 1)
x = torch.randn(4, 2, 4, requires_grad=True)
self.run_test(UnfoldModel(), x)
class UnfoldModel(torch.nn.Module):
def forward(self, x):
return x.unfold(dimension=2, size=x.shape[1], step=1)
x = torch.randn(4, 2, 4, requires_grad=True)
self.run_test(UnfoldModel(), x)
    @skipIfUnsupportedMinOpsetVersion(9)  # MatMul with long inputs is supported since ONNX opset 9.
def test_mv(self):
class MatmulModel(torch.nn.Module):
def forward(self, input, other):
return torch.mv(input, other)
x = torch.randn(4, 5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
self.run_test(MatmulModel(), (x, y))
x = torch.randint(10, (4, 5))
y = torch.randint(10, (5,))
self.run_test(MatmulModel(), (x, y))
    @skipIfUnsupportedMinOpsetVersion(9)  # MatMul with long inputs is supported since ONNX opset 9.
def test_dot(self):
class MatmulModel(torch.nn.Module):
def forward(self, input, other):
return torch.dot(input, other)
x = torch.randn(5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
self.run_test(MatmulModel(), (x, y))
x = torch.randint(10, (5,))
y = torch.randint(10, (5,))
self.run_test(MatmulModel(), (x, y))
@skipScriptTest() # SpectralNorm not TorchScript compatible.
def test_spectral_norm(self):
m = torch.nn.utils.spectral_norm(torch.nn.Linear(2, 4))
x = torch.randn(6, 2)
self.run_test(m, (x,))
def test_prelu(self):
class PReluModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.prelu = torch.nn.PReLU()
def forward(self, x):
return self.prelu(x)
x = torch.randn(2, 3, 4)
y = torch.randn(2, 4, 5)
self.run_test(
PReluModel(),
x,
input_names=["x"],
dynamic_axes={"x": [1, 2]},
additional_test_inputs=[y],
)
def test_prelu_scalar(self):
x = torch.scalar_tensor(1.0)
self.run_test(torch.nn.PReLU(), x, input_names=["x"])
def test_relu6(self):
class Relu6Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu6 = torch.nn.ReLU6()
def forward(self, x):
return self.relu6(x)
x = torch.randn(2, 3, 4) * 100.0
y = torch.randn(2, 4, 5) * 100.0
self.run_test(
Relu6Model(),
x,
input_names=["x"],
dynamic_axes={"x": [1, 2]},
additional_test_inputs=[y],
)
def test_silu(self):
class SiLUModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.silu = torch.nn.SiLU()
def forward(self, x):
return self.silu(x)
x = torch.randn(2, 3, 4)
self.run_test(SiLUModel(), (x))
@skipIfUnsupportedMinOpsetVersion(14)
def test_tril(self):
class trilModel(torch.nn.Module):
def forward(self, x):
return torch.tril(x)
x = torch.randn(2, 3, 4)
self.run_test(trilModel(), (x))
class trilModelwithDiagonal(torch.nn.Module):
def forward(self, x):
return torch.tril(x, diagonal=1)
x = torch.randn(2, 3, 4)
self.run_test(trilModelwithDiagonal(), (x))
class trilModelwithNegDiagonal(torch.nn.Module):
def forward(self, x):
return torch.tril(x, diagonal=-1)
x = torch.randn(2, 3, 4)
self.run_test(trilModelwithNegDiagonal(), (x))
@skipIfUnsupportedMinOpsetVersion(14)
def test_triu(self):
class triuModel(torch.nn.Module):
def forward(self, x):
return torch.triu(x)
x = torch.randn(2, 3, 4)
self.run_test(triuModel(), (x))
class triuModelwithDiagonal(torch.nn.Module):
def forward(self, x):
return torch.triu(x, diagonal=1)
x = torch.randn(2, 3, 4)
self.run_test(triuModelwithDiagonal(), (x))
        class triuModelwithNegDiagonal(torch.nn.Module):
            def forward(self, x):
                return torch.triu(x, diagonal=-1)
        x = torch.randn(2, 3, 4)
        self.run_test(triuModelwithNegDiagonal(), (x))
def test_mish(self):
class MishModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.mish = torch.nn.Mish()
def forward(self, x):
return self.mish(x)
x = torch.randn(2, 3, 4)
self.run_test(MishModel(), (x))
def test_remainder(self):
class RemainderModel(torch.nn.Module):
def forward(self, input, other):
return torch.remainder(input, other)
x = torch.randn(4, 2, 3)
y = torch.randn(1, 2, 1)
self.run_test(RemainderModel(), (x, y))
x = torch.tensor([7, 6, -7, -6], dtype=torch.long)
y = torch.tensor([2], dtype=torch.long)
self.run_test(RemainderModel(), (x, y))
x = x.to(torch.float)
self.run_test(RemainderModel(), (x, y))
y = y.to(torch.float)
self.run_test(RemainderModel(), (x, y))
x = x.to(torch.int32)
self.run_test(RemainderModel(), (x, y))
def test_remainder_scalar(self):
class RemainderModel(torch.nn.Module):
def __init__(self, scalar=2.55):
super().__init__()
self.scalar = scalar
def forward(self, input):
return torch.remainder(input, self.scalar)
x = torch.randint(10, (2, 3))
self.run_test(RemainderModel(), x)
x = torch.tensor([7, 6, -7, -6], dtype=torch.long)
self.run_test(RemainderModel(2), x)
@skipIfUnsupportedMinOpsetVersion(10)
def test_fmod(self):
class FModModel(torch.nn.Module):
def forward(self, input, other):
return torch.fmod(input, other)
x = torch.randn(4, 2, 3)
y = torch.randn(1, 2, 1)
self.run_test(FModModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(10)
def test_fmod_scalar(self):
class FModModel(torch.nn.Module):
def forward(self, input):
return torch.fmod(input, 2.55)
x = torch.randint(10, (2, 3))
self.run_test(FModModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_glu(self):
class GluModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.glu(x)
x = torch.randn(2, 4, 5, 6, requires_grad=True)
self.run_test(GluModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_gelu(self):
class GeluModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.gelu(x, approximate="none")
x = torch.randn(2, 4, 5, 6, requires_grad=True)
self.run_test(GeluModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_tanh_gelu(self):
class GeluModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.gelu(x, approximate="tanh")
x = torch.randn(2, 4, 5, 6, requires_grad=True)
self.run_test(GeluModel(), x)
def test_add_inplace(self):
class InplaceAddModel(torch.nn.Module):
def forward(self, x):
x += 12
return x
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(InplaceAddModel(), x)
def test_addcmul(self):
class AddcmulModel(torch.nn.Module):
def forward(self, x, t1, t2):
return torch.addcmul(x, t1, t2), torch.addcmul(x, t1, t2, value=2.2)
x = torch.randn(1, 3)
t1 = torch.randn(3, 1)
t2 = torch.randn(1, 3)
self.run_test(AddcmulModel(), (x, t1, t2))
def test_rsqrt(self):
class RsqrtModel(torch.nn.Module):
def forward(self, x):
return x.rsqrt()
x = torch.randn(4, 2, 3, requires_grad=True, dtype=torch.float64)
self.run_test(RsqrtModel(), x)
def test_rsqrt_zeros(self):
class RsqrtModel(torch.nn.Module):
def forward(self, x):
return x.rsqrt()
x = torch.zeros(4, 2, 3, requires_grad=True, dtype=torch.float64)
self.run_test(RsqrtModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_unique(self):
class UniqueModel(torch.nn.Module):
def forward(self, x):
return torch.unique(
x, sorted=True, return_inverse=False, return_counts=True
)
x = torch.tensor([1, 3, 2, 3], dtype=torch.long)
self.run_test(UniqueModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_unique_along_dim(self):
class UniqueModel(torch.nn.Module):
def forward(self, x):
return torch.unique(
x, dim=0, sorted=True, return_inverse=True, return_counts=False
)
x = torch.tensor([1, 3, 2, 3], dtype=torch.long)
self.run_test(UniqueModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_cumsum(self):
class CumSum(torch.nn.Module):
def forward(self, input):
return torch.cumsum(input, dim=0)
x = torch.randn(2, 3, 4)
model = CumSum()
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_cumsum_with_cast(self):
class CumSum(torch.nn.Module):
def forward(self, input):
return torch.cumsum(input, dim=0, dtype=torch.float32)
model = CumSum()
x = torch.tensor([2, 3, 4], dtype=torch.int32)
self.run_test(model, x)
x = torch.tensor([False, True, True])
self.run_test(model, x)
@skipScriptTest() # error in propagate as assign input shape
@skipIfUnsupportedMinOpsetVersion(10)
def test_embedding_bag(self):
model = torch.nn.EmbeddingBag(10, 5, mode="sum", scale_grad_by_freq=True)
input = torch.randint(10, (7,))
offset = torch.tensor([0, 2, 5, 6])
self.run_test(model, (input, offset))
model = torch.nn.EmbeddingBag(10, 5, mode="sum", include_last_offset=True)
input = torch.randint(10, (7,))
offset = torch.tensor([0, 2, 5, 6])
self.run_test(model, (input, offset))
model = torch.nn.EmbeddingBag(10, 5, mode="max")
input = torch.randint(10, (7, 5))
self.run_test(model, (input))
@skipIfUnsupportedMinOpsetVersion(11)
def test_embedding_bag_1d_per_sample_weights(self):
class EmbeddingModel(torch.nn.Module):
def forward(self, embedding_matrix, input, offset, weights):
return torch.nn.functional.embedding_bag(
input,
embedding_matrix,
offsets=offset,
mode="sum",
per_sample_weights=weights,
)
model = EmbeddingModel()
x = torch.randint(7, (6,))
w = torch.randn(
6,
)
offset = torch.tensor([0, 2, 5])
embedding_matrix = torch.rand(10, 15)
self.run_test(model, (embedding_matrix, x, offset, w))
@skipIfUnsupportedMinOpsetVersion(11)
def test_embedding_bag_2d_per_sample_weights(self):
class EmbeddingModel(torch.nn.Module):
def forward(self, embedding_matrix, input, weights):
return torch.nn.functional.embedding_bag(
input, embedding_matrix, mode="sum", per_sample_weights=weights
)
embedding_matrix = torch.rand(10, 15)
model = EmbeddingModel()
x = torch.randint(7, (2, 3))
w = torch.randn(2, 3)
x2 = torch.randint(7, (4, 3))
w2 = torch.randn(4, 3)
self.run_test(
model,
(embedding_matrix, x, w),
input_names=["embed", "x", "w"],
dynamic_axes={"x": [0], "w": [0]},
additional_test_inputs=[(embedding_matrix, x2, w2)],
)
@skipScriptTest() # scripting prim::Uninitialized, prim::dtype, prim::unchecked_cast
@skipIfUnsupportedMinOpsetVersion(11)
@unittest.skip(
"Due to ONNX Loop shape inference issue. "
"https://msdata.visualstudio.com/Vienna/_workitems/edit/1352001"
)
def test_embedding_bag_dynamic_input(self):
class EmbeddingModel1D(torch.nn.Module):
def forward(self, embedding_matrix, input, weights, offsets):
return torch.nn.functional.embedding_bag(
input,
embedding_matrix,
offsets=offsets,
mode="sum",
per_sample_weights=weights,
)
model = EmbeddingModel1D()
x = torch.randint(7, (6,))
w = torch.randn(
6,
)
offsets = torch.tensor([0, 2, 5], dtype=torch.long)
embedding_matrix = torch.rand(10, 15)
x2 = torch.randint(7, (2,))
w2 = torch.randn(
2,
)
embedding_matrix2 = torch.rand(12, 25)
offsets2 = torch.tensor(
[
0,
],
dtype=torch.long,
)
self.run_test(
model,
(embedding_matrix, x, w, offsets),
additional_test_inputs=[(embedding_matrix2, x2, w2, offsets2)],
input_names=["embedding_matrix", "x", "offsets", "w"],
dynamic_axes={
"embedding_matrix": [0, 1],
"x": [0],
"offsets": [0],
"w": [0],
},
)
class EmbeddingModel2D(torch.nn.Module):
def forward(self, embedding_matrix, input, weights):
return torch.nn.functional.embedding_bag(
input, embedding_matrix, mode="sum", per_sample_weights=weights
)
model = EmbeddingModel2D()
x = torch.randint(7, (2, 3))
w = torch.randn(2, 3)
embedding_matrix = torch.rand(10, 15)
x2 = torch.randint(7, (3, 5))
w2 = torch.randn(3, 5)
embedding_matrix2 = torch.rand(12, 25)
self.run_test(
model,
(embedding_matrix, x, w),
additional_test_inputs=[(embedding_matrix2, x2, w2)],
input_names=["embedding_matrix", "x", "w"],
dynamic_axes={"embedding_matrix": [0, 1], "x": [0, 1], "w": [0, 1]},
)
@skipIfUnsupportedMinOpsetVersion(8)
def test_meshgrid(self):
class Meshgrid(torch.nn.Module):
def forward(self, x, y, z):
output1, output2, output3 = torch.meshgrid(x, y, z)
return output1, output2, output3
x = torch.randn(3, requires_grad=True)
y = torch.zeros(4, requires_grad=True)
z = torch.randn(5, requires_grad=True)
self.run_test(Meshgrid(), (x, y, z))
@skipIfUnsupportedMinOpsetVersion(8)
def test_meshgrid_scalar(self):
class Meshgrid(torch.nn.Module):
def forward(self, x, y, z):
output1, output2, output3 = torch.meshgrid(x, y, z)
return output1, output2, output3
x = torch.ones(3, requires_grad=True)
y = torch.zeros(4, requires_grad=True)
z = torch.tensor(2.0)
self.run_test(Meshgrid(), (x, y, z))
def test_baddbmm(self):
class MyModule(torch.nn.Module):
def forward(self, input, batch1, batch2):
return torch.baddbmm(
input, batch1, batch2, alpha=torch.tensor(5), beta=3.5
)
x = torch.randn(10, 3, 5)
batch1 = torch.randn(10, 3, 4)
batch2 = torch.randn(10, 4, 5)
model = MyModule()
self.run_test(model, (x, batch1, batch2))
def test_baddbmm_dynamic(self):
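        # alpha and beta are passed as runtime tensors rather than constant attributes.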
class MyModule(torch.nn.Module):
def forward(self, input, batch1, batch2, alpha, beta):
return torch.baddbmm(input, batch1, batch2, alpha=alpha, beta=beta)
x = torch.randn(10, 3, 5)
batch1 = torch.randn(10, 3, 4)
batch2 = torch.randn(10, 4, 5)
alpha = torch.tensor(5)
beta = torch.tensor(3.5)
model = MyModule()
self.run_test(model, (x, batch1, batch2, alpha, beta))
def test_numel(self):
class MyModule(torch.nn.Module):
def forward(self, input):
return input.numel() * input
x = torch.randn(2, 3, 5)
x2 = torch.randn(4, 5, 6)
model = MyModule()
self.run_test(
model,
(x,),
input_names=["x"],
dynamic_axes={"x": [0, 1, 2]},
additional_test_inputs=[(x2,)],
)
def test_numel_empty(self):
class MyModule(torch.nn.Module):
def forward(self, input):
return input.numel() * input
x = torch.randn(0)
x2 = torch.randn(4)
model = MyModule()
self.run_test(
model,
(x,),
input_names=["x"],
dynamic_axes={"x": [0]},
additional_test_inputs=[(x2,)],
)
def test_dtype(self):
class MyModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input, other):
return input.to(dtype=other.dtype) + other
x = torch.randn(2, 3)
y = torch.randn(2, 3)
self.run_test(MyModel(), (x, y))
def test_dtype_eq(self):
class MyModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input, other):
if input.dtype == other.dtype:
return input + other
return input
x = torch.randn(2, 3)
y = torch.randn(2, 3)
self.run_test(MyModel(), (x, y))
def test_cast_to(self):
class MyModule(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input, other):
return input.to(other) + other
x = torch.randn(2, 3, 4)
y = torch.tensor([1], dtype=torch.int64)
model = MyModule()
self.run_test(model, (x, y))
def test_cast_to_bool(self):
class MyModule(torch.nn.Module):
def forward(self, input, other):
return torch.cat((input.to(other), other), 0)
x = torch.randn(2, 3, 4)
y = torch.zeros([2, 3, 4], dtype=torch.bool)
model = MyModule()
self.run_test(model, (x, y))
# ONNX supports bfloat16 for opsets >= 13
@skipIfUnsupportedMinOpsetVersion(13)
def test_cast_type_as_with_bfloat16(self):
class MyModule(torch.nn.Module):
def forward(self, x):
y = torch.ones((3, 4), dtype=torch.bfloat16)
x = x.type_as(y)
return x.to(dtype=torch.float16)
x = torch.ones(3, 4, dtype=torch.float16)
model = MyModule()
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_type_as(self):
class MyModule(torch.nn.Module):
def forward(self, x):
y = torch.tensor([1.0])
return x.type_as(y)
a = torch.tensor([True, False], dtype=torch.bool)
b = torch.randn(3, 4, dtype=torch.double)
c = torch.ones((2, 2), dtype=torch.int64)
model = MyModule()
self.run_test(model, a)
self.run_test(model, b)
self.run_test(model, c)
@skipIfUnsupportedMinOpsetVersion(9)
def test_ones_bool(self):
class MyModule(torch.nn.Module):
def forward(self, input):
true = torch.ones(input.shape, dtype=torch.bool)
return input.to(true) & true
x = torch.randn(2, 3, 4)
model = MyModule()
self.run_test(model, x)
def test_log(self):
class Log(torch.nn.Module):
def forward(self, input):
return torch.log(input)
x = torch.rand(2, 3, 4)
model = Log()
self.run_test(model, x)
def test_log1p(self):
class Log1p(torch.nn.Module):
def forward(self, input):
return torch.log1p(input)
x = torch.rand(2, 3, 4)
model = Log1p()
self.run_test(model, x)
def test_log10(self):
class Log10(torch.nn.Module):
def forward(self, input):
return torch.log10(input)
x = torch.rand(2, 3, 4)
model = Log10()
self.run_test(model, x)
def test_log2(self):
class Log2(torch.nn.Module):
def forward(self, input):
return torch.log2(input)
x = torch.tensor(1.0)
model = Log2()
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_round(self):
class Round(torch.nn.Module):
def forward(self, x):
return torch.round(x)
x = torch.tensor([0.9920, -1.0362, -1.5000, 3.5000], requires_grad=True)
self.run_test(Round(), x)
def test_constant_pad(self):
model = torch.nn.ConstantPad1d(2, 3.5)
x = torch.randn(2, 4, 4)
self.run_test(model, x)
model = torch.nn.ConstantPad2d((3, 0, 2, 1), 3.5)
x = torch.randn(2, 2, 4, 4)
self.run_test(model, x)
# Dynamic padding is added in opset 11
@skipIfUnsupportedMinOpsetVersion(11)
def test_pad_types(self):
# Test for different pad integer types
class Pad(torch.nn.Module):
def forward(self, x, pad: List[int]):
return torch.nn.functional.pad(x, pad)
x = torch.randn(2, 2, 4, 4)
y = pad = [2, 4]
self.run_test(Pad(), (x, y))
y = pad = [
torch.tensor(2, dtype=torch.int64),
torch.tensor(4, dtype=torch.int64),
]
self.run_test(Pad(), (x, y))
@skipIfUnsupportedMaxOpsetVersion(10)
@skipScriptTest() # TODO: the logic in symbolic_opset9 doesn't handle script
def test_unsupported_pad(self):
class Pad(torch.nn.Module):
def forward(self, x, pad: List[int]):
return torch.nn.functional.pad(x, pad)
x = torch.randn(2, 2, 4, 4)
y = [2, 4]
with self.assertRaisesRegex(
RuntimeError,
(
"Unsupported: ONNX export of Pad.*"
+ "The sizes of the padding must be constant"
),
):
self.run_test(Pad(), (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_if_fold(self):
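        # Each branch condition below depends only on statically known properties
        # (dim, numel, dtype), so the exporter should fold the If node away.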
class IfFoldModel(torch.nn.Module):
def forward(self, y):
if y.dim() == 2:
y = y + 4
y = y + 2
else:
y = y - 1
return y
x = torch.ones((3, 4), dtype=torch.int)
self.run_test(IfFoldModel(), x)
class IfFoldModel(torch.nn.Module):
def forward(self, y):
if y.numel() > 1:
y = y + 4
else:
y = y + 2
return y
x = torch.ones((3, 4), dtype=torch.int)
self.run_test(IfFoldModel(), x)
class IfFoldModel(torch.nn.Module):
def forward(self, y):
if y.dim() != 3:
y = y + 4
y = y + 2
else:
return y
return y
x = torch.ones((3, 4), dtype=torch.int)
self.run_test(IfFoldModel(), x)
class IfFoldModel(torch.nn.Module):
def forward(self, y):
if y.dim() >= 1:
y = y + 4
else:
y = y - 1
return y
x = torch.ones((3, 4), dtype=torch.int)
self.run_test(IfFoldModel(), x)
class IfFoldModel(torch.nn.Module):
def forward(self, y):
if y.dim() <= 1:
y = y + 4
else:
y = y + 2
return y
x = torch.ones((3, 4), dtype=torch.int)
self.run_test(IfFoldModel(), x)
class IfFoldModel(torch.nn.Module):
def forward(self, y):
if y.dim() < 3 and y.dtype == torch.int:
y = y + 4
y = y + 2
else:
return y
return y
x = torch.ones((3, 4), dtype=torch.int)
self.run_test(IfFoldModel(), x)
class IfFoldModel(torch.nn.Module):
def forward(self, y):
if y.dim() == 3 and y.dtype == torch.int:
y = y + 4
y = y + 2
else:
y = y + 1
return y
x = torch.ones((3, 4), dtype=torch.int)
self.run_test(IfFoldModel(), x)
class IfFoldModel(torch.nn.Module):
def forward(self, y):
if y.numel() != 0 and y.dim() == 2:
y = y + 4
y = y + 2
else:
return y
return y
x = torch.ones((3, 4), dtype=torch.int)
self.run_test(IfFoldModel(), x)
class IfFoldModel(torch.nn.Module):
def forward(self, x, y):
if x.numel() == y.numel():
y = x + y
else:
y = y - x
return y
x = torch.ones((3, 4), dtype=torch.int)
y = torch.ones((3, 4), dtype=torch.int)
self.run_test(IfFoldModel(), (x, y))
class IfFoldModel(torch.nn.Module):
def forward(self, x, y):
if x.numel() != y.numel():
y = x + y
else:
y = y - x
return y
x = torch.ones((3, 4), dtype=torch.int)
y = torch.ones((3, 4), dtype=torch.int)
self.run_test(IfFoldModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(11)
def test_uninitialized(self):
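        # The early return from the nested if produces a prim::Uninitialized value,
        # which the exporter needs to handle.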
class UninitializedModel(torch.nn.Module):
def forward(self, y):
if y.shape[1] < 5:
if y.size(0) == 1:
y = y + 4
else:
return y
return y
x = torch.ones((3, 4), dtype=torch.int)
self.run_test(UninitializedModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_uninitialized_dynamic(self):
class UninitializedModel(torch.nn.Module):
def forward(self, y):
if y.shape[1] < 5:
if y.size(0) == 1:
y = y + 4
else:
return y
return y
x = torch.ones((3, 4), dtype=torch.int)
y = torch.ones((6, 7), dtype=torch.int)
self.run_test(
UninitializedModel(),
x,
additional_test_inputs=[y],
input_names=["input_1"],
dynamic_axes={"input_1": [0, 1]},
)
# onnx::Identity of sequence supported for ONNX opset >= 14
@skipIfUnsupportedMinOpsetVersion(14)
def test_uninitialized_tensorList(self):
class UninitializedTensorListModel(torch.nn.Module):
def forward(self, x):
if x[0].shape[0] < 5:
if x.size(0) == 1:
x = x + 4
else:
return [x]
return [x]
x = torch.ones((3, 4), dtype=torch.int)
self.run_test(torch.jit.script(UninitializedTensorListModel()), x)
# onnx::Identity of sequence supported for ONNX opset >= 14
@skipIfUnsupportedMinOpsetVersion(14)
def test_uninitialized_tensorList_dynamic(self):
class UninitializedTensorListModel(torch.nn.Module):
def forward(self, x):
if x[0].shape[0] < 5:
if x.size(0) == 1:
x += x
else:
return list(x)
return list(x)
x = torch.ones((3, 4), dtype=torch.double)
self.run_test(
torch.jit.script(UninitializedTensorListModel()),
x,
input_names=["input_1"],
dynamic_axes={"input_1": [0, 1]},
)
# onnx::Identity of sequence supported for ONNX opset >= 14
@skipIfUnsupportedMinOpsetVersion(14)
def test_uninitialized_intList(self):
class UninitializedListModel(torch.nn.Module):
def forward(self, x):
y = list(range(x.size(0)))
if y[0] < 5:
                    # if x.size(0) != 3, ORT will throw a type error.
if x.size(0) == 3:
y.append(10)
else:
return y
return y
x = torch.ones((3, 4), dtype=torch.int)
self.run_test(
torch.jit.script(UninitializedListModel()),
x,
input_names=["input_1"],
dynamic_axes={"input_1": [0, 1]},
)
# onnx::Identity of sequence supported for ONNX opset >= 14
@skipIfUnsupportedMinOpsetVersion(14)
def test_uninitialized_tensorList_shape(self):
class UninitializedModel(torch.nn.Module):
def forward(self, x):
if x.shape[1] < 5:
if x.size(0) == 1:
x = x + 4
else:
x_list = list(x)
x_list.append(x)
return x_list
return [x, x]
x = torch.ones((3, 4), dtype=torch.int)
y = torch.ones((4, 6), dtype=torch.int)
self.run_test(
torch.jit.script(UninitializedModel()),
x,
additional_test_inputs=[y],
input_names=["input_1"],
dynamic_axes={"input_1": [0, 1]},
)
    # Sequence types as loop-carried dependencies are only supported for ONNX opset >= 13
@skipIfUnsupportedMinOpsetVersion(13)
    def test_sequence_loopcarried(self):
        class SequenceLoopModel(torch.nn.Module):
def forward(self, x):
outputs = []
for i in range(3):
outputs += [x]
return torch.stack(outputs).transpose(0, 1)
x = torch.ones((3, 4), dtype=torch.int)
        self.run_test(torch.jit.script(SequenceLoopModel()), x)
def test_reflection_pad(self):
model = torch.nn.ReflectionPad1d(2)
x = torch.randn(2, 4, 4)
self.run_test(model, x)
model = torch.nn.ReflectionPad2d((3, 0, 2, 1))
x = torch.randn(2, 2, 4, 4)
self.run_test(model, x)
def test_replication_pad(self):
model = torch.nn.ReplicationPad1d(2)
x = torch.randn(2, 4, 4)
self.run_test(model, x)
model = torch.nn.ReplicationPad2d((3, 0, 2, 1))
x = torch.randn(2, 2, 4, 4)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_im2col(self):
class Unfold(torch.nn.Module):
def forward(self, input):
return (
torch.nn.functional.unfold(
input, kernel_size=(10, 15), dilation=2, padding=5, stride=3
),
torch.nn.functional.unfold(
input, kernel_size=(2, 2), dilation=1, padding=0, stride=3
),
torch.nn.functional.unfold(
input, kernel_size=(1, 1), dilation=5, padding=2, stride=3
),
)
x = torch.rand(1, 1, 200, 100)
self.run_test(Unfold(), x)
@skipIfNoLapack
@skipIfUnsupportedMinOpsetVersion(11)
def test_det(self):
class Det(torch.nn.Module):
def forward(self, x):
return torch.linalg.det(x)
x = torch.randn(2, 3, 5, 5)
self.run_test(Det(), x)
def test_linalg_norm(self):
class LinalgSingleDimModel(torch.nn.Module):
def __init__(self, ord_val):
super().__init__()
self.ord = ord_val
def forward(self, x):
return torch.linalg.norm(x, ord=self.ord, dim=1)
x = torch.randn(2, 3, 5, 5)
self.run_test(LinalgSingleDimModel(None), x)
self.run_test(LinalgSingleDimModel(2), x)
self.run_test(LinalgSingleDimModel(float("inf")), x)
self.run_test(LinalgSingleDimModel(-float("inf")), x)
self.run_test(LinalgSingleDimModel(-4), x)
self.run_test(LinalgSingleDimModel(1.5), x)
class LinalgMultiDimModel(torch.nn.Module):
def __init__(self, ord_val):
super().__init__()
self.ord = ord_val
def forward(self, x):
return torch.linalg.norm(x, ord=self.ord, dim=(0, 2))
x = torch.randn(2, 3, 5, 5)
self.run_test(LinalgMultiDimModel("fro"), x)
self.run_test(LinalgMultiDimModel(float("inf")), x)
self.run_test(LinalgMultiDimModel(-float("inf")), x)
self.run_test(LinalgMultiDimModel(1), x)
self.run_test(LinalgMultiDimModel(-1), x)
class LinalgNoDimNoOrdModel(torch.nn.Module):
def forward(self, x):
return torch.linalg.norm(x)
x = torch.randn(2, 3, 5, 5)
self.run_test(LinalgNoDimNoOrdModel(), x)
y = torch.randn(2, 3)
self.run_test(LinalgNoDimNoOrdModel(), y)
z = torch.randn(2)
self.run_test(LinalgNoDimNoOrdModel(), z)
class LinalgNoDim1DModel(torch.nn.Module):
def __init__(self, ord_val):
super().__init__()
self.ord = ord_val
def forward(self, x):
return torch.linalg.norm(x, ord=self.ord)
x = torch.randn(2)
self.run_test(LinalgNoDim1DModel(None), x)
self.run_test(LinalgNoDim1DModel(2), x)
self.run_test(LinalgNoDim1DModel(float("inf")), x)
self.run_test(LinalgNoDim1DModel(-float("inf")), x)
self.run_test(LinalgNoDim1DModel(-4), x)
self.run_test(LinalgNoDim1DModel(1.5), x)
class LinalgNoDim2DModel(torch.nn.Module):
def __init__(self, ord_val):
super().__init__()
self.ord = ord_val
def forward(self, x):
return torch.linalg.norm(x, ord=self.ord)
x = torch.randn(2, 3)
self.run_test(LinalgNoDim2DModel("fro"), x)
self.run_test(LinalgNoDim2DModel(float("inf")), x)
self.run_test(LinalgNoDim2DModel(-float("inf")), x)
self.run_test(LinalgNoDim2DModel(1), x)
self.run_test(LinalgNoDim2DModel(-1), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_linalg_vector_norm_zero(self):
class LinalgVectorNormModel(torch.nn.Module):
def __init__(self, ord_val):
super().__init__()
self.ord = ord_val
def forward(self, x):
return torch.linalg.vector_norm(x, ord=self.ord)
x = torch.randn(2, 3, 5, 5)
self.run_test(LinalgVectorNormModel(0), x)
def test_linalg_vector_norm(self):
class LinalgVectorNormModel(torch.nn.Module):
def __init__(self, ord_val, dim_info):
super().__init__()
self.ord = ord_val
self.dim, self.keepdim = dim_info
def forward(self, x):
return torch.linalg.vector_norm(
x, ord=self.ord, dim=self.dim, keepdim=self.keepdim
)
x = torch.randn(2, 3, 5, 5)
ord_options = [2, float("inf"), -float("inf"), -4, 1.5]
dim_options = [(None, False), (1, False), ((1, 2), False), ((1, 2), True)]
for ord_val in ord_options:
for dim_info in dim_options:
self.run_test(LinalgVectorNormModel(ord_val, dim_info), x)
def test_linalg_matrix_norm(self):
class LinalgMatrixNormModel(torch.nn.Module):
def __init__(self, ord_val, dim_val=(-2, -1), keepdim_val=False):
super().__init__()
self.ord = ord_val
self.dim = dim_val
self.keepdim = keepdim_val
def forward(self, x):
return torch.linalg.matrix_norm(
x, ord=self.ord, dim=self.dim, keepdim=self.keepdim
)
x = torch.randn(2, 3, 5, 5)
ord_options = ["fro", float("inf"), -float("inf"), 1, -1]
for ord_val in ord_options:
self.run_test(LinalgMatrixNormModel(ord_val), x)
self.run_test(LinalgMatrixNormModel(ord_val, (0, 2)), x)
self.run_test(LinalgMatrixNormModel(ord_val, (0, 2), True), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_linalg_cross(self):
class Cross(torch.nn.Module):
def forward(self, x, y):
return torch.linalg.cross(x, y, dim=1), torch.linalg.cross(x, y)
x = torch.randn(5, 3, 2, 3)
y = torch.randn(1, 3, 1, 3)
self.run_test(Cross(), input_args=(x, y))
    # This test checks that the output scalar type in the ONNX graph is not null
# https://github.com/pytorch/pytorch/issues/28607
@skipIfUnsupportedMinOpsetVersion(10)
def test_trace_script(self):
@torch.jit.script
def center_slice_helper(input, h_offset):
return input[:, h_offset:]
class CenterCrop(torch.nn.Module):
def forward(self, input):
return center_slice_helper(input, torch.tensor(input.shape[1] - 1))
x = torch.randn(3, 4)
self.run_test(CenterCrop(), x)
@skipIfNoLapack
@skipIfUnsupportedMinOpsetVersion(11)
def test_logdet(self):
class LogDet(torch.nn.Module):
def forward(self, x):
return torch.logdet(x)
x = torch.randn(2, 3, 5, 5)
self.run_test(LogDet(), x)
def test_dim(self):
class DimModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
out = input * 2
out *= out.dim()
return out
empty_input = torch.randn(0, requires_grad=True)
multi_dim_input = torch.randn(1, 2, 3, requires_grad=True)
self.run_test(DimModel(), empty_input)
self.run_test(DimModel(), multi_dim_input)
@skipIfUnsupportedMinOpsetVersion(11)
def test_dim_1(self):
class M(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, poses):
boxes = torch.zeros([poses.shape[0], 2, 4])
batch_boxes = []
for kp_boxes in boxes:
kp_boxes = torchvision.ops.clip_boxes_to_image(kp_boxes, (2, 3))
batch_boxes.append(kp_boxes)
return batch_boxes
dummy_inputs = torch.rand(2, 2, 3)
self.run_test(M(), (dummy_inputs,), input_names=["x"], dynamic_axes={"x": [0]})
@skipIfUnsupportedMinOpsetVersion(12)
@skipDtypeChecking
def test_outer(self):
class Outer(torch.nn.Module):
def forward(self, x, y):
return torch.outer(x, y)
x = torch.arange(1, 5)
y = torch.arange(1, 4)
self.run_test(Outer(), input_args=(x, y))
x = torch.arange(1, 6).to(dtype=torch.float32)
y = torch.arange(1, 4).to(dtype=torch.long)
self.run_test(Outer(), input_args=(x, y))
x = torch.arange(2, 5).to(dtype=torch.float32)
y = torch.arange(2, 4).to(dtype=torch.float64)
self.run_test(Outer(), input_args=(x, y))
x = torch.arange(3, 6).to(dtype=torch.int32)
y = torch.arange(4, 7).to(dtype=torch.long)
self.run_test(Outer(), input_args=(x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_movedim(self):
class MovedimModel(torch.nn.Module):
def forward(self, x):
return (
x.movedim(1, 3),
x.movedim(2, 0),
x.movedim(1, 1),
x.movedim((1, 2, 3), (3, 0, 1)),
x.movedim((0, 1, 2), (1, 2, 3)),
x.movedim((1, 3, 2), (1, 3, 2)),
)
x = torch.randn(5, 3, 4, 2)
self.run_test(MovedimModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_moveaxis(self):
# moveaxis is an alias of movedim; thus, mostly copied from `test_movedim`.
class MoveaxisModel(torch.nn.Module):
def forward(self, x):
return (
x.moveaxis(1, 3),
x.moveaxis(2, 0),
x.moveaxis(1, 1),
x.moveaxis((1, 2, 3), (3, 0, 1)),
x.moveaxis((0, 1, 2), (1, 2, 3)),
x.moveaxis((1, 3, 2), (1, 3, 2)),
)
x = torch.randn(5, 3, 4, 2)
self.run_test(MoveaxisModel(), x)
@skipIfUnsupportedMinOpsetVersion(12)
def test_einsum(self):
class EinsumModelBatchDiagonal(torch.nn.Module):
def forward(self, x):
eqn = "...ii ->...i"
return torch.einsum(eqn, x)
for x in [torch.randn(3, 5, 5), torch.randn(3, 5, 5).to(dtype=torch.bool)]:
self.run_test(EinsumModelBatchDiagonal(), input_args=(x,))
class EinsumModelBatchMatmul(torch.nn.Module):
def forward(self, x, y):
eqn = "bij, bjk -> bik"
return torch.einsum(eqn, x, y)
x = torch.randn(5, 2, 3)
y = torch.randn(5, 3, 4)
self.run_test(EinsumModelBatchMatmul(), input_args=(x, y))
class EinsumModelInnerProd(torch.nn.Module):
def forward(self, x, y):
eqn = "i,i"
return torch.einsum(eqn, x, y)
x = torch.randn(5)
y = torch.randn(5)
self.run_test(EinsumModelInnerProd(), input_args=(x, y))
class EinsumModelTranspose(torch.nn.Module):
def forward(self, x):
eqn = "ij->ji"
return torch.einsum(eqn, x)
for x in [torch.randn(3, 4), torch.randn(3, 4).to(dtype=torch.bool)]:
self.run_test(EinsumModelTranspose(), input_args=(x,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_cosine_similarity(self):
x = torch.randn(5, 3, 2)
y = torch.randn(5, 3, 2)
self.run_test(torch.nn.CosineSimilarity(dim=2), input_args=(x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_pairwise_distance(self):
x = torch.randn(5, 3, 2)
y = torch.randn(5, 3, 2)
self.run_test(torch.nn.PairwiseDistance(p=2.0), input_args=(x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_cross(self):
class Cross(torch.nn.Module):
def forward(self, x, y):
return torch.cross(x, y, dim=3), torch.cross(x, y)
x = torch.randn(5, 3, 2, 3)
y = torch.randn(5, 3, 2, 3)
self.run_test(Cross(), input_args=(x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_cdist(self):
class Cdist(torch.nn.Module):
def forward(self, x, y):
return torch.cdist(x, y)
x = torch.randn(5, 3, 3)
y = torch.randn(5, 2, 3)
self.run_test(Cdist(), input_args=(x, y))
@skipIfUnsupportedMinOpsetVersion(12)
def test_crossentropyloss(self):
for ignore_index in [-100, 1]:
x = torch.randn(3, 5)
y = torch.empty(3, dtype=torch.long).random_(5)
y[y == 1] = ignore_index
self._crossentropyloss(x, y, ignore_index)
x = torch.randn(3, 5, 2)
y = torch.empty(3, 2, dtype=torch.long).random_(5)
y[y == 1] = ignore_index
self._crossentropyloss(x, y, ignore_index)
x = torch.randn(3, 5, 2, 7)
y = torch.empty(3, 2, 7, dtype=torch.long).random_(5)
y[y == 1] = ignore_index
self._crossentropyloss(x, y, ignore_index)
def _crossentropyloss(self, x, y, ignore_index):
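        # Cover every reduction mode, with and without class weights, for the given ignore_index.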
class CrossEntropyLossNone(torch.nn.Module):
def __init__(self, ignore_index):
super().__init__()
if ignore_index == -100:
self.loss = torch.nn.CrossEntropyLoss(reduction="none")
else:
self.loss = torch.nn.CrossEntropyLoss(
reduction="none", ignore_index=ignore_index
)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(CrossEntropyLossNone(ignore_index), input_args=(x, y))
class CrossEntropyLossNoneWeight(torch.nn.Module):
def __init__(self, ignore_index):
super().__init__()
if ignore_index == -100:
self.loss = torch.nn.CrossEntropyLoss(
reduction="none", weight=torch.randn(5)
)
else:
self.loss = torch.nn.CrossEntropyLoss(
reduction="none",
weight=torch.randn(5),
ignore_index=ignore_index,
)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(CrossEntropyLossNoneWeight(ignore_index), input_args=(x, y))
class CrossEntropyLossSum(torch.nn.Module):
def __init__(self, ignore_index):
super().__init__()
if ignore_index == -100:
self.loss = torch.nn.CrossEntropyLoss(reduction="sum")
else:
self.loss = torch.nn.CrossEntropyLoss(
reduction="sum", ignore_index=ignore_index
)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(CrossEntropyLossSum(ignore_index), input_args=(x, y))
class CrossEntropyLossSumWeight(torch.nn.Module):
def __init__(self, ignore_index):
super().__init__()
if ignore_index == -100:
self.loss = torch.nn.CrossEntropyLoss(
reduction="sum", weight=torch.randn(5)
)
else:
self.loss = torch.nn.CrossEntropyLoss(
reduction="sum",
weight=torch.randn(5),
ignore_index=ignore_index,
)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(CrossEntropyLossSumWeight(ignore_index), input_args=(x, y))
class CrossEntropyLossMean(torch.nn.Module):
def __init__(self, ignore_index):
super().__init__()
if ignore_index == -100:
self.loss = torch.nn.CrossEntropyLoss()
else:
self.loss = torch.nn.CrossEntropyLoss(ignore_index=ignore_index)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(CrossEntropyLossMean(ignore_index), input_args=(x, y))
class CrossEntropyLossMeanWeight(torch.nn.Module):
def __init__(self, ignore_index):
super().__init__()
if ignore_index == -100:
self.loss = torch.nn.CrossEntropyLoss(weight=torch.randn(5))
else:
self.loss = torch.nn.CrossEntropyLoss(
weight=torch.randn(5), ignore_index=ignore_index
)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(CrossEntropyLossMeanWeight(ignore_index), input_args=(x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_kldiv_loss(self):
x = torch.rand(5).log()
y = torch.rand(5)
self._kldiv_loss(x, y)
x = torch.rand(2, 3, 5).log()
y = torch.rand(2, 3, 5)
self._kldiv_loss(x, y)
x = torch.rand(2, 3, 5, 7).log()
y = torch.rand(2, 3, 5, 7)
self._kldiv_loss(x, y)
def _kldiv_loss(self, x, y):
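        # Exercise each KLDivLoss reduction mode with log_target both enabled and disabled.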
class KLDivLossNone(torch.nn.Module):
def __init__(self):
super().__init__()
self.loss = torch.nn.KLDivLoss(reduction="none", log_target=True)
def forward(self, input, target):
return self.loss(input, target.log())
self.run_test(KLDivLossNone(), input_args=(x, y))
class KLDivLossMean(torch.nn.Module):
def __init__(self):
super().__init__()
self.loss = torch.nn.KLDivLoss(reduction="mean", log_target=False)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(KLDivLossMean(), input_args=(x, y))
class KLDivLossSum(torch.nn.Module):
def __init__(self):
super().__init__()
self.loss = torch.nn.KLDivLoss(reduction="sum", log_target=True)
def forward(self, input, target):
return self.loss(input, target.log())
self.run_test(KLDivLossSum(), input_args=(x, y))
class KLDivLossBatchMean(torch.nn.Module):
def __init__(self):
super().__init__()
self.loss = torch.nn.KLDivLoss(reduction="batchmean", log_target=False)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(KLDivLossBatchMean(), input_args=(x, y))
class KLDivLossMiniBatchMean(torch.nn.Module):
def __init__(self):
super().__init__()
self.loss = torch.nn.KLDivLoss(
reduction="batchmean", size_average=False, log_target=True
)
def forward(self, input, target):
return self.loss(input, target.log())
self.run_test(KLDivLossMiniBatchMean(), input_args=(x, y))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss(self):
class NLLModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.loss = torch.nn.NLLLoss(reduction="none")
self.m = torch.nn.LogSoftmax(dim=1)
def forward(self, input, target):
output = self.loss(self.m(2 * input), target)
return output
N, C = 5, 4
input = torch.randn(N, 16)
target = torch.empty(N, dtype=torch.long).random_(0, C)
# using test data containing default ignore_index=-100
target[target == 1] = -100
self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss_2d_none(self):
class NLLModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.loss = torch.nn.NLLLoss(reduction="none")
self.conv = torch.nn.Conv2d(16, C, (3, 3))
self.m = torch.nn.LogSoftmax(dim=1)
def forward(self, input, target):
output = self.loss(self.m(self.conv(input)), target)
return output
N, C = 5, 4
input = torch.randn(N, 16, 10, 10)
target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
# using test data containing default ignore_index=-100
target[target == 1] = -100
self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss_2d_mean(self):
class NLLModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.loss = torch.nn.NLLLoss(reduction="mean")
self.conv = torch.nn.Conv2d(16, C, (3, 3))
self.m = torch.nn.LogSoftmax(dim=1)
def forward(self, input, target):
output = self.loss(self.m(self.conv(input)), target)
return output
N, C = 5, 4
input = torch.randn(N, 16, 10, 10)
target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
# using test data containing default ignore_index=-100
target[target == 1] = -100
self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss_2d_sum(self):
class NLLModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.loss = torch.nn.NLLLoss(reduction="sum")
self.conv = torch.nn.Conv2d(16, C, (3, 3))
self.m = torch.nn.LogSoftmax(dim=1)
def forward(self, input, target):
output = self.loss(self.m(self.conv(input)), target)
return output
N, C = 5, 4
input = torch.randn(N, 16, 10, 10)
target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
# using test data containing default ignore_index=-100
target[target == 1] = -100
self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss_2d_mean_weights(self):
class NLLModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.loss = torch.nn.NLLLoss(reduction="mean", weight=torch.randn(C))
self.conv = torch.nn.Conv2d(16, C, (3, 3))
self.m = torch.nn.LogSoftmax(dim=1)
def forward(self, input, target):
output = self.loss(self.m(self.conv(input)), target)
return output
N, C = 5, 4
input = torch.randn(N, 16, 10, 10)
target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
# using test data containing default ignore_index=-100
target[target == 1] = -100
self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss_2d_mean_ignore_index(self):
class NLLModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.loss = torch.nn.NLLLoss(reduction="mean", ignore_index=1)
self.conv = torch.nn.Conv2d(16, C, (3, 3))
self.m = torch.nn.LogSoftmax(dim=1)
def forward(self, input, target):
output = self.loss(self.m(self.conv(input)), target)
return output
N, C = 5, 4
input = torch.randn(N, 16, 10, 10)
target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss_dynamic_ignore_index(self):
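        # ignore_index is derived from a tensor shape at runtime instead of a Python constant.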
import torch.nn.functional as F
def linear_combination(x, y, epsilon):
return epsilon * x + (1 - epsilon) * y
def reduce_loss(loss, reduction="mean"):
return (
loss.mean()
if reduction == "mean"
else loss.sum()
if reduction == "sum"
else loss
)
class LabelSmoothingCrossEntropy(torch.nn.Module):
def __init__(self, epsilon: float = 0.1, reduction="mean"):
super().__init__()
self.epsilon = epsilon
self.reduction = reduction
def forward(self, preds, target, start_position):
n = preds.size()[-1]
log_preds = F.log_softmax(preds, dim=-1)
ignore_index = start_position.size(1)
nll = F.nll_loss(
log_preds,
target,
reduction=self.reduction,
ignore_index=ignore_index,
)
return nll + start_position.float()
N = 5
preds = torch.randn(N, 16)
target = torch.randint(5, (N,))
start_position = torch.randint(10, (N, N))
self.run_test(LabelSmoothingCrossEntropy(), (preds, target, start_position))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss_2d_mean_ignore_index_weights(self):
class NLLModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.loss = torch.nn.NLLLoss(
reduction="mean", weight=torch.randn(C), ignore_index=1
)
self.conv = torch.nn.Conv2d(16, C, (3, 3))
self.m = torch.nn.LogSoftmax(dim=1)
def forward(self, input, target):
output = self.loss(self.m(self.conv(input)), target)
return output
N, C = 5, 4
input = torch.randn(N, 16, 10, 10)
target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
def test_binary_cross_entropy_with_logits(self):
x = torch.randn(5)
y = torch.empty(5).random_(2)
self._bce_logits(x, y)
x = torch.randn(3, 4)
y = torch.empty(3, 4).random_(2)
weight = torch.tensor([3])
        self._bce_logits_weight(x, y, weight)
x = torch.randn(3, 2, 4)
y = torch.empty(3, 2, 4).random_(2)
pos_weight = torch.empty([2, 4]).random_(2)
self._bce_logits_posweight(x, y, pos_weight)
x = torch.randn(3, 3, 4)
y = torch.empty(3, 3, 4).random_(2)
weight = torch.tensor([3])
pos_weight = torch.empty([3, 4]).random_(2)
self._bce_logits_loss_weight_posweight(x, y, weight, pos_weight)
def _bce_logits(self, x, y):
class BCEWithLogitsLossNone(torch.nn.Module):
def forward(self, input, target):
return torch.nn.functional.binary_cross_entropy_with_logits(
input, target, reduction="none"
)
self.run_test(BCEWithLogitsLossNone(), input_args=(x, y))
class BCEWithLogitsLossMean(torch.nn.Module):
def forward(self, input, target):
return torch.nn.functional.binary_cross_entropy_with_logits(
input, target, reduction="mean"
)
self.run_test(BCEWithLogitsLossMean(), input_args=(x, y))
class BCEWithLogitsLossSum(torch.nn.Module):
def forward(self, input, target):
return torch.nn.functional.binary_cross_entropy_with_logits(
input, target, reduction="sum"
)
self.run_test(BCEWithLogitsLossSum(), input_args=(x, y))
    def _bce_logits_weight(self, x, y, weight):
        class BCEWithLogitsLossWeightNone(torch.nn.Module):
def forward(self, input, target, weight):
return torch.nn.functional.binary_cross_entropy_with_logits(
input, target, weight=weight, reduction="none"
)
        self.run_test(BCEWithLogitsLossWeightNone(), input_args=(x, y, weight))
        class BCEWithLogitsLossWeightMean(torch.nn.Module):
def forward(self, input, target, weight):
return torch.nn.functional.binary_cross_entropy_with_logits(
input, target, weight=weight, reduction="mean"
)
        self.run_test(BCEWithLogitsLossWeightMean(), input_args=(x, y, weight))
        class BCEWithLogitsLossWeightSum(torch.nn.Module):
def forward(self, input, target, weight):
return torch.nn.functional.binary_cross_entropy_with_logits(
input, target, weight=weight, reduction="sum"
)
        self.run_test(BCEWithLogitsLossWeightSum(), input_args=(x, y, weight))
def _bce_logits_posweight(self, x, y, pos_weight):
        class BCEWithLogitsLossPosWeightNone(torch.nn.Module):
def forward(self, input, target, pos_weight):
return torch.nn.functional.binary_cross_entropy_with_logits(
input, target, pos_weight=pos_weight, reduction="none"
)
        self.run_test(BCEWithLogitsLossPosWeightNone(), input_args=(x, y, pos_weight))
        class BCEWithLogitsLossPosWeightMean(torch.nn.Module):
def forward(self, input, target, pos_weight):
return torch.nn.functional.binary_cross_entropy_with_logits(
input, target, pos_weight=pos_weight, reduction="mean"
)
        self.run_test(BCEWithLogitsLossPosWeightMean(), input_args=(x, y, pos_weight))
        class BCEWithLogitsLossPosWeightSum(torch.nn.Module):
def forward(self, input, target, pos_weight):
return torch.nn.functional.binary_cross_entropy_with_logits(
input, target, pos_weight=pos_weight, reduction="sum"
)
        self.run_test(BCEWithLogitsLossPosWeightSum(), input_args=(x, y, pos_weight))
def _bce_logits_loss_weight_posweight(self, x, y, weight, pos_weight):
class BCEWithLogitsLossWeightPosweightNone(torch.nn.Module):
def forward(self, input, target, weight, pos_weight):
return torch.nn.functional.binary_cross_entropy_with_logits(
input,
target,
weight=weight,
pos_weight=pos_weight,
reduction="none",
)
self.run_test(
BCEWithLogitsLossWeightPosweightNone(),
input_args=(x, y, weight, pos_weight),
)
class BCEWithLogitsLossWeightPosweightMean(torch.nn.Module):
def forward(self, input, target, weight, pos_weight):
return torch.nn.functional.binary_cross_entropy_with_logits(
input,
target,
weight=weight,
pos_weight=pos_weight,
reduction="mean",
)
self.run_test(
BCEWithLogitsLossWeightPosweightMean(),
input_args=(x, y, weight, pos_weight),
)
class BCEWithLogitsLossWeightPosweightSum(torch.nn.Module):
def forward(self, input, target, weight, pos_weight):
return torch.nn.functional.binary_cross_entropy_with_logits(
input, target, weight=weight, pos_weight=pos_weight, reduction="sum"
)
self.run_test(
BCEWithLogitsLossWeightPosweightSum(), input_args=(x, y, weight, pos_weight)
)
def test_torch_mm(self):
class M(torch.nn.Module):
def forward(self, mat1, mat2):
mm = torch.mm(mat1, mat2)
return mm
mat1 = torch.randn(2, 3)
mat2 = torch.randn(3, 3)
self.run_test(M(), input_args=(mat1, mat2))
@skipIfUnsupportedMinOpsetVersion(
9
) # Because where op is not supported for opset < 9.
def test_where_with_bool_tensor(self):
class M(torch.nn.Module):
def forward(self, mat1, mat2):
out = torch.where(mat1 > 0, mat1, mat2)
return out
mat1 = torch.randn(2, 3)
mat2 = torch.ones(2, 3)
self.run_test(M(), input_args=(mat1, mat2))
@skipIfUnsupportedMinOpsetVersion(
9
) # Because where op is not supported for opset < 9.
def test_where_with_byte_tensor(self):
class M(torch.nn.Module):
def forward(self, cond, mat1, mat2):
out = torch.where(cond, mat1, mat2)
return out
cond = torch.ones(2, 3, dtype=torch.uint8)
cond[1, 2] = 0
mat1 = torch.randn(2, 3)
mat2 = torch.ones(2, 3)
self.run_test(M(), input_args=(cond, mat1, mat2))
@skipIfUnsupportedMinOpsetVersion(10) # ONNX IsInf op is added in opset 10.
def test_isinf(self):
class M(torch.nn.Module):
def forward(self, x):
return x.isinf()
x = torch.tensor([[1, 2, float("inf")], [2, float("nan"), float("inf")]])
self.run_test(M(), (x,))
@skipIfUnsupportedMinOpsetVersion(10)
def test_isfinite(self):
class M(torch.nn.Module):
def forward(self, x):
return x.isfinite()
x = torch.tensor([[1, 2, float("inf")], [2, float("nan"), -float("inf")]])
self.run_test(M(), (x,))
@skipIfUnsupportedMinOpsetVersion(9) # ONNX IsNaN op is added in opset 9.
def test_isnan(self):
class M(torch.nn.Module):
def forward(self, x):
return x.isnan()
x = torch.tensor([[1, 2, float("inf")], [2, float("nan"), float("inf")]])
self.run_test(M(), (x,))
@skipIfUnsupportedMinOpsetVersion(
10
    )  # ONNX IsNaN and IsInf ops were added in opsets 9 and 10, respectively.
def test_nan_to_num(self):
class NoParams(torch.nn.Module):
def forward(self, x):
return x.nan_to_num()
x = torch.tensor([[1, 2, float("inf")], [2, float("nan"), -float("inf")]])
xint = torch.ones((2, 4), dtype=torch.int)
xhalf = torch.ones((2, 4), dtype=torch.half)
self.run_test(NoParams(), (x,))
self.run_test(NoParams(), (xint,))
self.run_test(NoParams(), (xhalf,))
class WithParams(torch.nn.Module):
def forward(self, x):
return x.nan_to_num(nan=2.3, posinf=4.5, neginf=6.7)
x = torch.tensor([[1, 2, float("inf")], [2, float("nan"), -float("inf")]])
self.run_test(WithParams(), (x,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_maximum_minimum(self):
class ModelWithNan(torch.nn.Module):
def forward(self, x, y):
return torch.maximum(x, y), torch.minimum(x, y)
x = torch.tensor([-2, -2, float("nan")])
y = torch.rand(1, 3)
self.run_test(ModelWithNan(), (x, y))
@skipIfUnsupportedMinOpsetVersion(12)
def test_minimum_dtypes(self):
class MinimumModel(torch.nn.Module):
def forward(self, x, y):
return torch.minimum(x, y)
x = torch.randn((5, 5), dtype=torch.float16)
y = torch.randn((5, 5), dtype=torch.float)
self.run_test(MinimumModel(), (x, y))
x = torch.randn((5, 5), dtype=torch.float16)
y = torch.randint(10, (5, 5), dtype=torch.int16)
self.run_test(MinimumModel(), (x, y))
x = torch.randint(10, (5, 5), dtype=torch.int16)
y = torch.randint(10, (5, 5), dtype=torch.int32)
self.run_test(MinimumModel(), (x, y))
x = torch.randint(10, (5, 5), dtype=torch.int)
y = torch.full_like(x, True)
self.run_test(MinimumModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_any(self):
class M(torch.nn.Module):
def forward(self, x):
return x.any()
x = torch.tensor([[True, False], [False, False]])
self.run_test(M(), (x,))
class MDim(torch.nn.Module):
def forward(self, x):
return x.any(dim=1)
x = torch.rand(3, 4).bool()
self.run_test(MDim(), (x,))
class MKeepdim(torch.nn.Module):
def forward(self, x):
return x.any(dim=1, keepdim=True)
x = torch.rand(3, 4).bool()
self.run_test(MKeepdim(), (x,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_all(self):
class M(torch.nn.Module):
def forward(self, x):
return x.all()
x = torch.tensor([[True, False], [False, False]])
self.run_test(M(), (x,))
class MDim(torch.nn.Module):
def forward(self, x):
return x.all(dim=1)
x = torch.rand(3, 4).bool()
self.run_test(MDim(), (x,))
class MKeepdim(torch.nn.Module):
def forward(self, x):
return x.all(dim=1, keepdim=True)
x = torch.rand(3, 4).bool()
self.run_test(MKeepdim(), (x,))
def test_dropout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.dropout = torch.nn.Dropout(0.3)
def forward(self, x):
dropout = self.dropout(x)
return dropout
x = torch.randn(10, 3, 53)
self.run_test(M(), (x))
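    # In eval mode RReLU uses the fixed negative slope (lower + upper) / 2,
    # so the exported graph is deterministic.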
def test_rrelu_eval(self):
x = torch.tensor([0.5, -0.5])
self.run_test(torch.nn.RReLU(0.1, 0.3).eval(), x)
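    # The buffer's shape is static and is expected to be constant-folded during export.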
def test_shape_constant_fold(self):
class ShapeModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
shape = self.weight.shape[0]
return x + shape
x = torch.randn(2, 5)
self.run_test(ShapeModule(), (x,), rtol=1e-3, atol=1e-5)
@skipIfUnsupportedMinOpsetVersion(12)
def test_celu(self):
class Celu(torch.nn.Module):
def __init__(self):
super().__init__()
self.celu = torch.nn.CELU(alpha=1.0)
def forward(self, input):
return self.celu(input)
input = torch.randn(2)
self.run_test(Celu(), (input,))
@skipIfUnsupportedMinOpsetVersion(12)
def test_celu_default(self):
class Celu(torch.nn.Module):
def __init__(self):
super().__init__()
self.celu = torch.nn.CELU()
def forward(self, input):
return self.celu(input)
input = torch.randn(2)
self.run_test(Celu(), (input,))
@skipIfUnsupportedMinOpsetVersion(12)
def test_celu_alpha(self):
class Celu(torch.nn.Module):
def __init__(self):
super().__init__()
self.celu = torch.nn.CELU(alpha=2.0)
def forward(self, input):
return self.celu(input)
input = torch.randn(2)
self.run_test(Celu(), (input,))
@skipIfUnsupportedMinOpsetVersion(12)
def test_celu_cast(self):
class Celu(torch.nn.Module):
def __init__(self):
super().__init__()
self.celu = torch.nn.CELU()
def forward(self, input):
return self.celu(input)
input = torch.randn(2, 5, 7, dtype=torch.float64)
self.run_test(Celu(), (input,))
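    # Tuples built and unpacked inside loops and branches must be lowered away by the
    # exporter, since ONNX has no first-class tuple type.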
def test_lower_tuple(self):
class TupleModule(torch.nn.Module):
def forward(self, input1: Tensor, input2: Tensor, input3: Tensor) -> Tensor:
a = (input1, input2)
b = a
c = (input1, input2, input3)
for i in range(5):
d = a[0]
for j in range(2):
e, f = a
a = (d, f)
f = c[2]
if f.size(0) != input1.size(-1):
g = b[1]
b = (g, f)
else:
k = c[1:]
b = (f, k[0])
m, n = b
c = (input1, n, m)
p, q, r = c
return p + q + r
input1 = torch.randn(2)
input2 = torch.randn(2)
input3 = torch.randn(2)
self.run_test(TupleModule(), (input1, input2, input3))
def test_lower_tuple_2(self):
class TupleModule(torch.nn.Module):
def forward(self, input1: Tensor, input2: Tensor) -> Tuple[Tensor, Tensor]:
a = (input1, input2)
for x in range(5):
c, d = a
a = (c, d)
return a
input1 = torch.randn(2)
input2 = torch.randn(2)
self.run_test(TupleModule(), (input1, input2))
def test_lower_tuple_3(self):
class TupleModule(torch.nn.Module):
def forward(
self,
input1: Tuple[Tensor, Tensor],
input2: Tuple[Tensor, Tensor],
) -> Tuple[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]]:
a = input1
b = input2
for x in range(5):
c, d = a
e, f = b
if c.shape[0] == e.shape[0]:
e = e + c
else:
f = f + d
a = (e, f)
b = (c, d)
return a, b
input1 = (torch.randn(2), torch.randn(2))
input2 = (torch.randn(2), torch.randn(2))
self.run_test(TupleModule(), (input1, input2))
@skipIfUnsupportedMinOpsetVersion(9)
def test_where(self):
class Model(torch.nn.Module):
def forward(self, cond, input, other):
return torch.where(cond, input, other)
x = torch.randint(0, 1, (2, 3, 4), dtype=torch.bool)
y = torch.randn(2, 1, 4)
z = torch.ones(2, 3, 1)
self.run_test(Model(), (x, y, z))
@skipIfUnsupportedMinOpsetVersion(9)
@skipScriptTest() # scripting tests run for opsets > 11. See: test_where_condition_script
def test_where_condition(self):
class Model1(torch.nn.Module):
def forward(self, input):
return torch.stack(torch.where(input > 0.5), dim=1)
x = torch.randint(0, 2, (2, 3, 4), dtype=bool)
self.run_test(Model1(), (x))
class Model2(torch.nn.Module):
def forward(self, input, other):
return torch.stack(torch.where(input > other), dim=1)
x = torch.randint(0, 1, (2, 3, 4), dtype=bool)
y = torch.randint(1, 2, (2, 3, 4), dtype=bool)
self.run_test(Model2(), (x, y))
@skipIfUnsupportedOpsetVersion([13])
@skipIfUnsupportedMinOpsetVersion(11)
def test_where_condition_script(self):
class Model1(torch.nn.Module):
def forward(self, input):
return torch.stack(torch.where(input > 0.5), dim=1)
x = torch.randint(0, 2, (2, 3, 4), dtype=bool)
self.run_test(Model1(), (x))
class Model2(torch.nn.Module):
def forward(self, input, other):
return torch.stack(torch.where(input > other), dim=1)
x = torch.randint(0, 1, (2, 3, 4), dtype=bool)
y = torch.randint(1, 2, (2, 3, 4), dtype=bool)
self.run_test(Model2(), (x, y))
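    # If/else arms that only `pass` should still script and export cleanly.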
def test_empty_branch(self):
class EmptyBranchModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
out = input + 1
if out.dim() > 2:
if out.dim() > 3:
out += 3
else:
pass
else:
pass
return out
x = torch.randn(1, 2, 3, requires_grad=True)
self.run_test(EmptyBranchModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_derive_index_scripting(self):
class MyModule(torch.nn.Module):
def forward(self, x: Tensor):
j = []
for idx in range(len(x) - 1, -len(x), -2):
y = x[idx]
j += [x * y]
return j
x = torch.randn(5, 13)
self.run_test(MyModule(), x)
class MyModule(torch.nn.Module):
def forward(self, x: Tensor):
j = []
for idx in range(-len(x), len(x) - 1, 2):
y = x[idx]
j += [x * y]
return j
x = torch.randn(5, 13)
self.run_test(MyModule(), x)
class MyModule(torch.nn.Module):
def forward(self, x: Tensor):
j = []
for idx in range(len(x) - 1, -len(x), -3):
y = x[idx]
j += [x * y]
return j
self.run_test(MyModule(), x)
class MyModule(torch.nn.Module):
def forward(self, x: Tensor):
j = []
for idx in range(-len(x), len(x) - 1, 3):
y = x[idx]
j += [x * y]
return j
self.run_test(MyModule(), x)
    @skipScriptTest()  # Scripting fails for list concatenation for opsets < 11. Check test_derive_index_scripting.
def test_derive_index(self):
class MyModule(torch.nn.Module):
def forward(self, x: Tensor):
j = []
for idx in range(len(x) - 1, -len(x), -2):
y = x[idx]
j += [x * y]
return j
x = torch.randn(5, 13)
self.run_test(MyModule(), x)
class MyModule(torch.nn.Module):
def forward(self, x: Tensor):
j = []
for idx in range(-len(x), len(x) - 1, 2):
y = x[idx]
j += [x * y]
return j
x = torch.randn(5, 13)
self.run_test(MyModule(), x)
class MyModule(torch.nn.Module):
def forward(self, x: Tensor):
j = []
for idx in range(len(x) - 1, -len(x), -3):
y = x[idx]
j += [x * y]
return j
self.run_test(MyModule(), x)
class MyModule(torch.nn.Module):
def forward(self, x: Tensor):
j = []
for idx in range(-len(x), len(x) - 1, 3):
y = x[idx]
j += [x * y]
return j
self.run_test(MyModule(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_if_transpose(self):
class IfModel(torch.nn.Module):
def forward(self, x):
x = x.transpose(0, 1)
if x.size(0) == 2:
return x.transpose(0, 1)
else:
return x
x = torch.randn(2, 3)
self.run_test(
torch.jit.script(IfModel()),
x,
output_names=["output_1"],
dynamic_axes={"output_1": [0, 1]},
)
@skipIfUnsupportedMinOpsetVersion(13)
def test_if_list(self):
class IfModel(torch.nn.Module):
def forward(self, x, y, cond):
res = []
if cond:
res = res + [x]
else:
res = res + [y]
return res
x = torch.randn(2, 3)
y = torch.randn(3, 3)
cond = torch.tensor(1, dtype=torch.bool)
self.run_test(torch.jit.script(IfModel()), (x, y, cond))
@skipIfUnsupportedMinOpsetVersion(13)
def test_if_view(self):
class IfModel(torch.nn.Module):
def forward(self, x, y, cond):
bs, seq = y.shape[:2]
if cond:
res = x.view(bs, seq, -1)
else:
res = y
return res.transpose(1, 2)
x = torch.randn(2, 16, 2, 2)
y = torch.randn(2, 16, 8)
cond = torch.tensor(1, dtype=torch.bool)
self.run_test(
torch.jit.script(IfModel()),
(x, y, cond),
output_names=["output_1"],
dynamic_axes={"output_1": [1]},
)
    @skipScriptTest(min_opset_version=11)  # dynamic split support added in 11
def test_split_tensor_scalar(self):
class SplitModel(torch.nn.Module):
def forward(self, x):
return torch.split(x, x.size(1))
x = torch.randn(1, 2, 3, requires_grad=True)
self.run_test(SplitModel(), x)
def test_split_tensor_multi(self):
class SplitModel(torch.nn.Module):
def forward(self, x):
return torch.split(x, torch.ones(3))
x = torch.randn(1, 2, 3, requires_grad=True)
def run_model():
SplitModel(x)
self.assertRaises(TypeError, run_model)
@skipIfUnsupportedMinOpsetVersion(9)
def test_embedding(self):
class EmbedModel(torch.nn.Module):
def forward(self, input, emb):
return torch.nn.functional.embedding(input, emb, padding_idx=1)
model = EmbedModel()
x = torch.randint(4, (4,))
x[2] = x[0] = 1
embedding_matrix = torch.rand(10, 3)
self.run_test(model, (x, embedding_matrix))
x = torch.randint(4, (4, 3, 2))
x[2] = 1
x[0][1] = 1
self.run_test(model, (x, embedding_matrix))
self.run_test(
model, (x, embedding_matrix), training=torch.onnx.TrainingMode.TRAINING
)
class EmbedModelWithoutPaddingIdx(torch.nn.Module):
def forward(self, input, emb):
return torch.nn.functional.embedding(input, emb)
model = EmbedModelWithoutPaddingIdx()
x = torch.randint(4, (4, 3, 2))
self.run_test(model, (x, embedding_matrix))
@skipIfUnsupportedMinOpsetVersion(9)
def test_embedding_module(self):
class EmbedModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.Embedding(4, 3, padding_idx=1)
self.emb2 = torch.nn.Embedding(4, 3, padding_idx=1)
with torch.no_grad():
self.emb2.weight[1] = torch.ones(3)
def forward(self, input):
return self.emb(input), self.emb2(input)
model = EmbedModel()
x = torch.randint(4, (4,))
x[2] = x[0] = 1
self.run_test(model, (x,))
x = torch.randint(4, (4, 3, 2))
x[2] = 1
x[0][1] = 1
self.run_test(model, (x,))
class EmbedModelWithoutPaddingIdx(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.Embedding(4, 3)
def forward(self, input):
return self.emb(input)
model = EmbedModelWithoutPaddingIdx()
x = torch.randint(4, (4, 3, 2))
self.run_test(model, (x,))
@skipIfUnsupportedMinOpsetVersion(11)
def test_embedding_renorm(self):
n, d = 7, 5
embedding = torch.nn.Embedding(n, d, max_norm=0.2)
idx = torch.tensor([2, 1])
self.run_test(embedding, idx)
embedding = torch.nn.Embedding(n, d, max_norm=0.5, norm_type=1.0)
idx = torch.tensor([4, 3, 4, 2])
self.run_test(embedding, idx)
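    # The RNN tests below are dispatched by name ("elman", "lstm", "gru");
    # RNN_* are module-level test size constants.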
def _dispatch_rnn_test(self, name, *args, **kwargs):
if name == "elman":
self._elman_rnn_test(*args, **kwargs)
if name == "lstm":
self._lstm_test(*args, **kwargs)
if name == "gru":
self._gru_test(*args, **kwargs)
def _elman_rnn_test(
self,
layers,
nonlinearity,
bidirectional,
initial_state,
packed_sequence,
dropout,
**extra_kwargs,
):
class ElmanWithStateModel(torch.nn.Module):
def __init__(self, layers, nonlinearity, bidirect, dropout, batch_first):
super().__init__()
self.batch_first = batch_first
self.inner_model = torch.nn.RNN(
RNN_INPUT_SIZE,
RNN_HIDDEN_SIZE,
layers,
nonlinearity=nonlinearity,
bidirectional=bidirectional,
dropout=dropout,
batch_first=batch_first,
)
def forward(self, input: rnn_utils.PackedSequence, hx=None):
return self.inner_model(input, hx)
class ElmanWithoutStateModel(torch.nn.Module):
def __init__(self, layers, nonlinearity, bidirect, dropout, batch_first):
super().__init__()
self.batch_first = batch_first
self.inner_model = torch.nn.RNN(
RNN_INPUT_SIZE,
RNN_HIDDEN_SIZE,
layers,
nonlinearity=nonlinearity,
bidirectional=bidirectional,
dropout=dropout,
batch_first=batch_first,
)
def forward(self, input: rnn_utils.PackedSequence):
return self.inner_model(input)
batch_first = packed_sequence == 2
if initial_state:
model = ElmanWithStateModel(
layers=layers,
bidirect=bidirectional,
nonlinearity=nonlinearity,
dropout=dropout,
batch_first=batch_first,
)
if packed_sequence:
model = (
rnn_model_with_packed_sequence.RnnModelWithPackedSequenceWithState(
model, batch_first
)
)
else:
model = ElmanWithStateModel(
layers=layers,
bidirect=bidirectional,
nonlinearity=nonlinearity,
dropout=dropout,
batch_first=batch_first,
)
if packed_sequence:
model = rnn_model_with_packed_sequence.RnnModelWithPackedSequenceWithoutState(
model, batch_first
)
def make_input(batch_size):
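            # Lengths are sorted in descending order, as packed sequences require.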
seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
inputs = [inputs]
directions = 2 if bidirectional else 1
if initial_state:
h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
inputs.append(h0)
if packed_sequence != 0:
inputs.append(torch.IntTensor(seq_lengths))
if len(inputs) == 1:
input = inputs[0]
else:
input = tuple(inputs)
return input
input = make_input(RNN_BATCH_SIZE)
self.run_test(model, input)
# test that the model still runs with a different batch size
other_input = make_input(RNN_BATCH_SIZE + 1)
self.run_test(model, other_input)
def _lstm_test(
self,
layers,
bidirectional,
initial_state,
packed_sequence,
dropout,
**extra_kwargs,
):
batch_first = packed_sequence == 2
if packed_sequence:
model = lstm_flattening_result.LstmFlatteningResultWithSeqLength(
RNN_INPUT_SIZE,
RNN_HIDDEN_SIZE,
layers,
bidirectional,
dropout,
batch_first,
)
if initial_state:
model = (
rnn_model_with_packed_sequence.RnnModelWithPackedSequenceWithState(
model, batch_first
)
)
else:
model = rnn_model_with_packed_sequence.RnnModelWithPackedSequenceWithoutState(
model, batch_first
)
else:
model = lstm_flattening_result.LstmFlatteningResultWithoutSeqLength(
RNN_INPUT_SIZE,
RNN_HIDDEN_SIZE,
layers,
bidirectional,
dropout,
batch_first,
)
def make_input(batch_size):
seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
inputs = [inputs]
directions = 2 if bidirectional else 1
if initial_state:
h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
c0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
inputs.append((h0, c0))
if packed_sequence != 0:
inputs.append(torch.IntTensor(seq_lengths))
if len(inputs) == 1:
input = inputs[0]
else:
input = tuple(inputs)
return input
input = make_input(RNN_BATCH_SIZE)
self.run_test(model, input)
# test that the model still runs with a different batch size
other_input = make_input(RNN_BATCH_SIZE + 1)
self.run_test(model, other_input)
def _gru_test(
self,
layers,
bidirectional,
initial_state,
packed_sequence,
dropout,
**extra_kwargs,
):
class GRUWithStateModel(torch.nn.Module):
def __init__(self, layers, bidirect, dropout, batch_first):
super().__init__()
self.batch_first = batch_first
self.inner_model = torch.nn.GRU(
RNN_INPUT_SIZE,
RNN_HIDDEN_SIZE,
num_layers=layers,
bidirectional=bidirectional,
dropout=dropout,
batch_first=batch_first,
)
def forward(self, input: rnn_utils.PackedSequence, hx):
return self.inner_model(input, hx)
class GRUWithoutStateModel(torch.nn.Module):
def __init__(self, layers, bidirect, dropout, batch_first):
super().__init__()
self.batch_first = batch_first
self.inner_model = torch.nn.GRU(
RNN_INPUT_SIZE,
RNN_HIDDEN_SIZE,
num_layers=layers,
bidirectional=bidirectional,
dropout=dropout,
batch_first=batch_first,
)
def forward(self, input: rnn_utils.PackedSequence):
return self.inner_model(input)
class GRUNoSeqLengthWithoutStateModel(torch.nn.Module):
def __init__(self, layers, bidirect, dropout, batch_first):
super().__init__()
self.batch_first = batch_first
self.inner_model = torch.nn.GRU(
RNN_INPUT_SIZE,
RNN_HIDDEN_SIZE,
num_layers=layers,
bidirectional=bidirectional,
dropout=dropout,
batch_first=batch_first,
)
def forward(self, input):
return self.inner_model(input)
class GRUNoSeqLengthWithStateModel(torch.nn.Module):
def __init__(self, layers, bidirect, dropout, batch_first):
super().__init__()
self.batch_first = batch_first
self.inner_model = torch.nn.GRU(
RNN_INPUT_SIZE,
RNN_HIDDEN_SIZE,
num_layers=layers,
bidirectional=bidirectional,
dropout=dropout,
batch_first=batch_first,
)
def forward(self, input, hx):
return self.inner_model(input, hx)
batch_first = packed_sequence == 2
if packed_sequence:
if initial_state:
model = GRUWithStateModel(
layers=layers,
bidirect=bidirectional,
dropout=dropout,
batch_first=batch_first,
)
model = (
rnn_model_with_packed_sequence.RnnModelWithPackedSequenceWithState(
model, batch_first
)
)
else:
model = GRUWithoutStateModel(
layers=layers,
bidirect=bidirectional,
dropout=dropout,
batch_first=batch_first,
)
model = rnn_model_with_packed_sequence.RnnModelWithPackedSequenceWithoutState(
model, batch_first
)
else:
if initial_state:
model = GRUNoSeqLengthWithStateModel(
layers=layers,
bidirect=bidirectional,
dropout=dropout,
batch_first=batch_first,
)
else:
model = GRUNoSeqLengthWithoutStateModel(
layers=layers,
bidirect=bidirectional,
dropout=dropout,
batch_first=batch_first,
)
def make_input(batch_size):
seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
inputs = [inputs]
directions = 2 if bidirectional else 1
if initial_state:
h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
inputs.append(h0)
if packed_sequence != 0:
inputs.append(torch.IntTensor(seq_lengths))
if len(inputs) == 1:
input = inputs[0]
else:
input = tuple(inputs)
return input
input = make_input(RNN_BATCH_SIZE)
self.run_test(model, input)
# test that the model still runs with a different batch size
other_input = make_input(RNN_BATCH_SIZE + 1)
self.run_test(model, other_input)
@skipIfUnsupportedMinOpsetVersion(10)
def test_fake_quantize_per_tensor(self):
class FakeQuantizePerTensorModel(torch.nn.Module):
def forward(self, input):
scale = 1.0 / 127
zero_point = 0
quant_min = -128
quant_max = 127
return torch.fake_quantize_per_tensor_affine(
input, scale, zero_point, quant_min, quant_max
)
x = torch.randn(6, 4, 3, 3)
self.run_test(FakeQuantizePerTensorModel(), (x))
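    # In the next test, scale and zero_point are passed as runtime tensors rather than
    # constants (hence the opset 13 gate).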
@skipIfUnsupportedMinOpsetVersion(13)
def test_fake_quantize_per_tensor_dynamic_scale_zeropoint(self):
class FakeQuantizePerTensorModel(torch.nn.Module):
def forward(self, input, scale, zero_point):
quant_min = -128
quant_max = 127
return torch.fake_quantize_per_tensor_affine(
input, scale, zero_point, quant_min, quant_max
)
x = torch.randn(6, 4, 3, 3)
scale = torch.tensor(1.0 / 127)
zero_point = torch.tensor(0)
self.run_test(FakeQuantizePerTensorModel(), (x, scale, zero_point))
@skipIfUnsupportedMinOpsetVersion(13)
def test_fake_quantize_per_channel(self):
class FakeQuantizePerChannelModel(torch.nn.Module):
def forward(self, input):
amax = torch.ones(4)
scale = amax / 127.0
zero_point = torch.zeros_like(amax, dtype=torch.int)
                # Quantize twice to test different branches
y = torch.fake_quantize_per_channel_affine(
input, scale, zero_point, 1, 0, 255
)
return torch.fake_quantize_per_channel_affine(
y, scale, zero_point, 1, -128, 127
)
x = torch.randn(6, 4, 3, 3)
self.run_test(FakeQuantizePerChannelModel(), (x))
@skipIfUnsupportedMinOpsetVersion(13)
# RuntimeError: Can't redefine method:
# forward on class: __torch__.torch.nn.modules.linear.Linear
@skipScriptTest()
def test_fake_quantize_activation(self):
from torch import quantization
m = torch.nn.Linear(1, 1)
m.qconfig = quantization.QConfig(
activation=quantization.default_fake_quant,
weight=quantization.default_per_channel_weight_fake_quant,
)
quantization.prepare_qat(m.train(), inplace=True)
m.apply(quantization.enable_observer)
m.apply(quantization.enable_fake_quant)
for module in m.modules():
if isinstance(module, quantization.FakeQuantize):
module.calculate_qparams()
m.apply(quantization.disable_observer)
m.eval()
        # Fake quantize activation is a special case: it restricts the quantized range
        # to (0, 127), while the standard 8-bit quantization range is (-128, 127) or (0, 255).
        # Set fixed weight, bias, and inputs to test whether ONNX handles the overflow correctly.
m.weight = torch.nn.Parameter(torch.tensor([[1.0], [1.0], [1.0]]))
m.bias = torch.nn.Parameter(torch.tensor([0.0]))
x = torch.tensor([[150.0], [127.0], [-5.0]])
self.run_test(m, x)
def test_batchnorm_training(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn1 = torch.nn.BatchNorm2d(3, affine=False)
self.cv1 = torch.nn.Conv2d(3, 3, 10)
self.bn2 = torch.nn.BatchNorm2d(3, affine=True)
self.cv2 = torch.nn.Conv2d(3, 3, 10)
self.bn3 = torch.nn.BatchNorm2d(3, affine=False)
def forward(self, x):
x = self.bn1(x)
x = self.cv1(x)
x = self.bn2(x)
x = self.cv2(x)
x = self.bn3(x)
return x
x = torch.randn(10, 3, 20, 20) * 2
model_export = MyModule()
self.run_test(
model_export,
(x,),
training=torch.onnx.TrainingMode.TRAINING,
rtol=1e-3,
atol=1e-5,
)
model_export.train()
self.run_test(
model_export,
(x,),
training=torch.onnx.TrainingMode.PRESERVE,
rtol=1e-3,
atol=1e-5,
)
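    # One BatchNorm layer is pinned to eval() while the rest of the model is exported
    # in TRAINING/PRESERVE mode; per-layer modes must be respected.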
def test_batchnorm_training_mode_fix_layer(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn1 = torch.nn.BatchNorm2d(3, affine=True)
self.cv1 = torch.nn.Conv2d(3, 3, 10)
self.bn2 = torch.nn.BatchNorm2d(3, affine=False)
self.cv2 = torch.nn.Conv2d(3, 3, 10)
self.bn3 = torch.nn.BatchNorm2d(3, affine=True)
self.bn3.eval()
def forward(self, x):
x = self.bn1(x)
x = self.cv1(x)
x = self.bn2(x)
x = self.cv2(x)
x = self.bn3(x)
return x
x = torch.randn(10, 3, 128, 128)
model_export = MyModule()
self.run_test(
model_export,
(x,),
training=torch.onnx.TrainingMode.TRAINING,
rtol=1e-3,
atol=1e-5,
)
model_export.train()
self.run_test(
model_export,
(x,),
training=torch.onnx.TrainingMode.PRESERVE,
rtol=1e-3,
atol=1e-5,
)
def test_batchnorm_eval_mode_train_layer(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn1 = torch.nn.BatchNorm2d(3, affine=True)
self.cv1 = torch.nn.Conv2d(3, 3, 10)
self.bn2 = torch.nn.BatchNorm2d(3, affine=False)
self.cv2 = torch.nn.Conv2d(3, 3, 10)
self.bn3 = torch.nn.BatchNorm2d(3, affine=True)
self.bn3.train()
def forward(self, x):
x = self.bn1(x)
x = self.cv1(x)
x = self.bn2(x)
x = self.cv2(x)
x = self.bn3(x)
return x
x = torch.randn(10, 3, 128, 128)
model_export = MyModule()
self.run_test(
model_export,
(x,),
training=torch.onnx.TrainingMode.EVAL,
rtol=1e-3,
atol=1e-5,
)
model_export.eval()
self.run_test(
model_export,
(x,),
training=torch.onnx.TrainingMode.PRESERVE,
rtol=1e-3,
atol=1e-5,
)
def test_instancenorm_training(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.in1 = torch.nn.InstanceNorm2d(3, affine=True)
self.cv1 = torch.nn.Conv2d(3, 3, 10)
self.in2 = torch.nn.InstanceNorm2d(3, affine=False)
self.cv2 = torch.nn.Conv2d(3, 3, 10)
self.in3 = torch.nn.InstanceNorm2d(3, affine=True)
def forward(self, x):
x = self.in1(x)
x = self.cv1(x)
x = self.in2(x)
x = self.cv2(x)
x = self.in3(x)
return x
x = torch.randn(10, 3, 128, 128)
model_export = MyModule()
self.run_test(
model_export,
(x,),
training=torch.onnx.TrainingMode.TRAINING,
rtol=1e-3,
atol=1e-5,
)
model_export.train()
self.run_test(
model_export,
(x,),
training=torch.onnx.TrainingMode.PRESERVE,
rtol=1e-3,
atol=1e-5,
)
def test_instancenorm_training_mode_fix_layer(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.in1 = torch.nn.InstanceNorm2d(3, affine=True)
self.cv1 = torch.nn.Conv2d(3, 3, 10)
self.in2 = torch.nn.InstanceNorm2d(3, affine=False)
self.cv2 = torch.nn.Conv2d(3, 3, 10)
self.in3 = torch.nn.InstanceNorm2d(3, affine=True)
self.in3.eval()
def forward(self, x):
x = self.in1(x)
x = self.cv1(x)
x = self.in2(x)
x = self.cv2(x)
x = self.in3(x)
return x
x = torch.randn(10, 3, 128, 128)
model_export = MyModule()
self.run_test(
model_export,
(x,),
training=torch.onnx.TrainingMode.TRAINING,
rtol=1e-3,
atol=1e-5,
)
model_export.train()
self.run_test(
model_export,
(x,),
training=torch.onnx.TrainingMode.PRESERVE,
rtol=1e-3,
atol=1e-5,
)
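    # Mirror of the BatchNorm case: one InstanceNorm layer stays in train() while the
    # model is exported in EVAL/PRESERVE mode.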
def test_instancenorm_eval_mode_train_layer(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.in1 = torch.nn.InstanceNorm2d(8, affine=True)
self.cv1 = torch.nn.Conv2d(8, 8, 10)
self.in2 = torch.nn.InstanceNorm2d(8, affine=False)
self.cv2 = torch.nn.Conv2d(8, 8, 10)
self.in3 = torch.nn.InstanceNorm2d(8, affine=True)
self.in3.train()
def forward(self, x):
x = self.in1(x)
x = self.cv1(x)
x = self.in2(x)
x = self.cv2(x)
x = self.in3(x)
return x
x = torch.randn(10, 8, 128, 128)
model_export = MyModule()
self.run_test(
model_export,
(x,),
training=torch.onnx.TrainingMode.EVAL,
rtol=1e-3,
atol=1e-5,
)
model_export.eval()
self.run_test(
model_export,
(x,),
training=torch.onnx.TrainingMode.PRESERVE,
rtol=1e-3,
atol=1e-5,
)
@skipIfUnsupportedMinOpsetVersion(12)
def test_dropout_training(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.dropout = torch.nn.Dropout(0.4)
def forward(self, x):
dropout = self.dropout(x)
return dropout
model = MyModule()
x = torch.randn(10)
model.train()
model_onnx = io.BytesIO()
torch.onnx.export(
model,
x,
model_onnx,
opset_version=self.opset_version,
do_constant_folding=False,
training=torch.onnx.TrainingMode.TRAINING,
)
ort_sess = verification._ort_session(model_onnx)
ort_outs = verification._run_ort(ort_sess, (x,))
assert not torch.all(torch.eq(x, torch.from_numpy(ort_outs[0])))
script_model = torch.jit.script(model)
output = model(x)
model_onnx = io.BytesIO()
        torch.onnx.export(
            script_model,
            x,
            model_onnx,
            opset_version=self.opset_version,
            do_constant_folding=False,
            training=torch.onnx.TrainingMode.TRAINING,
        )
        ort_sess = verification._ort_session(model_onnx)
        ort_outs = verification._run_ort(ort_sess, (x,))
assert not torch.all(torch.eq(x, torch.from_numpy(ort_outs[0])))
@skipIfUnsupportedMinOpsetVersion(12)
def test_dropout_training_zero(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.dropout = torch.nn.Dropout(0.5)
def forward(self, x):
dropout = self.dropout(x)
return dropout
model = MyModule()
# ensure there are no zeros in the input
x = torch.randn(10, 3, 128, 128)
y = x.numpy()
y_mask = np.where(y == 0, 1, y)
input = torch.from_numpy(y_mask)
nb_elements = torch.numel(input)
model.train()
model_onnx = io.BytesIO()
torch.onnx.export(
model,
x,
model_onnx,
opset_version=self.opset_version,
do_constant_folding=False,
training=torch.onnx.TrainingMode.TRAINING,
)
ort_sess = verification._ort_session(model_onnx)
ort_outs = verification._run_ort(ort_sess, (x,))
y = model(input)
output = y.cpu().numpy()
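        # Dropout masks are random, so compare the fraction of zeroed elements
        # rather than exact values.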
ort_mask = np.where(ort_outs[0] != 0, 1, 0)
pyt_mask = np.where(output != 0, 1, 0)
ratio_pytorch = np.sum(pyt_mask) / nb_elements
ratio_ort = np.sum(ort_mask) / nb_elements
np.testing.assert_allclose(ratio_pytorch, ratio_ort, rtol=0.01, atol=0.01)
script_model = torch.jit.script(model)
y = model(input)
output = y.cpu().numpy()
model_onnx = io.BytesIO()
torch.onnx.export(
            script_model,
x,
model_onnx,
opset_version=self.opset_version,
do_constant_folding=False,
training=torch.onnx.TrainingMode.TRAINING,
)
ort_sess = verification._ort_session(model_onnx)
ort_outs = verification._run_ort(ort_sess, (x,))
ort_mask = np.where(ort_outs[0] != 0, 1, 0)
pyt_mask = np.where(output != 0, 1, 0)
ratio_pytorch = np.sum(pyt_mask) / nb_elements
ratio_ort = np.sum(ort_mask) / nb_elements
np.testing.assert_allclose(ratio_pytorch, ratio_ort, rtol=0.01, atol=0.01)
def test_conv_bn(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(
3, 16, kernel_size=1, stride=2, padding=3, bias=True
)
self.bn = torch.nn.BatchNorm2d(16, affine=True)
def forward(self, x):
x = self.conv(x)
bn = self.bn(x)
return bn
model_export = MyModule()
x = torch.randn(10, 3, 128, 128)
self.run_test(model_export, (x,), training=torch.onnx.TrainingMode.EVAL)
self.run_test(
model_export,
(x,),
training=torch.onnx.TrainingMode.TRAINING,
rtol=1e-3,
atol=1e-5,
)
def test_multiple_conv_bn(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(
3, 64, kernel_size=7, stride=2, padding=3, bias=False
)
self.conv2 = torch.nn.Conv2d(
64, 2, kernel_size=1, stride=1, padding=0, bias=False
)
self.conv3 = torch.nn.Conv2d(
2, 2, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn = torch.nn.BatchNorm2d(64)
self.bn2 = torch.nn.BatchNorm2d(2)
self.relu = torch.nn.ReLU(inplace=True)
self.maxpool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.conv1(x)
x = self.bn(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn2(x)
x = self.relu(x)
return x
model_export = MyModule()
x = torch.randn(2, 3, 224, 224)
self.run_test(
model_export,
(x,),
training=torch.onnx.TrainingMode.TRAINING,
rtol=1e-3,
atol=1e-5,
)
self.run_test(model_export, (x,), training=torch.onnx.TrainingMode.EVAL)
@skipIfUnsupportedMinOpsetVersion(11)
def test_nms(self):
num_boxes = 100
boxes = torch.rand(num_boxes, 4)
boxes[:, 2:] += boxes[:, :2]
scores = torch.randn(num_boxes)
class Module(torch.nn.Module):
def forward(self, boxes, scores):
return torchvision.ops.nms(boxes, scores, 0.5)
self.run_test(Module(), (boxes, scores))
@unittest.skip(
"Broken in recent TorchVision, see https://github.com/pytorch/pytorch/issues/81121"
)
@skipIfUnsupportedMinOpsetVersion(11)
# TODO: Fails with vision 0.13. See #77671
def test_batched_nms(self):
num_boxes = 100
boxes = torch.rand(num_boxes, 4)
boxes[:, 2:] += boxes[:, :2]
scores = torch.randn(num_boxes)
idxs = torch.randint(0, 5, size=(num_boxes,))
class Module(torch.nn.Module):
def forward(self, boxes, scores, idxs):
return torchvision.ops.batched_nms(boxes, scores, idxs, 0.5)
self.run_test(Module(), (boxes, scores, idxs))
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest()
def test_clip_boxes_to_image(self):
boxes = torch.randn(5, 4) * 500
boxes[:, 2:] += boxes[:, :2]
size = torch.randn(200, 300)
size_2 = torch.randn(300, 400)
class Module(torch.nn.Module):
def forward(self, boxes, size):
shape = (size.shape[0], size.shape[1])
return torchvision.ops.boxes.clip_boxes_to_image(boxes, shape)
self.run_test(
Module(),
(boxes, size),
input_names=["boxes", "size"],
dynamic_axes={"size": [0, 1]},
additional_test_inputs=[(boxes, size), (boxes, size_2)],
)
@unittest.skip(
"Broken in recent TorchVision, see https://github.com/pytorch/pytorch/issues/81121"
)
@skipIfUnsupportedMaxOpsetVersion(15) # TODO: Opset 16 RoiAlign result mismatch
@skipIfUnsupportedMinOpsetVersion(11)
def test_roi_align(self):
x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
single_roi = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32)
model = torchvision.ops.RoIAlign((5, 5), 1.0, 2)
self.run_test(model, (x, single_roi))
@unittest.skip(
"Broken in recent TorchVision, see https://github.com/pytorch/pytorch/issues/81121"
)
@skipIfUnsupportedMaxOpsetVersion(15) # TODO: Opset 16 RoiAlign result mismatch
@skipIfUnsupportedMinOpsetVersion(11)
def test_roi_align_aligned(self):
x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
single_roi = torch.tensor([[0, 1.5, 1.5, 3, 3]], dtype=torch.float32)
model1 = torchvision.ops.RoIAlign((5, 5), 1.0, 2, aligned=True)
self.run_test(model1, (x, single_roi))
x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32)
model2 = torchvision.ops.RoIAlign((5, 5), 0.5, 3, aligned=True)
self.run_test(model2, (x, single_roi))
x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32)
model3 = torchvision.ops.RoIAlign((5, 5), 1.8, 2, aligned=True)
self.run_test(model3, (x, single_roi))
x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32)
model4 = torchvision.ops.RoIAlign((2, 2), 2.5, 0, aligned=True)
self.run_test(model4, (x, single_roi))
@unittest.skip(
"Broken in recent TorchVision, see https://github.com/pytorch/pytorch/issues/81121"
)
@skipIfUnsupportedMinOpsetVersion(11)
def test_roi_pool(self):
x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
rois = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32)
pool_h = 5
pool_w = 5
model = torchvision.ops.RoIPool((pool_h, pool_w), 2.0)
self.run_test(model, (x, rois))
@skipIfUnsupportedMinOpsetVersion(11)
def test_resize_images(self):
class TransformModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.transform = _init_test_generalized_rcnn_transform()
def forward(self, images):
return self.transform.resize(images, None)[0]
input = torch.rand(3, 10, 20)
input_test = torch.rand(3, 100, 150)
self.run_test(
TransformModule(),
(input,),
input_names=["input1"],
dynamic_axes={"input1": [0, 1, 2]},
additional_test_inputs=[(input,), (input_test,)],
)
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest()
def test_transform_images(self):
class TransformModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.transform = _init_test_generalized_rcnn_transform()
def forward(self, images: List[Tensor]):
return self.transform(images)[0].tensors
input = torch.rand(3, 100, 200), torch.rand(3, 200, 200)
input_test = torch.rand(3, 100, 200), torch.rand(3, 200, 200)
self.run_test(
TransformModule(),
(input,),
additional_test_inputs=[(input,), (input_test,)],
)
def get_features(self, images):
s0, s1 = images.shape[-2:]
features = [
("0", torch.rand(2, 256, s0 // 4, s1 // 4)),
("1", torch.rand(2, 256, s0 // 8, s1 // 8)),
("2", torch.rand(2, 256, s0 // 16, s1 // 16)),
("3", torch.rand(2, 256, s0 // 32, s1 // 32)),
("4", torch.rand(2, 256, s0 // 64, s1 // 64)),
]
features = OrderedDict(features)
return features
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest()
def test_rpn(self):
class RPNModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.rpn = _init_test_rpn()
def forward(self, images, features: Dict[str, Tensor]):
images_m = torchvision.models.detection.image_list.ImageList(
images, [(i.shape[-1], i.shape[-2]) for i in images]
)
return self.rpn(images_m, features)
images = torch.rand(2, 3, 150, 150)
features = self.get_features(images)
images2 = torch.rand(2, 3, 80, 80)
test_features = self.get_features(images2)
model = RPNModule()
model.eval()
model(images, features)
self.run_test(
model,
(images, features),
input_names=["input1", "input2", "input3", "input4", "input5", "input6"],
dynamic_axes={
"input1": [0, 1, 2, 3],
"input2": [0, 1, 2, 3],
"input3": [0, 1, 2, 3],
"input4": [0, 1, 2, 3],
"input5": [0, 1, 2, 3],
"input6": [0, 1, 2, 3],
},
additional_test_inputs=[(images, features), (images2, test_features)],
# dict_check=False,
)
@skipIfUnsupportedMaxOpsetVersion(15) # TODO: Opset 16 RoiAlign result mismatch
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest()
def test_multi_scale_roi_align(self):
class TransformModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.model = torchvision.ops.MultiScaleRoIAlign(
["feat1", "feat2"], 3, 2
)
self.image_sizes = [(512, 512)]
def forward(self, input: Dict[str, Tensor], boxes: List[Tensor]) -> Tensor:
return self.model(input, boxes, self.image_sizes)
i = OrderedDict()
i["feat1"] = torch.rand(1, 5, 64, 64)
i["feat2"] = torch.rand(1, 5, 16, 16)
boxes = torch.rand(6, 4) * 256
boxes[:, 2:] += boxes[:, :2]
i1 = OrderedDict()
i1["feat1"] = torch.rand(1, 5, 64, 64)
i1["feat2"] = torch.rand(1, 5, 16, 16)
boxes1 = torch.rand(6, 4) * 256
boxes1[:, 2:] += boxes1[:, :2]
self.run_test(
TransformModule(),
(
i,
[boxes],
),
additional_test_inputs=[
(
i,
[boxes],
),
(
i1,
[boxes1],
),
],
)
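    # x.set_(y) rebinds x to y's storage, so only y remains a real ONNX input
    # (remained_onnx_input_idx=[1]).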
def test_set_(self):
class M(torch.nn.Module):
def forward(self, x, y):
x.set_(y)
return x
x = torch.ones(2, 3)
y = torch.randn(4, 6)
self.run_test(M(), (x, y), remained_onnx_input_idx=[1])
y2 = torch.randn(5, 2)
self.run_test(
M(),
(x, y),
remained_onnx_input_idx=[1],
input_names=["x", "y"],
dynamic_axes={"x": [0, 1], "y": [0, 1]},
additional_test_inputs=[(y, y2)],
)
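    # The test_set_attr_* cases mutate module attributes (weights, consts) inside
    # forward; export must handle these in-place attribute reassignments.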
@skipIfUnsupportedMinOpsetVersion(9)
def test_set_attr_modules(self):
class InnerModule2(torch.nn.Module):
def __init__(self, embedding_dim):
super().__init__()
self.weights = InnerModule2.get_embedding(embedding_dim)
self.register_buffer("_float_tensor", torch.FloatTensor(1))
self.const = 2
@staticmethod
def get_embedding(embedding_dim: int):
emb = 4 / ((embedding_dim // 2) - 1)
emb = torch.exp(
torch.arange((embedding_dim // 2), dtype=torch.float) * -emb
)
return emb
def forward(self, input, incremental_state: Optional[Tensor] = None):
bsz, seq_len = input.shape[0], input.shape[1]
self.const = 3
if self.weights is None:
self.weights = InnerModule.get_embedding(self.embedding_dim)
self.weights = self.weights.to(self._float_tensor)
self.weights = self.weights * self.const
if incremental_state is not None:
pos = seq_len
return self.weights[1 + pos, :].expand(bsz, 1, -1)
return self.weights.index_select(
0, torch.ones((bsz * seq_len), dtype=torch.int64)
).view(bsz, seq_len, -1)
class InnerModule(torch.nn.Module):
def __init__(self, embedding_dim):
super().__init__()
self.weights = InnerModule.get_embedding(embedding_dim)
self.module = InnerModule2(embedding_dim=8)
@staticmethod
def get_embedding(embedding_dim: int):
emb = 4 / ((embedding_dim // 2) - 1)
emb = torch.exp(
torch.arange((embedding_dim // 2), dtype=torch.float) * -emb
)
return emb
def forward(self, x):
return self.module(x) + self.weights
class Module(torch.nn.Module):
def __init__(self):
super().__init__()
self.module = InnerModule(embedding_dim=8)
def forward(self, x):
return self.module(x)
x = torch.randn(3, 256)
self.run_test(Module(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]})
self.run_test(Module(), (x,), remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_set_attr_modules_2(self):
class InnerModule(torch.nn.Module):
def __init__(self, embedding_dim):
super().__init__()
self.embedding_dim = embedding_dim
self.const = 2.5
self.weights = InnerModule.get_embedding(self.embedding_dim)
self.register_buffer("_float_tensor", torch.FloatTensor(1))
@staticmethod
def get_embedding(embedding_dim: int):
emb = 4 / ((embedding_dim // 2) - 1)
emb = torch.exp(
torch.arange((embedding_dim // 2), dtype=torch.float) * -emb
)
return emb
def forward(self, input, incremental_state: Optional[Tensor] = None):
bsz, seq_len = input.shape[0], input.shape[1]
self.const = 1.5
self.weights = InnerModule.get_embedding(self.embedding_dim)
return (
self.weights.index_select(
0, torch.ones((bsz * seq_len), dtype=torch.int64)
).view(bsz, seq_len, -1)
) * self.const
class Module(torch.nn.Module):
def __init__(self):
super().__init__()
self.module = InnerModule(embedding_dim=8)
def forward(self, x):
return self.module(x)
x = torch.randn(3, 256)
self.run_test(Module(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]})
self.run_test(Module(), (x,), remained_onnx_input_idx=[])
def test_set_attr(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv1d(3, 10, 2)
self.b = False
def forward(self, box_regression, weight):
self.b = True
self.conv.weight = weight
w = torch.softmax(self.conv.weight, dim=0)
self.conv.weight = w + w
if self.b:
return box_regression + self.conv.weight
else:
return box_regression - self.conv.weight
model = torch.jit.script(MyModule())
weight = torch.ones(3, 2)
box_regression = torch.randn(3, 2)
self.run_test(model, (box_regression, weight))
@skipIfUnsupportedMinOpsetVersion(11)
def test_set_attr_2(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv1d(10, 3, 3)
self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))
def set_cell_anchors(self, anchors):
if self.conv.bias is not None:
b = self.conv.bias
assert b is not None
self.conv.bias = anchors + b
elif self.conv.weight is not None:
self.conv.weight = torch.randn(3, 10)
self.conv.bias = self.conv.weight[:]
def forward(self, anchors) -> Optional[Tensor]:
self.set_cell_anchors(anchors)
return self.conv.bias
model = torch.jit.script(MyModule())
anchors = torch.ones(3, 10, 3)
self.run_test(model, (anchors))
@skipIfUnsupportedMinOpsetVersion(11)
def test_set_attr_3(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv1d(10, 3, 3)
self.conv.weight = torch.nn.Parameter(torch.zeros(3, 10))
self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))
def set_cell_anchors(self, anchors, boxes):
self.conv.weight = torch.ones(3, 10)
if self.conv.bias is not None:
self.conv.bias = torch.randn(3, 10, 3)
self.conv.weight = anchors + self.conv.weight
boxes[:] = torch.zeros(2, 3)
def forward(self, anchors) -> Tuple[Tensor, Tensor]:
boxes = torch.ones(2, 2, 3)
self.set_cell_anchors(anchors, boxes)
if self.conv.bias is not None:
return self.conv.weight, boxes
return anchors, boxes
model = torch.jit.script(MyModule())
anchors = torch.rand(3, 10)
self.run_test(model, (anchors))
@skipIfUnsupportedMinOpsetVersion(11)
def test_set_attr_4(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv1d(10, 3, 3)
self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))
def set_cell_anchors(self, anchors):
self.conv.weight = torch.zeros(10, 3)
if self.conv.bias is not None:
w = self.conv.bias
assert w is not None
self.conv.bias = anchors + w
else:
self.conv.bias = torch.ones(3, 10, 3)
def forward(self, feature_maps, anchors) -> Tuple[Tensor, Tensor]:
self.set_cell_anchors(anchors)
result = []
if self.conv.bias is not None:
a = self.conv.bias
assert a is not None
result += [a]
result += [feature_maps]
return result[0], result[1]
model = torch.jit.script(MyModule())
x = torch.rand(5, 11, 30)
anchors = torch.ones(3, 10, 3)
self.run_test(model, (x, anchors))
@skipIfUnsupportedMinOpsetVersion(11)
def test_set_attr_5(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv1d(10, 3, 3)
self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))
def set_cell_anchors(self, anchors):
self.conv.weight = torch.arange(10)
for i in range(10):
if i == 3:
for j in range(10):
w = self.conv.weight
self.conv.weight = torch.arange(10) + w
self.conv.weight = self.conv.weight + torch.arange(10)
                # NOTE: the `is not None` check and `assert` are needed to satisfy TorchScript.
if self.conv.bias is not None:
a = self.conv.bias
assert a is not None
self.conv.bias = anchors + a
def forward(self, anchors):
self.set_cell_anchors(anchors)
return self.conv.weight, self.conv.bias
model = torch.jit.script(MyModule())
anchors = torch.ones(3, 10, 3)
self.run_test(model, (anchors))
@skipIfUnsupportedMinOpsetVersion(11)
def test_set_attr_in_loop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv1d(10, 3, 3)
self.conv.weight = torch.nn.Parameter(torch.zeros(3, 10))
self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))
def set_cell_anchors(self, anchors, boxes):
self.conv.weight = torch.randn(3, 10)
for i in range(self.conv.weight.size(0)):
for j in range(10):
self.conv.bias = torch.randn(3, 10, 3)
self.conv.weight = anchors * i
boxes[j] += torch.ones(3, 3)
def forward(self, anchors) -> Tuple[Tensor, Tensor]:
boxes = torch.ones(10, 3, 3)
self.set_cell_anchors(anchors, boxes)
if self.conv.bias is not None:
return self.conv.weight, boxes
return anchors, boxes
model = torch.jit.script(MyModule())
anchors = torch.rand(10)
self.run_test(model, anchors)
@skipIfUnsupportedMinOpsetVersion(13)
def test_set_attr_in_loop_with_list(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv1d(10, 3, 3)
self.conv.weight = torch.nn.Parameter(torch.zeros(3, 10))
self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))
self.boxes: List[Tensor] = [
torch.ones(1)
] # Workaround placeholder for TorchScript
def set_cell_anchors(self, anchors):
self.conv.weight = torch.randn(3, 10)
for i in range(self.conv.weight.size(0)):
for j in range(10):
self.conv.bias = torch.randn(3, 10, 3)
self.conv.weight = anchors * i
self.boxes.append(torch.ones(3, 3))
def forward(self, anchors) -> Tuple[Tensor, List[Tensor]]:
self.boxes = []
self.set_cell_anchors(anchors)
if self.conv.bias is not None:
return self.conv.weight, self.boxes
return anchors, self.boxes
model = torch.jit.script(MyModule())
anchors = torch.rand(10)
self.run_test(model, anchors)
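    # The test_index_put_if_* cases use slice assignment (index_put) inside scripted
    # if-branches; an empty prev_state tensor selects which branch is taken.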
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_if(self):
@torch.jit.script
def check_init(
input_data: Tensor, hidden_size: int, prev_state: Tensor
) -> Tuple[Tensor, Tensor]:
batch_size = input_data.size(0)
spatial_size_0 = input_data.size(2)
spatial_size_1 = input_data.size(3)
# generate empty prev_state, if None is provided
state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)
state = torch.zeros(state_size, device=input_data.device)
state_copy = torch.zeros(state_size, device=input_data.device)
if prev_state.size(0) == 0:
state[:] = (
torch.zeros(batch_size, hidden_size, spatial_size_0, spatial_size_1)
+ state[:]
)
state_copy[:] = (
torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
* 2
)
state_copy[:] = (
torch.zeros(batch_size, hidden_size, spatial_size_0, spatial_size_1)
* 2
)
else:
state[:] = (
torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
* 4
)
return state, state_copy
class Example(torch.nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.hidden_size = hidden_size
def forward(self, input_data, prev_state):
prev_state = check_init(input_data, self.hidden_size, prev_state)
return prev_state[0], prev_state[1]
model = Example(10)
random_data = torch.rand((1, 5, 30, 30))
empty_tensor = torch.tensor([], dtype=torch.float).view(0, 0, 0, 0, 0)
self.run_test(
model,
(random_data, empty_tensor),
input_names=["random_data", "empty_tensor"],
dynamic_axes={"random_data": [0, 1, 2, 3], "empty_tensor": [0, 1, 2, 3, 4]},
)
self.run_test(model, (random_data, empty_tensor), remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_if_2(self):
@torch.jit.script
def check_init(
input_data: Tensor, hidden_size: int, prev_state: Tensor
) -> Tuple[Tensor, Tensor]:
batch_size = input_data.size(0)
spatial_size_0 = input_data.size(2)
spatial_size_1 = input_data.size(3)
# generate empty prev_state, if None is provided
state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)
state = torch.zeros(state_size, device=input_data.device)
state_copy = torch.zeros(state_size, device=input_data.device)
if prev_state.size(0) == 0:
for i in range(2):
state[:] = (
torch.ones(
batch_size, hidden_size, spatial_size_0, spatial_size_1
)
* i
)
state_copy[:] = (
torch.ones(
batch_size, hidden_size, spatial_size_0, spatial_size_1
)
* i
)
elif prev_state.size(0) == 1:
s = state[:]
state[:] = prev_state + s
elif prev_state.size(0) == 2:
state[:] = (
torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
* 4
)
return state, state_copy
class Example(torch.nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.hidden_size = hidden_size
def forward(self, input_data, prev_state):
prev_state = check_init(input_data, self.hidden_size, prev_state)
return prev_state[0], prev_state[1]
model = Example(10)
random_data = torch.rand((1, 5, 30, 30))
empty_tensor = torch.tensor([], dtype=torch.float).view(0, 0, 0, 0, 0)
random_state = torch.rand((1, 1, 10, 30, 30))
self.run_test(
model,
(random_data, empty_tensor),
input_names=["data", "state"],
dynamic_axes={"data": [0, 1, 2], "state": [0, 1, 2, 3, 4]},
additional_test_inputs=[(random_data, random_state)],
)
self.run_test(
model,
(random_data, empty_tensor),
input_names=["data", "state"],
dynamic_axes={"state": [0, 1, 2, 3, 4]},
additional_test_inputs=[(random_data, random_state)],
remained_onnx_input_idx=[1],
)
self.run_test(model, (random_data, empty_tensor), remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_if_3(self):
@torch.jit.script
def check_init(
input_data: Tensor, hidden_size: int, prev_state: Tensor
) -> Tensor:
batch_size = input_data.size(0)
spatial_size_0 = input_data.size(2)
spatial_size_1 = input_data.size(3)
# generate empty prev_state, if None is provided
state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)
state = torch.zeros(state_size, device=input_data.device)
if prev_state.size(0) < 2:
state = state * 3
if prev_state.size(0) == 0:
state[:] = (
torch.ones(
batch_size, hidden_size, spatial_size_0, spatial_size_1
)
* 3
)
else:
state = state + 2
return state
class Example(torch.nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.hidden_size = hidden_size
def forward(self, input_data, prev_state):
prev_state = check_init(input_data, self.hidden_size, prev_state)
return prev_state
model = Example(4)
random_data = torch.rand((1, 5, 4, 4))
empty_tensor = torch.tensor([], dtype=torch.float).view(0, 0, 0, 0, 0)
self.run_test(
model,
(random_data, empty_tensor),
input_names=["random_data", "empty_tensor"],
dynamic_axes={"random_data": [0, 1, 2, 3], "empty_tensor": [0, 1, 2, 3, 4]},
)
self.run_test(model, (random_data, empty_tensor), remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_if_4(self):
@torch.jit.script
def check_init(
input_data: Tensor, hidden_size: int, prev_state: Tensor
) -> Tensor:
batch_size = input_data.size(0)
spatial_size_0 = input_data.size(2)
spatial_size_1 = input_data.size(3)
# generate empty prev_state, if None is provided
state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)
state = torch.zeros(state_size, device=input_data.device)
if prev_state.size(0) == 0:
state = state + 3
state[:] = (
torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
* 3
)
state = state + 3
state[:] = (
torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
* 4
)
else:
state = state + 2
return state
class Example(torch.nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.hidden_size = hidden_size
def forward(self, input_data, prev_state):
prev_state = check_init(input_data, self.hidden_size, prev_state)
return prev_state
model = Example(4)
random_data = torch.rand((1, 5, 4, 4))
empty_tensor = torch.tensor([], dtype=torch.float).view(0, 0, 0, 0, 0)
self.run_test(
model,
(random_data, empty_tensor),
input_names=["random_data", "empty_tensor"],
dynamic_axes={"random_data": [0, 1, 2, 3], "empty_tensor": [0, 1, 2, 3, 4]},
)
self.run_test(model, (random_data, empty_tensor), remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_if_5(self):
@torch.jit.script
def check_init(
input_data: Tensor, hidden_size: int, prev_state: Tensor
) -> Tuple[Tensor, Tensor]:
batch_size = input_data.size(0)
spatial_size_0 = input_data.size(2)
spatial_size_1 = input_data.size(3)
# generate empty prev_state, if None is provided
state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)
state = torch.zeros(state_size, device=input_data.device)
state_ref = state
if prev_state.size(0) == 0:
state[:] = (
torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
* 3
)
state = state + 3
state[:] = (
torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
* 4
)
else:
state = state + 2
return state, state_ref
class Example(torch.nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.hidden_size = hidden_size
def forward(self, input_data, prev_state):
prev_state, state_ref = check_init(
input_data, self.hidden_size, prev_state
)
return prev_state, state_ref
model = Example(4)
random_data = torch.rand((1, 5, 4, 4))
empty_tensor = torch.tensor([], dtype=torch.float).view(0, 0, 0, 0, 0)
self.run_test(
model,
(random_data, empty_tensor),
input_names=["random_data", "empty_tensor"],
dynamic_axes={"random_data": [0, 1, 2, 3], "empty_tensor": [0, 1, 2, 3, 4]},
)
self.run_test(model, (random_data, empty_tensor), remained_onnx_input_idx=[])
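    # List append/pop/del inside scripted loops exercise ONNX sequence ops in loop
    # bodies, hence the opset gates on the following tests.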
@skipIfUnsupportedMinOpsetVersion(11)
def test_list_append_in_block(self):
class ListModel(torch.nn.Module):
def forward(self, x, y):
res = []
for i in range(x.size(0)):
res.append(torch.matmul(x[i], y))
return res
model = torch.jit.script(ListModel())
x = torch.randn(16, 3, 4)
y = torch.randn(4, 5)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(13)
def test_list_append_in_nested_block(self):
class ListModel(torch.nn.Module):
def forward(self, x, y):
res = []
for i in range(x.size(0)):
for j in range(x.size(1)):
res.append(torch.matmul(x[i][j], y))
return res
model = torch.jit.script(ListModel())
x = torch.randn(4, 4, 3, 4)
y = torch.randn(4, 5)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(13)
def test_list_pop_in_block(self):
class ListModel(torch.nn.Module):
def forward(self, x, y):
res = []
elem = torch.matmul(x[0], y)
for i in range(x.size(0)):
res.append(torch.matmul(x[i], y))
for i in range(x.size(0)):
elem = res.pop()
for i in range(x.size(0)):
res.append(torch.matmul(x[i], y))
elem = res.pop()
return res.append(elem)
model = torch.jit.script(ListModel())
x = torch.randn(16, 3, 4)
y = torch.randn(4, 5)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(13)
def test_list_del_in_block(self):
class ListModel(torch.nn.Module):
def forward(self, x, y):
res = []
elem = torch.matmul(x[0], y)
for i in range(x.size(0)):
res.append(torch.matmul(x[i], y))
for i in range(x.size(0)):
del res[0]
for i in range(x.size(0)):
res.append(torch.matmul(x[i], y))
del res[0]
return res.append(elem)
model = torch.jit.script(ListModel())
x = torch.randn(16, 3, 4)
y = torch.randn(4, 5)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(11)
def test_list_unpack(self):
class ListModel(torch.nn.Module):
def forward(self, x, y):
res = []
elem = torch.matmul(x[0], y)
for i in range(x.size(0)):
res.append(torch.matmul(x[i], y))
a, b, c = res
return a, b
model = torch.jit.script(ListModel())
x = torch.randn(3, 3, 4)
y = torch.randn(4, 5)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_inplace_ops(self):
@torch.jit.script
def check_init(input_data: Tensor, hidden_size: int) -> Tensor:
batch_size = input_data.size(0)
spatial_size_0 = input_data.size(2)
spatial_size_1 = input_data.size(3)
# generate empty prev_state, if None is provided
state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)
state = torch.zeros(state_size, device=input_data.device)
if input_data.size(0) == 1:
state[1] += (
torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
* 2
)
state[1] /= (
torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
* 3
)
for i in range(input_data.size(0)):
state[1] += torch.ones(
batch_size, hidden_size, spatial_size_0, spatial_size_1
)
state[1] /= (
torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
* i
)
return state
class Example(torch.nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.hidden_size = hidden_size
def forward(self, input_data):
state = check_init(input_data, self.hidden_size)
return state
model = Example(10)
random_data = torch.rand((1, 5, 30, 30))
self.run_test(
model,
(random_data),
input_names=["random_data"],
dynamic_axes={"random_data": [0, 1, 2, 3]},
)
self.run_test(model, (random_data), remained_onnx_input_idx=[])
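    # Boolean-mask assignment (y[mask, :] = 0.0) lowers to index_put with bool indices.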
@skipIfUnsupportedMinOpsetVersion(11)
def test_input_mask_model(self):
class InputMaskModel(torch.nn.Module):
def __init__(self, output_size):
super().__init__()
self.bias = torch.nn.Parameter(
torch.empty(output_size, dtype=torch.float)
)
with torch.no_grad():
self.bias.zero_()
def forward(self, model_input, y):
input_mask = (model_input <= 0) | (model_input > 25)
y[input_mask, :] = 0.0
output = y + self.bias
return output
output_size = 4
m = InputMaskModel(output_size)
x = torch.tensor([0, 4, 24, 25], dtype=torch.int64)
y = torch.tensor(
[
[0.1, 0.2, 0.3, 0.4],
[0.1, 0.2, 0.3, 0.4],
[0.1, 0.2, 0.3, 0.4],
[0.1, 0.2, 0.3, 0.4],
],
dtype=torch.float,
)
self.run_test(m, (x, y))
class InputMaskModel(torch.nn.Module):
def __init__(self, output_size):
super().__init__()
def forward(self, model_input_1, model_input_2, y):
input_mask_1 = (model_input_1 <= 0) | (model_input_1 > 25)
input_mask_2 = (model_input_2 < 1) | (model_input_2 >= 12)
y[input_mask_1, input_mask_2] = 0.0
return y
output_size = 4
m = InputMaskModel(output_size)
x1 = torch.tensor([0, 4, 24, 25], dtype=torch.int64)
x2 = torch.tensor([0, 3, 12, 15], dtype=torch.int64)
y = torch.tensor(
[
[0.1, 0.2, 0.3, 0.4],
[0.1, 0.2, 0.3, 0.4],
[0.1, 0.2, 0.3, 0.4],
[0.1, 0.2, 0.3, 0.4],
],
dtype=torch.float,
)
self.run_test(m, (x1, x2, y))
@skipScriptTest()
def test_unsafe_chunk(self):
class ChunkModel(torch.nn.Module):
def forward(self, x):
return torch.unsafe_chunk(x, 3, dim=1)
model = ChunkModel()
model.eval()
x = torch.randn(1, 18)
self.run_test(model, x, input_names=["x"])
def test_symbolic_shape_inference(self):
# ConstantOfShape is tested in test_embedding_bag
# Tile is tested in test_repeat
# test Shape, Reshape, Transpose, Gather
class ShapeModel(torch.nn.Module):
def forward(self, x, y):
shape = x.size()[:3] + (-1,) # shape [4], ("batch", 3, 4, -1)
y = y.reshape(shape) # batch, 3, 4, 10/batch
return y.transpose(1, 2)
model = ShapeModel()
model.eval()
x = torch.ones(2, 3, 4, 5)
y = torch.ones(3, 4, 5, 2)
self.run_test(
model,
(x, y),
input_names=["x", "y"],
dynamic_axes={"x": [0, 1, 2, 3], "y": [0, 1, 2, 3]},
)
self.run_test(model, (x, y), remained_onnx_input_idx=[1])
class ViewModel(torch.nn.Module):
def forward(self, x):
return x.view(-1)
model = ViewModel()
model.eval()
x = torch.tensor(2.0)
self.run_test(model, (x,))
# test prim::ListConstruct for Reshape input 1
class ViewModel_2(torch.nn.Module):
def forward(self, x):
N, C, H, W = x.shape[0], x.shape[2], x.shape[3], x.shape[4]
x1 = x.view(N, -1, C, H, W)
x2 = x1.permute(0, 3, 4, 1, 2)
return x2.reshape(N, -1, C)
model = ViewModel_2()
model.eval()
x = torch.ones(2, 3, 4, 5, 6)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_symbolic_shape_inference_arange(self):
# test Range
class ArangeModel(torch.nn.Module):
def forward(self, signal):
frame_step = 2
outer_dimensions = signal.size()[:-2]
frames, frame_length = signal.size()[-2:]
subframe_length = signal.size()[0]
subframe_step = frame_step // subframe_length
subframes_per_frame = frame_length // subframe_length
output_size = frame_step * (frames - 1) + frame_length
output_subframes = output_size // subframe_length
frame = torch.arange(0, output_subframes)
return frame
model = ArangeModel()
model.eval()
M, C, K, N = 1, 2, 3, 4
x = torch.randint(5, (M, C, K, N))
y = torch.randint(5, (M, C + 1, K + 1, N + 1))
self.run_test(model, x, input_names=["x"], dynamic_axes={"x": [0, 1, 2, 3]})
self.run_test(model, x, remained_onnx_input_idx=[])
self.run_test(
model,
x,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2, 3]},
additional_test_inputs=[(x,), (y,)],
)
@skipIfUnsupportedMinOpsetVersion(11)
def test_symbolic_shape_inference_box(self):
# test NonZero
class BoxModel(torch.nn.Module):
def forward(self, boxes):
min_size = 1e-2
ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
keep = (ws >= min_size) & (hs >= min_size)
keep = torch.where(keep)[0]
return keep
model = BoxModel()
model.eval()
x = torch.ones(2, 4)
y = torch.ones(3, 5)
self.run_test(model, x)
self.run_test(
model,
x,
input_names=["x"],
dynamic_axes={"x": [0, 1]},
additional_test_inputs=[(x,), (y,)],
)
@skipIfUnsupportedMinOpsetVersion(11)
def test_symbolic_shape_inference_box_if(self):
# test If
class BoxIfModel(torch.nn.Module):
def forward(self, boxes, scores):
score_thresh = 0.0
inds = torch.where(scores > score_thresh)[0]
boxes_1 = boxes[inds]
if boxes_1.numel() > 3:
return boxes_1
else:
return boxes_1 * 2
model = BoxIfModel()
model.eval()
boxes = torch.ones(2, 4)
scores = torch.ones(1, 4)
self.run_test(model, (boxes, scores))
@skipIfUnsupportedMinOpsetVersion(11)
@skipDtypeChecking
def test_symbolic_shape_inference_arange_2(self):
# test Range
class ArangeModel(torch.nn.Module):
def forward(self, start):
return torch.arange(start.size(0), 8.5, 1.5, dtype=torch.int64)
x = torch.randn(2, 3, 4)
self.run_test(
ArangeModel(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
)
self.run_test(ArangeModel(), (x,), remained_onnx_input_idx=[])
class ArangeModel2(torch.nn.Module):
def forward(self, start):
return torch.arange(start.size(0), 8.5, 1.5, dtype=torch.double)
x = torch.randn(2, 3, 4)
self.run_test(
ArangeModel2(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
)
self.run_test(ArangeModel2(), (x,), remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_symbolic_shape_inference_nonzero(self):
class OneLikeModel(torch.nn.Module):
def forward(self, x):
ones = torch.ones_like(
x,
dtype=torch.float,
layout=torch.strided,
device=torch.device("cpu"),
)
return torch.nonzero(ones)
x = torch.randn(2)
self.run_test(OneLikeModel(), x, input_names=["x"], dynamic_axes={"x": [0]})
self.run_test(OneLikeModel(), x, remained_onnx_input_idx=[])
x = torch.randn(2, 3, 4)
self.run_test(
OneLikeModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
)
self.run_test(OneLikeModel(), x, remained_onnx_input_idx=[])
class ZeroLikeModel(torch.nn.Module):
def forward(self, x):
zeros = torch.zeros_like(
x,
dtype=torch.float,
layout=torch.strided,
device=torch.device("cpu"),
)
return torch.nonzero(zeros)
x = torch.randn(2)
self.run_test(ZeroLikeModel(), x, input_names=["x"], dynamic_axes={"x": [0]})
self.run_test(ZeroLikeModel(), x, remained_onnx_input_idx=[])
x = torch.randn(2, 3, 4)
self.run_test(
ZeroLikeModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
)
self.run_test(ZeroLikeModel(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_symbolic_shape_inference_expand_1(self):
class ExpandModel(torch.nn.Module):
def forward(self, x):
return x.expand(4, 6, 2)
x = torch.randn(6, 1, requires_grad=True)
self.run_test(ExpandModel(), (x,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_symbolic_shape_inference_expand_2(self):
class M(torch.nn.Module):
def forward(self, x):
input_shape = x.size()
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length)
causal_mask = (
seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
<= seq_ids[None, :, None]
)
return causal_mask.transpose(0, 1)
x = torch.randn(3, 16)
self.run_test(M(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]})
self.run_test(M(), (x,), remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(10)
def test_symbolic_shape_inference_slice(self):
class M(torch.nn.Module):
def forward(self, x, position_bias):
input_shape = x.size()
batch_size, seq_length = input_shape
position_bias = position_bias[:, :, -seq_length:, :]
return position_bias.transpose(0, 1)
x = torch.randn(3, 16)
position_bias = torch.randn(1, 3, 20, 8)
self.run_test(
M(),
(x, position_bias),
input_names=["x", "position_bias"],
dynamic_axes={"x": [0, 1], "position_bias": [0, 1, 2, 3]},
)
self.run_test(M(), (x, position_bias), remained_onnx_input_idx=[1])
def test_symbolic_shape_inference_slice_2(self):
class M(torch.nn.Module):
def forward(self, position_bias):
position_bias = position_bias[:, :, -2:, :]
return position_bias.transpose(0, 1)
position_bias = torch.randn(1, 3, 20, 8)
self.run_test(M(), (position_bias,))
@skipIfUnsupportedMinOpsetVersion(9)
@skipScriptTest()
def test_symbolic_shape_inference_time(self):
input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
h0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)
c0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)
model_lstm = torch.nn.LSTM(
RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False
)
self.run_test(
model_lstm,
(input, (h0, c0)),
input_names=["x", "y"],
dynamic_axes={"x": [0, 1]},
)
model_gru = torch.nn.GRU(
RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False, bias=False
)
self.run_test(
model_gru, (input, h0), input_names=["x", "y"], dynamic_axes={"x": [0, 1]}
)
model_rnn = torch.nn.RNN(
RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False, bias=False
)
self.run_test(
model_rnn, (input, h0), input_names=["x", "y"], dynamic_axes={"x": [0, 1]}
)
def test_symbolic_shape_inference_dynamic_axes(self):
class M(torch.nn.Module):
def forward(self, input_ids):
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
return input_ids.transpose(0, 1)
x = torch.randn(3, 16)
self.run_test(
M(),
(x,),
input_names=["input_ids"],
dynamic_axes={"input_ids": {0: "batch", 1: "sequence"}},
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_hann_window_periodic(self):
class HannWindowModule_Periodic(torch.nn.Module):
def __init__(self):
super().__init__()
self.window_length = 0
def forward(self, x, window_length: int):
self.window_length = window_length
return torch.add(
x,
torch.hann_window(
self.window_length, periodic=True, dtype=torch.float
),
)
win_length = 100
x = torch.randn(win_length)
module = HannWindowModule_Periodic()
self.run_test(module, (x, win_length))
@skipIfUnsupportedMinOpsetVersion(9)
def test_hann_window_not_periodic(self):
class HannWindowModule_NotPeriodic(torch.nn.Module):
def __init__(self):
super().__init__()
self.window_length = 0
def forward(self, x, window_length: int):
self.window_length = window_length
return torch.add(
x,
torch.hann_window(
self.window_length, periodic=False, dtype=torch.float
),
)
win_length = 100
x = torch.randn(win_length)
module = HannWindowModule_NotPeriodic()
self.run_test(module, (x, win_length))
@skipIfUnsupportedMinOpsetVersion(9)
@skipScriptTest()
def test_hann_window_default_values(self):
class HannWindowModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.window_length = 0
def forward(self, x, window_length: int):
import torch.nn.functional as F
self.window_length = window_length
return torch.add(x, F.relu(torch.hann_window(self.window_length)))
win_length = 100
x = torch.randn(win_length, dtype=torch.float)
module = HannWindowModule()
output = module(x, win_length)
self.run_test(module, (x, win_length))
@skipIfUnsupportedMinOpsetVersion(12)
def test_tensordot_dim_count(self):
class M(torch.nn.Module):
def forward(self, x, y):
output = torch.tensordot(x, y, 2)
return output
x = torch.randint(6, (7, 5, 3, 4))
y = torch.randint(6, (3, 4, 9, 2))
self.run_test(M(), (x, y))
@skipIfUnsupportedMinOpsetVersion(12)
def test_tensordot_dim_list(self):
class M(torch.nn.Module):
def forward(self, x, y):
output = torch.tensordot(x, y, ([1, -2, -1], [1, 0, 3]))
return output
x = torch.randint(6, (7, 4, 3, 5, 2))
y = torch.randint(6, (5, 4, 4, 2, 6))
self.run_test(M(), (x, y))
@skipIfUnsupportedMinOpsetVersion(12)
def test_tensordot_dynamic_dim(self):
class M(torch.nn.Module):
def forward(self, x, y):
output = torch.tensordot(x, y, 2)
return output
x = torch.randint(6, (7, 5, 3, 4))
y = torch.randint(6, (3, 4, 9, 2))
new_x = torch.randint(6, (8, 6, 2, 5))
new_y = torch.randint(6, (2, 5, 3, 4))
self.run_test(
M(),
(x, y),
additional_test_inputs=[(new_x, new_y)],
input_names=["input_x", "input_y"],
dynamic_axes={"input_x": [0, 1, 2, 3], "input_y": [0, 1, 2, 3]},
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_to_device(self):
class M_ToDevice(torch.nn.Module):
def forward(self, x, y):
return x.to(y.device), y
class M_ToDeviceDtype(torch.nn.Module):
def forward(self, x, y):
return x.to(y.device, dtype=torch.long), y
x = torch.randn(6)
y = torch.randn(6)
self.run_test(M_ToDevice(), (x, y))
self.run_test(M_ToDeviceDtype(), (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
@skipScriptTest()
def test_fill(self):
class FillModule(torch.nn.Module):
def forward(self, x, filled_value: int):
return x.fill_(filled_value)
x = torch.randn((4, 5, 6))
filled_value = 7
self.run_test(FillModule(), (x, filled_value))
class FillScalarModule(torch.nn.Module):
def forward(self, x):
res = x + 2
res.fill_(2.5)
return res, x
x = torch.ones(2, 3, 4, dtype=torch.long)
self.run_test(FillScalarModule(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_add_normal(self):
class M(torch.nn.Module):
def __init__(self, dim, index, updates):
super().__init__()
self.dim = dim
self.index = index
self.updates = updates
def forward(self, x):
x.index_add_(self.dim, self.index, self.updates)
return x
x = torch.ones(5, 4, 3)
updates = torch.tensor([[1], [4], [7], [3], [2]], dtype=torch.float)
index = torch.tensor([0, 2, 3, 1, 4])
self.run_test(M(0, index, updates), (x,))
updates = torch.tensor(
[[[1, 5, 7], [2, 4, 5], [5, 5, 6], [2, 3, 4]]], dtype=torch.float
)
index = torch.tensor([0, 2, 3, 1])
self.run_test(M(1, index, updates), (x,))
updates = torch.tensor(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9], [2, 3, 4]]], dtype=torch.float
)
index = torch.tensor([0, 2, 1])
self.run_test(M(2, index, updates), (x,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_add_dim_size_differ(self):
class M(torch.nn.Module):
def __init__(self, dim, index, updates):
super().__init__()
self.dim = dim
self.index = index
self.updates = updates
def forward(self, x):
x.index_add_(self.dim, self.index, self.updates)
return x
x = torch.ones(5, 4, 3)
updates = torch.tensor([[[1, 5, 7], [2, 4, 5], [5, 5, 6]]], dtype=torch.float)
index = torch.tensor([0, 2, 1])
self.run_test(M(1, index, updates), (x,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_add_in_loop(self):
class M(torch.nn.Module):
def __init__(self, dim, index, updates, loop_count):
super().__init__()
self.dim = dim
self.index = index
self.updates = updates
self.loop_count = loop_count
def forward(self, x):
for i in range(self.loop_count):
x.index_add_(self.dim, self.index, self.updates)
return x
x = torch.ones(5, 4, 3)
updates = torch.tensor(
[[[1, 5, 7], [2, 4, 5], [5, 5, 6], [2, 3, 4]]], dtype=torch.float
)
index = torch.tensor([0, 2, 3, 1])
loop_count = torch.randint(20, (1,))[0].item()
self.run_test(M(1, index, updates, loop_count), (x,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_add_if(self):
class M(torch.nn.Module):
def __init__(self, dim, updates, index_true, index_false):
super().__init__()
self.dim = dim
self.updates = updates
self.index_true = index_true
self.index_false = index_false
def forward(self, x, cond):
if cond:
x.index_add_(self.dim, self.index_true, self.updates)
else:
x.index_add_(self.dim, self.index_false, self.updates)
return x
x = torch.ones(5, 4, 3)
updates = torch.tensor(
[[[1, 5, 7], [2, 4, 5], [5, 5, 6], [2, 3, 4]]], dtype=torch.float
)
index_true = torch.tensor([0, 2, 3, 1])
index_false = torch.tensor([1, 0, 2, 3])
cond = torch.tensor(1, dtype=torch.bool)
self.run_test(
torch.jit.script(M(1, updates, index_true, index_false)), (x, cond)
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_add_dynamic_axes(self):
class M(torch.nn.Module):
def __init__(self, dim, index, updates):
super().__init__()
self.dim = dim
self.index = index
self.updates = updates
def forward(self, x):
x.index_add_(self.dim, self.index, self.updates)
return x
x = torch.ones(5, 4, 3)
y = torch.ones(7, 8, 3)
updates = torch.tensor(
[[[1, 5, 7], [2, 4, 5], [5, 5, 6], [2, 3, 4]]], dtype=torch.float
)
index = torch.tensor([0, 2, 3, 1])
self.run_test(
M(1, index, updates),
(x,),
additional_test_inputs=[y],
input_names=["input_1"],
dynamic_axes={"input_1": [0, 1]},
)
def test_roll(self):
class M(torch.nn.Module):
def __init__(self, shifts, dims):
super().__init__()
self.shifts = shifts
self.dims = dims
def forward(self, x):
return torch.roll(x, self.shifts, self.dims)
x = torch.randn(2, 3, 4)
self.run_test(M([1, 1], [1, 0]), (x,))
self.run_test(M([0, 1, 2], [1, 0, 2]), (x,))
self.run_test(M(2, 1), (x,))
self.run_test(M([-1, 3], [-2, -1]), (x,))
def test_sum(self):
class M(torch.nn.Module):
def forward(self, x):
return torch.sum(x)
x = torch.ones(12, 3)
self.run_test(M(), (x,), input_names=["x"], dynamic_axes={"x": [0]})
@skipShapeChecking
def test_sum_empty_tensor(self):
class M(torch.nn.Module):
def forward(self, x):
return x[0:0].sum(), x.sum()
x = torch.ones(12)
self.run_test(M(), (x,))
x = torch.ones(2, 0, 3)
self.run_test(M(), (x,))
x = torch.ones(0)
self.run_test(M(), (x,))
@skipIfUnsupportedMinOpsetVersion(11)
def test_broad_cast_tensors(self):
class M(torch.nn.Module):
def forward(self, x, y):
m = torch.broadcast_tensors(x, y)
return m
x = torch.randint(5, (1,))
y = torch.randint(5, (5,))
self.run_test(M(), (x, y))
x = torch.randint(5, (4, 2, 1, 4))
y = torch.randint(5, (2, 3, 1))
self.run_test(M(), (x, y))
x = torch.randn(2, 1, 4)
y = torch.randn(5, 2, 3, 1)
self.run_test(M(), (x, y))
@skipScriptTest()
@skipIfUnsupportedMinOpsetVersion(11)
def test_dist_normal(self):
class M(torch.nn.Module):
def forward(self, x, y):
return torch.distributions.Normal(x, y).sample().size(0), x, y
self.run_test(M(), (torch.tensor([0.0]), torch.tensor([[1.0], [2.0]])))
self.run_test(M(), (torch.tensor([0.0]), torch.tensor([1.0])))
self.run_test(
M(),
(
torch.tensor([[[0.0], [10.0]], [[2.0], [8.0]], [[2.0], [8.0]]]),
torch.tensor([[1.0], [3.0]]),
),
)
@skipScriptTest()
@skipIfUnsupportedMinOpsetVersion(11)
def test_dist_normal_correctness(self):
class M(torch.nn.Module):
def forward(self, x, y):
return torch.distributions.Normal(x, y).sample([20000])
expected_mean = 5.0
expected_std = 10.0
model_export = M()
dummy_input = (torch.tensor([expected_mean]), torch.tensor([expected_std]))
model_onnx = io.BytesIO()
torch.onnx.export(
model_export, dummy_input, model_onnx, opset_version=self.opset_version
)
ort_sess = verification._ort_session(model_onnx)
ort_out = verification._run_ort(ort_sess, inputs=dummy_input)
actual_std = np.std(ort_out)
actual_mean = np.mean(ort_out)
assert (
abs(abs(actual_mean) - expected_mean) <= expected_mean * 0.1
), "the difference between the mean of the ORT outputs and the expected mean is unacceptable."
assert (
abs(abs(actual_std) - expected_std) <= expected_std * 0.1
), "the difference between the standard deviation of the ORT outputs and the expected one is unacceptable."
@skipScriptTest()
@skipIfUnsupportedMinOpsetVersion(11)
def test_dist_uniform(self):
class M(torch.nn.Module):
def forward(self, x, y):
return torch.distributions.Uniform(x, y).sample().size(0), x, y
self.run_test(M(), (torch.tensor([0.0]), torch.tensor([10.0])))
self.run_test(M(), (torch.tensor([[0.0], [6.0]]), torch.tensor([[1.0], [7.0]])))
self.run_test(
M(), (torch.tensor([1.0]), torch.tensor([[10.0], [7.0], [9.0], [20.0]]))
)
@skipScriptTest()
@skipIfUnsupportedMinOpsetVersion(11)
def test_dist_uniform_correctness(self):
class M(torch.nn.Module):
def forward(self, x, y):
return torch.distributions.Uniform(x, y).sample([10000])
expected_min = 5.0
expected_max = 10.0
expected_mean = (expected_min + expected_max) / 2
model_export = M()
dummy_input = (torch.tensor([expected_min]), torch.tensor([expected_max]))
model_onnx = io.BytesIO()
torch.onnx.export(
model_export, dummy_input, model_onnx, opset_version=self.opset_version
)
ort_sess = verification._ort_session(model_onnx)
ort_out = verification._run_ort(ort_sess, inputs=dummy_input)
actual_min = np.min(ort_out)
actual_max = np.max(ort_out)
actual_mean = np.mean(ort_out)
assert (
actual_min >= expected_min
), "the minimum value of the ORT outputs is outside the expected range."
assert (
actual_max <= expected_max
), "the maximum value of the ORT outputs is outside the expected range."
assert (
abs(actual_mean - expected_mean) <= expected_mean * 0.05
), "the mean of the ORT outputs deviates too much from the expected mean."
@skipIfUnsupportedMinOpsetVersion(13)
def test_sequence_to_int(self):
class M(torch.nn.Module):
def forward(self, x):
result = torch.tensor([2 for i in range(x.size()[0])], dtype=torch.int)
return x, result
x = torch.randn(10, 5)
self.run_test(M(), (x,))
@skipIfUnsupportedMinOpsetVersion(13)
def test_sequence_to_float(self):
class M(torch.nn.Module):
def forward(self, x):
result = torch.tensor(
[1.1 for i in range(x.size()[0])], dtype=torch.float
)
return x, result
x = torch.randn(10, 5)
self.run_test(M(), (x,))
@skipIfUnsupportedMinOpsetVersion(13)
def test_sequence_to_bool(self):
class M(torch.nn.Module):
def forward(self, x):
result = torch.tensor(
[False for i in range(x.size()[0])], dtype=torch.bool
)
return x, result
x = torch.randn(10, 5)
self.run_test(M(), (x,))
def test_tuple_output_from_if_with_raised_exception(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, t: Tensor) -> Tuple[Tensor, Tensor]:
if float(t) < 0:
raise Exception("Negative input")
else:
return torch.zeros(5), torch.zeros(5)
x = torch.zeros(1)
self.run_test(torch.jit.script(M()), (x,))
# NOTE: For quantization tests, choose scale and zero point carefully
# such that inputs and outputs do not always overflow/underflow.
# Otherwise test results could be inaccurate.
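# A rough rule of thumb (illustrative only, not part of the original tests):
# for an affine quint8 mapping, scale ~= (float_max - float_min) / 255 and
# zero_point is the quantized value that represents 0.0. With scale=0.5 and
# zero_point=128 (the values used by several tests below), floats in roughly
# [-64, 63.5] are representable without saturating, e.g.:
#   q = torch.quantize_per_tensor(x, scale=0.5, zero_point=128, dtype=torch.quint8)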
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantized_linear(self):
model = torch.nn.quantized.Linear(4, 8)
# Set fixed weight to avoid flaky test.
weight = torch.quantize_per_tensor(
torch.arange(32, dtype=torch.float).view(8, 4), 0.5, 0, torch.qint8
)
# Set non-zero bias.
bias = torch.arange(8, dtype=torch.float)
model.set_weight_bias(weight, bias)
# Set fixed input to avoid flaky test.
input = torch.arange(16, dtype=torch.float).view(4, 4) - 8
input_tensor = torch.quantize_per_tensor(input, 0.5, 128, torch.quint8)
self.run_test(model, input_tensor)
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantized_conv2d(self):
model = torch.nn.quantized.Conv2d(16, 33, 3, stride=2)
# Manually initialize model weight and bias to random numbers.
# By default all zeros.
q_weight = torch.quantize_per_tensor(
torch.randn(33, 16, 3, 3), 0.5, 0, torch.qint8
)
bias = torch.arange(33).to(torch.float) - 16
model.set_weight_bias(q_weight, bias)
input = torch.randn(3, 16, 32, 32)
q_input = torch.quantize_per_tensor(input, 0.5, 128, torch.quint8)
self.run_test(model, q_input)
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantized_adaptive_avg_pool2d(self):
model = torch.nn.AdaptiveAvgPool2d((5, 7))
input = torch.randn(4, 3, 10, 14)
q_input = torch.quantize_per_tensor(input, 0.2, 128, torch.quint8)
self.run_test(model, q_input)
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantized_conv2d_relu(self):
model = torch.nn.intrinsic.quantized.ConvReLU2d(16, 33, 3, stride=2)
# Manually initialize model weight and bias to random numbers.
# By default all zeros.
q_weight = torch.quantize_per_tensor(
torch.randn(33, 16, 3, 3), 0.5, 0, torch.qint8
)
bias = torch.arange(33).to(torch.float) - 16
model.set_weight_bias(q_weight, bias)
input = torch.randn(3, 16, 32, 32)
q_input = torch.quantize_per_tensor(input, 0.5, 128, torch.quint8)
self.run_test(model, q_input)
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantized_hardswish(self):
model = torch.nn.quantized.Hardswish(1.0, 0)
input = torch.randn(2, 6)
q_input = torch.quantize_per_tensor(input, 0.26, 128, torch.quint8)
self.run_test(model, q_input)
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantized_hardsigmoid(self):
model = torch.nn.Hardsigmoid()
input = torch.randn(2, 6)
q_input = torch.quantize_per_tensor(input, 0.26, 128, torch.quint8)
self.run_test(model, q_input)
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantized_sigmoid(self):
model = torch.nn.Sigmoid()
input = torch.randn(2, 6)
q_input = torch.quantize_per_tensor(input, 0.26, 128, torch.quint8)
self.run_test(model, q_input)
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantized_flatten(self):
class FlattenModel(torch.nn.Module):
def forward(self, input):
return torch.flatten(input)
x = torch.quantize_per_tensor(torch.randn(1, 2, 3, 4), 1, 0, torch.quint8)
self.run_test(FlattenModel(), x)
@unittest.skip(
"ONNX Runtime 1.11 does not support quantized cat. Enable after ORT 1.12 is enabled in CI."
)
@skipIfUnsupportedMinOpsetVersion(10)
@skipScriptTest() # torch.jit.frontend.FrontendError: Cannot instantiate class 'QFunctional' in a script function:
def test_quantized_cat_when_concatenating_the_same_tensor(self):
class QuantizedSelfConcatenationModel(torch.nn.Module):
def forward(self, x):
return torch.nn.quantized.QFunctional().cat((x, x), dim=1)
q_input = torch.quantize_per_tensor(torch.ones(2, 3), 0.26, 128, torch.quint8)
self.run_test(QuantizedSelfConcatenationModel(), q_input)
@common_utils.parametrize(
"x, y",
[
common_utils.subtest(
[
torch.quantize_per_tensor(
torch.ones(2, 3), 0.26, 128, torch.quint8
),
torch.quantize_per_tensor(
torch.zeros(1, 3), 0.26, 128, torch.quint8
),
],
name="different_shape",
),
common_utils.subtest(
[
torch.quantize_per_tensor(
torch.ones(2, 3), 0.26, 128, torch.quint8
),
torch.quantize_per_tensor(torch.ones(2, 3), 42, 1, torch.quint8),
],
name="different_scale",
),
common_utils.subtest(
[
torch.quantize_per_tensor(
torch.ones(2, 3), 0.26, 128, torch.quint8
),
torch.quantize_per_tensor(torch.ones(2, 3), 0.26, 63, torch.quint8),
],
name="different_zero_point",
),
common_utils.subtest(
[
torch.quantize_per_tensor(
torch.ones(2, 3), 0.26, 128, torch.quint8
),
torch.quantize_per_tensor(torch.ones(2, 3), 0.1, 63, torch.quint8),
],
name="different_zero_point_and_scale",
),
],
)
@unittest.skip(
"ONNX Runtime 1.11 does not support quantized cat. Enable after ORT 1.12 is enabled in CI."
)
@skipIfUnsupportedMinOpsetVersion(10)
@skipScriptTest() # torch.jit.frontend.FrontendError: Cannot instantiate class 'QFunctional' in a script function:
def test_quantized_cat(self, x: torch.Tensor, y: torch.Tensor):
class QuantizedConcatenationModel(torch.nn.Module):
def forward(self, x, y):
return torch.nn.quantized.QFunctional().cat((x, y), dim=0)
self.run_test(QuantizedConcatenationModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(10)
# torch.jit.frontend.FrontendError:
# Cannot instantiate class 'QFunctional' in a script function
@skipScriptTest()
def test_quantized_arithmetic_qfunctional(self):
x = torch.quantize_per_tensor(torch.randn(3, 4), 0.2, 128, torch.quint8)
y = torch.quantize_per_tensor(torch.randn(3, 4), 0.2, 128, torch.quint8)
class ArithmeticModel(torch.nn.Module):
def forward(self, x, y):
o = torch.nn.quantized.QFunctional().add(x, y)
o = torch.nn.quantized.QFunctional().mul(o, x)
return o
self.run_test(ArithmeticModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantized_arithmetic(self):
x = torch.quantize_per_tensor(torch.randn(3, 4), 0.2, 128, torch.quint8)
y = torch.quantize_per_tensor(torch.randn(3, 4), 0.2, 128, torch.quint8)
class ArithmeticModel2(torch.nn.Module):
def forward(self, x, y):
o = torch.ops.quantized.add(x, y, 0.4, 100)
o = torch.ops.quantized.mul(o, x, 0.4, 100)
return o
self.run_test(ArithmeticModel2(), (x, y))
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantize_per_tensor(self):
class Module(torch.nn.Module):
def forward(self, x):
return (
torch.quantize_per_tensor(x, 0.2, 0, torch.qint8),
torch.quantize_per_tensor(x, 0.2, 128, torch.quint8),
)
x = torch.randn(4, 6)
self.run_test(Module(), x)
@skipIfUnsupportedMinOpsetVersion(10)
def test_dequantize(self):
class Module(torch.nn.Module):
def forward(self, x):
return torch.dequantize(x)
x = torch.quantize_per_tensor(torch.randn(3, 4), 0.2, 0, torch.qint8)
self.run_test(Module(), x)
@skipIfUnsupportedMinOpsetVersion(13)
def test_qat_linear_per_channel(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = torch.quantization.QuantStub()
self.linear = torch.nn.Linear(4, 3)
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.linear(x)
x = self.dequant(x)
return x
model = M()
model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
model = torch.quantization.prepare_qat(model)
# Set fixed weight and bias to avoid flaky test.
model.linear.weight = torch.nn.Parameter(
_construct_tensor_for_quantization_test((3, 4))
)
model.linear.bias = torch.nn.Parameter(torch.arange(3, dtype=torch.float))
model = torch.quantization.convert(model)
# Set fixed input to avoid flaky test.
input = _construct_tensor_for_quantization_test((4, 4), offset=-8)
self.run_test(model, input)
@skipIfUnsupportedMinOpsetVersion(13)
def test_qat_relu(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = torch.quantization.QuantStub()
self.relu = torch.nn.ReLU()
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.relu(x)
x = self.dequant(x)
return x
model = M()
model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
model = torch.quantization.prepare_qat(model)
model = torch.quantization.convert(model)
input = torch.randn(8, 4)
self.run_test(model, input)
@skipIfUnsupportedMinOpsetVersion(13)
def test_qat_conv2d(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = torch.quantization.QuantStub()
self.conv = torch.nn.Conv2d(2, 4, 3, stride=2)
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.dequant(x)
return x
model = M()
model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
model = torch.quantization.prepare_qat(model)
# Set fixed weight and bias to avoid flaky test.
model.conv.weight = torch.nn.Parameter(
_construct_tensor_for_quantization_test((2, 4, 3, 3), max_val=2)
)
model.conv.bias = torch.nn.Parameter(torch.tensor([0.0, 1.0]))
model = torch.quantization.convert(model)
# Set fixed input to avoid flaky test.
input = _construct_tensor_for_quantization_test(
(3, 4, 8, 8), offset=-384, max_val=12
)
self.run_test(model, input)
@skipIfUnsupportedMinOpsetVersion(13)
def test_qat_conv2d_relu(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = torch.quantization.QuantStub()
self.conv = torch.nn.Conv2d(2, 4, 3, stride=2)
self.relu = torch.nn.ReLU()
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.relu(x)
x = self.dequant(x)
return x
model = M()
model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
model = torch.quantization.prepare_qat(model)
# Set fixed weight and bias to avoid flaky test.
model.conv.weight = torch.nn.Parameter(
_construct_tensor_for_quantization_test((2, 4, 3, 3), max_val=2)
)
model.conv.bias = torch.nn.Parameter(torch.tensor([0.0, 1.0]))
model = torch.quantization.convert(model)
# Set fixed input to avoid flaky test.
input = _construct_tensor_for_quantization_test(
(3, 4, 8, 8), offset=-384, max_val=12
)
self.run_test(model, input)
@skipIfUnsupportedMinOpsetVersion(13)
def test_qat_conv2d_relu_fused(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = torch.quantization.QuantStub()
self.conv = torch.nn.Conv2d(2, 4, 3, stride=2)
self.relu = torch.nn.ReLU()
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.relu(x)
x = self.dequant(x)
return x
model = M()
model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
model = torch.quantization.fuse_modules(model.eval(), [["conv", "relu"]])
model = torch.quantization.prepare_qat(model.train())
# Set fixed weight and bias to avoid flaky test.
model.conv.weight = torch.nn.Parameter(
_construct_tensor_for_quantization_test((2, 4, 3, 3), max_val=2)
)
model.conv.bias = torch.nn.Parameter(torch.tensor([0.0, 1.0]))
model = torch.quantization.convert(model)
# Set fixed input to avoid flaky test.
input = _construct_tensor_for_quantization_test(
(3, 4, 8, 8), offset=-384, max_val=12
)
self.run_test(model, input)
@skipIfUnsupportedMinOpsetVersion(10)
def test_qat_maxpool2d(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = torch.quantization.QuantStub()
self.pool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.pool(x)
x = self.dequant(x)
return x
model = M()
model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
model = torch.quantization.prepare_qat(model.train())
model = torch.quantization.convert(model)
# Set fixed input to avoid flaky test.
input = _construct_tensor_for_quantization_test((4, 4, 3, 2))
self.run_test(model, input)
@skipIfUnsupportedMinOpsetVersion(10)
def test_qat_avg_pool2d(self):
model = torch.nn.Sequential(
torch.quantization.QuantStub(),
torch.nn.AvgPool2d(kernel_size=3, stride=2, padding=1),
torch.quantization.DeQuantStub(),
)
model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
model = torch.quantization.prepare_qat(model.train())
model = torch.quantization.convert(model)
input = _construct_tensor_for_quantization_test((4, 4, 3, 2))
self.run_test(model, input)
@skipIfUnsupportedMinOpsetVersion(11)
def test_qat_upsample_nearest2d(self):
model = torch.nn.Sequential(
torch.quantization.QuantStub(),
torch.nn.UpsamplingNearest2d(scale_factor=1.5),
torch.quantization.DeQuantStub(),
)
model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
model = torch.quantization.prepare_qat(model.train())
model = torch.quantization.convert(model)
input = _construct_tensor_for_quantization_test((4, 3, 2, 2))
self.run_test(model, input)
@skipIfUnsupportedMinOpsetVersion(9)
def test_convolution_allow_tf32(self):
class Module(torch.nn.Module):
def __init__(self, allow_tf32):
super().__init__()
self.allow_tf32 = allow_tf32
weight = torch.rand(32, 3, 3, 3)
self.weight = torch.nn.Parameter(weight)
def forward(self, x):
if self.allow_tf32:
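# Note (assumed argument order for this internal overload): after x, weight
# and bias=None, the positional args are stride, padding, dilation,
# transposed, output_padding, groups, benchmark, deterministic,
# cudnn_enabled and, in this branch only, a trailing allow_tf32 flag.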
return torch._convolution(
x,
self.weight,
None,
[2, 2],
[0, 0],
[1, 1],
False,
[0, 0],
1,
False,
False,
True,
True,
)
else:
return torch._convolution(
x,
self.weight,
None,
[2, 2],
[0, 0],
[1, 1],
False,
[0, 0],
1,
False,
False,
True,
)
x = torch.randn(1, 3, 224, 224)
self.run_test(Module(False), x, rtol=1e-3, atol=1e-6)
self.run_test(Module(True), x, rtol=1e-3, atol=1e-6)
@skipIfUnsupportedMinOpsetVersion(16)
@common_utils.parametrize(
"mode",
("bilinear", "nearest", "bicubic"),
)
@common_utils.parametrize(
"padding_mode",
("zeros", "border", "reflection"),
)
@common_utils.parametrize(
"align_corners",
(True, False),
name_fn=lambda align_corners: str(align_corners),
)
def test_grid_sample(self, mode, padding_mode, align_corners):
n, c, h_in, w_in, h_out, w_out = 1, 1, 3, 2, 2, 4
class GridSampleModule(torch.nn.Module):
def __init__(self, mode, padding_mode, align_corners) -> None:
super().__init__()
self.mode, self.padding_mode, self.align_corners = (
mode,
padding_mode,
align_corners,
)
def forward(self, input, grid):
return torch.nn.functional.grid_sample(
input, grid, self.mode, self.padding_mode, self.align_corners
)
atol_rtol = {}
if (mode, padding_mode) == ("bicubic", "border"):
if align_corners:
atol_rtol.update({"atol": 0.3, "rtol": 0.4})
else:
atol_rtol.update({"atol": 0.02, "rtol": 0.02})
input, grid = torch.randn(n, c, h_in, w_in), torch.randn(n, h_out, w_out, 2)
self.run_test(
GridSampleModule(mode, padding_mode, align_corners),
(input, grid),
**atol_rtol,
)
@skipTraceTest()
@skipIfUnsupportedMinOpsetVersion(16)
def test_uninitialized_optional(self):
class Module(torch.nn.Module):
def forward(self, y: Optional[Tensor]) -> Optional[Tensor]:
if y is not None:
if y.shape[1] < 5:
if y.size(0) == 1:
y = y + 4
else:
return y
return y
self.run_test(
Module(),
torch.ones((3, 4), dtype=torch.int),
dynamic_axes={"y": {0: "y0", 1: "y1"}},
input_names=["y"],
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_device_eq(self):
class M(torch.nn.Module):
def forward(self, a):
# exercise both Tensor.device (prim::device)
# and torch.device (prim::Constant).
if a.device != torch.device("cpu"):
return a
return torch.zeros_like(a)
mod = torch.jit.script(M()) # preserve control flow
self.run_test(
mod,
# In order for the ONNX model behavior to match the torch model, we
# need to construct input that has the same device that is checked for
# in forward(). In ONNX there is no such thing as a device, so the if
# condition is always false.
torch.randn(3, 3, device="cpu"),
# Force dynamic axes so that the output shape depends on the input.
# Otherwise the entire model will just return a constant and not have
# any inputs.
input_names=["a"],
dynamic_axes={"a": {0: "a0"}},
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_lerp(self):
class LerpModel(torch.nn.Module):
def forward(self, x):
return (
x.lerp(torch.full_like(x, 10), 0.4),
x.lerp(torch.full_like(x, 20), 0.7),
x.lerp(torch.full_like(x, 30), torch.tensor(0.4)),
x.lerp(torch.full_like(x, 40), x / 10.0),
x.lerp(torch.tensor(10.0), x / 10.0),
x.lerp(torch.tensor(10.0), 0.4),
x.lerp(torch.tensor(10.0), torch.tensor(0.4)),
)
self.run_test(LerpModel(), torch.rand(5, 4, 3))
# Cannot export with older opsets because of "ConstantFill" op
# ConstantFill was a temp op removed at opset 8. This is no longer supported by onnxruntime
# There are still some issues prevent us from enabling script test for these scenarios:
# test_gru_*:
# Operator aten::as_tensor is not supported by exporter yet.
# - https://msdata.visualstudio.com/Vienna/_workitems/edit/1055382
# Operator aten::_pack_padded_sequence is not supported by exporter yet.
# - https://msdata.visualstudio.com/Vienna/_workitems/edit/1055384
# test_elman_*:
# Compiling in script mode fails with errors like:
# torch.jit.frontend.UnsupportedNodeError: annotated assignments
# without assigned value aren't supported
# - https://msdata.visualstudio.com/Vienna/_workitems/edit/1160723
# test_lstm_*:
# Compiling in script mode fails with errors like:
# RuntimeError: Arguments for call are not valid.
# - https://msdata.visualstudio.com/Vienna/_workitems/edit/1160723
@skipScriptTest()
@skipIfUnsupportedMinOpsetVersion(9)
@common_utils.parametrize(
"name, nonlinearity",
[
("elman", "relu"),
("elman", "tanh"),
("lstm", None),
("gru", None),
],
)
@common_utils.parametrize(**_parametrize_rnn_args("layers"))
@common_utils.parametrize(**_parametrize_rnn_args("bidirectional"))
@common_utils.parametrize(**_parametrize_rnn_args("initial_state"))
@common_utils.parametrize(**_parametrize_rnn_args("packed_sequence"))
@common_utils.parametrize(**_parametrize_rnn_args("dropout"))
def test_rnn(self, *args, **kwargs):
self._dispatch_rnn_test(*args, **kwargs)
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_pytorch_onnx_onnxruntime.py
|
import difflib
import io
import numpy as np
import onnx
import onnx.helper
import torch
import torch.jit
import torch.onnx
def colonize(msg, sep=": "):
if not msg:
return ""
else:
return msg + sep
class Errors:
"""
An error-collecting object which supports error recovery.
It is intended to be used like a context manager:
>>> with Errors("Top-level error message") as errs:
>>> ...
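For example (illustrative; "actual" and "expected" are hypothetical
numpy arrays being compared):
>>> errs.checkAlmostEqual(actual, expected, "output 0")
>>> errs.failIfErrs()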
"""
def __init__(self, msg, rtol=1e-3, atol=1e-5):
self.msg = msg
self.errors = []
self.context = []
self.rtol = rtol
self.atol = atol
# Allocated upon instance creation so that multiple Errors
# can be used
class ShortCircuit(Exception):
pass
self.exc_class = ShortCircuit
def requireAlmostEqual(self, x, y, msg=None):
"""
Test that x and y are nearly equal (within the self.rtol/self.atol
tolerances); aborts execution if they are not.
"""
self.almostEqualAndThen(x, y, msg, self.failWith)
def checkAlmostEqual(self, x, y, msg=None):
"""
Test that x and y are nearly equal (within the self.rtol/self.atol
tolerances), but continue execution even if they are not equal.
To prevent error cascades, you should remember to call "failIfErrs"
at some later point in time.
"""
self.almostEqualAndThen(x, y, msg, self.addErr)
def almostEqualAndThen(self, x, y, msg, k):
"""
Helper for implementing "requireAlmostEqual" and "checkAlmostEqual".
Upon failure, invokes continuation "k" with the error message.
At the moment, only tests on "numpy.ndarray" are supported.
"""
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
try:
np.testing.assert_allclose(
x, y, rtol=self.rtol, atol=self.atol, equal_nan=True, verbose=True
)
except AssertionError as e:
k(f"{colonize(msg)}{str(e).lstrip()}")
else:
raise RuntimeError("Unsupported almost equal test")
def requireEqual(self, x, y, msg=None):
"""
Test that x and y are equal; aborts execution if they are not.
"""
self.equalAndThen(x, y, msg, self.failWith)
def checkEqual(self, x, y, msg=None):
"""
Test that x and y are equal, but continue execution even if they are not equal.
To prevent error cascades, you should remember to call "failIfErrs"
at some later point in time.
"""
self.equalAndThen(x, y, msg, self.addErr)
# Bit-for-bit accuracy test
def equalAndThen(self, x, y, msg, k):
"""
Helper for implementing "requireEqual" and "checkEqual". Upon failure,
invokes continuation "k" with the error message.
"""
if isinstance(x, onnx.TensorProto) and isinstance(y, onnx.TensorProto):
self.equalAndThen(x.name, y.name, msg, k)
# Use numpy for the comparison
t1 = onnx.numpy_helper.to_array(x)
t2 = onnx.numpy_helper.to_array(y)
new_msg = f"{colonize(msg)}In embedded parameter '{x.name}'"
self.equalAndThen(t1, t2, new_msg, k)
elif isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
try:
np.testing.assert_equal(x, y)
except AssertionError as e:
k("{}{}".format(colonize(msg, ": "), str(e).lstrip()))
else:
if x != y:
# TODO: Better algorithm for lists
sx = str(x)
sy = str(y)
if len(sx) > 40 or len(sy) > 40 or "\n" in sx or "\n" in sy:
# long form
sep = "=" * 50
k(
"\n{}The value\n{}\n{}\n{}\n\ndoes not equal\n\n{}\n{}\n{}".format(
colonize(msg, ":\n"), sep, sx, sep, sep, sy, sep
)
)
else:
k(f"{colonize(msg)}{sx} != {sy}")
def requireMultiLineEqual(self, x, y, msg=None):
"""
Test that long, multi-line strings x and y are equal;
aborts execution if they are not.
"""
self.multiLineEqualAndThen(x, y, msg, self.failWith)
def multiLineEqualAndThen(self, x, y, msg, k):
"""
Helper for implementing "requireMultiLineEqual". Upon failure,
invokes continuation "k" with the error message.
"""
if msg is None:
msg = "Strings are not equal"
if x != y:
diff = difflib.ndiff(x.splitlines(True), y.splitlines(True))
k("{}{}".format(colonize(msg, ":\n\n"), "".join(diff)))
def addErr(self, msg):
"""
Add an error to the error context, but continue executing.
"""
# TODO: instead of immediately concatenating the context in the msg,
# attach it as metadata and make a decision how to format it later.
msg_w_ctx = msg
for c in reversed(self.context):
msg_w_ctx += "\n\n * " + "\n ".join(c.splitlines())
self.errors.append(msg_w_ctx)
def fail(self):
"""
Immediately fail and short-circuit to the next recovery context.
NB: It is an error to "fail" without having added any errors to
the error context.
"""
raise self.exc_class()
def failWith(self, msg):
"""
Add an error to the error context, and then short-circuit.
"""
self.addErr(msg)
self.fail()
def failIfErrs(self):
"""
If there are any errors in the error context, short-circuit.
This is used to prevent error cascades.
"""
if self.errors:
self.fail()
def recover(self):
"""
Returns a context manager which can be used to recover in case of
an error. Example usage:
>>> with errs.recover():
>>> ...
"""
parent_self = self
class Recover:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type == parent_self.exc_class:
return True
return Recover()
def addErrCtxt(self, msg):
"""
Returns a context manager which encloses a fragment of code with
an extra contextual message, e.g., where an error occurred, or a hint
applicable to all errors in the area. Example usage:
>>> with errs.addErrCtx("Some text"):
>>> ...
"""
parent_self = self
class AddContext:
def __enter__(self):
parent_self.context.append(msg)
def __exit__(self, exc_type, exc_value, traceback):
parent_self.context.pop()
return AddContext()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.errors:
errors_msg = "\n\n".join("ERROR: " + x for x in self.errors)
final_msg = "{}\n{}\n{}".format(self.msg, "-" * 70, errors_msg)
raise AssertionError(final_msg)
if exc_type == self.exc_class:
raise RuntimeError("ShortCircuit was raised, but no errors were recorded")
def verify(
model,
args,
backend,
verbose=False,
training=torch.onnx.TrainingMode.EVAL,
rtol=1e-3,
atol=1e-7,
test_args=2,
do_constant_folding=True,
opset_version=None,
keep_initializers_as_inputs=True,
add_node_names=False,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
input_names=None,
dynamic_axes=None,
remained_onnx_input_idx=None,
):
"""
Export a model into ONNX, import it into a specified ONNX backend, and then
on a few random inputs verify that PyTorch and the backend produced the same
results. Requires onnx to be installed.
This function may spuriously fail: some operators are implemented with
different numerical precision in an ONNX backend, in which case an unstable
network (e.g., Inception) may blow up these numerical instabilities. This
situation is less likely to happen if your model has been trained. However,
if this is not the case, you may have found a bug! Please report it to the
PyTorch developers. You can also debug the issue yourself by removing
suffixes of operators from your model until verification passes.
For reproducibility, we recommend explicitly setting PyTorch's seed before
invoking this function.
Args:
model (torch.nn.Module): the model to be exported and verified
args (tuple of arguments): the inputs to
the model, e.g., such that ``model(*args)`` is a valid
invocation of the model. Any non-Variable arguments will
be hard-coded into the exported model; any Variable arguments
will become inputs of the exported model, in the order they
occur in args. If args is a Variable, this is equivalent
to having called it with a 1-ary tuple of that Variable.
(Note: passing keyword arguments to the model is not currently
supported. Give us a shout if you need it.)
backend (onnx.backend module): ONNX backend to verify with
verbose (bool, default False): if specified, we will print out a debug
description of the trace being exported.
training (TrainingMode, default TrainingMode.EVAL): export the model in the
given training mode. At the moment, ONNX is oriented towards exporting
models for inference only, so you will generally not need to change this.
rtol (float, default 1e-3): relative precision required
test_args (int or iterable of args, default 2):
either an integer specifying the number
of random arguments to generate, or an iterable producing arguments
to test under.
opset_version (int, default None): the opset version of the model to
export. If not specified, the default value in symbolic_helper will
be used in utils._export().
operator_export_type (enum, default OperatorExportTypes.ONNX): the operator
export type to use when exporting the model. The default value converts
all operators to ONNX ops.
input_names (list of string): list of input names.
dynamic_axes (dict of (string, list)): dynamic_axes.
remained_onnx_input_idx (list of int, default None): The remained ONNX input index.
"""
def _nested_map(condition, fn, condition_msg=None):
def _map(obj):
if condition(obj):
return fn(obj)
elif obj is None:
return None
elif isinstance(obj, (list, tuple)):
return type(obj)(_map(x) for x in obj)
else:
raise ValueError(
"Auto nesting doesn't know how to process "
"an input object of type "
+ torch.typename(obj)
+ (
". Accepted types: "
+ condition_msg
+ ", or lists/tuples of them"
if condition_msg
else ""
)
)
return _map
def _iter_filter(condition, allow_unknown=False, condition_msg=None):
def _iter(obj):
if condition(obj):
yield obj
elif obj is None:
return
elif isinstance(obj, (list, tuple)):
for o in obj:
yield from _iter(o)
elif allow_unknown:
yield obj
else:
raise ValueError(
"Auto nesting doesn't know how to process "
"an input object of type "
+ torch.typename(obj)
+ (
". Accepted types: "
+ condition_msg
+ ", or lists/tuples of them"
if condition_msg
else ""
)
)
return _iter
def is_tensor(o):
return isinstance(o, torch.Tensor)
_iter_tensors = _iter_filter(is_tensor, condition_msg="Tensors")
def randomize_arg(arg):
new_data = arg.data.clone()
# For now, don't try randomizing non-float tensors; these
# are likely to be things like indices, where just randomly
# spattering some longs is unlikely to work. One way we could
# make this work is to apply a random permutation or something.
if arg.is_floating_point():
new_data.uniform_()
return torch.autograd.Variable(new_data, requires_grad=arg.requires_grad)
randomize_args = _nested_map(is_tensor, randomize_arg)
def backend_args(args):
# TODO: onnx should accept iterables
return tuple(v.data.cpu().numpy() for v in _iter_tensors(args))
def load_bytes(b):
b.seek(0)
x = onnx.load(b)
# doc_string has stack traces - let's remove them to make comparison
# sane
onnx.helper.strip_doc_string(x)
return x
# Special case for common case of passing a single Tensor
if isinstance(args, torch.Tensor):
args = (args,)
with torch.onnx.select_model_mode_for_export(model, training):
proto_bytes = io.BytesIO()
torch_out = torch.onnx._export(
model,
args,
proto_bytes,
verbose=verbose,
do_constant_folding=do_constant_folding,
opset_version=opset_version,
keep_initializers_as_inputs=keep_initializers_as_inputs,
add_node_names=add_node_names,
operator_export_type=operator_export_type,
input_names=input_names,
dynamic_axes=dynamic_axes,
)
if isinstance(model, torch.jit.ScriptModule):
torch_out = model(*args)
proto = load_bytes(proto_bytes)
prepared = backend.prepare(proto)
def run(args, remained_onnx_input_idx):
alt_proto_bytes = io.BytesIO()
torch_out = torch.onnx._export(
model,
args,
alt_proto_bytes,
verbose=verbose,
do_constant_folding=do_constant_folding,
opset_version=opset_version,
keep_initializers_as_inputs=keep_initializers_as_inputs,
add_node_names=add_node_names,
operator_export_type=operator_export_type,
input_names=input_names,
dynamic_axes=dynamic_axes,
)
if isinstance(model, torch.jit.ScriptModule):
torch_out = model(*args)
alt_proto = load_bytes(alt_proto_bytes)
if proto.SerializeToString() != alt_proto.SerializeToString():
# OK, let's try to figure out what happened.
msg = "When I exported your model with different inputs, the result was different."
if not verbose:
msg += "\n(To get more information, run torch.onnx.verify(..., verbose=True))"
with Errors(msg, rtol=rtol, atol=atol) as errs:
# First, check that we have the same number of parameters and
# that they're in the same order. If they don't match, something has *really* gone wrong.
initializer_order_hint = (
"This is really strange! The second time I exported your model,\n"
"it had a different set of parameters. Are you assigning Parameters\n"
"in the forward() of your model definition?"
)
with errs.addErrCtxt(initializer_order_hint):
errs.requireEqual(
[x.name for x in proto.graph.initializer],
[x.name for x in alt_proto.graph.initializer],
msg="Parameters list differs",
)
# Now check if the embedded parameters are actually the same
initializer_hint = (
"A difference in embedded parameters usually means that\n"
"your model is updating parameters/buffers even in inference\n"
"mode. Look for a buggy nn.Module which isn't respecting train().\n"
)
with errs.recover(), errs.addErrCtxt(initializer_hint):
for x, y in zip(
proto.graph.initializer, alt_proto.graph.initializer
):
errs.checkEqual(x, y)
# Next, check if the model structure lines up.
structure_hint = (
"A difference in model structure usually means that\n"
"your model has dynamic control flow. These models are not\n"
"currently supported by the exporter."
)
with errs.recover(), errs.addErrCtxt(structure_hint):
# Delete initializers since we already tested them
stripped_proto = onnx.ModelProto()
stripped_proto.CopyFrom(proto)
del stripped_proto.graph.initializer[:]
stripped_alt_proto = onnx.ModelProto()
stripped_alt_proto.CopyFrom(alt_proto)
del stripped_alt_proto.graph.initializer[:]
# Compare the printable graph representations first
errs.requireMultiLineEqual(
onnx.helper.printable_graph(stripped_proto.graph),
onnx.helper.printable_graph(stripped_alt_proto.graph),
)
# Compare the actual protobuf text formats now (not
# very user-friendly!)
errs.requireMultiLineEqual(
str(stripped_proto), str(stripped_alt_proto)
)
# One last ditch effort, using built-in equality on
# protobufs
errs.requireEqual(stripped_proto, stripped_alt_proto)
errs.failIfErrs()
# At this point, we should have figured out why the binary
# protobufs differed, and short-circuited out of this code
# with a helpful error message. But what if we didn't?
# We better still try to give a good error message in this
# case. We EXPECT these requires to fail. If they don't,
# that is a bug in verify
errs.requireEqual(proto, alt_proto)
errs.requireEqual(
proto_bytes.getvalue(), alt_proto_bytes.getvalue()
)
raise AssertionError()
# TODO: test that the traced model also returns the same thing...
run_helper(torch_out, args, remained_onnx_input_idx)
# Factored out so we can avoid one run of the model
def run_helper(torch_out, args, remained_onnx_input_idx):
onnx_input = backend_args(args)
if remained_onnx_input_idx is not None:
input_onnx = []
for idx in remained_onnx_input_idx:
input_onnx.append(onnx_input[idx])
onnx_input = tuple(input_onnx)
backend_out = prepared.run(onnx_input)
if isinstance(torch_out, torch.Tensor):
torch_out = (torch_out,)
torch_out, _ = torch._C._jit_flatten(torch_out)
# NB: onnx backend NEVER returns bare numpy array
msg = "ONNX backend returned different results from PyTorch"
result_hint = (
"If you are not using trained parameters, a difference in results\n"
"could mean that your network is numerically unstable. Otherwise\n"
"it indicates a bug in PyTorch/ONNX; please file a bug report."
)
with Errors(msg, rtol=rtol, atol=atol) as errs, errs.addErrCtxt(
result_hint
):
for i, (x, y) in enumerate(zip(torch_out, backend_out)):
errs.checkAlmostEqual(x.data.cpu().numpy(), y, f"In output {i}")
run_helper(torch_out, args, remained_onnx_input_idx)
if isinstance(test_args, int):
for i in range(test_args):
run(randomize_args(args), remained_onnx_input_idx)
else:
for test_arg in test_args:
run(test_arg, remained_onnx_input_idx)
|
pytorch-master
|
test/onnx/verify.py
|
# Owner(s): ["module: onnx"]
import io
import itertools
import onnx
import torch
import torch.onnx
from torch.nn import Module
from torch.onnx import producer_name, producer_version
from torch.onnx._globals import GLOBALS
from torch.testing._internal import common_utils
def check_onnx_opset_operator(
model, ops, opset_version=GLOBALS.export_onnx_opset_version
):
# check_onnx_components
assert (
model.producer_name == producer_name
and model.producer_version == producer_version
and model.opset_import[0].version == opset_version
)
# check the schema with the onnx checker
onnx.checker.check_model(model)
# check target type and attributes
graph = model.graph
# ops should contain an object for each node
# in graph.node, in the right order.
# At least the op_name should be specified,
# but the op's attributes can optionally be
# specified as well
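# For example (illustrative; mirrors the literals used by the tests below):
#   ops = [
#       {"op_name": "Constant"},
#       {"op_name": "TopK", "attributes": [{"name": "axis", "i": -1, "type": 2}]},
#   ]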
assert len(ops) == len(graph.node)
for i in range(0, len(ops)):
assert graph.node[i].op_type == ops[i]["op_name"]
if "attributes" in ops[i]:
attributes = ops[i]["attributes"]
assert len(attributes) == len(graph.node[i].attribute)
for j in range(0, len(attributes)):
for attribute_field in attributes[j].keys():
assert attributes[j][attribute_field] == getattr(
graph.node[i].attribute[j], attribute_field
)
def check_onnx_opsets_operator(
module,
x,
ops,
opset_versions,
training=torch.onnx.TrainingMode.EVAL,
input_names=None,
dynamic_axes=None,
):
for opset_version in opset_versions:
f = io.BytesIO()
torch.onnx.export(
module,
x,
f,
opset_version=opset_version,
training=training,
input_names=input_names,
dynamic_axes=dynamic_axes,
)
model = onnx.load(io.BytesIO(f.getvalue()))
check_onnx_opset_operator(model, ops[opset_version], opset_version)
class TestONNXOpset(common_utils.TestCase):
def test_opset_fallback(self):
class MyModule(Module):
def forward(self, x):
return torch.isnan(x)
ops = [{"op_name": "IsNaN"}]
ops = {9: ops, 10: ops}
x = torch.tensor([1.0, float("nan"), 2.0])
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])
def test_topk(self):
class MyModule(Module):
def forward(self, x):
return torch.topk(x, 3)
ops_9 = [
{
"op_name": "TopK",
"attributes": [
{"name": "axis", "i": -1, "type": 2},
{"name": "k", "i": 3, "type": 2},
],
}
]
ops_10 = [
{"op_name": "Constant"},
{"op_name": "TopK", "attributes": [{"name": "axis", "i": -1, "type": 2}]},
]
ops = {9: ops_9, 10: ops_10}
x = torch.arange(1.0, 6.0, requires_grad=True)
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])
# test with dynamic k
class MyModuleDynamic(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input, k):
return torch.topk(input, k)
ops_10 = [
{"op_name": "Constant", "attributes": [{"name": "value", "type": 4}]},
{"op_name": "Reshape"},
{"op_name": "TopK", "attributes": [{"name": "axis", "i": -1, "type": 2}]},
]
ops = {10: ops_10}
x = torch.arange(1.0, 6.0, requires_grad=True)
k = torch.tensor(3)
module = MyModuleDynamic()
check_onnx_opsets_operator(module, [x, k], ops, opset_versions=[10])
def test_maxpool(self):
module = torch.nn.MaxPool1d(2, stride=1)
ops_9 = [
{
"op_name": "MaxPool",
"attributes": [
{"name": "kernel_shape", "ints": [2], "type": 7},
{"name": "pads", "ints": [0, 0], "type": 7},
{"name": "strides", "ints": [1], "type": 7},
],
}
]
ops_10 = [
{
"op_name": "MaxPool",
"attributes": [
{"name": "ceil_mode", "i": 0, "type": 2},
{"name": "kernel_shape", "ints": [2], "type": 7},
{"name": "pads", "ints": [0, 0], "type": 7},
{"name": "strides", "ints": [1], "type": 7},
],
}
]
ops = {9: ops_9, 10: ops_10}
x = torch.randn(20, 16, 50)
check_onnx_opsets_operator(module, x, ops, opset_versions=[9, 10])
# add test with dilations
module = torch.nn.MaxPool1d(2, stride=1, dilation=2)
ops_10 = [
{
"op_name": "MaxPool",
"attributes": [
{"name": "ceil_mode", "i": 0, "type": 2},
{"name": "dilations", "ints": [2], "type": 7},
{"name": "kernel_shape", "ints": [2], "type": 7},
{"name": "pads", "ints": [0, 0], "type": 7},
{"name": "strides", "ints": [1], "type": 7},
],
}
]
ops = {10: ops_10}
x = torch.randn(20, 16, 50)
check_onnx_opsets_operator(module, x, ops, opset_versions=[10])
def test_upsample(self):
class MyModule(Module):
def __init__(self):
super().__init__()
def forward(self, x):
size = [v * 2 for v in x.size()[2:]]
size = [int(i) for i in size]
return torch.nn.functional.interpolate(x, size=size, mode="nearest")
module = MyModule()
ops8 = [
{
"op_name": "Upsample",
"attributes": [
{"name": "mode", "s": (b"nearest"), "type": 3},
{"name": "scales", "floats": [1.0, 1.0, 2.0, 2.0], "type": 6},
],
}
]
ops9 = [
{"op_name": "Constant"},
{
"op_name": "Upsample",
"attributes": [{"name": "mode", "s": (b"nearest"), "type": 3}],
},
]
ops = {8: ops8, 9: ops9}
x = torch.randn(2, 2, 2, 2)
check_onnx_opsets_operator(module, x, ops, opset_versions=[8, 9])
def test_cast_constant(self):
class MyModule(Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x - 1
module = MyModule()
ops_8 = [
{"op_name": "Constant"},
{"op_name": "Cast", "attributes": [{"name": "to", "i": 7, "type": 2}]},
{"op_name": "Sub"},
]
ops_9 = [{"op_name": "Constant"}, {"op_name": "Sub"}]
ops = {8: ops_8, 9: ops_9}
x = torch.ones(5, 6, dtype=torch.long)
check_onnx_opsets_operator(module, x, ops, opset_versions=[8, 9])
def test_slice(self):
class MyModule(Module):
def forward(self, x):
return x[0:1]
ops_9 = [
{
"op_name": "Slice",
"attributes": [
{"name": "axes", "ints": [0], "type": 7},
{"name": "ends", "ints": [1], "type": 7},
{"name": "starts", "ints": [0], "type": 7},
],
}
]
ops_10 = [
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Slice", "attributes": []},
]
ops = {9: ops_9, 10: ops_10}
x = torch.randn(3)
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])
class DynamicSliceModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x[1 : x.size(0)]
module = DynamicSliceModel()
x = torch.rand(1, 2)
ops_10 = [
{"op_name": "Shape"},
{"op_name": "Constant"},
{"op_name": "Gather", "attributes": [{"name": "axis", "i": 0, "type": 2}]},
{"op_name": "Constant"},
{
"op_name": "Unsqueeze",
"attributes": [{"name": "axes", "i": 0, "type": 7}],
},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Slice", "attributes": []},
]
ops = {10: ops_10}
check_onnx_opsets_operator(
module,
x,
ops,
opset_versions=[10],
input_names=["x"],
dynamic_axes={"x": [0, 1]},
)
ops_10 = [
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Slice", "attributes": []},
]
ops = {10: ops_10}
check_onnx_opsets_operator(module, x, ops, opset_versions=[10])
def test_flip(self):
class MyModule(Module):
def forward(self, x):
return torch.flip(x, dims=[0])
ops_10 = [
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Slice", "attributes": []},
]
ops = {10: ops_10}
import numpy
x = torch.tensor(numpy.arange(6.0).reshape(2, 3))
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[10])
def test_dropout(self):
class MyModule(Module):
def __init__(self):
super().__init__()
self.dropout = torch.nn.Dropout(0.5)
def forward(self, x):
return self.dropout(x)
x = torch.randn(1, 2, 3)
# we should only export the onnx Dropout op in training mode; test both modes
# test training mode
ops = [
{
"op_name": "Dropout",
"attributes": [{"name": "ratio", "f": 0.5, "type": 1}],
}
]
ops = {9: ops, 10: ops}
check_onnx_opsets_operator(
MyModule(),
x,
ops,
opset_versions=[9, 10],
training=torch.onnx.TrainingMode.TRAINING,
)
# test eval mode
ops = [{"op_name": "Identity"}]
ops = {9: ops, 10: ops}
check_onnx_opsets_operator(
MyModule(),
x,
ops,
opset_versions=[9, 10],
training=torch.onnx.TrainingMode.EVAL,
)
def test_full(self):
class MyModule(Module):
def forward(self, x):
return torch.full((3, 4), x)
ops = [
{"op_name": "Constant"},
{"op_name": "ConstantOfShape"},
{"op_name": "Add"},
]
ops = {9: ops, 10: ops}
x = torch.tensor(12.0)
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])
def test_interpolate(self):
class MyModel(torch.nn.Module):
def forward(self, x):
size = [v * 2 for v in x.size()[2:]]
return torch.nn.functional.interpolate(x, size=size, mode="nearest")
ops_9 = [
{"op_name": "Shape"},
{"op_name": "Constant"},
{"op_name": "Gather"},
{"op_name": "Shape"},
{"op_name": "Constant"},
{"op_name": "Gather"},
{"op_name": "Constant"},
{"op_name": "Mul"},
{"op_name": "Constant"},
{"op_name": "Mul"},
{"op_name": "Unsqueeze"},
{"op_name": "Unsqueeze"},
{"op_name": "Concat"},
{"op_name": "Cast"},
{"op_name": "Shape"},
{"op_name": "Slice"},
{"op_name": "Cast"},
{"op_name": "Div"},
{"op_name": "Constant"},
{"op_name": "Concat"},
{
"op_name": "Upsample",
"attributes": [{"name": "mode", "s": (b"nearest"), "type": 3}],
},
]
ops_10 = [
{"op_name": "Shape"},
{"op_name": "Constant"},
{"op_name": "Gather"},
{"op_name": "Shape"},
{"op_name": "Constant"},
{"op_name": "Gather"},
{"op_name": "Constant"},
{"op_name": "Mul"},
{"op_name": "Constant"},
{"op_name": "Mul"},
{"op_name": "Unsqueeze"},
{"op_name": "Unsqueeze"},
{"op_name": "Concat"},
{"op_name": "Cast"},
{"op_name": "Shape"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Slice"},
{"op_name": "Cast"},
{"op_name": "Div"},
{"op_name": "Constant"},
{"op_name": "Concat"},
{
"op_name": "Resize",
"attributes": [{"name": "mode", "s": (b"nearest"), "type": 3}],
},
]
ops = {9: ops_9, 10: ops_10}
x = torch.randn(1, 2, 3, 4, requires_grad=True)
check_onnx_opsets_operator(
MyModel(),
x,
ops,
opset_versions=[9, 10],
input_names=["x"],
dynamic_axes={"x": [0, 1, 2, 3]},
)
ops_9 = [
{"op_name": "Constant"},
{"op_name": "Shape"},
{"op_name": "Slice"},
{"op_name": "Cast"},
{"op_name": "Div"},
{"op_name": "Constant"},
{"op_name": "Concat"},
{
"op_name": "Upsample",
"attributes": [{"name": "mode", "s": (b"nearest"), "type": 3}],
},
]
ops_10 = [
{"op_name": "Constant"},
{"op_name": "Shape"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Slice"},
{"op_name": "Cast"},
{"op_name": "Div"},
{"op_name": "Constant"},
{"op_name": "Concat"},
{"op_name": "Resize"},
]
ops = {9: ops_9, 10: ops_10}
x = torch.randn(1, 2, 3, 4, requires_grad=True)
check_onnx_opsets_operator(MyModel(), x, ops, opset_versions=[9, 10])
class MyDynamicModel(torch.nn.Module):
def forward(self, x):
size = [v * 2 for v in x.size()[2:]]
# workaround for now: turn the dynamic sizes into constants
size = [int(i) for i in size]
return torch.nn.functional.interpolate(x, size=size, mode="nearest")
ops_9 = [
{"op_name": "Constant"},
{
"op_name": "Upsample",
"attributes": [{"name": "mode", "s": (b"nearest"), "type": 3}],
},
]
ops_10 = [
{"op_name": "Constant"},
{
"op_name": "Resize",
"attributes": [{"name": "mode", "s": (b"nearest"), "type": 3}],
},
]
ops = {9: ops_9, 10: ops_10}
x = torch.randn(20, 16, 50)
check_onnx_opsets_operator(MyDynamicModel(), x, ops, opset_versions=[9, 10])
def test_grid_sample(self):
n, c, h_in, w_in, h_out, w_out = 1, 1, 3, 2, 2, 4
ops = {16: [{"op_name": "GridSample"}]}
class MyModule(Module):
def forward(self, x, grid, mode, padding_mode, align_corners):
return torch.nn.functional.grid_sample(
x, grid, mode, padding_mode, align_corners
)
for mode, padding_mode, align_corners in itertools.product(
("bilinear", "nearest", "bicubic"),
("zeros", "border", "reflection"),
(True, False),
):
args = (
torch.randn(n, c, h_in, w_in), # x
torch.randn(n, h_out, w_out, 2), # grid,
mode,
padding_mode,
align_corners,
)
check_onnx_opsets_operator(
MyModule(),
args,
ops,
opset_versions=[16],
training=torch.onnx.TrainingMode.TRAINING,
)
check_onnx_opsets_operator(
MyModule(),
args,
ops,
opset_versions=[16],
training=torch.onnx.TrainingMode.EVAL,
)
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_onnx_opset.py
|
# Owner(s): ["module: onnx"]
import onnxruntime
import torch
from pytorch_test_common import skipIfNoCuda
from torch.onnx import verification
from torch.testing._internal import common_utils
def _jit_graph_to_onnx_model(graph, operator_export_type, opset_version):
r"""
This function exports torch::jit::Graph object
to serialized ONNX ModelProto.
This function is for testing purposes.
It only keeps the essential parts for IR graph conversions.
It also does not interact with actual PyTorch modules or
PyTorch tensor inputs.
"""
torch.onnx.symbolic_helper._set_opset_version(opset_version)
graph = torch.onnx.utils._optimize_graph(
graph, operator_export_type, params_dict={}
)
proto, _, _, _ = graph._export_onnx(
{},
opset_version,
{},
False,
operator_export_type,
False,
False,
{},
True,
"",
{},
)
return proto
class _TestJITIRToONNX:
"""Abstract base class for test cases.
Intentionally not a sub-class of unittest.TestCase so that unittest / pytest
don't run it directly. unitest.TestCase is mixed in as another base class when
creating concrete sub-types. See MakeTestCase().
"""
opset_version = -1 # Sub-classes must override
ort_providers = ["CPUExecutionProvider"]
check_shape = True
check_dtype = True
def run_test(self, graph_ir, example_inputs):
graph = torch._C.parse_ir(graph_ir)
jit_outs = torch._C._jit_interpret_graph(graph, example_inputs)
onnx_proto = _jit_graph_to_onnx_model(
graph, torch.onnx.OperatorExportTypes.ONNX, self.opset_version
)
ort_sess = onnxruntime.InferenceSession(
onnx_proto, providers=self.ort_providers
)
ort_outs = verification._run_ort(ort_sess, example_inputs)
verification._compare_ort_pytorch_outputs(
ort_outs,
jit_outs,
rtol=1e-3,
atol=1e-7,
check_shape=self.check_shape,
check_dtype=self.check_dtype,
acceptable_error_percentage=None,
)
def test_example_ir(self):
graph_ir = """
graph(%1 : Float(2, 3),
%2 : Float(2, 3)):
%3 : int = prim::Constant[value=1]()
%4 : Float(2, 3) = aten::add(%1, %2, %3)
return (%4)
"""
a = torch.randn(2, 3)
b = torch.randn(2, 3)
self.run_test(graph_ir, (a, b))
def test_add_sub_with_graph_inputs(self):
for op in ["add", "sub", "rsub"]:
graph_ir = f"""
graph(%1 : Float(2, 3),
%2 : Float(2, 3),
%3 : int):
%4 : Float(2, 3) = aten::{op}(%1, %2, %3)
return (%4)
"""
a = torch.randn(2, 3)
b = torch.randn(2, 3)
self.run_test(graph_ir, (a, b, 2))
def test_native_layer_norm(self):
graph_ir = """
graph(%x : Float(2, 3, 2),
%w : Float(3, 2),
%b : Float(3, 2)):
%5 : int = prim::Constant[value=3]()
%6 : int = prim::Constant[value=2]()
%7 : int[] = prim::ListConstruct(%5, %6)
%10 : float = prim::Constant[value=1.0000000000000001e-05]()
%11 : Float(2, 3, 2), %12 : Float(2, 1, 1), %13 : Float(2, 1, 1) = aten::native_layer_norm(%x, %7, %w, %b, %10)
return (%11, %12, %13)
"""
x = torch.randn(2, 3, 2)
w = torch.randn(3, 2)
b = torch.randn(3, 2)
self.run_test(graph_ir, (x, w, b))
def test_convolution(self):
graph_ir = """
graph(%1 : Tensor,
%2 : Tensor):
%3 : NoneType = prim::Constant()
%4 : int[] = prim::Constant[value=[1, 1]]()
%5 : int[] = prim::Constant[value=[0, 0]]()
%6 : bool = prim::Constant[value=0]()
%7 : int = prim::Constant[value=1]()
%8 : Tensor = aten::convolution(%1, %2, %3, %4, %5, %4, %6, %5, %7)
return (%8)
"""
x = torch.randn(8, 1, 5, 5)
w = torch.randn(4, 1, 3, 3)
self.run_test(graph_ir, (x, w))
def test_log_softmax(self):
graph_ir = """
graph(%x: Tensor):
%half_to_float: bool = prim::Constant[value=0]()
%dim: int = prim::Constant[value=1]()
%y = aten::_log_softmax(%x, %dim, %half_to_float)
return (%y)
"""
x = torch.randn(5, 2)
self.run_test(graph_ir, (x,))
@skipIfNoCuda
def test_log_softmax_half_to_float(self):
graph_ir = """
graph(%x: Tensor):
%half_to_float: bool = prim::Constant[value=1]()
%dim: int = prim::Constant[value=1]()
%y = aten::_log_softmax(%x, %dim, %half_to_float)
return (%y)
"""
x = torch.randn(5, 2).half().to("cuda")
self.run_test(graph_ir, (x,))
def test_native_dropout(self):
graph_ir = """
graph(%1 : Float(2, 3)):
%2 : float = prim::Constant[value=0.0]()
%training : bool = prim::Constant[value=1]()
%3 : Tensor, %4 : Tensor = aten::native_dropout(%1, %2, %training)
return (%3, %4)
"""
a = torch.randn(2, 3)
self.run_test(graph_ir, (a,))
def MakeTestCase(opset_version: int) -> type:
name = f"TestJITIRToONNX_opset{opset_version}"
return type(
str(name),
(common_utils.TestCase,),
dict(_TestJITIRToONNX.__dict__, opset_version=opset_version),
)
TestJITIRToONNX_opset14 = MakeTestCase(14)
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_pytorch_jit_onnx.py
|
# Owner(s): ["module: onnx"]
"""Tests for onnx export that don't run the exported model."""
import contextlib
import io
import itertools
import unittest
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Type, Union
import onnx
import onnx.numpy_helper
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.onnx import symbolic_helper, symbolic_registry, utils
from torch.onnx._globals import GLOBALS
from torch.testing._internal import common_utils
def export_to_onnx(
model: Union[torch.nn.Module, torch.jit.ScriptFunction],
input: Tuple[torch.Tensor],
custom_ops: Optional[
Iterable[
Union[contextlib.AbstractContextManager, contextlib.ContextDecorator],
]
] = None,
mocks: Optional[Iterable] = None,
operator_export_type: torch.onnx.OperatorExportTypes = torch.onnx.OperatorExportTypes.ONNX,
opset_version: int = GLOBALS.export_onnx_opset_version,
) -> onnx.ModelProto:
"""Exports `model(input)` to ONNX and returns it.
Custom operators and/or unittest patches can be used to help reproduce specific behaviors.
Args:
model: model to export
input: model input with same format as `torch.onnx.export(..,args,...)`
custom_ops: list of custom operators to use during export
mocks: list of mocks to use during export
operator_export_type: export type as described by `torch.onnx.export(...operator_export_type,...)`
opset_version: ONNX opset version as described by `torch.onnx.export(...opset_version,...)`
Returns:
A valid ONNX model (`onnx.ModelProto`)
"""
custom_ops = custom_ops or []
mocks = mocks or []
with contextlib.ExitStack() as stack:
for ctx in itertools.chain(custom_ops, mocks):
stack.enter_context(ctx)
f = io.BytesIO()
torch.onnx.export(
model,
input,
f,
operator_export_type=operator_export_type,
opset_version=opset_version,
)
# Validate ONNX graph before returning it
onnx_model = onnx.load_from_string(f.getvalue())
onnx.checker.check_model(onnx_model)
return onnx_model
@common_utils.instantiate_parametrized_tests
class TestOptionalOutput(common_utils.TestCase):
# TODO: Move these tests to test_pytorch_onnx_onnxruntime once
# ONNX Runtime 1.11 is released and supports opset 16.
class IfNoneInput(torch.nn.Module):
def forward(self, x) -> Optional[Tensor]:
y: Optional[Tensor] = None
if x.size(0) > 1:
y = x
return y
class IfNoneOutput(torch.nn.Module):
def forward(self, x) -> Optional[Tensor]:
y: Optional[Tensor] = x
if x.size(0) > 1:
y = None
return y
class LoopNoneInput(torch.nn.Module):
def forward(self, x) -> Optional[Tensor]:
y: Optional[Tensor] = None
for _ in range(x.size(0)):
y = x
return y
class LoopNoneOutput(torch.nn.Module):
def forward(self, x) -> Optional[Tensor]:
y: Optional[Tensor] = x
for _ in range(x.size(0)):
y = None
return y
@common_utils.parametrize(
"module_class",
(IfNoneInput, IfNoneOutput, LoopNoneInput, LoopNoneOutput),
name_fn=lambda module_class: module_class.__name__,
)
@common_utils.parametrize("x_size", (0, 1), name_fn=lambda x_size: str(x_size))
def test_optional_output(self, module_class: Type[torch.nn.Module], x_size: int):
# Need scripting to preserve control flow for this test to be
# meaningful.
model = torch.jit.script(module_class())
f = io.BytesIO()
x = torch.ones(x_size)
dynamic_axis_name = "condition"
torch.onnx.export(
model,
(x,),
f,
opset_version=15,
# Ensure condition is not constant
dynamic_axes={"x": {0: dynamic_axis_name}},
input_names=["x"],
)
exported = onnx.load_from_string(f.getvalue())
expected_elem_type = torch.onnx.JitScalarType.from_dtype(x.dtype).onnx_type()
expected_output_type = onnx.helper.make_optional_type_proto(
onnx.helper.make_tensor_type_proto(expected_elem_type, (dynamic_axis_name,))
)
self.assertEqual(expected_output_type, exported.graph.output[0].type)
for node in exported.graph.node:
# Both branches output types should match.
if node.op_type == "If":
for attr in node.attribute:
if attr.name in ("then_branch", "else_branch"):
self.assertEqual(expected_output_type, attr.g.output[0].type)
class TestONNXExport(common_utils.TestCase):
def test_fuse_addmm(self):
class AddmmModel(torch.nn.Module):
def forward(self, x):
return torch.mm(x, x) + x
x = torch.ones(3, 3)
f = io.BytesIO()
torch.onnx._export(AddmmModel(), x, f, verbose=False)
def test_onnx_transpose_incomplete_tensor_type(self):
# Smoke test to get us into the state where we are attempting to export
# a transpose op, where the input is a TensorType without size information.
# This would previously not work, since we would
# take the size of the input and use the length of its sizes as the
# number of dimensions in the permutation.
class Foo(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x.contiguous().transpose(0, 1).sum()
class TraceMe(torch.nn.Module):
def __init__(self):
super(TraceMe, self).__init__()
self.foo = Foo()
def forward(self, x):
return self.foo(x)
tm = TraceMe()
tm = torch.jit.trace(tm, torch.rand(3, 4))
f = io.BytesIO()
torch.onnx.export(tm, (torch.rand(3, 4),), f)
def test_export_tensoroption_to(self):
def foo(x):
return x[0].clone().detach().cpu() + x
traced = torch.jit.trace(foo, (torch.rand([2])))
torch.onnx.export_to_pretty_string(traced, (torch.rand([2]),))
def test_onnx_export_script_module(self):
class ModuleToExport(torch.jit.ScriptModule):
def __init__(self):
super(ModuleToExport, self).__init__()
@torch.jit.script_method
def forward(self, x):
y = x - x
return x + x
mte = ModuleToExport()
torch.onnx.export_to_pretty_string(mte, (torch.zeros(1, 2, 3),), verbose=False)
@common_utils.suppress_warnings
def test_onnx_export_func_with_warnings(self):
@torch.jit.script
def func_with_warning(inp):
return torch.nn.functional.sigmoid(inp) # triggers a deprecation warning
class WarningTest(torch.nn.Module):
def __init__(self):
super(WarningTest, self).__init__()
def forward(self, x):
return func_with_warning(x)
# no exception
torch.onnx.export_to_pretty_string(
WarningTest(), torch.randn(42), verbose=False
)
def test_onnx_export_script_python_fail(self):
class PythonModule(torch.jit.ScriptModule):
def __init__(self):
super(PythonModule, self).__init__()
@torch.jit.ignore
def forward(self, x):
return torch.neg(x)
class ModuleToExport(torch.jit.ScriptModule):
def __init__(self):
super(ModuleToExport, self).__init__()
self.mod = PythonModule()
@torch.jit.script_method
def forward(self, x):
y = self.mod(x)
return y + y
mte = ModuleToExport()
f = io.BytesIO()
with self.assertRaisesRegex(RuntimeError, "Couldn't export Python"):
torch.onnx._export(mte, (torch.zeros(1, 2, 3),), f, verbose=False)
def test_onnx_export_script_inline_trace(self):
class ModuleToInline(torch.nn.Module):
def __init__(self):
super(ModuleToInline, self).__init__()
def forward(self, x):
return torch.neg(x)
class ModuleToExport(torch.jit.ScriptModule):
def __init__(self):
super(ModuleToExport, self).__init__()
self.mod = torch.jit.trace(ModuleToInline(), torch.zeros(1, 2, 3))
@torch.jit.script_method
def forward(self, x):
y = self.mod(x)
return y + y
mte = ModuleToExport()
torch.onnx.export_to_pretty_string(mte, (torch.zeros(1, 2, 3),), verbose=False)
def test_onnx_export_script_inline_script(self):
class ModuleToInline(torch.jit.ScriptModule):
def __init__(self):
super(ModuleToInline, self).__init__()
@torch.jit.script_method
def forward(self, x):
return torch.neg(x)
class ModuleToExport(torch.jit.ScriptModule):
def __init__(self):
super(ModuleToExport, self).__init__()
self.mod = ModuleToInline()
@torch.jit.script_method
def forward(self, x):
y = self.mod(x)
return y + y
mte = ModuleToExport()
torch.onnx.export_to_pretty_string(mte, (torch.zeros(1, 2, 3),), verbose=False)
def test_onnx_export_script_module_loop(self):
class ModuleToExport(torch.jit.ScriptModule):
def __init__(self):
super(ModuleToExport, self).__init__()
@torch.jit.script_method
def forward(self, x):
# test that we support end-to-end ONNX export of loops and
# nested loops, with and without a loop index
for _ in range(5):
for i in range(3):
x = x + i
return x
mte = ModuleToExport()
torch.onnx.export_to_pretty_string(mte, (torch.zeros(1, 2, 3),), verbose=False)
@common_utils.suppress_warnings
def test_onnx_export_script_truediv(self):
class ModuleToExport(torch.jit.ScriptModule):
def __init__(self):
super(ModuleToExport, self).__init__()
@torch.jit.script_method
def forward(self, x):
z = x.size(0) / 2
return x + z
mte = ModuleToExport()
torch.onnx.export_to_pretty_string(
mte, (torch.zeros(1, 2, 3, dtype=torch.float),), verbose=False
)
def test_onnx_export_script_non_alpha_add_sub(self):
class ModuleToExport(torch.jit.ScriptModule):
def __init__(self):
super(ModuleToExport, self).__init__()
@torch.jit.script_method
def forward(self, x):
bs = x.size(0) + 1
return bs - 1
mte = ModuleToExport()
torch.onnx.export_to_pretty_string(mte, (torch.rand(3, 4),), verbose=False)
def test_onnx_export_script_module_if(self):
class ModuleToExport(torch.jit.ScriptModule):
def __init__(self):
super(ModuleToExport, self).__init__()
@torch.jit.script_method
def forward(self, x):
if bool(torch.sum(x) > 0):
x = torch.neg(x)
return x
mte = ModuleToExport()
torch.onnx.export_to_pretty_string(mte, (torch.zeros(1, 2, 3),), verbose=False)
def test_onnx_export_script_inline_params(self):
class ModuleToInline(torch.jit.ScriptModule):
def __init__(self):
super(ModuleToInline, self).__init__()
self.m = torch.nn.Parameter(torch.ones(3, 3))
self.unused = torch.nn.Parameter(torch.ones(1, 2, 3))
@torch.jit.script_method
def forward(self, x):
return torch.mm(x, self.m)
class ModuleToExport(torch.jit.ScriptModule):
def __init__(self):
super(ModuleToExport, self).__init__()
self.mod = ModuleToInline()
self.param = torch.nn.Parameter(torch.ones(3, 4))
@torch.jit.script_method
def forward(self, x):
y = self.mod(x)
return torch.mm(y, self.param)
mte = ModuleToExport()
result = mte(torch.zeros(2, 3))
reference = torch.mm(
torch.mm(torch.zeros(2, 3), torch.ones(3, 3)), torch.ones(3, 4)
)
self.assertEqual(result, reference)
torch.onnx.export_to_pretty_string(mte, (torch.ones(2, 3),), verbose=False)
def test_onnx_export_speculate(self):
class Foo(torch.jit.ScriptModule):
def __init__(self, m):
super(Foo, self).__init__()
self.m = m
@torch.jit.script_method
def forward(self, x):
x += x
# because we are testing if we emit `if` statement correctly
# we cannot use `True` as the condition. Constant prop
# would remove the `if` statements.
c = torch.sum(x) > 4
if bool(c):
if bool(c):
y = self.m(x)
else:
y = self.m(x)
else:
y = self.m(x)
return y
linear = torch.jit.trace(
torch.nn.Linear(10, 20).float(), torch.zeros(1, 10, dtype=torch.float)
)
@torch.jit.script
def transpose(x):
return x.t()
f1 = Foo(transpose)
f2 = Foo(linear)
torch.onnx.export_to_pretty_string(f1, (torch.ones(1, 10, dtype=torch.float),))
torch.onnx.export_to_pretty_string(f2, (torch.ones(1, 10, dtype=torch.float),))
def test_onnx_export_shape_reshape(self):
class Foo(torch.nn.Module):
def forward(self, x):
import torch.onnx.operators
x = x.repeat(5, 1, 1)
shape = torch.onnx.operators.shape_as_tensor(x)
reshaped = torch.onnx.operators.reshape_from_tensor_shape(x, shape)
return reshaped
foo = torch.jit.trace(Foo(), torch.zeros(1, 2, 3))
torch.onnx.export_to_pretty_string(foo, (torch.zeros(1, 2, 3)))
def test_listconstruct_erasure(self):
class FooMod(torch.nn.Module):
def forward(self, x):
mask = x < 0.0
return x[mask]
torch.onnx.export_to_pretty_string(
FooMod(),
(torch.rand(3, 4),),
add_node_names=False,
do_constant_folding=False,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
)
def test_export_dynamic_slice(self):
class DynamicSliceExportMod(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
retval = x[0]
for i in range(x.size(1)):
retval += torch.sum(x[0:i], dim=0)
return retval
mod = DynamicSliceExportMod()
input = torch.rand(3, 4, 5)
torch.onnx.export_to_pretty_string(
DynamicSliceExportMod(), (input,), opset_version=10
)
def test_export_dict(self):
class DictModule(torch.nn.Module):
def forward(self, x_in: torch.Tensor) -> Dict[str, torch.Tensor]:
return {"test_key_out": x_in}
x_in = torch.tensor(1)
mod = DictModule()
mod.train(False)
torch.onnx.export_to_pretty_string(mod, (x_in,))
with self.assertRaisesRegex(RuntimeError, r"DictConstruct.+is not supported."):
torch.onnx.export_to_pretty_string(torch.jit.script(mod), (x_in,))
def test_source_range_propagation(self):
class ExpandingModule(torch.nn.Module):
def __init__(self):
super().__init__()
# Will be expanded during ONNX export
self.ln = torch.nn.LayerNorm([1])
def forward(self, input):
return self.ln(input)
mod = ExpandingModule()
graph, _, _ = utils._model_to_graph(
mod,
(torch.zeros(1),),
operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
)
# Ensure that every node in the graph has a valid source range
for node in graph.nodes():
self.assertTrue(node.sourceRange())
@common_utils.skipIfCaffe2
def test_clip_aten_fallback_due_exception(self):
def bad_clamp(g, self, min, max):
return symbolic_helper._onnx_unsupported("Bad boy!")
class MyClip(torch.nn.Module):
def forward(self, x):
return torch.clamp(x, min=-0.5, max=0.5)
onnx_model = export_to_onnx(
MyClip(),
torch.randn(3, 4, requires_grad=True),
custom_ops=[common_utils.custom_op("aten::clamp", bad_clamp, 9)],
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
)
self.assertAtenOp(onnx_model, "clamp", "Tensor")
@common_utils.skipIfCaffe2
def test_clip_aten_fallback_explicit_request(self):
class MyClip(torch.nn.Module):
def forward(self, x):
return torch.clamp(x, min=-0.5, max=0.5)
def break_is_registered_op_api(opname, domain, version):
fake_missing_symbolics = ("clamp",)
if opname in fake_missing_symbolics:
return False
return (
(domain, version) in symbolic_registry._registry
and opname in symbolic_registry._registry[(domain, version)]
)
# Force missing symbolic for well-known op using a mock
onnx_model = export_to_onnx(
MyClip(),
torch.randn(3, 4, requires_grad=True),
mocks=[
unittest.mock.patch(
"torch.onnx.symbolic_registry.is_registered_op",
side_effect=break_is_registered_op_api,
)
],
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
)
self.assertAtenOp(onnx_model, "clamp", "Tensor")
def _helper_test_to_(self, cast_fn: Callable[[torch.Tensor], torch.Tensor]):
"""Helper to test aten::to(device) variants.
`cast_fn` is converted into a `torch.jit.script`. It wraps `aten::to`
during export to prevent the devices from being hard-coded.
Needed by detectron2 after https://github.com/facebookresearch/detectron2/pull/4132/
"""
cast_fn = torch.jit.script(cast_fn)
onnx_model = export_to_onnx(cast_fn, torch.zeros([1, 3, 32, 32]))
for n in onnx_model.graph.node:
self.assertNotEqual(n.op_type, "To")
self.assertNotEqual(n.op_type, "Cast")
def test_to__cpu_string(self):
def cast_cpu_string(src: torch.Tensor) -> torch.Tensor:
return src.to("cpu")
self._helper_test_to_(cast_cpu_string)
def test_to__device_cpu_string(self):
def cast_device_cpu_string(src: torch.Tensor) -> torch.Tensor:
return src.to(device="cpu")
self._helper_test_to_(cast_device_cpu_string)
def test_script_custom_class_error(self):
class BoxCoder:
def __init__(self, bbox_xform_clip: float) -> None:
self.bbox_xform_clip = bbox_xform_clip
def decode(self, rel_codes: Tensor, boxes: List[Tensor]) -> Tensor:
boxes = torch.cat(boxes, dim=0)
pred_ctr_x = (
torch.clamp(rel_codes[:, 0::4], max=self.bbox_xform_clip)
* boxes[:, 2]
)
return pred_ctr_x
class MyModule(torch.nn.Module):
__annotations__ = {
"box_coder": BoxCoder,
}
def __init__(self):
super().__init__()
self.box_coder = BoxCoder(1.4)
def forward(self, box_regression: Tensor, proposals: List[Tensor]):
return self.box_coder.decode(box_regression, proposals)
model = torch.jit.script(MyModule())
box_regression = torch.randn([4, 4])
proposal = [torch.randn(2, 4), torch.randn(2, 4)]
with self.assertRaises(RuntimeError) as cm:
onnx_model = io.BytesIO()
torch.onnx.export(
model,
(box_regression, proposal),
onnx_model,
)
def test_initializer_sequence(self):
class MyModule(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super().__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, x):
out = self.fc1(x)
out = self.relu(out)
out = self.fc2(out)
return out
test_model = MyModule(3, 4, 10)
state_dict_list = [k for (k, v) in test_model.state_dict().items()]
named_params_list = [k for (k, v) in test_model.named_parameters()]
x = torch.randn(32, 3)
f = io.BytesIO()
torch.onnx._export(test_model, (x,), f, do_constant_folding=False)
loaded_model = onnx.load_from_string(f.getvalue())
actual_list = [p.name for p in loaded_model.graph.initializer]
assert actual_list == state_dict_list, (
"Initializers' sequence is not as same as state_dict(). Expected: ("
+ ", ".join(state_dict_list)
+ "). Actual:("
+ ", ".join(actual_list)
+ ")."
)
assert actual_list == named_params_list, (
"Initializers' sequence is not as same as named_parameters(). Expected: ("
+ ", ".join(named_params_list)
+ "). Actual:("
+ ", ".join(actual_list)
+ ")."
)
def test_initializer_sequence_script_model(self):
def list_is_expected(short_list, long_list) -> bool:
if len(short_list) > len(long_list):
return False
for i in range(len(short_list)):
if short_list[i] not in long_list[i]:
return False
return True
def loop(x, y):
for i in range(int(y)):
x = x + i
return x
class MyModule(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super().__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, x, y):
x = loop(x, y)
out = self.fc1(x)
out = self.relu(out)
out = self.fc2(out)
return out
test_model = torch.jit.script(MyModule(3, 4, 10))
state_dict_list = [k for (k, v) in test_model.state_dict().items()]
named_params_list = [k for (k, v) in test_model.named_parameters()]
x = torch.ones(2, 3, dtype=torch.float)
y = torch.tensor(5, dtype=torch.long)
f = io.BytesIO()
torch.onnx.export(test_model, (x, y), f, do_constant_folding=False)
loaded_model = onnx.load_from_string(f.getvalue())
actual_list = [p.name for p in loaded_model.graph.initializer]
assert list_is_expected(state_dict_list, actual_list), (
"ScriptModel - Initializers' sequence is not as same as state_dict(). Expected: ("
+ ", ".join(state_dict_list)
+ "). Actual:("
+ ", ".join(actual_list)
+ ")."
)
assert list_is_expected(named_params_list, actual_list), (
"ScriptModel - Initializers' sequence is not as same as named_parameters(). Expected: ("
+ ", ".join(named_params_list)
+ "). Actual:("
+ ", ".join(actual_list)
+ ")."
)
def test_onnx_checker_invalid_graph(self):
class CustomAddModule(torch.nn.Module):
def forward(self, x, y):
return torch.add(x, y)
def symbolic_custom_invalid_add(g, input, other, alpha=None):
return g.op("Add", input, other, invalid_attr_i=1)
torch.onnx.register_custom_op_symbolic("::add", symbolic_custom_invalid_add, 1)
x = torch.randn(2, 3, 4)
y = torch.randn(2, 3, 4)
test_model = CustomAddModule()
f = io.BytesIO()
try:
with self.assertRaises(torch.onnx.errors.CheckerError):
torch.onnx.export(test_model, (x, y), f)
finally:
torch.onnx.unregister_custom_op_symbolic("::add", 1)
self.assertTrue(f.getvalue(), "ONNX graph was not exported.")
loaded_model = onnx.load_from_string(f.getvalue())
def test_shape_value_map(self):
class RSoftMax(torch.nn.Module):
def __init__(self, radix, cardinality):
super().__init__()
self.radix = radix
self.cardinality = cardinality
def forward(self, x):
batch = x.size(0)
x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
x = F.softmax(x, dim=1)
x = x.reshape(batch, -1)
return x
radix = 2
cardinality = 1
x = torch.randn(10, 1, 128, 1)
f = io.BytesIO()
torch.onnx.export(
RSoftMax(radix, cardinality),
(x,),
f,
input_names=["x"],
dynamic_axes={"x": [0]},
)
loaded_model = onnx.load_from_string(f.getvalue())
self.assertEqual(
loaded_model.graph.output[0].type.tensor_type.shape.dim[1].dim_value, 128
)
def test_onnx_proto_checker(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return 2 * x
x = torch.randn(1, 2, 3, requires_grad=True)
f = io.BytesIO()
torch.onnx.export(Model(), x, f)
model = onnx.load(f)
model.ir_version = 0
def check_proto():
torch._C._check_onnx_proto(model.SerializeToString())
self.assertRaises(RuntimeError, check_proto)
def test_maintain_dynamic_shapes_of_unreliable_nodes(self):
def symbolic_pythonop(ctx: torch.onnx.SymbolicContext, g, *args, **kwargs):
return g.op("com.microsoft::PythonOp")
torch.onnx.register_custom_op_symbolic("prim::PythonOp", symbolic_pythonop, 1)
self.addCleanup(torch.onnx.unregister_custom_op_symbolic, "prim::PythonOp", 1)
# necessary parameters for transformer embeddings
hidden_size = 48
max_position_embeddings = 32
batch_size = 2
# An issue was found where an autograd.Function makes the downstream
# node unreliable but with a static shape. The issue was first
# discovered when using Apex FusedLayerNorm in Transformers.
class CustomLayerNorm(torch.autograd.Function):
@staticmethod
def forward(ctx, embedding):
layer_norm = torch.nn.LayerNorm(hidden_size, eps=1e-12)
return layer_norm(embedding)
class EmbeddingModule(torch.nn.Module):
def forward(
self,
embeddings=None,
):
embedding_output = CustomLayerNorm.apply(embeddings)
query = embedding_output.transpose(0, 1)
target_len, batch_size, embedding_dim = query.size()
# Reshape is used for consuming batch_size, and if it is static,
# this will be a Constant node in the graph
query = query.reshape(target_len, batch_size, embedding_dim)
return query
embeddings = torch.randn(batch_size, max_position_embeddings, hidden_size)
f = io.BytesIO()
torch.onnx.export(
EmbeddingModule().eval(),
(embeddings,),
f,
input_names=["embeddings"],
dynamic_axes={
"embeddings": {
0: "batch_size",
1: "max_position_embeddings",
2: "hidden_size",
}
},
custom_opsets={"com.microsoft": 1},
)
model = onnx.load(io.BytesIO(f.getvalue()))
# If there is a constant node with dim=3 and max_position_embeddings,
# batch_size, hidden_size as shape, it means the shape becomes static.
# Normally, with dynamic batch size, this constant node should not exist.
const_node = [n for n in model.graph.node if n.op_type == "Constant"]
self.assertNotEqual(len(const_node), 0)
for node in const_node:
for a in node.attribute:
if a.name == "value":
shape = onnx.numpy_helper.to_array(a.t)
self.assertNotEqual(
shape.tolist(),
[max_position_embeddings, batch_size, hidden_size],
)
def test_is_fp_for_C_TypeList(self):
class M(torch.nn.Module):
def forward(self, x):
x = x.squeeze(1)
w = x.shape[2]
pos = x.view(2, -1).argmax(1)
x_int = pos % w
y_int = (pos - x_int) // w
return y_int, x_int
model = torch.jit.script(M())
inputs = torch.randn(2, 4, 6)
f = io.BytesIO()
torch.onnx.export(
model, inputs, f, dynamic_axes={"x": [0, 1]}, input_names=["x"]
)
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_pytorch_onnx_no_runtime.py
|
# Owner(s): ["module: onnx"]
"""Unit tests on `torch.onnx.symbolic_helper`."""
import torch
from torch.onnx import symbolic_helper
from torch.onnx._globals import GLOBALS
from torch.testing._internal import common_utils
class TestHelperFunctions(common_utils.TestCase):
def setUp(self):
super().setUp()
self._initial_training_mode = GLOBALS.training_mode
def tearDown(self):
GLOBALS.training_mode = self._initial_training_mode
@common_utils.parametrize(
"op_train_mode,export_mode",
[
common_utils.subtest(
[1, torch.onnx.TrainingMode.PRESERVE], name="export_mode_is_preserve"
),
common_utils.subtest(
[0, torch.onnx.TrainingMode.EVAL],
name="modes_match_op_train_mode_0_export_mode_eval",
),
common_utils.subtest(
[1, torch.onnx.TrainingMode.TRAINING],
name="modes_match_op_train_mode_1_export_mode_training",
),
],
)
def test_check_training_mode_does_not_warn_when(
self, op_train_mode: int, export_mode: torch.onnx.TrainingMode
):
GLOBALS.training_mode = export_mode
self.assertNotWarn(
lambda: symbolic_helper.check_training_mode(op_train_mode, "testop")
)
@common_utils.parametrize(
"op_train_mode,export_mode",
[
common_utils.subtest(
[0, torch.onnx.TrainingMode.TRAINING],
name="modes_do_not_match_op_train_mode_0_export_mode_training",
),
common_utils.subtest(
[1, torch.onnx.TrainingMode.EVAL],
name="modes_do_not_match_op_train_mode_1_export_mode_eval",
),
],
)
def test_check_training_mode_warns_when(
self,
op_train_mode: int,
export_mode: torch.onnx.TrainingMode,
):
with self.assertWarnsRegex(
UserWarning, f"ONNX export mode is set to {export_mode}"
):
GLOBALS.training_mode = export_mode
symbolic_helper.check_training_mode(op_train_mode, "testop")
common_utils.instantiate_parametrized_tests(TestHelperFunctions)
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_symbolic_helper.py
|
# Owner(s): ["module: onnx"]
import torch
# Autograd function that is a replica of the autograd function in
# test_utility_funs.py (test_autograd_module_name)
class CustomFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.clamp(min=0)
@staticmethod
def backward(ctx, grad_output):
(input,) = ctx.saved_tensors
grad_input = grad_output.clone()
grad_input[input < 0] = 0
return grad_input
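# Illustrative usage sketch (not part of the original helper): custom autograd
# Functions are invoked through `apply` rather than by instantiation, e.g.
#   x = torch.randn(3, requires_grad=True)
#   y = CustomFunction.apply(x)   # forward pass: clamp(min=0)
#   y.sum().backward()            # backward pass: gradient zeroed where x < 0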
|
pytorch-master
|
test/onnx/autograd_helper.py
|
# Owner(s): ["module: onnx"]
# Some standard imports
import unittest
import numpy as np
import torch.nn.init as init
import torch.onnx
from caffe2.python.core import workspace
from caffe2.python.model_helper import ModelHelper
from pytorch_helper import PyTorchModule
from torch import nn
from torch.testing._internal import common_utils
from torch.testing._internal.common_utils import skipIfNoLapack
class TestCaffe2Backend(common_utils.TestCase):
@skipIfNoLapack
@unittest.skip("test broken because Lapack was always missing.")
def test_helper(self):
class SuperResolutionNet(nn.Module):
def __init__(self, upscale_factor, inplace=False):
super().__init__()
self.relu = nn.ReLU(inplace=inplace)
self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
self.conv4 = nn.Conv2d(32, upscale_factor**2, (3, 3), (1, 1), (1, 1))
self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
self._initialize_weights()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.relu(self.conv3(x))
x = self.pixel_shuffle(self.conv4(x))
return x
def _initialize_weights(self):
init.orthogonal(self.conv1.weight, init.calculate_gain("relu"))
init.orthogonal(self.conv2.weight, init.calculate_gain("relu"))
init.orthogonal(self.conv3.weight, init.calculate_gain("relu"))
init.orthogonal(self.conv4.weight)
torch_model = SuperResolutionNet(upscale_factor=3)
fake_input = torch.randn(1, 1, 224, 224, requires_grad=True)
# use ModelHelper to create a C2 net
helper = ModelHelper(name="test_model")
start = helper.Sigmoid(["the_input"])
# Embed the ONNX-converted pytorch net inside it
(toutput,) = PyTorchModule(helper, torch_model, (fake_input,), [start])
output = helper.Sigmoid(toutput)
workspace.RunNetOnce(helper.InitProto())
workspace.FeedBlob("the_input", fake_input.data.numpy())
# print([ k for k in workspace.blobs ])
workspace.RunNetOnce(helper.Proto())
c2_out = workspace.FetchBlob(str(output))
torch_out = torch.sigmoid(torch_model(torch.sigmoid(fake_input)))
np.testing.assert_almost_equal(torch_out.data.cpu().numpy(), c2_out, decimal=3)
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/test_pytorch_helper.py
|
# Owner(s): ["module: onnx"]
import glob
import os
import caffe2.python.onnx.backend as c2
import numpy as np
import onnx.backend.test
from onnx import numpy_helper
def load_tensor_as_numpy_array(f):
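# Note: despite its name, this helper returns an onnx.TensorProto; callers
# convert it to a numpy array with numpy_helper.to_array (see run_generated_test).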
tensor = onnx.TensorProto()
with open(f, "rb") as file:
tensor.ParseFromString(file.read())
return tensor
def assert_similar(ref, real):
np.testing.assert_equal(len(ref), len(real))
for i in range(len(ref)):
np.testing.assert_allclose(ref[i], real[i], rtol=1e-3)
def run_generated_test(model_file, data_dir, device="CPU"):
model = onnx.load(model_file)
input_num = len(glob.glob(os.path.join(data_dir, "input_*.pb")))
inputs = []
for i in range(input_num):
inputs.append(
numpy_helper.to_array(
load_tensor_as_numpy_array(os.path.join(data_dir, f"input_{i}.pb"))
)
)
output_num = len(glob.glob(os.path.join(data_dir, "output_*.pb")))
outputs = []
for i in range(output_num):
outputs.append(
numpy_helper.to_array(
load_tensor_as_numpy_array(os.path.join(data_dir, f"output_{i}.pb"))
)
)
prepared = c2.prepare(model, device=device)
c2_outputs = prepared.run(inputs)
assert_similar(outputs, c2_outputs)
|
pytorch-master
|
test/onnx/test_caffe2_common.py
|
# Owner(s): ["module: onnx"]
import numpy as np
import torch
from torch.onnx import _experimental, verification
from torch.testing._internal import common_utils
class TestVerification(common_utils.TestCase):
def setUp(self) -> None:
super().setUp()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
def test_check_export_model_diff_returns_diff_when_constant_mismatch(self):
class UnexportableModel(torch.nn.Module):
def forward(self, x, y):
# tensor.data() will be exported as a constant,
# leading to wrong model output under different inputs.
return x + y.data
test_input_groups = [
((torch.randn(2, 3), torch.randn(2, 3)), {}),
((torch.randn(2, 3), torch.randn(2, 3)), {}),
]
results = verification.check_export_model_diff(
UnexportableModel(), test_input_groups
)
self.assertRegex(
results,
r"Graph diff:(.|\n)*"
r"First diverging operator:(.|\n)*"
r"prim::Constant(.|\n)*"
r"Former source location:(.|\n)*"
r"Latter source location:",
)
def test_check_export_model_diff_returns_diff_when_dynamic_controlflow_mismatch(
self,
):
class UnexportableModel(torch.nn.Module):
def forward(self, x, y):
for i in range(x.size(0)):
y = x[i] + y
return y
test_input_groups = [
((torch.randn(2, 3), torch.randn(2, 3)), {}),
((torch.randn(4, 3), torch.randn(2, 3)), {}),
]
export_options = _experimental.ExportOptions(
input_names=["x", "y"], dynamic_axes={"x": [0]}
)
results = verification.check_export_model_diff(
UnexportableModel(), test_input_groups, export_options
)
self.assertRegex(
results,
r"Graph diff:(.|\n)*"
r"First diverging operator:(.|\n)*"
r"prim::Constant(.|\n)*"
r"Latter source location:(.|\n)*",
)
def test_check_export_model_diff_returns_empty_when_correct_export(self):
class SupportedModel(torch.nn.Module):
def forward(self, x, y):
return x + y
test_input_groups = [
((torch.randn(2, 3), torch.randn(2, 3)), {}),
((torch.randn(2, 3), torch.randn(2, 3)), {}),
]
results = verification.check_export_model_diff(
SupportedModel(), test_input_groups
)
self.assertEqual(results, "")
def test_compare_ort_pytorch_outputs_no_raise_with_acceptable_error_percentage(
self,
):
ort_outs = [np.array([[1.0, 2.0], [3.0, 4.0]])]
pytorch_outs = [torch.tensor([[1.0, 2.0], [3.0, 1.0]])]
verification._compare_ort_pytorch_outputs(
ort_outs,
pytorch_outs,
rtol=1e-5,
atol=1e-6,
check_shape=True,
check_dtype=False,
acceptable_error_percentage=0.3,
)
def test_compare_ort_pytorch_outputs_raise_without_acceptable_error_percentage(
self,
):
ort_outs = [np.array([[1.0, 2.0], [3.0, 4.0]])]
pytorch_outs = [torch.tensor([[1.0, 2.0], [3.0, 1.0]])]
with self.assertRaises(AssertionError):
verification._compare_ort_pytorch_outputs(
ort_outs,
pytorch_outs,
rtol=1e-5,
atol=1e-6,
check_shape=True,
check_dtype=False,
acceptable_error_percentage=None,
)
|
pytorch-master
|
test/onnx/test_verification.py
|
import torch.nn as nn
class EmbeddingNetwork1(nn.Module):
def __init__(self, dim=5):
super().__init__()
self.emb = nn.Embedding(10, dim)
self.lin1 = nn.Linear(dim, 1)
self.seq = nn.Sequential(
self.emb,
self.lin1,
)
def forward(self, input):
return self.seq(input)
class EmbeddingNetwork2(nn.Module):
def __init__(self, in_space=10, dim=3):
super().__init__()
self.embedding = nn.Embedding(in_space, dim)
self.seq = nn.Sequential(self.embedding, nn.Linear(dim, 1), nn.Sigmoid())
def forward(self, indices):
return self.seq(indices)
|
pytorch-master
|
test/onnx/model_defs/emb_seq.py
|
import torch.nn as nn
import torch.nn.init as init
class SuperResolutionNet(nn.Module):
def __init__(self, upscale_factor):
super().__init__()
self.relu = nn.ReLU()
self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
self.conv4 = nn.Conv2d(32, upscale_factor**2, (3, 3), (1, 1), (1, 1))
self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
self._initialize_weights()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.relu(self.conv3(x))
x = self.pixel_shuffle(self.conv4(x))
return x
def _initialize_weights(self):
init.orthogonal_(self.conv1.weight, init.calculate_gain("relu"))
init.orthogonal_(self.conv2.weight, init.calculate_gain("relu"))
init.orthogonal_(self.conv3.weight, init.calculate_gain("relu"))
init.orthogonal_(self.conv4.weight)
|
pytorch-master
|
test/onnx/model_defs/super_resolution.py
|
# The model is from here:
# https://github.com/pytorch/examples/blob/master/word_language_model/model.py
from typing import Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(
self,
rnn_type,
ntoken,
ninp,
nhid,
nlayers,
dropout=0.5,
tie_weights=False,
batchsize=2,
):
super().__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ["LSTM", "GRU"]:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {"RNN_TANH": "tanh", "RNN_RELU": "relu"}[rnn_type]
except KeyError:
raise ValueError(
"""An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']"""
) from None
self.rnn = nn.RNN(
ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout
)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError(
"When using the tied flag, nhid must be equal to emsize"
)
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
self.hidden = self.init_hidden(batchsize)
@staticmethod
def repackage_hidden(h):
"""Detach hidden states from their history."""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple([RNNModel.repackage_hidden(v) for v in h])
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(
output.view(output.size(0) * output.size(1), output.size(2))
)
self.hidden = RNNModel.repackage_hidden(hidden)
return decoded.view(output.size(0), output.size(1), decoded.size(1))
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == "LSTM":
return (
weight.new(self.nlayers, bsz, self.nhid).zero_(),
weight.new(self.nlayers, bsz, self.nhid).zero_(),
)
else:
return weight.new(self.nlayers, bsz, self.nhid).zero_()
class RNNModelWithTensorHidden(RNNModel):
"""Supports GRU scripting."""
@staticmethod
def repackage_hidden(h):
"""Detach hidden states from their history."""
return h.detach()
def forward(self, input: Tensor, hidden: Tensor):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(
output.view(output.size(0) * output.size(1), output.size(2))
)
self.hidden = RNNModelWithTensorHidden.repackage_hidden(hidden)
return decoded.view(output.size(0), output.size(1), decoded.size(1))
class RNNModelWithTupleHidden(RNNModel):
"""Supports LSTM scripting."""
@staticmethod
def repackage_hidden(h: Tuple[Tensor, Tensor]):
"""Detach hidden states from their history."""
return (h[0].detach(), h[1].detach())
def forward(self, input: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(
output.view(output.size(0) * output.size(1), output.size(2))
)
self.hidden = self.repackage_hidden(tuple(hidden))
return decoded.view(output.size(0), output.size(1), decoded.size(1))
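# Illustrative usage sketch (shapes assumed; not part of the original module):
#   model = RNNModel("LSTM", ntoken=10, ninp=8, nhid=8, nlayers=2, batchsize=2)
#   tokens = torch.randint(10, (5, 2))    # (seq_len, batch) of token ids
#   logits = model(tokens, model.hidden)  # -> (seq_len, batch, ntoken)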
|
pytorch-master
|
test/onnx/model_defs/word_language_model.py
|
import torch
import torch.nn as nn
import torch.nn.init as init
class Fire(nn.Module):
def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes):
super().__init__()
self.inplanes = inplanes
self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
self.squeeze_activation = nn.ReLU(inplace=True)
self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1)
self.expand1x1_activation = nn.ReLU(inplace=True)
self.expand3x3 = nn.Conv2d(
squeeze_planes, expand3x3_planes, kernel_size=3, padding=1
)
self.expand3x3_activation = nn.ReLU(inplace=True)
def forward(self, x):
x = self.squeeze_activation(self.squeeze(x))
return torch.cat(
[
self.expand1x1_activation(self.expand1x1(x)),
self.expand3x3_activation(self.expand3x3(x)),
],
1,
)
class SqueezeNet(nn.Module):
def __init__(self, version=1.0, num_classes=1000, ceil_mode=False):
super().__init__()
if version not in [1.0, 1.1]:
raise ValueError(
"Unsupported SqueezeNet version {version}:"
"1.0 or 1.1 expected".format(version=version)
)
self.num_classes = num_classes
if version == 1.0:
self.features = nn.Sequential(
nn.Conv2d(3, 96, kernel_size=7, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=ceil_mode),
Fire(96, 16, 64, 64),
Fire(128, 16, 64, 64),
Fire(128, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=ceil_mode),
Fire(256, 32, 128, 128),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=ceil_mode),
Fire(512, 64, 256, 256),
)
else:
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=ceil_mode),
Fire(64, 16, 64, 64),
Fire(128, 16, 64, 64),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=ceil_mode),
Fire(128, 32, 128, 128),
Fire(256, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=ceil_mode),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
Fire(512, 64, 256, 256),
)
# Final convolution is initialized differently from the rest
final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
self.classifier = nn.Sequential(
nn.Dropout(p=0.5), final_conv, nn.ReLU(inplace=True), nn.AvgPool2d(13)
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m is final_conv:
init.normal_(m.weight.data, mean=0.0, std=0.01)
else:
init.kaiming_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = self.classifier(x)
return x.view(x.size(0), self.num_classes)
|
pytorch-master
|
test/onnx/model_defs/squeezenet.py
|
from torch import nn
from torch.nn.utils.rnn import PackedSequence
class LstmFlatteningResult(nn.LSTM):
def forward(self, input, *fargs, **fkwargs):
output, (hidden, cell) = nn.LSTM.forward(self, input, *fargs, **fkwargs)
return output, hidden, cell
class LstmFlatteningResultWithSeqLength(nn.Module):
def __init__(self, input_size, hidden_size, layers, bidirect, dropout, batch_first):
super().__init__()
self.batch_first = batch_first
self.inner_model = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=layers,
bidirectional=bidirect,
dropout=dropout,
batch_first=batch_first,
)
def forward(self, input: PackedSequence, hx=None):
output, (hidden, cell) = self.inner_model.forward(input, hx)
return output, hidden, cell
class LstmFlatteningResultWithoutSeqLength(nn.Module):
def __init__(self, input_size, hidden_size, layers, bidirect, dropout, batch_first):
super().__init__()
self.batch_first = batch_first
self.inner_model = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=layers,
bidirectional=bidirect,
dropout=dropout,
batch_first=batch_first,
)
def forward(self, input, hx=None):
output, (hidden, cell) = self.inner_model.forward(input, hx)
return output, hidden, cell
|
pytorch-master
|
test/onnx/model_defs/lstm_flattening_result.py
|
from .op_test import * # noqa: F403
from .squeezenet import * # noqa: F403
from .srresnet import * # noqa: F403
from .super_resolution import * # noqa: F403
|
pytorch-master
|
test/onnx/model_defs/__init__.py
|
import torch
import torch.nn as nn
# configurable
bsz = 64
imgsz = 64
nz = 100
ngf = 64
ndf = 64
nc = 3
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find("BatchNorm") != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
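# Typical (illustrative) usage: apply the initializer recursively to a network,
# e.g. `netG = _netG(ngpu=1); netG.apply(weights_init)`.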
class _netG(nn.Module):
def __init__(self, ngpu):
super().__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
# state size. (nc) x 64 x 64
)
def forward(self, input):
if self.ngpu > 1 and isinstance(input.data, torch.cuda.FloatTensor):
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
class _netD(nn.Module):
def __init__(self, ngpu):
super().__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is (nc) x 64 x 64
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 32 x 32
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
nn.Sigmoid(),
)
def forward(self, input):
if self.ngpu > 1 and isinstance(input.data, torch.cuda.FloatTensor):
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output.view(-1, 1)
|
pytorch-master
|
test/onnx/model_defs/dcgan.py
|
import math
from torch import nn
from torch.nn import init
def _initialize_orthogonal(conv):
prelu_gain = math.sqrt(2)
    init.orthogonal_(conv.weight, gain=prelu_gain)
if conv.bias is not None:
conv.bias.data.zero_()
class ResidualBlock(nn.Module):
def __init__(self, n_filters):
super().__init__()
self.conv1 = nn.Conv2d(
n_filters, n_filters, kernel_size=3, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(n_filters)
self.prelu = nn.PReLU(n_filters)
self.conv2 = nn.Conv2d(
n_filters, n_filters, kernel_size=3, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(n_filters)
# Orthogonal initialisation
_initialize_orthogonal(self.conv1)
_initialize_orthogonal(self.conv2)
def forward(self, x):
residual = self.prelu(self.bn1(self.conv1(x)))
residual = self.bn2(self.conv2(residual))
return x + residual
class UpscaleBlock(nn.Module):
def __init__(self, n_filters):
super().__init__()
self.upscaling_conv = nn.Conv2d(
n_filters, 4 * n_filters, kernel_size=3, padding=1
)
self.upscaling_shuffler = nn.PixelShuffle(2)
self.upscaling = nn.PReLU(n_filters)
_initialize_orthogonal(self.upscaling_conv)
def forward(self, x):
return self.upscaling(self.upscaling_shuffler(self.upscaling_conv(x)))
class SRResNet(nn.Module):
def __init__(self, rescale_factor, n_filters, n_blocks):
super().__init__()
self.rescale_levels = int(math.log(rescale_factor, 2))
self.n_filters = n_filters
self.n_blocks = n_blocks
self.conv1 = nn.Conv2d(3, n_filters, kernel_size=9, padding=4)
self.prelu1 = nn.PReLU(n_filters)
for residual_block_num in range(1, n_blocks + 1):
residual_block = ResidualBlock(self.n_filters)
self.add_module(
"residual_block" + str(residual_block_num),
nn.Sequential(residual_block),
)
self.skip_conv = nn.Conv2d(
n_filters, n_filters, kernel_size=3, padding=1, bias=False
)
self.skip_bn = nn.BatchNorm2d(n_filters)
for upscale_block_num in range(1, self.rescale_levels + 1):
upscale_block = UpscaleBlock(self.n_filters)
self.add_module(
"upscale_block" + str(upscale_block_num), nn.Sequential(upscale_block)
)
self.output_conv = nn.Conv2d(n_filters, 3, kernel_size=9, padding=4)
# Orthogonal initialisation
_initialize_orthogonal(self.conv1)
_initialize_orthogonal(self.skip_conv)
_initialize_orthogonal(self.output_conv)
def forward(self, x):
x_init = self.prelu1(self.conv1(x))
x = self.residual_block1(x_init)
for residual_block_num in range(2, self.n_blocks + 1):
x = getattr(self, "residual_block" + str(residual_block_num))(x)
x = self.skip_bn(self.skip_conv(x)) + x_init
for upscale_block_num in range(1, self.rescale_levels + 1):
x = getattr(self, "upscale_block" + str(upscale_block_num))(x)
return self.output_conv(x)
|
pytorch-master
|
test/onnx/model_defs/srresnet.py
|
# Owner(s): ["module: onnx"]
import torch
import torch.nn as nn
class DummyNet(nn.Module):
def __init__(self, num_classes=1000):
super().__init__()
self.features = nn.Sequential(
nn.LeakyReLU(0.02),
nn.BatchNorm2d(3),
nn.AvgPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False),
)
def forward(self, x):
output = self.features(x)
return output.view(-1, 1).squeeze(1)
class ConcatNet(nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs):
return torch.cat(inputs, 1)
class PermuteNet(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input.permute(2, 3, 0, 1)
class PReluNet(nn.Module):
def __init__(self):
super().__init__()
self.features = nn.Sequential(
nn.PReLU(3),
)
def forward(self, x):
output = self.features(x)
return output
class FakeQuantNet(nn.Module):
def __init__(self):
super().__init__()
self.fake_quant = torch.ao.quantization.FakeQuantize()
self.fake_quant.disable_observer()
def forward(self, x):
output = self.fake_quant(x)
return output
|
pytorch-master
|
test/onnx/model_defs/op_test.py
|
import torch.nn as nn
import torch.nn.functional as F
class MNIST(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
|
pytorch-master
|
test/onnx/model_defs/mnist.py
|
from torch import nn
from torch.nn.utils import rnn as rnn_utils
class RnnModelWithPackedSequence(nn.Module):
def __init__(self, model, batch_first):
super().__init__()
self.model = model
self.batch_first = batch_first
def forward(self, input, *args):
args, seq_lengths = args[:-1], args[-1]
input = rnn_utils.pack_padded_sequence(input, seq_lengths, self.batch_first)
rets = self.model(input, *args)
ret, rets = rets[0], rets[1:]
ret, _ = rnn_utils.pad_packed_sequence(ret, self.batch_first)
return tuple([ret] + list(rets))
class RnnModelWithPackedSequenceWithoutState(nn.Module):
def __init__(self, model, batch_first):
super().__init__()
self.model = model
self.batch_first = batch_first
def forward(self, input, seq_lengths):
input = rnn_utils.pack_padded_sequence(input, seq_lengths, self.batch_first)
rets = self.model(input)
ret, rets = rets[0], rets[1:]
ret, _ = rnn_utils.pad_packed_sequence(ret, self.batch_first)
return list([ret] + list(rets))
class RnnModelWithPackedSequenceWithState(nn.Module):
def __init__(self, model, batch_first):
super().__init__()
self.model = model
self.batch_first = batch_first
def forward(self, input, hx, seq_lengths):
input = rnn_utils.pack_padded_sequence(input, seq_lengths, self.batch_first)
rets = self.model(input, hx)
ret, rets = rets[0], rets[1:]
ret, _ = rnn_utils.pad_packed_sequence(ret, self.batch_first)
return list([ret] + list(rets))
|
pytorch-master
|
test/onnx/model_defs/rnn_model_with_packed_sequence.py
|
# Owner(s): ["module: onnx"]
"""Tests for `torch.onnx.symbolic_opset9`."""
import torch
from torch import _C
from torch.onnx import symbolic_opset9 as opset9
from torch.testing._internal import common_utils
class TestPrim(common_utils.TestCase):
def setUp(self):
super().setUp()
self.graph = _C.Graph()
def test_list_unpack_returns_all_list_elements_when_previous_node_is_list_construct(
self,
):
# Build the graph
input_1 = self.graph.addInput()
input_1.setType(input_1.type().with_dtype(torch.float).with_sizes([2, 42]))
input_2 = self.graph.addInput()
input_2.setType(input_2.type().with_dtype(torch.float).with_sizes([3, 42]))
constructed_list = self.graph.op("prim::ListConstruct", input_1, input_2)
# Test the op
outputs = opset9.Prim.ListUnpack(self.graph, constructed_list)
self.assertNotEqual(outputs, None)
self.assertEqual(outputs[0], input_1)
self.assertEqual(outputs[1], input_2)
if __name__ == "__main__":
common_utils.run_tests()
|
pytorch-master
|
test/onnx/symbolic_opsets/test_symbolic_opset9.py
|
import argparse
import os.path
import sys
import torch
def get_custom_op_library_path():
if sys.platform.startswith("win32"):
library_filename = "custom_ops.dll"
elif sys.platform.startswith("darwin"):
library_filename = "libcustom_ops.dylib"
else:
library_filename = "libcustom_ops.so"
path = os.path.abspath("build/{}".format(library_filename))
assert os.path.exists(path), path
return path
class Model(torch.jit.ScriptModule):
def __init__(self):
super(Model, self).__init__()
self.p = torch.nn.Parameter(torch.eye(5))
@torch.jit.script_method
def forward(self, input):
return torch.ops.custom.op_with_defaults(input)[0] + 1
def main():
parser = argparse.ArgumentParser(
description="Serialize a script module with custom ops"
)
parser.add_argument("--export-script-module-to", required=True)
options = parser.parse_args()
torch.ops.load_library(get_custom_op_library_path())
model = Model()
model.save(options.export_script_module_to)
if __name__ == "__main__":
main()
|
pytorch-master
|
test/custom_operator/model.py
|
# Owner(s): ["module: unknown"]
import os.path
import tempfile
import torch
from torch import ops
from model import Model, get_custom_op_library_path
from torch.testing._internal.common_utils import TestCase, run_tests
class TestCustomOperators(TestCase):
def setUp(self):
self.library_path = get_custom_op_library_path()
ops.load_library(self.library_path)
def test_custom_library_is_loaded(self):
self.assertIn(self.library_path, ops.loaded_libraries)
def test_calling_custom_op_string(self):
output = ops.custom.op2("abc", "def")
self.assertLess(output, 0)
output = ops.custom.op2("abc", "abc")
self.assertEqual(output, 0)
def test_calling_custom_op(self):
output = ops.custom.op(torch.ones(5), 2.0, 3)
self.assertEqual(type(output), list)
self.assertEqual(len(output), 3)
for tensor in output:
self.assertTrue(tensor.allclose(torch.ones(5) * 2))
output = ops.custom.op_with_defaults(torch.ones(5))
self.assertEqual(type(output), list)
self.assertEqual(len(output), 1)
self.assertTrue(output[0].allclose(torch.ones(5)))
def test_calling_custom_op_with_autograd(self):
x = torch.randn((5, 5), requires_grad=True)
y = torch.randn((5, 5), requires_grad=True)
output = ops.custom.op_with_autograd(x, 2, y)
self.assertTrue(output.allclose(x + 2 * y + x * y))
go = torch.ones((), requires_grad=True)
output.sum().backward(go, False, True)
grad = torch.ones(5, 5)
self.assertEqual(x.grad, y + grad)
self.assertEqual(y.grad, x + grad * 2)
# Test with optional arg.
x.grad.zero_()
y.grad.zero_()
z = torch.randn((5, 5), requires_grad=True)
output = ops.custom.op_with_autograd(x, 2, y, z)
self.assertTrue(output.allclose(x + 2 * y + x * y + z))
go = torch.ones((), requires_grad=True)
output.sum().backward(go, False, True)
self.assertEqual(x.grad, y + grad)
self.assertEqual(y.grad, x + grad * 2)
self.assertEqual(z.grad, grad)
def test_calling_custom_op_with_autograd_in_nograd_mode(self):
with torch.no_grad():
x = torch.randn((5, 5), requires_grad=True)
y = torch.randn((5, 5), requires_grad=True)
output = ops.custom.op_with_autograd(x, 2, y)
self.assertTrue(output.allclose(x + 2 * y + x * y))
def test_calling_custom_op_inside_script_module(self):
model = Model()
output = model.forward(torch.ones(5))
self.assertTrue(output.allclose(torch.ones(5) + 1))
def test_saving_and_loading_script_module_with_custom_op(self):
model = Model()
# Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
        # opens the file, and it cannot be opened multiple times on Windows. To support Windows,
# close the file after creation and try to remove it manually.
file = tempfile.NamedTemporaryFile(delete=False)
try:
file.close()
model.save(file.name)
loaded = torch.jit.load(file.name)
finally:
os.unlink(file.name)
output = loaded.forward(torch.ones(5))
self.assertTrue(output.allclose(torch.ones(5) + 1))
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/custom_operator/test_custom_ops.py
|
# Owner(s): ["module: unknown"]
import unittest
import torch
from torch import ops
import torch.jit as jit
import glob
import os
from torch.testing._internal.common_utils import TestCase, run_tests
def get_custom_class_library_path():
library_filename = glob.glob("build/*custom_class*")
assert (len(library_filename) == 1)
library_filename = library_filename[0]
path = os.path.abspath(library_filename)
assert os.path.exists(path), path
return path
def test_equality(f, cmp_key):
obj1 = f()
obj2 = jit.script(f)()
return (cmp_key(obj1), cmp_key(obj2))
class TestCustomOperators(TestCase):
def setUp(self):
ops.load_library(get_custom_class_library_path())
def test_no_return_class(self):
def f():
val = torch.classes._TorchScriptTesting._Foo(5, 3)
return val.info()
self.assertEqual(*test_equality(f, lambda x: x))
def test_constructor_with_args(self):
def f():
val = torch.classes._TorchScriptTesting._Foo(5, 3)
return val
self.assertEqual(*test_equality(f, lambda x: x.info()))
def test_function_call_with_args(self):
def f():
val = torch.classes._TorchScriptTesting._Foo(5, 3)
val.increment(1)
return val
self.assertEqual(*test_equality(f, lambda x: x.info()))
def test_function_method_wrong_type(self):
def f():
val = torch.classes._TorchScriptTesting._Foo(5, 3)
val.increment("asdf")
return val
with self.assertRaisesRegex(RuntimeError, "Expected"):
jit.script(f)()
@unittest.skip("We currently don't support passing custom classes to custom methods.")
def test_input_class_type(self):
def f():
val = torch.classes._TorchScriptTesting._Foo(1, 2)
val2 = torch.classes._TorchScriptTesting._Foo(2, 3)
val.combine(val2)
return val
self.assertEqual(*test_equality(f, lambda x: x.info()))
def test_stack_string(self):
def f():
val = torch.classes._TorchScriptTesting._StackString(["asdf", "bruh"])
return val.pop()
self.assertEqual(*test_equality(f, lambda x: x))
def test_stack_push_pop(self):
def f():
val = torch.classes._TorchScriptTesting._StackString(["asdf", "bruh"])
val2 = torch.classes._TorchScriptTesting._StackString(["111", "222"])
val.push(val2.pop())
return val.pop() + val2.pop()
self.assertEqual(*test_equality(f, lambda x: x))
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/custom_operator/test_custom_classes.py
|
pytorch-master
|
test/cpp_api_parity/__init__.py
|
|
from collections import namedtuple
ParityStatus = namedtuple('ParityStatus', ['has_impl_parity', 'has_doc_parity'])
'''
This function expects the parity tracker Markdown file to have the following format:
```
## package1_name
API | Implementation Parity | Doc Parity
------------- | ------------- | -------------
API_Name|No|No
...
## package2_name
API | Implementation Parity | Doc Parity
------------- | ------------- | -------------
API_Name|No|No
...
```
The returned dict has the following format:
```
Dict[package_name]
-> Dict[api_name]
-> ParityStatus
```
'''
def parse_parity_tracker_table(file_path):
    def parse_parity_choice(choice):
        if choice in ['Yes', 'No']:
            return choice == 'Yes'
        else:
            raise RuntimeError(
                '{} is not a supported parity choice. The valid choices are "Yes" and "No".'.format(choice))
parity_tracker_dict = {}
with open(file_path, 'r') as f:
all_text = f.read()
packages = all_text.split('##')
for package in packages[1:]:
lines = [line.strip() for line in package.split('\n') if line.strip() != '']
package_name = lines[0]
if package_name in parity_tracker_dict:
raise RuntimeError("Duplicated package name `{}` found in {}".format(package_name, file_path))
else:
parity_tracker_dict[package_name] = {}
for api_status in lines[3:]:
api_name, has_impl_parity_str, has_doc_parity_str = [x.strip() for x in api_status.split('|')]
parity_tracker_dict[package_name][api_name] = ParityStatus(
has_impl_parity=parse_parity_choice(has_impl_parity_str),
has_doc_parity=parse_parity_choice(has_doc_parity_str))
return parity_tracker_dict
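# Illustrative usage sketch (the file path and table keys below follow the conventions used by
# the parity tests in this folder and are assumptions, not something this module defines):
#   parity_table = parse_parity_tracker_table('test/cpp_api_parity/parity-tracker.md')
#   has_impl_parity, has_doc_parity = parity_table['torch::nn']['torch::nn::L1Loss']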
|
pytorch-master
|
test/cpp_api_parity/parity_table_parser.py
|
# The purpose of this test is to check that we have implementation parity between
# a Python `torch.nn.functional` function and its corresponding C++ `torch::nn::functional`
# function. Concretely, this test does the following:
#
# 1. Get a test params dict from common_nn.py, run forward pass on the Python functional
# created using the test params.
#
# 2. Serialize the Python functional's forward input arguments, deserialize them
# in C++ and use them as input for the C++ functional's forward pass.
#
# 3. Run the forward pass on the C++ functional, and serialize the C++ functional's
# forward output.
#
# 4. Compare Python/C++ functional's forward output. If they are the same, then we
# have implementation parity between the Python and C++ functionals.
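#
# As a rough sketch (file-path variables abbreviated), the generated `test_forward` below
# therefore boils down to:
#   python_output = run_forward(unit_test_class, test_params)                       # step 1
#   serialize_arg_dict_as_script_module(test_params.arg_dict).save(arg_dict_path)   # step 2
#   cpp_test_fn(arg_dict_path, forward_output_path)                                 # step 3
#   unit_test_class.assertEqual(python_output, torch.load(forward_output_path))     # step 4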
import tempfile
from string import Template
import re
import pprint
import os
import torch
from cpp_api_parity.utils import TorchNNFunctionalTestParams, TORCH_NN_COMMON_TEST_HARNESS, \
compile_cpp_code_inline, set_python_tensors_requires_grad, move_python_tensors_to_device, \
add_test, compute_cpp_args_construction_stmts_and_forward_arg_symbols, serialize_arg_dict_as_script_module, \
compute_arg_dict, decorate_test_fn, compute_temp_file_path, generate_error_msg, is_torch_nn_functional_test, \
try_remove_folder
from cpp_api_parity.sample_functional import SAMPLE_FUNCTIONAL_CPP_SOURCE
# Expected substitutions:
#
# ${functional_variant_name} (e.g. `BCELoss_no_reduce`)
# ${cpp_args_construction_stmts}
# ${cpp_function_call}
TORCH_NN_FUNCTIONAL_TEST_FORWARD = Template("""
void ${functional_variant_name}_test_forward(
const std::string& arg_dict_file_path,
const std::string& forward_output_file_path) {
pybind11::gil_scoped_release no_gil;
namespace F = torch::nn::functional;
// Declare arguments
auto arg_dict = load_dict_from_file(arg_dict_file_path);
${cpp_args_construction_stmts};
// Some functionals (such as `F::rrelu`) create random tensors in their call path.
// To make sure the random tensors created are the same in Python/C++, we need
// to set the RNG seed manually.
torch::manual_seed(0);
// Run function with arguments
auto cpp_output = ${cpp_function_call};
// Save the output into a file to be compared in Python later
write_ivalue_to_file(torch::IValue(cpp_output), forward_output_file_path);
}
""")
def run_forward(unit_test_class, test_params):
device = test_params.device
inputs = set_python_tensors_requires_grad(move_python_tensors_to_device(
[arg_value for _, arg_value in test_params.arg_dict['input']], device))
inputs += move_python_tensors_to_device(
[arg_value for _, arg_value in test_params.arg_dict['target']], device)
inputs += move_python_tensors_to_device(
[arg_value for _, arg_value in test_params.arg_dict['extra_args']], device)
# Some functionals (such as `F.rrelu`) create random tensors in their call path.
# To make sure the random tensors created are the same in Python/C++, we need
# to set the RNG seed manually.
torch.manual_seed(0)
python_output = test_params.test_instance.constructor()(*inputs)
return python_output
def test_forward(unit_test_class, test_params):
functional_variant_name = test_params.functional_variant_name
cpp_tmp_folder = test_params.cpp_tmp_folder
# Remove the temporary folder if it exists already
try_remove_folder(cpp_tmp_folder)
os.mkdir(cpp_tmp_folder)
# Run forward on Python functional
python_output = run_forward(unit_test_class, test_params)
# Save Python arguments to be used from C++ function
arg_dict_file_path = compute_temp_file_path(cpp_tmp_folder, functional_variant_name, 'arg_dict')
serialize_arg_dict_as_script_module(test_params.arg_dict).save(arg_dict_file_path)
cpp_test_name = '{}_test_forward'.format(test_params.functional_variant_name)
cpp_test_fn = getattr(unit_test_class.functional_impl_check_cpp_module, cpp_test_name)
def run_cpp_test_fn_and_check_output():
forward_output_file_path = compute_temp_file_path(cpp_tmp_folder, functional_variant_name, 'forward_output')
cpp_test_fn(arg_dict_file_path, forward_output_file_path)
cpp_output = torch.load(forward_output_file_path)
# Check that forward outputs are equal
unit_test_class.assertEqual(
python_output, cpp_output,
msg=generate_error_msg("forward output", cpp_output, python_output))
run_cpp_test_fn_and_check_output()
# Remove temporary folder that stores C++ outputs
try_remove_folder(cpp_tmp_folder)
def compute_functional_name(test_params_dict):
def camel_case_to_snake_case(camel_case_str):
return re.sub(r'(?<!^)(?=[A-Z])', '_', camel_case_str).lower()
if 'cpp_options_args' in test_params_dict:
# Expected format for `cpp_options_args`: `F::FunctionalFuncOptions(...)`
# Example output: `binary_cross_entropy`
return camel_case_to_snake_case(
test_params_dict['cpp_options_args'].split('(')[0].replace('F::', '').replace('FuncOptions', ''))
elif 'cpp_function_call' in test_params_dict:
# Expected format for `cpp_function_call`: `F::functional_name(...)`
# Example output: `binary_cross_entropy`
return test_params_dict['cpp_function_call'].split('(')[0].replace('F::', '')
else:
raise RuntimeError(
"`cpp_options_args` or `cpp_function_call` entry must be present in test params dict:\n{}".format(
pprint.pformat(test_params_dict)))
def compute_cpp_function_call(test_params_dict, arg_dict, functional_name):
if 'cpp_function_call' in test_params_dict:
return test_params_dict['cpp_function_call']
elif 'cpp_options_args' in test_params_dict:
cpp_forward_args_symbols = [arg_name for arg_name, _ in
arg_dict['input'] + arg_dict['target'] + arg_dict['extra_args']]
return 'F::{}({}, {})'.format(
functional_name, ", ".join(cpp_forward_args_symbols), test_params_dict['cpp_options_args'])
else:
raise RuntimeError(
"`cpp_options_args` or `cpp_function_call` entry must be present in test params dict:\n{}".format(
pprint.pformat(test_params_dict)))
def process_test_params_for_functional(test_params_dict, device, test_instance_class):
test_instance = test_instance_class(**test_params_dict)
functional_name = compute_functional_name(test_params_dict)
assert test_instance.get_name().startswith('test_')
# Example output: `BCELoss_no_reduce_cuda`
functional_variant_name = test_instance.get_name()[5:] + (('_' + device) if device != 'cpu' else '')
arg_dict = compute_arg_dict(test_params_dict, test_instance)
return TorchNNFunctionalTestParams(
functional_name=functional_name,
functional_variant_name=functional_variant_name,
test_instance=test_instance,
cpp_function_call=compute_cpp_function_call(test_params_dict, arg_dict, functional_name),
arg_dict=arg_dict,
has_parity=test_params_dict.get('has_parity', True),
device=device,
cpp_tmp_folder=tempfile.mkdtemp(),
)
def write_test_to_test_class(
unit_test_class, test_params_dict, test_instance_class, parity_table, devices):
assert is_torch_nn_functional_test(test_params_dict)
assert 'cpp_options_args' in test_params_dict or 'cpp_function_call' in test_params_dict, (
"To enable C++ API parity test, "
"`cpp_options_args` or `cpp_function_call` entry must be present in test params dict:\n{}. \n"
"If you are interested in adding the C++ API parity test, please see:\n"
"NOTE [How to check NN module / functional API parity between Python and C++ frontends]. \n"
"If not, please add `test_cpp_api_parity=False` to the test params dict and file an issue about this."
).format(pprint.pformat(test_params_dict))
assert not ('cpp_options_args' in test_params_dict and 'cpp_function_call' in test_params_dict), (
"Only one of `cpp_options_args` and `cpp_function_call` entries "
"should be present in test params dict:\n{}").format(pprint.pformat(test_params_dict))
functional_name = compute_functional_name(test_params_dict)
assert hasattr(torch.nn.functional, functional_name), \
"`torch.nn.functional` doesn't have function `{}`. (Discovered while processing\n{}.)".format(
functional_name, pprint.pformat(test_params_dict))
functional_full_name = 'F::' + functional_name
assert functional_full_name in parity_table['torch::nn::functional'], (
"Please add `{}` entry to `torch::nn::functional` section of `test/cpp_api_parity/parity-tracker.md`. "
"(Discovered while processing\n{}.)").format(functional_full_name, pprint.pformat(test_params_dict))
for device in devices:
test_params = process_test_params_for_functional(
test_params_dict=test_params_dict,
device=device,
test_instance_class=test_instance_class,
)
try_remove_folder(test_params.cpp_tmp_folder)
unit_test_name = 'test_torch_nn_functional_{}'.format(test_params.functional_variant_name)
unit_test_class.functional_test_params_map[unit_test_name] = test_params
def test_fn(self):
test_forward(
unit_test_class=self, test_params=unit_test_class.functional_test_params_map[self._testMethodName])
test_fn = decorate_test_fn(
test_fn=test_fn,
test_cuda=test_params_dict.get('test_cuda', True),
has_impl_parity=parity_table['torch::nn::functional'][functional_full_name][0] and
test_params_dict.get('has_parity', True),
device=device)
add_test(unit_test_class, unit_test_name, test_fn)
def generate_test_cpp_sources(test_params, template):
cpp_args_construction_stmts, _ = compute_cpp_args_construction_stmts_and_forward_arg_symbols(test_params)
test_cpp_sources = template.substitute(
functional_variant_name=test_params.functional_variant_name,
cpp_args_construction_stmts=";\n ".join(cpp_args_construction_stmts),
cpp_function_call=test_params.cpp_function_call,
)
return test_cpp_sources
# Build all C++ tests together, instead of once per test.
def build_cpp_tests(unit_test_class, print_cpp_source=False):
assert len(unit_test_class.functional_test_params_map) > 0
cpp_sources = TORCH_NN_COMMON_TEST_HARNESS + SAMPLE_FUNCTIONAL_CPP_SOURCE
functions = []
for test_name, test_params in unit_test_class.functional_test_params_map.items():
cpp_sources += generate_test_cpp_sources(test_params=test_params, template=TORCH_NN_FUNCTIONAL_TEST_FORWARD)
functions.append('{}_test_forward'.format(test_params.functional_variant_name))
if print_cpp_source:
print(cpp_sources)
cpp_module = compile_cpp_code_inline(
name='functional_impl_check',
cpp_sources=cpp_sources,
functions=functions)
unit_test_class.functional_impl_check_cpp_module = cpp_module
|
pytorch-master
|
test/cpp_api_parity/functional_impl_check.py
|
import torch
import torch.nn.functional as F
from torch.testing._internal.common_nn import wrap_functional
'''
`sample_functional` is used by `test_cpp_api_parity.py` to test that the Python / C++ API
parity test harness works for `torch.nn.functional` functions.
When `has_parity=true` is passed to `sample_functional`, its behavior matches the
C++ equivalent.
When `has_parity=false` is passed to `sample_functional`, its behavior differs from the
C++ equivalent.
'''
def sample_functional(x, has_parity):
if has_parity:
return x * 2
else:
return x * 4
torch.nn.functional.sample_functional = sample_functional
SAMPLE_FUNCTIONAL_CPP_SOURCE = """\n
namespace torch {
namespace nn {
namespace functional {
struct C10_EXPORT SampleFunctionalFuncOptions {
SampleFunctionalFuncOptions(bool has_parity) : has_parity_(has_parity) {}
TORCH_ARG(bool, has_parity);
};
Tensor sample_functional(Tensor x, SampleFunctionalFuncOptions options) {
return x * 2;
}
} // namespace functional
} // namespace nn
} // namespace torch
"""
functional_tests = [
dict(
constructor=wrap_functional(F.sample_functional, has_parity=True),
cpp_options_args='F::SampleFunctionalFuncOptions(true)',
input_size=(1, 2, 3),
fullname='sample_functional_has_parity',
has_parity=True,
),
dict(
constructor=wrap_functional(F.sample_functional, has_parity=False),
cpp_options_args='F::SampleFunctionalFuncOptions(false)',
input_size=(1, 2, 3),
fullname='sample_functional_no_parity',
has_parity=False,
),
# This is to test that setting the `test_cpp_api_parity=False` flag skips
# the C++ API parity test accordingly (otherwise this test would run and
# throw a parity error).
dict(
constructor=wrap_functional(F.sample_functional, has_parity=False),
cpp_options_args='F::SampleFunctionalFuncOptions(false)',
input_size=(1, 2, 3),
fullname='sample_functional_THIS_TEST_SHOULD_BE_SKIPPED',
test_cpp_api_parity=False,
),
]
|
pytorch-master
|
test/cpp_api_parity/sample_functional.py
|
from collections import namedtuple
import unittest
import os
import warnings
import shutil
import torch
import torch.utils.cpp_extension
import torch.testing._internal.common_nn as common_nn
from torch.testing._internal.common_cuda import TEST_CUDA
# Note that this namedtuple is for C++ parity test mechanism's internal use.
# For guidance on how to add a new C++ parity test, please see
# NOTE [How to check NN module / functional API parity between Python and C++ frontends]
TorchNNModuleTestParams = namedtuple(
'TorchNNModuleTestParams',
[
# NN module name (e.g. "BCELoss")
'module_name',
# Unique identifier for this module config (e.g. "BCELoss_weights_cuda")
'module_variant_name',
# An instance of an NN test class (e.g. `CriterionTest`) which stores
# necessary information (e.g. input / target / extra_args) for running the Python test
'test_instance',
# Constructor arguments passed to the C++ module constructor, which must be
# strictly equivalent to the Python module constructor arguments
# (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`,
# which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss`
# constructor in Python)
'cpp_constructor_args',
# All arguments used in NN module's forward pass.
# Please see `compute_arg_dict` function for details on how we construct this dict.
# (e.g.
# ```
# arg_dict = {
# 'input': [python_input_tensor],
# 'target': [python_target_tensor],
# 'extra_args': [],
# 'other': [],
# }
# ```
# )
'arg_dict',
# Whether we expect this NN module test to pass the Python/C++ parity test
# (e.g. `True`)
'has_parity',
# Device (e.g. "cuda")
'device',
# Temporary folder to store C++ outputs (to be compared with Python outputs later)
'cpp_tmp_folder',
]
)
# Note that this namedtuple is for C++ parity test mechanism's internal use.
# For guidance on how to add a new C++ parity test, please see
# NOTE [How to check NN module / functional API parity between Python and C++ frontends]
TorchNNFunctionalTestParams = namedtuple(
'TorchNNFunctionalTestParams',
[
# NN functional name (e.g. "binary_cross_entropy")
'functional_name',
# Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda")
'functional_variant_name',
# An instance of an NN test class (e.g. `NewModuleTest`) which stores
# necessary information (e.g. input / target / extra_args) for running the Python test
'test_instance',
# The C++ function call that is strictly equivalent to the Python function call
# (e.g. "F::binary_cross_entropy(
# i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))",
# which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python)
'cpp_function_call',
# All arguments used in NN functional's function call.
# Please see `compute_arg_dict` function for details on how we construct this dict.
# (e.g.
# ```
# arg_dict = {
# 'input': [python_input_tensor],
# 'target': [python_target_tensor],
# 'extra_args': [],
# 'other': [],
# }
# ```
# )
'arg_dict',
# Whether we expect this NN functional test to pass the Python/C++ parity test
# (e.g. `True`)
'has_parity',
# Device (e.g. "cuda")
'device',
# Temporary folder to store C++ outputs (to be compared with Python outputs later)
'cpp_tmp_folder',
]
)
CppArg = namedtuple('CppArg', ['name', 'value'])
TORCH_NN_COMMON_TEST_HARNESS = """
#include <torch/script.h>
void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) {
auto bytes = torch::jit::pickle_save(ivalue);
std::ofstream fout(file_path, std::ios::out | std::ios::binary);
fout.write(bytes.data(), bytes.size());
fout.close();
}
c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) {
c10::Dict<std::string, torch::Tensor> arg_dict;
auto arg_dict_module = torch::jit::load(file_path);
for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) {
arg_dict.insert(p.name, p.value);
}
return arg_dict;
}
// Generates rand tensor with non-equal values. This ensures that duplicate
// values won't cause test failures for modules like MaxPooling.
// size should be small, otherwise randperm fails / long overflows.
torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) {
int64_t total = 1;
for (int64_t elem : size) {
total *= elem;
}
return torch::randperm(total).view(size).to(torch::kDouble);
}
"""
def compile_cpp_code_inline(name, cpp_sources, functions):
cpp_module = torch.utils.cpp_extension.load_inline(
name=name,
cpp_sources=cpp_sources,
extra_cflags=['-g'], # Enable debug symbols by default for debugging test failures.
functions=functions,
verbose=False,
)
return cpp_module
def compute_temp_file_path(cpp_tmp_folder, variant_name, file_suffix):
return os.path.join(cpp_tmp_folder, '{}_{}.pt'.format(variant_name, file_suffix))
def is_torch_nn_functional_test(test_params_dict):
return 'wrap_functional' in str(test_params_dict.get('constructor', ''))
def convert_to_list(python_input):
if isinstance(python_input, torch.Tensor):
return [python_input]
else:
return list(python_input)
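# Note: integer (torch.long) tensors cannot require grad, so they are passed through unchanged below.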
def set_python_tensors_requires_grad(python_tensors):
return [tensor.requires_grad_(True) if tensor.dtype != torch.long else tensor for tensor in python_tensors]
def move_python_tensors_to_device(python_tensors, device):
return [tensor.to(device) for tensor in python_tensors]
def has_test(unit_test_class, test_name):
return hasattr(unit_test_class, test_name)
def add_test(unit_test_class, test_name, test_fn):
if has_test(unit_test_class, test_name):
raise RuntimeError("Found two tests with the same name: " + test_name)
setattr(unit_test_class, test_name, test_fn)
def set_cpp_tensors_requires_grad(cpp_tensor_stmts, python_tensors):
assert len(cpp_tensor_stmts) == len(python_tensors)
return ['{}.requires_grad_(true)'.format(tensor_stmt) if tensor.dtype != torch.long else tensor_stmt
for tensor_stmt, (_, tensor) in zip(cpp_tensor_stmts, python_tensors)]
def move_cpp_tensors_to_device(cpp_tensor_stmts, device):
return ['{}.to("{}")'.format(tensor_stmt, device) for tensor_stmt in cpp_tensor_stmts]
def is_criterion_test(test_instance):
return isinstance(test_instance, common_nn.CriterionTest)
# This function computes the following:
# - What variable declaration statements should show up in the C++ parity test function
# - What arguments should be passed into the C++ module/functional's forward function
#
# For example, for the "L1Loss" test, the return values from this function are:
# ```
# // Note that `arg_dict` stores all tensor values we transfer from Python to C++
# cpp_args_construction_stmts = [
# "auto i0 = arg_dict.at("i0").to("cpu").requires_grad_(true)",
# "auto t0 = arg_dict.at("t0").to("cpu")",
# ],
# cpp_forward_args_symbols = [
# "i0",
# "t0",
# ]
# ```
def compute_cpp_args_construction_stmts_and_forward_arg_symbols(test_params):
device = test_params.device
cpp_forward_args_symbols = []
def add_cpp_forward_args(args):
args_stmts = []
for arg_name, _ in args:
args_stmts.append('auto {} = arg_dict.at("{}")'.format(arg_name, arg_name))
cpp_forward_args_symbols.append(arg_name)
return args_stmts
cpp_forward_input_args_stmts = set_cpp_tensors_requires_grad(move_cpp_tensors_to_device(
add_cpp_forward_args(test_params.arg_dict['input']), device), test_params.arg_dict['input'])
cpp_forward_target_args_stmts = move_cpp_tensors_to_device(
add_cpp_forward_args(test_params.arg_dict['target']), device)
cpp_forward_extra_args_stmts = move_cpp_tensors_to_device(
add_cpp_forward_args(test_params.arg_dict['extra_args']), device)
# Build the list of other arguments needed
cpp_other_args_stmts = []
for arg_name, _ in test_params.arg_dict['other']:
cpp_other_args_stmts.append('auto {} = arg_dict.at("{}")'.format(arg_name, arg_name))
cpp_other_args_stmts = move_cpp_tensors_to_device(cpp_other_args_stmts, device)
cpp_args_construction_stmts = cpp_forward_input_args_stmts + cpp_forward_target_args_stmts + \
cpp_forward_extra_args_stmts + cpp_other_args_stmts
return cpp_args_construction_stmts, cpp_forward_args_symbols
def serialize_arg_dict_as_script_module(arg_dict):
arg_dict_flat = {arg_name: arg_value
for arg_name, arg_value in
arg_dict['input'] + arg_dict['target'] + arg_dict['extra_args'] + arg_dict['other']}
arg_dict_module = torch.nn.Module()
for arg_name, arg_value in arg_dict_flat.items():
assert isinstance(arg_value, torch.Tensor)
arg_dict_module.register_buffer(arg_name, arg_value)
return torch.jit.script(arg_dict_module)
# NOTE: any argument symbol used in `cpp_constructor_args` / `cpp_options_args` / `cpp_function_call`
# must have a mapping in `cpp_var_map`.
#
# The mapping can take one of the following formats:
#
# 1. `argument_name` -> Python value
# 2. `argument_name` -> '_get_input()' (which means `argument_name` in C++ will be bound to `test_instance._get_input()`)
#
# For example:
# ```
# def bceloss_weights_no_reduce_test():
# t = torch.randn(15, 10).gt(0).double()
# weights = torch.rand(10)
# return dict(
# fullname='BCELoss_weights_no_reduce',
# constructor=wrap_functional(
# lambda i: F.binary_cross_entropy(i, t.type_as(i),
# weight=weights.type_as(i), reduction='none')),
# cpp_function_call='''F::binary_cross_entropy(
# i, t.to(i.options()),
# F::BinaryCrossEntropyFuncOptions()
# .weight(weights.to(i.options()))
# .reduction(torch::kNone))''',
# input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),
# cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights},
# reference_fn=lambda i, p, m: -(t * i.log() + (1 - t) * (1 - i).log()) * weights,
# )
# ```
def compute_arg_dict(test_params_dict, test_instance):
arg_dict = {
'input': [],
'target': [],
'extra_args': [],
'other': [],
}
def put_args_into_arg_dict(arg_type, arg_type_prefix, args):
for i, arg in enumerate(args):
arg_dict[arg_type].append(CppArg(name=arg_type_prefix + str(i), value=arg))
put_args_into_arg_dict('input', 'i', convert_to_list(test_instance._get_input()))
if is_criterion_test(test_instance):
put_args_into_arg_dict('target', 't', convert_to_list(test_instance._get_target()))
if test_instance.extra_args:
put_args_into_arg_dict('extra_args', 'e', convert_to_list(test_instance.extra_args))
cpp_var_map = test_params_dict.get('cpp_var_map', {})
for arg_name, arg_value in cpp_var_map.items():
if isinstance(arg_value, str):
if arg_value == '_get_input()':
arg_dict['other'].append(CppArg(name=arg_name, value=test_instance._get_input()))
else:
raise RuntimeError("`{}` has unsupported string value: {}".format(arg_name, arg_value))
elif isinstance(arg_value, torch.Tensor):
arg_dict['other'].append(CppArg(name=arg_name, value=arg_value))
else:
raise RuntimeError("`{}` has unsupported value: {}".format(arg_name, arg_value))
return arg_dict
def decorate_test_fn(test_fn, test_cuda, has_impl_parity, device):
if device == 'cuda':
test_fn = unittest.skipIf(not TEST_CUDA, "CUDA unavailable")(test_fn)
test_fn = unittest.skipIf(not test_cuda, "Excluded from CUDA tests")(test_fn)
# If `Implementation Parity` entry in parity table for this module is `No`,
# or `has_parity` entry in test params dict is `False`, we mark the test as
# expected failure.
if not has_impl_parity:
test_fn = unittest.expectedFailure(test_fn)
return test_fn
MESSAGE_HOW_TO_FIX_CPP_PARITY_TEST_FAILURE = '''
What should I do when C++ API parity test is failing?
- If you are changing the implementation of an existing `torch.nn` module / `torch.nn.functional` function:
Answer: Ideally you should also change the C++ API implementation for that module / function
(you can start by searching for the module / function name in `torch/csrc/api/` folder).
- If you are adding a new test for an existing `torch.nn` module / `torch.nn.functional` function:
Answer: Ideally you should fix the C++ API implementation for that module / function
to exactly match the Python API implementation (you can start by searching for the module /
function name in `torch/csrc/api/` folder).
- If you are adding a test for a *new* `torch.nn` module / `torch.nn.functional` function:
Answer: Ideally you should add the corresponding C++ API implementation for that module / function,
and it should exactly match the Python API implementation. (We have done a large effort on this
which is tracked at https://github.com/pytorch/pytorch/issues/25883.)
However, if any of the above is proven to be too complicated, you can just add
`test_cpp_api_parity=False` to any failing test in `torch/testing/_internal/common_nn.py`,
and the C++ API parity test will be skipped accordingly. Note that you should
also file an issue when you do this.
For more details on how to add a C++ API parity test, please see:
NOTE [How to check NN module / functional API parity between Python and C++ frontends]
'''
def generate_error_msg(name, cpp_value, python_value):
return (
"Parity test failed: {} in C++ has value: {}, "
"which does not match the corresponding value in Python: {}.\n{}").format(
name, cpp_value, python_value, MESSAGE_HOW_TO_FIX_CPP_PARITY_TEST_FAILURE)
def try_remove_folder(folder_path):
if os.path.exists(folder_path):
# Don't block the process if this fails, but show the error message as warning.
try:
shutil.rmtree(folder_path)
except Exception as e:
warnings.warn("Non-blocking folder removal fails with the following error:\n{}".format(str(e)))
|
pytorch-master
|
test/cpp_api_parity/utils.py
|
# The purpose of this test is to check that we have implementation parity between
# a Python `torch.nn` module and its corresponding C++ `torch::nn` module. Concretely,
# this test does the following:
#
# 1. Get a test params dict from common_nn.py, run forward and backward on the
# Python module created using the test params.
#
# 2. Serialize the Python module's parameters / buffers and its forward input
# arguments, deserialize them in C++ and load them into the C++ module.
#
# 3. Run the same forward and backward passes on the C++ module, and serialize
# the C++ module's forward output and backward gradients.
#
# 4. Compare Python/C++ module's forward output and backward gradients. If they
# are the same, then we have implementation parity between Python/C++ module.
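#
# As a rough sketch (file-path variables abbreviated), the generated `test_forward_backward`
# below therefore boils down to:
#   script_module, python_output, python_grad_dict = run_python_forward_backward(...)      # step 1
#   script_module.save(module_path)                                                        # step 2
#   serialize_arg_dict_as_script_module(test_params.arg_dict).save(arg_dict_path)          # step 2
#   cpp_test_fn(arg_dict_path, module_path, forward_output_path, backward_grad_dict_path)  # step 3
#   compare python_output / python_grad_dict against torch.load(...) of the C++ outputs    # step 4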
import tempfile
from string import Template
import types
import pprint
import os
import torch
from cpp_api_parity.utils import TorchNNModuleTestParams, TORCH_NN_COMMON_TEST_HARNESS, \
compile_cpp_code_inline, set_python_tensors_requires_grad, move_python_tensors_to_device, \
add_test, compute_cpp_args_construction_stmts_and_forward_arg_symbols, serialize_arg_dict_as_script_module, \
compute_arg_dict, decorate_test_fn, compute_temp_file_path, generate_error_msg, is_torch_nn_functional_test, \
try_remove_folder
from cpp_api_parity.sample_module import SAMPLE_MODULE_CPP_SOURCE
# Expected substitutions:
#
# ${module_variant_name} (e.g. `Linear_no_bias_cpu`)
# ${module_qualified_name} (e.g. `torch::nn::Linear`)
# ${cpp_args_construction_stmts}
# ${cpp_constructor_args}
# ${device}
# ${cpp_forward_args_symbols}
TORCH_NN_MODULE_TEST_FORWARD_BACKWARD = Template("""
void ${module_variant_name}_test_forward_backward(
const std::string& arg_dict_file_path,
const std::string& module_file_path,
const std::string& forward_output_file_path,
const std::string& backward_grad_dict_file_path) {
pybind11::gil_scoped_release no_gil;
// Declare arguments
auto arg_dict = load_dict_from_file(arg_dict_file_path);
${cpp_args_construction_stmts};
// Construct module and load params/buffers from Python module
${module_qualified_name} module${cpp_constructor_args};
module->to(std::string("${device}"));
torch::load(module, module_file_path);
// Some modules (such as `RReLU`) create random tensors in their forward pass.
// To make sure the random tensors created are the same in Python/C++, we need
// to set the RNG seed manually.
torch::manual_seed(0);
// Forward pass
auto cpp_output = module(${cpp_forward_args_symbols});
// Save the output into a file to be compared in Python later
write_ivalue_to_file(torch::IValue(cpp_output), forward_output_file_path);
// Backward pass
cpp_output.sum().backward();
// Put all gradients into a c10::Dict, save it into a file to be compared in Python later
c10::Dict<std::string, torch::Tensor> grad_dict;
for (const auto& param : module->named_parameters()) {
torch::Tensor grad = param.value().grad();
if (grad.is_sparse()) {
grad_dict.insert(param.key() + "_grad_indices", grad.coalesce().indices());
grad_dict.insert(param.key() + "_grad_values", grad.coalesce().values());
} else {
grad_dict.insert(param.key() + "_grad", grad);
}
}
write_ivalue_to_file(torch::IValue(grad_dict), backward_grad_dict_file_path);
}
""")
def run_python_forward_backward(unit_test_class, test_params):
device = test_params.device
module = test_params.test_instance.constructor(*test_params.test_instance.constructor_args).to(device)
inputs = set_python_tensors_requires_grad(move_python_tensors_to_device(
[arg_value for _, arg_value in test_params.arg_dict['input']], device))
inputs += move_python_tensors_to_device(
[arg_value for _, arg_value in test_params.arg_dict['target']], device)
inputs += move_python_tensors_to_device(
[arg_value for _, arg_value in test_params.arg_dict['extra_args']], device)
# Some modules (such as `RReLU`) create random tensors in their forward pass.
# To make sure the random tensors created are the same in Python/C++, we need
# to set the RNG seed manually.
torch.manual_seed(0)
# Forward pass
python_output = module(*inputs)
# NOTE: This is a workaround to allow any module to be traced.
# We can do this because we are only interested in transferring
# the Python module's parameters and buffers to the C++ module.
module.forward = types.MethodType(lambda self, input: input, module)
script_module = torch.jit.trace(module, torch.tensor(0))
# Backward pass
python_output.sum().backward()
# Put all gradients into a dict, to be compared later
python_grad_dict = {}
for name, param in module.named_parameters():
grad = param.grad
if grad.is_sparse:
python_grad_dict[name + "_grad_indices"] = grad.coalesce().indices()
python_grad_dict[name + "_grad_values"] = grad.coalesce().values()
else:
python_grad_dict[name + "_grad"] = grad
return script_module, python_output, python_grad_dict
def test_forward_backward(unit_test_class, test_params):
module_variant_name = test_params.module_variant_name
cpp_tmp_folder = test_params.cpp_tmp_folder
# Remove the temporary folder if it exists already
try_remove_folder(cpp_tmp_folder)
os.mkdir(cpp_tmp_folder)
# Run forward and backward on Python module
script_module, python_output, python_grad_dict = run_python_forward_backward(unit_test_class, test_params)
# Save Python module and arguments to be used from C++ function
module_file_path = compute_temp_file_path(cpp_tmp_folder, module_variant_name, 'module')
arg_dict_file_path = compute_temp_file_path(cpp_tmp_folder, module_variant_name, 'arg_dict')
script_module.save(module_file_path)
serialize_arg_dict_as_script_module(test_params.arg_dict).save(arg_dict_file_path)
cpp_test_name = '{}_test_forward_backward'.format(test_params.module_variant_name)
cpp_test_fn = getattr(unit_test_class.module_impl_check_cpp_module, cpp_test_name)
def run_cpp_test_fn_and_check_output():
forward_output_file_path = compute_temp_file_path(cpp_tmp_folder, module_variant_name, 'forward_output')
backward_grad_dict_file_path = compute_temp_file_path(cpp_tmp_folder, module_variant_name, 'backward_grad_dict')
cpp_test_fn(arg_dict_file_path, module_file_path, forward_output_file_path, backward_grad_dict_file_path)
cpp_output = torch.load(forward_output_file_path)
cpp_grad_dict = torch.load(backward_grad_dict_file_path)
# Check that forward outputs are equal
unit_test_class.assertEqual(python_output, cpp_output,
msg=generate_error_msg("forward output", cpp_output, python_output))
# Check that module parameter gradients are equal after backward pass
unit_test_class.assertEqual(
len(python_grad_dict), len(cpp_grad_dict),
msg=generate_error_msg("# of parameters", len(cpp_grad_dict), len(python_grad_dict)))
for key in python_grad_dict:
param_name = None
for suffix in ['_grad', '_grad_indices', '_grad_values']:
if key.endswith(suffix):
param_name = key[:-len(suffix)]
break
assert param_name is not None
sparsity_str = 'sparse' if key.endswith('_grad_indices') or key.endswith('_grad_values') else 'dense'
unit_test_class.assertTrue(
key in cpp_grad_dict,
msg=generate_error_msg(
"\"Does module have a parameter named `{}` with {} gradient?\"".format(param_name, sparsity_str),
False, True))
unit_test_class.assertEqual(
python_grad_dict[key], cpp_grad_dict[key],
msg=generate_error_msg(
"`{}`'s {} gradient (`{}`)".format(param_name, sparsity_str, key),
cpp_grad_dict[key], python_grad_dict[key]))
run_cpp_test_fn_and_check_output()
# Remove temporary folder that stores C++ outputs
try_remove_folder(cpp_tmp_folder)
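# Derive the module name from the test params dict, e.g. fullname='BCELoss_weights' -> 'BCELoss',
# falling back to the explicit 'module_name' entry when no 'fullname' is given.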
def compute_module_name(test_params_dict):
fullname = test_params_dict.get('fullname', None)
if fullname:
module_name = fullname.split('_')[0]
else:
module_name = test_params_dict.get('module_name')
return module_name
def process_test_params_for_module(test_params_dict, device, test_instance_class):
module_name = compute_module_name(test_params_dict)
test_params_dict['constructor'] = test_params_dict.get('constructor', getattr(torch.nn, module_name))
test_instance = test_instance_class(**test_params_dict)
assert test_instance.get_name().startswith('test_')
# Example output: `BCELoss_weights_cuda`
module_variant_name = test_instance.get_name()[5:] + (('_' + device) if device != 'cpu' else '')
if 'constructor_args' in test_params_dict:
assert 'cpp_constructor_args' in test_params_dict, (
"If `constructor_args` is present in test params dict, to enable C++ API parity test, "
"`cpp_constructor_args` must be present in:\n{}"
"If you are interested in adding the C++ API parity test, please see:\n"
"NOTE [How to check NN module / functional API parity between Python and C++ frontends]. \n"
"If not, please add `test_cpp_api_parity=False` to the test params dict and file an issue about this."
).format(pprint.pformat(test_params_dict))
return TorchNNModuleTestParams(
module_name=module_name,
module_variant_name=module_variant_name,
test_instance=test_instance,
cpp_constructor_args=test_params_dict.get('cpp_constructor_args', ''),
arg_dict=compute_arg_dict(test_params_dict, test_instance),
has_parity=test_params_dict.get('has_parity', True),
device=device,
cpp_tmp_folder=tempfile.mkdtemp(),
)
def write_test_to_test_class(
unit_test_class, test_params_dict, test_instance_class, parity_table, devices):
assert not is_torch_nn_functional_test(test_params_dict)
module_name = compute_module_name(test_params_dict)
assert hasattr(torch.nn, module_name), (
"`torch.nn` doesn't have module `{}`. "
"If you are adding a new test, please set `fullname` using format `ModuleName_desc` "
"or set `module_name` using format `ModuleName` in the module test dict:\n{}"
).format(module_name, pprint.pformat(test_params_dict))
module_full_name = 'torch::nn::' + module_name
assert module_full_name in parity_table['torch::nn'], (
"Please add `{}` entry to `torch::nn` section of `test/cpp_api_parity/parity-tracker.md`. "
"(Discovered while processing\n{}.)").format(module_full_name, pprint.pformat(test_params_dict))
for device in devices:
test_params = process_test_params_for_module(
test_params_dict=test_params_dict,
device=device,
test_instance_class=test_instance_class,
)
try_remove_folder(test_params.cpp_tmp_folder)
unit_test_name = 'test_torch_nn_{}'.format(test_params.module_variant_name)
unit_test_class.module_test_params_map[unit_test_name] = test_params
def test_fn(self):
test_forward_backward(
unit_test_class=self, test_params=unit_test_class.module_test_params_map[self._testMethodName])
test_fn = decorate_test_fn(
test_fn=test_fn,
test_cuda=test_params_dict.get('test_cuda', True),
has_impl_parity=parity_table['torch::nn'][module_full_name][0] and
test_params_dict.get('has_parity', True),
device=device)
add_test(unit_test_class, unit_test_name, test_fn)
def generate_test_cpp_sources(test_params, template):
device = test_params.device
cpp_constructor_args = test_params.cpp_constructor_args
if cpp_constructor_args != '':
cpp_constructor_args = '({})'.format(cpp_constructor_args)
cpp_args_construction_stmts, cpp_forward_args_symbols = \
compute_cpp_args_construction_stmts_and_forward_arg_symbols(test_params)
test_cpp_sources = template.substitute(
module_variant_name=test_params.module_variant_name,
module_qualified_name='torch::nn::{}'.format(test_params.module_name),
cpp_args_construction_stmts=";\n ".join(cpp_args_construction_stmts),
cpp_constructor_args=cpp_constructor_args,
cpp_forward_args_symbols=", ".join(cpp_forward_args_symbols),
device=device,
)
return test_cpp_sources
# Build all C++ tests together, instead of once per test.
def build_cpp_tests(unit_test_class, print_cpp_source=False):
assert len(unit_test_class.module_test_params_map) > 0
cpp_sources = TORCH_NN_COMMON_TEST_HARNESS + SAMPLE_MODULE_CPP_SOURCE
functions = []
for test_name, test_params in unit_test_class.module_test_params_map.items():
cpp_sources += generate_test_cpp_sources(
test_params=test_params, template=TORCH_NN_MODULE_TEST_FORWARD_BACKWARD)
functions.append('{}_test_forward_backward'.format(test_params.module_variant_name))
if print_cpp_source:
print(cpp_sources)
cpp_module = compile_cpp_code_inline(
name='module_impl_check',
cpp_sources=cpp_sources,
functions=functions)
unit_test_class.module_impl_check_cpp_module = cpp_module
|
pytorch-master
|
test/cpp_api_parity/module_impl_check.py
|
import torch
'''
`SampleModule` is used by `test_cpp_api_parity.py` to test that the Python / C++ API
parity test harness works for `torch.nn.Module` subclasses.
When `SampleModule.has_parity` is true, the behavior of `forward` / `backward`
matches the C++ equivalent.
When `SampleModule.has_parity` is false, the behavior of `forward` / `backward`
differs from the C++ equivalent.
'''
class SampleModule(torch.nn.Module):
def __init__(self, has_parity, has_submodule):
super(SampleModule, self).__init__()
self.has_parity = has_parity
if has_submodule:
self.submodule = SampleModule(self.has_parity, False)
self.has_submodule = has_submodule
self.register_parameter('param', torch.nn.Parameter(torch.empty(3, 4)))
self.reset_parameters()
def reset_parameters(self):
with torch.no_grad():
self.param.fill_(1)
def forward(self, x):
submodule_forward_result = self.submodule(x) if hasattr(self, 'submodule') else 0
if self.has_parity:
return x + self.param * 2 + submodule_forward_result
else:
return x + self.param * 4 + submodule_forward_result + 3
torch.nn.SampleModule = SampleModule
SAMPLE_MODULE_CPP_SOURCE = """\n
namespace torch {
namespace nn {
struct C10_EXPORT SampleModuleOptions {
SampleModuleOptions(bool has_parity, bool has_submodule) : has_parity_(has_parity), has_submodule_(has_submodule) {}
TORCH_ARG(bool, has_parity);
TORCH_ARG(bool, has_submodule);
};
struct C10_EXPORT SampleModuleImpl : public torch::nn::Cloneable<SampleModuleImpl> {
explicit SampleModuleImpl(SampleModuleOptions options) : options(std::move(options)) {
if (options.has_submodule()) {
submodule = register_module(
"submodule",
std::make_shared<SampleModuleImpl>(SampleModuleOptions(options.has_parity(), false)));
}
reset();
}
void reset() {
param = register_parameter("param", torch::ones({3, 4}));
}
torch::Tensor forward(torch::Tensor x) {
return x + param * 2 + (submodule ? submodule->forward(x) : torch::zeros_like(x));
}
SampleModuleOptions options;
torch::Tensor param;
std::shared_ptr<SampleModuleImpl> submodule{nullptr};
};
TORCH_MODULE(SampleModule);
} // namespace nn
} // namespace torch
"""
module_tests = [
dict(
module_name='SampleModule',
desc='has_parity',
constructor_args=(True, True),
cpp_constructor_args='torch::nn::SampleModuleOptions(true, true)',
input_size=(3, 4),
cpp_input_args=['torch::randn({3, 4})'],
has_parity=True,
),
dict(
fullname='SampleModule_no_parity',
constructor=lambda: SampleModule(has_parity=False, has_submodule=True),
cpp_constructor_args='torch::nn::SampleModuleOptions(false, true)',
input_size=(3, 4),
cpp_input_args=['torch::randn({3, 4})'],
has_parity=False,
),
# This is to test that setting the `test_cpp_api_parity=False` flag skips
# the C++ API parity test accordingly (otherwise this test would run and
# throw a parity error).
dict(
fullname='SampleModule_THIS_TEST_SHOULD_BE_SKIPPED',
constructor=lambda: SampleModule(False, True),
cpp_constructor_args='torch::nn::SampleModuleOptions(false, true)',
input_size=(3, 4),
cpp_input_args=['torch::randn({3, 4})'],
test_cpp_api_parity=False,
),
]
|
pytorch-master
|
test/cpp_api_parity/sample_module.py
|
# Owner(s): ["oncall: distributed"]
import os
import torch
import torch.distributed as dist
from torch.testing._internal.common_utils import (
run_tests,
)
from torch.futures import Future
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
import test_c10d_common
import weakref
from torch._C._distributed_c10d import _create_work_from_future
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
)
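# Helper that wraps an already-completed Future in a c10d Work object; this is the path
# exercised by the `use_wrapper=True` process-group variants defined below.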
def create_work(result):
future = Future()
future.set_result(result)
return _create_work_from_future(future)
class MyWork(dist._Work):
def __init__(self, result, pg):
super().__init__()
self.result_ = result
self.future_ = torch.futures.Future()
self.future_.set_result(result)
self.pg_ = weakref.ref(pg)
def wait(self, timeout):
self.pg_().wait_count += 1
return True
def get_future(self):
self.pg_().get_future_count += 1
return self.future_
class LonelyRankProcessGroup(dist.ProcessGroup):
"""
This PG only supports world_size of 1
"""
def __init__(self, rank, world, use_wrapper):
super(LonelyRankProcessGroup, self).__init__(rank, world)
assert rank == 0
assert world == 1
self._rank = rank
self._world = world
self.wait_count = 0
self.get_future_count = 0
self.use_wrapper = use_wrapper
self._work = []
def broadcast(self, tensor_list, opts):
if self.use_wrapper:
return create_work(tensor_list)
res = MyWork(tensor_list, self)
self._work.append(res)
return res
def allgather(self, output_tensors, input_tensor, opts):
for o, i in zip(output_tensors[0], input_tensor):
o.copy_(i)
if self.use_wrapper:
return create_work(output_tensors)
res = MyWork(output_tensors, self)
self._work.append(res)
return res
def allreduce(self, tensors, opts):
if self.use_wrapper:
return create_work(tensors)
res = MyWork(tensors, self)
self._work.append(res)
return res
def size(self):
return self._world
def getBackendName(self):
return "lonely-pg"
def __repr__(self):
return f"PLG w:{self._world} r:{self._rank}"
# We cannot use parametrize as some tests are defined on the base class and use _get_process_group
class AbstractDDPSingleRank(test_c10d_common.CommonDistributedDataParallelTest):
def setUp(self):
super(AbstractDDPSingleRank, self).setUp()
self._spawn_processes()
@property
def world_size(self):
return 1
def tearDown(self):
super(AbstractDDPSingleRank, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def _get_process_group(self):
return LonelyRankProcessGroup(self.rank, self.world_size, self.use_wrapper)
def test_ddp_invoke_work_object(self):
pg = self._get_process_group()
torch.manual_seed(123)
model = nn.Sequential(
nn.Linear(2, 2),
nn.ReLU()
)
wrapped_model = model
input_tensor = torch.rand(2)
model = DDP(model, process_group=pg)
model(input_tensor).sum().backward()
ddp_grad = wrapped_model[0].bias.grad.clone()
wrapped_model.zero_grad()
wrapped_model(input_tensor).sum().backward()
self.assertEqual(wrapped_model[0].bias.grad, ddp_grad)
if not self.use_wrapper:
self.assertTrue(pg.wait_count > 0)
self.assertTrue(pg.get_future_count > 0)
def test_ddp_with_pypg(self):
pg = self._get_process_group()
self._test_ddp_with_process_group(pg, [torch.device("cpu")], device_ids=None)
def test_ddp_with_pypg_with_grad_views(self):
pg = self._get_process_group()
self._test_ddp_with_process_group(pg, [torch.device("cpu")], device_ids=None, gradient_as_bucket_view=True)
class TestDDPWithWorkSubclass(AbstractDDPSingleRank, MultiProcessTestCase):
@property
def use_wrapper(self):
return False
class TestDDPWithWorkWrapper(AbstractDDPSingleRank, MultiProcessTestCase):
@property
def use_wrapper(self):
return True
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/distributed/test_c10d_pypg.py
|
# Owner(s): ["oncall: distributed"]
import os
import sys
import torch
import torch.distributed as dist
torch.backends.cuda.matmul.allow_tf32 = False
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN, NO_MULTIPROCESSING_SPAWN
from torch.testing._internal.distributed.distributed_test import (
DistributedTest, TestDistBackend
)
if TEST_WITH_DEV_DBG_ASAN:
print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
sys.exit(0)
if NO_MULTIPROCESSING_SPAWN:
print("Spawn not available, skipping tests.", file=sys.stderr)
sys.exit(0)
BACKEND = os.environ["BACKEND"]
if BACKEND == "gloo" or BACKEND == "nccl":
class TestDistBackendWithSpawn(TestDistBackend, DistributedTest._DistTestBase):
def setUp(self):
super().setUp()
self._spawn_processes()
torch.backends.cudnn.flags(allow_tf32=False).__enter__()
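# Editor's note: calling __enter__() on the cudnn flags context manager without a matching
# __exit__() appears to be a deliberate trick to keep TF32 disabled in cuDNN for the entire
# lifetime of each spawned test process, rather than for a single with-block only.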
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/test_distributed_spawn.py
|
# Owner(s): ["oncall: distributed"]
import os
import sys
from functools import wraps, partial
import torch
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
TEST_SKIPS
)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
if TEST_WITH_DEV_DBG_ASAN:
print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
sys.exit(0)
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO
WORLD_SIZE = min(4, max(2, torch.cuda.device_count()))
def with_comms(func=None):
if func is None:
return partial(
with_comms,
)
@wraps(func)
def wrapper(self, *args, **kwargs):
if BACKEND == dist.Backend.NCCL and torch.cuda.device_count() < self.world_size:
sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
self.dist_init()
func(self)
self.destroy_comms()
return wrapper
class TestObjectCollectives(MultiProcessTestCase):
def setUp(self):
super(TestObjectCollectives, self).setUp()
os.environ["WORLD_SIZE"] = str(self.world_size)
os.environ["BACKEND"] = BACKEND
self._spawn_processes()
@property
def device(self):
return torch.device(self.rank) if BACKEND == dist.Backend.NCCL \
else torch.device("cpu")
@property
def world_size(self):
return WORLD_SIZE
@property
def process_group(self):
return dist.group.WORLD
def destroy_comms(self):
# Wait for all ranks to reach here before starting shutdown.
dist.barrier()
dist.destroy_process_group()
def dist_init(self):
dist.init_process_group(
backend=BACKEND,
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
# set device for nccl pg for collectives
if BACKEND == "nccl":
torch.cuda.set_device(self.rank)
@with_comms()
def test_all_gather_object(self):
output = [None] * dist.get_world_size()
dist.all_gather_object(
object_list=output,
obj=self.rank)
for i, v in enumerate(output):
self.assertEqual(i, v, f"rank: {self.rank}")
@with_comms()
def test_gather_object(self):
output = [None] * dist.get_world_size() if self.rank == 0 else None
dist.gather_object(
obj=self.rank,
object_gather_list=output)
if self.rank == 0:
for i, v in enumerate(output):
self.assertEqual(i, v, f"rank: {self.rank}")
@with_comms()
def test_broadcast_object_list(self):
val = 99 if self.rank == 0 else None
object_list = [val] * dist.get_world_size()
# TODO test with broadcast_object_list's device argument
dist.broadcast_object_list(object_list=object_list)
self.assertEqual(99, object_list[0])
@with_comms()
def test_scatter_object_list(self):
input_list = list(range(dist.get_world_size())) if self.rank == 0 else None
output_list = [None]
dist.scatter_object_list(
scatter_object_output_list=output_list,
scatter_object_input_list=input_list)
self.assertEqual(self.rank, output_list[0])
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/test_c10d_object_collectives.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.cuda.nccl as nccl
import torch.cuda
import torch.distributed as c10d
from torch.testing._internal.common_utils import (TestCase, run_tests,
IS_WINDOWS, load_tests,
TEST_WITH_ROCM,
sandcastle_skip_if)
from torch.testing._internal.common_cuda import CUDA11OrLater, TEST_CUDA, TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
import re
HIP_VERSION = 0.0 if torch.version.hip is None else float(re.search(r"^\d+\.\d+", torch.version.hip)[0])
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
nGPUs = torch.cuda.device_count()
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
datatypes = [torch.float]
if (TEST_CUDA and CUDA11OrLater and c10d.is_nccl_available() and nccl.version() >= (2, 10)) or TEST_WITH_ROCM:
datatypes.append(torch.bfloat16)
class TestNCCL(TestCase):
@sandcastle_skip_if(IS_WINDOWS, "NCCL doesn't support Windows")
def test_unique_id(self, device):
uid = nccl.unique_id()
self.assertIsInstance(uid, bytes)
self.assertGreater(len(uid), 1)
@sandcastle_skip_if(TEST_WITH_ROCM and HIP_VERSION < 3.5, 'Skip NCCL tests for ROCm')
@sandcastle_skip_if(IS_WINDOWS, "NCCL doesn't support Windows")
@sandcastle_skip_if(not TEST_MULTIGPU, "only one GPU detected")
@dtypes(*datatypes)
def test_broadcast(self, device, dtype):
expected = torch.zeros(128).uniform_().to(dtype=dtype)
tensors = [expected.cuda()]
for device in range(1, torch.cuda.device_count()):
tensors.append(torch.zeros(128, dtype=dtype, device=device))
nccl.broadcast(tensors)
for i in range(torch.cuda.device_count()):
self.assertEqual(tensors[i], expected)
# Test with tuple
tensors = [expected.cuda()]
for device in range(1, torch.cuda.device_count()):
tensors.append(torch.zeros(128, dtype=dtype, device=device))
nccl.broadcast(tuple(tensors))
for i in range(torch.cuda.device_count()):
self.assertEqual(tensors[i], expected)
@sandcastle_skip_if(TEST_WITH_ROCM and HIP_VERSION < 3.5, 'Skip NCCL tests for ROCm')
@sandcastle_skip_if(IS_WINDOWS, "NCCL doesn't support Windows")
@sandcastle_skip_if(not TEST_MULTIGPU, "only one GPU detected")
@dtypes(*datatypes)
def test_reduce(self, device, dtype):
cpu_tensors = [torch.zeros(128).uniform_().to(dtype=dtype) for i in range(nGPUs)]
expected = torch.zeros(128, dtype=dtype)
for t in cpu_tensors:
expected.add_(t)
tensors = [cpu_tensors[i].cuda(i) for i in range(nGPUs)]
nccl.reduce(tensors)
self.assertEqual(tensors[0], expected)
# Test with tuple
tensors = [cpu_tensors[i].cuda(i) for i in range(nGPUs)]
nccl.reduce(tuple(tensors))
self.assertEqual(tensors[0], expected)
@sandcastle_skip_if(IS_WINDOWS, "NCCL doesn't support Windows")
@sandcastle_skip_if(not TEST_MULTIGPU, "only one GPU detected")
@sandcastle_skip_if(TEST_WITH_ROCM and HIP_VERSION < 3.5 and dtype == torch.bfloat16, "Skip bfloat16 test for ROCm < 3.5")
@dtypes(*datatypes)
def test_all_reduce(self, device, dtype):
cpu_tensors = [torch.zeros(128).uniform_().to(dtype=dtype) for i in range(nGPUs)]
expected = torch.zeros(128, dtype=dtype)
for t in cpu_tensors:
expected.add_(t)
tensors = [cpu_tensors[i].cuda(i) for i in range(nGPUs)]
nccl.all_reduce(tensors)
for tensor in tensors:
self.assertEqual(tensor, expected)
# Test with tuple.
tensors = tuple(cpu_tensors[i].cuda(i) for i in range(nGPUs))
nccl.all_reduce(tensors)
for tensor in tensors:
self.assertEqual(tensor, expected)
# Test with set.
tensors = {cpu_tensors[i].cuda(i) for i in range(nGPUs)}
nccl.all_reduce(tensors)
for tensor in tensors:
self.assertEqual(tensor, expected)
@sandcastle_skip_if(TEST_WITH_ROCM and HIP_VERSION < 3.5, 'Skip NCCL tests for ROCm')
@sandcastle_skip_if(IS_WINDOWS, "NCCL doesn't support Windows")
def test_collective_errors(self, device):
t = torch.rand(10).cuda(0)
with self.assertRaisesRegex(TypeError, "Inputs should be a collection of tensors"):
nccl.all_reduce(t)
with self.assertRaisesRegex(TypeError, "Inputs should be a collection of tensors"):
nccl.reduce(t)
with self.assertRaisesRegex(TypeError, "Inputs should be a collection of tensors"):
nccl.broadcast(t)
with self.assertRaisesRegex(TypeError, "Inputs should be a collection of tensors"):
nccl.all_gather(t, t)
with self.assertRaisesRegex(TypeError, "Inputs should be a collection of tensors"):
nccl.reduce_scatter(t, t)
@sandcastle_skip_if(TEST_WITH_ROCM and HIP_VERSION < 3.5, 'Skip NCCL tests for ROCm')
@sandcastle_skip_if(IS_WINDOWS, "NCCL doesn't support Windows")
@sandcastle_skip_if(not TEST_MULTIGPU, "only one GPU detected")
@dtypes(*datatypes)
def test_all_gather(self, device, dtype):
cpu_inputs = [torch.zeros(128).uniform_().to(dtype=dtype) for i in range(nGPUs)]
expected = torch.cat(cpu_inputs, 0)
inputs = [cpu_inputs[i].cuda(i) for i in range(nGPUs)]
outputs = [torch.zeros(128 * nGPUs, device=i, dtype=dtype)
for i in range(nGPUs)]
nccl.all_gather(inputs, outputs)
for tensor in outputs:
self.assertEqual(tensor, expected)
# Test with tuple.
inputs = [cpu_inputs[i].cuda(i) for i in range(nGPUs)]
outputs = [torch.zeros(128 * nGPUs, device=i, dtype=dtype)
for i in range(nGPUs)]
nccl.all_gather(tuple(inputs), tuple(outputs))
for tensor in outputs:
self.assertEqual(tensor, expected)
@sandcastle_skip_if(TEST_WITH_ROCM and HIP_VERSION < 3.5, 'Skip NCCL tests for ROCm')
@sandcastle_skip_if(IS_WINDOWS, "NCCL doesn't support Windows")
@sandcastle_skip_if(not TEST_MULTIGPU, "only one GPU detected")
@dtypes(*datatypes)
def test_reduce_scatter(self, device, dtype):
in_size = 32 * nGPUs
out_size = 32
cpu_inputs = [torch.zeros(in_size).uniform_().to(dtype=dtype) for i in range(nGPUs)]
expected = torch.zeros(in_size, dtype=dtype)
for t in cpu_inputs:
expected.add_(t)
expected = expected.view(nGPUs, 32)
inputs = [cpu_inputs[i].cuda(i) for i in range(nGPUs)]
outputs = [torch.zeros(out_size, device=i, dtype=dtype)
for i in range(nGPUs)]
nccl.reduce_scatter(inputs, outputs)
for i in range(nGPUs):
self.assertEqual(outputs[i], expected[i])
# Test with tuple
inputs = [cpu_inputs[i].cuda(i) for i in range(nGPUs)]
outputs = [torch.zeros(out_size, device=i, dtype=dtype)
for i in range(nGPUs)]
nccl.reduce_scatter(tuple(inputs), tuple(outputs))
for i in range(nGPUs):
self.assertEqual(outputs[i], expected[i])
instantiate_device_type_tests(TestNCCL, globals(), only_for='cuda')
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/distributed/test_nccl.py
|
# Owner(s): ["oncall: distributed"]
import contextlib
import io
from copy import deepcopy
from collections import OrderedDict
from itertools import product
import functools
import torch
from torch import nn
from torch.cuda.amp import autocast
import torch.nn.parallel as dp
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, onlyCUDA, skipMeta
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_utils import sandcastle_skip_if
import torch.nn.functional as F
torch.set_default_dtype(torch.double)
NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL")
# batched grad doesn't support data parallel
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
_assertGradAndGradgradChecks = functools.partial(_assertGradAndGradgradChecks, check_batched_grad=False)
class TestDataParallel(TestCase):
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_buffers_requiring_grad(self):
class TestModule(nn.Module):
def __init__(self, t):
super(TestModule, self).__init__()
self.register_buffer('t_rg', t)
self.register_buffer('t_not_rg', t.clone().detach())
def forward(self, x):
return x * self.t_rg + self.t_not_rg
m = TestModule(torch.randn(100, device='cuda', requires_grad=True))
self.assertTrue(m.t_rg.requires_grad)
dpm = nn.DataParallel(m, [0, 1])
inp = torch.randn(2, 100, device='cuda')
def fn(t):
return dpm(inp)
gradcheck(fn, (m.t_rg,))
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_rnn(self):
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.rnn = torch.nn.LSTM(300, 1024, 1, batch_first=True, bidirectional=True)
def forward(self, x):
self.rnn.flatten_parameters()
return self.rnn(x)
def step(model):
opt = torch.optim.SGD(model.parameters(), lr=10)
input = torch.ones(4, 4, 300).to(0)
output = model(input)
loss = F.mse_loss(output[0], torch.zeros_like(output[0]))
loss.backward()
opt.step()
with torch.no_grad():
model = TestModule().to(0)
model_dp = torch.nn.DataParallel(deepcopy(model))
# make sure DP does not crash when grad is disabled.
# See #21108
model_dp(torch.rand(2, 4, 300).to(0))
step(model)
step(model_dp)
for p1, p2 in zip(model.parameters(), model_dp.parameters()):
self.assertTrue(p1.allclose(p2))
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_lazy_linear(self):
with self.assertRaisesRegex(RuntimeError, 'Modules with uninitialized parameters'):
model_dp = torch.nn.DataParallel(torch.nn.LazyLinear(10).to(0))
model_dp(torch.rand(10, 10).to(0))
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_parallel_apply(self):
l1 = nn.Linear(10, 5).to("cuda:0", torch.float)
l2 = nn.Linear(10, 5).to("cuda:1", torch.float)
i1 = torch.randn(2, 10, device="cuda:0", dtype=torch.float)
i2 = torch.randn(2, 10, device="cuda:1", dtype=torch.float)
expected1 = l1(i1)
expected2 = l2(i2)
modules = (l1, l2)
expected_outputs = (expected1, expected2)
# each input can be either a collection of positional arguments
# or an object representing the single argument
for inputs in [((i1,), (i2,)), (i1, i2)]:
outputs = dp.parallel_apply(modules, inputs, None)
for out, expected in zip(outputs, expected_outputs):
self.assertEqual(out, expected)
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_parallel_apply_autocast(self):
l1 = nn.Linear(10, 5).to("cuda:0", torch.float)
l2 = nn.Linear(10, 5).to("cuda:1", torch.float)
i1 = torch.randn(2, 10, device="cuda:0", dtype=torch.float)
i2 = torch.randn(2, 10, device="cuda:1", dtype=torch.float)
with autocast():
expected1 = l1(i1)
expected2 = l2(i2)
modules = (l1, l2)
expected_outputs = (expected1, expected2)
# each input can be either a collection of positional arguments
# or an object representing the single argument
for inputs in [((i1,), (i2,)), (i1, i2)]:
with autocast():
outputs = dp.parallel_apply(modules, inputs, None)
for out, expected in zip(outputs, expected_outputs):
self.assertEqual(out, expected)
@sandcastle_skip_if(not TEST_CUDA, "CUDA unavailable")
def test_parallel_apply_passes_exception(self):
# we define and instantiate a module that will throw a KeyError
class TestModule(nn.Module):
def forward(self, *args):
return {}['wonderful']
l1 = TestModule().to("cuda", torch.float)
# and check that parallel_apply passes on the exception
# (we can use a single device twice for this test)
with self.assertRaisesRegex(KeyError,
'Caught KeyError in replica \\d '
'on device 0.\nOriginal Traceback'
'[\\s\\S]+wonderful'):
dp.parallel_apply(modules=(l1, l1), inputs=(None, None))
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_multiple_input(self):
class TestModule(nn.Module):
def forward(self, var1, var2, float1, var3=None):
if var3 is None:
return float1 * (var1 * var2)
else:
return float1 * (var1 * var2 + var3)
m = TestModule()
var1 = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
var2 = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
var3 = torch.randn(5, 5, dtype=torch.float, requires_grad=False)
float1 = torch.randn(1).item()
expected = m(var1, var2, float1)
loss = expected.sum()
loss.backward()
gvar1_exp = var1.grad.clone()
gvar2_exp = var2.grad.clone()
def local_test(out):
with torch.no_grad():
var1.grad.fill_(0.0)
var2.grad.fill_(0.0)
loss = out.sum()
loss.backward()
self.assertEqual(out, expected)
self.assertEqual(gvar1_exp, var1.grad)
self.assertEqual(gvar2_exp, var2.grad)
out = dp.data_parallel(m, (var1, var2, float1), (0, 1))
local_test(out)
out = dp.data_parallel(m, (var1, var2, float1), (1, 0))
local_test(out)
out = dp.data_parallel(m, (var1, var2, float1), (0,))
local_test(out)
with torch.no_grad():
var1.grad.fill_(0.0)
var2.grad.fill_(0.0)
expected = m(var1, var2, float1, var3=var3)
loss = expected.sum()
loss.backward()
gvar1_exp = var1.grad.clone()
gvar2_exp = var2.grad.clone()
dpm = nn.DataParallel(TestModule())
out = dpm(var1, var2, float1, var3=var3)
local_test(out)
dpm = nn.DataParallel(TestModule(), device_ids=[0])
out = dpm(var1, var2, float1, var3=var3)
local_test(out)
kwarg_wrap = {'var3': var3}
out = dp.data_parallel(
m, (var1, var2, float1), (0, 1), module_kwargs=kwarg_wrap)
local_test(out)
out = dp.data_parallel(
m, (var1, var2, float1), (0,), module_kwargs=kwarg_wrap)
local_test(out)
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_small_back(self):
l = nn.Linear(10, 5).float().cuda()
i = torch.randn(20, 10, dtype=torch.float, device="cuda")
out = dp.data_parallel(l, i, (0, 1))
self.assertEqual(out, l(i))
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_model_device(self):
r"""Test device[0] check at forward time.
"""
l = nn.Linear(2, 2)
inp = torch.randn(2, 2)
inp_cuda0 = inp.cuda(0)
inp_cuda1 = inp.cuda(1)
error_msg = "module must have its parameters and buffers on device {}"
@contextlib.contextmanager
def dummy_ctx_manager():
yield
def test(inner_m, dp_device, inp, device_ids, should_fail):
if device_ids is None:
device_ids = list(range(torch.cuda.device_count()))
if isinstance(device_ids[0], torch.device):
expect_device = device_ids[0]
else:
expect_device = torch.device("cuda:{}".format(device_ids[0]))
if should_fail:
def assert_correct():
return self.assertRaisesRegex(RuntimeError, error_msg.format(expect_device))
else:
assert_correct = dummy_ctx_manager
# test DataParallel module
dpm = nn.DataParallel(inner_m, device_ids)
if dp_device is not None:
dpm = dpm.to(dp_device)
with assert_correct():
dpm(inp)
# test functional
with assert_correct():
nn.parallel.data_parallel(inner_m.to(dp_device), inp, device_ids)
test(l.to('cpu'), None, inp, None, should_fail=True)
test(l.cuda(1), None, inp_cuda0, None, should_fail=True)
test(l.cuda(), None, inp_cuda0, [1, 0], should_fail=True)
test(l.cuda(), None, inp_cuda0, None, should_fail=False)
test(l.cpu(), 'cuda', inp_cuda0, None, should_fail=False)
test(l.cuda(1), None, inp_cuda1, [1, 0], should_fail=False)
test(l.cpu(), 'cuda:1', inp_cuda1, [1, 0], should_fail=False)
s = nn.Sequential(l.cpu())
test(s, None, inp, None, should_fail=True)
test(s, None, inp, [0, 1], should_fail=True)
test(s, None, inp, [1, 0], should_fail=True)
s = nn.Sequential(deepcopy(l).cpu(), l.cuda())
test(s, None, inp, None, should_fail=True)
test(s, None, inp, [0, 1], should_fail=True)
test(s, None, inp, [1, 0], should_fail=True)
s = nn.Sequential(l.cuda(), deepcopy(l).cuda(1))
test(s, None, inp, None, should_fail=True)
test(s, None, inp, [0, 1], should_fail=True)
test(s, None, inp, [1, 0], should_fail=True)
s = nn.Sequential(l.cuda(), deepcopy(l).cuda())
test(s, None, inp, None, should_fail=False)
test(s, None, inp, [0, 1], should_fail=False)
test(s, None, inp, [1, 0], should_fail=True)
test(s.cpu(), None, inp, [1, 0], should_fail=True)
test(s.cuda(1), None, inp, [1, 0], should_fail=False)
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_model_no_refcycles(self):
# Python 2.7 used to create reference cycles when the module below ran
# on multiple GPUs; Python 3 shouldn't, unless there are refcycles on
# the PyTorch side (or in the module defined below)
import gc
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = nn.Linear(1, 1)
def forward(self, x):
return self.linear(x)
gc.collect()
model = nn.DataParallel(Model().cuda())
data = torch.randn(1, device="cuda")
model(data)
refcycles = gc.collect()
self.assertEqual(refcycles, 0)
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_no_grad(self):
test = self
class Layer(nn.Module):
def forward(self, x):
test.assertFalse(torch.is_grad_enabled())
return x
l = Layer()
i = torch.randn(20, 10, dtype=torch.float, device="cuda")
with torch.no_grad():
dp.data_parallel(l, i, (0, 1))
self.assertRaises(AssertionError, lambda: dp.data_parallel(l, i, (0, 1)))
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel(self):
l = nn.Linear(10, 5).float().cuda()
i = torch.randn(20, 10, dtype=torch.float, device="cuda:1")
l.cuda(1)
expected_out = l(i)
loss = expected_out.sum()
loss.backward()
expected_grads = []
for param in l.parameters():
expected_grads.append(param.grad.clone())
dev_ids_list = [(0, 1), (1, 0)]
for dev_id in dev_ids_list:
with torch.cuda.device(dev_id[0]):
l.cuda()
l.zero_grad()
out = dp.data_parallel(l, i, dev_id)
loss = out.sum()
loss.backward()
self.assertEqual(out.get_device(), dev_id[0])
self.assertEqual(out, expected_out)
for expected, param in zip(expected_grads, l.parameters()):
self.assertEqual(param.grad, expected)
# Check for None device_ids
l = l.cuda()
out = dp.data_parallel(l, i)
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_sparse(self):
l = nn.Embedding(10, 5, sparse=True).to("cuda:1")
i = torch.randint(10, (20, 5), device="cuda:1", dtype=torch.long)
expected_out = l(i)
loss = expected_out.sum()
loss.backward()
expected_grads = []
for param in l.parameters():
expected_grads.append(param.grad.clone())
dev_ids_list = [(0, 1), (1, 0)]
for dev_id in dev_ids_list:
with torch.cuda.device(dev_id[0]):
l.cuda()
l.zero_grad()
out = dp.data_parallel(l, i, dev_id)
loss = out.sum()
loss.backward()
self.assertEqual(out.get_device(), dev_id[0])
self.assertEqual(out, expected_out)
for expected, param in zip(expected_grads, l.parameters()):
self.assertEqual(param.grad.coalesce(), expected.coalesce())
# Check for None device_ids
l = l.cuda()
out = dp.data_parallel(l, i)
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_nested_output(self):
def fn(input):
return [
input, (input.sin(), input.cos(), [input.add(1)]), input,
OrderedDict(a=input, b=[input.sin()])
]
class Net(nn.Module):
def forward(self, input):
return fn(input)
i = torch.randn(2, 2).float().cuda(1)
gpus = range(torch.cuda.device_count())
output = dp.data_parallel(Net(), i, gpus)
self.assertEqual(output, fn(i))
self.assertIsInstance(output[0], torch.Tensor)
self.assertIsInstance(output[1], tuple)
self.assertIsInstance(output[1][0], torch.Tensor)
self.assertIsInstance(output[1][1], torch.Tensor)
self.assertIsInstance(output[1][2], list)
self.assertIsInstance(output[1][2][0], torch.Tensor)
self.assertIsInstance(output[2], torch.Tensor)
self.assertIsInstance(output[3], dict)
self.assertEqual(len(output[3]), 2)
self.assertIn('a', output[3])
self.assertIn('b', output[3])
self.assertIsInstance(output[3]['a'], torch.Tensor)
self.assertIsInstance(output[3]['b'], list)
self.assertIsInstance(output[3]['b'][0], torch.Tensor)
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_nested_input(self):
def fn(input):
return input[1][0]
class Net(nn.Module):
def forward(self, *input):
return fn(input)
i = torch.randn(20, 3, dtype=torch.float, device="cuda:1")
input = (i.cos(), (i.sin(), i), i.sin())
gpus = range(torch.cuda.device_count())
output = dp.data_parallel(Net(), input, gpus)
self.assertEqual(output, fn(input))
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_module_zero_inputs(self):
class TestModule(nn.Module):
def forward(self):
t = torch.eye(2, 3, device='cuda:0')
return t + (1 - t)
def test_helper(output, expected):
self.assertEqual(output.get_device(), 0)
self.assertEqual(output, expected)
expected = torch.ones(2, 3, device='cuda:0')
model = TestModule()
test_helper(nn.DataParallel(model, [0])(), expected)
test_helper(nn.DataParallel(model, [0, 1])(), expected)
test_helper(dp.data_parallel(model, None, [0]), expected)
test_helper(dp.data_parallel(model, (), [0, 1]), expected)
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_device_args(self):
cuda0 = torch.device('cuda:0')
cuda1 = torch.device('cuda:1')
# test output_device
l = nn.Linear(10, 5).to(cuda0, torch.float)
i = torch.randn(20, 10, dtype=torch.float, device=cuda0, requires_grad=True)
out = dp.data_parallel(l, i, device_ids=(0, 1), output_device=cuda0)
self.assertEqual(out, l(i))
# test device_ids
l = nn.Linear(10, 5).to(cuda0, torch.float)
i = torch.randn(20, 10, dtype=torch.float, device=cuda0, requires_grad=True)
out = dp.data_parallel(l, i, device_ids=(cuda0, cuda1), output_device=cuda0)
self.assertEqual(out, l(i))
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_function_deletion(self):
# this test case originates from #16532
def gradient_penalty(net, x):
output = net(x)
loss = torch.autograd.grad(
outputs=output, inputs=x,
grad_outputs=x.new_ones(output.size()),
create_graph=True, retain_graph=True)[0].mean()
return loss
net = nn.Linear(4, 1).cuda()
dpn = nn.DataParallel(net, [0, 1])
x = torch.ones(2, 4, requires_grad=True).cuda()
dpn.zero_grad()
loss = gradient_penalty(dpn, x)
loss.backward()
grads = [p.grad for p in net.parameters()]
self.assertEqual(2, len(grads))
self.assertEqual(
torch.tensor([[0.25, 0.25, 0.25, 0.25]], device='cuda:0'),
grads[0])
self.assertEqual(torch.tensor([0.0], device='cuda:0'), grads[1])
def _test_scatter(self, tensor):
x = tensor.detach().requires_grad_()
result = dp.scatter(x, (0, 1))
self.assertEqual(len(result), 2)
self.assertEqual(result[0], x[:2])
self.assertEqual(result[0].get_device(), 0)
self.assertEqual(result[1], x[2:])
self.assertEqual(result[1].get_device(), 1)
grad = result[0].detach().clone().fill_(2)
result[0].backward(grad)
self.assertEqual(x.grad[:2], grad)
self.assertEqual(x.grad[2:], grad.clone().zero_())
_assertGradAndGradgradChecks(self, lambda y: dp.scatter(y, (0, 1)), (x,))
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_scatter_cpu(self):
self._test_scatter(torch.randn((4, 4)))
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_scatter_gpu(self):
self._test_scatter(torch.randn((4, 4)).cuda())
@sandcastle_skip_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
@sandcastle_skip_if(NO_NCCL, "NCCL needed")
def test_data_parallel_complex(self):
# We expect complex parameters to be broadcast via view_as_real, i.e. mapped from C to R^2
class Cplx(torch.nn.Module):
def __init__(self):
super().__init__()
self.cplx = torch.nn.Parameter(torch.zeros(1, 10, dtype=torch.cfloat).cuda())
def forward(self, x):
return x + self.cplx
cplx = torch.nn.DataParallel(Cplx().cuda())
input = torch.rand(1, 10, dtype=torch.cfloat).cuda()
result = cplx(input)
# 2 is the extra real view dimension here
self.assertEqual(result.size(), torch.Size([1, 10, 2]))
self.assertEqual(result, torch.view_as_real(input))
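# Editor's note (illustrative, not part of the original test): view_as_real maps a complex
# tensor of shape (1, 10) to a real tensor of shape (1, 10, 2), e.g.
# torch.view_as_real(torch.zeros(1, 10, dtype=torch.cfloat)).shape == torch.Size([1, 10, 2]),
# which is why the DataParallel output above gains a trailing dimension of size 2.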
def _test_gather(self, output_device):
inputs = (
torch.randn(2, 4, device='cuda:0', requires_grad=True),
torch.randn(2, 4, device='cuda:1', requires_grad=True),
)
result = dp.gather(inputs, output_device)
self.assertEqual(result.size(), torch.Size([4, 4]))
self.assertEqual(result[:2], inputs[0])
self.assertEqual(result[2:], inputs[1])
if output_device != -1:
self.assertEqual(result.get_device(), output_device)
else:
self.assertFalse(result.is_cuda)
grad = torch.randn((4, 4))
if output_device != -1:
grad = grad.cuda(output_device)
result.backward(grad)
self.assertEqual(inputs[0].grad, grad[:2])
self.assertEqual(inputs[1].grad, grad[2:])
_assertGradAndGradgradChecks(self, lambda x, y: dp.gather((x, y), output_device), inputs)
# test scalar inputs, should stack into a vector in this case
inputs = (
torch.randn((), device='cuda:0', requires_grad=True),
torch.randn((), device='cuda:1', requires_grad=True),
)
result = dp.gather(inputs, output_device)
self.assertEqual(result.size(), torch.Size([2]))
self.assertEqual(result[0], inputs[0])
self.assertEqual(result[1], inputs[1])
if output_device != -1:
self.assertEqual(result.get_device(), output_device)
else:
self.assertFalse(result.is_cuda)
grad = torch.randn(2)
if output_device != -1:
grad = grad.cuda(output_device)
result.backward(grad)
self.assertEqual(inputs[0].grad, grad[0])
self.assertEqual(inputs[1].grad, grad[1])
_assertGradAndGradgradChecks(self, lambda x, y: dp.gather((x, y), output_device), inputs)
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_gather_cpu(self):
self._test_gather(-1)
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_gather_gpu(self):
self._test_gather(0)
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_gather_different_len_dicts(self):
inputs = (
{'a': torch.randn(1, 2, requires_grad=True, device="cuda:0")},
{
'b': torch.randn(1, 2, requires_grad=True, device="cuda:1"),
'a': torch.randn(1, 2, requires_grad=True, device="cuda:1"),
}
)
with self.assertRaises(ValueError):
_ = dp.gather(inputs, target_device=0)
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_replicate(self):
module = nn.Linear(10, 5).float().cuda()
input = torch.randn(2, 10, dtype=torch.float, device="cuda")
expected_output = module(input)
for devices in [(0, 1), [0, 1]]:
replicas = dp.replicate(module, devices)
for i, replica in enumerate(replicas):
for p in replica.parameters():
self.assertEqual(p.get_device(), i)
replica_input = input.cuda(i)
self.assertEqual(replica(replica_input), expected_output)
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_replicate_buffers(self):
net = nn.Module()
net.bn = nn.BatchNorm2d(10)
net.cuda()
for devices in [(0, 1), [0, 1]]:
replicas = dp.replicate(net, devices)
for i, replica in enumerate(replicas):
self.assertEqual(replica.bn.running_mean.get_device(), i, msg='buffer on wrong device')
self.assertEqual(replica.bn.running_var.get_device(), i, msg='buffer on wrong device')
self.assertEqual(replica.bn.num_batches_tracked.get_device(), i, msg='buffer on wrong device')
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_zero_grad(self):
# calling zero_grad() on a DataParallel-wrapped module from inside forward should warn that it has no effect
class Net(torch.nn.Module):
def __init__(self, testcase):
super(Net, self).__init__()
self._testcase = testcase
def forward(self, x):
with self._testcase.assertWarnsRegex(
UserWarning,
r"Calling \.zero_grad\(\) from a module created with nn\.DataParallel\(\) has no effect."):
self.zero_grad()
return x
module = Net(self).cuda()
dpm = dp.DataParallel(module)
dpm(torch.rand(4, 3, 6, 5))
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_autocast(self):
class Model(torch.nn.Linear):
def __init__(self):
super(Model, self).__init__(8, 8)
@torch.cuda.amp.autocast()
def forward(self, input):
return super(Model, self).forward(input)
model = dp.DataParallel(Model().cuda().to(dtype=torch.float32))
input = torch.randn((8, 8), dtype=torch.float32, device="cuda")
self.assertTrue(model(input).dtype is torch.float16)
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_save_replica_module(self):
# DataParallel replicas can be saved (gh-37182)
module = torch.nn.Linear(8, 8).cuda()
dpm = torch.nn.parallel.replicate(module, devices=[0, 1], detach=False)
data = io.BytesIO()
torch.save(dpm, data)
dpm = torch.nn.parallel.replicate(module, devices=[0, 1], detach=True)
torch.save(dpm, data)
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_strided_grad_layout(self):
class ConvNet(nn.Module):
def __init__(self, layouts, dtype_list):
super(ConvNet, self).__init__()
self.dtypes = dtype_list
self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(memory_format=layouts[0], dtype=dtype_list[0])
self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(memory_format=layouts[1], dtype=dtype_list[1])
self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(memory_format=layouts[2], dtype=dtype_list[2])
self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(memory_format=layouts[3], dtype=dtype_list[3])
def forward(self, x):
x = x.to(self.dtypes[0])
x = self.conv0(x).to(self.dtypes[1])
x = self.conv1(x).to(self.dtypes[2])
x = self.conv2(x).to(self.dtypes[3])
x = self.conv3(x)
return x
layer_formats = ([torch.contiguous_format] * 4,
[torch.channels_last] * 2 + [torch.contiguous_format] * 2,
[torch.channels_last] * 4,)
layer_dtypes = ([torch.float] * 4,
[torch.float] * 2 + [torch.half] * 2,
[torch.half] * 4,)
ndevs = torch.cuda.device_count()
input = torch.randn(ndevs * 8, 8, 8, 8, device="cuda:0", dtype=torch.float)
target = torch.randn(ndevs * 8, 8, 4, 4, device="cuda:0", dtype=torch.float)
device_ids = list(range(ndevs))
with torch.backends.cudnn.flags(enabled=True, deterministic=True, benchmark=False):
for formats, dtype_list in product(layer_formats, layer_dtypes):
model_msg = "formats = {} dtypes = {}".format(formats, dtypes)
try:
m = ConvNet(formats, dtype_list).cuda(device="cuda:0")
m_dp = dp.DataParallel(deepcopy(m), device_ids=device_ids)
opt = torch.optim.SGD(m.parameters(), lr=0.1)
opt_dp = torch.optim.SGD(m_dp.parameters(), lr=0.1)
has_half = any(p.dtype is torch.half for p in m.parameters())
tol = 1.e-3 if has_half else 1.e-5
except BaseException:
# Prints case-specific debugging info to narrow down failing case.
print("Caught exception during model creation for " + model_msg, flush=True)
raise
# 2 iters: First iter creates grads, second iter tries zeroed grads.
for it in range(2):
iter_msg = "iter = {} ".format(it) + model_msg
named_msg = iter_msg
try:
F.mse_loss(m(input).float(), target).backward()
F.mse_loss(m_dp(input).float(), target).backward()
for i, ((layer_name, m_child), m_dp_child) in enumerate(zip(m.named_children(),
m_dp.module.children())):
named_msg = layer_name + ".weight " + iter_msg
self.assertTrue(m_child.weight.grad.is_contiguous(memory_format=formats[i]), named_msg)
self.assertTrue(m_dp_child.weight.grad.is_contiguous(memory_format=formats[i]), named_msg)
for j, ((param_name, p), p_dp) in enumerate(zip(m_child.named_parameters(),
m_dp_child.parameters())):
named_msg = layer_name + "." + param_name + " " + iter_msg
self.assertEqual(p.grad, p_dp.grad, rtol=tol, atol=tol)
opt.step()
opt_dp.step()
opt.zero_grad()
opt_dp.zero_grad()
except BaseException:
# Makes sure we still get info if an error occurred somewhere other than the asserts.
print("Caught exception during iterations at " + named_msg, flush=True)
raise
@sandcastle_skip_if(not TEST_MULTIGPU, "multi-GPU not supported")
def test_parameter_list_dict_replica(self):
class MyMod(torch.nn.Module):
def __init__(self, data, check_fn):
super(MyMod, self).__init__()
self.data = data
self.check_fn = check_fn
def forward(self, inp):
self.check_fn(self)
return inp
p1 = torch.nn.Parameter(torch.rand(10))
p2 = torch.nn.Parameter(torch.rand(10))
key0 = 0
key1 = 1
def check_fn(self_):
self.assertEqual(p1, self_.data[key0])
self.assertEqual(p2, self_.data[key1])
self.assertTrue(self_.data[key0].requires_grad)
self.assertTrue(self_.data[key1].requires_grad)
self.assertIsNotNone(self_.data[key0].grad_fn)
self.assertIsNotNone(self_.data[key1].grad_fn)
module = MyMod(torch.nn.ParameterList([p1, p2]), check_fn).cuda()
model = dp.DataParallel(module)
input = torch.randn((8, 8), device="cuda")
# Runs the check_fn
model(input)
key0 = "0"
key1 = "1"
module = MyMod(torch.nn.ParameterDict({"0": p1, "1": p2}), check_fn).cuda()
model = dp.DataParallel(module)
input = torch.randn((8, 8), device="cuda")
# Runs the check_fn
model(input)
class TestDataParallelDeviceType(TestCase):
@onlyCUDA
@skipMeta
@dtypes(torch.float, torch.double, torch.half)
def test_data_parallel_module(self, device, dtype):
l = nn.Linear(10, 5).to(device, dtype)
i = torch.randn(20, 10, device=device, dtype=dtype)
expected_out = l(i)
net = nn.DataParallel(l)
out = net(i)
self.assertEqual(out.get_device(), 0)
self.assertEqual(out, expected_out, atol=dtype2prec_DONTUSE[dtype], rtol=0)
@onlyCUDA
@skipMeta
@dtypes(torch.float, torch.double, torch.half)
def test_data_parallel_module_kwargs_only(self, device, dtype):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.l = l
def forward(self, input):
return self.l(input)
l = nn.Linear(10, 5).to(device, dtype)
i = torch.randn(20, 10, device=device, dtype=dtype)
expected_out = l(i)
n = nn.DataParallel(Net())
out = n(input=i)
self.assertEqual(out.get_device(), 0)
self.assertEqual(out, expected_out, atol=dtype2prec_DONTUSE[dtype], rtol=0)
@onlyCUDA
@skipMeta
@dtypes(torch.float, torch.double, torch.half)
def test_data_parallel_module_kwargs_only_empty_list(self, device, dtype):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.l = l
def forward(self, input):
return self.l(input['data'])
l = nn.Linear(10, 5).to(device, dtype)
i = torch.randn(20, 10, device=device, dtype=dtype)
expected_out = l(i)
n = nn.DataParallel(Net())
out = n(input={'data': i, 'unused': []})
self.assertEqual(out.get_device(), 0)
self.assertEqual(out, expected_out, atol=dtype2prec_DONTUSE[dtype], rtol=0)
@onlyCUDA
@skipMeta
@dtypes(torch.float, torch.double, torch.half)
def test_data_parallel_module_kwargs_only_empty_dict(self, device, dtype):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.l = l
def forward(self, input):
return self.l(input['data'])
l = nn.Linear(10, 5).to(device, dtype)
i = torch.randn(20, 10, device=device, dtype=dtype)
expected_out = l(i)
n = nn.DataParallel(Net())
out = n(input={'data': i, 'unused': {}})
self.assertEqual(out.get_device(), 0)
self.assertEqual(out, expected_out, atol=dtype2prec_DONTUSE[dtype], rtol=0)
@onlyCUDA
@skipMeta
@dtypes(torch.float, torch.double, torch.half)
def test_data_parallel_module_kwargs_only_empty_tuple(self, device, dtype):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.l = l
def forward(self, input):
return self.l(input['data'])
l = nn.Linear(10, 5).to(device, dtype)
i = torch.randn(20, 10, device=device, dtype=dtype)
expected_out = l(i)
n = nn.DataParallel(Net())
out = n(input={'data': i, 'unused': ()})
self.assertEqual(out.get_device(), 0)
self.assertEqual(out, expected_out, atol=dtype2prec_DONTUSE[dtype], rtol=0)
instantiate_device_type_tests(TestDataParallelDeviceType, globals())
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/distributed/test_data_parallel.py
|
# Owner(s): ["oncall: distributed"]
import os
import sys
import tempfile
import time
from datetime import timedelta
from sys import platform
import torch
import torch.distributed as dist
import torch.distributed.rpc as rpc
if not dist.is_available():
print("torch.distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.testing._internal.common_utils as common
from torch._six import string_classes
from torch.testing._internal.common_distributed import (
skip_if_win32,
create_tcp_store
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
retry_on_connect_failures,
ADDRESS_IN_USE,
CONNECT_TIMEOUT,
)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if platform == "darwin":
LOOPBACK = "lo0"
else:
LOOPBACK = "lo"
DEFAULT_HOSTNAME = "localhost"
torch.backends.cuda.matmul.allow_tf32 = False
def gpus_for_rank(world_size):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. Nccl backend requires equal #GPUs in each process.
On a single node, all visible GPUs are evenly
divided to subsets, each process only uses a subset.
"""
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
visible_devices[rank * gpus_per_process: (rank + 1) * gpus_per_process]
)
return gpus_for_rank
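# Editor's note (illustrative, not part of the original file): with 4 visible GPUs and
# world_size == 2, gpus_per_process == 2 and gpus_for_rank(2) returns [[0, 1], [2, 3]],
# i.e. rank 0 uses GPUs 0-1 and rank 1 uses GPUs 2-3.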
class StoreTestBase(object):
def _create_store(self, i):
raise RuntimeError("not implemented")
def _test_set_get(self, fs):
fs.add("key", 1)
fs.add("key", 2)
fs.add("key", 3)
fs.set("key0", "value0")
fs.add("key3", 1)
fs.set("key1", "value1")
fs.add("key3", 2)
fs.set("key2", "value2")
fs.add("key3", 3)
fs.add("key3", 4)
fs.add("key3", 5)
fs.add("key3", 6)
self.assertEqual(fs.num_keys(), self.num_keys_total)
self.assertEqual(b"6", fs.get("key"))
self.assertEqual(b"value0", fs.get("key0"))
self.assertEqual(b"value1", fs.get("key1"))
self.assertEqual(b"value2", fs.get("key2"))
self.assertEqual(b"21", fs.get("key3"))
fs.set("-key3", "7")
self.assertEqual(b"7", fs.get("-key3"))
fs.delete_key("-key3")
self.assertEqual(fs.num_keys(), self.num_keys_total)
def test_set_get(self):
self._test_set_get(self._create_store())
def _test_compare_set(self, store):
missing_key_result = store.compare_set("cs_key0", "wrong_old_value", "new_value0")
self.assertEqual(b"wrong_old_value", missing_key_result)
store.set("cs_key0", "value0")
self.assertEqual(b"value0", store.get("cs_key0"))
old_value_result = store.compare_set("cs_key0", "wrong_old_value", "new_value0")
self.assertEqual(b"value0", old_value_result)
self.assertEqual(b"value0", store.get("cs_key0"))
new_value_result = store.compare_set("cs_key0", "value0", "new_value0")
self.assertEqual(b"new_value0", new_value_result)
self.assertEqual(b"new_value0", store.get("cs_key0"))
empty_old_value_result = store.compare_set("cs_key1", "", "new_value1")
self.assertEqual(b"new_value1", empty_old_value_result)
self.assertEqual(b"new_value1", store.get("cs_key1"))
def test_compare_set(self):
self._test_compare_set(self._create_store())
# This is the number of keys used in test_set_get. Adding this as a class
# property instead of hardcoding in the test since some Store
# implementations will have a differing number of keys. In the base case,
# there will be 5 keys: key, key0, key1, key2, key3.
@property
def num_keys_total(self):
return 5
class FileStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(FileStoreTest, self).setUp()
self.file = tempfile.NamedTemporaryFile(delete=False)
def _create_store(self):
store = dist.FileStore(self.file.name, 1)
store.set_timeout(timedelta(seconds=300))
return store
@skip_if_win32()
class HashStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(HashStoreTest, self).setUp()
def _create_store(self):
store = dist.HashStore()
store.set_timeout(timedelta(seconds=300))
return store
class PrefixFileStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(PrefixFileStoreTest, self).setUp()
self.file = tempfile.NamedTemporaryFile(delete=False)
self.filestore = dist.FileStore(self.file.name, 1)
self.prefix = "test_prefix"
self.filestore.set_timeout(timedelta(seconds=300))
def _create_store(self):
return dist.PrefixStore(self.prefix, self.filestore)
class TCPStoreTest(TestCase, StoreTestBase):
def _create_store(self):
store = create_tcp_store()
store.set_timeout(timedelta(seconds=300))
return store
def test_address_already_in_use(self):
err_msg_reg = "^The server socket has failed to listen on any local "
with self.assertRaisesRegex(RuntimeError, err_msg_reg):
addr = DEFAULT_HOSTNAME
port = common.find_free_port()
# Use noqa to silence flake8.
# Need to store in an unused variable here to ensure the first
# object is not destroyed before the second object is created.
store1 = dist.TCPStore(addr, port, 1, True) # noqa: F841
store2 = dist.TCPStore(addr, port, 1, True) # noqa: F841
@retry_on_connect_failures
def test_multitenancy(self):
addr = DEFAULT_HOSTNAME
port = common.find_free_port()
# Use noqa to silence flake8.
# Need to store in an unused variable here to ensure the first
# object is not destroyed before the second object is created.
store1 = dist.TCPStore(addr, port, 1, True, multi_tenant=True) # type: ignore[call-arg] # noqa: F841
store2 = dist.TCPStore(addr, port, 1, True, multi_tenant=True) # type: ignore[call-arg] # noqa: F841
@skip_if_win32()
@retry_on_connect_failures
def test_init_pg_and_rpc_with_same_socket(self):
addr = DEFAULT_HOSTNAME
port = common.find_free_port()
os.environ["MASTER_ADDR"] = addr
os.environ["MASTER_PORT"] = str(port)
# We internally use a multi-tenant TCP store. Both PG and RPC should successfully
# initialize even when using the same socket address.
dist.init_process_group(
backend="gloo",
init_method="env://",
rank=0,
world_size=1,
)
backend_opts = rpc.TensorPipeRpcBackendOptions(
init_method=f"tcp://{addr}:{port}"
)
rpc.init_rpc(
name="worker0",
rank=0,
world_size=1,
rpc_backend_options=backend_opts,
)
rpc.shutdown()
# The TCPStore has 6 keys in test_set_get. It contains the 5 keys added by
# the user and one additional key used to coordinate all the workers.
@property
def num_keys_total(self):
return 6
def _test_numkeys_delkeys(self, fs):
# We start off with one init key in the store to coordinate workers
self.assertEqual(fs.num_keys(), 1)
fs.add("key", 1)
fs.add("key", 2)
fs.add("key", 3)
fs.set("key0", "value0")
fs.add("key3", 1)
fs.set("key1", "value1")
self.assertEqual(fs.num_keys(), 5)
fs.delete_key("key")
self.assertEqual(fs.num_keys(), 4)
fs.set_timeout(timedelta(seconds=2))
with self.assertRaises(RuntimeError):
fs.get("key")
fs.delete_key("key0")
fs.delete_key("key3")
self.assertEqual(fs.num_keys(), 2)
fs.set("key4", "value2")
self.assertEqual(fs.num_keys(), 3)
self.assertEqual(b"value1", fs.get("key1"))
self.assertEqual(b"value2", fs.get("key4"))
def test_numkeys_delkeys(self):
self._test_numkeys_delkeys(self._create_store())
def _create_client(self, index, addr, port, world_size):
client_store = dist.TCPStore(addr, port, world_size=world_size, timeout=timedelta(seconds=10))
self.assertEqual("value".encode(), client_store.get("key"))
client_store.set(f"new_key{index}", f"new_value{index}")
self.assertEqual(f"next_value{index}".encode(),
client_store.compare_set(f"new_key{index}", f"new_value{index}", f"next_value{index}"))
def _multi_worker_helper(self, world_size):
addr = DEFAULT_HOSTNAME
server_store = create_tcp_store(addr, world_size, wait_for_workers=False)
server_store.set("key", "value")
port = server_store.port
num_indices = world_size if world_size else 1
for i in range(num_indices):
self._create_client(i, addr, port, world_size)
def test_multi_worker_with_fixed_world_size(self):
self._multi_worker_helper(5)
def test_multi_worker_with_nonfixed_world_size(self):
self._multi_worker_helper(None)
class PrefixTCPStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(PrefixTCPStoreTest, self).setUp()
self.tcpstore = create_tcp_store()
self.prefix = "test_prefix"
self.tcpstore.set_timeout(timedelta(seconds=300))
def _create_store(self):
return dist.PrefixStore(self.prefix, self.tcpstore)
# The PrefixTCPStore has 6 keys in test_set_get. It contains the 5 keys
# added by the user and one additional key used to coordinate all the
# workers.
@property
def num_keys_total(self):
return 6
class MyPythonStore(dist.Store):
def __init__(self):
super(MyPythonStore, self).__init__()
self.store = dict()
def set(self, key, value):
if not isinstance(key, string_classes):
raise AssertionError("Expected set to be called with string key")
if type(value) is not bytes:
raise AssertionError("Expected set to be called with bytes value")
self.store[key] = value
def get(self, key):
value = self.store.get(key, b"")
if type(value) is not bytes:
raise AssertionError("Expected get to return bytes value")
return value
def add(self, key, value):
new = int(self.store.get(key, 0)) + value
self.set(key, bytes(str(new).encode("utf-8")))
return new
class PythonStoreTest(TestCase):
def setUp(self):
super(PythonStoreTest, self).setUp()
def test_set_get(self):
# If we were to inherit from StoreTestBase and try to use
# its test_set_get function, we would exercise the Python
# API directly, instead of going through the C++ trampoline.
# We care about testing the C++ trampoline, so run the
# equivalent of StoreTestBase.test_set_get from C++.
# See `torch/csrc/distributed/c10d/init.cpp` for the definition
# of this test function.
dist._test_python_store(MyPythonStore())
class RendezvousTest(TestCase):
def test_unknown_handler(self):
with self.assertRaisesRegex(RuntimeError, "^No rendezvous handler"):
dist.rendezvous("invalid://")
def test_url_with_node_params(self):
with self.assertRaisesRegex(AssertionError, "has node-specific arguments"):
dist.rendezvous("file://foo?rank=12&world_size=16", 12, 16)
class RendezvousEnvTest(TestCase):
@retry_on_connect_failures
def test_nominal(self):
os.environ["WORLD_SIZE"] = "1"
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = str(common.find_free_port())
# Single rank
os.environ["RANK"] = "0"
gen0 = dist.rendezvous("env://")
store0, rank0, size0 = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(1, size0)
store0.set("key0", "value0")
# check with get
self.assertEqual(b"value0", store0.get("key0"))
class RendezvousFileTest(TestCase):
def test_common_errors(self):
with self.assertRaisesRegex(ValueError, "path missing"):
gen = dist.rendezvous("file://?rank=0&world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "rank parameter missing"):
gen = dist.rendezvous("file:///tmp/foo?world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "size parameter missing"):
gen = dist.rendezvous("file:///tmp/foo?rank=0")
next(gen)
def test_nominal(self):
with tempfile.NamedTemporaryFile(delete=False) as file:
url = f'file:///{file.name.replace(os.path.sep, "/")}?world_size=2'
gen0 = dist.rendezvous(url + "&rank=0")
store0, rank0, size0 = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(2, size0)
gen1 = dist.rendezvous(url + "&rank=1")
store1, rank1, size1 = next(gen1)
self.assertEqual(1, rank1)
self.assertEqual(2, size1)
# Set value on both stores
store0.set("key0", "value0")
store1.set("key1", "value1")
# Cross check with get
self.assertEqual(b"value0", store1.get("key0"))
self.assertEqual(b"value1", store0.get("key1"))
@skip_if_win32()
class RendezvousTCPTest(TestCase):
def create_tcp_url(self):
addr = DEFAULT_HOSTNAME
port = common.find_free_port()
url = "tcp://%s:%d?world_size=%d" % (addr, port, 1)
return url
def test_common_errors(self):
with self.assertRaisesRegex(ValueError, "port number missing"):
gen = dist.rendezvous("tcp://127.0.0.1?rank=0&world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "rank parameter missing"):
gen = dist.rendezvous("tcp://127.0.0.1:23456?world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "size parameter missing"):
gen = dist.rendezvous("tcp://127.0.0.1:23456?rank=0")
next(gen)
def test_dns_timeout(self):
with self.assertRaisesRegex(TimeoutError, "client socket has timed out after.*dnsnotexist"):
gen = dist.rendezvous(
"tcp://dnsnotexist:23456?world_size=2&rank=0",
timeout=timedelta(seconds=1),
)
next(gen)
@retry_on_connect_failures
def test_nominal(self):
url = self.create_tcp_url()
gen0 = dist.rendezvous(url + "&rank=0")
store0, rank0, size0 = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(1, size0)
# Set value on the single store
store0.set("key0", "value0")
# check with get
self.assertEqual(b"value0", store0.get("key0"))
@retry_on_connect_failures(connect_errors=(CONNECT_TIMEOUT, ADDRESS_IN_USE))
def test_tcp_store_timeout_set(self):
url = self.create_tcp_url()
test_store_timeout = timedelta(seconds=10)
gen0 = dist.rendezvous(url + "&rank=0", timeout=test_store_timeout)
store0, rank0, size0 = next(gen0)
# this should time out in 10s. If the timeout passed into rendezvous was
# not respected, it will take much longer to time out.
start = time.time()
with self.assertRaisesRegex(RuntimeError, "Timeout"):
store0.get("nonexistant key")
end = time.time()
time_diff = end - start
self.assertGreater(test_store_timeout.seconds * 10, time_diff)
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
pytorch-master
|
test/distributed/test_store.py
|
# Owner(s): ["oncall: distributed"]
import sys
import test_c10d_spawn
import torch
import torch.distributed as c10d
from test_c10d_spawn import _torch_dist_nn_available, TestDistributedNNFunctions
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
sandcastle_skip_if,
TEST_WITH_DEV_DBG_ASAN,
)
NO_NCCL = not hasattr(c10d, "ProcessGroupNCCL")
# Fails on Python-3.9, see https://github.com/pytorch/pytorch/issues/51619
if sys.version_info < (3, 9):
class ProcessGroupShareTensorTest(
test_c10d_spawn.AbstractProcessGroupShareTensorTest, TestCase
):
@classmethod
def _init_pg_nccl(cls, rank, filename, world_size):
store = c10d.FileStore(filename, world_size)
return c10d.ProcessGroupNCCL(store, rank, world_size)
@sandcastle_skip_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
@sandcastle_skip_if(NO_NCCL, "NCCL needed")
def test_shared_broadcast_nccl(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_broadcast_process,
[torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_nccl,
1,
)
@sandcastle_skip_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
@sandcastle_skip_if(NO_NCCL, "NCCL needed")
def test_shared_allreduce_nccl(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allreduce_process,
[torch.ones(2, 2).to(i) for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_nccl,
1,
)
@classmethod
def _test_reduce_process(
cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c
):
pg = init_pg(rank, filename, world_size)
x = shared_tensors[rank]
pg.reduce(x, root=0, op=c10d.ReduceOp.SUM).wait()
if rank == 0:
c2p.put((rank, torch.ones(2, 2) * 2, x.to("cpu")))
else:
c2p.put((rank, torch.ones(2, 2), x.to("cpu")))
p2c.get()
@sandcastle_skip_if(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
@sandcastle_skip_if(NO_NCCL, "NCCL needed")
def test_shared_reduce_nccl(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_reduce_process,
[torch.ones(2, 2).to(i) for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_nccl,
1,
)
        @sandcastle_skip_if(not TEST_MULTIGPU, "At least 2 CUDA GPUs needed")
@sandcastle_skip_if(NO_NCCL, "NCCL needed")
def test_shared_allgather_nccl(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allgather_process,
[torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_nccl,
self.world_size,
)
# Skip dev-asan as torch + multiprocessing spawn have known issues
if not TEST_WITH_DEV_DBG_ASAN:
class TestDistributedNNFunctionsNccl(TestDistributedNNFunctions):
# Test Common Ops First.
@requires_nccl()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
not _torch_dist_nn_available, "torch.distributed.nn is not available"
)
def test_broadcast(self):
self._test_broadcast("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_reduce(self):
self._test_reduce("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_allreduce(self):
self._test_allreduce("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_gather(self):
self._test_all_gather("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_to_all(self):
self._test_all_to_all("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_to_all_single(self):
self._test_all_to_all_single("nccl")
# Test Ops only supported in NCCL.
@requires_nccl()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_reduce_scatter(self):
store = c10d.FileStore(self.file_name, self.world_size)
            # This is required because these functions call directly into
            # torch.distributed and need the world to be initialized.
c10d.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
device = torch.device(f"cuda:{self.rank}")
x0 = torch.ones(5, 5, device=device) + self.rank
x1 = torch.ones(5, 5, device=device) + self.rank + 1
x0.requires_grad = True
x1.requires_grad = True
y = torch.empty_like(x0)
expected = (1 + self.world_size) * self.world_size / 2 + self.world_size * self.rank
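            # A minimal derivation of the closed form above: rank j contributes
            # the value (1 + j + k) for input slot k, so the reduce_scatter
            # result on rank r is sum_j (1 + j + r)
            # = world_size * (world_size + 1) / 2 + world_size * r,
            # elementwise over the 5x5 tensor.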
y = torch.distributed.nn.reduce_scatter(y, [x0, x1])
self.assertEqual(y, torch.ones(5, 5, device=device) * expected)
z = y.sin().sum()
z.backward()
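            # Backward sketch: each input slot k only feeds the output produced
            # on rank k, so the test expects x0.grad == cos(expected_0) * ones
            # and x1.grad == cos(expected_1) * ones, where expected_k is the
            # reduce_scatter result on rank k (computed below).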
expected_0 = (1 + self.world_size) * self.world_size / 2
expected_1 = expected_0 + self.world_size
x_s_0 = (expected_0 * torch.ones(5, 5, device=device)).cos()
x_s_1 = (expected_1 * torch.ones(5, 5, device=device)).cos()
self.assertEqual(x0.grad, x_s_0)
self.assertEqual(x1.grad, x_s_1)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_reduce_scatter_non_contiguous(self):
store = c10d.FileStore(self.file_name, self.world_size)
            # This is required because these functions call directly into
            # torch.distributed and need the world to be initialized.
c10d.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
device = torch.device(f"cuda:{self.rank}")
class NonContiguousGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
# Make grad non-contiguous
return grad_output.clone().transpose(0, 1)
x0 = torch.rand(5, 5, device=device, requires_grad=True)
x1 = torch.rand(5, 5, device=device, requires_grad=True)
y = torch.empty(5, 5, device=device)
y = torch.distributed.nn.reduce_scatter(y, [x0, x1])
NonContiguousGrad.apply(y).sum().backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_gather_base(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
device = torch.device(f"cuda:{self.rank}")
x = torch.ones(5, 5, device=device) + self.rank
x.requires_grad = True
output = torch.empty(5 * self.world_size, 5, device=device)
output = torch.distributed.nn.functional._all_gather_base(output, x)
self.assertEqual(output.size(), torch.Size((5 * self.world_size, 5)))
for idx in range(self.world_size):
self.assertEqual(output[5 * idx : 5 * (idx + 1)], torch.ones(5, 5, device=device) + idx)
y = torch.sum(output.view(self.world_size, 5, 5), axis=0)
z = y.sin().sum()
z.backward()
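            # The hard-coded constants below assume world_size == 2: then
            # y == 3 * ones (the two gathered chunks are ones and 2 * ones), the
            # grad of every gathered chunk is cos(3), and the test expects this
            # rank's chunk grad summed over both ranks, i.e. 2 * cos(3) * ones.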
x_s = 2 * (3 * torch.ones(5, 5, device=device)).cos()
self.assertEqual(x.grad, x_s)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/test_c10d_spawn_nccl.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: distributed"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
from argparse import ArgumentParser
from torch.distributed.argparse_util import check_env, env
class ArgParseUtilTest(unittest.TestCase):
def setUp(self):
# remove any lingering environment variables
for e in os.environ.keys():
if e.startswith("PET_"):
del os.environ[e]
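    # Note: the `env` and `check_env` actions fall back to a PET_<DEST>
    # environment variable (e.g. PET_FOO for --foo, PET_VERBOSE for --verbose)
    # when the flag is absent from the command line; the cases below exercise
    # both the fallback and the explicit-flag override.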
def test_env_string_arg_no_env(self):
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, default="bar")
self.assertEqual("bar", parser.parse_args([]).foo)
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
self.assertEqual("baz", parser.parse_args(["--foo", "baz"]).foo)
def test_env_string_arg_env(self):
os.environ["PET_FOO"] = "env_baz"
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, default="bar")
self.assertEqual("env_baz", parser.parse_args([]).foo)
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
self.assertEqual("baz", parser.parse_args(["--foo", "baz"]).foo)
def test_env_int_arg_no_env(self):
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, default=1, type=int)
self.assertEqual(1, parser.parse_args([]).foo)
self.assertEqual(2, parser.parse_args(["-f", "2"]).foo)
self.assertEqual(2, parser.parse_args(["--foo", "2"]).foo)
def test_env_int_arg_env(self):
os.environ["PET_FOO"] = "3"
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, default=1, type=int)
self.assertEqual(3, parser.parse_args([]).foo)
self.assertEqual(2, parser.parse_args(["-f", "2"]).foo)
self.assertEqual(2, parser.parse_args(["--foo", "2"]).foo)
def test_env_no_default_no_env(self):
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env)
self.assertIsNone(parser.parse_args([]).foo)
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
self.assertEqual("baz", parser.parse_args(["--foo", "baz"]).foo)
def test_env_no_default_env(self):
os.environ["PET_FOO"] = "env_baz"
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env)
self.assertEqual("env_baz", parser.parse_args([]).foo)
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
self.assertEqual("baz", parser.parse_args(["--foo", "baz"]).foo)
def test_env_required_no_env(self):
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, required=True)
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
self.assertEqual("baz", parser.parse_args(["--foo", "baz"]).foo)
def test_env_required_env(self):
os.environ["PET_FOO"] = "env_baz"
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, default="bar", required=True)
self.assertEqual("env_baz", parser.parse_args([]).foo)
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
self.assertEqual("baz", parser.parse_args(["--foo", "baz"]).foo)
def test_check_env_no_env(self):
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", action=check_env)
self.assertFalse(parser.parse_args([]).verbose)
self.assertTrue(parser.parse_args(["-v"]).verbose)
self.assertTrue(parser.parse_args(["--verbose"]).verbose)
def test_check_env_default_no_env(self):
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", action=check_env, default=True)
self.assertTrue(parser.parse_args([]).verbose)
self.assertTrue(parser.parse_args(["-v"]).verbose)
self.assertTrue(parser.parse_args(["--verbose"]).verbose)
def test_check_env_env_zero(self):
os.environ["PET_VERBOSE"] = "0"
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", action=check_env)
self.assertFalse(parser.parse_args([]).verbose)
self.assertTrue(parser.parse_args(["--verbose"]).verbose)
def test_check_env_env_one(self):
os.environ["PET_VERBOSE"] = "1"
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", action=check_env)
self.assertTrue(parser.parse_args([]).verbose)
self.assertTrue(parser.parse_args(["--verbose"]).verbose)
def test_check_env_default_env_zero(self):
os.environ["PET_VERBOSE"] = "0"
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", action=check_env, default=True)
self.assertFalse(parser.parse_args([]).verbose)
self.assertTrue(parser.parse_args(["--verbose"]).verbose)
def test_check_env_default_env_one(self):
os.environ["PET_VERBOSE"] = "1"
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", action=check_env, default=True)
self.assertTrue(parser.parse_args([]).verbose)
self.assertTrue(parser.parse_args(["--verbose"]).verbose)
|
pytorch-master
|
test/distributed/argparse_util_test.py
|
# Owner(s): ["oncall: distributed"]
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
LOOPBACK,
gpus_for_rank,
Task,
ModuleForDdpCommHook,
SparseGradientModule,
)
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_gloo,
skip_if_lt_x_gpu,
simple_sparse_reduce_tests,
skip_if_win32,
create_device,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
sandcastle_skip,
)
def simple_reduce_tests(rank, world_size):
tests = [
(
c10d.ReduceOp.SUM,
torch.tensor([rank + 1.0]),
torch.tensor([float(world_size * (world_size + 1) / 2)]),
),
(
c10d.ReduceOp.PRODUCT,
torch.tensor([rank + 1.0]),
torch.tensor([float(math.factorial(world_size))]),
),
(
c10d.ReduceOp.MIN,
torch.tensor([rank + 1.0]),
torch.tensor([1.0]),
),
(
c10d.ReduceOp.MAX,
torch.tensor([rank + 1.0]),
torch.tensor([world_size]),
),
]
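    # Worked example for world_size = 3: the contributions are 1.0, 2.0 and 3.0,
    # so SUM -> 6.0 (= 3 * 4 / 2), PRODUCT -> 6.0 (= 3!), MIN -> 1.0, MAX -> 3.0,
    # matching the closed forms above.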
# Generate tests for BAND.
# The bit that is set changes in every iteration to check
# that the output changes accordingly.
for i in range(4):
vin = rank | (1 << i)
vout = 1 << i
tests.append(
(
c10d.ReduceOp.BAND,
torch.tensor([vin], dtype=torch.int32),
torch.tensor([vout], dtype=torch.int32),
),
)
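    # Worked example (world_size = 2, i = 1): rank 0 contributes 0b10 and rank 1
    # contributes 0b11, so the bitwise AND across ranks keeps only the shared
    # bit 0b10 == 1 << 1, which is vout.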
# Generate tests for BOR.
# These emulate a larger world size per iteration by having every
# rank contribute multiple values that are pre-OR'ed.
for i in range(1, 5):
vin = reduce(operator.or_, [rank * i + j for j in range(i)])
vout = reduce(operator.or_, range(world_size * i))
tests.append(
(
c10d.ReduceOp.BOR,
torch.tensor([vin], dtype=torch.int32),
torch.tensor([vout], dtype=torch.int32),
),
)
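    # Worked example (world_size = 2, i = 2): rank 0 pre-ORs {0, 1} -> 1 and
    # rank 1 pre-ORs {2, 3} -> 3, so the BOR across ranks is 1 | 3 == 3, the
    # same as OR-ing the whole range 0..3.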
# Generate tests for XOR.
# These emulate a larger world size per iteration by having every
# rank contribute multiple values that are pre-XOR'ed.
for i in range(1, 5):
vin = reduce(operator.xor, [rank * i + j for j in range(i)])
vout = reduce(operator.xor, range(world_size * i))
tests.append(
(
c10d.ReduceOp.BXOR,
torch.tensor([vin], dtype=torch.int32),
torch.tensor([vout], dtype=torch.int32),
),
)
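    # Worked example (world_size = 2, i = 2): rank 0 pre-XORs {0, 1} -> 1 and
    # rank 1 pre-XORs {2, 3} -> 1, so the BXOR across ranks is 1 ^ 1 == 0,
    # matching 0 ^ 1 ^ 2 ^ 3 == 0.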
return tests
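# The expected values in the coalesced tests below follow from closed forms over
# ranks 0..world_size-1: sum of (rank + 1) is world_size * (world_size + 1) / 2,
# sum of (rank + 1) ** 2 is world_size * (world_size + 1) * (2 * world_size + 1) / 6,
# and the products of (rank + 1) and (rank + 2) are world_size! and (world_size + 1)!.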
def simple_coalesced_reduce_tests(rank, world_size):
return [
(
c10d.ReduceOp.SUM,
[torch.tensor([rank + 1]), torch.tensor([(rank + 1) ** 2])],
[
torch.tensor([float(world_size * (world_size + 1) / 2)]),
torch.tensor(
[float(world_size * (world_size + 1) * (2 * world_size + 1) / 6)]
),
],
),
(
c10d.ReduceOp.PRODUCT,
[torch.tensor([rank + 1.0]), torch.tensor([rank + 2.0])],
[
torch.tensor([float(math.factorial(world_size))]),
torch.tensor([float(math.factorial(world_size + 1))]),
],
),
(
c10d.ReduceOp.MIN,
[torch.tensor([rank + x]) for x in [0.0, 1.0]],
[torch.tensor([0.0]), torch.tensor([1.0])],
),
(
c10d.ReduceOp.MAX,
[torch.tensor([rank + x]) for x in [1.0, 2.0]],
[torch.tensor([world_size]), torch.tensor([world_size + 1.0])],
),
]
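# Worked example for world_size = 2: the two SUM inputs per rank are {0, 1} on
# rank 0 and {2, 3} on rank 1, so the total is 6 == world_size * (2 * world_size - 1),
# and the PRODUCT over {1, 2, 3, 4} is 24 == (2 * world_size)!.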
def simple_multi_input_reduce_tests(rank, world_size):
return [
(
c10d.ReduceOp.SUM,
[torch.tensor([2 * rank + 0.0]), torch.tensor([2 * rank + 1.0])],
torch.tensor([float(world_size * (2 * world_size - 1))]),
),
(
c10d.ReduceOp.PRODUCT,
[torch.tensor([2 * rank + 1.0]), torch.tensor([2 * rank + 2.0])],
torch.tensor([float(math.factorial(2 * world_size))]),
),
(
c10d.ReduceOp.MIN,
[torch.tensor([2 * rank + 1.0]), torch.tensor([2 * rank + 2.0])],
torch.tensor([1.0]),
),
(
c10d.ReduceOp.MAX,
[torch.tensor([2 * rank + 1.0]), torch.tensor([2 * rank + 2.0])],
torch.tensor([2 * world_size]),
),
]
class RendezvousEnvTest(TestCase):
@requires_gloo()
@retry_on_connect_failures
def test_logging_init(self):
os.environ["WORLD_SIZE"] = "1"
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = str(common.find_free_port())
os.environ["RANK"] = "0"
previous_handlers = logging.root.handlers
c10d.init_process_group(backend="gloo", init_method="env://")
current_handlers = logging.root.handlers
self.assertEqual(len(previous_handlers), len(current_handlers))
for current, previous in zip(current_handlers, previous_handlers):
self.assertEqual(current, previous)
c10d.destroy_process_group()
class TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase):
@requires_gloo()
@retry_on_connect_failures
def test_default_store_timeout_gloo(self):
self._test_default_store_timeout("gloo")
class ProcessGroupGlooTest(MultiProcessTestCase):
def _create_process_group_gloo(self, store, rank, world_size, opts):
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, opts)
dist.barrier(group=pg)
return pg
def setUp(self):
super(ProcessGroupGlooTest, self).setUp()
self._spawn_processes()
def opts(self, threads=2):
opts = c10d.ProcessGroupGloo._Options()
opts._timeout = 5.0
opts._devices = [create_device(interface=LOOPBACK)]
opts._threads = threads
return opts
@requires_gloo()
def test_multi_device_constructor(self):
store = c10d.FileStore(self.file_name, self.world_size)
opts = c10d.ProcessGroupGloo._Options()
opts._timeout = 5.0
opts._devices = [
create_device(interface=LOOPBACK),
create_device(interface=LOOPBACK),
]
pg = self._create_process_group_gloo(store, self.rank, self.world_size, opts)
# Execute 2x the number of operations to ensure we use every device.
for fut in [pg.allreduce(torch.ones(i + 1)).get_future() for i in range(4)]:
fut.wait()
@requires_gloo()
def test_empty_tensors(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
xs = [torch.FloatTensor([])]
fut = pg.broadcast(xs).get_future()
fut.wait()
output = fut.value()
self.assertEqual(0, output[0].numel())
self.assertEqualIgnoreType(xs[0], output[0])
@requires_gloo()
def test_broadcast_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.BroadcastOptions()
opts.rootRank = -1
opts.rootTensor = 0
pg.broadcast([t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.world_size
opts.rootTensor = 0
pg.broadcast([t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root tensor"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = -1
pg.broadcast([t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root tensor"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 1
pg.broadcast([t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root tensor"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.broadcast([], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor type"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.broadcast([t1, t2], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor size"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.broadcast([t1, t3], opts)
def _test_broadcast_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
fut = pg.broadcast(xs, opts).get_future()
fut.wait()
return fut.value()
# Every rank is root once
for i in range(self.world_size):
# Run with 1 input tensor
x = fn(torch.tensor([self.rank]))
output = broadcast([x], i, 0)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([i]), output[0])
# Run with 2 input tensors
num = 2
for j in range(num):
xs = [
fn(torch.tensor([self.rank * num + 0.0])),
fn(torch.tensor([self.rank * num + 1.0])),
]
output = broadcast(xs, i, j)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([i * num + j]), output[0])
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([i * num + j]), output[1])
# Test overloaded convenience function
x = torch.tensor([self.rank + 1.0])
fut = pg.broadcast(x, root=0).get_future()
fut.wait()
result = fut.value()
self.assertEqual(torch.tensor([1.0]), result[0])
@requires_gloo()
def test_broadcast_basics(self):
self._test_broadcast_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_broadcast_basics_cuda(self):
self._test_broadcast_basics(lambda t: t.clone().cuda())
def _test_broadcast_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
work_handles = [
pg.broadcast(inputs[i], root=(i % self.world_size))
for i in range(len(inputs))
]
for i, work_handle in enumerate(work_handles):
work_handle.wait()
self.assertEqual(
torch.tensor([(i * self.world_size) + (i % self.world_size)]),
inputs[i],
msg=("Mismatch in iteration %d" % i),
)
@requires_gloo()
def test_broadcast_stress(self):
inputs = [torch.tensor([i * self.world_size + self.rank]) for i in range(1000)]
self._test_broadcast_stress(inputs)
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_broadcast_stress_cuda(self):
inputs = [
torch.tensor([i * self.world_size + self.rank]).cuda() for i in range(1000)
]
self._test_broadcast_stress(inputs)
@requires_gloo()
def test_allreduce_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "requires non-empty tensor list"):
opts = c10d.AllreduceOptions()
pg.allreduce([], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor type"):
opts = c10d.AllreduceOptions()
pg.allreduce([t1, t2], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor size"):
opts = c10d.AllreduceOptions()
pg.allreduce([t1, t3], opts)
def _test_allreduce_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
# Single input tests
tests = simple_reduce_tests(self.rank, self.world_size)
for (op, input, expected) in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensor = fn(input)
fut = pg.allreduce([tensor], opts).get_future()
fut.wait()
result = fut.value()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, result[0])
# Multi input tests
tests = simple_multi_input_reduce_tests(self.rank, self.world_size)
for (op, inputs, output) in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensors = [fn(input) for input in inputs]
fut = pg.allreduce(tensors, opts).get_future()
fut.wait()
result = fut.value()
for tensor in result:
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(output, tensor)
# Test overloaded convenience function (defaults to using sum)
x = fn(torch.tensor([self.rank + 1.0]))
fut = pg.allreduce(x).get_future()
fut.wait()
result = fut.value()
self.assertEqual(
torch.tensor([float(self.world_size * (self.world_size + 1) / 2)]),
result[0],
)
@requires_gloo()
def test_allreduce_basics(self):
self._test_allreduce_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_allreduce_basics_cuda(self):
self._test_allreduce_basics(lambda t: t.clone().cuda())
# _using_work_api tests are to make sure we still properly support work API.
# This should go away as we deprecate it.
def _test_allreduce_basics_using_work_api(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
# Single input tests
tests = simple_reduce_tests(self.rank, self.world_size)
for (op, input, expected) in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensor = fn(input)
work = pg.allreduce([tensor], opts)
work.wait()
result = work.result()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, result[0])
# Multi input tests
tests = simple_multi_input_reduce_tests(self.rank, self.world_size)
for (op, inputs, output) in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensors = [fn(input) for input in inputs]
work = pg.allreduce(tensors, opts)
work.wait()
result = work.result()
for tensor in result:
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(output, tensor)
# Test overloaded convenience function (defaults to using sum)
x = fn(torch.tensor([self.rank + 1.0]))
work = pg.allreduce(x)
work.wait()
result = work.result()
self.assertEqual(
torch.tensor([float(self.world_size * (self.world_size + 1) / 2)]),
result[0],
)
@requires_gloo()
def test_allreduce_basics_using_work_api(self):
self._test_allreduce_basics_using_work_api(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_allreduce_basics_cuda_using_work_api(self):
self._test_allreduce_basics_using_work_api(lambda t: t.clone().cuda())
def _test_allreduce_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
future_handles = [
pg.allreduce(inputs[i]).get_future() for i in range(len(inputs))
]
for i, future_handle in enumerate(future_handles):
future_handle.wait()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor(
[
(i * self.world_size)
+ (self.world_size * (self.world_size - 1) / 2)
]
),
future_handle.value()[0],
msg=("Mismatch in iteration %d" % i),
)
@requires_gloo()
def test_allreduce_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_allreduce_stress(inputs)
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_allreduce_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_allreduce_stress(inputs)
@requires_gloo()
def test_allreduce_coalesced_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros(1, dtype=torch.float32)
t2 = torch.zeros(1, dtype=torch.float64)
t3 = torch.sparse_coo_tensor([[0]], [1], size=(1,))
with self.assertRaisesRegex(RuntimeError, "requires non-empty tensor list"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([], opts)
with self.assertRaisesRegex(RuntimeError, "tensors must all have the same type"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t1, t2], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor layout at index"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t1, t3], opts)
with self.assertRaisesRegex(RuntimeError, "unsupported layout"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t3, t3.clone()], opts)
@skip_if_lt_x_gpu(1)
@requires_gloo()
def test_allreduce_coalesced_checks_cuda(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros(1, dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "unsupported device type"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t1.cuda(), t1.cuda()], opts)
def _test_allreduce_coalesced_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
test_cases = simple_coalesced_reduce_tests(self.rank, self.world_size)
for op, inputs, outputs in test_cases:
opts = c10d.AllreduceCoalescedOptions()
opts.reduceOp = op
tensors = [fn(x) for x in inputs]
fut = pg.allreduce_coalesced(tensors, opts).get_future()
fut.wait()
result = fut.value()
for result_tensor, expected in zip(result, outputs):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(result_tensor, expected)
@requires_gloo()
def test_allreduce_coalesced_basics(self):
self._test_allreduce_coalesced_basics(lambda t: t.clone())
def _expected_output(self, i):
ws = self.world_size
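        # Iteration i allreduce-sums tensor([i + rank]) over all ranks, so each
        # of the two coalesced tensors ends up as i * ws + (0 + 1 + ... + (ws - 1))
        # = i * ws + ws * (ws - 1) / 2.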
return 2 * [torch.tensor([(i * ws) + (ws * (ws - 1) / 2)])]
def _test_allreduce_coalesced_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
future_handles = [
pg.allreduce_coalesced(input).get_future() for input in inputs
]
for i, future_handle in enumerate(future_handles):
future_handle.wait()
result = future_handle.value()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
self._expected_output(i),
result,
msg="Mismatch in iteration {}".format(i),
)
@requires_gloo()
def test_allreduce_coalesced_stress(self):
inputs = [2 * [torch.tensor([i + self.rank])] for i in range(1000)]
self._test_allreduce_coalesced_stress(inputs)
@requires_gloo()
def test_allreduce_coalesced_async(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="gloo", rank=self.rank, world_size=self.world_size, store=store
)
xs = [2 * [torch.tensor([i + self.rank])] for i in range(2)]
futs = [c10d.all_reduce_coalesced(x, async_op=True) for x in xs]
torch.futures.wait_all(futs)
for i, fut in enumerate(futs):
self.assertEqualIgnoreType(
self._expected_output(i),
fut.wait(),
msg="Mismatch in iteration {}".format(i),
)
@requires_gloo()
def test_sparse_allreduce_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros([1])
t2 = torch.sparse_coo_tensor([[0]], [1], size=(2,))
t3 = torch.sparse_coo_tensor([[0]], [1], size=(4,))
with self.assertRaisesRegex(RuntimeError, "requires non-empty tensor list"):
opts = c10d.AllreduceOptions()
pg.allreduce([], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor layout"):
opts = c10d.AllreduceOptions()
pg.allreduce([t1, t2], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor size"):
opts = c10d.AllreduceOptions()
pg.allreduce([t2, t3], opts)
# Sparse allreduce only works with c10d.ReduceOp.SUM.
for op in [c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX]:
with self.assertRaisesRegex(RuntimeError, "unsupported reduction operation"):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
pg.allreduce([t3], opts)
def _test_sparse_allreduce_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
for num_inputs_per_rank in [1, 2]:
tests = simple_sparse_reduce_tests(
self.rank, self.world_size, num_inputs=num_inputs_per_rank
)
for (inputs, outputs) in tests:
tensors = [fn(input) for input in inputs]
fut = pg.allreduce(tensors).get_future()
fut.wait()
result = fut.value()
self.assertEqual(tensors, outputs)
self.assertEqual(result, outputs)
@sandcastle_skip("intermittent failures on Windows, in CI")
@requires_gloo()
def test_sparse_allreduce_basics(self):
self._test_sparse_allreduce_basics(lambda t: t)
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_sparse_allreduce_basics_cuda(self):
self._test_sparse_allreduce_basics(lambda t: t.clone().cuda())
@requires_gloo()
def test_scatter_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.ScatterOptions()
opts.rootRank = -1
pg.scatter([t1], [], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.ScatterOptions()
opts.rootRank = self.world_size
pg.scatter([t1], [], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element output tensor list"
):
opts = c10d.ScatterOptions()
opts.rootRank = 0
pg.scatter([], [], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element output tensor list"
):
opts = c10d.ScatterOptions()
opts.rootRank = 0
pg.scatter([t1, t1], [], opts)
with self.assertRaisesRegex(RuntimeError, "requires a single-element input list"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [], opts)
with self.assertRaisesRegex(RuntimeError, "requires a single-element input list"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * self.world_size, [t1] * self.world_size], opts)
desired_list_size = self.world_size
incorrect_list_size = self.world_size - 1
err_str = "Incorrect input list size {}. Input list size should be {}"
with self.assertRaisesRegex(
RuntimeError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * incorrect_list_size], opts)
incorrect_list_size = self.world_size + 1
with self.assertRaisesRegex(
RuntimeError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * incorrect_list_size], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor type"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t2] * self.world_size], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor size"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t3] * self.world_size], opts)
with self.assertRaisesRegex(RuntimeError, "requires empty input on non-root"):
opts = c10d.ScatterOptions()
opts.rootRank = (self.rank + 1) % self.world_size
pg.scatter([t1], [[t1] * self.world_size], opts)
def _test_scatter_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
# Preallocate tensors for input/output
input = [fn(torch.tensor([self.rank])) for _ in range(self.world_size)]
outputs = [fn(torch.tensor([-1])) for _ in range(self.world_size)]
# Take turns being the scatter root and accumulate work items
futures = []
for i in range(self.world_size):
opts = c10d.ScatterOptions()
opts.rootRank = i
if i == self.rank:
futures.append(pg.scatter([outputs[i]], [input], opts).get_future())
else:
futures.append(pg.scatter([outputs[i]], [], opts).get_future())
# Wait for work to complete
for i in range(self.world_size):
futures[i].wait()
result = futures[i].value()
self.assertEqual(torch.tensor([i]), result[0])
@requires_gloo()
def test_scatter_basics(self):
self._test_scatter_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_scatter_basics_cuda(self):
self._test_scatter_basics(lambda t: t.clone().cuda())
def _test_scatter_stress(self, inputs, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
outputs = [
[fn(torch.tensor([-1])) for _ in range(self.world_size)]
for _ in range(len(inputs))
]
future_handles = []
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.ScatterOptions()
opts.rootRank = root
if root == self.rank:
fut = pg.scatter(
[outputs[i][root]], [[fn(e) for e in inputs[i]]], opts
).get_future()
else:
fut = pg.scatter([outputs[i][root]], [], opts).get_future()
future_handles.append(fut)
for i, future_handle in enumerate(future_handles):
future_handle.wait()
iter = i // self.world_size
root = i % self.world_size
result = future_handle.value()
self.assertEqual(
torch.tensor([iter + root]),
result[0],
msg=("Mismatch in iteration %d for rank %d" % (iter, root)),
)
@requires_gloo()
def test_scatter_stress(self):
inputs = [
[torch.tensor([i + self.rank]) for _ in range(self.world_size)]
for i in range(1000)
]
self._test_scatter_stress(inputs, lambda t: t.clone())
@sandcastle_skip(
"Test is flaky, see https://github.com/pytorch/pytorch/issues/15963"
)
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_scatter_stress_cuda(self):
inputs = [
[torch.tensor([i + self.rank]) for _ in range(self.world_size)]
for i in range(1000)
]
self._test_scatter_stress(inputs, lambda t: t.clone().cuda())
@requires_gloo()
def test_gather_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.GatherOptions()
opts.rootRank = -1
pg.gather([], [t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.GatherOptions()
opts.rootRank = self.world_size
pg.gather([], [t1], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element input tensor list"
):
opts = c10d.GatherOptions()
opts.rootRank = 0
pg.gather([], [], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element input tensor list"
):
opts = c10d.GatherOptions()
opts.rootRank = 0
pg.gather([], [t1, t1], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element output list"
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([], [t1], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element output list"
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t1] * self.world_size, [t1] * self.world_size], [t1], opts)
desired_list_size = self.world_size
incorrect_list_size = self.world_size - 1
err_str = "Incorrect output list size {}. Output list size should be {}"
with self.assertRaisesRegex(
RuntimeError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t1] * incorrect_list_size], [t1], opts)
incorrect_list_size = self.world_size + 1
with self.assertRaisesRegex(
RuntimeError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t1] * incorrect_list_size], [t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor type"):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t2] * self.world_size], [t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor size"):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t3] * self.world_size], [t1], opts)
with self.assertRaisesRegex(RuntimeError, "requires empty output on non-root"):
opts = c10d.GatherOptions()
opts.rootRank = (self.rank + 1) % self.world_size
pg.gather([[t1] * self.world_size], [t1], opts)
def _test_gather_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
# Preallocate tensors for input/output
input = [fn(torch.tensor([self.rank]))]
outputs = [fn(torch.tensor([-1])) for _ in range(self.world_size)]
# Take turns being the gather root and accumulate work items
futures = []
for i in range(self.world_size):
opts = c10d.GatherOptions()
opts.rootRank = i
if i == self.rank:
futures.append(pg.gather([outputs], input, opts).get_future())
else:
futures.append(pg.gather([], input, opts).get_future())
# Wait for work to complete
expected = [fn(torch.tensor([rank])) for rank in range(self.world_size)]
for i in range(self.world_size):
futures[i].wait()
result = futures[i].value()
if i == self.rank:
self.assertEqual(expected, result)
@requires_gloo()
def test_gather_basics(self):
self._test_gather_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_gather_basics_cuda(self):
self._test_gather_basics(lambda t: t.clone().cuda())
@requires_gloo()
def test_gather_noncontiguous_input(self):
        # Take a column of a 2D tensor so that its memory is not dense
self._test_gather_basics(lambda t: t.expand(2, 2).contiguous()[:, 0])
def _test_gather_stress(self, inputs, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
future_handles = []
outputs = [
[[fn(torch.tensor([-1])) for _ in range(self.world_size)]]
for _ in range(len(inputs))
]
expected_outputs = [
[[torch.tensor([i + j]) for j in range(self.world_size)]]
for i in range(len(inputs))
]
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.GatherOptions()
opts.rootRank = root
if root == self.rank:
fut = pg.gather(outputs[i], [fn(inputs[i])], opts).get_future()
else:
fut = pg.gather([], [fn(inputs[i])], opts).get_future()
future_handles.append(fut)
for i, future_handle in enumerate(future_handles):
future_handle.wait()
iter = i // self.world_size
root = i % self.world_size
if root == self.rank:
result = future_handle.value()
self.assertEqual(
expected_outputs[iter],
[result],
msg=("Mismatch in iteration %d for root %d" % (iter, root)),
)
@requires_gloo()
def test_gather_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_gather_stress(inputs, lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_gather_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_gather_stress(inputs, lambda t: t.clone().cuda())
@requires_gloo()
def test_allgather_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "requires non-empty input tensor list"):
pg.allgather([], [])
with self.assertRaisesRegex(
RuntimeError, "requires input/output tensor lists to have the same length"
):
pg.allgather([], [t1])
with self.assertRaisesRegex(
RuntimeError, "requires input/output tensor lists to have the same length"
):
pg.allgather([[t1] * self.world_size, [t1] * self.world_size], [t1])
with self.assertRaisesRegex(RuntimeError, "invalid output tensor list"):
pg.allgather([[t1] * (self.world_size - 1)], [t1])
with self.assertRaisesRegex(RuntimeError, "invalid output tensor list"):
pg.allgather([[t1] * (self.world_size + 1)], [t1])
with self.assertRaisesRegex(RuntimeError, "invalid tensor type"):
pg.allgather(
[[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t2]
)
with self.assertRaisesRegex(RuntimeError, "invalid tensor size"):
pg.allgather(
[[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t3]
)
with self.assertRaisesRegex(RuntimeError, "invalid tensor type"):
pg.allgather([([t1, t2] * (self.world_size))[: self.world_size]], [t1])
with self.assertRaisesRegex(RuntimeError, "invalid tensor size"):
pg.allgather([([t1, t3] * (self.world_size))[: self.world_size]], [t1])
def _test_allgather_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
        # Run with N input tensors per rank
for n in [1, 2, 3]:
input = [fn(torch.tensor([n * self.rank + i])) for i in range(n)]
output = [
[fn(torch.tensor([-1])) for _ in range(n * self.world_size)]
for _ in range(n)
]
expected_output = [
[fn(torch.tensor([i])) for i in range(n * self.world_size)]
for _ in range(n)
]
fut = pg.allgather(output, input).get_future()
fut.wait()
result = fut.value()
if n == 1:
result = [result]
self.assertEqual(expected_output, result)
@requires_gloo()
def test_allgather_basics(self):
self._test_allgather_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_allgather_basics_cuda(self):
self._test_allgather_basics(lambda t: t.clone().cuda())
@requires_gloo()
def test_allgather_noncontiguous_input(self):
        # Take a column of a 2D tensor so that its memory is not dense
self._test_allgather_basics(lambda t: t.expand(2, 2).contiguous()[:, 0])
def _test_allgather_stress(self, inputs, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
future_handles = []
outputs = [
[[fn(torch.tensor([-1])) for _ in range(self.world_size)]]
for _ in range(len(inputs))
]
expected_outputs = [
[[torch.tensor([i + j]) for j in range(self.world_size)]]
for i in range(len(inputs))
]
input_holder = {}
for i in range(len(inputs)):
# Note that this works around the data race discussed in
# https://github.com/pytorch/pytorch/issues/75529, but we should
# actually be able to pass the list directly into allgather when
# that race is fixed.
input_holder[i] = [fn(inputs[i])]
fut = pg.allgather(outputs[i], input_holder[i]).get_future()
future_handles.append(fut)
for i, future_handle in enumerate(future_handles):
future_handle.wait()
result = future_handle.value()
self.assertEqual(
expected_outputs[i],
[result],
msg=("Mismatch in iteration %d" % i),
)
@requires_gloo()
def test_allgather_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_allgather_stress(inputs, lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_allgather_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_allgather_stress(inputs, lambda t: t.clone().cuda())
@requires_gloo()
def test_allgather_coalesced_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
dummy_input = [torch.zeros([1], dtype=torch.float32)]
dummy_output_lists = [
[torch.zeros([1], dtype=torch.float32)] for _ in range(self.world_size)
]
        # One of the output tensors does not match the input list.
dummy_output_lists[0] = [torch.zeros([0], dtype=torch.float32)]
with self.assertRaisesRegex(
RuntimeError, "invalid size of output tensor at index 0"
):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
        # One of the output tensors does not match the input list.
dummy_output_lists[0] = [torch.zeros([1], dtype=torch.float64)]
with self.assertRaisesRegex(RuntimeError, "invalid tensor type at index 0"):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
# Output lists have too many elements
dummy_output_lists = [
[torch.zeros([1], dtype=torch.float32)] for _ in range(self.world_size + 1)
]
with self.assertRaisesRegex(
RuntimeError, "output lists should be equal to world size"
):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
# Output is not a list of lists.
dummy_output_lists = [torch.zeros([0], dtype=torch.float32)]
with self.assertRaisesRegex(
RuntimeError, "Invalid function argument.*output_tensor_lists"
):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
@requires_gloo()
def test_allgather_coalesced_async(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="gloo", rank=self.rank, world_size=self.world_size, store=store
)
xxs = [2 * [torch.tensor([i + self.rank])] for i in range(2)]
yys = [[[torch.zeros_like(x) for x in xx] for _ in range(self.world_size)] for xx in xxs]
futs = [c10d.all_gather_coalesced(yy, xx, async_op=True) for xx, yy in zip(xxs, yys)]
# expected outputs
zzs = [[2 * [torch.tensor([i + r])] for r in range(self.world_size)] for i in range(2)]
torch.futures.wait_all(futs)
for yy, zz in zip(yys, zzs):
# one iteration
for y_out, z_out in zip(yy, zz):
# one output tensor list
for y, z in zip(y_out, z_out):
# one tensor in output tensor list
self.assertEqualIgnoreType(y, z)
# Added to address https://github.com/pytorch/pytorch/issues/65231
        # In the failed tests, all assertEqualIgnoreType checks passed on all
        # processes. However, one of the processes did not run the
        # ProcessGroupGloo destructor before exiting the program. This is not
        # surprising, as the only guarantee Python makes is that garbage
        # collection MAY happen before the program exits. If GC does not happen,
        # the two threads in ProcessGroup might be destructed before being joined.
        # FIXME: it's still unclear why only this test requires an explicit
        # destroy_process_group()
c10d.destroy_process_group()
@requires_gloo()
def test_reduce_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
        pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros([1], dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.ReduceOptions()
opts.rootRank = -1
opts.rootTensor = 0
pg.reduce([t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.ReduceOptions()
opts.rootRank = self.world_size
opts.rootTensor = 0
pg.reduce([t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root tensor"):
opts = c10d.ReduceOptions()
opts.rootRank = self.rank
opts.rootTensor = 1
pg.reduce([t1], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element tensor list"
):
opts = c10d.ReduceOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.reduce([t1, t1], opts)
def _test_reduce_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
for (op, input, output) in simple_reduce_tests(self.rank, self.world_size):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.reduceOp = op
opts.rootRank = root
tmp = fn(input)
fut = pg.reduce([tmp], opts).get_future()
fut.wait()
result = fut.value()
if root == self.rank:
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(output, result[0])
@requires_gloo()
def test_reduce_basics(self):
self._test_reduce_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_reduce_basics_cuda(self):
self._test_reduce_basics(lambda t: t.clone().cuda())
def _test_reduce_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
future_handles = []
outputs = []
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.rootRank = root
tmp = inputs[i].clone()
outputs.append(tmp)
fut = pg.reduce([tmp], opts).get_future()
future_handles.append(fut)
for i, future_handle in enumerate(future_handles):
future_handle.wait()
result = future_handle.value()
iter = i // self.world_size
root = i % self.world_size
if root == self.rank:
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor(
[
(iter * self.world_size)
+ (self.world_size * (self.world_size - 1) / 2)
]
),
result[0],
msg=("Mismatch in iteration %d with root rank %d" % (iter, root)),
)
@requires_gloo()
def test_reduce_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_reduce_stress(inputs)
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_reduce_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_reduce_stress(inputs)
@requires_gloo()
def test_send_recv_all_to_all(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
# Preallocate tensors for input/output
inputs = [torch.tensor([self.rank]) for _ in range(self.world_size)]
outputs = [torch.tensor([-1]) for _ in range(self.world_size)]
# Issue sends
send_work = []
for i in range(self.world_size):
if i == self.rank:
continue
send_work.append(pg.send([inputs[i]], i, 0))
# Issue recvs
recv_work = []
for i in range(self.world_size):
if i == self.rank:
continue
recv_work.append(pg.recv([outputs[i]], i, 0))
# Wait for sends to complete
for work in send_work:
work.wait()
self.assertTrue(work.is_completed())
# Wait for recvs to complete
for work in recv_work:
work.wait()
self.assertTrue(work.is_completed())
# Test that every output other than our own contains the respective rank
for i in range(self.world_size):
if i == self.rank:
continue
self.assertEqual(torch.tensor([i]), outputs[i])
@requires_gloo()
def test_barrier_implies_wait(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
# Kick off allreduce operations
size = (100, 100)
num = 16
tensors = [torch.full(size, float(i)) for i in range(num)]
for tensor in tensors:
# Note: leak the returned work handle
pg.allreduce(tensor)
# Barrier should ensure all previous work has completed
pg.barrier().get_future().wait()
for i, tensor in enumerate(tensors):
self.assertEqual(torch.full(size, float(i * self.world_size)), tensor)
@skip_if_win32()
@requires_gloo()
def test_round_robin(self):
num_process_groups = 2
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d._round_robin_process_groups(
[
c10d.ProcessGroupGloo(
c10d.PrefixStore(str(i), store),
self.rank,
self.world_size,
self.opts(),
)
for i in range(num_process_groups)
]
)
        # Run a few collectives so that every underlying process group gets used
for _ in range(num_process_groups + 1):
tensor = torch.full([100, 100], float(self.rank))
pg.broadcast(tensor, root=0).wait()
self.assertEqual(torch.full([100, 100], 0.0), tensor)
@skip_if_win32()
@requires_gloo()
def test_round_robin_create_destroy(self):
store = c10d.FileStore(self.file_name, self.world_size)
def create(num, prefix):
return c10d._round_robin_process_groups(
[
c10d.ProcessGroupGloo(
c10d.PrefixStore("%s/%d" % (prefix, i), store),
self.rank,
self.world_size,
self.opts(),
)
for i in range(num)
]
)
# Run create/use/destroy twice
for i in range(2):
num_process_groups = 2
pg = create(num=num_process_groups, prefix=i)
for _ in range(3):
tensor = torch.ones([10, 10])
pg.allreduce(tensor).wait()
self.assertEqual(torch.full([10, 10], float(self.world_size)), tensor)
del pg
class DistributedDataParallelTest(
test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
self._spawn_processes()
def _get_process_group(self):
store = self._get_store()
return c10d.ProcessGroupGloo(store, self.rank, self.world_size)
def _test_gloo_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
options = c10d.ProcessGroupGloo._Options()
options._devices = [create_device(interface=LOOPBACK)]
process_group = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, options
)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_gloo()
def test_gloo_backend_cpu_module(self):
self._test_gloo_backend([torch.device("cpu")], None)
@requires_gloo()
def test_gloo_backend_cpu_module_grad_is_view(self):
self._test_gloo_backend(
[torch.device("cpu")], None, gradient_as_bucket_view=True
)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, int_devices)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, devices)
@requires_gloo()
@skip_if_lt_x_gpu(4)
def test_gloo_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, None, multi_device=True)
@requires_gloo()
@skip_if_lt_x_gpu(8)
def test_gloo_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, None, multi_device=True)
def _test_global_local_unused_params_grad(
self, gradient_as_bucket_view=False, static_graph=False
):
"""
        By simulating multi-task training, this test makes sure that:
1) DDP does not touch the grad of globally unused parameters.
2) DDP does update the grad of locally unused parameters.
"""
class GlobalLocalUnusedParamModule(nn.Module):
def __init__(self):
super(GlobalLocalUnusedParamModule, self).__init__()
self.t0 = Task()
self.t1 = Task()
self.task_unused = Task()
def task_parameters(self):
return (self.t0.p, self.t1.p, self.task_unused.p)
def forward(self, x, rank):
return self.t0(x) if rank == 0 else self.t1(x)
def run_and_verify_grad(model):
# Run forward
output = model(8, self.rank)
# The grads of all parameters should be None at this point.
t0_p, t1_p, task_unused_p = model.module.task_parameters()
self.assertIsNone(t0_p.grad)
self.assertIsNone(t1_p.grad)
self.assertIsNone(task_unused_p.grad)
# Run backward
output.mean().backward()
            # Now the locally unused parameter should have its grad updated on
            # all ranks, while the globally unused parameter should still have
            # a None grad.
self.assertIsNotNone(t0_p.grad)
self.assertIsNotNone(t1_p.grad)
self.assertIsNone(task_unused_p.grad)
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Test on CPU
cpu_model = DistributedDataParallel(
GlobalLocalUnusedParamModule().cpu(),
process_group=process_group,
find_unused_parameters=True,
gradient_as_bucket_view=gradient_as_bucket_view,
static_graph=static_graph,
)
run_and_verify_grad(cpu_model)
# Test on GPU
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
GlobalLocalUnusedParamModule().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=True,
gradient_as_bucket_view=gradient_as_bucket_view,
static_graph=static_graph,
)
run_and_verify_grad(gpu_model)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_global_local_unused_params_grad(self):
self._test_global_local_unused_params_grad()
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_global_local_unused_params_grad_with_grad_is_view(self):
self._test_global_local_unused_params_grad(gradient_as_bucket_view=True)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_global_local_unused_params_grad_with_static_graph(self):
self._test_global_local_unused_params_grad(static_graph=True)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_find_unused_parameters_when_unused_parameters_empty(self):
"""
        An empty unused_parameters array does not imply find_unused_parameters =
        false. This test makes sure that DDP still allreduces unused parameters
        correctly when the forward pass in some processes uses all parameters.
        The module used here exercises all parameters on rank 0 and leaves some
        parameters unused on the other ranks.
"""
class FindUnusedParamModule(nn.Module):
def __init__(self):
super(FindUnusedParamModule, self).__init__()
self.t0 = Task()
self.t1 = Task()
def task_parameters(self):
return (self.t0.p, self.t1.p)
def forward(self, x, rank):
return self.t1(self.t0(x)) if rank == 0 else self.t1(x)
def run_and_verify_grad(model):
# Run forward
output = model(8, self.rank)
# The grads of all parameters should be None at this point.
[self.assertIsNone(t_p.grad) for t_p in model.module.task_parameters()]
# Run backward
output.mean().backward()
# Now even locally unused parameters should have their grads updated on all ranks.
[self.assertIsNotNone(t_p.grad) for t_p in model.module.task_parameters()]
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Test on CPU
cpu_model = DistributedDataParallel(
FindUnusedParamModule().cpu(),
process_group=process_group,
find_unused_parameters=True,
)
run_and_verify_grad(cpu_model)
# Test on GPU
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
FindUnusedParamModule().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=True,
)
run_and_verify_grad(gpu_model)
@requires_gloo()
def test_ignored_output(self):
"""
Test that the output of a model can be ignored and that there is no
implicit requirement that `backward` gets called.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
class IgnoredOutput(nn.Module):
def __init__(self):
super(IgnoredOutput, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
model = DistributedDataParallel(
IgnoredOutput().float(),
process_group=process_group,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
# Run a few iterations where we ignore the output.
for _ in range(4):
output = model(input)
del output
# Run a few iterations where we use the output.
for _ in range(4):
output = model(input)
loss = criterion(output, target)
loss.backward()
@requires_gloo()
def test_ignored_output_with_unused_parameters(self):
"""
Test that the output of a model can be ignored and that there is no
implicit requirement that `backward` gets called, if not all model
parameters participated in computing the model output.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
class IgnoredOutputWithUnusedParameters(nn.Module):
def __init__(self):
super(IgnoredOutputWithUnusedParameters, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
model = DistributedDataParallel(
IgnoredOutputWithUnusedParameters().float(),
process_group=process_group,
find_unused_parameters=True,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
# Run a few iterations where we ignore the output.
for _ in range(4):
output = model(input)
del output
# Run a few iterations where we use the output.
for _ in range(4):
output = model(input)
loss = criterion(output, target)
loss.backward()
def _run_and_verify_sparse_gradients(self, vanilla_model, ddp_model):
mult = 2
batch_size = mult * self.world_size
criterion = nn.CrossEntropyLoss()
input = torch.randint(0, 10, [batch_size, 2])
target = torch.randint(0, 10, [batch_size])
# Run with entire batch against single process version
criterion(vanilla_model(input), target).backward()
# Run with partial batch against multi process version
partial_input = input.split(mult)[self.rank]
partial_target = target.split(mult)[self.rank]
criterion(ddp_model(partial_input), partial_target).backward()
# Check that the gradients are sparse and identical
vanilla_parameter = next(vanilla_model.parameters())
ddp_parameter = next(ddp_model.parameters())
self.assertEqual(vanilla_parameter.grad.coalesce(), ddp_parameter.grad.coalesce())
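# A minimal sketch of the kind of module that produces sparse gradients. The
# actual SparseGradientModule used by these tests lives in a shared test helper
# (not shown here) and may differ; this hypothetical variant only illustrates
# why .coalesce() is needed above: an embedding created with sparse=True yields
# a sparse (COO) gradient for its weight, and two sparse gradients can only be
# compared element-wise after coalescing.
def _make_sparse_gradient_module_sketch():
    # EmbeddingBag(10, 10, sparse=True) takes integer index rows (like the
    # torch.randint input above) and produces a sparse weight gradient.
    return nn.Sequential(nn.EmbeddingBag(10, 10, sparse=True), nn.Linear(10, 10))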
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_save_load_checkpoint(self):
dist.init_process_group(
"gloo",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
def train_loop(model, optimizer, iterations):
for _ in range(iterations):
optimizer.zero_grad()
output = model(input)
loss = criterion(output, target)
loss.backward()
optimizer.step()
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model_withload = TestModel().float().to(device_id)
model_withoutload = TestModel().float().to(device_id)
ddp_withload = DistributedDataParallel(
model_withload,
device_ids=[device_id],
)
ddp_withoutload = DistributedDataParallel(
model_withoutload,
device_ids=[device_id],
)
# ensure that all three models start with the same set of parameters; by default they are randomized on construction
for p in ddp_withload.parameters():
with torch.no_grad():
p.zero_()
for p in model_withload.parameters():
with torch.no_grad():
p.zero_()
for p in ddp_withoutload.parameters():
with torch.no_grad():
p.zero_()
batch_size = 4
criterion = nn.CrossEntropyLoss()
optimizer_withload = torch.optim.SGD(ddp_withload.parameters(), lr=0.001)
optimizer_non_ddp_withload = torch.optim.SGD(
model_withload.parameters(), lr=0.001
)
optimizer_withoutload = torch.optim.SGD(ddp_withoutload.parameters(), lr=0.001)
input = torch.rand([batch_size, 2], dtype=torch.float).to(device_id)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# run the model for 6 iterations, with a checkpoint in the middle
train_loop(ddp_withload, optimizer_withload, 3)
# zero out parameters of both DDP and non-DDP models and reload them from the DDP state dict
checkpoint_path = tempfile.gettempdir() + "/model.checkpoint"
if self.rank == 0:
torch.save(ddp_withload.state_dict(), checkpoint_path)
dist.barrier()
map_location = {"cuda:%d" % 0: "cuda:%d" % self.rank}
ddp_state_dict = torch.load(checkpoint_path, map_location=map_location)
for model in [ddp_withload, model_withload]:
for p in model.parameters():
with torch.no_grad():
p.zero_()
ddp_withload.load_state_dict(ddp_state_dict)
# the non-DDP model needs to first remove the prefix of "module." from the DDP state dict
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
ddp_state_dict, "module."
)
model_withload.load_state_dict(ddp_state_dict)
train_loop(ddp_withload, optimizer_withload, 3)
train_loop(model_withload, optimizer_non_ddp_withload, 3)
# re-run the model with the same inputs for 6 iterations with no checkpoint
train_loop(ddp_withoutload, optimizer_withoutload, 6)
for p_withload, p_withoutload, p_non_ddp_withload in zip(
ddp_withload.parameters(),
ddp_withoutload.parameters(),
model_withload.parameters(),
):
self.assertEqual(p_withload, p_withoutload)
self.assertEqual(p_non_ddp_withload, p_withoutload)
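# For reference, consume_prefix_in_state_dict_if_present rewrites the keys in
# place, e.g. the DDP entry "module.fc1.weight" becomes "fc1.weight", which is
# why the plain (non-DDP) TestModel above can then load the DDP checkpoint directly.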
def _test_sparse_gradients(self, gradient_as_bucket_view=False):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Ensure initialized weights and inputs are identical across processes
torch.manual_seed(1337)
vanilla_model = SparseGradientModule()
ddp_model = DistributedDataParallel(
copy.deepcopy(vanilla_model),
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
self._run_and_verify_sparse_gradients(vanilla_model, ddp_model)
@requires_gloo()
def test_sparse_gradients(self):
self._test_sparse_gradients()
@requires_gloo()
def test_sparse_gradients_grad_is_view(self):
self._test_sparse_gradients(gradient_as_bucket_view=True)
@requires_gloo()
def test_ddp_comm_hook_future_passing_cpu(self):
"""
This unit test verifies whether the Future object is passed properly.
The callback function creates a Future object and sets a value on it.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Test on CPU
cpu_model = DistributedDataParallel(
ModuleForDdpCommHook().cpu(), process_group=process_group
)
# Register DDP Communication Hook
cpu_model.register_comm_hook(None, self._simple_hook)
# Check whether the grads are equal to what the then callback returns;
# without the comm_hook, the result would be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(cpu_model, 8, 2 * torch.ones(2, 2))
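# A hedged sketch of the behaviour verified above. The actual _simple_hook is
# defined in a shared test base class (not shown here) and may differ in detail;
# this hypothetical hook merely reproduces the value checked above: instead of
# using the allreduced bucket, it resolves its future with ones + ones, giving
# the 2 * torch.ones(2, 2) gradient asserted above, whereas the default
# allreduce-and-average path would leave 0.25 * torch.ones(2, 2).
def _doubled_ones_hook_sketch(state, bucket):
    fut = torch.futures.Future()
    # Ignore the bucket contents and start from a tensor of ones.
    fut.set_result(torch.ones_like(bucket.buffer()))
    # Chain a callback that adds another tensor of ones.
    return fut.then(lambda f: f.value() + torch.ones_like(f.value()))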
def _gpu_model_with_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False, state=None
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_future_passing_gpu_gloo(self):
"""
This unit test verifies whether the Future object is passed properly using gloo backend.
The hook callback function creates a Future object and sets a value on it.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Get GPU model with simple_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)
# Check whether the grads are equal to what simple_hook's then callback returns;
# without the comm_hook, the result would be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))
@requires_gloo()
def test_ddp_invalid_comm_hook_init(self):
"""
This unit test makes sure that register_comm_hook properly checks the format
of the hook defined by the user. The Python hook must be callable. This test
also checks whether the bucket annotation is checked properly if defined.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
model = DistributedDataParallel(
ModuleForDdpCommHook(), process_group=process_group
)
with self.assertRaisesRegex(TypeError, "Communication hook must be callable."):
model.register_comm_hook(state=None, hook=1)
with self.assertRaisesRegex(
ValueError, "bucket annotation should be dist.GradBucket."
):
def comm_hook(
state: object, bucket: int
) -> torch.futures.Future[torch.Tensor]:
return torch.futures.Future()
model.register_comm_hook(state=None, hook=comm_hook)
@requires_gloo()
def test_ddp_invalid_comm_hook_return_type(self):
"""
This test checks whether the return annotation is checked properly if defined.
It also checks whether an internal error is thrown if the return type is
incorrect and the user hasn't specified any return type annotation.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
model = DistributedDataParallel(
ModuleForDdpCommHook(), process_group=process_group
)
expected_err = "Communication hook: return annotation should be torch.futures.Future"
with self.assertRaisesRegex(
ValueError,
expected_err,
):
def comm_hook(state: object, bucket: dist.GradBucket) -> int:
return torch.futures.Future()
model.register_comm_hook(state=None, hook=comm_hook)
verify_ddp_error_logged(model, expected_err)
with self.assertRaisesRegex(
RuntimeError,
"callback must return a torch.futures.Future object, but got",
):
def comm_hook(state: object, bucket: dist.GradBucket):
return 1
model.register_comm_hook(state=None, hook=comm_hook)
# Run forward
output = model(8, self.rank)
# Run backward
output.mean().backward()
@requires_gloo()
def test_ddp_comm_hook_register_just_once(self):
"""
A DDP communication hook can only be registered once. This test validates that
the proper error is thrown when register_comm_hook is called more than once.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
model = DistributedDataParallel(
ModuleForDdpCommHook(), process_group=process_group
)
def dummy_hook(state, bucket):
fut = torch.futures.Future()
fut.set_result([bucket.buffer()])
return fut
model.register_comm_hook(None, dummy_hook)
with self.assertRaisesRegex(
RuntimeError,
"register_comm_hook or register_builtin_comm_hook can only be called once.",
):
model.register_comm_hook(None, dummy_hook)
@requires_gloo()
def test_ddp_comm_hook_sparse_gradients(self):
"""
Runs "test_sparse_gradients" unit test with DDP communication hook. We define a
simple hook that does allreduce and works with gloo backend for this test.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Ensure initialized weights and inputs are identical across processes
torch.manual_seed(1337)
vanilla_model = SparseGradientModule()
ddp_model = DistributedDataParallel(
copy.deepcopy(vanilla_model),
process_group=process_group,
)
def allreduce_hook_gloo(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
def div_by_world_size(fut):
# Divide the result by world_size.
return fut.wait()[0] / self.world_size
# Prepare allreduced grad bucket tensors by running an async work.
fut = process_group.allreduce([bucket.buffer()]).get_future()
return fut.then(div_by_world_size)
ddp_model.register_comm_hook(None, allreduce_hook_gloo)
self._run_and_verify_sparse_gradients(vanilla_model, ddp_model)
class ReducerModule(nn.Module):
def __init__(self):
super(ReducerModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, use_fc3=True):
x = self.relu(self.fc1(x)).float()
x = self.relu(self.fc2(x)).float()
if use_fc3:
x = self.fc3(x).float()
return F.softmax(x, dim=1)
class ReducerTest(TestCase):
def setUp(self):
self.file = tempfile.NamedTemporaryFile(delete=False)
self.store = c10d.FileStore(self.file.name, 1)
self.process_group = c10d.ProcessGroupGloo(self.store, 0, 1)
@requires_gloo()
def test_single_dtype_single_bucket(self):
model = ReducerModule()
parameters = list(model.parameters())
buckets = [list(range(len(parameters)))]
dist.Reducer(parameters, buckets, [dist._DEFAULT_FIRST_BUCKET_BYTES], self.process_group)
def _create_mixed_precision_model(self):
model = ReducerModule()
model.float()
model.fc1.double()
return model
@requires_gloo()
def test_multi_dtype_single_bucket(self):
model = self._create_mixed_precision_model()
# Raise if there are multiple types per bucket.
# In this case we create one bucket for all parameters.
with self.assertRaises(RuntimeError):
parameters = list(model.parameters())
buckets = [list(range(len(parameters)))]
dist.Reducer(
parameters,
buckets,
[dist._DEFAULT_FIRST_BUCKET_BYTES],
self.process_group
)
@requires_gloo()
def test_multi_dtype_multi_bucket(self):
model = self._create_mixed_precision_model()
parameters = list(model.parameters())
group_by_dtype = groupby(
range(len(parameters)), key=lambda i: parameters[i].dtype
)
buckets = [list(indices) for _, indices in group_by_dtype]
dist.Reducer(
parameters,
buckets,
[dist._DEFAULT_FIRST_BUCKET_BYTES for _ in buckets],
self.process_group
)
def _create_reducer_for_models(self, models, find_unused_parameters=False):
self.assertEqual(len(models), 1)
parameters = list(models[0].parameters())
group_by_dtype = groupby(
range(len(parameters)), key=lambda i: parameters[i].dtype
)
buckets = [list(indices) for _, indices in group_by_dtype]
return dist.Reducer(
parameters,
buckets,
[dist._DEFAULT_FIRST_BUCKET_BYTES for _ in range(len(buckets))],
self.process_group,
find_unused_parameters=find_unused_parameters,
)
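# For the mixed-precision model above (fc1 cast to double, fc2/fc3 kept float),
# grouping parameter indices by dtype yields homogeneous buckets, e.g. roughly
# buckets = [[0], [1, 2]] with parameters() ordered fc1, fc2, fc3. Each bucket
# must hold a single dtype, which is exactly what test_multi_dtype_single_bucket
# above asserts fails when a single mixed bucket is used.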
@requires_gloo()
def test_forward_backward(self):
batch_size = 10
model = self._create_mixed_precision_model()
reducer = self._create_reducer_for_models([model])
reducer.prepare_for_forward()
loss = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.double)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
output = loss(model(input), target)
reducer.prepare_for_backward(output)
output.backward()
@requires_gloo()
def test_forward_backward_unused_parameters(self):
batch_size = 10
model = self._create_mixed_precision_model()
reducer = self._create_reducer_for_models([model], find_unused_parameters=True)
reducer.prepare_for_forward()
loss = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.double)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
output = loss(model(input, use_fc3=False), target)
# Check that the grad of fc3 is not set.
self.assertEqual(None, model.fc3.weight.grad)
# Compute and accumulate gradients.
reducer.prepare_for_backward(output)
output.backward()
# The reducer will have marked the grad of fc3 as ready, because
# it doesn't show up in the autograd graph of `output`. Since fc3.weight
# is considered globally unused, it will be kept untouched as None.
self.assertEqual(None, model.fc3.weight.grad)
@requires_gloo()
def test_forward_backward_optimizer(self):
batch_size = 10
model = self._create_mixed_precision_model()
reducer = self._create_reducer_for_models([model], find_unused_parameters=True)
reducer.prepare_for_forward()
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
for i in range(3):
input = torch.rand([batch_size, 2], dtype=torch.double)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
# The `zero_grad` function calls `detach_` and `zero_` on the grad
# tensors of model parameters. If we tried to set the grad tensors
# to a view of the reducer's bucket tensors, this would blow up.
optimizer.zero_grad()
# Unused parameter only in the first iteration.
output = loss(model(input, use_fc3=(i > 0)), target)
reducer.prepare_for_backward(output)
output.backward()
optimizer.step()
class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
self._spawn_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def _test_broadcast_coalesced(self, process_group, device, root_rank):
half = torch.float16
# No support for float16 for CPU tensors
if device == torch.device("cpu"):
half = torch.float32
target = torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
# The tensors to pass to broadcast are identical to the target
# only on the process that is the root of the broadcast.
if self.rank == root_rank:
tensors = list(tensor.clone() for tensor in target)
else:
tensors = list(torch.zeros_like(tensor) for tensor in target)
if self.rank != root_rank:
self.assertNotEqual(tensors, target)
c10d._broadcast_coalesced(
process_group, tensors, buffer_size=256, src=root_rank
)
if self.rank != root_rank:
self.assertEqual(tensors, target)
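# Roughly speaking, c10d._broadcast_coalesced flattens the listed tensors into
# contiguous buffers of at most buffer_size bytes (grouped per dtype), broadcasts
# each buffer from src, and scatters the results back into the original tensors;
# the interleaved half/float/double target above exercises that bucketing.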
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_gloo_cuda(self):
store = c10d.FileStore(self.file_name, self.world_size)
options = c10d.ProcessGroupGloo._Options()
options._devices = [create_device(interface=LOOPBACK)]
process_group = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, options
)
device = torch.device("cuda:%d" % self.rank)
ranks = list(range(self.world_size))
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_gloo()
def test_broadcast_coalesced_gloo_cpu(self):
store = c10d.FileStore(self.file_name, self.world_size)
options = c10d.ProcessGroupGloo._Options()
options._devices = [create_device(interface=LOOPBACK)]
process_group = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, options
)
device = torch.device("cpu")
ranks = list(range(self.world_size))
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_default_pg_gloo(self):
self._test_sequence_num_set_default_pg(backend="gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_gloo_new_group(self):
self._test_sequence_num_set_new_group(backend="gloo")
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_sequence_num_incremented_gloo_default(self):
self._test_sequence_num_incremented_default_group("gloo")
@skip_if_lt_x_gpu(4)
@requires_gloo()
def test_sequence_num_incremented_gloo_subgroup(self):
if self.world_size < 4:
return sandcastle_skip("Test requires world_size of at least 4")
self._test_sequence_num_incremented_subgroup("gloo")
@requires_gloo()
def test_gloo_barrier_device_ids(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="gloo", rank=self.rank, world_size=self.world_size, store=store
)
with self.assertRaisesRegex(RuntimeError, "device_ids not supported"):
c10d.barrier(device_ids=[self.rank])
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_gloo_warn_not_in_group(self):
self._test_warn_not_in_group(backend="gloo")
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
pytorch-master
|
test/distributed/test_c10d_gloo.py
|
# Owner(s): ["oncall: distributed"]
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.multiprocessing as mp
from torch.testing._internal.common_distributed import \
MultiProcessTestCase
from torch.testing._internal.common_utils import load_tests,\
NO_MULTIPROCESSING_SPAWN
# torch.distributed.nn is not available on Windows;
# see #42095, it errors on import.
_torch_dist_nn_available = True
try:
import torch.distributed.nn
except ImportError:
_torch_dist_nn_available = False
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if not c10d.is_available():
print('c10d not available, skipping tests', file=sys.stderr)
sys.exit(0)
if NO_MULTIPROCESSING_SPAWN:
print('spawn not available, skipping tests', file=sys.stderr)
sys.exit(0)
class AbstractProcessGroupShareTensorTest(object):
world_size = 2
def _test_multiprocess(self, f, shared_tensors, init_pg, n_output):
ws = self.world_size
# file store will delete the test file on destruction
file = tempfile.NamedTemporaryFile(delete=False)
ctx = mp.get_context('spawn')
c2p = ctx.Queue(2)
p2c = ctx.Queue(2)
ps = []
for i in range(ws):
p = ctx.Process(
target=f,
args=(i, file.name, shared_tensors, ws, init_pg, c2p, p2c))
p.start()
ps.append(p)
for _ in range(ws * n_output):
pid, expected, result = c2p.get()
self.assertEqual(
expected,
result,
msg=(
"Expect rank {} to receive tensor {} but got {}."
).format(pid, expected, result)
)
for _ in range(ws):
p2c.put(0)
for p in ps:
p.join(2)
# Why classmethod? multiprocessing cannot pickle TestCase subclass when in
# spawn mode. See https://bugs.python.org/issue33884.
@classmethod
def _test_broadcast_process(
cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
pg.broadcast(xs).wait()
c2p.put((rank, torch.zeros(2, 2), xs[0].to("cpu")))
p2c.get()
@classmethod
def _test_allreduce_process(
cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
pg.allreduce(xs, op=c10d.ReduceOp.SUM).wait()
c2p.put((rank, torch.ones(2, 2) * 2, xs[0].to("cpu")))
p2c.get()
@classmethod
def _test_allgather_process(
cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
ys = [[torch.zeros_like(xs[0]) for i in range(world_size)]]
pg.allgather(ys, xs).wait()
for i in range(world_size):
c2p.put((rank, torch.ones(2, 2) * i, ys[0][i].to("cpu")))
p2c.get()
class TestDistributedNNFunctions(MultiProcessTestCase):
def setUp(self):
super(TestDistributedNNFunctions, self).setUp()
self._spawn_processes()
def tearDown(self):
super(TestDistributedNNFunctions, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 2
def _test_broadcast(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
# This is required because these functions call directly into torch.distributed
# and need the world to be initialized
c10d.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend=backend
)
device = torch.device(f"cuda:{self.rank}")
x = torch.ones(5, 5, device=device) + self.rank
x.requires_grad = True
y = torch.distributed.nn.broadcast(x, 1)
self.assertEqual(y, 1 + torch.ones(5, 5))
z = y.sin().sum()
z.backward()
# We can't check the gradient of communication ops numerically, so we do the calculation by hand
if self.rank == 1:
self.assertEqual(x.grad, 2 * torch.cos(x))
elif self.rank == 0:
self.assertEqual(x.grad, torch.zeros(5, 5, device=device))
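# Worked gradient check for _test_broadcast: y is x broadcast from rank 1 and
# z = sum(sin(y)), so dz/dy = cos(y) on every rank. The autograd-aware broadcast
# accumulates those per-rank gradients back onto the source tensor, hence rank 1
# sees world_size * cos(x) = 2 * cos(x) while the non-source rank 0 gets zeros.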
def _test_reduce(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
# This is required because these functions call directly into torch.distributed
# and need the world to be initialized
c10d.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend=backend
)
device = torch.device(f"cuda:{self.rank}")
x = torch.ones(5, 5, device=device) + self.rank
x.requires_grad = True
y = torch.distributed.nn.reduce(x, 1, op=c10d.ReduceOp.SUM)
if self.rank == 1:
self.assertEqual(y, 3 * torch.ones(5, 5, device=device))
z = y.sin().sum()
z.backward()
# Gradients are broadcasted to both ranks
x_g = (3 * torch.ones(5, 5, device=device)).cos()
self.assertEqual(x.grad, x_g)
def _test_allreduce(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
# This is required because these functions call directly into torch.distributed
# and need the world to be initialized
c10d.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend=backend
)
device = torch.device(f"cuda:{self.rank}")
x = torch.ones(5, 5, device=device) + self.rank
x.requires_grad = True
y = torch.distributed.nn.all_reduce(x, op=c10d.ReduceOp.SUM)
self.assertEqual(y, 3 * torch.ones(5, 5, device=device))
z = y.sin().sum()
z.backward()
x_g = 2 * (3 * torch.ones(5, 5, device=device)).cos()
self.assertEqual(x.grad, x_g)
def _test_all_gather(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
# This is required because these functions call directly into torch.distributed
# and need the world to be initialized
c10d.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend=backend
)
device = torch.device(f"cuda:{self.rank}")
x = torch.ones(5, 5, device=device) + self.rank
x.requires_grad = True
tensors = torch.distributed.nn.all_gather(x)
for i, t in enumerate(tensors):
self.assertEqual(t, torch.ones(5, 5, device=device) + i)
y = torch.sum(torch.stack(tensors), axis=0)
z = y.sin().sum()
z.backward()
x_s = 2 * (3 * torch.ones(5, 5, device=device)).cos()
self.assertEqual(x.grad, x_s)
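# Worked gradient check for _test_all_gather with world_size = 2: every rank
# gathers [ones, 2 * ones], so y = 3 * ones and dz/d(tensors[i]) = cos(3 * ones)
# on each rank. The autograd-aware all_gather sums the gradient contributions
# from all ranks back onto each source tensor, giving x.grad = 2 * cos(3 * ones).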
def _test_all_to_all(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
# This is required because these functions call directly into torch.distributed
# and need the world to be initialized
c10d.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend=backend
)
device = torch.device(f"cuda:{self.rank}")
x0 = torch.ones(5, 5, device=device) + 2 * self.rank
x1 = torch.ones(5, 5, device=device) + 2 * self.rank
x0.requires_grad = True
x1.requires_grad = True
y0 = torch.empty_like(x0)
y1 = torch.empty_like(x1)
tensors = torch.distributed.nn.all_to_all([y0, y1], [x0, x1])
for i, t in enumerate(tensors):
self.assertEqual(t, torch.ones(5, 5, device=device) + 2 * i)
y = torch.sum(torch.stack(tensors), axis=0)
z = y.sin().sum()
z.backward()
x_s = (4 * torch.ones(5, 5, device=device)).cos()
self.assertEqual(x0.grad, x_s)
self.assertEqual(x1.grad, x_s)
def _test_all_to_all_single(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
# This is required because these functions call directly into torch.distributed
# and need the world to be initialized
c10d.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend=backend
)
device = torch.device(f"cuda:{self.rank}")
row = self.world_size * (self.rank + 1) * (self.world_size + 1) / 2
x = torch.ones(int(row), 5, device=device) * (self.rank + 1)
x.requires_grad = True
y = torch.empty_like(x)
split_sizes = [(i + 1) * (self.rank + 1) for i in range(self.world_size)]
y = torch.distributed.nn.all_to_all_single(
y, x, output_split_sizes=split_sizes, input_split_sizes=split_sizes
)
expected = []
for idx, tensor in enumerate(torch.split(x, split_sizes)):
expected.append(torch.full_like(tensor, (idx + 1)))
expected = torch.cat(expected)
self.assertEqual(y, expected)
z = y.sin().sum()
z.backward()
x_s = ((self.rank + 1) * torch.ones(int(row), 5, device=device)).cos()
self.assertEqual(x.grad, x_s)
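# Worked example of the split-size arithmetic above for world_size = 2:
# rank 0 holds row = 2 * 1 * 3 / 2 = 3 rows filled with 1.0 and splits [1, 2];
# rank 1 holds row = 2 * 2 * 3 / 2 = 6 rows filled with 2.0 and splits [2, 4].
# all_to_all_single then leaves rank 0 with [1 row from itself, 2 rows from
# rank 1] and rank 1 with [2 rows from rank 0, 4 rows from itself], which is
# exactly the torch.full_like(tensor, idx + 1) expectation built above.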
|
pytorch-master
|
test/distributed/test_c10d_spawn.py
|
# Owner(s): ["oncall: distributed"]
import copy
import os
import sys
import tempfile
import test_c10d_spawn
import torch
import torch.distributed as c10d
import torch.nn as nn
from test_c10d_spawn import _torch_dist_nn_available, TestDistributedNNFunctions
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
from torch.testing._internal.common_distributed import requires_gloo, \
create_device, skip_if_lt_x_gpu
from torch.testing._internal.common_utils import TestCase, run_tests, sandcastle_skip_if, TEST_WITH_DEV_DBG_ASAN
# Fails on Python-3.9, see https://github.com/pytorch/pytorch/issues/51619
if sys.version_info < (3, 9):
class ProcessGroupShareTensorTest(test_c10d_spawn.AbstractProcessGroupShareTensorTest, TestCase):
@classmethod
def opts(cls, threads=2):
opts = c10d.ProcessGroupGloo._Options()
opts._timeout = 5.0
opts._devices = [create_device(interface='lo')]
opts._threads = threads
return opts
@classmethod
def _init_pg_gloo(cls, rank, filename, world_size):
store = c10d.FileStore(filename, world_size)
return c10d.ProcessGroupGloo(
store, rank, world_size, ProcessGroupShareTensorTest.opts())
@sandcastle_skip_if(not TEST_MULTIGPU, "At least 2 CUDA GPUs needed")
def test_shared_broadcast_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_broadcast_process,
[torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_gloo,
1)
@sandcastle_skip_if(not TEST_MULTIGPU, "At least 2 CUDA GPUs needed")
def test_shared_allreduce_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allreduce_process,
[torch.ones(2, 2).to(i) for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_gloo,
1)
@sandcastle_skip_if(not TEST_MULTIGPU, "At least 2 CUDA GPUs needed")
def test_shared_allgather_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allgather_process,
[torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_gloo,
self.world_size)
@classmethod
def _test_allgather_chunk_process(
cls, rank, filename, shared_tensor, world_size, init_pg, c2p, p2c):
pg = init_pg(rank, filename, world_size)
chunks = torch.chunk(shared_tensor, world_size, dim=0)
x = chunks[rank]
ys = [torch.zeros_like(x) for _ in range(world_size)]
pg.allgather(ys, x).wait()
c2p.put((rank, chunks[0].to("cpu"), ys[0].to("cpu")))
c2p.put((rank, chunks[1].to("cpu"), ys[1].to("cpu")))
p2c.get()
@sandcastle_skip_if(not TEST_MULTIGPU, "At least 2 CUDA GPUs needed")
def test_shared_allgather_chunk_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allgather_chunk_process,
torch.tensor(range(4)).reshape(2, 2),
ProcessGroupShareTensorTest._init_pg_gloo,
self.world_size)
class DistributedDataParallelSingleProcessTest(TestCase):
def setUp(self):
self.rank = 0
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False) # noqa: P201
def tearDown(self):
try:
os.remove(self.file.name)
except OSError:
pass
def _test_base(self, net, inp, check_allclose=True):
store = c10d.FileStore(self.file.name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
if inp[0].is_cuda:
device_ids = [torch.cuda.current_device()]
else:
device_ids = None
ddp = nn.parallel.DistributedDataParallel(
copy.deepcopy(net),
device_ids=device_ids,
process_group=process_group
)
net_opt = torch.optim.Adam(net.parameters(), lr=0.001)
ddp_opt = torch.optim.Adam(ddp.parameters(), lr=0.001)
for i, j in zip(ddp.parameters(), net.parameters()):
self.assertTrue(i.allclose(j))
for _ in range(10):
net_out = net(*inp)
ddp_out = ddp(*inp)
net_out.sum().backward()
ddp_out.sum().backward()
net_opt.step()
ddp_opt.step()
if check_allclose:
for i, j in zip(ddp.parameters(), net.parameters()):
self.assertTrue(i.allclose(j))
@requires_gloo()
def test_cpu(self):
self._test_base(nn.Linear(2, 2), [torch.randn(30, 2)])
@requires_gloo()
@sandcastle_skip_if(not TEST_CUDA, "At least 1 CUDA GPU needed")
def test_cuda(self):
self._test_base(nn.Linear(2, 2).to(0), [torch.randn(30, 2).to(0)])
@requires_gloo()
@sandcastle_skip_if(not TEST_CUDA, "At least 1 CUDA GPU needed")
def test_rnn(self):
# This test is inspired by the bug reported in
# https://github.com/pytorch/pytorch/issues/36268
BATCH_SIZE = 12 # Divisible by 2, 3, 4
INPUT_DIM = 256
OUTPUT_DIM = 256
HIDDEN_DIM = 256
N_LAYERS = 3
SEQ_LEN = 100
class Net(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, hidden_layers):
super(Net, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.hidden_layers = hidden_layers
self.lstm = nn.LSTM(input_dim, hidden_dim, hidden_layers, batch_first=True)
self.h2o = nn.Linear(hidden_dim, output_dim)
def forward(self, x, y):
self.lstm.flatten_parameters()
h_t, _ = self.lstm(x)
output = self.h2o(h_t)
loss = nn.functional.mse_loss(output, y)
return loss
net = Net(INPUT_DIM, HIDDEN_DIM, OUTPUT_DIM, N_LAYERS).to(0)
inp = [
torch.randn((BATCH_SIZE, SEQ_LEN, INPUT_DIM)).to(0),
torch.rand((BATCH_SIZE, SEQ_LEN, OUTPUT_DIM)).to(0)
]
# Not checking that results are allclose, as the parameter inconsistency exists
# prior to this change. See #37079
self._test_base(net, inp, check_allclose=False)
# Skip dev-asan as torch + multiprocessing spawn have known issues
if not TEST_WITH_DEV_DBG_ASAN:
class TestDistributedNNFunctionsGloo(TestDistributedNNFunctions):
# Test Common Ops First.
@requires_gloo()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_broadcast(self):
self._test_broadcast("gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_reduce(self):
self._test_reduce("gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_allreduce(self):
self._test_allreduce("gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_gather(self):
self._test_all_gather("gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_to_all(self):
self._test_all_to_all("gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_all_to_all_single(self):
self._test_all_to_all_single("gloo")
# Test Ops only supported in GLOO.
@requires_gloo()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_gather(self):
store = c10d.FileStore(self.file_name, self.world_size)
# This is required because these functions call directly into torch.distributed
# and need the world to be initialized
c10d.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='gloo')
device = torch.device(f"cuda:{self.rank}")
x = torch.ones(5, 5, device=device) + self.rank
x.requires_grad = True
tensors = torch.distributed.nn.gather(x, 1)
if self.rank == 1:
for i, t in enumerate(tensors):
self.assertEqual(t, torch.ones(5, 5, device=device) + i)
elif self.rank == 0:
for i, t in enumerate(tensors):
zeros = torch.zeros(5, 5, device=device)
self.assertEqual(t, zeros)
y = torch.sum(torch.stack(tensors), axis=0)
z = y.sin().sum()
z.backward()
# Test gradient
x_s = 3 * torch.ones(5, 5, device=device)
self.assertEqual(x.grad, x_s.cos())
@requires_gloo()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(not _torch_dist_nn_available, "torch.distributed.nn is not available")
def test_scatter(self):
store = c10d.FileStore(self.file_name, self.world_size)
# This is required because these functions call directly into torch.distributed
# and need the world to be initialized
c10d.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='gloo')
device = torch.device(f"cuda:{self.rank}")
x0 = torch.ones(5, 5, device=device)
x1 = torch.ones(5, 5, device=device) + 1
x0.requires_grad = True
x1.requires_grad = True
y = torch.distributed.nn.scatter([x0, x1], 1)
if self.rank == 1:
self.assertEqual(y, 1 + torch.ones(5, 5, device=device))
elif self.rank == 0:
self.assertEqual(y, torch.ones(5, 5, device=device))
z = y.sin().sum()
z.backward()
# Test gradient
if self.rank == 1:
x0_s = torch.ones(5, 5, device=device).cos()
x1_s = (2 * torch.ones(5, 5, device=device)).cos()
self.assertEqual(x0.grad, x0_s)
self.assertEqual(x1.grad, x1_s)
if self.rank == 0:
self.assertEqual(x0.grad, torch.zeros(5, 5, device=device))
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/distributed/test_c10d_spawn_gloo.py
|
# Owner(s): ["oncall: distributed"]
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip ASAN as torch + multiprocessing spawn have known issues", file=sys.stderr
)
sys.exit(0)
# bfloat16 is only supported by CUDA 11+
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
class RendezvousEnvTest(TestCase):
@retry_on_connect_failures
@requires_nccl()
@sandcastle_skip_if(
torch.cuda.device_count() == 0, "No GPUs available, skipping test"
)
def test_common_errors(self):
vars = {
"WORLD_SIZE": "1",
"RANK": "0",
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(common.find_free_port()),
}
class Env(object):
def __init__(self, vars):
self.env_patcher = mock.patch.dict(os.environ, vars, clear=True)
def __enter__(self):
self.env_patcher.start()
def __exit__(self, type, value, traceback):
self.env_patcher.stop()
def without(d, key):
d = d.copy()
d.pop(key)
return d
def withouts(d, keys):
d = d.copy()
for key in keys:
d.pop(key)
return d
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
with self.assertRaisesRegex(ValueError, "RANK expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", rank=0)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
c10d.init_process_group(backend="nccl", rank=0, world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(vars):
c10d.init_process_group(backend="nccl")
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "MASTER_ADDR")):
self.assertEqual(None, os.environ.get("MASTER_ADDR"))
with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "MASTER_PORT")):
self.assertEqual(None, os.environ.get("MASTER_PORT"))
with self.assertRaisesRegex(ValueError, "MASTER_PORT expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?world_size={}".format(1))
_, _, size = next(gen)
self.assertEqual(size, 1)
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
gen = c10d.rendezvous("env://?rank={}".format(0))
_, rank, _ = next(gen)
self.assertEqual(rank, 0)
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?rank={}&world_size={}".format(0, 1))
_, rank, size = next(gen)
self.assertEqual(rank, 0)
self.assertEqual(size, 1)
class TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase):
@requires_nccl()
@retry_on_connect_failures
@sandcastle_skip_if(
torch.cuda.device_count() == 0, "No GPUs available, skipping test"
)
def test_default_store_timeout_nccl(self):
self._test_default_store_timeout("nccl")
class ProcessGroupNCCLNoGPUTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
def tearDown(self):
pass
@requires_nccl()
@sandcastle_skip_if(
torch.cuda.device_count() > 0, "GPUs are available, skipping test"
)
def test_init_no_gpus(self):
store = c10d.FileStore(self.file.name, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "ProcessGroupNCCL is only supported with GPUs, no GPUs found!"
):
c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ProcessGroupNCCLTest(MultiProcessTestCase):
def _create_process_group_nccl(self, store, opts):
# create nccl processgroup with opts
c10d.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=opts)
pg = c10d.distributed_c10d._get_default_group()
return pg
def opts(self, high_priority_stream=False):
opts = c10d.ProcessGroupNCCL.Options()
opts.is_high_priority_stream = high_priority_stream
return opts
def setUp(self):
super(ProcessGroupNCCLTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING, hence tests
# that use NCCL_BLOCKING_WAIT will still test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
# self.num_gpus = torch.cuda.device_count()
self._spawn_processes()
def tearDown(self):
super(ProcessGroupNCCLTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def world_size(self):
return 2
@property
def rank_to_GPU(self):
# return rank to GPU map
return init_multigpu_helper(self.world_size, "nccl")
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_empty_tensors(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_idx = self.rank_to_GPU[self.rank][0]
xs = [torch.FloatTensor([]).cuda(local_device_idx)]
pg.broadcast(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.allreduce(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.reduce(xs).wait()
self.assertEqual(0, xs[0].numel())
ys = [[torch.FloatTensor([]).cuda(local_device_idx) for _ in range(self.world_size)]]
pg.allgather(ys, xs).wait()
for y in ys[0]:
self.assertEqual(0, y.numel())
ys = [torch.FloatTensor([]).cuda(local_device_idx)]
xs = [[torch.FloatTensor([]).cuda(local_device_idx) for _ in range(self.world_size)]]
pg.reduce_scatter(ys, xs).wait()
self.assertEqual(0, ys[0].numel())
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_broadcast_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
work = pg.broadcast(xs, opts)
work.wait()
return work.result()
# Every rank is root once
for i in range(self.world_size):
# Run with 1 input tensor
x = torch.tensor([self.rank]).cuda(self.rank_to_GPU[self.rank][0])
output = broadcast([x], i, 0)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([i]), output[0])
expected_tensor = torch.empty([i + 1, i + 1]).fill_(i + 1)
xs = [torch.empty([i + 1, i + 1]).fill_(-1).cuda(device=device_idx) for device_idx in self.rank_to_GPU[self.rank]]
# test with multiple input tensors (multiple GPUs in one rank)
for j in range(len(xs)):
if self.rank == i:
xs[j] = expected_tensor.cuda(device=self.rank_to_GPU[self.rank][j])
broadcast(xs, i, j)
for tensor in xs:
self.assertEqual(tensor, expected_tensor)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allreduce_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
device_count = torch.cuda.device_count()
pg = self._create_process_group_nccl(store, self.opts())
local_device_id = self.rank_to_GPU[self.rank][0]
def allreduce(tensors, op):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
work = pg.allreduce(tensors, opts)
work.wait()
# Sum
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.SUM)
ndev = float(self.world_size)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([ndev * (ndev + 1) / 2]),
tensors[0],
)
# Avg (only available for NCCL 2.10+)
if torch.cuda.nccl.version() >= (2, 10, 0):
tensors = [torch.tensor([self.rank + 1.]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.AVG)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([ndev * (ndev + 1.) / (2. * ndev)]),
tensors[0],
)
# Product
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.PRODUCT)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(math.factorial(self.world_size))]), tensors[0]
)
# Min
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.MIN)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([1.0]), tensors[0])
# Max
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.MAX)
self.assertEqual(torch.tensor([self.world_size]), tensors[0])
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
allreduce(tensors, op)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_id = self.rank_to_GPU[self.rank][0]
def reduce(xs, rootRank, rootTensor, op=None):
opts = c10d.ReduceOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
if op:
opts.reduceOp = op
work = pg.reduce(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.world_size):
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
reduce(tensors, rt, 0)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
if self.rank == rt:
self.assertEqualIgnoreType(
torch.tensor([float(self.world_size * (self.world_size + 1) / 2)]),
tensors[0],
)
else:
self.assertEqualIgnoreType(
torch.tensor([self.rank + 1]),
tensors[0],
)
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
reduce(tensors, self.rank, rt, op)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allgather_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
def allgather(output_ts, input_ts):
work = pg.allgather(output_ts, input_ts)
return work.wait()
tensors = [torch.empty(2, 2).fill_(2).cuda(device=i) for i in local_device_ids]
output_tensors = []
expected_output = []
output_per_gpu = ([torch.empty(2, 2).fill_(-1)] * len(local_device_ids) * self.world_size)
expected_per_gpu = ([torch.empty(2, 2).fill_(2)] * len(local_device_ids) * self.world_size)
for gpu in local_device_ids:
output_tensors.append([t.cuda(device=gpu) for t in output_per_gpu])
expected_output.append([t.cuda(device=gpu) for t in expected_per_gpu])
result = allgather(output_tensors, tensors)
# Verification
self.assertEqual(output_tensors, expected_output)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allgather_base_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_id = self.rank_to_GPU[self.rank][0]
def allgather_base(output_t, input_t):
work = pg._allgather_base(output_t, input_t)
work.wait()
# allgather_base is GPU-count agnostic.
# Each rank contributes one tensor regardless of the number of GPUs.
tensor = torch.tensor([self.rank]).cuda(local_device_id)
output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(local_device_id)
allgather_base(output_t, tensor)
# Verification
self.assertEqual(torch.arange(self.world_size), output_t)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allgather_base_basics(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_id = self.rank_to_GPU[self.rank][0]
def allgather_base(output_t, input_t):
work = pg._allgather_base(output_t, input_t)
work.wait()
# anticipate an error
with self.assertRaisesRegex(
RuntimeError,
"output tensor size must be equal to world_size times input tensor size",
):
tensor = torch.tensor([self.rank]).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=tensor.dtype).cuda(
local_device_id
)
# fails the check because output_t is not correctly sized
allgather_base(output_t, tensor)
# anticipate an error
with self.assertRaisesRegex(
RuntimeError, "output tensor must have the same type as input tensor"
):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
local_device_id
)
# fails the check because the dtype is different
allgather_base(output_t, tensor)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_gather_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
num_gpus = len(local_device_ids)
def gather(output_t, input_t, rootRank):
opts = c10d.GatherOptions()
opts.rootRank = rootRank
if rootRank == self.rank:
work = pg.gather(output_t, input_t, opts)
else:
work = pg.gather([], input_t, opts)
work.wait()
# init input
tensors = []
for device_id in local_device_ids:
tensors.append(torch.tensor([self.rank]).cuda(device_id))
# init output
output_ts = []
for idx in range(num_gpus):
gpu_idx = local_device_ids[idx]
output_ts.append([])
for rank in range(self.world_size):
output_ts[idx].append(torch.tensor([-1]).cuda(gpu_idx))
expected = [[torch.tensor([rank]) for rank in range(self.world_size)]]
for rank in range(self.world_size):
gather(output_ts, tensors, rank)
if rank == self.rank:
self.assertEqual(expected, output_ts)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_gather_stress(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
num_gpus = len(local_device_ids)
def gather(output_t, input_t, rootRank):
opts = c10d.GatherOptions()
opts.rootRank = rootRank
if rootRank == self.rank:
work = pg.gather(output_t, input_t, opts)
else:
work = pg.gather([], input_t, opts)
work.wait()
stress_length = 1000
# init input
tensors = []
for i in range(stress_length):
tensors.append([])
for device_id in local_device_ids:
tensors[i].append(torch.tensor([self.rank]).cuda(device_id))
# init output
output_ts = []
for i in range(stress_length):
output_ts.append([[] for _ in range(num_gpus)])
for idx, ls in enumerate(output_ts[i]):
gpu_idx = local_device_ids[idx]
for _ in range(self.world_size):
ls.append(torch.tensor([-1]).cuda(gpu_idx))
expected = [[torch.tensor([rank]) for rank in range(self.world_size)]]
for i in range(stress_length):
for rank in range(self.world_size):
gather(output_ts[i], tensors[i], rank)
# Verification
if rank == self.rank:
self.assertEqual(output_ts[i], expected)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_gather_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
num_gpus = len(local_device_ids)
# init input
tensors = []
for device_id in local_device_ids:
tensors.append(torch.tensor([self.rank]).cuda(device_id))
# init output
output_ts = []
for idx in range(num_gpus):
gpu_idx = local_device_ids[idx]
output_ts.append([])
for rank in range(self.world_size):
output_ts[idx].append(torch.tensor([-1]).cuda(gpu_idx))
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.GatherOptions()
opts.rootRank = -1
pg.gather(output_ts, tensors, opts)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
pg.gather(output_ts, tensors, 0)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.GatherOptions()
opts.rootRank = self.world_size
pg.gather(output_ts, tensors, opts)
with self.assertRaisesRegex(
RuntimeError, "Tensor list must be nonempty"
):
opts = c10d.GatherOptions()
opts.rootRank = 0
pg.gather(output_ts, [], opts)
with self.assertRaisesRegex(
RuntimeError, "Tensors must be on distinct GPU devices"
):
# init input
tensors2 = []
for device_id in local_device_ids:
tensors2.append(torch.tensor([self.rank]).cuda(device_id))
tensors2.append(torch.tensor([self.rank]).cuda(device_id))
opts = c10d.GatherOptions()
opts.rootRank = 0
pg.gather(output_ts, tensors2, opts)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_scatter_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
num_gpus = len(local_device_ids)
def scatter(output_t, input_t, rootRank):
opts = c10d.ScatterOptions()
opts.rootRank = rootRank
if rootRank == self.rank:
work = pg.scatter(output_t, input_t, opts)
else:
work = pg.scatter(output_t, [], opts)
work.wait()
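# Mirror of the gather contract: only the root rank supplies the nested
# scatter (input) list; every other rank passes an empty list.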
# init output
tensors = []
for device_id in local_device_ids:
tensors.append(torch.tensor([-1]).cuda(device_id))
# init input
scatter_list = []
for idx in range(num_gpus):
gpu_idx = local_device_ids[idx]
scatter_list.append([])
for rank in range(self.world_size):
scatter_list[idx].append(torch.tensor([rank]).cuda(gpu_idx))
# test each rank to scatter
expected = [torch.tensor([self.rank])]
for rank in range(self.world_size):
scatter(tensors, scatter_list, rank)
self.assertEqual(expected, tensors)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_scatter_stress(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
num_gpus = len(local_device_ids)
def scatter(output_t, input_t, rootRank):
opts = c10d.ScatterOptions()
opts.rootRank = rootRank
if rootRank == self.rank:
work = pg.scatter(output_t, input_t, opts)
else:
work = pg.scatter(output_t, [], opts)
work.wait()
stress_length = 1000
# init output
tensors = []
for i in range(stress_length):
tensors.append([])
for device_id in local_device_ids:
tensors[i].append(torch.tensor([-1]).cuda(device_id))
# init input
scatter_list = []
for i in range(stress_length):
scatter_list.append([[] for _ in range(num_gpus)])
for idx, ls in enumerate(scatter_list[i]):
gpu_idx = local_device_ids[idx]
for rank in range(self.world_size):
ls.append(torch.tensor([rank]).cuda(gpu_idx))
# test each rank to scatter
expected = [torch.tensor([self.rank])]
for i in range(stress_length):
for rank in range(self.world_size):
scatter(tensors[i], scatter_list[i], rank)
# Verification
self.assertEqual(tensors[i], expected)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_scatter_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
num_gpus = len(local_device_ids)
# init output
tensors = []
for device_id in local_device_ids:
tensors.append(torch.tensor([-1]).cuda(device_id))
# init input
scatter_list = []
for idx in range(num_gpus):
gpu_idx = local_device_ids[idx]
scatter_list.append([])
for rank in range(self.world_size):
scatter_list[idx].append(torch.tensor([rank]).cuda(gpu_idx))
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.ScatterOptions()
opts.rootRank = -1
pg.scatter(tensors, scatter_list, opts)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
pg.scatter(tensors, scatter_list, 0)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.ScatterOptions()
opts.rootRank = self.world_size
pg.scatter(tensors, scatter_list, opts)
with self.assertRaisesRegex(
RuntimeError, "Tensor list must be nonempty"
):
opts = c10d.ScatterOptions()
opts.rootRank = 0
pg.scatter([], scatter_list, opts)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_scatter_base_basics(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_id = self.rank_to_GPU[self.rank][0]
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
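# _reduce_scatter_base expects input.numel() == output.numel() * world_size
# and matching dtypes; the two blocks below deliberately violate each rule.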
# anticipate an error
with self.assertRaisesRegex(
RuntimeError,
"input tensor must be the same size as output size times world size",
):
input_t = torch.tensor([self.rank]).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=input_t.dtype).cuda(
local_device_id
)
# fails the check because output_t is not correctly sized
reduce_scatter_base(output_t, input_t)
# anticipate an error
with self.assertRaisesRegex(
RuntimeError, "input tensor must be the same type as the outut tensor."
):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
local_device_id
)
# fails the check because the dtype is different
reduce_scatter_base(output_t, tensor)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_scatter_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
num_gpus = len(local_device_ids)
def reduce_scatter(outputs, input_lists, op):
opts = c10d.ReduceScatterOptions()
opts.reduceOp = op
work = pg.reduce_scatter(outputs, input_lists, opts)
work.wait()
output = [torch.tensor([0]).cuda(i) for i in local_device_ids]
# GPU/rank
# 0 [1], [2], [3], [4]
# 1 [2], [3], [4], [5]
# 2 [3], [4], [5], [6]
# 3 [4], [5], [6], [7]
# Sum
tensor_lists = []
input_per_gpu = []
for i in range(self.world_size):
input_per_gpu.append(torch.tensor([self.rank + i + 1]))
for gpu in local_device_ids:
tensor_lists.append([t.cuda(device=gpu) for t in input_per_gpu])
reduce_scatter(output, tensor_lists, c10d.ReduceOp.SUM)
for i in range(num_gpus):
expected = torch.tensor(
[
float((1 + self.world_size) * self.world_size / 2)
+ self.world_size * self.rank
])
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
# Min
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MIN)
for i in range(num_gpus):
expected = torch.tensor([self.rank + 1 + i])
self.assertEqual(expected, output[i])
# Max
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX)
for i in range(num_gpus):
expected = torch.tensor(
[self.rank + self.world_size + i]
)
self.assertEqual(expected, output[i])
# Product
reduce_scatter(output, tensor_lists, c10d.ReduceOp.PRODUCT)
# the math package doesn't have math.perm until Python 3.8, so
# we implement a naive version here.
def perm(n, k):
prod_val = n
for val in range(n - k + 1, n):
prod_val *= val
return prod_val
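# For PRODUCT, the value reduced onto this rank is
# (rank + 1) * (rank + 2) * ... * (rank + world_size);
# perm(rank + world_size, world_size) computes exactly that product.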
for i in range(num_gpus):
prod_val = perm(self.rank + self.world_size, self.world_size)
expected = torch.tensor([prod_val])
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
# Test the input params overridden scenarios, aka, when the input is
# a list and output is just one tensor.
# Sum
output_tensor = torch.empty_like(input_per_gpu[0][0]).cuda(self.rank)
input_list = [tensor[0].cuda(self.rank) for tensor in input_per_gpu]
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.SUM).wait()
expected = torch.tensor(
float((1 + self.world_size) * self.world_size / 2) + self.world_size * self.rank
)
self.assertEqualIgnoreType(expected, output_tensor)
# Min
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.MIN).wait()
expected = torch.tensor(self.rank + 1)
self.assertEqualIgnoreType(expected, output_tensor)
# Max
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.MAX).wait()
expected = torch.tensor(self.rank + self.world_size)
self.assertEqualIgnoreType(expected, output_tensor)
# Product
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.PRODUCT).wait()
prod_val = self.rank + 1
for k in range(1, self.world_size):
prod_val = prod_val * (self.rank + 1 + k)
expected = torch.tensor(prod_val)
self.assertEqualIgnoreType(expected, output_tensor)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_scatter_base_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_id = self.rank_to_GPU[self.rank][0]
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
# reduce_scatter_base is agnostic to the number of GPUs per rank.
# Each rank contributes one tensor regardless of GPU count.
output_t = torch.empty([1]).cuda(local_device_id)
tensor = torch.arange(self.world_size, dtype=output_t.dtype).cuda(local_device_id)
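# Every rank contributes arange(world_size); summing element r across
# world_size ranks gives r * world_size, which lands on rank r.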
reduce_scatter_base(output_t, tensor)
# Verification
self.assertEqual(output_t[0], self.rank * self.world_size)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
def allreduce(tensors):
opts = c10d.AllreduceOptions()
work = pg.allreduce(tensors, opts)
return work
# Make the collectives operate on
# 1, 2, 3, ..., len(local_device_ids) GPUs
tensors_list = [[] for _ in range(len(local_device_ids))]
for i in range(1, len(local_device_ids) + 1):
for j in range(i):
tensors_list[i - 1].append(torch.tensor([j + 1]).cuda(local_device_ids[j]))
works = []
for tensors in tensors_list:
work = allreduce(tensors)
works.append(work)
# Barrier will ensure that all previous work is completed
pg.barrier().wait()
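# After the barrier returns, all of the allreduce work objects above are
# guaranteed to have completed, so the results can be checked without
# calling wait() on each one.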
for i in range(1, len(local_device_ids) + 1):
for j in range(i):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([(j + 1) * self.world_size]), tensors_list[i - 1][j]
)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_send_recv(self):
store = c10d.FileStore(self.file_name, self.world_size)
self._create_process_group_nccl(store, self.opts())
device = self.rank_to_GPU[self.rank][0]
# Generate the same random tensor
torch.manual_seed(0)
send_tensor = torch.rand(10, 10, device=device)
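# Because both ranks use the same seed, rank 1's locally generated
# send_tensor equals rank 0's and can serve as the reference for the
# received data below.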
if self.rank == 0:
dist.send(send_tensor, 1)
if self.rank == 1:
recv_tensor = torch.rand(10, 10, device=device)
dist.recv(recv_tensor, 0)
self.assertEqual(send_tensor, recv_tensor)
# Test with non-contiguous tensors.
send_tensor_view = send_tensor.t()
if self.rank == 0:
with self.assertRaisesRegex(RuntimeError, 'Tensors must be contiguous'):
dist.send(send_tensor_view, 1)
class DistributedDataParallelTest(
test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def _get_process_group(self):
store = self._get_store()
return c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def _test_nccl_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_propagate_error_reason(self):
# Need to use NCCL_BLOCKING_WAIT and not ASYNC_ERROR_HANDLING,
# otherwise process will be taken down and we can't check for errors.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
os.environ["NCCL_BLOCKING_WAIT"] = "1"
# TODO: a smaller timeout can fail since PG NCCL does a health check in the
# constructor. Look into reducing this test's runtime.
store = c10d.FileStore(self.file_name, self.world_size)
# provide sufficient timeout to initialize NCCL comm.
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size, timeout=timedelta(seconds=15))
pg_gloo = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
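# The Gloo group is used as an out-of-band synchronization channel so that
# rank 0 can stay blocked without touching the NCCL communicator.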
pg.barrier().wait(timedelta(seconds=5))
# Simulate stuckness in rank 0.
if self.rank == 0:
pg_gloo.barrier().wait()
inp = torch.ones(1).cuda(self.rank)
if self.rank != 0:
# Time out due to rank 0 not calling into allreduce.
with self.assertRaises(RuntimeError):
pg.allreduce([inp]).wait(timedelta(seconds=5))
# Now when a nonzero rank attempts to use the communicator, the original failure reason should be logged.
try:
pg.allreduce([torch.ones(2).cuda(self.rank)]).wait()
except RuntimeError as e:
self.assertTrue("timed out in call to wait()" in str(e))
self.assertTrue("TensorShape=[1]" in str(e))
else:
self.fail("Expected error to be raised!")
# Unblock rank 0
pg_gloo.barrier().wait()
# TODO: We can also test that if rank 0 attempts to use the communicator,
# then we should error out with the info that it was aborted due to
# timeout on another rank. Although this would only be the case after
# the watchdog has run on the rank, and there is no reliable way
# to confirm it has run.
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_multi_device_ids_not_allowed(self):
int_devices = list(range(torch.cuda.device_count()))
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_device_ids_None(self):
self._test_nccl_backend(None, None)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_empty_device_ids(self):
# This tests the backward compatibility of accepting an empty list as `device_ids`,
# although we no longer document this in favor of the default value of `None`,
# which is consistent with multi-device modules and CPU modules.
self._test_nccl_backend(None, [])
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_multi_device_module_device_ids_None(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, devices)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(8)
def test_nccl_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_ddp_multi_device_module_config(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process")
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus[:2]
model = DoubleGpuNet(gpus)
with self.assertRaisesRegex(
ValueError,
"DistributedDataParallel device_ids and output_device arguments only work with "
"single-device/multiple-device GPU modules or CPU modules",
):
ddp_model = DistributedDataParallel(
model, output_device=gpus[1], process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "input module must be on the same type of devices"
):
model.fc1 = model.fc1.cpu()
ddp_model = DistributedDataParallel(model, process_group=process_group)
model = model.cpu()
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
def _test_fp16(self, gradient_as_bucket_view=False):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus_for_rank(self.world_size)[self.rank]
model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half()
nn.init.constant_(model.weight, 1)
ddp_model = DistributedDataParallel(
model,
device_ids=[gpus[0]],
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Input 2**15, so that the gradients will overflow with a
# world_size of 2, unless we normalize the gradient by the
# world_size before the reduction
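# Worked example with world_size = 2: weight 1, input 2**15, so each local
# grad is 2**15 and the unnormalized sum is 2**16 = 65536, which exceeds the
# fp16 max of ~65504.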
input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half()
# Step model
ddp_model.train()
output = ddp_model(input)
loss = output.sum()
loss.backward()
self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters()))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16(self):
self._test_fp16()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_grad_is_view(self):
self._test_fp16(gradient_as_bucket_view=True)
def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ForwardReturnValueModule(nn.Module):
def __init__(self):
super(ForwardReturnValueModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, fn):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# The first softmax does NOT include fc3 in its autograd graph
# whereas the second softmax DOES. If we pass only the first
# tensor we see in the output to the reducer, it marks the
# gradient for fc3 as ready (because it doesn't show up). If
# downstream uses of this return value choose to differentiate
# against the second output tensor, it would still receive a
# gradient and a callback for this tensor, resulting in a crash.
return fn(
F.softmax(x, dim=1),
F.softmax(self.fc3(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
ForwardReturnValueModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Always run "backward" to ensure the reducer is called by autograd.
# If we don't correctly capture the output tensors from the return value,
# the reducer won't see a hook for the unused parameter and will throw an error.
# The correct capture is what we're testing in this function.
def test(box, unbox):
output = model(input, fn=box)
loss = criterion(unbox(output), target)
loss.backward()
# Test with identity return value
test(
box=lambda x, y: (x, y),
unbox=lambda obj: obj[1],
)
# Test with list return value
test(
box=lambda x, y: ["foo", x, "bar", y],
unbox=lambda obj: obj[3],
)
# Test with tuple return value
test(
box=lambda x, y: ("foo", x, "bar", y),
unbox=lambda obj: obj[3],
)
# Test with dict return value
test(
box=lambda x, y: {"foo": "bar", "a": x, "b": y},
unbox=lambda obj: obj["b"],
)
# Test with list with dict return value
test(
box=lambda x, y: ["foo", "bar", {"a": x, "b": y}],
unbox=lambda obj: obj[2]["b"],
)
# Test with dict with list return value
test(
box=lambda x, y: {"foo": "bar", "list": [0, x, 1, y]},
unbox=lambda obj: obj["list"][3],
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value(self):
self._test_arbitrary_forward_return_value()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value_grad_is_view(self):
self._test_arbitrary_forward_return_value(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_with_lazy_parameters(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Modules with uninitialized parameters"
):
DistributedDataParallel(
torch.nn.LazyLinear(10), process_group=process_group
)
def _test_find_unused_parameters_kwarg(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
torch.cuda.set_device(self.rank)
dist.init_process_group(
backend="nccl",
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
process_group = c10d.distributed_c10d._get_default_group()
class FindUnusedParametersModule(nn.Module):
def __init__(self):
super(FindUnusedParametersModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# Return the fc3 module so that the caller can invoke it
# outside of the forward function. While this is bad practice,
# we can use it to trigger a reducer error.
return (F.softmax(x, dim=1), self.fc3)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
ddp_model = None
def test_find_unused_parameters(
find_unused_parameters, test_default=False, gradient_as_bucket_view=False
):
if test_default:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
else:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=find_unused_parameters,
gradient_as_bucket_view=gradient_as_bucket_view,
)
nonlocal ddp_model
ddp_model = model
output, fc3 = model(input)
output = fc3(output)
loss = criterion(output, target)
loss.backward()
# First, test that finding unused params under these conditions triggers
# an error when `backward` is called (because fc3 is an unused parameter
# and will therefore be marked ready twice).
try:
test_find_unused_parameters(
True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.assertTrue(
str(ex).startswith(
"Expected to mark a variable ready only once.",
)
)
unused_index = 2
unused_index_str = f"Parameter at index {unused_index}"
model = ddp_model.module
for module_name, module in model.named_modules():
if module == model.fc3:
for parameter_name, _ in module.named_parameters(recurse=False):
unused_fqn = f"{module_name}.{parameter_name}"
# Only one such parameter in model.fc3, since bias=False
break
if dist.get_debug_level() != dist.DebugLevel.OFF:
unused_index_str += f" with name {unused_fqn}"
self.assertTrue(unused_index_str in str(ex))
else:
self.fail("Expected exception")
dist.barrier(process_group)
# Then test that the default behavior can be overridden by setting
# `find_unused_parameters=False`.
try:
test_find_unused_parameters(
False, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# Test find_unused_parameters defaults to False
try:
test_find_unused_parameters(
True, test_default=True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# TODO: Combine the following tests once https://github.com/pytorch/pytorch/issues/55967
# is resolved.
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_debug_detail(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_debug_info(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_debug_off(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_detail(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_info(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_off(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
def _test_multiple_outputs_multiple_backward(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class MultipleOutputModule(nn.Module):
def __init__(self):
super(MultipleOutputModule, self).__init__()
def define_module():
return nn.Sequential(
nn.Linear(2, 10, bias=False),
nn.ReLU(),
nn.Linear(10, 4, bias=False),
nn.ReLU(),
)
self.module0 = define_module()
self.module1 = define_module()
def forward(self, x):
return (
F.softmax(self.module0(x), dim=1),
F.softmax(self.module1(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
MultipleOutputModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Compute loss and gradients for both outputs
output1, output2 = model(input)
loss1 = criterion(output1, target)
loss1.backward()
loss2 = criterion(output2, target)
loss2.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward(self):
self._test_multiple_outputs_multiple_backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward_grad_is_view(self):
self._test_multiple_outputs_multiple_backward(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_no_grad(self):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class NoGradModule(nn.Module):
def __init__(self):
super(NoGradModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
NoGradModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
input = torch.rand([batch_size, 2], dtype=torch.float)
def check_no_grads():
for p in model.parameters():
self.assertTrue(p.requires_grad)
self.assertIsNone(p.grad)
# After initialization, no parameter has their gradient set.
check_no_grads()
# Run `forward` function with torch.no_grad()
with torch.no_grad():
output = model(input)
self.assertTrue(isinstance(output, torch.Tensor))
# No parameter should have their gradient set.
check_no_grads()
def _test_accumulate_gradients_module(self, gradient_as_bucket_view=False):
# This is NOT the recommended way to implement accumulating grads, but
# we would like to make sure DDP does not interfere with the underlying
# module.
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = self.world_size
model, ddp_model, input, target = self._prepare_single_device_module(
process_group, devices, devices, global_batch_size, gradient_as_bucket_view
)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
# ensure accumulate grads works with no_grad
with torch.no_grad():
ddp_model.train()
ddp_model.module(input)
# Check two model parameters over 4 iterations.
# Use 4 iterations because we alternate between reducing and
# not reducing and want to make sure we switch both ways.
for iteration in range(4):
step_model(model, input, target)
if iteration % 2 == 0:
# Skip gradients sync without calling prepare_for_backward
step_model(
ddp_model.module,
input[self.rank : (self.rank + 1)],
target[self.rank : (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertNotEqual(i.grad, j.grad)
else:
step_model(
ddp_model,
input[self.rank : (self.rank + 1)],
target[self.rank : (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module(self):
self._test_accumulate_gradients_module()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module_with_grad_is_view(self):
self._test_accumulate_gradients_module(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_failure_recovery(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# need to create a separate file for the recovered FileStore, because
# the original one will be deleted when destructing the first FileStore.
recovery_filename = self.file_name + "_recovery"
if self.rank == 0:
# the file will be deleted by the recovered FileStore
open(recovery_filename, "w").close()
# not necessary to run barrier here, as DDP will synchronize
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = TestModel().float().to(device_id)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
del ddp
del process_group
del store # this will delete self.file_name
store = c10d.FileStore(recovery_filename, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_default_pg(self):
dist.init_process_group(
"nccl",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
default_pg = c10d.distributed_c10d._get_default_group()
dist.destroy_process_group(default_pg)
self.assertFalse(dist.is_initialized())
def _test_grad_layout(self, replica_devices, layer_devs, local_batch_size):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = local_batch_size * self.world_size
# Carry out some trials with small buckets and some with big buckets.
bucketsizes = (0.000001, 25)
# Tuples of lists. Each list describes per-layer characteristics for one trial.
layer_formats = (
[torch.contiguous_format] * 4,
[torch.channels_last] * 2 + [torch.contiguous_format] * 2,
[torch.channels_last] * 4,
)
layer_dtypes = (
[torch.float] * 4,
[torch.float] * 2 + [torch.half] * 2,
[torch.half] * 4,
)
input_dev = layer_devs[0] if isinstance(layer_devs, list) else layer_devs
target_dev = layer_devs[-1] if isinstance(layer_devs, list) else layer_devs
input = torch.randn(
(global_batch_size, 8, 8, 8), device=input_dev, dtype=torch.float
)
target = torch.randn(
(global_batch_size, 8, 4, 4), device=target_dev, dtype=torch.float
)
local_batch_start = self.rank * local_batch_size
local_batch_end = (self.rank + 1) * local_batch_size
# Reducer.cpp sneakily creates one "initial bucket" that ignores the "bucket_cap_mb"
# argument. The following makes sure the initial bucket also complies.
@contextmanager
def first_bucket_size(ddp_bucket_mb):
old_DEFAULT_FIRST_BUCKET_BYTES = dist._DEFAULT_FIRST_BUCKET_BYTES
dist._DEFAULT_FIRST_BUCKET_BYTES = int(ddp_bucket_mb * 1.0e6)
try:
yield
finally:
dist._DEFAULT_FIRST_BUCKET_BYTES = old_DEFAULT_FIRST_BUCKET_BYTES
with torch.backends.cudnn.flags(
enabled=True, deterministic=True, benchmark=False
):
for formats, dtypes, bucketsize in product(
layer_formats, layer_dtypes, bucketsizes
):
with first_bucket_size(bucketsize):
model_msg = (
"rank = {} formats = {} dtypes = {} bucketsize = {} ".format(
self.rank, formats, dtypes, bucketsize
)
)
try:
m = ConvNet(layer_devs, formats, dtypes)
m_ddp = DistributedDataParallel(
copy.deepcopy(m),
device_ids=replica_devices,
process_group=process_group,
bucket_cap_mb=bucketsize,
)
opt = torch.optim.SGD(m.parameters(), lr=0.1)
opt_ddp = torch.optim.SGD(m_ddp.parameters(), lr=0.1)
has_half = any(p.dtype is torch.half for p in m.parameters())
tol = 1.0e-3 if has_half else 1.0e-5
except BaseException:
# Prints case-specific debugging info to narrow down failing case.
print(
"Caught exception during model creation for " + model_msg,
flush=True,
)
raise
# 3 iters: First iter creates grads, second iter retests after rebucketing,
# third iter tries zeroed grads.
for it in range(3):
iter_msg = "iter = {} ".format(it) + model_msg
named_msg = iter_msg
try:
F.mse_loss(m(input).float(), target).backward()
F.mse_loss(
m_ddp(input[local_batch_start:local_batch_end]).float(),
target[local_batch_start:local_batch_end],
).backward()
for i, ((layer_name, m_child), m_ddp_child) in enumerate(
zip(m.named_children(), m_ddp.module.children())
):
named_msg = layer_name + ".weight" + " " + iter_msg
self.assertTrue(
m_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
self.assertTrue(
m_ddp_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
for j, ((param_name, p), p_ddp) in enumerate(
zip(
m_child.named_parameters(),
m_ddp_child.parameters(),
)
):
named_msg = (
layer_name + "." + param_name + " " + iter_msg
)
self.assertEqual(
p.grad, p_ddp.grad, rtol=tol, atol=tol
)
opt.step()
opt_ddp.step()
if it == 0:
for p, p_ddp in zip(m.parameters(), m_ddp.parameters()):
p.grad = None
p_ddp.grad = None
else:
m.zero_grad()
m_ddp.zero_grad()
except BaseException:
# Makes sure we still get info if an error occurred somewhere other than the asserts.
print(
"Caught exception during iterations at " + named_msg,
flush=True,
)
raise
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_grad_layout_1devicemodule_1replicaperprocess(self):
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
# Tells DDP to use just one device.
replica_devices = [dev0]
# Tells _test_grad_layout to construct ConvNet with all layers on this process's first assigned device.
layer_devs = dev0
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(4)
@skip_if_rocm
def test_grad_layout_2devicemodule(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
dev0 = torch.device("cuda:" + str(int_devices[0]))
dev1 = torch.device("cuda:" + str(int_devices[1]))
# DDP's default behavior for a multi-device module is "don't replicate."
replica_devices = None
# Tells _test_grad_layout to construct this process's ConvNet on 2 devices, with 2 layers on each device.
layer_devs = [dev0] * 2 + [dev1] * 2
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_param_layout_mismatch_error(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
layer_devs = dev0
layer_formats = (
[torch.contiguous_format] * 4
if self.rank == 0
else [torch.channels_last] * 4
)
layer_dtypes = [torch.float] * 4
m = ConvNet(layer_devs, layer_formats, layer_dtypes)
if self.rank == 0:
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
else:
with self.assertRaisesRegex(
RuntimeError,
".* appears not to match strides of the same param in process 0",
):
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
def _gpu_model_with_ddp_comm_hook(
self,
process_group,
hook=None,
gradient_as_bucket_view=False,
state=None,
static_graph=False,
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
static_graph=static_graph,
)
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_future_passing_gpu_nccl(self):
"""
This unit test verifies whether the Future object is passed properly using the NCCL backend.
The hook callback function creates a Future object and sets a value to it.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with simple_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)
# check whether the grads are equal to what simple_hook's then callback returns.
# without the comm_hook, result would be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))
def _test_ddp_comm_hook_allreduce_hook_nccl(
self, gradient_as_bucket_view=False, static_graph=False
):
"""
This unit test verifies whether a DDP communication hook that just calls
allreduce gives the same result as the case of no hook registered.
Without the then callback, the future_value in reducer is no longer
a PyObject, and this unit test verifies future_value is properly checked.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_hook(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
tensors = [bucket.buffer() / self.world_size]
return (
process_group.allreduce(tensors)
.get_future()
.then(lambda fut: fut.value()[0])
)
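# Dividing the bucket by world_size before the allreduce produces the mean
# gradient, matching what DDP does with no hook registered, so the expected
# grads below equal the hook-free baseline.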
# Get GPU model with allreduce_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_hook, gradient_as_bucket_view, static_graph
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_default_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether default Python DDP communication hooks ALLREDUCE, FP16_COMPRESS
and BF16_COMPRESS can give the same result as the case of no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# For these default DDP comm hooks, the only state is process group.
state = process_group
hook_options = [default.allreduce_hook, default.fp16_compress_hook]
if (
not TEST_WITH_ROCM
and BFLOAT16_AVAILABLE
and c10d.is_nccl_available()
and torch.cuda.nccl.version() >= (2, 10)
):
hook_options.append(default.bf16_compress_hook)
for hook in hook_options:
# Get GPU model with the hook registered.
# The first arg 'process_group' is used for initializing the test environment,
# so it cannot be replaced by 'state', although they have the same value.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_fp16_compress_wrapper(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with
the FP16_WRAPPER can give the same result as when there is no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)
hook_args = [
(powerSGD.powerSGD_hook, powerSGD_state),
(default.allreduce_hook, process_group),
]
for hook, state in hook_args:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default.fp16_compress_wrapper(hook),
gradient_as_bucket_view,
state,
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_bf16_compress_wrapper(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with
the BF16_WRAPPER can give the same result as when there is no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)
hook_args = [
(powerSGD.powerSGD_hook, powerSGD_state),
(default.allreduce_hook, process_group),
]
for hook, state in hook_args:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default.bf16_compress_wrapper(hook),
gradient_as_bucket_view,
state,
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_powerSGD_ddp_comm_hook_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether Python DDP communication hook POWER_SGD
can give the same result as the case of no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with the hook registered.
# Test the hook with different algorithmic configs.
for use_error_feedback, warm_start, batch_tensors_with_same_shape in product(
[True, False], [True, False], [True, False],
):
state = powerSGD.PowerSGDState(
process_group=process_group,
matrix_approximation_rank=1,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
batch_tensors_with_same_shape=batch_tensors_with_same_shape,
)
for hook in [powerSGD.powerSGD_hook, powerSGD.batched_powerSGD_hook]:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_builtin_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether built-in C++ DDP communication hooks ALLREDUCE and FP16_COMPRESS
can give the same result as the case of no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for comm_hook_type in [
dist.BuiltinCommHookType.ALLREDUCE,
dist.BuiltinCommHookType.FP16_COMPRESS,
]:
# Get GPU model with the built-in communication hook.
gpu_model = self._gpu_model_with_builtin_ddp_comm_hook(
process_group, comm_hook_type, gradient_as_bucket_view
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl(self):
self._test_ddp_comm_hook_allreduce_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl(self):
self._test_default_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_nccl(self):
self._test_fp16_compress_wrapper()
@requires_nccl()
@requires_nccl_version((2, 10), "Need NCCL 2.10+ for BF16_COMPRESS")
@sandcastle_skip_if(
not BFLOAT16_AVAILABLE,
"BFloat16 is only supported by CUDA 11+",
)
@skip_if_lt_x_gpu(2)
def test_bf16_compress_wrapper_nccl(self):
self._test_bf16_compress_wrapper()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl(self):
self._test_builtin_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl(self):
self._test_powerSGD_ddp_comm_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_grad_is_view(self):
self._test_ddp_comm_hook_allreduce_hook_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_static_graph(self):
self._test_ddp_comm_hook_allreduce_hook_nccl(static_graph=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl_is_view(self):
self._test_default_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_is_view(self):
self._test_fp16_compress_wrapper(gradient_as_bucket_view=True)
@requires_nccl()
@requires_nccl_version((2, 10), "Need NCCL 2.10+ for BF16_COMPRESS")
@sandcastle_skip_if(
not BFLOAT16_AVAILABLE,
"BFloat16 is only supported by CUDA 11+",
)
@skip_if_lt_x_gpu(2)
def test_bf16_compress_wrapper_is_view(self):
self._test_bf16_compress_wrapper(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl_grad_is_view(self):
self._test_builtin_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl_grad_is_view(self):
self._test_powerSGD_ddp_comm_hook_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_with_then_hook_nccl(self):
"""
This unit test verifies whether a DDP communication hook that calls allreduce and then
multiplies the result by ten and divides by two gives the expected result.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_with_then_hook(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
tensors = [bucket.buffer() / self.world_size]
fut = process_group.allreduce(tensors).get_future()
def mult(fut):
# Multiply the result by 10.
return 10 * fut.value()[0]
def div(fut):
# Divide the result by 2.
return 0.5 * fut.value()
return fut.then(mult).then(div)
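# Net effect: averaged allreduce result * 10 / 2 = 5x the hook-free value,
# so the expected grad below is 5 * 0.25 = 1.25.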
# Get GPU model with allreduce_with_then_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_with_then_hook
)
# check whether the grads are equal to what allreduce returns multiplied by 5.
# without the comm_hook, result would be still 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 1.25 * torch.ones(2, 2))
class AcceptsParam(torch.nn.Module):
def __init__(self, p, factor):
super().__init__()
self.a = p
self.f = factor
def forward(self, input):
return input + self.a * self.f
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_weight_sharing(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
size = 2048 * 2048
dev = self.rank
world = self.world_size
p = torch.nn.Parameter(torch.randn(size, requires_grad=True))
for try_set_to_none, use_bucket_view in product((False, True), (False, True)):
m = torch.nn.Sequential(
self.AcceptsParam(p, dev + 1), self.AcceptsParam(p, dev + 1)
).cuda(dev)
m = torch.nn.parallel.DistributedDataParallel(
m,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[dev],
process_group=process_group,
)
for i in range(3):
m.zero_grad(set_to_none=try_set_to_none)
m(1).sum().backward()
# Each param value is multiplied by "rank + 1" twice in forward, so the grad
# values produced by a particular rank should be 2. * (rank + 1).
# Summing these over ranks and dividing by world size gives the expected result:
analytic = torch.full_like(
p, 2.0 * (world * (world + 1.0) / 2.0) / world, device=dev
)
for name, p in m.named_parameters():
self.assertEqual(
p.grad,
analytic,
"mismatch at "
+ name
+ ".grad for "
+ "set_to_none = {}, use_bucket_view = {}".format(
try_set_to_none, use_bucket_view
),
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_channels_last_contig(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
device = torch.device(f"cuda:{self.rank}")
tensor = torch.ones((2, 16, 768, 1152), dtype=torch.float32, device=device).to(memory_format=torch.channels_last)
process_group.broadcast([tensor]).wait()
class NcclErrorHandlingTest(MultiProcessTestCase):
def setUp(self):
super(NcclErrorHandlingTest, self).setUp()
# Need to skip return code checking for these tests since the child
# processes don't exit cleanly.
self.skip_return_code_checks = [
self.test_nccl_errors_blocking_abort.__wrapped__,
self.test_nccl_errors_blocking_sigkill.__wrapped__,
self.test_nccl_errors_blocking_sigterm.__wrapped__,
self.test_nccl_errors_blocking_nonzero_exit.__wrapped__,
]
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def tearDown(self):
super(NcclErrorHandlingTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 3
@property
def blocking_wait_error_msg(self):
return "Caught collective operation timeout"
def _run_all_reduce(self, pg):
pg.allreduce(torch.rand(10).cuda(self.rank))
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
@sandcastle_skip("Test does not pass when run locally")
def test_nccl_errors_nonblocking(self):
# Note: we unset and restore NCCL_ASYNC_ERROR_HANDLING for this test
# since test_c10d_common runs with async error handling by default, but this
# tests behavior when it is not enabled.
prev_nccl_async_error_handling = os.environ.get(
"NCCL_ASYNC_ERROR_HANDLING", None
)
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
# This allreduce does not block Python thread as allreduce enqueues
# the cuda operation, and then wait only blocks the current cuda
# stream.
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
work.wait()
# Now the work scheduled next should hang forever since the previous
# allreduce will never complete.
t = threading.Thread(target=self._run_all_reduce, args=(process_group,))
t.daemon = True
t.start()
t.join(int(get_timeout(self.id()) / 5))
self.assertTrue(t.is_alive())
if prev_nccl_async_error_handling is not None:
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = prev_nccl_async_error_handling
def _test_nccl_errors_blocking(self, func):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=10),
)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# Operation would time out in blocking mode.
work.wait(timeout=timedelta(seconds=self.op_timeout_sec))
# Run some GPU operations to make sure cuda has not gotten stuck.
# It was observed cuda could get stuck if NCCL communicators were
# not properly aborted before throwing RuntimeError.
a = torch.rand(10).cuda(self.rank)
elif self.rank == 1:
# Clean up structures (e.g., files for FileStore) before going down
del process_group
func()
else:
# Wait for timeout
time.sleep(2 * self.op_timeout_sec)
# Now verify communicators on this rank have been aborted by the watchdog thread.
self._wait_for_comm_abort(process_group)
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_clean_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(0))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_nonzero_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(1))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
@sandcastle_skip(
"Frequently times out see https://github.com/pytorch/pytorch/issues/58920"
)
def test_nccl_errors_blocking_abort(self):
self._test_nccl_errors_blocking(lambda: os.abort())
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigkill(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGKILL))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigterm(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGTERM))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
def test_nccl_blocking_wait_with_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=10),
)
process_group.barrier().wait()
if self.rank == 0:
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# This should timeout
process_group.barrier().wait(timeout=timedelta(seconds=self.op_timeout_sec))
def _run_invalid_nccl_blocking_wait_env(self, val):
os.environ["NCCL_BLOCKING_WAIT"] = val
store = c10d.FileStore(self.file_name, self.world_size)
with self.assertRaises(RuntimeError):
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_invalid_nccl_blocking_wait_env(self):
self._run_invalid_nccl_blocking_wait_env("abc")
self._run_invalid_nccl_blocking_wait_env("-1")
self._run_invalid_nccl_blocking_wait_env("2147483647")
self._run_invalid_nccl_blocking_wait_env("4294967295")
def _check_valid_comm_exception(self, e):
exception_str = str(e)
valid_exceptions = [
"NCCL communicator was aborted",
"NCCL communicator encountered error",
"Caught collective operation timeout"
]
return any(exc in exception_str for exc in valid_exceptions)
def _wait_for_comm_abort(self, process_group, timeout=None):
"""
Waits for the watchdog thread to abort communicators for the process group.
"""
while True:
try:
if not timeout:
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
else:
assert isinstance(timeout, timedelta)
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(timeout=timeout)
except Exception as e:
if self._check_valid_comm_exception(e):
return
else:
raise e
time.sleep(1)
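# Note (added for clarity): the helper above polls once per second and only
# returns after an allreduce raises one of the abort/timeout messages listed in
# _check_valid_comm_exception, i.e. once the watchdog thread has actually torn
# down the NCCL communicators on this rank; any other exception is re-raised.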
@with_nccl_blocking_wait
@requires_nccl()
@requires_gloo()
@skip_if_lt_x_gpu(3)
def test_nccl_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
# Initialize process_group.
process_group = c10d.ProcessGroupNCCL(
store, self.rank, self.world_size, timeout=timedelta(seconds=10)
)
# Control gloo pg used as go-ahead signal/barrier
# to coordinate between ranks.
pg_gloo = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
failed_collective_timeout = timedelta(milliseconds=100)
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(timeout=timedelta(seconds=5))
if self.rank == 0:
# This should time out quickly, since failed_collective_timeout is only 100 ms.
# The watchdog may abort the timed-out work, resulting in an NCCL error instead of an operation timeout.
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(timeout=failed_collective_timeout)
# Now do a barrier to tell other rank to go ahead.
pg_gloo.barrier().wait()
else:
# Wait on rank 0 to fail.
try:
pg_gloo.barrier().wait()
except Exception as e:
raise ValueError(f"Rank {self.rank} barrier timed out waiting for rank 0 with error: {str(e)}")
# Now verify communicators on this rank have
# been aborted by watchdog.
self._wait_for_comm_abort(process_group, failed_collective_timeout)
class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def _test_broadcast_coalesced(self, process_group, device, root_rank):
half = torch.float16
# No support for float16 for CPU tensors
if device == torch.device("cpu"):
half = torch.float32
target = torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
# The tensors to pass to broadcast are identical to the target
# only on the process that is the root of the broadcast.
if self.rank == root_rank:
tensors = list(tensor.clone() for tensor in target)
else:
tensors = list(torch.zeros_like(tensor) for tensor in target)
if self.rank != root_rank:
self.assertNotEqual(tensors, target)
c10d._broadcast_coalesced(
process_group, tensors, buffer_size=256, src=root_rank
)
if self.rank != root_rank:
self.assertEqual(tensors, target)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
device = torch.device("cuda:%d" % self.rank)
ranks = [0, 1]
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_all_reduce_coalesced_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
device = torch.device("cuda:%d" % self.rank)
tensors = [torch.full((60 + i,), self.rank + 1 + i, device=device, dtype=torch.float) for i in range(5)]
torch.distributed.all_reduce_coalesced(tensors, group=process_group)
for i, t in enumerate(tensors):
self.assertEqual(t, torch.full_like(t, self.world_size * (i + (self.world_size + 1.) / 2.)))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_default_pg_nccl(self):
torch.cuda.set_device(self.rank)
self._test_sequence_num_set_default_pg(backend="nccl")
@skip_if_lt_x_gpu(2)
@requires_nccl()
def test_sequence_num_incremented_nccl_default(self):
self._test_sequence_num_incremented_default_group("nccl")
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sequence_num_incremented_nccl_subgroup(self):
if self.world_size < 4:
return sandcastle_skip("Test requires world_size of at least 4")
self._test_sequence_num_incremented_subgroup("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_nccl_new_group(self):
torch.cuda.set_device(self.rank)
self._test_sequence_num_set_new_group(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_nccl_options_high_priority_stream(self):
pg_opts = c10d.ProcessGroupNCCL.Options()
pg_opts.is_high_priority_stream = True
store = c10d.FileStore(self.file_name, self.world_size)
# Test init_process_group accepts options
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=pg_opts,
)
# Test with new_group
pg = c10d.new_group([0, 1], pg_options=pg_opts)
# test if the process group constructed with high priority stream
self.assertTrue(pg.options.is_high_priority_stream)
# test the process group works as expected
t = torch.tensor([self.rank + 1] * 10).cuda(self.rank)
pg.allreduce(t).wait()
expected_tensor = torch.tensor([3] * 10).cuda(self.rank)
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
c10d.all_reduce(t)
expected_tensor = torch.tensor([3] * 10).cuda(2 * self.rank)
self.assertEqual(expected_tensor, t)
# Test with new_group
pg = c10d.new_group([0, 1])
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([0])
if self.rank == 0:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([1])
if self.rank == 1:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout(self):
os.environ["ENABLE_NCCL_HEALTH_CHECK"] = "1"
store = c10d.FileStore(self.file_name, self.world_size)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Health check failure"
):
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=10),
)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group(self):
os.environ["ENABLE_NCCL_HEALTH_CHECK"] = "1"
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=10),
)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Health check failure"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group_non_member(self):
os.environ["ENABLE_NCCL_HEALTH_CHECK"] = "1"
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=10),
)
if self.rank == 1:
with self.assertRaisesRegex(
RuntimeError, "Health check failure"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
c10d.barrier(device_ids=[self.rank])
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids_function_argument(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
with self.assertRaisesRegex(RuntimeError, "Invalid function argument"):
c10d.barrier(device_ids=self.rank)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_nccl_warn_not_in_group_debug_detail(self):
self._test_warn_not_in_group(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_nccl_warn_not_in_group_debug_info(self):
self._test_warn_not_in_group(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_nccl_warn_not_in_group_debug_off(self):
self._test_warn_not_in_group(backend="nccl")
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
pytorch-master
|
test/distributed/test_c10d_nccl.py
|
# Owner(s): ["oncall: distributed"]
import os
import sys
from datetime import timedelta
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
from test_c10d_common import LOOPBACK
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
requires_gloo,
skip_if_lt_x_gpu,
with_dist_debug_levels,
create_device,
)
from torch.testing._internal.common_utils import (
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
class AbstractProcessGroupWrapperTest(MultiProcessTestCase):
def setUp(self):
super(AbstractProcessGroupWrapperTest, self).setUp()
self._spawn_processes()
def _validate_error(self, exception, op_type, rank, tensor):
err = str(exception)
self.assertTrue(
op_type in err, f"Got {err} but expected {op_type} to be in error."
)
# User doesn't call barrier with tensor.
if op_type != "BARRIER":
self.assertTrue(
f"{list(tensor.shape)}" in err,
f"Did not find shapes {list(tensor.shape)} in error {err}",
)
# For CUDA, only assert on device type, not index
if "cuda" in str(tensor.device):
self.assertTrue(
"cuda" in err, f"Did not find cuda device in error {err}"
)
else:
self.assertTrue(
str(tensor.device) in err,
f"Did not find tensor device {str(tensor.device)} in error {err}",
)
# C++ and python type strings are not exactly the same.
if "float" in str(tensor.dtype):
self.assertTrue("Float" in err, "Expected Float type")
elif "int" in str(tensor.dtype):
self.assertTrue("Long" in err, "Expected Long type")
else:
self.fail(f"Unexpected dtype {str(tensor.dtype)} for error {err}")
def _test_collective_hang(self, wrapper_pg, use_cuda=False):
# All ranks besides 1 call allreduce and wrapper_pg should detect a hang
# and report an issue with rank 1.
faulty_rank = 1
if self.rank != faulty_rank:
tensor = torch.randn(20, 10)
if use_cuda:
tensor = tensor.to(self.rank)
if self.rank == 0:
# Rank 0 reports faulty ranks
err = f"Ranks {faulty_rank} failed to pass monitoredBarrier"
else:
err = "Please check rank 0 logs for faulty rank"
# Gloo can sometimes throw the following error if a rank exits early
# before rank 0 calls into the allreduce.
err += "|Connection closed by peer|Connection reset by peer"
with self.assertRaisesRegex(RuntimeError, err):
wrapper_pg.allreduce([tensor])
def _test_collectives_op_mismatch(self, wrapper_pg, use_cuda=False):
tensor = torch.randn(20, 10)
if use_cuda:
tensor = tensor.to(self.rank)
works = []
# Run a few successful collectives
for _ in range(10):
work = wrapper_pg.allreduce([tensor])
works.append(work)
for w in works:
w.wait()
# Simulate mismatch: allreduce vs reduce.
# Error including info about inconsistent collective, rank, tensor
# shape, device, and dtype should be raised.
with self.assertRaisesRegex(RuntimeError, ".*") as cm:
if self.rank == 0:
wrapper_pg.allreduce([tensor])
else:
wrapper_pg.reduce([tensor])
self._validate_error(
exception=cm.exception,
op_type="ALLREDUCE" if self.rank == 0 else "REDUCE",
rank=self.rank,
tensor=tensor,
)
with self.assertRaisesRegex(RuntimeError, ".*") as cm:
if self.rank == 0:
wrapper_pg.reduce([tensor])
else:
wrapper_pg.barrier()
self._validate_error(
exception=cm.exception,
op_type="REDUCE" if self.rank == 0 else "BARRIER",
rank=self.rank,
tensor=tensor,
)
with self.assertRaisesRegex(RuntimeError, ".*") as cm:
scatter_result = [torch.ones(4) * i for i in range(self.world_size)]
scattered_tensor = torch.empty(4)
if self.rank == 0:
wrapper_pg.scatter(scattered_tensor, scatter_result, 0)
else:
wrapper_pg.reduce_scatter(scattered_tensor, scatter_result)
self._validate_error(
exception=cm.exception,
op_type="SCATTER" if self.rank == 0 else "REDUCE_SCATTER",
rank=self.rank,
tensor=scattered_tensor,
)
with self.assertRaisesRegex(RuntimeError, ".*") as cm:
if self.rank == 0:
wrapper_pg.broadcast(tensor, 0)
else:
output_tensors = [
torch.zeros_like(tensor) for _ in range(self.world_size)
]
wrapper_pg.allgather([output_tensors], [tensor])
self._validate_error(
exception=cm.exception,
op_type="BROADCAST" if self.rank == 0 else "ALLGATHER",
rank=self.rank,
tensor=tensor,
)
def _test_collective_shape_mismatch(self, wrapper_pg, use_cuda=False):
wrapper_pg.barrier()
dim = 2 if self.rank == 0 else 10
tensor = torch.randn(20, dim)
if use_cuda:
tensor = tensor.to(self.rank)
with self.assertRaisesRegex(RuntimeError, ".*") as cm:
wrapper_pg.allreduce([tensor])
self._validate_error(
exception=cm.exception,
op_type="ALLREDUCE",
rank=self.rank,
tensor=tensor,
)
# Check errors are raised when dimensionality of shapes is different
tensor = torch.randn(20, 10, 2) if self.rank == 0 else torch.randn(20, 10)
if use_cuda:
tensor = tensor.to(self.rank)
with self.assertRaisesRegex(RuntimeError, ".*") as cm:
wrapper_pg.allreduce([tensor])
self._validate_error(
exception=cm.exception,
op_type="ALLREDUCE",
rank=self.rank,
tensor=tensor,
)
# Check shape errors with scatter
input = [
torch.tensor(
[self.rank] if self.rank == 0 else [self.rank, self.rank],
device=self.rank if use_cuda else "cpu",
)
for _ in range(self.world_size)
]
outputs = [
torch.tensor(
[-1] if self.rank == 0 else [-1, -1],
device=self.rank if use_cuda else "cpu",
)
for _ in range(self.world_size)
]
root_rank = 0
opts = c10d.ScatterOptions()
opts.rootRank = root_rank
with self.assertRaisesRegex(RuntimeError, ".*") as cm:
if self.rank == root_rank:
wrapper_pg.scatter([outputs[self.rank]], [input], opts).wait()
else:
wrapper_pg.scatter([outputs[self.rank]], [], opts).wait()
self._validate_error(
exception=cm.exception,
op_type="SCATTER",
rank=self.rank,
tensor=outputs[self.rank],
)
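# Note (added for clarity, inferred from the assertions above rather than from
# the wrapper's internals): the process group wrapper verifies, before running
# each collective, that all ranks agree on the operation type and on the tensor
# shapes/dtypes/devices, and surfaces a descriptive RuntimeError otherwise;
# _validate_error checks exactly that error text.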
# ASAN is not safe since we are spawning processes.
if not TEST_WITH_DEV_DBG_ASAN:
@requires_gloo()
@requires_nccl()
class ProcessGroupNCCLWrapperTest(AbstractProcessGroupWrapperTest):
def setUp(self):
super(AbstractProcessGroupWrapperTest, self).setUp()
self._spawn_processes()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
@property
def world_size(self) -> int:
return 2
def _create_wrapper_pg(self, with_new_group=False, timeout=10.0):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=timeout),
)
if with_new_group:
pg = c10d.new_group(backend="nccl", timeout=timedelta(seconds=timeout))
else:
_pg = c10d.ProcessGroupNCCL(
store, self.rank, self.world_size, timeout=timedelta(seconds=timeout)
)
pg = c10d._create_process_group_wrapper(
_pg,
"unused",
store,
self.rank,
self.world_size,
timeout=timeout,
)
return pg
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_collective_hang(self):
pg = self._create_wrapper_pg(timeout=2.0)
self._test_collective_hang(pg)
# NOTE: these tests are separated by debug level instead of combined into
# one due to https://github.com/pytorch/pytorch/issues/55967; they can be
# combined after that is resolved.
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_collectives_op_mismatch_debug_mode(self):
pg = self._create_wrapper_pg(with_new_group=True)
self._test_collectives_op_mismatch(pg, use_cuda=True)
self._test_nccl_only_op_mismatch(pg)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_collectives_op_mismatch(self):
pg = self._create_wrapper_pg(with_new_group=False)
self._test_collectives_op_mismatch(pg, use_cuda=True)
self._test_nccl_only_op_mismatch(pg)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_collective_shape_mismatch_debug_mode(self):
pg = self._create_wrapper_pg(with_new_group=True)
self._test_collective_shape_mismatch(pg, use_cuda=True)
self._test_nccl_only_shape_mismatch(pg)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_collective_shape_mismatch(self):
pg = self._create_wrapper_pg(with_new_group=False)
self._test_collective_shape_mismatch(pg, use_cuda=True)
self._test_nccl_only_shape_mismatch(pg)
def _test_nccl_only_op_mismatch(self, wrapper_pg):
device = f"cuda:{self.rank}"
with self.assertRaisesRegex(RuntimeError, ".*") as cm:
output = torch.zeros(4 + self.rank, device=device)
input = torch.ones(4 * self.world_size, device=device)
if self.rank == 0:
wrapper_pg._allgather_base(output, input).wait()
else:
wrapper_pg._reduce_scatter_base(output, input).wait()
self._validate_error(
exception=cm.exception,
op_type="ALLGATHER_BASE" if self.rank == 0 else "REDUCE_SCATTER_BASE",
rank=self.rank,
tensor=input,
)
def _test_nccl_only_shape_mismatch(self, wrapper_pg):
device = f"cuda:{self.rank}"
with self.assertRaisesRegex(RuntimeError, ".*") as cm:
output = torch.zeros(4 + self.rank, device=device)
input = torch.ones(4 * self.world_size, device=device)
wrapper_pg._reduce_scatter_base(output, input).wait()
self._validate_error(
exception=cm.exception,
op_type="REDUCE_SCATTER_BASE",
rank=self.rank,
tensor=input,
)
with self.assertRaisesRegex(RuntimeError, ".*") as cm:
output = torch.zeros(4, device=device)
input = torch.ones((4 + self.rank) * self.world_size, device=device)
wrapper_pg._reduce_scatter_base(output, input).wait()
self._validate_error(
exception=cm.exception,
op_type="REDUCE_SCATTER_BASE",
rank=self.rank,
tensor=input,
)
@requires_gloo()
class ProcessGroupGlooWrapperTest(AbstractProcessGroupWrapperTest):
def setUp(self):
super(ProcessGroupGlooWrapperTest, self).setUp()
def opts(self, threads=2, timeout=10.0):
opts = c10d.ProcessGroupGloo._Options()
opts._timeout = timeout
opts._devices = [create_device(interface=LOOPBACK)]
opts._threads = threads
return opts
def _create_wrapper_pg(self, with_new_group=False, timeout=10.0):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="gloo", rank=self.rank, world_size=self.world_size, store=store
)
if with_new_group:
pg = c10d.new_group(backend="gloo")
else:
_pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(timeout=timeout)
)
pg = c10d._create_process_group_wrapper(
_pg,
"unused",
store,
self.rank,
self.world_size,
timeout=timeout,
)
return pg
def test_collective_hang(self):
pg = self._create_wrapper_pg(timeout=2.0)
self._test_collective_hang(pg)
# NOTE: these tests are separated by debug level instead of combined into
# one due to https://github.com/pytorch/pytorch/issues/55967; they can be
# combined after that is resolved.
@with_dist_debug_levels(levels=["DETAIL"])
def test_collectives_op_mismatch_debug_mode(self):
pg = self._create_wrapper_pg(with_new_group=True)
self._test_collectives_op_mismatch(pg)
@with_dist_debug_levels(levels=["OFF"])
def test_collectives_op_mismatch(self):
pg = self._create_wrapper_pg(with_new_group=False)
self._test_collectives_op_mismatch(pg)
@with_dist_debug_levels(levels=["DETAIL"])
def test_collective_shape_mismatch_debug_mode(self):
pg = self._create_wrapper_pg(with_new_group=True)
self._test_collective_shape_mismatch(pg)
@with_dist_debug_levels(levels=["OFF"])
def test_collective_shape_mismatch(self):
pg = self._create_wrapper_pg(with_new_group=False)
self._test_collective_shape_mismatch(pg)
@skip_if_lt_x_gpu(4)
@with_dist_debug_levels(levels=["DETAIL"])
def test_collectives_op_mismatch_cuda_debug_mode(self):
pg = self._create_wrapper_pg(with_new_group=True)
self._test_collectives_op_mismatch(pg, use_cuda=True)
@skip_if_lt_x_gpu(4)
@with_dist_debug_levels(levels=["OFF"])
def test_collectives_op_mismatch_cuda(self):
pg = self._create_wrapper_pg(with_new_group=False)
self._test_collectives_op_mismatch(pg, use_cuda=True)
@skip_if_lt_x_gpu(4)
@with_dist_debug_levels(levels=["DETAIL"])
def test_collective_shape_mismatch_cuda_debug_mode(self):
pg = self._create_wrapper_pg(with_new_group=True)
self._test_collective_shape_mismatch(pg, use_cuda=True)
@skip_if_lt_x_gpu(4)
@with_dist_debug_levels(levels=["OFF"])
def test_collective_shape_mismatch_cuda(self):
pg = self._create_wrapper_pg(with_new_group=False)
self._test_collective_shape_mismatch(pg, use_cuda=True)
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_pg_wrapper must not have initialized CUDA context on main process"
run_tests()
|
pytorch-master
|
test/distributed/test_pg_wrapper.py
|
# Owner(s): ["oncall: distributed"]
import os
import sys
from contextlib import closing
import torch.distributed as dist
import torch.distributed.launch as launch
from torch.distributed.elastic.utils import get_socket_with_port
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
TestCase,
run_tests,
)
def path(script):
return os.path.join(os.path.dirname(__file__), script)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip ASAN as torch + multiprocessing spawn have known issues", file=sys.stderr
)
sys.exit(0)
class TestDistributedLaunch(TestCase):
def test_launch_user_script(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=spawn",
"--master_addr=localhost",
f"--master_port={master_port}",
"--node_rank=0",
"--use_env",
path("bin/test_script.py"),
]
launch.main(args)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/test_launcher.py
|
# Owner(s): ["oncall: distributed"]
import copy
import os
import sys
import tempfile
import threading
import time
from datetime import timedelta
from itertools import product
from sys import platform
from contextlib import suppress
import torch
import torch.distributed as dist
if not dist.is_available():
print("distributed package not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.distributed.distributed_c10d as c10d
from torch.utils.checkpoint import checkpoint
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
if TEST_WITH_DEV_DBG_ASAN:
print("Multiprocessing spawn is not compatible with dev/dbg asan", file=sys.stderr)
sys.exit(0)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if platform == "darwin":
LOOPBACK = "lo0"
else:
LOOPBACK = "lo"
torch.backends.cuda.matmul.allow_tf32 = False
def gpus_for_rank(world_size):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. The NCCL backend requires an equal number of GPUs in
each process. On a single node, all visible GPUs are evenly divided into
subsets, and each process uses only its own subset.
"""
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process]
)
return gpus_for_rank
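# Illustrative sketch (added; not part of the original test suite): the expected
# slicing behaviour of gpus_for_rank() under the assumption of a node with 8
# visible GPUs and world_size == 2.
def _example_gpus_for_rank_slicing():
    # With torch.cuda.device_count() == 8 and world_size == 2, gpus_per_process
    # is 4, so rank 0 owns devices [0, 1, 2, 3] and rank 1 owns [4, 5, 6, 7].
    return [[0, 1, 2, 3], [4, 5, 6, 7]]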
class AbstractTimeoutTest(object):
def _test_store_timeout(self, backend, init_method, c2p):
try:
dist.init_process_group(
backend=backend,
init_method=init_method,
world_size=1,
rank=0,
timeout=timedelta(seconds=1),
)
default_store = c10d._get_default_store()
tik = time.time()
with self.assertRaisesRegex(RuntimeError, "Timeout"):
default_store.get("nonexistent key")
tok = time.time()
dist.destroy_process_group()
c2p.append(float(tok - tik))
except RuntimeError as e:
# catch "Address already in use" error and report it to the main
# thread
c2p.append(e)
def _init_methods(self):
f = tempfile.NamedTemporaryFile(delete=False)
if sys.platform == "win32":
yield "file:///%s" % f.name.replace("\\", "/")
f.close()
else:
yield "file://%s" % f.name
f.close()
yield "tcp://127.0.0.1:%d" % common.find_free_port()
def _test_default_store_timeout(self, backend):
for init_method in self._init_methods():
c2p = []
t = threading.Thread(
target=self._test_store_timeout, args=(backend, init_method, c2p)
)
t.daemon = True
t.start()
t.join(5)
self.assertEqual(1, len(c2p))
if isinstance(c2p[0], float):
# waiting time should be 1s, use 3s to rule out false alarm
self.assertGreater(3, c2p[0])
elif isinstance(c2p[0], RuntimeError):
# let @retry_on_connect_failures handle the error
raise c2p[0]
else:
raise RuntimeError("Unexpected type {}".format(type(c2p[0])))
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
class DoubleGpuNet(nn.Module):
def __init__(self, gpus):
super(DoubleGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[1])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.fc3(x)
return F.softmax(x, dim=1).to(dev0)
class QuadraGpuNet(nn.Module):
def __init__(self, gpus):
super(QuadraGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[2])
self.fc4 = nn.Linear(4, 4, bias=False).to(gpus[3])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
dev2 = self.fc3.weight.device
dev3 = self.fc4.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.relu(self.fc3(x.to(dev2)))
x = self.fc4(x.to(dev3))
return F.softmax(x, dim=1).to(dev0)
class ConvNet(nn.Module):
def __init__(self, gpus, layouts, dtypes):
super(ConvNet, self).__init__()
self.dtypes = dtypes
if isinstance(gpus, list):
self.layer_gpus = gpus
else:
gpus = [gpus] * 4
self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(
device=gpus[0], memory_format=layouts[0], dtype=dtypes[0]
)
self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(
device=gpus[1], memory_format=layouts[1], dtype=dtypes[1]
)
self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(
device=gpus[2], memory_format=layouts[2], dtype=dtypes[2]
)
self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(
device=gpus[3], memory_format=layouts[3], dtype=dtypes[3]
)
def forward(self, x):
x = x.to(self.dtypes[0])
# Could say
# x = self.conv0(x).to(device=self.conv1.weight.device, dtype=self.dtypes[1])
# etc. But I don't want to appeal to the weights' devices directly, because part of this test's purpose
# is to verify weights are where expected if the model gets replicated.
gpus = self.layer_gpus if hasattr(self, "layer_gpus") else [x.device] * 4
x = self.conv0(x).to(device=gpus[1], dtype=self.dtypes[1])
x = self.conv1(x).to(device=gpus[2], dtype=self.dtypes[2])
x = self.conv2(x).to(device=gpus[3], dtype=self.dtypes[3])
return self.conv3(x)
class Task(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.ones(2, 2))
def forward(self, x):
return self.p + x
class ModuleForDdpCommHook(nn.Module):
def __init__(self):
super().__init__()
self.t0 = Task()
def forward(self, x, rank):
return self.t0(x + rank)
class SparseGradientModule(nn.Module):
def __init__(self):
super(SparseGradientModule, self).__init__()
self.embedding = nn.EmbeddingBag(10, 10, sparse=True)
def forward(self, x):
return F.softmax(self.embedding(x), dim=1)
class CommonDistributedDataParallelTest(object):
def tearDown(self):
# DistributedDataParallel test doesn't seem to call FileStore destructor
# TODO: investigate; this test is known to have issues
# Use this hack to remove files for that test
try:
os.remove(self.file_name)
except OSError:
pass
@property
def world_size(self):
return 2
def _prepare_single_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
model = Net()
device = devices[0] if devices else torch.device("cuda:%d" % self.rank)
ddp_model = DistributedDataParallel(
copy.deepcopy(model).to(device),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
model.to(device)
input = torch.randn(global_batch_size, 2).to(device)
target = torch.randn(global_batch_size, 4).to(device)
return model, ddp_model, input, target
def _prepare_multi_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
self.assertTrue(
len(devices) == 2 or len(devices) == 4,
"unexpected devices for ddp tests {}".format(devices),
)
if len(devices) == 2:
model = DoubleGpuNet(devices)
elif len(devices) == 4:
model = QuadraGpuNet(devices)
ddp_model = DistributedDataParallel(
copy.deepcopy(model),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
input = torch.randn(global_batch_size, 2).cuda(devices[0])
target = torch.randn(global_batch_size, 4)
return model, ddp_model, input, target
def _get_store(self):
return dist.FileStore(self.file_name, self.world_size)
def _get_process_group(self):
raise NotImplementedError("To be implemented by child class")
def _train_model(self, model, input_var, target, loss, run_checkpoint=False, use_reentrant=True):
model.train()
if run_checkpoint:
output = checkpoint(model, input_var, use_reentrant=use_reentrant)
else:
output = model(input_var)
l = loss(output, target)
l.backward()
def _test_ddp_checkpointing(
self,
input_model,
process_group,
use_bucket_view,
find_unused_parameters=False,
static_graph=False,
run_checkpoint=False,
use_reentrant=True,
allow_none_grads=False,
):
# to reproduce the same training results
torch.cuda.set_device(self.rank)
torch.manual_seed(31415)
model = copy.deepcopy(input_model).cuda()
ddp_model = copy.deepcopy(input_model).cuda()
ddp_model = nn.parallel.DistributedDataParallel(
ddp_model,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[self.rank],
process_group=process_group,
find_unused_parameters=find_unused_parameters,
static_graph=static_graph,
)
self.assertEqual(
ddp_model._get_ddp_logging_data().get("static_graph", 0), static_graph
)
input, ddp_input, target, ddp_target = self._prepare_dummy_data()
loss = nn.MSELoss()
n_iters = 5
for i in range(n_iters):
model.zero_grad(set_to_none=False)
ddp_model.zero_grad(set_to_none=False)
self._train_model(model, input, target, loss, run_checkpoint=run_checkpoint, use_reentrant=use_reentrant)
self._train_model(
ddp_model, ddp_input, ddp_target, loss, run_checkpoint=run_checkpoint, use_reentrant=use_reentrant
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
if not allow_none_grads:
self.assertTrue(i.grad is not None)
self.assertTrue(j.grad is not None)
self.assertEqual(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)
# A list of tests for ddp with activation checkpointing
# when gradient_as_bucket_view=True, False.
# Most of the tests are adapted from
# https://github.com/facebookresearch/fairscale/blob/main/tests/nn/pipe/test_checkpoint_ddp.py
class CheckpointOnceModule(nn.Module):
"""
Runs checkpoint for a single layer in the model.
"""
def __init__(self, use_reentrant=True):
super().__init__()
self.l1 = nn.Linear(20, 20)
self.l2 = nn.Linear(20, 20)
self.use_reentrant = use_reentrant
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x, use_reentrant=self.use_reentrant)
return x
class CheckpointTwiceModule(CheckpointOnceModule):
"""
Runs checkpoint for the same layer twice in a model. This simulates use
cases such as pipeline parallel where the same layer can be checkpointed
more than one time.
"""
def __init__(self, use_reentrant=True):
super().__init__(use_reentrant=use_reentrant)
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x, use_reentrant=self.use_reentrant)
x = checkpoint(self.l2, x, use_reentrant=self.use_reentrant)
return x
class CheckpointTwiceModuleWeightSharing(CheckpointTwiceModule):
"""
Similar to CheckpointTwiceModule but the weights are shared.
"""
def __init__(self, use_reentrant=True):
super().__init__(use_reentrant=use_reentrant)
# Share weights
self.l1.weight = self.l2.weight
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x, use_reentrant=self.use_reentrant)
x = checkpoint(self.l2, x, use_reentrant=self.use_reentrant)
return x
class DynamicCheckpointTwiceModule(CheckpointTwiceModule):
def __init__(self, use_reentrant=True):
super().__init__(use_reentrant=use_reentrant)
self.count = 0
def forward(self, inp):
if self.count % 2:
x = checkpoint(self.l1, inp, use_reentrant=self.use_reentrant)
else:
x = checkpoint(self.l2, inp, use_reentrant=self.use_reentrant)
self.count += 1
return x
class DynamicCheckpointTwiceModuleWeightSharing(DynamicCheckpointTwiceModule):
def __init__(self, use_reentrant=True):
super().__init__(use_reentrant=use_reentrant)
# Share weights
self.l1.weight = self.l2.weight
def _prepare_dummy_data(self):
ddp_bs = 16
bs = ddp_bs * self.world_size
input = torch.rand((bs, 20), device="cuda", requires_grad=True)
target = torch.randn((bs, 20), device="cuda")
offset = self.rank * ddp_bs
ddp_input = input[offset : offset + ddp_bs]
ddp_target = target[offset : offset + ddp_bs]
return input, ddp_input, target, ddp_target
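# Note (added for clarity): _prepare_dummy_data shards the global batch by rank;
# each DDP replica trains on a contiguous slice of ddp_bs samples starting at
# rank * ddp_bs, while the reference model sees all bs = ddp_bs * world_size
# samples, so the gradient comparison in _test_ddp_checkpointing is meaningful.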
@skip_if_lt_x_gpu(2)
@parametrize("use_reentrant", [True, False])
def test_ddp_checkpointing_once(self, use_reentrant):
"""
DDP works as expected when layer is checkpointed only once.
"""
process_group = self._get_process_group()
for use_bucket_view, static_graph in product((False, True), (False, True)):
self._test_ddp_checkpointing(
self.CheckpointOnceModule(use_reentrant=use_reentrant),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=static_graph,
)
if static_graph:
# find_unused_parameters does not make a difference, since it is
# ignored for static graph.
self._test_ddp_checkpointing(
self.CheckpointOnceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=static_graph,
find_unused_parameters=True,
)
@skip_if_lt_x_gpu(2)
@parametrize("use_reentrant", [True, False])
def test_ddp_checkpointing_unused_params(self, use_reentrant):
"""
With the reentrant autograd checkpointing implementation, DDP will fail when
there are unused params in the model and static-graph training is not enabled.
With the non-reentrant checkpointing implementation, this works as expected.
"""
process_group = self._get_process_group()
for use_bucket_view in (True, False):
err_ctx = (
suppress() if not use_reentrant else
self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once."
)
)
with err_ctx:
model = self._test_ddp_checkpointing(
self.CheckpointOnceModule(use_reentrant=use_reentrant),
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True,
)
# test passes when static_graph is true
model = self._test_ddp_checkpointing(
self.CheckpointOnceModule(use_reentrant=use_reentrant),
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True,
static_graph=True,
)
@skip_if_lt_x_gpu(2)
@parametrize("use_reentrant", [True, False])
def test_ddp_checkpointing_twice(self, use_reentrant):
"""
Checkpointing twice fails for a non-static graph with the reentrant checkpoint
implementation, and succeeds with the non-reentrant checkpoint implementation.
"""
process_group = self._get_process_group()
for use_bucket_view in (True, False):
err_ctx = (
suppress() if not use_reentrant else
self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once."
)
)
with err_ctx:
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModule(use_reentrant=use_reentrant),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=False,
)
with err_ctx:
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModule(use_reentrant=use_reentrant),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=False,
find_unused_parameters=True,
)
@skip_if_lt_x_gpu(2)
@parametrize("use_reentrant", [True, False])
def test_ddp_checkpointing_twice_static_graph(self, use_reentrant):
"""
Regardless of reentrant or non-reentrant checkpointing impl,
checkpointing twice works with static graph enabled.
"""
process_group = self._get_process_group()
for use_bucket_view in (True, False):
# Test passes when static_graph=True.
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModule(use_reentrant=use_reentrant),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=True,
)
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_dynamic_module(self):
"""
Dynamic module can be checkpointed multiple times with the non-reentrant
checkpointing implementation.
"""
process_group = self._get_process_group()
for use_bucket_view in (True, False):
model = self._test_ddp_checkpointing(
self.DynamicCheckpointTwiceModule(use_reentrant=False),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=False,
find_unused_parameters=True,
# Grads can sometimes be None because the dynamic module does not use
# all params.
allow_none_grads=True
)
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_dynamic_weight_sharing(self):
"""
Dynamic module can be checkpointed multiple times with weight sharing
using non-reentrant checkpointing implementation.
"""
process_group = self._get_process_group()
for use_bucket_view in (True, False):
model = self._test_ddp_checkpointing(
self.DynamicCheckpointTwiceModuleWeightSharing(use_reentrant=False),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=False,
find_unused_parameters=True,
# Grads can sometimes be None because the dynamic module does not use
# all params.
allow_none_grads=True
)
# DDP works as expected if there is weight sharing among layers
@skip_if_lt_x_gpu(2)
@parametrize("use_reentrant", [True, False])
def test_ddp_checkpointing_weight_sharing(self, use_reentrant):
"""
Test that checkpointing with weight sharing works.
"""
process_group = self._get_process_group()
torch.cuda.set_device(self.rank)
for use_bucket_view, static_graph in product((False, True), (False, True)):
torch.manual_seed(31415)
l1 = nn.Linear(20, 20)
l2 = nn.Linear(20, 20)
l1.weight = l2.weight
model = nn.Sequential(l1, l2)
# TODO: non-reentrant based checkpointing of DDP module with
# static_graph runs into the below issue, see
# https://github.com/pytorch/pytorch/issues/70865 and
# https://github.com/pytorch/pytorch/issues/58111 for details.
err_ctx = (
self.assertRaisesRegex(
RuntimeError,
"Your training graph has changed in this iteration"
) if static_graph and not use_reentrant else suppress()
)
with err_ctx:
self._test_ddp_checkpointing(
model,
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=static_graph,
run_checkpoint=True,
use_reentrant=use_reentrant,
)
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_twice_weight_sharing(self):
"""
Checkpointing should work with static graph in the case of checkpointing
the same layer twice and having weights shared across layers.
"""
process_group = self._get_process_group()
torch.cuda.set_device(self.rank)
for use_bucket_view in (True, False):
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModuleWeightSharing(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=True,
)
def test_invalid_powerSGD_state(self):
for start_powerSGD_iter, use_error_feedback, warm_start in product(
[0, 1], [True, False], [True, False]
):
if not use_error_feedback and not warm_start:
continue
with self.assertRaisesRegex(
ValueError,
"Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, "
"because PowerSGD can only be applied after the first two iterations in DDP.",
):
state = powerSGD.PowerSGDState(
process_group=None,
matrix_approximation_rank=1,
start_powerSGD_iter=start_powerSGD_iter,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
def _test_ddp_with_process_group(
self,
process_group,
devices,
device_ids,
multi_device=False,
gradient_as_bucket_view=False,
):
"""
Note: we pass down `device_ids` all the way to DistributedDataParallel
as part of the test. Below you find tests that either use a list of
integers, a list of `torch.Device` instances, or an empty list.
The `devices` argument is used to control placement of the model and
must always be specified as list of `torch.Device` instances.
"""
local_batch_size = 1 if devices is None else len(devices)
global_batch_size = self.world_size * local_batch_size
if multi_device:
model, ddp_model, input, target = self._prepare_multi_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertTrue(ddp_logging_data.get("is_multi_device_module"))
else:
model, ddp_model, input, target = self._prepare_single_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertFalse(ddp_logging_data.get("is_multi_device_module"))
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
def update_parameters(model):
for param in model.parameters():
with torch.no_grad():
param -= param.grad
param.grad = None
# check two model parameters over 2 iterations
for iteration in range(2):
# single cpu/gpu training
step_model(model, input, target)
# DDP training, DDP scatters subsets of input_cpu to nodes/GPUs
step_model(
ddp_model,
input[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
target[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
)
# Update weights and run a second iteration to shake out errors
update_parameters(model)
update_parameters(ddp_model)
self.assertEqual(
len(list(model.parameters())), len(list(ddp_model.parameters()))
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertEqual(i, j, rtol=1.3e-06, atol=5e-5)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
def _gpu_model_with_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False, state=None
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
def _gpu_model_with_builtin_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a built-in DDP communication hook if defined
if hook is not None:
gpu_model._register_builtin_comm_hook(hook)
return gpu_model
def _run_and_verify_hook(self, model, input, expected_grad):
# Run forward
output = model(input, self.rank)
# Run backward
output.mean().backward()
[self.assertEqual(p.grad, expected_grad) for p in model.parameters()]
def _simple_hook(
self, state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
fut = torch.futures.Future()
fut.set_result(torch.ones_like(bucket.buffer()))
def fut_then(fut):
# Add ones to fut's result.
t = fut.value()
return t + torch.ones_like(t)
return fut.then(fut_then)
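# Note (added for illustration): a DDP comm hook receives the hook state and a
# GradBucket and must return a Future whose value is a tensor shaped like
# bucket.buffer(); DDP uses that value as the bucket's gradients. _simple_hook
# above therefore forces every gradient to 2.0 (ones from set_result plus ones
# added in fut_then).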
def _test_not_nan(self, model, x):
y = model(x)
self.assertFalse(y.isnan().any().item())
y.sum().backward()
for p in model.parameters():
self.assertFalse(p.grad.isnan().any().item())
@skip_if_lt_x_gpu(2)
def test_sync_batch_norm_only_empty_input(self):
pg = self._get_process_group()
model = torch.nn.Sequential(
nn.BatchNorm2d(2),
).to(device=self.rank)
model = DistributedDataParallel(
model,
device_ids=[self.rank],
process_group=pg,
)
model = nn.SyncBatchNorm.convert_sync_batchnorm(
model,
process_group=pg,
)
model.train()
# only rank 0 receives empty inputs
x = torch.zeros(
(1 if self.rank != 0 else 0, 2, 11, 13),
dtype=torch.float32,
device=self.rank
)
# input requires grad, this will trigger the collective communication
# in the backward pass
x.requires_grad = True
self._test_not_nan(model, x)
# input does not require grad
x.requires_grad = False
self._test_not_nan(model, x)
# all ranks receive empty inputs
x = torch.zeros(
(0, 2, 11, 13),
dtype=torch.float32,
device=self.rank
)
# input requires grad, this will trigger the collective communication
# in the backward pass
x.requires_grad = True
self._test_not_nan(model, x)
# input does not require grad
x.requires_grad = False
self._test_not_nan(model, x)
@skip_if_lt_x_gpu(2)
def test_sync_batch_norm_empty_input(self):
pg = self._get_process_group()
model = torch.nn.Sequential(
nn.Conv2d(2, 2, 3),
nn.BatchNorm2d(2),
nn.Linear(28, 2),
).to(device=self.rank)
model = DistributedDataParallel(
model,
device_ids=[self.rank],
process_group=pg,
)
model = nn.SyncBatchNorm.convert_sync_batchnorm(
model,
process_group=pg,
)
model.train()
# only rank 0 receives empty inputs
x = torch.zeros(
(3 if self.rank != 0 else 0, 2, 30, 30),
dtype=torch.float32,
device=self.rank
)
self._test_not_nan(model, x)
# all ranks receive empty inputs
x = torch.zeros(
(0, 2, 30, 30),
dtype=torch.float32,
device=self.rank
)
self._test_not_nan(model, x)
class ComputeBucketAssignmentTest(TestCase):
def test_single_limit_single_dtype(self):
tensors = [
torch.empty([100], dtype=torch.float),
torch.empty([200], dtype=torch.float),
torch.empty([100], dtype=torch.float),
torch.empty([50], dtype=torch.float),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [400]
)
self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
self.assertEqual([[0], [1], [2], [3]], result)
def test_single_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [400]
)
self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
self.assertEqual([[0, 2], [1, 3], [4], [5]], result)
def test_multi_limit_single_dtype(self):
tensors = [
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [40, 80]
)
self.assertEqual(per_bucket_size_limits, [40, 80, 80])
self.assertEqual([[0], [1, 2], [3]], result)
def test_multi_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [200, 400]
)
self.assertEqual([[0], [1], [2, 4], [3, 5]], result)
self.assertEqual(per_bucket_size_limits, [200, 200, 400, 400])
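# Summary (added; an interpretation of the tests above, not an authoritative
# spec): _compute_bucket_assignment_by_size packs tensors greedily, in order and
# per dtype, until a bucket would exceed the active byte-size limit; when a list
# of limits is given, successive buckets consume the limits in order and the
# last limit is reused once the list is exhausted.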
class AbstractCommTest(object):
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 2
def _verify_sequence_number_across_pg(self, pg, verify_pg):
seq_num = pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
# We use a separate pg to verify the sequence numbers, otherwise these
# collectives will themselves increment the sequence number.
dist.all_gather_object(obj_list, seq_num, group=verify_pg)
self.assertEqual(len(set(obj_list)), 1)
return obj_list[0]
def _test_sequence_num_incremented(self, process_group, ranks):
# verify initial sequence numbers. Use a distinct process group for
# verification to keep counts as expected with respect to process_group.
verify_pg = dist.new_group(
ranks=ranks,
backend="gloo",
)
assert dist.get_world_size(process_group) == dist.get_world_size(verify_pg)
initial_num = (
self._verify_sequence_number_across_pg(
pg=process_group, verify_pg=verify_pg
)
if not c10d._rank_not_in_group(process_group)
else -1
)
# Verify sequence numbers are appropriately incremented
for i in range(10):
t = torch.ones(1, device=torch.cuda.current_device())
dist.all_reduce(t, group=process_group)
if not c10d._rank_not_in_group(process_group):
seq_num = self._verify_sequence_number_across_pg(
pg=process_group,
verify_pg=verify_pg,
)
self.assertEqual(initial_num + i + 1, seq_num)
if dist.get_world_size(process_group) > 2:
# Test when certain ranks don't call collectives
if dist.get_rank(process_group) not in [0, 2]:
dist.all_reduce(t, group=process_group, async_op=True)
# Now ranks 0 and 2 should be lagging by 1.
if not c10d._rank_not_in_group(process_group):
seq_num = process_group._get_sequence_number_for_group()
rank = dist.get_rank(process_group)
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
dist.all_gather_object(obj_list, (rank, seq_num), group=verify_pg)
rank_to_seq_num = {rank: num for (rank, num) in obj_list}
self.assertEqual(len(set(rank_to_seq_num.values())), 2)
self.assertEqual(rank_to_seq_num[0], rank_to_seq_num[2])
expected_same = {
rank_to_seq_num[i]
for i in rank_to_seq_num.keys()
if i not in [0, 2]
}
self.assertEqual(len(expected_same), 1)
self.assertEqual(rank_to_seq_num[0] + 1, rank_to_seq_num[1])
def _test_sequence_num_incremented_default_group(self, backend_name):
torch.cuda.set_device(self.rank)
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
self._test_sequence_num_incremented(
c10d._get_default_group(),
ranks=list(i for i in range(dist.get_world_size())),
)
def _test_sequence_num_incremented_subgroup(self, backend_name):
torch.cuda.set_device(self.rank)
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup_ranks = [0, 1, 2]
subgroup = dist.new_group(subgroup_ranks)
self._test_sequence_num_incremented(subgroup, subgroup_ranks)
def _test_sequence_num_set_default_pg(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
default_pg = c10d._get_default_group()
seq_num = default_pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(obj_list, seq_num)
self.assertEqual(len(set(obj_list)), 1)
def _test_sequence_num_set_new_group(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup = dist.new_group([0, 1])
if not c10d._rank_not_in_group(subgroup):
subgroup_seq = subgroup._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(subgroup))]
dist.all_gather_object(obj_list, subgroup_seq, group=subgroup)
self.assertEqual(len(set(obj_list)), 1)
def _test_warn_not_in_group(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
in_group_ranks = list(filter(lambda x: x % 2 == 0, range(self.world_size)))
group = dist.new_group(in_group_ranks)
x = torch.zeros(2, 2).cuda(self.rank)
xs = [torch.zeros(2, 2).cuda(self.rank) for _ in range(len(in_group_ranks))]
if self.rank not in in_group_ranks:
msg = ".*{}.*does not belong to.*"
with self.assertWarnsOnceRegex(UserWarning, msg.format("all_gather")):
dist.all_gather(xs, x, group=group)
with self.assertWarnsOnceRegex(UserWarning, msg.format("all_reduce")):
dist.all_reduce(x, group=group)
with self.assertWarnsOnceRegex(UserWarning, msg.format("barrier")):
dist.barrier(group=group)
with self.assertWarnsOnceRegex(UserWarning, msg.format("broadcast")):
dist.broadcast(x, src=0, group=group)
else:
dist.all_gather(xs, x, group=group)
dist.all_reduce(x, group=group)
dist.barrier(group=group)
dist.broadcast(x, src=0, group=group)
class CommTest(AbstractCommTest, MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
self._spawn_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def test_debug_level(self):
try:
del os.environ["TORCH_DISTRIBUTED_DEBUG"]
except KeyError:
pass
dist.set_debug_level_from_env()
# Default should be off
default_debug_mode = dist.get_debug_level()
self.assertEqual(default_debug_mode, dist.DebugLevel.OFF)
mapping = {
"OFF": dist.DebugLevel.OFF,
"off": dist.DebugLevel.OFF,
"oFf": dist.DebugLevel.OFF,
"INFO": dist.DebugLevel.INFO,
"info": dist.DebugLevel.INFO,
"INfO": dist.DebugLevel.INFO,
"DETAIL": dist.DebugLevel.DETAIL,
"detail": dist.DebugLevel.DETAIL,
"DeTaIl": dist.DebugLevel.DETAIL,
}
invalid_debug_modes = ["foo", 0, 1, -1]
for mode in mapping.keys():
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
dist.set_debug_level_from_env()
set_debug_mode = dist.get_debug_level()
self.assertEqual(
set_debug_mode,
mapping[mode],
f"Expected {mode} to map to {mapping[mode]} but got {set_debug_mode}",
)
for mode in invalid_debug_modes:
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
with self.assertRaisesRegex(RuntimeError, "The value of TORCH_DISTRIBUTED_DEBUG must"):
dist.set_debug_level_from_env()
class DummyWork(dist._Work):
def wait(self, timeout=5.0):
if torch.cuda.is_available():
torch.cuda.current_stream().synchronize()
return True
class DummyProcessGroup(dist.ProcessGroup):
def getBackendName(self):
return "Dummy"
def allgather(self, output_tensor_lists, input_tensor_list, opts=None):
for output_tensor_list, input_tensor in zip(output_tensor_lists, input_tensor_list):
for output_tensor in output_tensor_list:
output_tensor.copy_(input_tensor)
return DummyWork()
def allreduce(self, tensor_list, opts=None):
for tensor in tensor_list:
tensor.add_(2)
return DummyWork()
def barrier(self, opts=None):
store = c10d._get_default_store()
key = "TEST:DummyProcessGroup:barrier"
if self.rank() == 0:
worker_count = 0
# By default, TCPServer lives on rank 0. So rank 0 needs to make
# sure that it does not exit too early before other ranks finish
# using the store.
# Note that _store_based_barrier does not solve this problem, as
# all ranks need to run at least one store.add(key, 0) before
# exiting, but there is no guarantee that rank 0 is still alive at
# that point.
while worker_count < self.size() - 1:
worker_count = store.add(key, 0)
else:
store.add(key, 1)
return DummyWork()
def broadcast(self, tensor_list, opts=None):
for tensor in tensor_list:
tensor.add_(1)
return DummyWork()
def reduce_scatter(self, output_tensor_list, input_tensor_lists, opts=None):
for output_tensor, input_tensor_list in zip(output_tensor_list, input_tensor_lists):
output_tensor.copy_(input_tensor_list[self.rank()])
return DummyWork()
def send(self, tensor_list, dst, tag=0):
for tensor in tensor_list:
tensor.add_(1)
return DummyWork()
def recv(self, tensor_list, src, tag=0):
for tensor in tensor_list:
tensor.add_(2)
return DummyWork()
class PythonProcessGroupExtensionTest(MultiProcessTestCase):
def setUp(self):
super(PythonProcessGroupExtensionTest, self).setUp()
self._spawn_processes()
def tearDown(self):
super(PythonProcessGroupExtensionTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def test_get_backend_name(self):
dpg = DummyProcessGroup(0, 1)
self.assertEqual("Dummy", dpg.name())
def test_backend_class_attr(self):
dist.Backend.register_backend(
"dummy",
PythonProcessGroupExtensionTest.create_dummy
)
self.assertEqual(dist.Backend.DUMMY, "DUMMY")
self.assertEqual(
dist.Backend._plugins["DUMMY"],
PythonProcessGroupExtensionTest.create_dummy
)
@staticmethod
def create_dummy(store, rank, size, timeout):
return DummyProcessGroup(rank, size)
def test_collectives(self):
dist.Backend.register_backend("dummy", PythonProcessGroupExtensionTest.create_dummy)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '6789'
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test all_gather
input_tensor = torch.ones(2, 2) * 7
output_tensor_list = [torch.zeros(2, 2) for _ in range(self.world_size)]
dist.all_gather(output_tensor_list, input_tensor)
for tensor in output_tensor_list:
self.assertEqual(tensor, input_tensor)
# test all_reduce
input_tensor = torch.ones(2, 2) * 7
dist.all_reduce(input_tensor)
self.assertEqual(input_tensor, torch.ones(2, 2) * 7 + 2)
# test broadcast
input_tensor = torch.zeros(2, 2)
dist.broadcast(input_tensor, 0, async_op=True).wait()
self.assertEqual(torch.ones(2, 2), input_tensor)
# test reduce_scatter
output_tensor = torch.zeros(2, 2)
input_tensor_list = [torch.ones(2, 2) for _ in range(self.world_size)]
dist.reduce_scatter(output_tensor, input_tensor_list)
self.assertEqual(output_tensor, torch.zeros(2, 2) + 1)
dist.barrier()
dist.destroy_process_group()
def test_send_recv(self):
dist.Backend.register_backend("dummy", PythonProcessGroupExtensionTest.create_dummy)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '6789'
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test send
input_tensor = torch.zeros(2, 2)
dist.send(input_tensor, (self.rank + 1) % self.world_size)
self.assertEqual(input_tensor, torch.zeros(2, 2) + 1)
# test recv
input_tensor = torch.zeros(2, 2)
dist.recv(input_tensor, (self.rank + 1) % self.world_size)
self.assertEqual(input_tensor, torch.zeros(2, 2) + 2)
dist.barrier()
# intentionally not calling into `destroy_process_group` as not all
        # user applications would explicitly call it.
instantiate_parametrized_tests(CommonDistributedDataParallelTest)
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
pytorch-master
|
test/distributed/test_c10d_common.py
|
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import pytest
import torch
import torch.distributed as dist
@pytest.fixture(autouse=True)
def manual_seed_zero():
torch.manual_seed(0)
@pytest.fixture(scope="session")
def cuda_sleep():
# Warm-up CUDA.
torch.empty(1, device="cuda")
# From test/test_cuda.py in PyTorch.
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
cycles_per_ms = 1000000 / start.elapsed_time(end)
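    # Event.elapsed_time() reports milliseconds, so this estimates how many
    # device cycles elapse per millisecond; cuda_sleep() then converts
    # wall-clock seconds into an equivalent cycle count for torch.cuda._sleep().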
def cuda_sleep(seconds):
torch.cuda._sleep(int(seconds * cycles_per_ms * 1000))
return cuda_sleep
def pytest_report_header():
return f"torch: {torch.__version__}"
@pytest.fixture
def setup_rpc(scope="session"):
file = tempfile.NamedTemporaryFile()
dist.rpc.init_rpc(
name="worker0",
rank=0,
world_size=1,
rpc_backend_options=dist.rpc.TensorPipeRpcBackendOptions(
init_method="file://{}".format(file.name),
)
)
yield
dist.rpc.shutdown()
def pytest_ignore_collect(path, config):
"Skip this directory if distributed modules are not enabled."
return not dist.is_available()
|
pytorch-master
|
test/distributed/pipeline/sync/conftest.py
|
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe
def test_simple_linears(setup_rpc):
def sum_grad(parameters):
return sum([p.grad.sum() for p in parameters if p.grad is not None])
def zero_grad(parameters):
for p in parameters:
p.grad = None
inputs = torch.rand(8, 1)
model = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 4), nn.Linear(4, 2), nn.Linear(2, 1),)
# Without Pipe
outputs = model(inputs)
loss = outputs.mean()
loss.backward()
grad_without_pipe = sum_grad(model.parameters())
zero_grad(model.parameters())
# With Pipe
model = Pipe(model, chunks=4)
outputs = model(inputs).local_value()
loss = outputs.mean()
loss.backward()
grad_with_pipe = sum_grad(model.parameters())
# Both grads should be identical.
assert torch.allclose(grad_with_pipe, grad_without_pipe)
|
pytorch-master
|
test/distributed/pipeline/sync/test_transparency.py
|
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe
def test_inplace_on_requires_grad(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1), nn.ReLU(inplace=True))
model = Pipe(model, checkpoint="always")
x = torch.rand(1)
y = model(x).local_value()
message = r"a leaf Variable that requires grad .* used in an in-place operation."
with pytest.raises(RuntimeError, match=message):
y.backward()
@pytest.mark.xfail(strict=True)
def test_inplace_on_not_requires_grad(setup_rpc):
# In-place operation on a tensor not requiring grad doesn't cause a
# RuntimeError. Currently, we cannot detect this case.
model = nn.Sequential(nn.ReLU(inplace=True))
    model = Pipe(model, checkpoint="always")
x = torch.rand(1)
y = model(x).local_value()
del model
message = r"a leaf Variable that requires grad .* used in an in-place operation."
with pytest.raises(RuntimeError, match=message):
y.backward()
@pytest.mark.xfail(strict=True)
def test_inplace_incorrect_grad(setup_rpc):
class M(nn.Module):
def forward(self, foo_bar):
# 'foo' requires grad but 'bar' does not. In-place operation on
# 'bar' won't cause a RuntimeError.
foo, bar = foo_bar
# add_(1) is not idempotent, in contrast to relu_(). If it is
            # executed multiple times, it will accumulate each difference onto
# 'bar'.
bar.add_(1)
# 'bar' is still captured by checkpointing. 'foo' will get
# incorrect grad.
return foo * bar
model = nn.Sequential(M())
    model = Pipe(model, checkpoint="always")
foo = torch.tensor([1.0], requires_grad=True)
bar = torch.tensor([1.0])
output = model((foo, bar)).local_value()
del model
output.backward()
    # The gradient of 'foo' should be 2, but it is actually 3 because
    # bar.add_(1) was executed twice due to checkpointing.
assert foo.grad.item() == 2.0
|
pytorch-master
|
test/distributed/pipeline/sync/test_inplace.py
|