| python_code (stringlengths 0-679k) | repo_name (stringlengths 9-41) | file_path (stringlengths 6-149) |
|---|---|---|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for manip ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.platform import googletest
class ManipOpsTest(xla_test.XLATestCase):
"""Test cases for manip ops."""
def _testRoll(self, a, shift, axis):
with self.session() as session:
with self.test_scope():
p = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
        output = manip_ops.roll(p, shift, axis)
result = session.run(output, {p: a})
self.assertAllEqual(result, np.roll(a, shift, axis))
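  # For reference, np.roll shifts elements circularly along the given axis,
  # e.g. np.roll([1, 2, 3, 4, 5], shift=2, axis=0) gives [4, 5, 1, 2, 3];
  # the helper above checks manip_ops.roll against this NumPy behavior.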
def testNumericTypes(self):
for t in self.numeric_types:
self._testRoll(np.random.randint(-100, 100, (5)).astype(t), 3, 0)
self._testRoll(
np.random.randint(-100, 100, (4, 4, 3)).astype(t), [1, -6, 6],
[0, 1, 2])
self._testRoll(
np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t), [0, 1, -2],
[1, 2, 3])
def testFloatTypes(self):
for t in self.float_types:
self._testRoll(np.random.rand(5).astype(t), 2, 0)
self._testRoll(np.random.rand(3, 4).astype(t), [1, 2], [1, 0])
self._testRoll(np.random.rand(1, 3, 4).astype(t), [1, 0, -3], [0, 1, 2])
def testComplexTypes(self):
for t in self.complex_types:
x = np.random.rand(4, 4).astype(t)
self._testRoll(x + 1j * x, 2, 0)
x = np.random.rand(2, 5).astype(t)
self._testRoll(x + 1j * x, [1, 2], [1, 0])
x = np.random.rand(3, 2, 1, 1).astype(t)
self._testRoll(x + 1j * x, [2, 1, 1, 0], [0, 3, 1, 2])
if __name__ == "__main__":
googletest.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/compiler/tests/manip_ops_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for scan ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def numpy_reverse(x, axis):
length = len(x.shape)
if axis < 0:
axis = length + axis
ix = [
slice(None, None, -1) if i == axis else slice(None) for i in range(length)
]
return x[ix]
def handle_options(func, x, axis, exclusive, reverse):
"""Adds tf options to numpy scan ops."""
length = len(x.shape)
if axis < 0:
axis = length + axis
if reverse:
x = numpy_reverse(x, axis)
if exclusive:
ix_head = [slice(0, 1) if i == axis else slice(None) for i in range(length)]
ix_init = [
slice(0, -1) if i == axis else slice(None) for i in range(length)
]
if func == np.cumsum:
init = np.zeros_like(x[ix_head])
elif func == np.cumprod:
init = np.ones_like(x[ix_head])
else:
raise ValueError("Unknown scan function.")
x = np.concatenate([init, func(x[ix_init], axis)], axis=axis)
else:
x = func(x, axis=axis)
if reverse:
x = numpy_reverse(x, axis)
return x
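# For example, with x = np.array([1, 2, 3]) and axis=0:
#   handle_options(np.cumsum, x, 0, exclusive=True, reverse=False) -> [0, 1, 3]
#   handle_options(np.cumsum, x, 0, exclusive=True, reverse=True)  -> [5, 3, 0]
# mirroring the exclusive/reverse semantics of tf.cumsum.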
class CumsumTest(xla_test.XLATestCase):
valid_dtypes = [np.float32, np.int32]
def axis_dtypes(self):
return set(self.int_types).intersection([np.int32, np.int64])
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
with self.session(), self.test_scope():
p = array_ops.placeholder(x.dtype)
tf_out = math_ops.cumsum(p, axis, exclusive, reverse).eval(
feed_dict={p: x})
self.assertAllClose(np_out, tf_out)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def testAxisType(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis_dtype in self.axis_dtypes():
with self.session(), self.test_scope():
p = array_ops.placeholder(x.dtype)
axis = constant_op.constant(0, axis_dtype)
math_ops.cumsum(p, axis).eval(feed_dict={p: x})
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 10).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 20).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
with self.session(), self.test_scope():
input_tensor = ops.convert_to_tensor(x)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumsum(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumsum(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
math_ops.cumsum(input_tensor, [0]).eval()
class CumprodTest(xla_test.XLATestCase):
valid_dtypes = [np.float32, np.int32]
def axis_dtypes(self):
return set(self.int_types).intersection([np.int32, np.int64])
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
with self.session(), self.test_scope():
p = array_ops.placeholder(x.dtype)
prod = math_ops.cumprod(p, axis, exclusive, reverse)
tf_out = prod.eval(feed_dict={p: x})
self.assertAllClose(np_out, tf_out)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def testAxisType(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis_dtype in self.axis_dtypes():
with self.session(), self.test_scope():
p = array_ops.placeholder(x.dtype)
axis = constant_op.constant(0, axis_dtype)
          math_ops.cumprod(p, axis).eval(feed_dict={p: x})
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 11).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 21).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
with self.session(), self.test_scope():
input_tensor = ops.convert_to_tensor(x)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumprod(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumprod(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
math_ops.cumprod(input_tensor, [0]).eval()
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/compiler/tests/scan_ops_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for fused batch norm operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import test_utils
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
DATA_FORMATS = (
("_data_format_NHWC", "NHWC"),
("_data_format_NCHW", "NCHW"),
)
class FusedBatchNormTest(xla_test.XLATestCase, parameterized.TestCase):
def _reference_training(self, x, scale, offset, epsilon, data_format):
if data_format != "NHWC":
raise ValueError("data_format must be NHWC, got %s." % data_format)
x_square = x * x
x_square_sum = np.sum(x_square, (0, 1, 2))
x_sum = np.sum(x, axis=(0, 1, 2))
element_count = np.size(x) / int(np.shape(x)[-1])
mean = x_sum / element_count
var = x_square_sum / element_count - mean * mean
factor = element_count / max(element_count - 1, 1)
corrected_var = var * factor
normalized = (x - mean) / np.sqrt(var + epsilon)
return (normalized * scale + offset), mean, var, corrected_var
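  # Note: `corrected_var` applies Bessel's correction; for the [2, 2, 6, C]
  # inputs used in these tests the per-channel element_count is 24, so
  # corrected_var = var * 24 / 23.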
def _reference_grad(self, x, grad_y, scale, mean, var, epsilon, data_format):
# Use the following formulas to calculate gradients:
# grad_scale =
# sum(grad_y * (x - mean)) * rsqrt(var + epsilon)
#
    # grad_offset = sum(grad_y)
#
# grad_x =
# 1/N * scale * rsqrt(var + epsilon) * (N * grad_y - sum(grad_y) -
# (x - mean) * sum(grad_y * (x - mean)) / (var + epsilon))
if data_format != "NHWC":
raise ValueError("data_format must be NHWC, got %s." % data_format)
grad_x = scale * (grad_y - np.mean(grad_y, axis=(0, 1, 2)) -
(x - mean) * np.mean(grad_y *
(x - mean), axis=(0, 1, 2)) /
(var + epsilon)) / np.sqrt(var + epsilon)
grad_scale = np.sum(
grad_y * (x - mean) / np.sqrt(var + epsilon), axis=(0, 1, 2))
grad_offset = np.sum(grad_y, axis=(0, 1, 2))
return grad_x, grad_scale, grad_offset
@parameterized.named_parameters(*DATA_FORMATS)
def testInference(self, data_format):
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
data_format_src = "NHWC"
y_ref, mean_ref, var_ref, _ = self._reference_training(
x_val, scale_val, offset_val, epsilon, data_format_src)
with self.session() as sess, self.test_scope():
# To avoid constant folding
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
y_ref_converted = test_utils.ConvertBetweenDataFormats(
y_ref, data_format_src, data_format)
t_val = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
offset = array_ops.placeholder(
np.float32, shape=scale_shape, name="offset")
y, mean, variance = nn.fused_batch_norm(
t_val,
scale,
offset,
mean=mean_ref,
variance=var_ref,
epsilon=epsilon,
data_format=data_format,
is_training=False)
y_val, _, _ = sess.run([y, mean, variance], {
t_val: x_val_converted,
scale: scale_val,
offset: offset_val
})
self.assertAllClose(y_val, y_ref_converted, atol=1e-3)
def _testLearning(self, use_gradient_checker, data_format):
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
data_format_src = "NHWC"
# When in training mode, fused_batchnorm applies an implicit Bessel's
# correction. So we have to use the corrected variance here, as well.
y_ref, mean_ref, _, var_ref_corr = self._reference_training(
x_val, scale_val, offset_val, epsilon, data_format_src)
with self.session() as sess, self.test_scope():
# To avoid constant folding
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
y_ref_converted = test_utils.ConvertBetweenDataFormats(
y_ref, data_format_src, data_format)
t_val = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
offset = array_ops.placeholder(
np.float32, shape=scale_shape, name="offset")
y, mean, var = nn.fused_batch_norm(
t_val,
scale,
offset,
mean=None,
variance=None,
epsilon=epsilon,
data_format=data_format,
is_training=True)
# Check gradient.
if use_gradient_checker:
err = gradient_checker.compute_gradient_error(
t_val,
x_val_converted.shape,
y,
x_val_converted.shape,
extra_feed_dict={
t_val: x_val_converted,
scale: scale_val,
offset: offset_val
})
self.assertLess(err, 1e-3)
y_val, mean_val, var_val = sess.run([y, mean, var], {
t_val: x_val_converted,
scale: scale_val,
offset: offset_val
})
self.assertAllClose(mean_val, mean_ref, atol=1e-3)
self.assertAllClose(y_val, y_ref_converted, atol=1e-3)
self.assertAllClose(var_val, var_ref_corr, atol=1e-3)
@parameterized.named_parameters(*DATA_FORMATS)
def testLearning(self, data_format):
self._testLearning(False, data_format)
@parameterized.named_parameters(*DATA_FORMATS)
def testLearningWithGradientChecker(self, data_format):
self._testLearning(True, data_format)
@parameterized.named_parameters(*DATA_FORMATS)
def testGradientTraining(self, data_format):
# TODO(b/64270657): Use gradient_checker here in addition to comparing with
# this reference implementation.
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
grad_val = np.random.random_sample(x_shape).astype(np.float32)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
# The TensorFlow FusedBatchNormGrad training operation takes two inputs with
    # implementation-defined values. In theory, the only correct values for
    # these inputs are the corresponding reserve_space_{1|2} outputs from the
# FusedBatchNorm training operation. However, in practice, we rely on the
# first one being mean on {C|G}PU, and the second one being variance on CPU
# and inverse(sqrt(variance + epsilon)) on GPU (we test this assumption
# separately).
reserve_space_1_val = mean_val
if self.device == "XLA_GPU":
reserve_space_2_val = np.reciprocal(np.sqrt(var_val + epsilon))
else:
reserve_space_2_val = var_val
data_format_src = "NHWC"
grad_x_ref, grad_scale_ref, grad_offset_ref = self._reference_grad(
x_val, grad_val, scale_val, mean_val, var_val, epsilon, data_format_src)
with self.session() as sess, self.test_scope():
grad_val_converted = test_utils.ConvertBetweenDataFormats(
grad_val, data_format_src, data_format)
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
grad_x_ref_converted = test_utils.ConvertBetweenDataFormats(
grad_x_ref, data_format_src, data_format)
grad = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="grad")
x = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
reserve_space_1 = array_ops.placeholder(
np.float32, shape=scale_shape, name="reserve_space_1")
reserve_space_2 = array_ops.placeholder(
np.float32, shape=scale_shape, name="reserve_space_2")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
grad_x, grad_scale, grad_offset, _, _ = gen_nn_ops.fused_batch_norm_grad(
grad,
x,
scale,
reserve_space_1,
reserve_space_2,
data_format=data_format,
is_training=True)
grad_x_val, grad_scale_val, grad_offset_val = sess.run(
[grad_x, grad_scale, grad_offset], {
grad: grad_val_converted,
x: x_val_converted,
reserve_space_1: reserve_space_1_val,
reserve_space_2: reserve_space_2_val,
scale: scale_val
})
self.assertAllClose(grad_x_val, grad_x_ref_converted, atol=1e-2)
self.assertAllClose(grad_scale_val, grad_scale_ref, atol=1e-2)
self.assertAllClose(grad_offset_val, grad_offset_ref, atol=1e-3)
@parameterized.named_parameters(*DATA_FORMATS)
def testGradientInference(self, data_format):
# TODO(b/64270657): Use gradient_checker here in addition to comparing with
# this reference implementation.
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
grad_val = np.random.random_sample(x_shape).astype(np.float32)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
data_format_src = "NHWC"
with self.session() as sess, self.test_scope():
grad_val_converted = test_utils.ConvertBetweenDataFormats(
grad_val, data_format_src, data_format)
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
grad = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="grad")
x = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
mean = array_ops.placeholder(np.float32, shape=scale_shape, name="mean")
var = array_ops.placeholder(np.float32, shape=scale_shape, name="var")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
with self.test_scope():
out = gen_nn_ops.fused_batch_norm_grad(
grad,
x,
scale,
mean,
var,
data_format=data_format,
is_training=False)
grad_x, grad_scale, grad_offset, _, _ = out
ref_x, ref_scale, ref_offset, _, _ = gen_nn_ops.fused_batch_norm_grad(
grad, x, scale, mean, var, data_format=data_format, is_training=False)
      grad_x_val, grad_scale_val, grad_offset_val = sess.run(
[grad_x, grad_scale, grad_offset], {
grad: grad_val_converted,
x: x_val_converted,
mean: mean_val,
var: var_val,
scale: scale_val
})
      grad_x_ref, grad_scale_ref, grad_offset_ref = sess.run(
[ref_x, ref_scale, ref_offset], {
grad: grad_val_converted,
x: x_val_converted,
mean: mean_val,
var: var_val,
scale: scale_val
})
self.assertAllClose(grad_x_val, grad_x_ref, atol=1e-2)
self.assertAllClose(grad_scale_val, grad_scale_ref, atol=1e-2)
self.assertAllClose(grad_offset_val, grad_offset_ref, atol=1e-3)
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/compiler/tests/fused_batchnorm_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for quantized operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class QuantizedOpsTest(xla_test.XLATestCase):
# Verify that quantized types can be clustered by XLA.
def testQuantizedTypeRoundtrip(self):
with self.session() as session:
for dtype in self.quantized_tf_types:
in_values = np.array([1, 2, 3, 4, 5, 6])
expected = [[1, 2], [3, 4], [5, 6]]
with self.test_scope():
p = array_ops.placeholder(dtype=dtypes.int32)
x = math_ops.cast(p, dtype)
x = array_ops.reshape(x, [3, 2])
value = session.run(x, {p: in_values})
self.assertAllEqual(value, expected)
class DequantizedOpsTest(xla_test.XLATestCase):
def pack_uint8_r2_to_uint32(self, test_input):
num_rows, num_columns = test_input.get_shape().as_list()
num_output_columns = int(math.ceil(num_columns / 4.0))
padding_input = array_ops.pad(
math_ops.cast(test_input, dtype=dtypes.uint8),
        constant_op.constant(
            [[0, 0], [0, num_output_columns * 4 - num_columns]]))
output = array_ops.zeros([num_rows, num_output_columns],
dtype=dtypes.uint32)
num_elements_per_pack = 4
shift_bits = 8
iota_r1 = math_ops.range(num_output_columns * num_elements_per_pack)
for p in range(num_elements_per_pack):
selected_index = math_ops.equal(
math_ops.mod(iota_r1, num_elements_per_pack), p)
gather_index = array_ops.boolean_mask(iota_r1, selected_index)
gathered_input = array_ops.gather(padding_input, gather_index, axis=1)
total_shift_bits = shift_bits * (num_elements_per_pack - p - 1)
left_shift_input = bitwise_ops.left_shift(
math_ops.cast(gathered_input, dtype=dtypes.uint32), total_shift_bits)
output = bitwise_ops.bitwise_or(output, left_shift_input)
return output
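  # For example, a row of four uint8 values [1, 2, 3, 4] packs into the single
  # uint32 0x01020304 (16909060): earlier bytes land in higher-order bits.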
def testDequantizeQuint8(self):
num_rows = 100
num_columns = 3547
random_input = np.random.normal(128.0, 10.0, [num_rows, num_columns])
with self.session() as session:
with ops.device("CPU"):
test_input = ops.convert_to_tensor(random_input, dtype=dtypes.float32)
transposed_input = array_ops.transpose(test_input, [1, 0])
quantized_input = array_ops.quantize(transposed_input, 0.0, 255.0,
dtypes.quint8)
packed_input = self.pack_uint8_r2_to_uint32(quantized_input.output)
with self.test_scope():
transposed_quantized_output = xla.dequantize(packed_input, 0.0, 255.0,
"MIN_COMBINED", True)
quantized_output = array_ops.slice(transposed_quantized_output, [0, 0],
[num_rows, num_columns])
value = session.run(quantized_output)
self.assertAllClose(value, random_input, 1.0)
if __name__ == "__main__":
googletest.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/compiler/tests/quantized_ops_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for xla.reduce_window."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class ReduceWindowTest(xla_test.XLATestCase):
"""Test cases for xla.reduce_window."""
def _reduce_window(self, operand, init, reducer, **kwargs):
with self.session():
placeholder = array_ops.placeholder(operand.dtype)
with self.test_scope():
output = xla.reduce_window(placeholder, init, reducer, **kwargs)
return output.eval(feed_dict={placeholder: operand})
def testReduceWindow(self):
# TODO(b/77644762): float16 and float64 ReduceWindow are unimplemented.
for dtype in set(self.numeric_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
@function.Defun(dtype, dtype)
def sum_reducer(x, y):
return x + y
@function.Defun(dtype, dtype)
def mul_reducer(x, y):
return x * y
self.assertAllClose(
np.array([3, 5, 7, 9, 11, 13], dtype=dtype),
self._reduce_window(
np.array([1, 2, 3, 4, 5, 6, 7], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[2]))
self.assertAllClose(
np.array([3, 7, 11], dtype=dtype),
self._reduce_window(
np.array([1, 2, 3, 4, 5, 6, 7], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[2],
window_strides=[2]))
self.assertAllClose(
np.array([1, 4, 7], dtype=dtype),
self._reduce_window(
np.array([1, 2, 3, 4, 5, 6, 7], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[1],
window_strides=[3]))
self.assertAllClose(
np.array([[24, 36, 24], [96, 0, 0]], dtype=dtype),
self._reduce_window(
np.array([[1, 2, 3, 4], [4, 3, 2, 1], [2, 4, 0, 1]], dtype=dtype),
1.0,
mul_reducer,
window_dimensions=[2, 2],
window_strides=[1, 1]))
self.assertAllClose(
np.array([[0, 0, 0], [5, 10, 5], [2, 4, 1], [0, 0, 0]], dtype=dtype),
self._reduce_window(
np.array([[1, 2, 3, 4], [4, 3, 2, 1], [2, 4, 0, 1]], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[2, 2],
window_strides=[2, 2],
padding=[[2, 3], [1, 2]]))
if __name__ == '__main__':
googletest.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/compiler/tests/reduce_window_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA listdiff operator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ListDiffTest(xla_test.XLATestCase):
def _testListDiff(self, x, y, out, idx):
for dtype in [dtypes.int32, dtypes.int64]:
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.session():
x_tensor = ops.convert_to_tensor(x, dtype=dtype)
y_tensor = ops.convert_to_tensor(y, dtype=dtype)
with self.test_scope():
out_tensor, idx_tensor = array_ops.listdiff(
x_tensor, y_tensor, out_idx=index_dtype)
tf_out, tf_idx = self.evaluate([out_tensor, idx_tensor])
self.assertAllEqual(out, tf_out)
self.assertAllEqual(idx, tf_idx)
self.assertEqual(1, out_tensor.get_shape().ndims)
self.assertEqual(1, idx_tensor.get_shape().ndims)
def testBasic1(self):
self._testListDiff(x=[1, 2, 3, 4], y=[1, 2], out=[3, 4], idx=[2, 3])
def testBasic2(self):
self._testListDiff(x=[1, 2, 3, 4], y=[2], out=[1, 3, 4], idx=[0, 2, 3])
def testBasic3(self):
self._testListDiff(x=[1, 4, 3, 2], y=[4, 2], out=[1, 3], idx=[0, 2])
def testDuplicates(self):
self._testListDiff(x=[1, 2, 4, 3, 2, 3, 3, 1],
y=[4, 2],
out=[1, 3, 3, 3, 1],
idx=[0, 3, 5, 6, 7])
def testRandom(self):
num_random_tests = 10
int_low = -7
int_high = 8
max_size = 50
for _ in xrange(num_random_tests):
x_size = np.random.randint(max_size + 1)
x = np.random.randint(int_low, int_high, size=x_size)
y_size = np.random.randint(max_size + 1)
y = np.random.randint(int_low, int_high, size=y_size)
out_idx = [(entry, pos) for pos, entry in enumerate(x) if entry not in y]
if out_idx:
out, idx = map(list, zip(*out_idx))
else:
out = []
idx = []
self._testListDiff(list(x), list(y), out, idx)
def testFullyOverlapping(self):
self._testListDiff(x=[1, 2, 3, 4], y=[1, 2, 3, 4], out=[], idx=[])
def testNonOverlapping(self):
self._testListDiff(x=[1, 2, 3, 4],
y=[5, 6],
out=[1, 2, 3, 4],
idx=[0, 1, 2, 3])
def testEmptyX(self):
self._testListDiff(x=[], y=[1, 2], out=[], idx=[])
def testEmptyY(self):
self._testListDiff(x=[1, 2, 3, 4], y=[], out=[1, 2, 3, 4], idx=[0, 1, 2, 3])
def testEmptyXY(self):
self._testListDiff(x=[], y=[], out=[], idx=[])
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/compiler/tests/listdiff_op_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Proximal Gradient Descent optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import proximal_gradient_descent
class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
def testResourceProximalGradientDescentwithoutRegularization(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
      # Run 3 steps of proximal gradient descent.
for _ in range(3):
update.run()
self.assertAllClose(np.array([-0.9, -1.8]), self.evaluate(var0))
self.assertAllClose(np.array([-0.09, -0.18]), self.evaluate(var1))
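      # Without regularization each step is var -= 3.0 * grad, so after 3
      # steps var0 = 0 - 3 * 3.0 * [0.1, 0.2] = [-0.9, -1.8] and
      # var1 = [-0.09, -0.18], matching the assertions above.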
def testProximalGradientDescentwithoutRegularization2(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
      # Run 3 steps of proximal gradient descent.
for _ in range(3):
update.run()
self.assertAllClose(np.array([0.1, 0.2]), self.evaluate(var0))
self.assertAllClose(np.array([3.91, 2.82]), self.evaluate(var1))
def testProximalGradientDescentWithL1(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.001, l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
      # Run 10 steps of proximal gradient descent.
for _ in range(10):
update.run()
self.assertAllClose(np.array([-1.988, -3.988001]), self.evaluate(var0))
self.assertAllClose(np.array([3.67, 2.37]), self.evaluate(var1))
def testProximalGradientDescentWithL1_L2(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.001, l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
      # Run 10 steps of proximal gradient descent.
for _ in range(10):
update.run()
self.assertAllClose(np.array([-0.0495, -0.0995]), self.evaluate(var0))
self.assertAllClose(np.array([-0.0045, -0.0095]), self.evaluate(var1))
def applyOptimizer(self, opt, steps=5):
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
    # Run the optimizer for a few steps.
for _ in range(steps):
update.run()
return self.evaluate(var0), self.evaluate(var1)
def testEquivGradientDescentwithoutRegularization(self):
with self.session(), self.test_scope():
val0, val1 = self.applyOptimizer(
proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0))
with self.session(), self.test_scope():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0))
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/compiler/tests/proximal_gradient_descent_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.compiler.tests.unstack."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class UnstackOpTest(xla_test.XLATestCase, parameterized.TestCase):
def _test(self, size):
with self.session() as sess:
x_tf = array_ops.placeholder(np.float32, shape=[size, 512])
with self.test_scope():
ret = array_ops.unstack(x_tf)
ret_vals = sess.run([ret], feed_dict={x_tf: np.zeros([size, 512])})
self.assertLen(ret_vals[0], size)
for ret_val in ret_vals[0]:
self.assertTrue(np.all(ret_val == 0.))
def testLarge2000(self):
self._test(2000)
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/compiler/tests/unstack_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops which manipulate lists of tensors via bridge."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.platform import test
class ListOpsTest(parameterized.TestCase, xla_test.XLATestCase):
def testElementShape(self):
with self.session() as sess, self.test_scope():
dim = array_ops.placeholder(dtypes.int32)
l = list_ops.empty_tensor_list(
element_shape=(dim, 15),
element_dtype=dtypes.float32,
max_num_elements=20)
e32 = list_ops.tensor_list_element_shape(l, shape_type=dtypes.int32)
e64 = list_ops.tensor_list_element_shape(l, shape_type=dtypes.int64)
self.assertAllEqual(sess.run(e32, {dim: 10}), (10, 15))
self.assertAllEqual(sess.run(e64, {dim: 7}), (7, 15))
def testPushPop(self):
with self.session() as sess, self.test_scope():
l = list_ops.empty_tensor_list(
element_shape=(7, 15),
element_dtype=dtypes.float32,
max_num_elements=10)
l = list_ops.tensor_list_push_back(
l, constant_op.constant(1.0, shape=(7, 15)))
l = list_ops.tensor_list_push_back(
l, constant_op.constant(2.0, shape=(7, 15)))
l, e2 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
_, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(sess.run(e2), 2.0 * np.ones((7, 15)))
self.assertAllEqual(sess.run(e1), 1.0 * np.ones((7, 15)))
def testDoNotConstantFoldVariants(self):
with self.session() as sess, self.test_scope():
val = array_ops.placeholder(dtype=dtypes.float32)
l = list_ops.empty_tensor_list(
element_shape=(7, 15),
element_dtype=dtypes.float32,
max_num_elements=10)
# Note: Pushing a Placeholder will force the constant folding code
# to build a Const node with a DT_VARIANT output. This tests that XLA
      # passes a cf_consider_fn which prevents folding such nodes.
l = list_ops.tensor_list_push_back(
l, array_ops.fill(value=val, dims=(7, 15)))
l = list_ops.tensor_list_push_back(
l, constant_op.constant(2.0, shape=(7, 15)))
l, e2 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
_, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(sess.run(e2, {val: 1.0}), 2.0 * np.ones((7, 15)))
self.assertAllEqual(sess.run(e1, {val: 1.0}), 1.0 * np.ones((7, 15)))
def testPushPopSeparateLists(self):
with self.session() as sess, self.test_scope():
l = list_ops.empty_tensor_list(
element_shape=[],
element_dtype=dtypes.float32,
max_num_elements=20)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l2 = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
l3 = list_ops.tensor_list_push_back(l, constant_op.constant(3.0))
_, e11 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
l2, e21 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
l2, e22 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
l3, e31 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
l3, e32 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
result = sess.run([e11, [e21, e22], [e31, e32]])
self.assertEqual(result, [1.0, [2.0, 1.0], [3.0, 1.0]])
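      # TensorList ops have value semantics here: each push_back returns a new
      # list, so l still holds [1.0] while l2 and l3 hold [1.0, 2.0] and
      # [1.0, 3.0], which is why the pops yield 1.0, [2.0, 1.0], [3.0, 1.0].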
def testEmptyTensorListNoMax(self):
with self.session() as sess, self.test_scope():
l = list_ops.empty_tensor_list(
element_shape=(7, 15), element_dtype=dtypes.float32)
l = list_ops.tensor_list_push_back(
l, constant_op.constant(1.0, shape=(7, 15)))
_, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Set the max number of elements"):
self.assertAllEqual(sess.run(e), 1.0 * np.ones((7, 15)))
def testEmptyTensorListMax(self):
with self.session() as sess, self.test_scope():
l = list_ops.empty_tensor_list(
element_shape=(10, 15), element_dtype=dtypes.float32,
max_num_elements=2)
l = list_ops.tensor_list_push_back(
l, array_ops.fill(value=3.0, dims=(10, 15)))
_, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(sess.run(e), 3.0 * np.ones((10, 15)))
def testListFromTensor(self):
with self.session(), self.test_scope():
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=[])
e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(e, 1.0)
l, e0 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(e0, 2.0)
l, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(e1, 1.0)
self.assertAllEqual(list_ops.tensor_list_length(l), 2)
def testGetSet(self):
with self.session(), self.test_scope():
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=[])
e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(e0, 1.0)
l = list_ops.tensor_list_set_item(l, 0, 3.0)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [3.0, 2.0])
def testSetDoesNotUpdatePushIndex(self):
with self.session(), self.test_scope():
l = list_ops.empty_tensor_list(
element_shape=[], element_dtype=dtypes.float32, max_num_elements=2)
# SetItem should not change the push index.
l = list_ops.tensor_list_set_item(l, 1, 3.)
l = list_ops.tensor_list_push_back(l, 5.)
l = list_ops.tensor_list_push_back(l, 7.)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [5., 7.])
def testGetSetReserved(self):
with self.session(), self.test_scope():
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=2)
e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(e0, 0.0)
l = list_ops.tensor_list_set_item(l, 0, 3.0)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [3.0, 0.0])
def testSetStackReservedUnknownElementShape(self):
with self.session(), self.test_scope():
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=2)
l = list_ops.tensor_list_set_item(l, 0, [3.0, 4.0])
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [[3.0, 4.0], [0., 0.]])
def testPushInEmptyListWithUnknownElementShape(self):
with self.session(), self.test_scope():
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=None, max_num_elements=2)
l = list_ops.tensor_list_push_back(l, [3.0, 4.0])
# Pushing an element with a different shape should raise an error.
with self.assertRaisesRegexp(errors.InternalError, "shape"):
l = list_ops.tensor_list_push_back(l, 5.)
self.evaluate(
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32))
def testGetSetReservedNonScalar(self):
with self.session() as sess, self.test_scope():
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32,
element_shape=(7, 15),
num_elements=2)
l = list_ops.tensor_list_set_item(
l, 0, constant_op.constant(1.0, shape=(7, 15)))
e1 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
e2 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
self.assertAllEqual(sess.run(e1), np.ones((7, 15)))
self.assertAllEqual(sess.run(e2), np.zeros((7, 15)))
def testStack(self):
with self.session(), self.test_scope():
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=2)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(e, 1.0)
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t.shape.as_list(), [None])
self.assertAllEqual(t, [1.0, 2.0])
@parameterized.named_parameters(
("FlatList", [1.0, 2.0, 3.0], [], [0, 2], [1.0, 3.0]),
("NestedList", [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]
], [2], [1], [[3.0, 4.0]]),
("EmptyIndices", [1.0, 2.0, 3.0], [], [], []),
)
def testGather(self, input_list, element_shape, indices, output):
with self.session(), self.test_scope():
tensor_list = list_ops.tensor_list_from_tensor(
input_list, element_shape=element_shape)
gather_t = list_ops.tensor_list_gather(
tensor_list, indices, element_dtype=dtypes.float32)
self.assertAllEqual(gather_t, output)
def testStackWithUninitializedTensors(self):
with self.session(), self.test_scope():
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=3)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [0., 0., 0.])
def testZerosLikeForTensorList(self):
with self.session(), self.test_scope():
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=2)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
z = array_ops.zeros_like(l)
z = list_ops.tensor_list_stack(z, element_dtype=dtypes.float32)
self.assertAllEqual(z.shape.as_list(), [None])
self.assertAllEqual(z, [0.0, 0.0])
if __name__ == "__main__":
os.environ["TF_XLA_FLAGS"] = ("--tf_xla_min_cluster_size=2 " +
os.environ.get("TF_XLA_FLAGS", ""))
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/compiler/tests/tensor_list_ops_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Cholesky."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class CholeskyOpTest(xla_test.XLATestCase):
# Cholesky defined for float64, float32, complex64, complex128
# (https://www.tensorflow.org/api_docs/python/tf/cholesky)
@property
def float_types(self):
return set(super(CholeskyOpTest, self).float_types).intersection(
(np.float64, np.float32, np.complex64, np.complex128))
def _verifyCholeskyBase(self, sess, placeholder, x, chol, verification, atol):
chol_np, verification_np = sess.run([chol, verification], {placeholder: x})
self.assertAllClose(x, verification_np, atol=atol)
self.assertShapeEqual(x, chol)
# Check that the cholesky is lower triangular, and has positive diagonal
# elements.
if chol_np.shape[-1] > 0:
chol_reshaped = np.reshape(chol_np, (-1, chol_np.shape[-2],
chol_np.shape[-1]))
for chol_matrix in chol_reshaped:
self.assertAllClose(chol_matrix, np.tril(chol_matrix), atol=atol)
self.assertTrue((np.diag(chol_matrix) > 0.0).all())
def _verifyCholesky(self, x, atol=1e-6):
# Verify that LL^T == x.
with self.session() as sess:
placeholder = array_ops.placeholder(
dtypes.as_dtype(x.dtype), shape=x.shape)
with self.test_scope():
chol = linalg_ops.cholesky(placeholder)
with ops.device('/cpu:0'):
verification = math_ops.matmul(chol, chol, adjoint_b=True)
self._verifyCholeskyBase(sess, placeholder, x, chol, verification, atol)
def testBasic(self):
data = np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]])
for dtype in self.float_types:
self._verifyCholesky(data.astype(dtype))
def testBatch(self):
for dtype in self.float_types:
simple_array = np.array(
[[[1., 0.], [0., 5.]]], dtype=dtype) # shape (1, 2, 2)
self._verifyCholesky(simple_array)
self._verifyCholesky(np.vstack((simple_array, simple_array)))
odd_sized_array = np.array(
[[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]], dtype=dtype)
self._verifyCholesky(np.vstack((odd_sized_array, odd_sized_array)))
# Generate random positive-definite matrices.
matrices = np.random.rand(10, 5, 5).astype(dtype)
for i in xrange(10):
matrices[i] = np.dot(matrices[i].T, matrices[i])
self._verifyCholesky(matrices, atol=1e-4)
def testNonSquareMatrix(self):
for dtype in self.float_types:
with self.assertRaises(ValueError):
linalg_ops.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]], dtype=dtype))
with self.assertRaises(ValueError):
linalg_ops.cholesky(
np.array(
[[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]]],
dtype=dtype))
def testWrongDimensions(self):
for dtype in self.float_types:
tensor3 = constant_op.constant([1., 2.], dtype=dtype)
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
def testLarge2000x2000(self):
n = 2000
shape = (n, n)
data = np.ones(shape).astype(np.float32) / (2.0 * n) + np.diag(
np.ones(n).astype(np.float32))
self._verifyCholesky(data, atol=1e-4)
def testMatrixConditionNumbers(self):
for dtype in self.float_types:
condition_number = 1000
size = 20
# Generate random positive-definite symmetric matrices, and take their
# Eigendecomposition.
matrix = np.random.rand(size, size)
matrix = np.dot(matrix.T, matrix)
_, w = np.linalg.eigh(matrix)
# Build new Eigenvalues exponentially distributed between 1 and
# 1/condition_number
v = np.exp(-np.log(condition_number) * np.linspace(0, size, size) / size)
matrix = np.dot(np.dot(w, np.diag(v)), w.T).astype(dtype)
self._verifyCholesky(matrix, atol=1e-4)
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/compiler/tests/cholesky_op_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class QrOpTest(xla_test.XLATestCase, parameterized.TestCase):
def AdjustedNorm(self, x):
"""Computes the norm of matrices in 'x', adjusted for dimension and type."""
norm = np.linalg.norm(x, axis=(-2, -1))
return norm / (max(x.shape[-2:]) * np.finfo(x.dtype).eps)
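  # The value returned above is the norm expressed in units of
  # (largest matrix dimension) * machine epsilon, so the thresholds used in
  # the checks below (2400, 3200, 4800) are tolerances in those units rather
  # than absolute errors.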
def CompareOrthogonal(self, x, y, rank):
# We only compare the first 'rank' orthogonal vectors since the
# remainder form an arbitrary orthonormal basis for the
# (row- or column-) null space, whose exact value depends on
# implementation details. Notice that since we check that the
# matrices of singular vectors are unitary elsewhere, we do
# implicitly test that the trailing vectors of x and y span the
# same space.
x = x[..., 0:rank]
y = y[..., 0:rank]
# Q is only unique up to sign (complex phase factor for complex matrices),
# so we normalize the sign first.
sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
x *= phases
self.assertTrue(np.all(self.AdjustedNorm(x - y) < 2400.0))
def CheckApproximation(self, a, q, r):
# Tests that a ~= q*r.
precision = self.AdjustedNorm(a - np.matmul(q, r))
self.assertTrue(np.all(precision < 3200.0))
def CheckUnitary(self, x):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
xx = math_ops.matmul(x, x, adjoint_a=True)
identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
precision = self.AdjustedNorm(xx.eval() - self.evaluate(identity))
self.assertTrue(np.all(precision < 4800.0))
def _test(self, dtype, shape, full_matrices):
np.random.seed(1)
x_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype)
with self.session() as sess:
x_tf = array_ops.placeholder(dtype)
with self.test_scope():
q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices)
q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np})
q_dims = q_tf_val.shape
np_q = np.ndarray(q_dims, dtype)
np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))
new_first_dim = np_q_reshape.shape[0]
x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))
for i in range(new_first_dim):
if full_matrices:
np_q_reshape[i, :, :], _ = np.linalg.qr(
x_reshape[i, :, :], mode="complete")
else:
np_q_reshape[i, :, :], _ = np.linalg.qr(
x_reshape[i, :, :], mode="reduced")
np_q = np.reshape(np_q_reshape, q_dims)
self.CompareOrthogonal(np_q, q_tf_val, min(shape[-2:]))
self.CheckApproximation(x_np, q_tf_val, r_tf_val)
self.CheckUnitary(q_tf_val)
SIZES = [1, 2, 5, 10, 32, 100, 300]
DTYPES = [np.float32]
PARAMS = itertools.product(SIZES, SIZES, DTYPES)
@parameterized.parameters(*PARAMS)
def testQR(self, rows, cols, dtype):
# TODO(b/111317468): Test other types.
for full_matrices in [True, False]:
# Only tests the (3, 2) case for small numbers of rows/columns.
for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
self._test(dtype, batch_dims + (rows, cols), full_matrices)
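# A minimal standalone numpy sketch of the phase normalization performed in
# CompareOrthogonal above: Q is only determined up to a per-column sign (a
# unit-modulus phase for complex inputs), so two valid QR factorizations are
# compared only after aligning those signs. The helper below is illustrative
# only and is not used by the test.
def _demo_phase_normalization():
  rng = np.random.RandomState(0)
  a = rng.rand(5, 3)
  q, r = np.linalg.qr(a)
  # Flipping the sign of a column of Q (and the matching row of R) yields
  # another equally valid factorization of the same matrix.
  flip = np.diag([1.0, -1.0, 1.0])
  q2, r2 = q.dot(flip), flip.dot(r)
  assert np.allclose(q2.dot(r2), a)
  # A direct comparison of q and q2 fails, but normalizing the per-column
  # phase (the same trick CompareOrthogonal uses) recovers q exactly.
  ratios = np.sum(np.divide(q, q2), -2, keepdims=True)
  phases = np.divide(ratios, np.abs(ratios))
  assert np.allclose(q2 * phases, q)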
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/qr_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Ftrl optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import gradient_descent
class FtrlOptimizerTest(xla_test.XLATestCase):
def initVariableAndGradient(self, dtype):
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.02, 0.04], dtype=dtype)
return var0, var1, grads0, grads1
def equivAdagradTest_FtrlPart(self, steps, dtype):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
opt = ftrl.FtrlOptimizer(
3.0,
learning_rate_power=-0.5, # using Adagrad learning rate
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run Ftrl for a few steps
for _ in range(steps):
ftrl_update.run()
return self.evaluate(var0), self.evaluate(var1)
def equivAdagradTest_AdagradPart(self, steps, dtype):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
adagrad_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run Adagrad for a few steps
for _ in range(steps):
adagrad_update.run()
return self.evaluate(var0), self.evaluate(var1)
def equivGradientDescentTest_FtrlPart(self, steps, dtype):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
opt = ftrl.FtrlOptimizer(
3.0,
        learning_rate_power=-0.0,  # using a fixed learning rate
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run Ftrl for a few steps
for _ in range(steps):
ftrl_update.run()
return self.evaluate(var0), self.evaluate(var1)
def equivGradientDescentTest_GradientDescentPart(self, steps, dtype):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
opt = gradient_descent.GradientDescentOptimizer(3.0, name="sgd")
sgd_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run GradientDescent for a few steps
for _ in range(steps):
sgd_update.run()
return self.evaluate(var0), self.evaluate(var1)
def testFtrlwithoutRegularization(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run 3 steps FTRL
for _ in range(3):
ftrl_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-2.60260963, -4.29698515]),
self.evaluate(var0),
float_rtol=1e-4,
half_rtol=1e-2)
self.assertAllCloseAccordingToType(
np.array([-0.28432083, -0.56694895]),
self.evaluate(var1),
float_rtol=1e-5,
half_rtol=1e-2)
def testFtrlwithoutRegularization2(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 3 steps FTRL
for _ in range(3):
ftrl_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-2.55607247, -3.98729396]),
self.evaluate(var0),
1e-5,
1e-5,
float_rtol=1e-4)
self.assertAllCloseAccordingToType(
np.array([-0.28232238, -0.56096673]), self.evaluate(var1), 1e-5,
1e-5)
def testFtrlWithL1(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps FTRL
for _ in range(10):
ftrl_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-7.66718769, -10.91273689]),
self.evaluate(var0),
rtol=1e-4,
bfloat16_rtol=1e-1,
bfloat16_atol=1e-1)
self.assertAllCloseAccordingToType(
np.array([-0.93460727, -1.86147261]),
self.evaluate(var1),
rtol=1e-4)
def testFtrlWithL1_L2(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps FTRL
for _ in range(10):
ftrl_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-0.24059935, -0.46829352]),
self.evaluate(var0),
rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array([-0.02406147, -0.04830509]),
self.evaluate(var1),
rtol=1e-5)
def testFtrlWithL1_L2_L2Shrinkage(self):
"""Test the new FTRL op with support for l2 shrinkage.
The addition of this parameter which places a constant pressure on weights
towards the origin causes the gradient descent trajectory to differ. The
weights will tend to have smaller magnitudes with this parameter set.
"""
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
# Run 10 steps FTRL
for _ in range(10):
ftrl_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-0.22578996, -0.44345799]),
self.evaluate(var0),
rtol=1e-4)
self.assertAllCloseAccordingToType(
np.array([-0.14378493, -0.13229476]),
self.evaluate(var1),
rtol=1e-4)
def testFtrlWithL2ShrinkageDoesNotChangeLrSchedule(self):
"""Verifies that l2 shrinkage in FTRL does not change lr schedule."""
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.1, 0.2], dtype=dtype)
opt0 = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
opt1 = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update0 = opt0.apply_gradients([(grads0, var0)])
update1 = opt1.apply_gradients([(grads1, var1)])
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var1))
# Run 10 steps FTRL
for _ in range(10):
update0.run()
update1.run()
# var0 is experiencing L2 shrinkage so it should be smaller than var1
# in magnitude.
self.assertTrue((var0.eval()**2 < self.evaluate(var1)**2).all())
accum0 = list(opt0._slots["accum"].values())[0].eval()
accum1 = list(opt1._slots["accum"].values())[0].eval()
# L2 shrinkage should not change how we update grad accumulator.
self.assertAllCloseAccordingToType(accum0, accum1)
  # When variables are initialized to zero, FTRL-Proximal has two properties:
  # 1. Without L1 and L2 but with a fixed learning rate, FTRL-Proximal is
  #    identical to GradientDescent.
  # 2. Without L1 and L2 but with an adaptive learning rate, FTRL-Proximal is
  #    identical to Adagrad.
  # Based on these two properties, we test whether our implementation of
  # FTRL-Proximal performs the same updates as Adagrad or GradientDescent
  # (see also the standalone numpy sketch after this class).
def testEquivAdagradwithoutRegularization(self):
steps = 5
for dtype in self.float_types:
with self.session(), self.test_scope():
val0, val1 = self.equivAdagradTest_FtrlPart(steps, dtype)
with self.session(), self.test_scope():
val2, val3 = self.equivAdagradTest_AdagradPart(steps, dtype)
self.assertAllCloseAccordingToType(val0, val2, rtol=1e-4, half_rtol=1e-2)
self.assertAllCloseAccordingToType(val1, val3, rtol=1e-4, half_rtol=1e-2)
def testEquivGradientDescentwithoutRegularization(self):
steps = 5
for dtype in self.float_types:
with self.session(), self.test_scope():
val0, val1 = self.equivGradientDescentTest_FtrlPart(steps, dtype)
with self.session(), self.test_scope():
val2, val3 = self.equivGradientDescentTest_GradientDescentPart(
steps, dtype)
self.assertAllCloseAccordingToType(val0, val2, rtol=1e-5)
self.assertAllCloseAccordingToType(val1, val3, rtol=1e-5)
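# A minimal pure-numpy sketch of the per-coordinate FTRL-Proximal recursion
# without L1/L2 regularization, as described in the FTRL-Proximal paper (the
# exact TF kernel may differ in implementation details). Starting from w = 0
# it reduces to plain gradient descent when learning_rate_power = 0 and to
# Adagrad when learning_rate_power = -0.5, which are the two properties the
# equivalence tests above rely on. The helpers are illustrative only.
def _ftrl_no_reg(grads, lr=3.0, power=-0.5, init_accum=0.1):
  n = np.full_like(grads[0], init_accum)  # sum of squared gradients
  z = np.zeros_like(grads[0])             # accumulated linear term
  w = np.zeros_like(grads[0])
  for g in grads:
    n_new = n + g * g
    # sigma_t = 1/eta_t - 1/eta_{t-1}, with eta_t = lr * n_t**power.
    sigma = (n_new**-power - n**-power) / lr
    z += g - sigma * w
    n = n_new
    w = -lr * z / n**-power  # closed-form minimizer when L1 = L2 = 0
  return w
def _demo_ftrl_equivalences(steps=5):
  g = np.array([0.1, 0.2])
  grads = [g] * steps
  # learning_rate_power = 0.0  ->  gradient descent starting from w = 0.
  assert np.allclose(_ftrl_no_reg(grads, power=0.0), -3.0 * steps * g)
  # learning_rate_power = -0.5  ->  Adagrad starting from w = 0.
  accum, w = np.full_like(g, 0.1), np.zeros_like(g)
  for grad in grads:
    accum += grad * grad
    w -= 3.0 * grad / np.sqrt(accum)
  assert np.allclose(_ftrl_no_reg(grads, power=-0.5), w)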
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/ftrl_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.MatrixTriangularSolve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def MakePlaceholder(x):
return array_ops.placeholder(dtypes.as_dtype(x.dtype), shape=x.shape)
class MatrixTriangularSolveOpTest(xla_test.XLATestCase):
# MatrixTriangularSolve defined for float64, float32, complex64, complex128
# (https://www.tensorflow.org/api_docs/python/tf/matrix_triangular_solve)
@property
def float_types(self):
return set(super(MatrixTriangularSolveOpTest,
self).float_types).intersection(
(np.float64, np.float32, np.complex64, np.complex128))
def _VerifyTriangularSolveBase(self, sess, placeholder_a, placeholder_ca,
placeholder_b, a, clean_a, b, verification,
atol):
feed_dict = {placeholder_a: a, placeholder_ca: clean_a, placeholder_b: b}
verification_np = sess.run(verification, feed_dict)
self.assertAllClose(b, verification_np, atol=atol)
def _VerifyTriangularSolve(self, a, b, lower, adjoint, atol):
clean_a = np.tril(a) if lower else np.triu(a)
with self.session() as sess:
placeholder_a = MakePlaceholder(a)
placeholder_ca = MakePlaceholder(clean_a)
placeholder_b = MakePlaceholder(b)
with self.test_scope():
x = linalg_ops.matrix_triangular_solve(
placeholder_a, placeholder_b, lower=lower, adjoint=adjoint)
with ops.device('/cpu:0'):
verification = math_ops.matmul(placeholder_ca, x, adjoint_a=adjoint)
self._VerifyTriangularSolveBase(sess, placeholder_a, placeholder_ca,
placeholder_b, a, clean_a, b,
verification, atol)
def _VerifyTriangularSolveCombo(self, a, b, atol=1e-4):
transp = lambda x: np.swapaxes(x, -1, -2)
for lower, adjoint in itertools.product([True, False], repeat=2):
self._VerifyTriangularSolve(
a if lower else transp(a), b, lower, adjoint, atol)
def testBasic(self):
rng = np.random.RandomState(0)
a = np.tril(rng.randn(5, 5))
b = rng.randn(5, 7)
for dtype in self.float_types:
self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))
def testBasicNotActuallyTriangular(self):
rng = np.random.RandomState(0)
a = rng.randn(5, 5) # the `a` matrix is not lower-triangular
b = rng.randn(5, 7)
for dtype in self.float_types:
self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))
def testBasicComplexDtypes(self):
rng = np.random.RandomState(0)
a = np.tril(rng.randn(5, 5) + rng.randn(5, 5) * 1j)
b = rng.randn(5, 7) + rng.randn(5, 7) * 1j
for dtype in self.complex_types:
self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))
def testBatch(self):
rng = np.random.RandomState(0)
shapes = [((4, 3, 3), (4, 3, 5)), ((1, 2, 2), (1, 2, 1)),
((1, 1, 1), (1, 1, 2)), ((2, 3, 4, 4), (2, 3, 4, 1))]
tuples = itertools.product(self.float_types, shapes)
for dtype, (a_shape, b_shape) in tuples:
n = a_shape[-1]
a = np.tril(rng.rand(*a_shape) - 0.5) / (2.0 * n) + np.eye(n)
b = rng.randn(*b_shape)
self._VerifyTriangularSolveCombo(
a.astype(dtype), b.astype(dtype), atol=1e-3)
def testLarge(self):
n = 1024
rng = np.random.RandomState(0)
a = np.tril(rng.rand(n, n) - 0.5) / (2.0 * n) + np.eye(n)
b = rng.randn(n, n)
self._VerifyTriangularSolve(
a.astype(np.float32), b.astype(np.float32), True, False, 1e-4)
def testNonSquareCoefficientMatrix(self):
rng = np.random.RandomState(0)
for dtype in self.float_types:
a = rng.randn(3, 4).astype(dtype)
b = rng.randn(4, 4).astype(dtype)
with self.assertRaises(ValueError):
linalg_ops.matrix_triangular_solve(a, b)
with self.assertRaises(ValueError):
linalg_ops.matrix_triangular_solve(a, b)
def testWrongDimensions(self):
randn = np.random.RandomState(0).randn
for dtype in self.float_types:
lhs = constant_op.constant(randn(3, 3), dtype=dtype)
rhs = constant_op.constant(randn(4, 3), dtype=dtype)
with self.assertRaises(ValueError):
linalg_ops.matrix_triangular_solve(lhs, rhs)
with self.assertRaises(ValueError):
linalg_ops.matrix_triangular_solve(lhs, rhs)
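# A minimal pure-numpy sketch of the verification strategy used above: keep
# only the relevant triangle of `a`, solve the system, then multiply back and
# compare against `b`. np.linalg.solve stands in for matrix_triangular_solve;
# the helper is illustrative only and is not used by the tests.
def _demo_triangular_verification():
  rng = np.random.RandomState(0)
  a = np.tril(rng.randn(5, 5)) + 5.0 * np.eye(5)  # well-conditioned lower tri
  b = rng.randn(5, 7)
  x = np.linalg.solve(a, b)
  assert np.allclose(a.dot(x), b)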
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/matrix_triangular_solve_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Proximal Adagrad optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import proximal_adagrad
class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
def testResourceProximalAdagradwithoutRegularization(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run 3 steps Proximal Adagrad.
for _ in range(3):
update.run()
self.assertAllClose(
np.array([-2.60260963, -4.29698515]), self.evaluate(var0))
self.assertAllClose(
np.array([-0.28432083, -0.56694895]), self.evaluate(var1))
opt_vars = opt.variables()
self.assertStartsWith(opt_vars[0].name, var0._shared_name)
self.assertStartsWith(opt_vars[1].name, var1._shared_name)
self.assertEqual(2, len(opt_vars))
def testProximalAdagradwithoutRegularization2(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 3 steps Proximal Adagrad.
for _ in range(3):
update.run()
self.assertAllClose(np.array([-1.60261, -2.296985]), self.evaluate(var0))
self.assertAllClose(np.array([3.715679, 2.433051]), self.evaluate(var1))
def testProximalAdagradWithL1(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps Proximal Adagrad
for _ in range(10):
update.run()
self.assertAllClose(np.array([-6.663634, -9.190331]), self.evaluate(var0))
self.assertAllClose(np.array([2.959304, 1.029232]), self.evaluate(var1))
def testProximalAdagradWithL1_L2(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps Proximal Adagrad.
for _ in range(10):
update.run()
self.assertAllClose(np.array([-0.0495, -0.0995]), self.evaluate(var0))
self.assertAllClose(np.array([-0.0045, -0.0095]), self.evaluate(var1))
def applyOptimizer(self, opt, steps=5):
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run ProximalAdagrad for a few steps
for _ in range(steps):
update.run()
return self.evaluate(var0), self.evaluate(var1)
def testEquivAdagradwithoutRegularization(self):
with self.session(), self.test_scope():
val0, val1 = self.applyOptimizer(
proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0))
with self.session(), self.test_scope():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(
3.0, initial_accumulator_value=0.1))
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
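# A conceptual pure-numpy sketch of a proximal Adagrad step: an ordinary
# Adagrad update followed by a proximal (soft-thresholding) step for the L1
# term and a shrinkage factor for the L2 term. This is the textbook
# proximal-gradient form, not necessarily the exact TF kernel, but it shows
# why the optimizer degenerates to plain Adagrad when l1 = l2 = 0, which is
# what testEquivAdagradwithoutRegularization checks above.
def _proximal_adagrad_step(w, accum, g, lr=3.0, l1=0.0, l2=0.0):
  accum = accum + g * g
  step = lr / np.sqrt(accum)
  v = w - step * g  # plain Adagrad step
  w = np.sign(v) * np.maximum(np.abs(v) - step * l1, 0.0) / (1.0 + step * l2)
  return w, accum
def _demo_proximal_identity():
  w, accum = np.array([1.0, 2.0]), np.array([0.1, 0.1])
  g = np.array([0.1, 0.2])
  no_reg, _ = _proximal_adagrad_step(w, accum, g)
  adagrad = w - 3.0 * g / np.sqrt(accum + g * g)
  assert np.allclose(no_reg, adagrad)  # the proximal step is the identity here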
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/proximal_adagrad_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the behavior of the auto-compilation pass."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
class ClusteringTest(xla_test.XLATestCase):
def testAdd(self):
val1 = np.array([4, 3, 2, 1], dtype=np.float32)
val2 = np.array([5, 6, 7, 8], dtype=np.float32)
expected = val1 + val2
with self.session():
with self.test_scope():
input1 = constant_op.constant(val1, name="const1")
input2 = constant_op.constant(val2, name="const2")
output = math_ops.add(input1, input2)
result = self.evaluate(output)
self.assertAllClose(result, expected, rtol=1e-3)
def testAddFromCpuMultiple(self):
val1 = np.array([4, 3, 2, 1]).astype(np.float32)
val2 = np.array([5, 6, 7, 8]).astype(np.float32)
expected = val1 + val2
with self.session():
with ops.device(CPU_DEVICE):
input1 = constant_op.constant(val1, name="const1")
input2 = constant_op.constant(val2, name="const2")
with self.test_scope():
output = math_ops.add(input1, input2)
for _ in xrange(10):
result = self.evaluate(output)
self.assertAllClose(result, expected, rtol=1e-3)
def testDeadlock(self):
# Builds a graph of the form:
# x -> y
# | \
# z -> w
# where x and z are placed on the CPU and y and w are placed on the XLA
# device. If y and w are clustered for compilation, then the graph will
# deadlock since the clustered graph will contain a self-loop.
with self.session() as sess:
with ops.device(CPU_DEVICE):
x = array_ops.placeholder(dtypes.float32, [2])
with self.test_scope():
y = x * 2
with ops.device(CPU_DEVICE):
z = y * y
with self.test_scope():
w = y + z
result = sess.run(w, {x: [1.5, 0.5]})
self.assertAllClose(result, [12., 2.], rtol=1e-3)
def testHostMemory(self):
with self.session() as sess:
x = array_ops.placeholder(dtypes.int32)
with self.test_scope():
y = x + 1
with ops.device(CPU_DEVICE):
# Place a computation on the CPU, so y and w cannot be merged into the
# same JIT compilation.
z = y * 2
with self.test_scope():
# Argument 'y' is a non-constant output of a previous cluster. Make sure
# it is properly copied to host memory so it can be used as a
# compile-time constant input for this cluster.
w = array_ops.reshape(z, y)
result = sess.run(w, {x: [1, 0]})
expected = np.array([[4], [2]], dtype=np.int32)
self.assertAllClose(expected, result, rtol=1e-3)
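# A worked numpy check of the expected value in testHostMemory above: with
# x = [1, 0] we get y = x + 1 = [2, 1] and z = 2 * y = [4, 2]; y is then used
# as the shape argument of the reshape, so z becomes a [2, 1] tensor. The
# helper is illustrative only and is not used by the tests.
def _demo_host_memory_expected():
  x = np.array([1, 0], dtype=np.int32)
  y = x + 1
  z = y * 2
  w = z.reshape(tuple(y))  # y == [2, 1] acts as the target shape
  assert np.array_equal(w, np.array([[4], [2]], dtype=np.int32))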
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/clustering_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA op wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.compiler.xla import xla_data_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class XlaOpsNumericalTest(xla_test.XLATestCase, parameterized.TestCase):
def _assertOpOutputMatchesExpected(self, op, args, expected,
equality_fn=None):
with self.session() as session:
with self.test_scope():
placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
for arg in args
]
feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
output = op(*placeholders)
result = session.run(output, feeds)
if not equality_fn:
equality_fn = self.assertAllClose
equality_fn(result, expected, rtol=1e-3)
def testAdd(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
xla.add,
args=(np.array([1, 2, 3], dtype=dtype),
np.array([4, 5, 6], dtype=dtype)),
expected=np.array([5, 7, 9], dtype=dtype))
self._assertOpOutputMatchesExpected(
lambda x, y: xla.add(x, y, broadcast_dims=(0,)),
args=(np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([7, 11], dtype=dtype)),
expected=np.array([[8, 9], [14, 15]], dtype=dtype))
self._assertOpOutputMatchesExpected(
lambda x, y: xla.add(x, y, broadcast_dims=(1,)),
args=(np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([7, 11], dtype=dtype)),
expected=np.array([[8, 13], [10, 15]], dtype=dtype))
def testBroadcast(self):
for dtype in self.numeric_types:
v = np.arange(4, dtype=np.int32).astype(dtype).reshape([2, 2])
self._assertOpOutputMatchesExpected(
lambda x: xla.broadcast(x, (7, 42)),
args=(v,),
expected=np.tile(v, (7, 42, 1, 1)))
def testShiftRightLogical(self):
self._assertOpOutputMatchesExpected(
xla.shift_right_logical,
args=(np.array([-1, 16], dtype=np.int32), np.int32(4)),
expected=np.array([0x0FFFFFFF, 1], dtype=np.int32))
self._assertOpOutputMatchesExpected(
xla.shift_right_logical,
args=(np.array([0xFFFFFFFF, 16], dtype=np.uint32), np.uint32(4)),
expected=np.array([0x0FFFFFFF, 1], dtype=np.uint32))
def testShiftRightArithmetic(self):
self._assertOpOutputMatchesExpected(
xla.shift_right_arithmetic,
args=(np.array([-1, 16], dtype=np.int32), np.int32(4)),
expected=np.array([-1, 1], dtype=np.int32))
self._assertOpOutputMatchesExpected(
xla.shift_right_arithmetic,
args=(np.array([0xFFFFFFFF, 16], dtype=np.uint32), np.uint32(4)),
expected=np.array([0xFFFFFFFF, 1], dtype=np.uint32))
PRECISION_VALUES = (None, xla_data_pb2.PrecisionConfig.DEFAULT,
xla_data_pb2.PrecisionConfig.HIGH,
xla_data_pb2.PrecisionConfig.HIGHEST)
@parameterized.parameters(*PRECISION_VALUES)
def testConv(self, precision):
for dtype in set(self.float_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
def conv_1d_fn(lhs, rhs):
dnums = xla_data_pb2.ConvolutionDimensionNumbers()
num_spatial_dims = 1
dnums.input_batch_dimension = 0
dnums.input_feature_dimension = 1
dnums.output_batch_dimension = 0
dnums.output_feature_dimension = 1
dnums.kernel_output_feature_dimension = 0
dnums.kernel_input_feature_dimension = 1
dnums.input_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
dnums.kernel_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
dnums.output_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
precision_config = None
if precision:
precision_config = xla_data_pb2.PrecisionConfig()
precision_config.operand_precision.extend([precision, precision])
return xla.conv(
lhs,
rhs,
window_strides=(1,),
padding=((2, 1),),
lhs_dilation=(1,),
rhs_dilation=(2,),
dimension_numbers=dnums)
self._assertOpOutputMatchesExpected(
conv_1d_fn,
args=(
np.array([[[3, 4, 5, 6]]], dtype=dtype),
np.array([[[-2, -3]]], dtype=dtype),
),
expected=np.array([[[-9, -12, -21, -26, -10]]], dtype=dtype))
@parameterized.parameters(*PRECISION_VALUES)
def testDotGeneral(self, precision):
for dtype in self.float_types:
def dot_fn(lhs, rhs):
dnums = xla_data_pb2.DotDimensionNumbers()
dnums.lhs_contracting_dimensions.append(2)
dnums.rhs_contracting_dimensions.append(1)
dnums.lhs_batch_dimensions.append(0)
dnums.rhs_batch_dimensions.append(0)
precision_config = None
if precision:
precision_config = xla_data_pb2.PrecisionConfig()
precision_config.operand_precision.extend([precision, precision])
return xla.dot_general(
lhs,
rhs,
dimension_numbers=dnums,
precision_config=precision_config)
lhs = np.array(
[
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
], dtype=dtype)
rhs = np.array(
[
[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]],
], dtype=dtype)
self._assertOpOutputMatchesExpected(
dot_fn,
args=(lhs, rhs),
expected=np.array(
[
[[9, 12, 15], [19, 26, 33]],
[[95, 106, 117], [129, 144, 159]],
],
dtype=dtype))
def testNeg(self):
for dtype in self.numeric_types - {np.uint8, np.int8}:
self._assertOpOutputMatchesExpected(
xla.neg,
args=(np.array([1, 2, 3], dtype=dtype),),
expected=np.array([-1, -2, -3], dtype=dtype))
def testPad(self):
for dtype in self.numeric_types:
def pad_fn(x):
return xla.pad(
x,
padding_value=7,
padding_low=[2, 1],
padding_high=[1, 2],
padding_interior=[1, 0])
self._assertOpOutputMatchesExpected(
pad_fn,
args=(np.arange(4, dtype=np.int32).astype(dtype).reshape([2, 2]),),
expected=np.array(
[[7, 7, 7, 7, 7], [7, 7, 7, 7, 7], [7, 0, 1, 7, 7],
[7, 7, 7, 7, 7], [7, 2, 3, 7, 7], [7, 7, 7, 7, 7]],
dtype=dtype))
def testReduce(self):
for dtype in set(self.numeric_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
@function.Defun(dtype, dtype)
def sum_reducer(x, y):
return x + y
def sum_reduction(dims):
def fn(x):
return xla.reduce(
x, init_value=0, dimensions_to_reduce=dims, reducer=sum_reducer)
return fn
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]))
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[0]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.array([12, 15, 18, 21], dtype=dtype))
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[1]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.array([6, 22, 38], dtype=dtype))
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[0, 1]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=dtype(66))
@function.Defun(dtype, dtype)
def mul_reducer(x, y):
return x * y
def mul_reduction(dims):
def fn(x):
return xla.reduce(
x, init_value=1, dimensions_to_reduce=dims, reducer=mul_reducer)
return fn
self._assertOpOutputMatchesExpected(
mul_reduction(dims=[0]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.array([0, 45, 120, 231], dtype=dtype))
def testSelectAndScatter(self):
for dtype in set(self.numeric_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
@function.Defun(dtype, dtype)
def add_scatter(x, y):
return x + y
@function.Defun(dtype, dtype)
def ge_select(x, y):
return x >= y
def test_fn(operand, source):
return xla.select_and_scatter(
operand,
window_dimensions=[2, 3, 1, 1],
window_strides=[2, 2, 1, 1],
padding=[[0, 0]] * 4,
source=source,
init_value=0,
select=ge_select,
scatter=add_scatter)
self._assertOpOutputMatchesExpected(
test_fn,
args=(np.array(
[[7, 2, 5, 3, 8], [3, 8, 9, 3, 4], [1, 5, 7, 5, 6],
[0, 6, 2, 10, 2]],
dtype=dtype).reshape((4, 5, 1, 1)),
np.array([[2, 6], [3, 1]], dtype=dtype).reshape((2, 2, 1, 1))),
expected=np.array(
[[0, 0, 0, 0, 0], [0, 0, 8, 0, 0], [0, 0, 3, 0, 0],
[0, 0, 0, 1, 0]],
dtype=dtype).reshape((4, 5, 1, 1)))
def testTranspose(self):
for dtype in self.numeric_types:
v = np.arange(4, dtype=np.int32).astype(dtype).reshape([2, 2])
self._assertOpOutputMatchesExpected(
lambda x: xla.transpose(x, [1, 0]), args=(v,), expected=v.T)
def testDynamicSlice(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
xla.dynamic_slice,
args=(np.arange(1000,
dtype=np.int32).astype(dtype).reshape([10, 10, 10]),
np.array([5, 7, 3]), np.array([2, 3, 2])),
expected=np.array(
np.array([[[573, 574], [583, 584], [593, 594]],
[[673, 674], [683, 684], [693, 694]]]),
dtype=dtype))
def testDynamicSliceWithIncorrectStartIndicesShape(self):
with self.session() as session:
with self.test_scope():
output = xla.dynamic_slice(
np.arange(1000, dtype=np.int32).reshape([10, 10, 10]),
np.array([5, 7]), np.array([2, 3, 4]))
with self.assertRaises(errors.InvalidArgumentError) as invalid_arg_error:
session.run(output)
self.assertRegexpMatches(
invalid_arg_error.exception.message,
(r'start_indices must be a vector with length equal to input rank, '
r'but input rank is 3 and start_indices has shape \[2\].*'))
def testDynamicSliceWithIncorrectSizeIndicesShape(self):
with self.session() as session:
with self.test_scope():
output = xla.dynamic_slice(
np.arange(1000, dtype=np.int32).reshape([10, 10, 10]),
np.array([5, 7, 3]), np.array([2, 3]))
with self.assertRaises(errors.InvalidArgumentError) as invalid_arg_error:
session.run(output)
self.assertRegexpMatches(
invalid_arg_error.exception.message,
(r'size_indices must be a vector with length equal to input rank, '
r'but input rank is 3 and size_indices has shape \[2\].*'))
class XlaOpsShapeInferenceTest(xla_test.XLATestCase, parameterized.TestCase):
def testDotDifferentNumberOfContractingDimensions(self):
a = array_ops.placeholder(np.float32, shape=(4, 4, 4, 4))
b = array_ops.placeholder(np.float32, shape=(4, 4, 4, 4))
dim_nums = xla_data_pb2.DotDimensionNumbers()
dim_nums.lhs_contracting_dimensions.append(2)
dim_nums.rhs_contracting_dimensions.append(2)
dim_nums.rhs_contracting_dimensions.append(3)
with self.assertRaisesRegex(ValueError,
'Must specify the same number of contracting '
'dimensions for lhs and rhs. Got: 1 and 2'):
xla.dot_general(a, b, dim_nums)
def testDotDifferentContractingDimensionsSizes(self):
a = array_ops.placeholder(np.float32, shape=(2, 2, 2, 2))
b = array_ops.placeholder(np.float32, shape=(4, 4, 4, 4))
dim_nums = xla_data_pb2.DotDimensionNumbers()
dim_nums.lhs_contracting_dimensions.append(2)
dim_nums.rhs_contracting_dimensions.append(3)
with self.assertRaisesRegex(ValueError,
'Contracting dimension sizes do not match. '
'Got: 2 and 4'):
xla.dot_general(a, b, dim_nums)
def testDotDifferentNumberOfBatchDimensions(self):
a = array_ops.placeholder(np.float32, shape=(4, 4, 4, 4))
b = array_ops.placeholder(np.float32, shape=(4, 4, 4, 4))
dim_nums = xla_data_pb2.DotDimensionNumbers()
dim_nums.lhs_batch_dimensions.append(2)
dim_nums.rhs_batch_dimensions.append(2)
dim_nums.rhs_batch_dimensions.append(3)
with self.assertRaisesRegex(ValueError,
'Must specify the same number of batch '
'dimensions for lhs and rhs. Got: 1 and 2'):
xla.dot_general(a, b, dim_nums)
def testDotDifferentBatchDimensionsSizes(self):
a = array_ops.placeholder(np.float32, shape=(2, 2, 2, 2))
b = array_ops.placeholder(np.float32, shape=(4, 4, 4, 2))
dim_nums = xla_data_pb2.DotDimensionNumbers()
dim_nums.lhs_contracting_dimensions.append(2)
dim_nums.rhs_contracting_dimensions.append(3)
dim_nums.lhs_batch_dimensions.append(0)
dim_nums.rhs_batch_dimensions.append(0)
with self.assertRaisesRegex(ValueError,
'Batch dimension sizes do not match. '
'Got: 2 and 4'):
xla.dot_general(a, b, dim_nums)
def testDotShapeInference(self):
a = array_ops.placeholder(np.float32, shape=(1, 2, 3, 4))
b = array_ops.placeholder(np.float32, shape=(4, 3, 2, 1))
dim_nums = xla_data_pb2.DotDimensionNumbers()
dim_nums.lhs_contracting_dimensions.append(1)
dim_nums.rhs_contracting_dimensions.append(2)
dim_nums.lhs_batch_dimensions.append(3)
dim_nums.rhs_batch_dimensions.append(0)
c = xla.dot_general(a, b, dim_nums)
self.assertEqual(c.shape, tensor_shape.TensorShape([1, 3, 3, 1]))
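# Small pure-numpy models, for intuition only, of two of the ops exercised
# above; they are not the XLA implementations. The first mimics xla.pad with
# edge and interior padding (the semantics checked in testPad), the second
# shows that dot_general with batch dims (0, 0) and contraction lhs dim 2 /
# rhs dim 1 is just a batched matmul.
def _pad_like_xla(x, value, low, high, interior):
  shape = [l + h + d + (d - 1) * i
           for d, l, h, i in zip(x.shape, low, high, interior)]
  out = np.full(shape, value, dtype=x.dtype)
  index = tuple(slice(l, l + (d - 1) * (i + 1) + 1, i + 1)
                for d, l, i in zip(x.shape, low, interior))
  out[index] = x
  return out
def _demo_pad_and_dot_general():
  x = np.arange(4, dtype=np.int32).reshape([2, 2])
  padded = _pad_like_xla(x, 7, low=[2, 1], high=[1, 2], interior=[1, 0])
  assert padded.shape == (6, 5) and padded[2, 1] == 0 and padded[4, 2] == 3
  lhs = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float32)
  rhs = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
                 dtype=np.float32)
  assert np.allclose(np.einsum('bij,bjk->bik', lhs, rhs)[0],
                     [[9, 12, 15], [19, 26, 33]])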
if __name__ == '__main__':
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/xla_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
# Wrapper around AvgPoolGrad that ignores extra arguments needed by
# MaxPoolGrad.
def _AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding):
del outputs # Unused by average-pooling gradients.
return gen_nn_ops.avg_pool3d_grad(
inputs.get_shape().as_list(),
output_gradients,
ksize=ksize,
strides=strides,
padding=padding)
class Pooling3DTest(xla_test.XLATestCase):
def _VerifyValues(self, pool_func, input_sizes, window, strides, padding,
expected):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Pooling function to be called, e.g. nn_ops.max_pool3d or
        nn_ops.avg_pool3d.
input_sizes: Input tensor dimensions.
window: Tuple of kernel dims: planes, rows, cols.
strides: Tuple of strides for dims: planes, rows, cols.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
total_size = 1
for s in input_sizes:
total_size *= s
    # Initializes the input tensor with an array of incrementing
    # numbers starting from 1.
x = np.arange(1.0, total_size + 1, dtype=np.float32)
x = x.reshape(input_sizes)
with self.session() as sess, self.test_scope():
inputs = array_ops.placeholder(dtypes.float32)
t = pool_func(
inputs,
ksize=[1] + window + [1],
strides=[1] + strides + [1],
padding=padding)
vals = sess.run(t, {inputs: x})
# Verifies values.
actual = vals.flatten()
self.assertAllClose(expected, actual)
def testAvgPool3dValidPadding(self):
expected_output = [20.5, 21.5, 22.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 3],
window=[2, 2, 2],
strides=[2, 2, 2],
padding="VALID",
expected=expected_output)
def testAvgPool3dSamePadding(self):
expected_output = [20.5, 21.5, 22.5, 26.5, 27.5, 28.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 2, 2, 4, 3],
window=[2, 2, 2],
strides=[2, 2, 2],
padding="SAME",
expected=expected_output)
def testAvgPool3dSamePaddingDifferentStrides(self):
expected_output = [1.5, 4.5, 7.5, 17.5, 20.5, 23.5, 33.5, 36.5, 39.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 5, 8, 1, 1],
window=[1, 2, 3],
strides=[2, 3, 1],
padding="SAME",
expected=expected_output)
def testMaxPool3dValidPadding(self):
expected_output = [40.0, 41.0, 42.0]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 3],
window=[2, 2, 2],
strides=[2, 2, 2],
padding="VALID",
expected=expected_output)
def testMaxPool3dSamePadding(self):
expected_output = [31., 32., 33., 34., 35., 36.]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 2, 2, 3, 3],
window=[2, 2, 2],
strides=[2, 2, 2],
padding="SAME",
expected=expected_output)
def testMaxPool3dSamePaddingDifferentStrides(self):
expected_output = [2., 5., 8., 18., 21., 24., 34., 37., 40.]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 5, 8, 1, 1],
window=[1, 2, 3],
strides=[2, 3, 1],
padding="SAME",
expected=expected_output)
# Test pooling on a larger input, with different stride and kernel
# size for the 'z' dimension.
# Simulate max pooling in numpy to get the expected output.
input_data = np.arange(1, 5 * 27 * 27 * 64 + 1).reshape((5, 27, 27, 64))
input_data = np.pad(input_data, [[0, 0], [0, 1], [0, 1], [0, 0]],
mode="constant")
expected_output = input_data[:, 1::2, 1::2, :]
expected_output[:, -1, :, :] = input_data[:, -2, 1::2, :]
expected_output[:, :, -1, :] = input_data[:, 1::2, -2, :]
expected_output[:, -1, -1, :] = input_data[:, -2, -2, :]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 5, 27, 27, 64],
window=[1, 2, 2],
strides=[1, 2, 2],
padding="SAME",
expected=expected_output.flatten())
def testKernelSmallerThanStride(self):
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 1],
window=[1, 1, 1],
strides=[2, 2, 2],
padding="SAME",
expected=[1, 3, 7, 9, 19, 21, 25, 27])
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 7, 7, 7, 1],
window=[2, 2, 2],
strides=[3, 3, 3],
padding="VALID",
expected=[58, 61, 79, 82, 205, 208, 226, 229])
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 1],
window=[1, 1, 1],
strides=[2, 2, 2],
padding="SAME",
expected=[1, 3, 7, 9, 19, 21, 25, 27])
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 7, 7, 7, 1],
window=[2, 2, 2],
strides=[3, 3, 3],
padding="VALID",
expected=[29.5, 32.5, 50.5, 53.5, 176.5, 179.5, 197.5, 200.5])
def _VerifyGradient(self,
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
pool_grad_grad_func=None):
"""Verifies the output values of the pooling gradient function.
Args:
      pool_func: Forward pooling function.
      pool_grad_func: Pooling gradient function for pool_func.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions.
      strides: The stride dimensions.
padding: Padding type.
pool_grad_grad_func: Second-order gradient function, if available.
"""
ksize = [1] + ksize + [1]
strides = [1] + strides + [1]
total_size = np.prod(input_sizes)
x = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_sizes)
with self.session() as sess:
# Use the forward pool function to compute some corresponding outputs
# (needed for the CPU device, and we need the shape in both cases).
with ops.device("CPU"):
inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)
outputs = pool_func(
inputs,
ksize=ksize,
strides=strides,
padding=padding)
output_vals = np.array(sess.run(outputs, {inputs: x}))
output_gradient_vals = np.arange(
1, output_vals.size + 1, dtype=np.float32)
output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)
output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)
output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)
# Use the Tensorflow CPU pooling gradient to compute the expected input
# gradients.
with ops.device("CPU"):
output_gradients = array_ops.placeholder(
dtypes.float32, shape=output_vals.shape)
expected_input_gradients = pool_grad_func(
inputs,
outputs,
output_gradients,
ksize=ksize,
strides=strides,
padding=padding)
expected_input_gradient_vals = sess.run(
expected_input_gradients,
{inputs: x,
output_gradients: output_gradient_vals})
output_grad_gradients = array_ops.placeholder(
dtypes.float32, shape=expected_input_gradient_vals.shape)
if pool_grad_grad_func is not None:
expected_grad_gradients = pool_grad_grad_func(
inputs,
outputs,
output_grad_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NDHWC")
expected_grad_gradients_vals = sess.run(expected_grad_gradients, {
inputs: x,
output_grad_gradients: output_grad_grad_vals
})
# Run the gradient op on the XLA device
with self.test_scope():
outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
actual_input_gradients = pool_grad_func(
inputs,
outputs,
output_gradients,
ksize=ksize,
strides=strides,
padding=padding)
if pool_grad_grad_func is not None:
actual_grad_gradients = pool_grad_grad_func(
inputs,
outputs,
output_grad_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NDHWC")
actual = sess.run(actual_input_gradients, {
inputs: x,
outputs: output_vals,
output_gradients: output_gradient_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_input_gradient_vals.flatten(),
actual.flatten(),
rtol=1e-5,
atol=1e-6)
self.assertShapeEqual(actual, inputs)
if pool_grad_grad_func is not None:
actual_grad_gradients_vals = sess.run(
actual_grad_gradients, {
inputs: x,
outputs: output_vals,
output_grad_gradients: output_grad_grad_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_grad_gradients_vals,
actual_grad_gradients_vals,
rtol=1e-4,
atol=1e-6)
self.assertShapeEqual(actual_grad_gradients_vals, outputs)
def testMaxPoolGradValidPadding1_1_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[1, 3, 3, 3, 1],
ksize=[1, 1, 1],
strides=[1, 1, 1],
padding="VALID",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradValidPadding2_1_6_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 3, 3, 6, 3],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="VALID",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradValidPadding2_1_7_3d(self):
# TODO(b/73062247): the bfloat16 implementation of MaxPool3DGradGrad does
# not have enough precision for this test case to pass if
# pool_grad_grad_func is passed.
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 3, 5, 7, 3],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="VALID")
def testMaxPoolGradValidPadding2_2_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 2, 2, 2, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="VALID",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradSamePadding1_1_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 3, 2, 4, 1],
ksize=[1, 1, 1],
strides=[1, 1, 1],
padding="SAME",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradSamePadding2_1_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 3, 2, 4, 1],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="SAME",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradSamePadding2_2_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 5, 2, 4, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="SAME",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradSamePadding3_1_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[1, 3, 3, 7, 1],
ksize=[3, 3, 3],
strides=[1, 1, 1],
padding="SAME",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testAvgPoolGradValidPadding1_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 3, 3, 3, 3],
ksize=[1, 1, 1],
strides=[1, 1, 1],
padding="VALID")
def testAvgPoolGradValidPadding2_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 3, 3, 3, 3],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="VALID")
def testAvgPoolGradValidPadding2_2_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 2, 2, 2, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="VALID")
def testAvgPoolGradSamePadding1_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 3, 2, 4, 3],
ksize=[1, 1, 1],
strides=[1, 1, 1],
padding="SAME")
def testAvgPoolGradSamePadding2_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[1, 2, 2, 2, 1],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="SAME")
def testAvgPoolGradSamePadding2_2_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 5, 2, 4, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="SAME")
def testAvgPoolGradSamePadding3_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[1, 3, 6, 7, 1],
ksize=[3, 3, 3],
strides=[1, 1, 1],
padding="SAME")
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/pooling_ops_3d_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for operators with > 3 or arbitrary numbers of arguments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class NAryOpsTest(xla_test.XLATestCase):
def _testNAry(self, op, args, expected, equality_fn=None):
with self.session() as session:
with self.test_scope():
placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
for arg in args
]
feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
output = op(placeholders)
result = session.run(output, feeds)
if not equality_fn:
equality_fn = self.assertAllClose
equality_fn(result, expected, rtol=1e-3)
def _nAryListCheck(self, results, expected, **kwargs):
self.assertEqual(len(results), len(expected))
for (r, e) in zip(results, expected):
self.assertAllClose(r, e, **kwargs)
def _testNAryLists(self, op, args, expected):
self._testNAry(op, args, expected, equality_fn=self._nAryListCheck)
def testFloat(self):
self._testNAry(math_ops.add_n,
[np.array([[1, 2, 3]], dtype=np.float32)],
expected=np.array([[1, 2, 3]], dtype=np.float32))
self._testNAry(math_ops.add_n,
[np.array([1, 2], dtype=np.float32),
np.array([10, 20], dtype=np.float32)],
expected=np.array([11, 22], dtype=np.float32))
self._testNAry(math_ops.add_n,
[np.array([-4], dtype=np.float32),
np.array([10], dtype=np.float32),
np.array([42], dtype=np.float32)],
expected=np.array([48], dtype=np.float32))
def testComplex(self):
for dtype in self.complex_types:
self._testNAry(
math_ops.add_n, [np.array([[1 + 2j, 2 - 3j, 3 + 4j]], dtype=dtype)],
expected=np.array([[1 + 2j, 2 - 3j, 3 + 4j]], dtype=dtype))
self._testNAry(
math_ops.add_n, [
np.array([1 + 2j, 2 - 3j], dtype=dtype),
np.array([10j, 20], dtype=dtype)
],
expected=np.array([1 + 12j, 22 - 3j], dtype=dtype))
self._testNAry(
math_ops.add_n, [
np.array([-4, 5j], dtype=dtype),
np.array([2 + 10j, -2], dtype=dtype),
np.array([42j, 3 + 3j], dtype=dtype)
],
expected=np.array([-2 + 52j, 1 + 8j], dtype=dtype))
@unittest.skip("IdentityN is temporarily CompilationOnly as workaround")
def testIdentityN(self):
self._testNAryLists(array_ops.identity_n,
[np.array([[1, 2, 3]], dtype=np.float32)],
expected=[np.array([[1, 2, 3]], dtype=np.float32)])
self._testNAryLists(array_ops.identity_n,
[np.array([[1, 2], [3, 4]], dtype=np.float32),
np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)],
expected=[
np.array([[1, 2], [3, 4]], dtype=np.float32),
np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)])
self._testNAryLists(array_ops.identity_n,
[np.array([[1], [2], [3], [4]], dtype=np.int32),
np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)],
expected=[
np.array([[1], [2], [3], [4]], dtype=np.int32),
np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)])
def testConcat(self):
self._testNAry(
lambda x: array_ops.concat(x, 0), [
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.float32), np.array(
[[7, 8, 9], [10, 11, 12]], dtype=np.float32)
],
expected=np.array(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=np.float32))
self._testNAry(
lambda x: array_ops.concat(x, 1), [
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.float32), np.array(
[[7, 8, 9], [10, 11, 12]], dtype=np.float32)
],
expected=np.array(
[[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], dtype=np.float32))
def testOneHot(self):
with self.session() as session, self.test_scope():
indices = array_ops.constant(np.array([[2, 3], [0, 1]], dtype=np.int32))
op = array_ops.one_hot(indices,
np.int32(4),
on_value=np.float32(7), off_value=np.float32(3))
output = session.run(op)
expected = np.array([[[3, 3, 7, 3], [3, 3, 3, 7]],
[[7, 3, 3, 3], [3, 7, 3, 3]]],
dtype=np.float32)
self.assertAllEqual(output, expected)
op = array_ops.one_hot(indices,
np.int32(4),
on_value=np.int32(2), off_value=np.int32(1),
axis=1)
output = session.run(op)
expected = np.array([[[1, 1], [1, 1], [2, 1], [1, 2]],
[[2, 1], [1, 2], [1, 1], [1, 1]]],
dtype=np.int32)
self.assertAllEqual(output, expected)
def testSplitV(self):
with self.session() as session:
with self.test_scope():
output = session.run(
array_ops.split(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2]],
dtype=np.float32),
[2, 2], 1))
expected = [np.array([[1, 2], [5, 6], [9, 0]], dtype=np.float32),
np.array([[3, 4], [7, 8], [1, 2]], dtype=np.float32)]
self.assertAllEqual(output, expected)
def testStridedSlice(self):
self._testNAry(lambda x: array_ops.strided_slice(*x),
[np.array([[], [], []], dtype=np.float32),
np.array([1, 0], dtype=np.int32),
np.array([3, 0], dtype=np.int32),
np.array([1, 1], dtype=np.int32)],
expected=np.array([[], []], dtype=np.float32))
if np.int64 in self.int_types:
self._testNAry(
lambda x: array_ops.strided_slice(*x), [
np.array([[], [], []], dtype=np.float32), np.array(
[1, 0], dtype=np.int64), np.array([3, 0], dtype=np.int64),
np.array([1, 1], dtype=np.int64)
],
expected=np.array([[], []], dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice(*x),
[np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=np.float32),
np.array([1, 1], dtype=np.int32),
np.array([3, 3], dtype=np.int32),
np.array([1, 1], dtype=np.int32)],
expected=np.array([[5, 6], [8, 9]], dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice(*x),
[np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=np.float32),
np.array([0, 2], dtype=np.int32),
np.array([2, 0], dtype=np.int32),
np.array([1, -1], dtype=np.int32)],
expected=np.array([[3, 2], [6, 5]], dtype=np.float32))
self._testNAry(lambda x: x[0][0:2, array_ops.newaxis, ::-1],
[np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=np.float32)],
expected=np.array([[[3, 2, 1]], [[6, 5, 4]]],
dtype=np.float32))
self._testNAry(lambda x: x[0][1, :, array_ops.newaxis],
[np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=np.float32)],
expected=np.array([[4], [5], [6]], dtype=np.float32))
def testStridedSliceGrad(self):
# Tests cases where input shape is empty.
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([], dtype=np.int32),
np.array([], dtype=np.int32),
np.array([], dtype=np.int32),
np.array([], dtype=np.int32),
np.float32(0.5)],
expected=np.array(np.float32(0.5), dtype=np.float32))
# Tests case where input shape is non-empty, but gradients are empty.
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([3], dtype=np.int32),
np.array([0], dtype=np.int32),
np.array([0], dtype=np.int32),
np.array([1], dtype=np.int32),
np.array([], dtype=np.float32)],
expected=np.array([0, 0, 0], dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([3, 0], dtype=np.int32),
np.array([1, 0], dtype=np.int32),
np.array([3, 0], dtype=np.int32),
np.array([1, 1], dtype=np.int32),
np.array([[], []], dtype=np.float32)],
expected=np.array([[], [], []], dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([3, 3], dtype=np.int32),
np.array([1, 1], dtype=np.int32),
np.array([3, 3], dtype=np.int32),
np.array([1, 1], dtype=np.int32),
np.array([[5, 6], [8, 9]], dtype=np.float32)],
expected=np.array([[0, 0, 0], [0, 5, 6], [0, 8, 9]],
dtype=np.float32))
def ssg_test(x):
return array_ops.strided_slice_grad(*x, shrink_axis_mask=0x4,
new_axis_mask=0x1)
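# The masks are bit fields over slice-spec positions: new_axis_mask=0x1
# (bit 0) inserts a size-1 axis at spec position 0, and shrink_axis_mask=0x4
# (bit 2) shrinks away spec position 2. That is why the begin/end/strides
# vectors below have four entries even though the forward input shape
# [3, 1, 3] only has rank 3.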
self._testNAry(ssg_test,
[np.array([3, 1, 3], dtype=np.int32),
np.array([0, 0, 0, 2], dtype=np.int32),
np.array([0, 3, 1, -4], dtype=np.int32),
np.array([1, 2, 1, -3], dtype=np.int32),
np.array([[[1], [2]]], dtype=np.float32)],
expected=np.array([[[0, 0, 1]], [[0, 0, 0]], [[0, 0, 2]]],
dtype=np.float32))
ssg_test2 = lambda x: array_ops.strided_slice_grad(*x, new_axis_mask=0x15)
self._testNAry(ssg_test2,
[np.array([4, 4], dtype=np.int32),
np.array([0, 0, 0, 1, 0], dtype=np.int32),
np.array([0, 3, 0, 4, 0], dtype=np.int32),
np.array([1, 2, 1, 2, 1], dtype=np.int32),
np.array([[[[[1], [2]]], [[[3], [4]]]]], dtype=np.float32)],
expected=np.array([[0, 1, 0, 2], [0, 0, 0, 0], [0, 3, 0, 4],
[0, 0, 0, 0]], dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([3, 3], dtype=np.int32),
np.array([0, 2], dtype=np.int32),
np.array([2, 0], dtype=np.int32),
np.array([1, -1], dtype=np.int32),
np.array([[1, 2], [3, 4]], dtype=np.float32)],
expected=np.array([[0, 2, 1], [0, 4, 3], [0, 0, 0]],
dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([3, 3], dtype=np.int32),
np.array([2, 2], dtype=np.int32),
np.array([0, 1], dtype=np.int32),
np.array([-1, -2], dtype=np.int32),
np.array([[1], [2]], dtype=np.float32)],
expected=np.array([[0, 0, 0], [0, 0, 2], [0, 0, 1]],
dtype=np.float32))
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/nary_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for JIT compilation on the CPU and GPU devices."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import os
import numpy as np
from tensorflow.compiler.tests import test_utils
from tensorflow.contrib.compiler import jit
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
jit_scope = jit.experimental_jit_scope
# Disable rewrites to make sure we don't end up having to update this test
# whenever we implement new ones.
def NoRewriteSessionConfig():
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,
function_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
def CompiledKernel(fn, *inputs, **kwargs):
"""Execute 'fn' as a compiled XLA kernel, with 'inputs'."""
name = kwargs.pop("name", None)
noinline = kwargs.pop("noinline", None)
@function.Defun(func_name=name, noinline=noinline, compiled=True)
def Compiled(*args):
return fn(*args)
return Compiled(*inputs)
def RunMetadataLabels(run_metadata):
"""Returns all labels in run_metadata."""
labels = []
for dev_stats in run_metadata.step_stats.dev_stats:
for node_stats in dev_stats.node_stats:
labels.append(node_stats.timeline_label)
return labels
def InLabels(labels, substr):
"""Returns true iff one of the labels contains substr."""
return any(substr in x for x in labels)
def MetadataHasXlaRunOp(run_metadata):
"""Returns true if there are XlaRun kernels in run_metadata's timeline."""
# TODO(phawkins): find a less hacky way to test whether a kernel ran.
return InLabels(RunMetadataLabels(run_metadata), "_XlaRun")
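# Taken together, these helpers let a test run a graph with FULL_TRACE run
# options and then assert on the resulting timeline: RunMetadataLabels
# collects the per-node timeline labels and InLabels checks them for
# substrings such as "_XlaCompile" or "_XlaRun" to decide whether XLA
# actually compiled and executed a cluster.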
class JitLaunchTest(test.TestCase):
# Evaluates 'fn' on 'args' both directly and as a compiled XLA kernel.
# Verifies that the outputs match and that XLA was invoked. 'fn' must take
# the same number of tensors as arguments that are in 'args', and must return
# a tuple of output tensors.
#
# If 'require_kernel_launch' is True, then we verify that an XlaCompile/XlaRun
# node actually ran. However, it is sometimes possible for XlaCompile/XlaRun
# ops to be constant-folded away, so the check is optional.
def _compare(self,
fn,
args,
require_kernel_launch=True,
name=None,
noinline=None):
with session_lib.Session(config=NoRewriteSessionConfig()) as sess:
placeholders = []
feeds = {}
for arg in args:
placeholder = array_ops.placeholder(
dtypes.as_dtype(arg.dtype), list(arg.shape))
placeholders.append(placeholder)
feeds[placeholder] = arg
compiled_op = CompiledKernel(
fn, *placeholders, name=name, noinline=noinline)
direct_op = fn(*placeholders)
run_metadata = config_pb2.RunMetadata()
compiled = test_utils.RunWithWarmup(
sess, compiled_op, feeds,
config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata)
print("Compiled Result {}".format(compiled))
if require_kernel_launch:
self.assert_(MetadataHasXlaRunOp(run_metadata))
direct = sess.run(direct_op, feeds)
print("Direct Result {}".format(direct))
if (isinstance(compiled, (tuple, list)) and
(isinstance(direct, (tuple, list)))):
for (x, y) in zip(compiled, direct):
self.assertAllClose(x, y, rtol=1e-1)
else:
self.assertAllClose(compiled, direct, rtol=1e-2)
def testNoOutputs(self):
with session_lib.Session() as sess:
# Check that calling the result as a compiled kernel doesn't crash.
@function.Defun(compiled=True)
def KernelWithNoOutputs():
a = constant_op.constant(100) # pylint: disable=unused-variable
call = KernelWithNoOutputs() # pylint: disable=assignment-from-no-return
test_utils.RunWithWarmup(sess, call, {})
def testAliasing(self):
"""Regression test for compiled functions that return an aliased buffer.
XLA returns aliased buffers if outputs are identical. Tests that
we handle that case.
"""
def AddOnceReturnTwice(x):
y = math_ops.add(x, x)
return y, y
# Exercises compiling a function (say, Foo) which calls another function
# (say, Bar) which is not inlined. When the compiler compiles Foo, it needs
# to symbolically execute Bar correctly regardless of whether Bar is inlined
# or not.
# Tests compiled=True and noinline=True.
self._compare(
AddOnceReturnTwice, [np.array([[[0.5, -1.0]]], dtype=np.float32)],
name="AddOnceReturnTwice_inline",
noinline=True)
# Tests compiled=True and noinline=False.
self._compare(
AddOnceReturnTwice, [np.array([[[0.5, -1.0]]], dtype=np.float32)],
name="AddOnceReturnTwice_noinline",
noinline=False)
def testOneConstOutput(self):
"""Test consisting of a single constant return value."""
def OneConstOutput():
return constant_op.constant([-3, 44, 99])
self._compare(OneConstOutput, [], require_kernel_launch=False)
def testConstZeroElementOutput(self):
"""Test consisting of a constant zero element return value."""
def ConstZeroElementOutput():
return array_ops.fill([7, 0], 3.0)
self._compare(ConstZeroElementOutput, [], require_kernel_launch=False)
def testSomeConstOutputs(self):
"""Test kernels that return a mixture of const and non-const outputs."""
def SomeConstOutputs(x):
return constant_op.constant(
[-2, 7]), array_ops.identity(x), constant_op.constant(3.5)
self._compare(
SomeConstOutputs, [np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.float32)])
def testInt32Input(self):
"""Test an int32-typed input.
On a GPU, int32 tensors will be placed in host memory.
"""
def AddToSelf(x):
return math_ops.add(x, x)
self._compare(AddToSelf, [np.array([7, 1, 3], dtype=np.int32)])
def testMandatoryConstantInput(self):
"""Tests an operator that has a mandatory-constant shape input."""
def FillWithFloat(x):
return array_ops.fill(x, 9.5)
self._compare(FillWithFloat, [np.array([3, 2], dtype=np.int32)])
def testMnistForwardFunc(self):
"""Compute inference function from MNIST beginners tutorial."""
batch_size = 16
image_size = 28 * 28
num_classes = 10
# Define a TensorFlow function to compute the forward pass.
def MnistForward(w, b, x):
return nn_ops.softmax(math_ops.matmul(x, w) + b)
w = np.random.random_sample((image_size, num_classes)).astype(np.float32)
b = np.random.random_sample((num_classes)).astype(np.float32)
x = np.random.random_sample((batch_size, image_size)).astype(np.float32)
self._compare(MnistForward, [w, b, x])
def testExplicitMarking(self):
"""Test explicit marking of operators to compile."""
batch_size = 16
image_size = 28 * 28
num_classes = 10
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32)
w = array_ops.placeholder(dtypes.float32)
b = array_ops.placeholder(dtypes.float32)
with jit_scope():
y1 = math_ops.matmul(x, w)
y2 = math_ops.add(y1, b)
with jit_scope():
y = math_ops.square(y2)
dw = np.random.random_sample((image_size, num_classes)).astype(np.float32)
db = np.random.random_sample((num_classes)).astype(np.float32)
dx = np.random.random_sample((batch_size, image_size)).astype(np.float32)
with session_lib.Session() as sess:
run_metadata = config_pb2.RunMetadata()
output = test_utils.RunWithWarmup(
sess,
y, {
x: dx,
w: dw,
b: db
},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
# TODO(phawkins): really we would like to test that there were exactly
# two kernel launches. However, we have no reliable way to determine
# that.
self.assert_(MetadataHasXlaRunOp(run_metadata))
expected = np.square(np.dot(dx, dw) + db)
self.assertAllClose(expected, output, rtol=1e-1)
class XlaCompilationTest(test.TestCase):
"""Tests for auto-compilation on CPU/GPU devices."""
def testReshape(self):
"""Tests an operator with compile-time constant and non-constant inputs."""
with self.session(config=NoRewriteSessionConfig()) as sess:
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.int32)
with jit_scope():
# Reshape's first argument is non-constant in the JIT, but its second
# (shape) argument will be treated as a compile-time constant for
# each JIT compilation.
# We do not use a tf.const() argument since we want to ensure the
# shape is still a run-time argument to the JIT, and not
# statically known as part of the JIT compilation's input graph.
z = array_ops.reshape(x, y)
run_metadata = config_pb2.RunMetadata()
out = test_utils.RunWithWarmup(
sess,
z, {
x: np.array([1, 2, 3, 4, 5, 6], np.float32),
y: [-1, 3]
},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assert_(MetadataHasXlaRunOp(run_metadata))
self.assertAllClose(np.array([[1, 2, 3], [4, 5, 6]], np.float32), out)
def testIgnoredArguments(self):
"""Tests that JIT computations can ignore formal parameters."""
with self.session(config=NoRewriteSessionConfig()) as sess:
x = array_ops.placeholder(dtypes.int32)
y = array_ops.placeholder(dtypes.int32)
with jit_scope():
z = math_ops.add(x, x)
w = math_ops.add(y, y)
# Pulls 'w' into the same compilation via control dependencies.
with ops.control_dependencies([w]):
n = control_flow_ops.no_op()
with ops.control_dependencies([n]):
t = math_ops.add(z, z)
run_metadata = config_pb2.RunMetadata()
out = test_utils.RunWithWarmup(
sess,
t, {
x: np.int32(7),
y: np.int32(404)
},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assert_(MetadataHasXlaRunOp(run_metadata))
self.assertAllClose(28, out)
def testLoops(self):
"""Tests that compilation accepts computations containing loops."""
with self.session(config=NoRewriteSessionConfig()) as session:
x = array_ops.placeholder(dtypes.float32)
with jit_scope():
c = lambda i, _: math_ops.less(i, 5)
b = lambda i, x: (i + 1, x * 2.0 + 1.0)
_, y = control_flow_ops.while_loop(c, b, (constant_op.constant(0), x))
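# With x fed as 2.0 below, the loop body runs five times:
# 2 -> 5 -> 11 -> 23 -> 47 -> 95, which is the value asserted at the end.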
run_metadata = config_pb2.RunMetadata()
result = session.run(y, {x: np.float32(2)},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assert_(MetadataHasXlaRunOp(run_metadata))
self.assertAllClose(result, np.float32(95), rtol=1e-1)
def testCond(self):
"""Tests that compilation handles switch operators."""
with self.session(config=NoRewriteSessionConfig()) as session:
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
c = array_ops.placeholder(dtypes.bool)
with jit_scope():
z = x + 1.0
w = control_flow_ops.cond(c, lambda: z, lambda: y)
t = math_ops.add(z, w)
# If JIT compilation chooses to cluster z and t, then execution will
# deadlock.
run_metadata = config_pb2.RunMetadata()
result = test_utils.RunWithWarmup(
session,
t, {
x: np.float32(2),
y: np.float32(4),
c: True
},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assert_(MetadataHasXlaRunOp(run_metadata))
self.assertAllClose(result, np.float32(6), rtol=1e-1)
def testNestedFunction(self):
g = ops.Graph()
with g.as_default():
@function.Defun(compiled=True)
def Bar(x, y):
return x + 2 * y
@function.Defun(compiled=True)
def Foo(x):
return Bar(x * x, x * x * x)
@function.Defun()
def Entry(x):
return Foo(x)
inp = array_ops.placeholder(dtypes.float32)
out = Entry(inp)
with self.session(
config=NoRewriteSessionConfig(), graph=g, use_gpu=True) as sess:
run_metadata = config_pb2.RunMetadata()
val = sess.run(out,
feed_dict={inp: [2., 10.]},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertAllClose(val, [20., 2100.])
def testLoopDeadlock(self):
"""Regression test for bug that caused deadlocks in graphs with loops."""
with self.session(config=NoRewriteSessionConfig()) as session:
x = array_ops.placeholder(dtypes.float32)
with jit_scope():
y = x + 1.0
c = lambda i, _x, _y: math_ops.less(i, 5)
b = lambda i, x, _y: (i + 1, x * 2.0 + 1.0, x - 3.0)
_, _, w = control_flow_ops.while_loop(c, b,
(constant_op.constant(0), y, x))
u = w + y
result = session.run(u, {x: np.float32(2)})
self.assertAllClose(result, np.float32(63), rtol=1e-1)
def testGradient(self):
"""Tests that the backprop function is properly compiled."""
def _Run(compiled):
@function.Defun(compiled=compiled)
def Forward(x):
return math_ops.log(x)
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32)
y = Forward(x)
dx, = gradients_impl.gradients(y, [x], 1.0)
cfg = NoRewriteSessionConfig()
cfg.graph_options.optimizer_options.opt_level = (
config_pb2.OptimizerOptions.L1)
cfg.graph_options.optimizer_options.do_function_inlining = True
with session_lib.Session(graph=g, config=cfg) as sess:
run_metadata = config_pb2.RunMetadata()
dx_val = test_utils.RunWithWarmup(
sess,
dx,
feed_dict={x: 100.},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertAllClose(dx_val, 0.01)
return RunMetadataLabels(run_metadata)
# SymGrad[f=log(x)](x, dy) = 1/x * dy
#
# Note: we don't need to compute log(x) for dx due to graph pruning.
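# For the feed x = 100. used in _Run, that gives dx = (1 / 100) * 1.0 = 0.01,
# which is the value checked by assertAllClose above.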
# Do not compile the backprop. We should see one Reciprocal and one Mul.
labels = _Run(compiled=False)
self.assertFalse(InLabels(labels, "Log"))
self.assertTrue(InLabels(labels, "Reciprocal"))
self.assertTrue(InLabels(labels, "Mul"))
self.assertFalse(InLabels(labels, "XlaCompile"))
self.assertFalse(InLabels(labels, "XlaRun"))
# Compile the backprop. One XlaCompile/XlaRun pair.
labels = _Run(compiled=True)
self.assertFalse(InLabels(labels, "Log"))
self.assertFalse(InLabels(labels, "Reciprocal"))
self.assertFalse(InLabels(labels, "Mul"))
self.assertTrue(InLabels(labels, "XlaCompile"))
self.assertTrue(InLabels(labels, "XlaRun"))
class ElementWiseFusionTest(test.TestCase):
# Runs a simple test with the input jit_level and fusion_only flag.
def simpleTest(self, arg0, arg1, global_jit_level):
config = config_pb2.ConfigProto()
config.graph_options.optimizer_options.global_jit_level = global_jit_level
with session_lib.Session(config=config) as sess:
a1 = array_ops.placeholder(dtypes.float32, [2, 2], name="a1")
a2 = array_ops.placeholder(dtypes.float32, [2, 2], name="a2")
# Two element-wise ops. We need at least two ops since single
# element clusters are not passed to XLA in fusion_only mode.
a3 = a1 * a2
a4 = a3 + a1
# A matmul to break XLA clustering.
a5 = math_ops.matmul(a4, a1)
# Two more element-wise ops.
a6 = a5 - a4
a7 = a6 + a2
run_metadata = config_pb2.RunMetadata()
output = test_utils.RunWithWarmup(
sess,
a7, {
a1: arg0,
a2: arg1
},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
labels = RunMetadataLabels(run_metadata)
xla_compile_count = sum("XlaCompile(" in x for x in labels)
xla_run_count = sum("XlaRun(" in x for x in labels)
self.assertEqual(xla_compile_count, xla_run_count)
return output, xla_run_count
class LazyCompilationTest(test.TestCase):
@unittest.skip("test too dependant on XLA compilation protocol")
def testLazyCompilation(self):
@function.Defun(compiled=True)
def CompiledFunction(x):
return math_ops.log(x)
with session_lib.Session(config=NoRewriteSessionConfig()) as sess:
x = array_ops.placeholder(dtypes.float32)
y = CompiledFunction(x)
# The very first run of the cluster is always compiled (non-lazily).
run_metadata_for_first_run = config_pb2.RunMetadata()
sess.run(
y,
feed_dict={x: [2., 10., 19., 77., 100.]},
run_metadata=run_metadata_for_first_run,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(
InLabels(
RunMetadataLabels(run_metadata_for_first_run), "_XlaCompile"))
self.assertTrue(
InLabels(RunMetadataLabels(run_metadata_for_first_run), "_XlaRun"))
run_metadata_before_warmup = config_pb2.RunMetadata()
sess.run(
y,
feed_dict={x: [2., 10.]},
run_metadata=run_metadata_before_warmup,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(
InLabels(
RunMetadataLabels(run_metadata_before_warmup), "_XlaCompile"))
self.assertFalse(
InLabels(RunMetadataLabels(run_metadata_before_warmup), "_XlaRun"))
# We compile when we see the same shape a second time.
run_metadata_after_warmup = config_pb2.RunMetadata()
sess.run(
y,
feed_dict={x: [2., 10.]},
run_metadata=run_metadata_after_warmup,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(
InLabels(RunMetadataLabels(run_metadata_after_warmup), "_XlaCompile"))
self.assertTrue(
InLabels(RunMetadataLabels(run_metadata_after_warmup), "_XlaRun"))
run_metadata_for_new_shape = config_pb2.RunMetadata()
sess.run(
y,
feed_dict={x: [2., 10., 12.]},
run_metadata=run_metadata_for_new_shape,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(
InLabels(
RunMetadataLabels(run_metadata_for_new_shape), "_XlaCompile"))
self.assertFalse(
InLabels(RunMetadataLabels(run_metadata_for_new_shape), "_XlaRun"))
def testIsMegamorphic(self):
@function.Defun(compiled=True)
def CompiledFunction(x):
return math_ops.log(x)
with session_lib.Session(config=NoRewriteSessionConfig()) as sess:
x = array_ops.placeholder(dtypes.float32)
y = CompiledFunction(x)
# Make the cluster go megamorphic by running it with lots of shape
# signatures where the cluster is executed with each signature only a few
# times. Then check that we don't compile the cluster ever again.
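# Concretely: 40 distinct shape signatures (lengths 10 through 49), each
# executed 49 times, followed by repeated runs at an unseen length of 60.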
for shape in range(10, 50):
for _ in range(0, 49):
sess.run(y, feed_dict={x: [0.] * shape})
for _ in range(0, 50):
run_metadata = config_pb2.RunMetadata()
sess.run(
y,
feed_dict={x: [0.] * 60},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(
InLabels(RunMetadataLabels(run_metadata), "_XlaCompile"))
self.assertFalse(InLabels(RunMetadataLabels(run_metadata), "_XlaRun"))
def testIsNotMegamorphic(self):
@function.Defun(compiled=True)
def CompiledFunction(x):
return math_ops.log(x)
with session_lib.Session(config=NoRewriteSessionConfig()) as sess:
x = array_ops.placeholder(dtypes.float32)
y = CompiledFunction(x)
# Run the cluster with lots of shape signatures, but in a way that it
# isn't megamorphic (i.e. each shape signature sees a lot of executions).
# Then check that the cluster has not been marked as megamorphic.
for shape in range(10, 50):
for _ in range(0, 1000):
sess.run(y, feed_dict={x: [0.] * shape})
for _ in range(0, 10):
sess.run(y, feed_dict={x: [0.] * 60})
run_metadata = config_pb2.RunMetadata()
sess.run(
y,
feed_dict={x: [0.] * 60},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(InLabels(RunMetadataLabels(run_metadata), "_XlaCompile"))
self.assertTrue(InLabels(RunMetadataLabels(run_metadata), "_XlaRun"))
if __name__ == "__main__":
os.environ["TF_XLA_FLAGS"] = ("--tf_xla_enable_lazy_compilation=true " +
os.environ.get("TF_XLA_FLAGS", ""))
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/jit_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class InverseOpTest(xla_test.XLATestCase):
def _verifyInverse(self, x, np_type):
for adjoint in False, True:
y = x.astype(np_type)
with self.session() as sess:
# Verify that x^{-1} * x == Identity matrix.
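# When adjoint=True, both the inverse and the matmul apply the adjoint, so
# the product computed below is inv(x^H) * x^H, which should still be the
# identity.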
p = array_ops.placeholder(dtypes.as_dtype(y.dtype), y.shape, name="x")
with self.test_scope():
inv = linalg_ops.matrix_inverse(p, adjoint=adjoint)
tf_ans = math_ops.matmul(inv, p, adjoint_b=adjoint)
np_ans = np.identity(y.shape[-1])
if x.ndim > 2:
tiling = list(y.shape)
tiling[-2:] = [1, 1]
np_ans = np.tile(np_ans, tiling)
out = sess.run(tf_ans, feed_dict={p: y})
self.assertAllClose(np_ans, out, rtol=1e-3, atol=1e-3)
self.assertShapeEqual(y, tf_ans)
def _verifyInverseReal(self, x):
for np_type in self.float_types & {np.float64, np.float32}:
self._verifyInverse(x, np_type)
def _makeBatch(self, matrix1, matrix2):
matrix_batch = np.concatenate(
[np.expand_dims(matrix1, 0),
np.expand_dims(matrix2, 0)])
matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
return matrix_batch
def testNonsymmetric(self):
# 2x2 matrices
matrix1 = np.array([[1., 2.], [3., 4.]])
matrix2 = np.array([[1., 3.], [3., 5.]])
self._verifyInverseReal(matrix1)
self._verifyInverseReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyInverseReal(self._makeBatch(matrix1, matrix2))
def testSymmetricPositiveDefinite(self):
# 2x2 matrices
matrix1 = np.array([[2., 1.], [1., 2.]])
matrix2 = np.array([[3., -1.], [-1., 3.]])
self._verifyInverseReal(matrix1)
self._verifyInverseReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyInverseReal(self._makeBatch(matrix1, matrix2))
def testEmpty(self):
self._verifyInverseReal(np.empty([0, 2, 2]))
self._verifyInverseReal(np.empty([2, 0, 0]))
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/matrix_inverse_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for 3D convolutions using the XLA JIT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
# Test cloned from
# tensorflow/python/kernel_tests/conv3d_backprop_filter_v2_grad_test.py
class Conv3DBackpropFilterV2GradTest(xla_test.XLATestCase):
def testGradient(self):
with self.session(), self.test_scope():
for padding in ["SAME", "VALID"]:
for stride in [1, 2]:
np.random.seed(1)
in_shape = [2, 4, 3, 3, 2]
in_val = constant_op.constant(
2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
filter_shape = [3, 3, 3, 2, 3]
strides = [1, stride, stride, stride, 1]
# Make a convolution op with the current settings, just to easily get
# the shape of the output.
conv_out = nn_ops.conv3d(in_val,
array_ops.zeros(filter_shape), strides,
padding)
out_backprop_shape = conv_out.get_shape().as_list()
out_backprop_val = constant_op.constant(
2 * np.random.random_sample(out_backprop_shape) - 1,
dtype=dtypes.float32)
output = nn_ops.conv3d_backprop_filter_v2(in_val, filter_shape,
out_backprop_val, strides,
padding)
err = gradient_checker.compute_gradient_error(
[in_val, out_backprop_val], [in_shape, out_backprop_shape],
output, filter_shape)
print("conv3d_backprop_filter gradient err = %g " % err)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)
# Test cloned from tensorflow/python/kernel_tests/conv3d_transpose_test.py
class Conv3DTransposeTest(xla_test.XLATestCase):
def testConv3DTransposeSingleStride(self):
with self.session(), self.test_scope():
strides = [1, 1, 1, 1, 1]
# Input, output: [batch, depth, height, width, channel]
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 5, 6, 4, 2]
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
# We count the number of cells being added at the locations in the output.
# At the center, #cells = kernel_depth * kernel_height * kernel_width
# At the corners, #cells = ceil(kernel_depth/2) * ceil(kernel_height/2)
# * ceil(kernel_width/2)
# At the edges, #cells =
# kernel_depth * ceil(kernel_height/2) * ceil(kernel_width/2) or
# ceil(kernel_depth/2) * kernel_height * ceil(kernel_width/2) or
# ceil(kernel_depth/2) * ceil(kernel_height/2) * kernel_width
# At the borders, #cells =
# ceil(kernel_depth/2) * kernel_height * kernel_width or
# kernel_depth * ceil(kernel_height/2) * kernel_width or
# kernel_depth * kernel_height * ceil(kernel_width/2)
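# For the 3x3x3 kernel and 3 input channels of ones used here, that works
# out to 27 * 3 = 81 at the center, 18 * 3 = 54 on the borders,
# 12 * 3 = 36 on the edges and 8 * 3 = 24 at the corners, which are exactly
# the targets assigned below.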
for n in xrange(x_shape[0]):
for k in xrange(f_shape[3]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
for d in xrange(y_shape[1]):
d_in = d > 0 and d < y_shape[1] - 1
h_in = h > 0 and h < y_shape[2] - 1
w_in = w > 0 and w < y_shape[3] - 1
if d_in + h_in + w_in == 3:
target = 27 * 3.0
elif d_in + h_in + w_in == 2:
target = 18 * 3.0
elif d_in or h_in or w_in:
target = 12 * 3.0
else:
target = 8 * 3.0
self.assertAllClose(target, value[n, d, h, w, k])
def testConv3DTransposeSame(self):
with self.session(), self.test_scope():
strides = [1, 2, 2, 2, 1]
# Input, output: [batch, depth, height, width, channel]
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 10, 12, 8, 2]
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[3]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
for d in xrange(y_shape[1]):
# We add a case for locations divisible by the stride.
d_in = d % strides[1] == 0 and 0 < d < y_shape[1] - 1
h_in = h % strides[2] == 0 and 0 < h < y_shape[2] - 1
w_in = w % strides[3] == 0 and 0 < w < y_shape[3] - 1
if d_in + h_in + w_in == 3:
target = 8 * 3.0
elif d_in + h_in + w_in == 2:
target = 4 * 3.0
elif d_in or h_in or w_in:
target = 2 * 3.0
else:
target = 3.0
self.assertAllClose(target, value[n, d, h, w, k])
def testConv3DTransposeValid(self):
with self.session(), self.test_scope():
strides = [1, 2, 2, 2, 1]
# Input, output: [batch, depth, height, width, channel]
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 11, 13, 9, 2]
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="VALID")
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[3]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
for d in xrange(y_shape[1]):
# We add a case for locations divisible by the stride.
d_in = d % strides[1] == 0 and pad < d < y_shape[1] - 1 - pad
h_in = h % strides[2] == 0 and pad < h < y_shape[2] - 1 - pad
w_in = w % strides[3] == 0 and pad < w < y_shape[3] - 1 - pad
if d_in + h_in + w_in == 3:
target = 8 * 3.0
elif d_in + h_in + w_in == 2:
target = 4 * 3.0
elif d_in or h_in or w_in:
target = 2 * 3.0
else:
target = 3.0
cache_values[n, d, h, w, k] = target
# copy values in the border
cache_values[n, :, :, 0, k] = cache_values[n, :, :, 1, k]
cache_values[n, :, :, -1, k] = cache_values[n, :, :, -2, k]
cache_values[n, :, 0, :, k] = cache_values[n, :, 1, :, k]
cache_values[n, :, -1, :, k] = cache_values[n, :, -2, :, k]
cache_values[n, 0, :, :, k] = cache_values[n, 1, :, :, k]
cache_values[n, -1, :, :, k] = cache_values[n, -2, :, :, k]
self.assertAllClose(cache_values, value)
def testGradient(self):
x_shape = [2, 3, 4, 3, 2]
f_shape = [3, 3, 3, 2, 2]
y_shape = [2, 6, 8, 6, 2]
strides = [1, 2, 2, 2, 1]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
f_val = np.random.random_sample(f_shape).astype(np.float64)
with self.session(), self.test_scope():
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
print("conv3d_transpose gradient err = %g " % err)
err_tolerance = 0.001
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/conv3d_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Conv2D via the XLA JIT.
The canned results in these tests are created by running each test using the
TensorFlow CPU device and saving the output.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import test_utils
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
DATA_FORMATS = (
("_data_format_NHWC", "NHWC"),
("_data_format_NCHW", "NCHW"),
)
class Conv2DTest(xla_test.XLATestCase, parameterized.TestCase):
def _VerifyValues(self,
input_sizes=None,
filter_sizes=None,
strides=None,
dilations=None,
padding=None,
data_format_src="NHWC",
data_format_dst="NHWC",
expected=None):
"""Tests that tf.nn.conv2d produces the expected value.
Args:
input_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
strides: Strides.
dilations: RHS dilations.
padding: Padding type.
data_format_src: Data format the input is in.
data_format_dst: Data format that verification will run in and that the
input is converted to.
expected: Expected output.
"""
total_size_1 = np.prod(input_sizes)
total_size_2 = np.prod(filter_sizes)
x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)
x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(filter_sizes)
strides = [1] + strides + [1]
if dilations is None:
dilations = [1, 1]
dilations = [1] + dilations + [1]
# Convert between data formats.
expected = test_utils.ConvertBetweenDataFormats(expected, data_format_src,
data_format_dst)
x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src,
data_format_dst)
input_sizes = test_utils.PermuteDimsBetweenDataFormats(
input_sizes, data_format_src, data_format_dst)
strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src,
data_format_dst)
dilations = test_utils.PermuteDimsBetweenDataFormats(
dilations, data_format_src, data_format_dst)
with self.session() as sess:
t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)
t2 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)
with self.test_scope():
out = nn_ops.conv2d(
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format_dst,
dilations=dilations)
value = sess.run(out, {t1: x1, t2: x2})
self.assertAllClose(expected, value, 1e-3)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x1Filter(self, data_format):
expected_output = np.reshape([
30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
], [1, 2, 3, 3])
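# _VerifyValues feeds np.arange values, so the first output pixel is the
# input channels [1, 2, 3] combined with the 3x3 filter slice
# [[1, 2, 3], [4, 5, 6], [7, 8, 9]]:
# 1*[1, 2, 3] + 2*[4, 5, 6] + 3*[7, 8, 9] = [30, 36, 42],
# the first three values above.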
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Filter(self, data_format):
expected_output = np.reshape(
[2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0], [1, 1, 2, 3])
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Filter2x1Dilation(self, data_format):
expected_output = np.array([[[[72], [82], [92]], [[112], [122], [132]]]])
self._VerifyValues(
input_sizes=[1, 4, 4, 1],
filter_sizes=[2, 2, 1, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2Filter(self, data_format):
expected_output = np.reshape([
231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
936.0, 1029.0
], [1, 2, 2, 3])
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[1, 2, 3, 3],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterStride2(self, data_format):
expected_output = np.reshape([2271.0, 2367.0, 2463.0], [1, 1, 1, 3])
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterStride2Same(self, data_format):
expected_output = np.reshape(
[2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0], [1, 1, 2, 3])
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2DEmptyDilation(self, data_format):
self._VerifyValues(
input_sizes=[0, 2, 3, 3],
filter_sizes=[1, 1, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=np.zeros([0, 2, 3, 3]))
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterDilation(self, data_format):
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=np.reshape([2667, 2781, 2895], [1, 1, 1, 3]))
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterDilation(self, data_format):
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[1, 2, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=np.array([[[[231, 252, 273], [384, 423, 462]],
[[690, 765, 840], [843, 936, 1029]]]]))
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2DKernelSizeMatchesInputSizeDilation(self, data_format):
self._VerifyValues(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=np.reshape([108, 128], [1, 1, 1, 2]))
class Conv2DBackpropInputTest(xla_test.XLATestCase, parameterized.TestCase):
def _VerifyValues(self,
input_sizes=None,
filter_sizes=None,
out_backprop_sizes=None,
strides=None,
dilations=None,
padding=None,
data_format_src="NHWC",
data_format_dst="NHWC",
expected=None):
"""Tests that gen_nn_ops.conv2d_backprop_input produces the expected output.
Args:
input_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
out_backprop_sizes: Output gradients tensor dimensions.
strides: Strides.
dilations: Dilations.
padding: Padding type.
data_format_src: Data format the input is in.
data_format_dst: Data format that verification will run in and that the
input is converted to.
expected: Expected output.
"""
total_size_1 = np.prod(filter_sizes)
total_size_2 = np.prod(out_backprop_sizes)
x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(filter_sizes)
x2 = np.arange(
1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes)
strides = [1] + strides + [1]
if dilations is not None:
dilations = [1] + dilations + [1]
expected = np.reshape(expected, input_sizes)
# Convert between data formats.
expected = test_utils.ConvertBetweenDataFormats(expected, data_format_src,
data_format_dst)
x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src,
data_format_dst)
input_sizes = test_utils.PermuteDimsBetweenDataFormats(
input_sizes, data_format_src, data_format_dst)
out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(
out_backprop_sizes, data_format_src, data_format_dst)
strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src,
data_format_dst)
if dilations is not None:
dilations = test_utils.PermuteDimsBetweenDataFormats(
dilations, data_format_src, data_format_dst)
with self.session() as sess:
t1 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)
t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)
with self.test_scope():
out = gen_nn_ops.conv2d_backprop_input(
input_sizes=input_sizes,
filter=t1,
out_backprop=t2,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format_dst)
value = sess.run(out, {t1: x1, t2: x2})
self.assertAllEqual(input_sizes, value.shape)
self.assertAllClose(expected, value, 1e-3)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x1Filter(self, data_format):
expected_output = [
5, 11, 17, 11, 25, 39, 17, 39, 61, 23, 53, 83, 29, 67, 105, 35, 81, 127,
41, 95, 149, 47, 109, 171, 53, 123, 193, 59, 137, 215, 65, 151, 237, 71,
165, 259, 77, 179, 281, 83, 193, 303, 89, 207, 325, 95, 221, 347.
]
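# Sanity check for the first values: the [1, 1, 3, 2] filter holds
# [[1, 2], [3, 4], [5, 6]] (input depth x output depth) and the first
# out_backprop pixel is [1, 2], so the first input-gradient pixel is
# [1*1 + 2*2, 3*1 + 4*2, 5*1 + 6*2] = [5, 11, 17].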
self._VerifyValues(
input_sizes=[1, 4, 4, 3],
filter_sizes=[1, 1, 3, 2],
out_backprop_sizes=[1, 4, 4, 2],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterStride3Width5(self, data_format):
expected_output = [1, 2, 0, 2, 4]
self._VerifyValues(
input_sizes=[1, 1, 5, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[3, 3],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterStride3Width6(self, data_format):
expected_output = [1, 2, 0, 2, 4, 0]
self._VerifyValues(
input_sizes=[1, 1, 6, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[3, 3],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterStride3Width7(self, data_format):
expected_output = [1, 2, 0, 2, 4, 0, 0]
self._VerifyValues(
input_sizes=[1, 1, 7, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[3, 3],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterC1Same(self, data_format):
expected_output = [1, 4, 7, 7, 23, 33]
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 2, 3, 1],
strides=[1, 1],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Filter(self, data_format):
expected_output = [
14, 32, 50, 100, 163, 226, 167, 212, 257, 122, 140, 158, 478, 541, 604,
437, 482, 527
]
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
out_backprop_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterSame(self, data_format):
expected_output = [
14, 32, 50, 100, 163, 226, 217, 334, 451, 190, 307, 424, 929, 1217,
1505, 1487, 1883, 2279
]
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
out_backprop_sizes=[1, 2, 3, 3],
strides=[1, 1],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2Filter(self, data_format):
expected_output = [1, 4, 4, 3, 10, 8, 5, 16, 12]
self._VerifyValues(
input_sizes=[1, 3, 3, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 3, 2, 1],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterSame(self, data_format):
expected_output = [1, 4, 7, 4, 13, 16, 7, 22, 25]
self._VerifyValues(
input_sizes=[1, 3, 3, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 3, 3, 1],
strides=[1, 1],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterStride2(self, data_format):
expected_output = [1, 2, 5, 4, 6, 0, 0, 0, 0, 0, 3, 6, 13, 8, 12]
self._VerifyValues(
input_sizes=[1, 3, 5, 1],
filter_sizes=[1, 3, 1, 1],
out_backprop_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterStride2Same(self, data_format):
expected_output = [1, 2, 2, 3, 4, 6]
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[2, 2],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(
self, data_format):
self._VerifyValues(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[1, 4, 7, 10, 13, 10, 0, 0, 0, 0, 0, 0, 3, 10, 17, 24, 31, 20])
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self, data_format):
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 1, 1, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[1, 0, 2, 3, 0, 4])
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2DEmptyBackpropInputDilation1x2(self, data_format):
self._VerifyValues(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[0, 1, 1, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=np.zeros([0]))
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self, data_format):
# The GPU version of this test is not very stable. So adjusting the
# error threshold to 1e-4.
self._VerifyValues(
input_sizes=[1, 3, 2, 3],
filter_sizes=[2, 2, 3, 3],
out_backprop_sizes=[1, 1, 1, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[
14, 32, 50, 68, 86, 104, 0, 0, 0, 0, 0, 0, 122, 140, 158, 176, 194,
212
])
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(
self, data_format):
self._VerifyValues(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
out_backprop_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[5, 0, 11, 0, 0, 0, 17, 0, 23])
class Conv2DBackpropFilterTest(xla_test.XLATestCase, parameterized.TestCase):
def _VerifyValues(self,
input_sizes=None,
filter_sizes=None,
out_backprop_sizes=None,
strides=None,
dilations=None,
padding=None,
data_format_src="NHWC",
data_format_dst="NHWC",
expected=None):
"""Tests that gen_nn_ops.conv2d_backprop_filter produces the right output.
Args:
input_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
out_backprop_sizes: Output gradients tensor dimensions.
strides: Stride.
dilations: Dilations.
padding: Padding type.
data_format_src: Data format the input is in.
data_format_dst: Data format that verification will run in and that the
input is converted to.
expected: Expected output.
"""
total_size_1 = np.prod(input_sizes)
total_size_2 = np.prod(out_backprop_sizes)
x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)
x2 = np.arange(
1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes)
strides = [1] + strides + [1]
if dilations is not None:
dilations = [1] + dilations + [1]
expected = np.reshape(expected, filter_sizes)
# Convert between data formats.
x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src,
data_format_dst)
x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src,
data_format_dst)
input_sizes = test_utils.PermuteDimsBetweenDataFormats(
input_sizes, data_format_src, data_format_dst)
out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(
out_backprop_sizes, data_format_src, data_format_dst)
strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src,
data_format_dst)
if dilations is not None:
dilations = test_utils.PermuteDimsBetweenDataFormats(
dilations, data_format_src, data_format_dst)
with self.session() as sess:
t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)
t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)
with self.test_scope():
tensor = gen_nn_ops.conv2d_backprop_filter(
input=t1,
filter_sizes=filter_sizes,
out_backprop=t2,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format_dst)
value = sess.run(tensor, {t1: x1, t2: x2})
self.assertAllEqual(filter_sizes, value.shape)
self.assertAllClose(expected, value, 1e-3)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x1Filter(self, data_format):
expected_output = [8056, 8432, 8312, 8704, 8568, 8976]
self._VerifyValues(
input_sizes=[1, 4, 4, 3],
filter_sizes=[1, 1, 3, 2],
out_backprop_sizes=[1, 4, 4, 2],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2Filter(self, data_format):
expected_output = [120, 141]
self._VerifyValues(
input_sizes=[1, 3, 3, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 3, 2, 1],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterDepth1(self, data_format):
expected_output = [5, 8, 14, 17]
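# These follow from correlating the 2x3 input [[1, 2, 3], [4, 5, 6]] with the
# out_backprop values [1, 2]: the top-left filter tap sees 1*1 + 2*2 = 5 and
# the bottom-right tap sees 5*1 + 6*2 = 17.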
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Filter(self, data_format):
expected_output = [
17, 22, 27, 22, 29, 36, 27, 36, 45, 32, 43, 54, 37, 50, 63, 42, 57, 72,
62, 85, 108, 67, 92, 117, 72, 99, 126, 77, 106, 135, 82, 113, 144, 87,
120, 153
]
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
out_backprop_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterStride3Width5(self, data_format):
expected_output = [9, 12]
self._VerifyValues(
input_sizes=[1, 1, 5, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[3, 3],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterStride3Width6(self, data_format):
expected_output = [9, 12]
self._VerifyValues(
input_sizes=[1, 1, 6, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[3, 3],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterStride3Width7(self, data_format):
expected_output = [9, 12]
self._VerifyValues(
input_sizes=[1, 1, 7, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[3, 3],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x3Filter(self, data_format):
expected_output = [5, 8, 11]
self._VerifyValues(
input_sizes=[1, 1, 4, 1],
filter_sizes=[1, 3, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x3FilterSame(self, data_format):
expected_output = [20, 30, 20]
self._VerifyValues(
input_sizes=[1, 1, 4, 1],
filter_sizes=[1, 3, 1, 1],
out_backprop_sizes=[1, 1, 4, 1],
strides=[1, 1],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x3FilterSameOutbackprop2(self, data_format):
expected_output = [7, 10, 3]
self._VerifyValues(
input_sizes=[1, 1, 4, 1],
filter_sizes=[1, 3, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[2, 2],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterC1Same(self, data_format):
expected_output = [91, 58, 32, 17]
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 2, 3, 1],
strides=[1, 1],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterStride2(self, data_format):
expected_output = [92, 102, 112]
self._VerifyValues(
input_sizes=[1, 3, 5, 1],
filter_sizes=[1, 3, 1, 1],
out_backprop_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterStride2Same(self, data_format):
expected_output = [7, 2, 16, 5]
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[2, 2],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(
self, data_format):
self._VerifyValues(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[55, 70, 235, 250])
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self, data_format):
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 1, 1, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[1, 3, 4, 6])
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2DEmptyBackpropFilterDilation1x2(self, data_format):
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
out_backprop_sizes=[1, 1, 1, 0],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=np.zeros([0]))
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self, data_format):
self._VerifyValues(
input_sizes=[1, 3, 4, 3],
filter_sizes=[2, 2, 3, 3],
out_backprop_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[
17, 22, 27, 22, 29, 36, 27, 36, 45, 47, 64, 81, 52, 71, 90, 57, 78,
99, 137, 190, 243, 142, 197, 252, 147, 204, 261, 167, 232, 297, 172,
239, 306, 177, 246, 315
])
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(
self, data_format):
self._VerifyValues(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
out_backprop_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[1, 2, 3, 6, 7, 14, 9, 18])
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/conv2d_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the DataFormatVecPermute operator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class XlaDataFormatDimMapTest(xla_test.XLATestCase):
def _test(self, input_data, src_format, dst_format, expected):
for dtype in {np.int32, np.int64}:
x = np.array(input_data, dtype=dtype)
with self.session() as session:
with self.test_scope():
placeholder = array_ops.placeholder(dtypes.as_dtype(x.dtype), x.shape)
param = {placeholder: x}
output = nn_ops.data_format_dim_map(
placeholder, src_format=src_format, dst_format=dst_format)
result = session.run(output, param)
self.assertAllEqual(result, expected)
def test(self):
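    # data_format_dim_map maps a dimension index in src_format to the index of
    # the same dimension character in dst_format; negative indices count back
    # from the end of src_format.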
self._test(0, "NHWC", "NCHW", 0)
self._test(1, "NHWC", "NCHW", 2)
self._test(2, "NHWC", "NCHW", 3)
self._test(3, "NHWC", "NCHW", 1)
self._test(-1, "NHWC", "NCHW", 1)
self._test(-2, "NHWC", "NCHW", 3)
self._test(-3, "NHWC", "NCHW", 2)
self._test(-4, "NHWC", "NCHW", 0)
self._test([1, 3], "NHWC", "NCHW", [2, 1])
self._test([1, 3, -2], "NHWC", "NCHW", [2, 1, 3])
self._test([1, -3, -2], "NHWC", "NCHW", [2, 2, 3])
self._test([[1, -3], [1, -1]], "NHWC", "NCHW", [[2, 2], [2, 1]])
self._test([1, -3, -2], "NHWC", "NCHW", [2, 2, 3])
self._test([-4, -3, -2, -1, 0, 1, 2, 3], "NHWC", "HWNC",
[2, 0, 1, 3, 2, 0, 1, 3])
self._test([-4, -3, -2, -1, 0, 1, 2, 3], "NHWC", "WHCN",
[3, 1, 0, 2, 3, 1, 0, 2])
self._test([-4, -3, -2, -1, 0, 1, 2, 3], "qwer", "rewq",
[3, 2, 1, 0, 3, 2, 1, 0])
self._test(0, "NDHWC", "NCDHW", 0)
self._test(1, "NDHWC", "NCDHW", 2)
self._test(2, "NDHWC", "NCDHW", 3)
self._test(3, "NDHWC", "NCDHW", 4)
self._test(4, "NDHWC", "NCDHW", 1)
self._test([1, 4], "NDHWC", "NCDHW", [2, 1])
self._test([1, 4, -2], "NDHWC", "NCDHW", [2, 1, 4])
self._test([1, -3, -2], "NDHWC", "NCDHW", [2, 3, 4])
self._test([[1, -4], [1, -1]], "NDHWC", "NCDHW", [[2, 2], [2, 1]])
self._test([1, -3, -2], "NDHWC", "NCDHW", [2, 3, 4])
self._test([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4], "NDHWC", "DHWNC",
[3, 0, 1, 2, 4, 3, 0, 1, 2, 4])
self._test([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4], "NDHWC", "WHDCN",
[4, 2, 1, 0, 3, 4, 2, 1, 0, 3])
class XlaPermuteOpTest(xla_test.XLATestCase):
def _runPermuteAndCompare(self, x, src_format, dst_format, expected):
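    # data_format_vec_permute reorders a per-dimension vector (e.g. a shape or
    # strides vector) from src_format order to dst_format order; for a 2-D
    # input, each row corresponds to one dimension.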
with self.session() as session:
with self.test_scope():
placeholder = array_ops.placeholder(dtypes.as_dtype(x.dtype), x.shape)
param = {placeholder: x}
output = nn_ops.data_format_vec_permute(
placeholder, src_format=src_format, dst_format=dst_format)
result = session.run(output, param)
self.assertAllEqual(result, expected)
def testNHWCToNCHW(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "NCHW", [7, 3, 4, 9])
def testNCHWToNHWC(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "NCHW", "NHWC", [7, 9, 3, 4])
def testNHWCToHWNC(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "HWNC", [4, 9, 7, 3])
def testHWNCToNHWC(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "HWNC", "NHWC", [9, 7, 4, 3])
def testNDHWCToNCDHW(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3, 2], dtype=dtype)
self._runPermuteAndCompare(x, "NDHWC", "NCDHW", [7, 2, 4, 9, 3])
def testNCDHWToNDHWC(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3, 2], dtype=dtype)
self._runPermuteAndCompare(x, "NCDHW", "NDHWC", [7, 9, 3, 2, 4])
def testNHWCToNCHW2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "NCHW",
[[7, 4], [5, 1], [9, 3], [4, 5]])
def testNHWCToHWNC2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "HWNC",
[[9, 3], [4, 5], [7, 4], [5, 1]])
def testHWNCToNHWC2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "HWNC", "NHWC",
[[4, 5], [7, 4], [9, 3], [5, 1]])
def testNCHWToNHWC2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "NCHW", "NHWC",
[[7, 4], [4, 5], [5, 1], [9, 3]])
def testNDHWCToNCDHW2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1], [6, 8]], dtype=dtype)
self._runPermuteAndCompare(x, "NDHWC", "NCDHW",
[[7, 4], [6, 8], [9, 3], [4, 5], [5, 1]])
def testNCDHWToNDHWC2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1], [6, 8]], dtype=dtype)
self._runPermuteAndCompare(x, "NCDHW", "NDHWC",
[[7, 4], [4, 5], [5, 1], [6, 8], [9, 3]])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/data_format_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Convolution node name match via the XLA JIT.
The node names produced with XLA compilation enabled are compared against the
node names produced on the TensorFlow CPU device.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import ops
from tensorflow.python.layers import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import googletest
class ConvolutionNodeNameTest(xla_test.XLATestCase):
"""Verify convolution node name match.
Verify convolution node names on TPU and CPU match with dilation > 1.
"""
def _verifyNodeNameMatch(self, layer, input_sizes, filter_sizes, strides,
dilations):
def _GetNodeNames(use_xla):
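      # Builds the convolution layer, with or without XLA scoping, and returns
      # the node names of the resulting graph (space-to-depth nodes are
      # filtered out on the CPU path).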
with self.session():
input_tensor = array_ops.placeholder(np.float32, shape=input_sizes)
if use_xla:
with self.test_scope():
# pylint: disable=protected-access
graph = ops.get_default_graph()
graph._set_control_flow_context(
control_flow_ops.XLAControlFlowContext())
# pylint: enable=protected-access
conv2d_op = layer(
filters=64,
kernel_size=filter_sizes,
dilation_rate=dilations,
padding="same")
_ = conv2d_op(input_tensor)
return [n.name for n in ops.get_default_graph().as_graph_def().node]
else:
with ops.device("CPU"):
conv2d_op = layer(
filters=64,
kernel_size=filter_sizes,
dilation_rate=dilations,
padding="same")
_ = conv2d_op(input_tensor)
names = [
n.name for n in ops.get_default_graph().as_graph_def().node
]
# filter out space to depth ops.
return [
name for name in names
if "space" not in name and "Space" not in name
]
xla_names = _GetNodeNames(use_xla=True)
no_xla_names = _GetNodeNames(use_xla=False)
    self.assertListEqual(xla_names, no_xla_names)
def testConv1DNodeNameMatch(self):
input_sizes = [8, 16, 3]
filter_sizes = [7]
strides = 1
dilations = [2]
layer = layers.Conv1D
self._verifyNodeNameMatch(layer, input_sizes, filter_sizes, strides,
dilations)
def testConv2DNodeNameMatch(self):
input_sizes = [8, 16, 16, 3]
filter_sizes = [7, 7]
strides = 1
dilations = [2, 2]
layer = layers.Conv2D
self._verifyNodeNameMatch(layer, input_sizes, filter_sizes, strides,
dilations)
def testConv3DNodeNameMatch(self):
input_sizes = [8, 16, 16, 16, 3]
filter_sizes = [7, 7, 7]
strides = 1
dilations = [2, 2, 2]
layer = layers.Conv3D
self._verifyNodeNameMatch(layer, input_sizes, filter_sizes, strides,
dilations)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/conv_node_name_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slicing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class SliceTest(xla_test.XLATestCase):
def test1D(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[10])
with self.test_scope():
o = array_ops.slice(i, [2], [4])
params = {
i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
result = o.eval(feed_dict=params)
self.assertAllEqual([2, 3, 4, 5], result)
def testZeroSlice(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[2])
with self.test_scope():
o = array_ops.slice(i, [0], [0])
params = {
i: [0, 1],
}
result = o.eval(feed_dict=params)
self.assertAllEqual([], result)
def test3D(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[3, 3, 10])
with self.test_scope():
o = array_ops.slice(i, [1, 2, 2], [1, 1, 4])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[6, 5, 4, 3]]], result)
def test3DWithDynamicBegin(self):
"""Tests a slice where the start offset is not known at compile time."""
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[3, 3, 10])
begin = array_ops.placeholder(dtypes.int32, shape=[3])
with self.test_scope():
o = array_ops.slice(i, begin, [1, 1, 4])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]],
begin: [1, 2, 2]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[6, 5, 4, 3]]], result)
def test3DWithDynamicBeginAndNegativeSize(self):
"""Tests a slice where `begin` is fed dynamically and `size` contains -1."""
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[3, 3, 10])
begin = array_ops.placeholder(dtypes.int32, shape=[3])
with self.test_scope():
o = array_ops.slice(i, begin, [1, -1, 4])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]],
begin: [1, 1, 2]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[1, 1, 1, 1], [6, 5, 4, 3]]], result)
class StridedSliceTest(xla_test.XLATestCase):
def test1D(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[10])
with self.test_scope():
o = array_ops.strided_slice(i, [2], [6], [2])
params = {
i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
result = o.eval(feed_dict=params)
self.assertAllEqual([2, 4], result)
def test1DNegativeStride(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[10])
with self.test_scope():
o = array_ops.strided_slice(i, [6], [2], [-2])
params = {
i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
result = o.eval(feed_dict=params)
self.assertAllEqual([6, 4], result)
def test2DDegenerate(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[2, 3])
with self.test_scope():
o = array_ops.strided_slice(i, [-1, 0], [0, 3])
params = {
i: [[0, 1, 2],
[3, 4, 5]]
}
result = o.eval(feed_dict=params)
self.assertEqual(tensor_shape.TensorShape((0, 3)), result.shape)
def test2DDegenerateNegativeStride(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[2, 3])
with self.test_scope():
o = array_ops.strided_slice(i, [0, 0], [-1, 3], [-1, 1])
params = {
i: [[0, 1, 2],
[3, 4, 5]]
}
result = o.eval(feed_dict=params)
self.assertEqual(tensor_shape.TensorShape((0, 3)), result.shape)
def test3D(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[3, 3, 10])
with self.test_scope():
o = array_ops.strided_slice(i, [0, 2, 2], [2, 3, 6], [1, 1, 2])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[1, 9]], [[6, 4]]], result)
def test3DNegativeStride(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[3, 4, 10])
with self.test_scope():
o = array_ops.strided_slice(i, [2, 2, 6], [0, 0, 2], [-1, -1, -2])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0],
[4, 5, 2, 4, 3, 7, 6, 8, 9, 4]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[4, 3, 4, 5, 7, 6, 5, 3, 4, 5],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7],
[7, 1, 7, 1, 8, 1, 8, 1, 3, 1]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9],
[9, 9, 5, 5, 6, 6, 3, 3, 6, 6]]]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[9, 8],
[1, 1]],
[[2, 4],
[5, 7]]], result)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/slice_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adadelta Optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adadelta
class AdadeltaOptimizerTest(xla_test.XLATestCase):
def testBasic(self):
num_updates = 4 # number of ADADELTA steps to perform
if "CPU" in self.device:
# To avoid timeout on CPU.
all_grad = [0.2, 0.01]
all_lr = [1.0, 0.1]
else:
all_grad = [0.2, 0.1, 0.01]
all_lr = [1.0, 0.5, 0.1]
for dtype in self.float_types:
with self.session(), self.test_scope():
for grad in all_grad:
for lr in all_lr:
var0_init = [1.0, 2.0]
var1_init = [3.0, 4.0]
var0 = resource_variable_ops.ResourceVariable(
var0_init, dtype=dtype)
var1 = resource_variable_ops.ResourceVariable(
var1_init, dtype=dtype)
grads = constant_op.constant([grad, grad], dtype=dtype)
accum = 0.0
accum_update = 0.0
# ADADELTA gradient optimizer
rho = 0.95
epsilon = 1e-8
adadelta_opt = adadelta.AdadeltaOptimizer(
learning_rate=lr, rho=rho, epsilon=epsilon)
adadelta_update = adadelta_opt.apply_gradients(
zip([grads, grads], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
opt_vars = adadelta_opt.variables()
self.assertStartsWith(opt_vars[0].name, var0._shared_name)
self.assertStartsWith(opt_vars[1].name, var0._shared_name)
self.assertStartsWith(opt_vars[2].name, var1._shared_name)
self.assertStartsWith(opt_vars[3].name, var1._shared_name)
self.assertEqual(4, len(opt_vars))
# Assign slots
slot = [None] * 2
slot_update = [None] * 2
self.assertEqual(["accum", "accum_update"],
adadelta_opt.get_slot_names())
slot[0] = adadelta_opt.get_slot(var0, "accum")
            self.assertEqual(slot[0].get_shape(), var0.get_shape())
self.assertFalse(slot[0] in variables.trainable_variables())
slot_update[0] = adadelta_opt.get_slot(var0, "accum_update")
            self.assertEqual(slot_update[0].get_shape(), var0.get_shape())
self.assertFalse(slot_update[0] in variables.trainable_variables())
slot[1] = adadelta_opt.get_slot(var1, "accum")
            self.assertEqual(slot[1].get_shape(), var1.get_shape())
self.assertFalse(slot[1] in variables.trainable_variables())
slot_update[1] = adadelta_opt.get_slot(var1, "accum_update")
            self.assertEqual(slot_update[1].get_shape(), var1.get_shape())
self.assertFalse(slot_update[1] in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose(var0_init, self.evaluate(var0))
self.assertAllClose(var1_init, self.evaluate(var1))
update = [None] * num_updates
tot_update = 0
for step in range(num_updates):
# Run adadelta update for comparison
self.evaluate(adadelta_update)
              # NumPy reference computation of the Adadelta step.
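              # Adadelta recurrence mirrored by the lines below:
              #   accum        <- rho * accum + (1 - rho) * grad^2
              #   update       <- sqrt(accum_update + epsilon) / sqrt(accum + epsilon) * grad
              #   accum_update <- rho * accum_update + (1 - rho) * update^2
              #   var          <- var - lr * update  (accumulated in tot_update)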
accum = accum * rho + (grad**2) * (1 - rho)
update[step] = (
np.sqrt(accum_update + epsilon) *
(1. / np.sqrt(accum + epsilon)) * grad)
accum_update = (
accum_update * rho + (update[step]**2) * (1.0 - rho))
tot_update += update[step] * lr
# Check that the accumulators have been updated
for slot_idx in range(2):
self.assertAllCloseAccordingToType(
np.array([accum, accum], dtype=dtype),
self.evaluate(slot[slot_idx]),
rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array([accum_update, accum_update], dtype=dtype),
self.evaluate(slot_update[slot_idx]),
rtol=1e-5)
# Check that the parameters have been updated
self.assertAllCloseAccordingToType(
np.array(
[var0_init[0] - tot_update, var0_init[1] - tot_update],
dtype=dtype),
self.evaluate(var0),
rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array(
[var1_init[0] - tot_update, var1_init[1] - tot_update],
dtype=dtype),
self.evaluate(var1),
rtol=1e-5)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/adadelta_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sorting operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class XlaSortOpTest(xla_test.XLATestCase):
def _assertOpOutputMatchesExpected(self, op, args, expected):
with self.session() as session:
with self.test_scope():
placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
for arg in args
]
feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
output = op(*placeholders)
if isinstance(output, ops.Tensor):
output = [output]
results = session.run(output, feeds)
for result, v in zip(results, expected):
self.assertAllClose(v, result, rtol=1e-3)
def testSort(self):
supported_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
for dtype in supported_types.intersection(self.numeric_types):
x = np.arange(101, dtype=dtype)
np.random.shuffle(x)
self._assertOpOutputMatchesExpected(
xla.sort, [x], expected=[np.arange(101, dtype=dtype)])
def testKeyValueSort(self):
supported_key_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
supported_value_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32,
dtypes.int64.as_numpy_dtype, dtypes.uint64.as_numpy_dtype])
for key_type in supported_key_types.intersection(self.numeric_types):
for value_type in supported_value_types.intersection(self.numeric_types):
x = np.arange(101, dtype=key_type)
np.random.shuffle(x)
y = (-x).astype(value_type)
self._assertOpOutputMatchesExpected(
xla.key_value_sort, [x, y],
expected=[
np.arange(101, dtype=key_type),
-np.arange(101, dtype=value_type)
])
def testTopK(self):
supported_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
for dtype in supported_types.intersection(self.numeric_types):
# Use small input size for bfloat16. Otherwise, we'll get duplicate values
# after conversion to bfloat16, so the possible resulting index array is
# no longer unique.
if dtype == dtypes.bfloat16.as_numpy_dtype:
array_size = 20
k_options = [0, 1, 2, 10, 20]
else:
array_size = 200 * 1000
k_options = [0, 1, 2, 10, 20, 100, 1000, 200 * 1000]
for x in [np.arange(array_size)]:
np.random.shuffle(x)
for k in k_options:
indices = x.argsort()[::-1][:k]
def topk(v, k=k):
return nn_ops.top_k(v, k=k, sorted=True)
self._assertOpOutputMatchesExpected(
topk, [x.astype(dtype)],
expected=[x[indices].astype(dtype), indices])
def testTopK2D(self):
supported_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
for dtype in supported_types.intersection(self.numeric_types):
# Use small input size for bfloat16. Otherwise, we'll get duplicate values
# after conversion to bfloat16, so the possible resulting index array is
# no longer unique.
if dtype == dtypes.bfloat16.as_numpy_dtype:
array_size = 10
k_options = [0, 1, 2, 10]
else:
array_size = 200 * 1000
k_options = [0, 1, 2, 10, 20, 100, 1000, 200 * 1000]
batch = 16
for x in [np.arange(batch * array_size)]:
np.random.shuffle(x)
x = np.reshape(x, [batch, array_size])
for k in k_options:
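          # argsort ascending, then reverse-slice each row to take the indices
          # of the k largest values in descending order.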
indices = x.argsort(axis=1)[::, -1:-k - 1:-1]
expected = np.sort(x, axis=1)[::, -1:-k - 1:-1]
def topk(v, k=k):
return nn_ops.top_k(v, k=k, sorted=True)
self._assertOpOutputMatchesExpected(
topk, [x.astype(dtype)],
expected=[expected.astype(dtype), indices])
def testTopKZeros(self):
"""Tests that positive and negative zeros sort correctly."""
# Only bfloat16 is implemented.
bfloat16 = dtypes.bfloat16.as_numpy_dtype
if bfloat16 not in self.numeric_types:
return
with self.session() as sess:
p = array_ops.placeholder(dtypes.bfloat16)
with self.test_scope():
topk = nn_ops.top_k(p, k=4)
results = sess.run(
topk,
{p: np.array([0., -0., 0., 3., -0., -4., 0., -0.], dtype=bfloat16)})
self.assertAllEqual(
np.array([3., 0., 0., 0.], dtype=bfloat16), results[0])
self.assertEqual(list([3, 0, 2, 6]), list(results[1]))
def testTopKInfinities(self):
"""Tests that positive and negative infinity sort correctly."""
# Only bfloat16 is implemented.
bfloat16 = dtypes.bfloat16.as_numpy_dtype
if bfloat16 not in self.numeric_types:
return
with self.session() as sess:
p = array_ops.placeholder(dtypes.bfloat16)
with self.test_scope():
topk = nn_ops.top_k(p, k=6)
results = sess.run(topk, {
p: np.array(
[1, 2, float("inf"), -float("inf"), -1, -2], dtype=bfloat16)
})
self.assertAllEqual(
np.array(
[float("inf"), 2.0, 1.0, -1.0, -2.0, -float("inf")],
dtype=bfloat16), results[0])
self.assertEqual(list([2, 1, 0, 4, 5, 3]), list(results[1]))
def testInTopK(self):
supported_types = set([np.int32, np.int64])
for dtype in supported_types.intersection(self.numeric_types):
array_size = 200 * 1000
k_options = [0, 1, 2, 10, 20, 100, 1000, 200 * 1000]
batch = 16
for x in [np.arange(batch * array_size)]:
np.random.shuffle(x)
x = np.reshape(x, [batch, array_size])
y = np.random.randint(0, array_size, size=batch)
for k in k_options:
indices = x.argsort(axis=1)[::, -1:-k - 1:-1]
expected = [y[i] in indices[i] for i in range(batch)]
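          # in_top_k(predictions, targets, k) reports, per batch row, whether
          # targets[i] is among the k largest entries of predictions[i].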
def in_topk(predictions, targets, k=k):
return nn_ops.in_top_k(predictions, targets, k)
self._assertOpOutputMatchesExpected(
in_topk,
[x.astype(np.float32), y.astype(dtype)],
expected=[expected])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/sort_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.platform import googletest
class FakeQuantWithMinMaxArgsTest(xla_test.XLATestCase):
"""Test cases for FakeQuantWithMinMaxArgs operation."""
# 8 bits, wide range.
def testOp_with8BitsNoScalingNoNudging(self):
self._TestOp(0.0, 255.0, 8, False, 0.0, 255.0, 1.0)
def testOp_with8BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 128.0, 8, False, 0.0, 127.5, 0.5)
def testOp_with8BitsScalingAndNudgingUp(self):
self._TestOp(-128.0, -0.5, 8, False, -127.5, 0.0, 0.5)
def testOp_with8BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 127.4, 8, False, 0.0, 127.5, 0.5)
# 8 bits, narrow range.
def testOp_with8BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 254.0, 8, True, 0.0, 254.0, 1.0)
def testOp_with8BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 127.1, 8, True, 0.0, 127.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-127.1, -0.1, 8, True, -127.0, 0.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 126.9, 8, True, 0.0, 127.0, 0.5)
# 7 bits, wide range.
def testOp_with7BitsNoScalingNoNudging(self):
self._TestOp(0.0, 127.0, 7, False, 0.0, 127.0, 1.0)
def testOp_with7BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 64.0, 7, False, 0.0, 63.5, 0.5)
def testOp_with7BitsScalingAndNudgingUp(self):
self._TestOp(-64.0, -0.5, 7, False, -63.5, 0.0, 0.5)
def testOp_with7BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 63.4, 7, False, 0.0, 63.5, 0.5)
# 7 bits, narrow range.
def testOp_with7BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 126.0, 7, True, 0.0, 126.0, 1.0)
def testOp_with7BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 63.1, 7, True, 0.0, 63.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-63.1, -0.1, 7, True, -63.0, 0.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 62.9, 7, True, 0.0, 63.0, 0.5)
def _TestOp(self, input_min, input_max, num_bits, narrow_range,
expected_nudged_input_min, expected_nudged_input_max,
expected_step):
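    # The raw [input_min, input_max] range is nudged so that zero lies exactly
    # on a quantization step; inputs are then clamped to the nudged range and
    # rounded to the nearest quantized value (a multiple of expected_step from
    # the nudged min).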
inputs = np.array(
[
expected_nudged_input_min - expected_step,
expected_nudged_input_min - 0.01, expected_nudged_input_min,
expected_nudged_input_min + 0.01,
expected_nudged_input_min + expected_step - 0.01,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step + 0.01,
expected_nudged_input_max - 0.01, expected_nudged_input_max,
expected_nudged_input_max + 0.01,
expected_nudged_input_max + expected_step
],
dtype=np.float32)
expected = np.array(
[
expected_nudged_input_min, expected_nudged_input_min,
expected_nudged_input_min, expected_nudged_input_min,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step,
expected_nudged_input_max, expected_nudged_input_max,
expected_nudged_input_max, expected_nudged_input_max
],
dtype=np.float32)
with self.session() as session:
with self.test_scope():
input_placeholder = array_ops.placeholder(
dtypes.float32, inputs.shape, name="inputs")
outputs = array_ops.fake_quant_with_min_max_args(
input_placeholder,
min=input_min,
max=input_max,
num_bits=num_bits,
narrow_range=narrow_range)
result = session.run(outputs, {input_placeholder: inputs})
self.assertAllCloseAccordingToType(
result, expected, rtol=1e-3, atol=1e-5, bfloat16_rtol=0.03)
class FakeQuantWithMinMaxArgsGradientTest(xla_test.XLATestCase):
"""Test cases for FakeQuantWithMinMaxArgsGradient operation."""
# 8 bits, wide range.
def testOp_with8BitsNoScalingNoNudging(self):
self._TestOp(0.0, 255.0, 8, False, 0.0, 255.0, 1.0)
def testOp_with8BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 128.0, 8, False, 0.0, 127.5, 0.5)
def testOp_with8BitsScalingAndNudgingUp(self):
self._TestOp(-128.0, -0.5, 8, False, -127.5, 0.0, 0.5)
def testOp_with8BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 127.4, 8, False, 0.0, 127.5, 0.5)
# 8 bits, narrow range.
def testOp_with8BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 254.0, 8, True, 0.0, 254.0, 1.0)
def testOp_with8BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 127.1, 8, True, 0.0, 127.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-127.1, -0.1, 8, True, -127.0, 0.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 126.9, 8, True, 0.0, 127.0, 0.5)
# 7 bits, wide range.
def testOp_with7BitsNoScalingNoNudging(self):
self._TestOp(0.0, 127.0, 7, False, 0.0, 127.0, 1.0)
def testOp_with7BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 64.0, 7, False, 0.0, 63.5, 0.5)
def testOp_with7BitsScalingAndNudgingUp(self):
self._TestOp(-64.0, -0.5, 7, False, -63.5, 0.0, 0.5)
def testOp_with7BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 63.4, 7, False, 0.0, 63.5, 0.5)
# 7 bits, narrow range.
def testOp_with7BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 126.0, 7, True, 0.0, 126.0, 1.0)
def testOp_with7BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 63.1, 7, True, 0.0, 63.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-63.1, -0.1, 7, True, -63.0, 0.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 62.9, 7, True, 0.0, 63.0, 0.5)
def _TestOp(self, input_min, input_max, num_bits, narrow_range,
expected_nudged_input_min, expected_nudged_input_max,
expected_step):
inputs = np.array(
[
expected_nudged_input_min - expected_step,
expected_nudged_input_min - 0.01, expected_nudged_input_min,
expected_nudged_input_min + 0.01,
expected_nudged_input_min + expected_step - 0.01,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step + 0.01,
expected_nudged_input_max - 0.01, expected_nudged_input_max,
expected_nudged_input_max + 0.01,
expected_nudged_input_max + expected_step
],
dtype=np.float32)
gradients = np.arange(1, len(inputs) + 1, dtype=np.float32)
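    # Inputs clamped by the fake-quant op (outside the nudged range) receive a
    # zero gradient; in-range inputs pass the incoming gradient through
    # unchanged.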
expected_backprops = np.array(
[0.0, 0.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 0.0, 0.0],
dtype=np.float32)
with self.session() as session:
with self.test_scope():
gradient_placeholder = array_ops.placeholder(
dtypes.float32, gradients.shape, name="gradients")
input_placeholder = array_ops.placeholder(
dtypes.float32, inputs.shape, name="inputs")
outputs = gen_array_ops.fake_quant_with_min_max_args_gradient(
gradient_placeholder,
input_placeholder,
min=input_min,
max=input_max,
num_bits=num_bits,
narrow_range=narrow_range)
backprops = session.run(outputs, {
gradient_placeholder: gradients,
input_placeholder: inputs
})
self.assertAllCloseAccordingToType(
backprops,
expected_backprops,
rtol=1e-3,
atol=1e-5,
bfloat16_rtol=0.03)
class FakeQuantWithMinMaxVarsTest(xla_test.XLATestCase):
"""Test cases for FakeQuantWithMinMaxVars operation."""
# 8 bits, wide range.
def testOp_with8BitsNoScalingNoNudging(self):
self._TestOp(0.0, 255.0, 8, False, 0.0, 255.0, 1.0)
def testOp_with8BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 128.0, 8, False, 0.0, 127.5, 0.5)
def testOp_with8BitsScalingAndNudgingUp(self):
self._TestOp(-128.0, -0.5, 8, False, -127.5, 0.0, 0.5)
def testOp_with8BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 127.4, 8, False, 0.0, 127.5, 0.5)
# 8 bits, narrow range.
def testOp_with8BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 254.0, 8, True, 0.0, 254.0, 1.0)
def testOp_with8BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 127.1, 8, True, 0.0, 127.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-127.1, -0.1, 8, True, -127.0, 0.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 126.9, 8, True, 0.0, 127.0, 0.5)
# 7 bits, wide range.
def testOp_with7BitsNoScalingNoNudging(self):
self._TestOp(0.0, 127.0, 7, False, 0.0, 127.0, 1.0)
def testOp_with7BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 64.0, 7, False, 0.0, 63.5, 0.5)
def testOp_with7BitsScalingAndNudgingUp(self):
self._TestOp(-64.0, -0.5, 7, False, -63.5, 0.0, 0.5)
def testOp_with7BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 63.4, 7, False, 0.0, 63.5, 0.5)
# 7 bits, narrow range.
def testOp_with7BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 126.0, 7, True, 0.0, 126.0, 1.0)
def testOp_with7BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 63.1, 7, True, 0.0, 63.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-63.1, -0.1, 7, True, -63.0, 0.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 62.9, 7, True, 0.0, 63.0, 0.5)
def _TestOp(self, input_min, input_max, num_bits, narrow_range,
expected_nudged_input_min, expected_nudged_input_max,
expected_step):
inputs = np.array(
[
expected_nudged_input_min - expected_step,
expected_nudged_input_min - 0.01, expected_nudged_input_min,
expected_nudged_input_min + 0.01,
expected_nudged_input_min + expected_step - 0.01,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step + 0.01,
expected_nudged_input_max - 0.01, expected_nudged_input_max,
expected_nudged_input_max + 0.01,
expected_nudged_input_max + expected_step
],
dtype=np.float32)
expected = np.array(
[
expected_nudged_input_min, expected_nudged_input_min,
expected_nudged_input_min, expected_nudged_input_min,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step,
expected_nudged_input_max, expected_nudged_input_max,
expected_nudged_input_max, expected_nudged_input_max
],
dtype=np.float32)
with self.session() as session:
with self.test_scope():
input_placeholder = array_ops.placeholder(
dtypes.float32, inputs.shape, name="inputs")
min_placeholder = array_ops.placeholder(dtypes.float32, (), name="min")
max_placeholder = array_ops.placeholder(dtypes.float32, (), name="max")
outputs = array_ops.fake_quant_with_min_max_vars(
input_placeholder,
min_placeholder,
max_placeholder,
num_bits=num_bits,
narrow_range=narrow_range)
result = session.run(
outputs, {
input_placeholder: inputs,
min_placeholder: input_min,
max_placeholder: input_max
})
self.assertAllCloseAccordingToType(
result, expected, rtol=1e-3, atol=1e-5, bfloat16_rtol=0.03)
class FakeQuantWithMinMaxVarsGradientTest(xla_test.XLATestCase):
"""Test cases for FakeQuantWithMinMaxVarsGradient operation."""
# 8 bits, wide range.
def testOp_with8BitsNoScalingNoNudging(self):
self._TestOp(0.0, 255.0, 8, False, 0.0, 255.0, 1.0)
def testOp_with8BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 128.0, 8, False, 0.0, 127.5, 0.5)
def testOp_with8BitsScalingAndNudgingUp(self):
self._TestOp(-128.0, -0.5, 8, False, -127.5, 0.0, 0.5)
def testOp_with8BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 127.4, 8, False, 0.0, 127.5, 0.5)
# 8 bits, narrow range.
def testOp_with8BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 254.0, 8, True, 0.0, 254.0, 1.0)
def testOp_with8BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 127.1, 8, True, 0.0, 127.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-127.1, -0.1, 8, True, -127.0, 0.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 126.9, 8, True, 0.0, 127.0, 0.5)
# 7 bits, wide range.
def testOp_with7BitsNoScalingNoNudging(self):
self._TestOp(0.0, 127.0, 7, False, 0.0, 127.0, 1.0)
def testOp_with7BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 64.0, 7, False, 0.0, 63.5, 0.5)
def testOp_with7BitsScalingAndNudgingUp(self):
self._TestOp(-64.0, -0.5, 7, False, -63.5, 0.0, 0.5)
def testOp_with7BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 63.4, 7, False, 0.0, 63.5, 0.5)
# 7 bits, narrow range.
def testOp_with7BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 126.0, 7, True, 0.0, 126.0, 1.0)
def testOp_with7BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 63.1, 7, True, 0.0, 63.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-63.1, -0.1, 7, True, -63.0, 0.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 62.9, 7, True, 0.0, 63.0, 0.5)
def _TestOp(self, input_min, input_max, num_bits, narrow_range,
expected_nudged_input_min, expected_nudged_input_max,
expected_step):
inputs = np.array(
[
expected_nudged_input_min - expected_step,
expected_nudged_input_min - 0.01, expected_nudged_input_min,
expected_nudged_input_min + 0.01,
expected_nudged_input_min + expected_step - 0.01,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step + 0.01,
expected_nudged_input_max - 0.01, expected_nudged_input_max,
expected_nudged_input_max + 0.01,
expected_nudged_input_max + expected_step
],
dtype=np.float32)
gradients = np.arange(1, len(inputs) + 1, dtype=np.float32)
expected_backprops_wrt_input = np.array(
[0.0, 0.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 0.0, 0.0],
dtype=np.float32)
expected_backprops_wrt_min = 1.0 + 2.0
expected_backprops_wrt_max = 10.0 + 11.0
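    # The first two inputs fall below the nudged range, so their incoming
    # gradients (1 and 2) accumulate into the gradient w.r.t. min; the last two
    # fall above it, so theirs (10 and 11) accumulate into the gradient
    # w.r.t. max. In-range inputs pass the gradient straight through.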
with self.session() as session:
with self.test_scope():
gradient_placeholder = array_ops.placeholder(
dtypes.float32, gradients.shape, name="gradients")
input_placeholder = array_ops.placeholder(
dtypes.float32, inputs.shape, name="inputs")
min_placeholder = array_ops.placeholder(dtypes.float32, (), name="min")
max_placeholder = array_ops.placeholder(dtypes.float32, (), name="max")
outputs = array_ops.fake_quant_with_min_max_vars_gradient(
gradient_placeholder,
input_placeholder,
min_placeholder,
max_placeholder,
num_bits=num_bits,
narrow_range=narrow_range)
backprops_wrt_input, backprops_wrt_min, backprops_wrt_max = session.run(
outputs, {
gradient_placeholder: gradients,
input_placeholder: inputs,
min_placeholder: input_min,
max_placeholder: input_max
})
self.assertAllCloseAccordingToType(
backprops_wrt_input,
expected_backprops_wrt_input,
rtol=1e-3,
atol=1e-5,
bfloat16_rtol=0.03)
self.assertAllCloseAccordingToType(
backprops_wrt_min,
expected_backprops_wrt_min,
rtol=1e-3,
atol=1e-5,
bfloat16_rtol=0.03)
self.assertAllCloseAccordingToType(
backprops_wrt_max,
expected_backprops_wrt_max,
rtol=1e-3,
atol=1e-5,
bfloat16_rtol=0.03)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/fake_quant_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Momentum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class MomentumOptimizerTest(xla_test.XLATestCase):
def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum):
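    # NumPy reference for one Nesterov momentum step: undo the previous
    # lookahead, apply the classical momentum update, then re-apply the
    # lookahead with the new accumulator.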
var += accum * lr * momentum
accum = accum * momentum + g
var -= lr * accum
var -= accum * lr * momentum
return var, accum
def testBasic(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
self.assertFalse(slot0 in variables.trainable_variables())
slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
self.assertFalse(slot1 in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([0.01, 0.01]), self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]), self.evaluate(var1))
def testNesterovMomentum(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([0.1, 0.2], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.3, 0.4], dtype=dtype)
var0_np = np.array([0.1, 0.2], dtype=dtype)
var1_np = np.array([0.3, 0.4], dtype=dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype)
cost = 0.4 * var0 * var0 + 0.9 * var1
global_step = resource_variable_ops.ResourceVariable(
array_ops.zeros([], dtypes.int32), name="global_step")
mom_op = momentum_lib.MomentumOptimizer(
learning_rate=0.1, momentum=0.9, use_nesterov=True)
opt_op = mom_op.minimize(cost, global_step, [var0, var1])
variables.global_variables_initializer().run()
for _ in range(1, 5):
opt_op.run()
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 0.8, 0.1, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(
var1_np, accum1_np, 0.9, 0.1, 0.9)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testTensorLearningRateAndMomentum(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=constant_op.constant(2.0),
momentum=constant_op.constant(0.9))
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
self.assertFalse(slot0 in variables.trainable_variables())
slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
self.assertFalse(slot1 in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([0.01, 0.01]), self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]), self.evaluate(var1))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/momentum_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.cond in XLA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests import xla_test
from tensorflow.python.client import session
from tensorflow.python.compiler.xla import xla
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
@test_util.with_control_flow_v2
class CondTest(xla_test.XLATestCase):
def testCondAndTensorArrayInDefun(self):
# TODO(b/132430685): Make test more useful. Also b/129396295, b/127846988
with self.session(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
@function.defun
def f():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=1)
output = control_flow_ops.cond(
constant_op.constant(True),
lambda: ta.write(0, 5.), lambda: ta.write(0, 10.))
return output.stack()
output_t = f()
self.assertAllEqual([5.], self.evaluate(output_t))
xla_context.Exit()
def testCondAndTensorArrayInDefun_constFolding(self):
g = ops.Graph()
with session.Session(graph=g), g.as_default(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
@function.defun
def f():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=1)
output = control_flow_ops.cond(
constant_op.constant(False),
lambda: ta.write(0, 5.), lambda: ta.write(0, 10.))
return output.stack()
output_t = f()
self.assertAllEqual([10.], self.evaluate(output_t))
xla_context.Exit()
def testCondAndTensorArray_xlaCompile(self):
self.skipTest("b/127846988")
# Fails with "Uninitialized arguments" in XlaIfOp::Compile
with self.session(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
def f():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=1)
output = control_flow_ops.cond(
constant_op.constant(True),
lambda: ta.write(0, 5.), lambda: ta.write(0, 10.))
return output.stack()
output_t, = xla.compile(f)
self.assertAllEqual([5.], self.evaluate(output_t))
xla_context.Exit()
def testCondConstPropagation(self):
with self.session() as sess, self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
x = array_ops.placeholder(dtypes.float32)
p = array_ops.placeholder(dtypes.int32)
# TODO(b/129021699): Wrapping this in a tf.function does not work.
def if_true():
# This emits a StridedSlice op which expects the index to be a
# compile-time const.
return x[p]
def if_false():
return 5.
output = control_flow_ops.cond(
constant_op.constant(True), if_true, if_false)
self.assertAllEqual(1.,
sess.run(output, feed_dict={
x: [0., 1., 2.],
p: 1
}))
xla_context.Exit()
def testCondConstPropagation_xlaCompile(self):
self.skipTest("b/132430685")
with self.session(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
x = array_ops.placeholder_with_default([0., 1., 2.], shape=[3])
p = constant_op.constant(1)
def f():
# TODO(b/129021699): Wrapping this in a tf.function does not work.
def if_true():
# This emits a StridedSlice op which expects the index to be a
# compile-time const.
return x[p]
def if_false():
return 5.
return control_flow_ops.cond(
constant_op.constant(True), if_true, if_false)
output = xla.compile(f)
self.assertAllEqual(1., self.evaluate(output))
xla_context.Exit()
def testCondConstPropagation_errorMsg(self):
self.skipTest("b/132430685")
with self.session() as sess, self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
x = array_ops.placeholder(dtypes.float32)
p = random_ops.random_uniform([], minval=1, maxval=3, dtype=dtypes.int32)
# TODO(b/129021699): Wrapping this in a tf.function does not work.
def if_true():
# This emits a StridedSlice op which expects the index to be a
# compile-time const.
return x[:p]
def if_false():
return array_ops.fill([p], 5.)
output = control_flow_ops.cond(
constant_op.constant(True), if_true, if_false)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"must be a compile-time constant"):
sess.run(
output, feed_dict={
x: [0., 1., 2.],
})
xla_context.Exit()
def testCondConstPropagation_errorMsg_xlaCompile(self):
with self.session() as sess, self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
x = array_ops.placeholder(dtypes.float32)
p = random_ops.random_uniform([], minval=1, maxval=3, dtype=dtypes.int32)
condition = math_ops.cast(
random_ops.random_uniform([], minval=0, maxval=2, dtype=dtypes.int32),
dtypes.bool)
def f():
# TODO(b/129021699): Wrapping this in a tf.function does not work.
def if_true():
# This emits a StridedSlice op which expects the index to be a
# compile-time const.
return x[:p]
def if_false():
return array_ops.fill([p], 5.)
return control_flow_ops.cond(condition, if_true, if_false)
output = xla.compile(f)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"must be a compile-time constant"):
sess.run(
output, feed_dict={
x: [0., 1., 2.],
})
xla_context.Exit()
def testSwitchCaseAndTensorArrayInDefun(self):
self.skipTest("b/127846988")
with self.session(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
@function.defun
def f():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=1)
output = control_flow_ops.switch_case(
constant_op.constant(1), {
0: lambda: ta.write(0, 5.),
1: lambda: ta.write(0, 10.),
2: lambda: ta.write(0, 15.),
})
return output.stack()
output_t = f()
self.assertAllEqual([10.], self.evaluate(output_t))
xla_context.Exit()
def testSwitchCaseAndTensorArray_xlaCompile(self):
self.skipTest("b/127846988")
with self.session(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
def f():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=1)
output = control_flow_ops.switch_case(
constant_op.constant(1), {
0: lambda: ta.write(0, 5.),
1: lambda: ta.write(0, 10.),
2: lambda: ta.write(0, 15.),
})
return output.stack()
output_t, = xla.compile(f)
self.assertAllEqual([10.], self.evaluate(output_t))
xla_context.Exit()
def testSwitchCaseConstPropagation(self):
self.skipTest("b/127846988")
with self.session() as sess, self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
x = array_ops.placeholder(dtypes.float32)
p = array_ops.placeholder(dtypes.int32)
def branch0():
return 5.
def branch1():
return 15.
# TODO(b/129021699): Wrapping this in a tf.function does not work.
def branch2():
# This emits a StridedSlice op which expects the index to be a
# compile-time const.
return x[p]
output = control_flow_ops.switch_case(
constant_op.constant(2), {
0: branch0,
1: branch1,
2: branch2,
})
self.assertAllEqual(7.,
sess.run(output, feed_dict={
x: [0., 1., 7.],
p: 2,
}))
xla_context.Exit()
def testCondNoInputs(self):
"""Verifies against `Failed precondition: Expected one input shape`."""
with self.session(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
for pred in True, False:
cond_out = control_flow_ops.cond(
array_ops.placeholder_with_default(pred, []),
lambda: constant_op.constant(2.),
lambda: constant_op.constant(1.))
self.assertEqual(int(pred) + 1., self.evaluate(cond_out))
xla_context.Exit()
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/cond_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RMSProp optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import rmsprop
class RmspropTest(xla_test.XLATestCase):
def _rmsprop_update_numpy(self,
var,
g,
mg,
rms,
mom,
lr,
decay=0.9,
momentum=0.0,
epsilon=1e-10,
centered=False):
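    """NumPy reference for the (optionally centered) RMSProp update:
      rms_t = decay * rms + (1 - decay) * g**2
      denom_t = rms_t + epsilon (minus mg_t**2 when centered)
      mom_t = momentum * mom + lr * g / sqrt(denom_t)
      var_t = var - mom_t
    """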
rms_t = rms * decay + (1 - decay) * g * g
denom_t = rms_t + epsilon
if centered:
mg_t = mg * decay + (1 - decay) * g
denom_t -= mg_t * mg_t
else:
mg_t = mg
mom_t = momentum * mom + lr * g / np.sqrt(denom_t, dtype=denom_t.dtype)
var_t = var - mom_t
return var_t, mg_t, rms_t, mom_t
def testBasic(self):
for dtype in self.float_types:
for centered in [False, True]:
with self.session(), self.test_scope():
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
mg0_np = np.array([0.0, 0.0], dtype=dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype)
rms0_np = np.array([1.0, 1.0], dtype=dtype)
rms1_np = np.array([1.0, 1.0], dtype=dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype)
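          # The rms accumulators start at 1.0, mirroring the ones-initialized
          # "rms" slot created by RMSPropOptimizer.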
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 3.0
rms_opt = rmsprop.RMSPropOptimizer(learning_rate, centered=centered)
rms_update = rms_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
mg0 = rms_opt.get_slot(var0, "mg")
self.assertEqual(mg0 is not None, centered)
mg1 = rms_opt.get_slot(var1, "mg")
self.assertEqual(mg1 is not None, centered)
rms0 = rms_opt.get_slot(var0, "rms")
self.assertTrue(rms0 is not None)
rms1 = rms_opt.get_slot(var1, "rms")
self.assertTrue(rms1 is not None)
mom0 = rms_opt.get_slot(var0, "momentum")
self.assertTrue(mom0 is not None)
mom1 = rms_opt.get_slot(var1, "momentum")
self.assertTrue(mom1 is not None)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of RMSProp
for _ in range(3):
self.evaluate(rms_update)
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np,
grads0_np,
mg0_np,
rms0_np,
mom0_np,
learning_rate,
centered=centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np,
grads1_np,
mg1_np,
rms1_np,
mom1_np,
learning_rate,
centered=centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/rmsprop_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for binary operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.compat import compat
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class BinaryOpsTest(xla_test.XLATestCase):
"""Test cases for binary operators."""
def _testBinary(self,
op,
a,
b,
expected,
equality_test=None,
rtol=None,
atol=None):
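    """Evaluates op(a, b) through placeholders on the test device and checks
    the result against `expected`, using dtype-dependent default tolerances."""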
with self.session() as session:
with self.test_scope():
pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
output = op(pa, pb)
result = session.run(output, {pa: a, pb: b})
if equality_test is None:
equality_test = self.assertAllCloseAccordingToType
if rtol is None:
rtol = 1e-15 if a.dtype == np.float64 else 1e-3
if atol is None:
atol = 1e-15 if a.dtype == np.float64 else 1e-6
equality_test(result, expected, rtol=rtol, atol=atol)
def _testSymmetricBinary(self, op, a, b, expected, equality_test=None):
self._testBinary(op, a, b, expected, equality_test)
self._testBinary(op, b, a, expected, equality_test)
def ListsAreClose(self, result, expected, rtol, atol):
"""Tests closeness of two lists of floats."""
self.assertEqual(len(result), len(expected))
for i in range(len(result)):
self.assertAllCloseAccordingToType(
result[i], expected[i], rtol=rtol, atol=atol)
def testFloatOps(self):
for dtype in self.float_types:
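      # bfloat16 resolves only ~2-3 significant decimal digits, so coarser
      # "not quite equal" probe values are used for it below.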
if dtype == dtypes.bfloat16.as_numpy_dtype:
a = -1.01
b = 4.1
else:
a = -1.001
b = 4.01
self._testBinary(
lambda x, y: math_ops.approximate_equal(x, y, tolerance=0.0001),
np.array([[[[-1, 2.00009999], [-3, b]]]], dtype=dtype),
np.array([[[[a, 2], [-3.00009, 4]]]], dtype=dtype),
expected=np.array([[[[False, True], [True, False]]]], dtype=dtype))
self._testBinary(
gen_math_ops.real_div,
np.array([3, 3, -1.5, -8, 44], dtype=dtype),
np.array([2, -2, 7, -4, 0], dtype=dtype),
expected=np.array(
[1.5, -1.5, -0.2142857, 2, float("inf")], dtype=dtype),
rtol=1e-6,
atol=1e-8)
self._testBinary(
math_ops.atan2,
np.array([0, np.sqrt(2), 1, np.sqrt(2), 0], dtype),
np.array([1, np.sqrt(2), 0, -np.sqrt(2), -1], dtype),
expected=np.array(
[0, np.pi / 4, np.pi / 2, np.pi * 3 / 4, np.pi], dtype=dtype))
self._testBinary(
gen_math_ops.reciprocal_grad,
np.array([4, -3, -2, 1], dtype=dtype),
np.array([5, -6, 7, -8], dtype=dtype),
expected=np.array([-80, 54, -28, 8], dtype=dtype))
self._testBinary(
gen_math_ops.sigmoid_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-60, -36, -14, 0], dtype=dtype))
self._testBinary(
gen_math_ops.rsqrt_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-160, -81, -28, -4], dtype=dtype))
self._testBinary(
gen_math_ops.sqrt_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([0.625, 1, 1.75, 4], dtype=dtype))
self._testBinary(
gen_nn_ops.softplus_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([3.97322869, 2.99258232, 1.99817801, 0.99966466],
dtype=dtype),
rtol=1e-4,
atol=1e-8)
self._testBinary(
gen_nn_ops.softsign_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([0.11111111, 0.06122449, 0.03125, 0.01234568],
dtype=dtype),
rtol=1e-6,
atol=1e-8)
self._testBinary(
gen_math_ops.tanh_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-75, -48, -21, 0], dtype=dtype))
self._testBinary(
gen_nn_ops.elu_grad,
np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([-.6, -.4, -.2, 0, .2, .4], dtype=dtype),
expected=np.array([0.4, 1.2, 2.4, 4, 5, 6], dtype=dtype))
self._testBinary(
gen_nn_ops.selu_grad,
np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([-.6, -.4, -.2, .2, .4, .6], dtype=dtype),
expected=np.array([
1.158099340847, 2.7161986816948, 4.67429802254, 4.202803949422,
5.2535049367774, 6.30420592413
],
dtype=dtype),
rtol=1e-10,
atol=1e-10)
self._testBinary(
gen_nn_ops.relu_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9], dtype=dtype),
expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10], dtype=dtype))
self._testBinary(
gen_nn_ops.relu6_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtype),
np.array(
[0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9, 6.1, 10.0], dtype=dtype),
expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10, 0, 0], dtype=dtype))
self._testBinary(
gen_nn_ops.leaky_relu_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9], dtype=dtype),
expected=np.array([0.2, 0.4, 0.6, 0.8, 1, 6, 7, 8, 9, 10],
dtype=dtype),
rtol=1e-8,
atol=1e-8)
self._testBinary(
gen_nn_ops.softmax_cross_entropy_with_logits,
np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=dtype),
np.array([[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]], dtype=dtype),
expected=[
np.array([1.44019, 2.44019], dtype=dtype),
np.array([[-0.067941, -0.112856, -0.063117, 0.243914],
[-0.367941, -0.212856, 0.036883, 0.543914]],
dtype=dtype),
],
equality_test=self.ListsAreClose,
rtol=1e-4,
atol=1e-8)
# TODO(b/68813416): Fails with bfloat16.
if dtype != dtypes.bfloat16.as_numpy_dtype:
self._testBinary(
gen_nn_ops.sparse_softmax_cross_entropy_with_logits,
np.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8],
[0.9, 1.0, 1.1, 1.2]],
dtype=dtype),
np.array([2, 1, 7], dtype=np.int32),
expected=[
np.array([1.342536, 1.442536, np.nan], dtype=dtype),
np.array([[0.213838, 0.236328, -0.738817, 0.288651],
[0.213838, -0.763672, 0.261183, 0.288651],
[np.nan, np.nan, np.nan, np.nan]],
dtype=dtype),
],
equality_test=self.ListsAreClose,
rtol=1e-5,
atol=1e-8)
# TF doesn't define these for bf16.
if dtype != dtypes.bfloat16.as_numpy_dtype:
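        # xdivy and xlogy are defined to return 0 whenever x == 0, even if y
        # is 0 or NaN, hence the zero entries in the expected values below.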
self._testBinary(
gen_math_ops.xdivy,
np.array([0, 4, 3, 2, 1, 0], dtype=dtype),
np.array([0, 5, 6, 7, 8, float("NaN")], dtype=dtype),
expected=np.array([0, 0.8, 0.5, 0.285714, 0.125, 0], dtype=dtype),
rtol=1e-6,
atol=1e-6)
self._testBinary(
gen_math_ops.xlogy,
np.array([0, 4, 3, 2, 1, 0], dtype=dtype),
np.array([0, 5, 6, 7, 8, float("NaN")], dtype=dtype),
expected=np.array([0, 6.437752, 5.375278, 3.89182, 2.079442, 0],
dtype=dtype),
rtol=1e-4,
atol=1e-6)
def testIntOps(self):
for dtype in self.signed_int_types:
self._testBinary(
gen_math_ops.truncate_div,
np.array([3, 3, -1, -9, -8], dtype=dtype),
np.array([2, -2, 7, 2, -4], dtype=dtype),
expected=np.array([1, -1, 0, -4, 2], dtype=dtype))
self._testSymmetricBinary(
bitwise_ops.bitwise_and,
np.array([0b1, 0b101, 0b1000], dtype=dtype),
np.array([0b0, 0b101, 0b1001], dtype=dtype),
expected=np.array([0b0, 0b101, 0b1000], dtype=dtype))
self._testSymmetricBinary(
bitwise_ops.bitwise_or,
np.array([0b1, 0b101, 0b1000], dtype=dtype),
np.array([0b0, 0b101, 0b1001], dtype=dtype),
expected=np.array([0b1, 0b101, 0b1001], dtype=dtype))
self._testSymmetricBinary(
bitwise_ops.bitwise_xor,
np.array([0b1, 0b111, 0b1100], dtype=dtype),
np.array([0b0, 0b101, 0b1001], dtype=dtype),
expected=np.array([0b1, 0b010, 0b0101], dtype=dtype))
lhs = np.array([0, 5, 3, 14], dtype=dtype)
rhs = np.array([5, 0, 7, 11], dtype=dtype)
self._testBinary(
bitwise_ops.left_shift, lhs, rhs,
expected=np.left_shift(lhs, rhs))
self._testBinary(
bitwise_ops.right_shift, lhs, rhs,
expected=np.right_shift(lhs, rhs))
if dtype in [np.int8, np.int16, np.int32, np.int64]:
lhs = np.array([-1, -5, -3, -14, -2], dtype=dtype)
rhs = np.array([5, 0, 1, 11, 36], dtype=dtype)
# HLO has saturating shift behavior.
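        # For shift amounts >= the type width (e.g. 36 for int32), the result
        # saturates to np.sign(l), so -2 >> 36 is expected to yield -1.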
bits = np.ceil(
np.log(np.iinfo(dtype).max - np.iinfo(dtype).min) / np.log(2))
expected = [
np.right_shift(l, r) if r < bits else np.sign(l)
for l, r in zip(lhs, rhs)
]
self._testBinary(bitwise_ops.right_shift, lhs, rhs, expected=expected)
def testAdd(self):
for dtype in self.numeric_types:
self._testBinary(
math_ops.add,
np.array([1, 2, 0], dtype=dtype),
np.array([10, 20, 0], dtype=dtype),
expected=np.array([11, 22, 0], dtype=dtype))
self._testBinary(
math_ops.add,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([6, 7], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[8], [9]], dtype=dtype))
if dtype not in self.int_types:
self._testBinary(
math_ops.add,
np.array([1.9131952969218875, 1.596299504298079], dtype=dtype),
np.array([1.1137667913355869, 1.7186636469261405], dtype=dtype),
expected=np.array([3.0269620882574744, 3.3149631512242195],
dtype=dtype))
def testMultiply(self):
for dtype in self.numeric_types:
self._testBinary(
math_ops.multiply,
np.array([1, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([10, 40], dtype=dtype))
self._testBinary(
math_ops.multiply,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([5, 100], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[70], [14]], dtype=dtype))
if dtype not in self.int_types:
self._testBinary(
math_ops.multiply,
np.array([1.9131952969218875, 1.596299504298079], dtype=dtype),
np.array([1.1137667913355869, 1.7186636469261405], dtype=dtype),
expected=np.array([2.130853387051026, 2.743501927643327],
dtype=dtype),
rtol=1e-14)
def testPow(self):
for dtype in self.float_types:
rtol = 1e-14 if dtype == np.float64 else None
self._testBinary(
math_ops.pow,
dtype(3),
dtype(4),
expected=dtype(81),
rtol=rtol)
self._testBinary(
math_ops.pow,
np.array([1, 2], dtype=dtype),
np.zeros(shape=[0, 2], dtype=dtype),
expected=np.zeros(shape=[0, 2], dtype=dtype),
rtol=rtol)
self._testBinary(
math_ops.pow,
np.array([10, 4], dtype=dtype),
np.array([2, 3], dtype=dtype),
expected=np.array([100, 64], dtype=dtype),
rtol=rtol)
self._testBinary(
math_ops.pow,
dtype(2),
np.array([3, 4], dtype=dtype),
expected=np.array([8, 16], dtype=dtype),
rtol=rtol)
self._testBinary(
math_ops.pow,
np.array([[2], [3]], dtype=dtype),
dtype(4),
expected=np.array([[16], [81]], dtype=dtype),
rtol=rtol)
def testNumericOps(self):
for dtype in self.numeric_types:
self._testBinary(
math_ops.subtract,
np.array([1, 2, 100], dtype=dtype),
np.array([10, 20, -1], dtype=dtype),
expected=np.array([-9, -18, 101], dtype=dtype))
self._testBinary(
math_ops.subtract,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([4, 3], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[-6], [-5]], dtype=dtype))
      # min/max not supported for complex (uint8 and int8 are also skipped here).
if dtype not in self.complex_types | {np.uint8, np.int8}:
self._testBinary(
math_ops.maximum,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([10, 20], dtype=dtype))
self._testBinary(
math_ops.maximum,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([5, 20], dtype=dtype))
self._testBinary(
math_ops.maximum,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[10], [7]], dtype=dtype))
self._testBinary(
math_ops.minimum,
np.array([1, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._testBinary(
math_ops.minimum,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([1, 5], dtype=dtype))
self._testBinary(
math_ops.minimum,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[7], [2]], dtype=dtype))
# Complex support for squared_difference is incidental, see b/68205550
if dtype not in self.complex_types | {np.uint8, np.int8}:
self._testBinary(
math_ops.squared_difference,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([81, 324], dtype=dtype))
self._testBinary(
math_ops.squared_difference,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([16, 9], dtype=dtype))
self._testBinary(
math_ops.squared_difference,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[36], [25]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([2, -1], dtype=dtype),
expected=np.array([[3, 1], [5, 3]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[[[1, 2], [3, 4]]]], dtype=dtype),
np.array([2, -1], dtype=dtype),
expected=np.array([[[[3, 1], [5, 3]]]], dtype=dtype))
if np.int64 in self.numeric_types:
self._testBinary(
math_ops.add,
np.array([0xffffffff, 0xfffffffff, 1, 1], dtype=np.int64),
np.array([1, 1, 0xffffffff, 0xfffffffff], dtype=np.int64),
expected=np.array([1 << 32, 1 << 36, 1 << 32, 1 << 36],
dtype=np.int64))
def testNextAfter(self):
for dtype in self.numeric_types:
if dtype in [np.float32, np.float64]:
x = np.array([
-0.0, 0.0, -0.0, +0.0, np.inf, np.inf, -np.inf, -np.inf, 2.0, 2.0,
1.0
],
dtype=dtype)
y = np.array(
[-0.0, 0.0, +0.0, -0.0, 1.0, -1.0, 1.0, -1.0, 2.0, 1.0, 2.0],
dtype=dtype)
expected = np.nextafter(x, y)
# We use assertAllEqual to expose any bugs hidden by relative or
# absolute error tolerances.
def NextAfterEqualityTest(result, expected, rtol, atol):
del rtol
del atol
return self.assertAllEqual(result, expected)
self._testBinary(
math_ops.nextafter,
x,
y,
expected=expected,
equality_test=NextAfterEqualityTest)
def testComplexOps(self):
for dtype in self.complex_types:
ctypes = {np.complex64: np.float32, np.complex128: np.float64}
self._testBinary(
math_ops.complex,
np.array([[[[-1, 2], [2, 0]]]], dtype=ctypes[dtype]),
np.array([[[[2, -3], [0, 4]]]], dtype=ctypes[dtype]),
expected=np.array([[[[-1 + 2j, 2 - 3j], [2, 4j]]]], dtype=dtype))
self._testBinary(
lambda x, y: math_ops.approximate_equal(x, y, tolerance=0.0001),
np.array(
[[[[-1 + 2j, 2.00009999 - 3j], [2 - 3j, 3 + 4.01j]]]],
dtype=dtype),
np.array(
[[[[-1.001 + 2j, 2 - 3j], [2 - 3.00009j, 3 + 4j]]]], dtype=dtype),
expected=np.array([[[[False, True], [True, False]]]], dtype=dtype))
self._testBinary(
gen_math_ops.real_div,
np.array([3, 3j, -1.5j, -8, 2 + 3j, 2 + 4j], dtype=dtype),
np.array([2, -2, 7j, -4j, 4 - 6j, 1 + 2j], dtype=dtype),
expected=np.array(
[1.5, -1.5j, -0.2142857, -2j, (2 + 3j) / (4 - 6j), 2],
dtype=dtype))
self._testBinary(
math_ops.pow,
dtype(3 + 2j),
dtype(4 - 5j),
expected=np.power(dtype(3 + 2j), dtype(4 - 5j)))
self._testBinary( # empty rhs
math_ops.pow,
np.array([1 + 2j, 2 - 3j], dtype=dtype),
np.zeros(shape=[0, 2], dtype=dtype),
expected=np.zeros(shape=[0, 2], dtype=dtype))
self._testBinary( # to zero power
math_ops.pow,
np.array([1 + 2j, 2 - 3j], dtype=dtype),
np.zeros(shape=[1, 2], dtype=dtype),
expected=np.ones(shape=[1, 2], dtype=dtype))
lhs = np.array([1 - 2j, 4 + 3j, 2 - 3j, 3, 2j, 1, 4], dtype=dtype)
rhs = np.array([2, 3j, 3 + 4j, 2 + 3j, 3 - 2j, 2, 3 + 3j], dtype=dtype)
scalar = dtype(2 + 2j)
self._testBinary(math_ops.pow, lhs, rhs, expected=np.power(lhs, rhs))
self._testBinary(
math_ops.pow, scalar, rhs, expected=np.power(scalar, rhs))
self._testBinary(math_ops.pow, lhs, scalar, np.power(lhs, scalar))
lhs = np.array([4 + 2j, -3 - 1j, 2j, 1], dtype=dtype)
rhs = np.array([5, -6j, 7 - 3j, -8j], dtype=dtype)
self._testBinary(
gen_math_ops.reciprocal_grad, lhs, rhs, expected=-rhs * lhs * lhs)
self._testBinary(
gen_math_ops.sigmoid_grad, lhs, rhs, expected=rhs * lhs * (1 - lhs))
self._testBinary(
gen_math_ops.rsqrt_grad, lhs, rhs, expected=lhs**3 * rhs / -2)
self._testBinary(
gen_math_ops.sqrt_grad, lhs, rhs, expected=rhs / (2 * lhs))
self._testBinary(
gen_math_ops.tanh_grad, lhs, rhs, expected=rhs * (1 - lhs * lhs))
def testComplexMath(self):
for dtype in self.complex_types:
self._testBinary(
math_ops.add,
np.array([1 + 3j, 2 + 7j], dtype=dtype),
np.array([10 - 4j, 20 + 17j], dtype=dtype),
expected=np.array([11 - 1j, 22 + 24j], dtype=dtype))
self._testBinary(
math_ops.add,
dtype(5 - 7j),
np.array([1 + 2j, 2 + 4j], dtype=dtype),
expected=np.array([6 - 5j, 7 - 3j], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
dtype(7 + 5j),
expected=np.array([[8 + 3j], [9 + 6j]], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([1 + 3j, 2 + 7j], dtype=dtype),
np.array([10 - 4j, 20 + 17j], dtype=dtype),
expected=np.array([-9 + 7j, -18 - 10j], dtype=dtype))
self._testBinary(
math_ops.subtract,
dtype(5 - 7j),
np.array([1 + 2j, 2 + 4j], dtype=dtype),
expected=np.array([4 - 9j, 3 - 11j], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
dtype(7 + 5j),
expected=np.array([[-6 - 7j], [-5 - 4j]], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([1 + 3j, 2 + 7j], dtype=dtype),
np.array([10 - 4j, 20 + 17j], dtype=dtype),
expected=np.array(
[(1 + 3j) * (10 - 4j), (2 + 7j) * (20 + 17j)], dtype=dtype))
self._testBinary(
math_ops.multiply,
dtype(5 - 7j),
np.array([1 + 2j, 2 + 4j], dtype=dtype),
expected=np.array(
[(5 - 7j) * (1 + 2j), (5 - 7j) * (2 + 4j)], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
dtype(7 + 5j),
expected=np.array(
[[(7 + 5j) * (1 - 2j)], [(7 + 5j) * (2 + 1j)]], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([8 - 1j, 2 + 16j], dtype=dtype),
np.array([2 + 4j, 4 - 8j], dtype=dtype),
expected=np.array(
[(8 - 1j) / (2 + 4j), (2 + 16j) / (4 - 8j)], dtype=dtype))
self._testBinary(
math_ops.div,
dtype(1 + 2j),
np.array([2 + 4j, 4 - 8j], dtype=dtype),
expected=np.array(
[(1 + 2j) / (2 + 4j), (1 + 2j) / (4 - 8j)], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([2 + 4j, 4 - 8j], dtype=dtype),
dtype(1 + 2j),
expected=np.array(
[(2 + 4j) / (1 + 2j), (4 - 8j) / (1 + 2j)], dtype=dtype))
# TODO(b/68205550): math_ops.squared_difference shouldn't be supported.
self._testBinary(
nn_ops.bias_add,
np.array([[1 + 2j, 2 + 7j], [3 - 5j, 4 + 2j]], dtype=dtype),
np.array([2 + 6j, -1 - 3j], dtype=dtype),
expected=np.array([[3 + 8j, 1 + 4j], [5 + 1j, 3 - 1j]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[[[1 + 4j, 2 - 1j], [3 + 7j, 4]]]], dtype=dtype),
np.array([2 + 1j, -1 + 2j], dtype=dtype),
expected=np.array(
[[[[3 + 5j, 1 + 1j], [5 + 8j, 3 + 2j]]]], dtype=dtype))
def _testDivision(self, dtype):
"""Test cases for division operators."""
self._testBinary(
math_ops.div,
np.array([10, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([1, 10], dtype=dtype))
self._testBinary(
math_ops.div,
dtype(40),
np.array([2, 20], dtype=dtype),
expected=np.array([20, 2], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([[10], [4]], dtype=dtype),
dtype(2),
expected=np.array([[5], [2]], dtype=dtype))
if dtype in [np.float32, np.float64]:
nums = np.arange(-10, 10, .25, dtype=dtype).reshape(80, 1)
divs = np.arange(-3, 3, .25, dtype=dtype).reshape(1, 24)
np_result = np.true_divide(nums, divs)
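      # div_no_nan returns 0 wherever the divisor is 0, so zero out those
      # columns of the NumPy reference.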
np_result[:, divs[0] == 0] = 0
self._testBinary(
gen_math_ops.div_no_nan,
nums,
divs,
expected=np_result,
rtol=7e-15 if dtype == np.float64 else None,
atol=3.9e-15 if dtype == np.float64 else None)
if dtype not in self.complex_types: # floordiv unsupported for complex.
self._testBinary(
gen_math_ops.floor_div,
np.array([3, 3, -1, -9, -8], dtype=dtype),
np.array([2, -2, 7, 2, -4], dtype=dtype),
expected=np.array([1, -2, -1, -5, 2], dtype=dtype))
def testIntDivision(self):
for dtype in self.signed_int_types:
self._testDivision(dtype)
def testFloatDivision(self):
for dtype in self.float_types | self.complex_types:
self._testDivision(dtype)
def _testRemainder(self, dtype):
"""Test cases for remainder operators."""
self._testBinary(
gen_math_ops.floor_mod,
np.array([3, 3, -1, -8], dtype=dtype),
np.array([2, -2, 7, -4], dtype=dtype),
expected=np.array([1, -1, 6, 0], dtype=dtype))
self._testBinary(
gen_math_ops.truncate_mod,
np.array([3, 3, -1, -8], dtype=dtype),
np.array([2, -2, 7, -4], dtype=dtype),
expected=np.array([1, 1, -1, 0], dtype=dtype))
def testIntRemainder(self):
for dtype in self.signed_int_types - {np.int8}:
self._testRemainder(dtype)
def testFloatRemainder(self):
for dtype in self.float_types:
self._testRemainder(dtype)
def testLogicalOps(self):
self._testBinary(
math_ops.logical_and,
np.array([[True, False], [False, True]], dtype=np.bool),
np.array([[False, True], [False, True]], dtype=np.bool),
expected=np.array([[False, False], [False, True]], dtype=np.bool))
self._testBinary(
math_ops.logical_or,
np.array([[True, False], [False, True]], dtype=np.bool),
np.array([[False, True], [False, True]], dtype=np.bool),
expected=np.array([[True, True], [False, True]], dtype=np.bool))
def testComparisons(self):
self._testBinary(
math_ops.equal,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, True, False], dtype=np.bool))
self._testBinary(
math_ops.equal,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, True, False], dtype=np.bool))
self._testBinary(
math_ops.equal,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [True], [False]], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, False, True], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, False, True], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [False], [True]], dtype=np.bool))
for greater_op in [math_ops.greater, (lambda x, y: x > y)]:
self._testBinary(
greater_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, False, True], dtype=np.bool))
self._testBinary(
greater_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, False, False], dtype=np.bool))
self._testBinary(
greater_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [False], [False]], dtype=np.bool))
for greater_equal_op in [math_ops.greater_equal, (lambda x, y: x >= y)]:
self._testBinary(
greater_equal_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, True, True], dtype=np.bool))
self._testBinary(
greater_equal_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, True, False], dtype=np.bool))
self._testBinary(
greater_equal_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [True], [False]], dtype=np.bool))
for less_op in [math_ops.less, (lambda x, y: x < y)]:
self._testBinary(
less_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, False, False], dtype=np.bool))
self._testBinary(
less_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, False, True], dtype=np.bool))
self._testBinary(
less_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [False], [True]], dtype=np.bool))
if np.int64 in self.numeric_types:
self._testBinary(
less_op,
np.array([[10], [7], [2], [-1]], dtype=np.int64),
np.int64(7),
expected=np.array(
[[False], [False], [True], [True]], dtype=np.bool))
for less_equal_op in [math_ops.less_equal, (lambda x, y: x <= y)]:
self._testBinary(
less_equal_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, True, False], dtype=np.bool))
self._testBinary(
less_equal_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, True, True], dtype=np.bool))
self._testBinary(
less_equal_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [True], [True]], dtype=np.bool))
def testS64Comparisons(self):
for op in [(lambda x, y: x < y), (lambda x, y: x <= y),
(lambda x, y: x >= y), (lambda x, y: x > y)]:
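      # The operand pairs differ in their low and/or high 32-bit halves,
      # presumably to catch lowerings that only compare part of a 64-bit value.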
lhs = np.array(
[
np.int64(0x000000007FFFFFFF),
np.int64(0x000000007FFFFFFF),
np.int64(0x0000000080000000),
np.int64(0x0000000080000000),
np.int64(0x0000000080000001),
np.int64(0x00000000FFFF0000),
np.int64(0x00000000FFFF0000),
np.int64(0x00000000FFFFFFFE),
np.int64(0x00000000FFFFFFFF),
np.int64(0x00000000FFFFFFFF),
np.int64(0x0000000100000000),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(-0x7FFFFFFF00000002),
np.int64(-0x7FFFFFFF00000002),
np.int64(-0x7FFFFFFF00000001),
np.int64(-0x7FFFFFFF00000001),
np.int64(-0x7FFFFFFF00000001),
np.int64(-0x7FFFFFFF00000001),
np.int64(0x7ffffffefff00010),
np.int64(0x7ffffffefff00010),
np.int64(-1),
np.int64(-1)
],
dtype=np.int64)
rhs = np.array(
[
np.int64(0x000000007FFFFFFE),
np.int64(0x000000007FFFFFFF),
np.int64(0x000000007FFFFFFF),
np.int64(0x0000000080000000),
np.int64(0x0000000080000001),
np.int64(0x00000000FFFF0000),
np.int64(0x00000000FFFF0001),
np.int64(0x00000000FFFFFFFF),
np.int64(0x00000000FFFFFFFE),
np.int64(0x00000000FFFFFFFF),
np.int64(0x00000000FFFFFFFF),
np.int64(0x0000000100000001),
np.int64(0x0000000100000002),
np.int64(0x0000000100000003),
np.int64(0x0000000200000001),
np.int64(0x0000000200000002),
np.int64(0x0000000200000003),
np.int64(0x0000000300000001),
np.int64(0x0000000300000002),
np.int64(0x0000000300000003),
np.int64(0x00000000FFFFFFFF),
np.int64(-0x7FFFFFFF00000001),
np.int64(0x00000000FFFFFFFE),
np.int64(0x00000000FFFFFFFF),
np.int64(-0x7FFFFFFF00000002),
np.int64(-0x7FFFFFFF00000001),
np.int64(0x00000000FFFFFFFF),
np.int64(-0x7FFFFFFF00000001),
np.int64(-2),
np.int64(-1)
],
dtype=np.int64)
expected = np.array([op(l, r) for l, r in zip(lhs, rhs)], dtype=np.bool)
self._testBinary(op, lhs, rhs, expected=expected)
def testBroadcasting(self):
"""Tests broadcasting behavior of an operator."""
for dtype in self.numeric_types:
self._testBinary(
math_ops.add,
np.array(3, dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([13, 23], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([10, 20], dtype=dtype),
np.array(4, dtype=dtype),
expected=np.array([14, 24], dtype=dtype))
# [1,3] x [4,1] => [4,3]
self._testBinary(
math_ops.add,
np.array([[10, 20, 30]], dtype=dtype),
np.array([[1], [2], [3], [4]], dtype=dtype),
expected=np.array(
[[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
dtype=dtype))
# [3] * [4,1] => [4,3]
self._testBinary(
math_ops.add,
np.array([10, 20, 30], dtype=dtype),
np.array([[1], [2], [3], [4]], dtype=dtype),
expected=np.array(
[[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
dtype=dtype))
def testFill(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.fill,
np.array([], dtype=np.int32),
dtype(-42),
expected=dtype(-42))
self._testBinary(
array_ops.fill,
np.array([1, 2], dtype=np.int32),
dtype(7),
expected=np.array([[7, 7]], dtype=dtype))
self._testBinary(
array_ops.fill,
np.array([3, 2], dtype=np.int32),
dtype(50),
expected=np.array([[50, 50], [50, 50], [50, 50]], dtype=dtype))
  # Helper method used by testMatMul and DISABLED_testSparseMatMul below.
def _testMatMul(self, op, test_dtypes):
for dtype in test_dtypes:
self._testBinary(
op,
np.array([[-0.25]], dtype=dtype),
np.array([[8]], dtype=dtype),
expected=np.array([[-2]], dtype=dtype))
self._testBinary(
op,
np.array([[100, 10, 0.5]], dtype=dtype),
np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
expected=np.array([[123, 354]], dtype=dtype))
self._testBinary(
op,
np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
np.array([[100], [10]], dtype=dtype),
expected=np.array([[130], [250], [680]], dtype=dtype))
self._testBinary(
op,
np.array([[1000, 100], [10, 1]], dtype=dtype),
np.array([[1, 2], [3, 4]], dtype=dtype),
expected=np.array([[1300, 2400], [13, 24]], dtype=dtype))
self._testBinary(
op,
np.array([], dtype=dtype).reshape((2, 0)),
np.array([], dtype=dtype).reshape((0, 3)),
expected=np.array([[0, 0, 0], [0, 0, 0]], dtype=dtype))
def testMatMul(self):
self._testMatMul(math_ops.matmul, self.float_types | {np.float64})
for dtype in self.float_types | {np.float64}:
self._testBinary(
math_ops.matmul,
np.array([[3.1415926535897932]], dtype=dtype),
np.array([[2.7182818284590452]], dtype=dtype),
expected=np.array([[8.5397342226735668]], dtype=dtype))
      # Edge case spanning a very large exponent range. Not supported by float16.
if dtype != np.float16:
self._testBinary(
math_ops.matmul,
np.array([[9.4039548065783000e-38]], dtype=dtype),
np.array([[4.5070591730234615e37]], dtype=dtype),
expected=np.array([[4.2384180773686798]], dtype=dtype))
# TODO(phawkins): failing on GPU, no registered kernel.
def DISABLED_testSparseMatMul(self):
# Binary wrappers for sparse_matmul with different hints
def SparseMatmulWrapperTF(a, b):
return math_ops.sparse_matmul(a, b, a_is_sparse=True)
def SparseMatmulWrapperFT(a, b):
return math_ops.sparse_matmul(a, b, b_is_sparse=True)
def SparseMatmulWrapperTT(a, b):
return math_ops.sparse_matmul(a, b, a_is_sparse=True, b_is_sparse=True)
self._testMatMul(math_ops.sparse_matmul, self.float_types)
self._testMatMul(SparseMatmulWrapperTF, self.float_types)
self._testMatMul(SparseMatmulWrapperFT, self.float_types)
self._testMatMul(SparseMatmulWrapperTT, self.float_types)
def testBatchMatMul(self):
# Tests with batches of matrices.
for dtype in self.float_types | {np.float64}:
self._testBinary(
math_ops.matmul,
np.array([[[-0.25]]], dtype=dtype),
np.array([[[8]]], dtype=dtype),
expected=np.array([[[-2]]], dtype=dtype))
self._testBinary(
math_ops.matmul,
np.array([[[-0.25]], [[4]]], dtype=dtype),
np.array([[[8]], [[2]]], dtype=dtype),
expected=np.array([[[-2]], [[8]]], dtype=dtype))
self._testBinary(
math_ops.matmul,
np.array([[[[7, 13], [10, 1]], [[2, 0.25], [20, 2]]],
[[[3, 5], [30, 3]], [[0.75, 1], [40, 4]]]],
dtype=dtype),
np.array([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[11, 22], [33, 44]], [[55, 66], [77, 88]]]],
dtype=dtype),
expected=np.array(
[[[[46, 66], [13, 24]], [[11.75, 14], [114, 136]]],
[[[198, 286], [429, 792]], [[118.25, 137.5], [2508, 2992]]]],
dtype=dtype))
self._testBinary(
math_ops.matmul,
np.array([], dtype=dtype).reshape((2, 2, 0)),
np.array([], dtype=dtype).reshape((2, 0, 3)),
expected=np.array([[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]],
dtype=dtype))
self._testBinary(
math_ops.matmul,
np.array([], dtype=dtype).reshape((0, 2, 4)),
np.array([], dtype=dtype).reshape((0, 4, 3)),
expected=np.array([], dtype=dtype).reshape(0, 2, 3))
# Regression test for b/31472796.
if dtype != np.float16 and hasattr(np, "matmul"):
x = np.arange(0, 3 * 5 * 2 * 7, dtype=dtype).reshape((3, 5, 2, 7))
self._testBinary(
lambda x, y: math_ops.matmul(x, y, adjoint_b=True),
x,
x,
expected=np.matmul(x, x.transpose([0, 1, 3, 2])))
def testExpandDims(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.expand_dims,
dtype(7),
np.int32(0),
expected=np.array([7], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([42], dtype=dtype),
np.array([0], dtype=np.int64),
expected=np.array([[42]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([], dtype=dtype),
np.int32(0),
expected=np.array([[]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(0),
expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(1),
expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(2),
expected=np.array([[[[1, 2]], [[3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(3),
expected=np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.array([2], dtype=np.int64),
expected=np.array([[[[1, 2]], [[3, 4]]]], dtype=dtype))
def testBatchMatMulBroadcast(self):
"""Tests broadcasting behavior of BatchMatMul."""
with compat.forward_compatibility_horizon(2019, 4, 26):
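      # Leading (batch) dimensions broadcast as in NumPy, while the trailing
      # two dimensions follow ordinary matmul rules; see the shape comments.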
# [2, 3] @ [1, 3, 4] -> [1, 2, 4]
self._testBinary(
math_ops.matmul,
np.array([[10, 20, 30], [11, 21, 31]], dtype=np.float32),
np.array([[[1, 2, 3, 4], [2, 4, 6, 8], [3, 6, 9, 12]]],
dtype=np.float32),
expected=np.array([[[140, 280, 420, 560], [146, 292, 438, 584]]],
dtype=np.float32))
# [1, 2, 3] @ [3, 4] -> [1, 2, 4]
self._testBinary(
math_ops.matmul,
np.array([[[10, 20, 30], [11, 21, 31]]], dtype=np.float32),
np.array([[1, 2, 3, 4], [2, 4, 6, 8], [3, 6, 9, 12]],
dtype=np.float32),
expected=np.array([[[140, 280, 420, 560], [146, 292, 438, 584]]],
dtype=np.float32))
# [2, 1, 3] @ [3, 1] -> [2, 1, 1]
self._testBinary(
math_ops.matmul,
np.array([[[10, 20, 30]], [[11, 21, 31]]], dtype=np.float32),
np.array([[1], [2], [3]], dtype=np.float32),
expected=np.array([[[140]], [[146]]], dtype=np.float32))
# [2, 1, 3] @ [1, 3] -> [2, 1, 1] (adjoint_b)
self._testBinary(
lambda x, y: math_ops.matmul(x, y, adjoint_b=True),
np.array([[[10, 20, 30]], [[11, 21, 31]]], dtype=np.float32),
np.array([[1, 2, 3]], dtype=np.float32),
expected=np.array([[[140]], [[146]]], dtype=np.float32))
# [2, 3, 1] @ [3, 1] -> [2, 1, 1] (adjoint_a)
self._testBinary(
lambda x, y: math_ops.matmul(x, y, adjoint_a=True),
np.array([[[10], [20], [30]], [[11], [21], [31]]], dtype=np.float32),
np.array([[1], [2], [3]], dtype=np.float32),
expected=np.array([[[140]], [[146]]], dtype=np.float32))
# [2, 3, 1] @ [1, 3] -> [2, 1, 1] (adjoint_a and adjoint_b)
self._testBinary(
lambda x, y: math_ops.matmul(x, y, adjoint_a=True, adjoint_b=True),
np.array([[[10], [20], [30]], [[11], [21], [31]]], dtype=np.float32),
np.array([[1, 2, 3]], dtype=np.float32),
expected=np.array([[[140]], [[146]]], dtype=np.float32))
# [5, 1, 2, 3] @ [1, 7, 3, 4] -> [5, 7, 2, 4]
self._testBinary(
math_ops.matmul,
np.ones([5, 1, 2, 3], dtype=np.float32),
np.ones([1, 7, 3, 4], dtype=np.float32),
expected=np.full([5, 7, 2, 4], 3, dtype=np.float32))
# [4, 5, 1, 2, 3] @ [1, 1, 3, 5] -> [4, 5, 1, 2, 5]
self._testBinary(
math_ops.matmul,
np.full([4, 5, 1, 2, 3], 2., dtype=np.float32),
np.full([1, 1, 3, 5], 3., dtype=np.float32),
expected=np.full([4, 5, 1, 2, 5], 18., dtype=np.float32))
def testPad(self):
for dtype, pad_type in itertools.product(
self.numeric_types, [np.int32, np.int64]):
self._testBinary(
array_ops.pad,
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array(
[[1, 2], [2, 1]], dtype=pad_type),
expected=np.array(
[[0, 0, 0, 0, 0, 0],
[0, 0, 1, 2, 3, 0],
[0, 0, 4, 5, 6, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
dtype=dtype))
self._testBinary(
lambda x, y: array_ops.pad(x, y, constant_values=7),
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array(
[[0, 3], [2, 1]], dtype=pad_type),
expected=np.array(
[[7, 7, 1, 2, 3, 7],
[7, 7, 4, 5, 6, 7],
[7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7]],
dtype=dtype))
def testSymmetricMirrorPad(self):
mirror_pad = lambda t, paddings: array_ops.pad(t, paddings, "SYMMETRIC")
for dtype in self.numeric_types:
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
],
dtype=dtype),
          np.array([[2, 2], [3, 3]], dtype=np.int32),
expected=np.array(
[
[6, 5, 4, 4, 5, 6, 6, 5, 4], #
[3, 2, 1, 1, 2, 3, 3, 2, 1], #
[3, 2, 1, 1, 2, 3, 3, 2, 1], #
[6, 5, 4, 4, 5, 6, 6, 5, 4], #
[6, 5, 4, 4, 5, 6, 6, 5, 4], #
[3, 2, 1, 1, 2, 3, 3, 2, 1], #
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array([[0, 0], [0, 0]], dtype=np.int32),
expected=np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype))
def testReflectMirrorPad(self):
mirror_pad = lambda t, paddings: array_ops.pad(t, paddings, "REFLECT")
for dtype in self.numeric_types:
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
],
dtype=dtype),
          np.array([[1, 1], [2, 2]], dtype=np.int32),
expected=np.array(
[
[6, 5, 4, 5, 6, 5, 4], #
[3, 2, 1, 2, 3, 2, 1], #
[6, 5, 4, 5, 6, 5, 4], #
[3, 2, 1, 2, 3, 2, 1]
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array([[0, 0], [0, 0]], dtype=np.int32),
expected=np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype))
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
[7, 8, 9]
],
dtype=dtype),
np.array([[2, 2], [0, 0]], dtype=np.int32),
expected=np.array(
[
[7, 8, 9], #
[4, 5, 6], #
[1, 2, 3], #
[4, 5, 6], #
[7, 8, 9], #
[4, 5, 6], #
[1, 2, 3]
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array(
[
[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]],
], dtype=dtype),
np.array([[0, 0], [1, 1], [1, 1]], dtype=np.int32),
expected=np.array(
[
[
[5, 4, 5, 6, 5], #
[2, 1, 2, 3, 2], #
[5, 4, 5, 6, 5], #
[2, 1, 2, 3, 2], #
],
[
[11, 10, 11, 12, 11], #
[8, 7, 8, 9, 8], #
[11, 10, 11, 12, 11], #
[8, 7, 8, 9, 8], #
]
],
dtype=dtype))
def testReshape(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.reshape,
np.array([], dtype=dtype),
np.array([0, 4], dtype=np.int32),
expected=np.zeros(shape=[0, 4], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([2, 3], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([3, 2], dtype=np.int32),
expected=np.array([[0, 1], [2, 3], [4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([-1, 6], dtype=np.int32),
expected=np.array([[0, 1, 2, 3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([6, -1], dtype=np.int32),
expected=np.array([[0], [1], [2], [3], [4], [5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([2, -1], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([-1, 3], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
def testSplit(self):
for dtype in self.numeric_types:
for axis in [0, -3]:
self._testBinary(
lambda x, y: array_ops.split(value=y, num_or_size_splits=3, axis=x),
np.int32(axis),
np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
dtype=dtype),
expected=[
np.array([[[1], [2]]], dtype=dtype),
np.array([[[3], [4]]], dtype=dtype),
np.array([[[5], [6]]], dtype=dtype),
],
equality_test=self.ListsAreClose)
for axis in [1, -2]:
self._testBinary(
lambda x, y: array_ops.split(value=y, num_or_size_splits=2, axis=x),
np.int32(axis),
np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
dtype=dtype),
expected=[
np.array([[[1]], [[3]], [[5]]], dtype=dtype),
np.array([[[2]], [[4]], [[6]]], dtype=dtype),
],
equality_test=self.ListsAreClose)
def splitvOp(x, y): # pylint: disable=invalid-name
return array_ops.split(value=y, num_or_size_splits=[2, 3], axis=x)
for axis in [1, -1]:
self._testBinary(
splitvOp,
np.int32(axis),
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
dtype=dtype),
expected=[
np.array([[0, 1], [5, 6]], dtype=dtype),
np.array([[2, 3, 4], [7, 8, 9]], dtype=dtype),
],
equality_test=self.ListsAreClose)
def testTile(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.tile,
np.array([[6], [3], [4]], dtype=dtype),
np.array([2, 0], dtype=np.int32),
expected=np.empty([6, 0], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[6, 3, 4]], dtype=dtype),
np.array([2, 0], dtype=np.int32),
expected=np.empty([2, 0], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[6]], dtype=dtype),
np.array([1, 2], dtype=np.int32),
expected=np.array([[6, 6]], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1], [2]], dtype=dtype),
np.array([1, 2], dtype=np.int32),
expected=np.array([[1, 1], [2, 2]], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([3, 2], dtype=np.int32),
expected=np.array(
[[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]],
dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([1, 1], dtype=np.int32),
expected=np.array(
[[1, 2],
[3, 4]],
dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2]], dtype=dtype),
np.array([3, 1], dtype=np.int32),
expected=np.array(
[[1, 2],
[1, 2],
[1, 2]],
dtype=dtype))
def testTranspose(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.transpose,
np.zeros(shape=[1, 0, 4], dtype=dtype),
np.array([1, 2, 0], dtype=np.int32),
expected=np.zeros(shape=[0, 4, 1], dtype=dtype))
self._testBinary(
array_ops.transpose,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([0, 1], dtype=np.int32),
expected=np.array([[1, 2], [3, 4]], dtype=dtype))
self._testBinary(
array_ops.transpose,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([1, 0], dtype=np.int32),
expected=np.array([[1, 3], [2, 4]], dtype=dtype))
def testConjugateTranspose(self):
for dtype in self.complex_types:
self._testBinary(
array_ops.conjugate_transpose,
np.zeros(shape=[1, 0, 4], dtype=dtype),
np.array([1, 2, 0], dtype=np.int32),
expected=np.zeros(shape=[0, 4, 1], dtype=dtype))
self._testBinary(
array_ops.conjugate_transpose,
np.array([[1 - 1j, 2 + 2j], [3 - 3j, 4 + 4j]], dtype=dtype),
np.array([0, 1], dtype=np.int32),
expected=np.array([[1 + 1j, 2 - 2j], [3 + 3j, 4 - 4j]], dtype=dtype))
self._testBinary(
array_ops.conjugate_transpose,
np.array([[1 - 1j, 2 + 2j], [3 - 3j, 4 + 4j]], dtype=dtype),
np.array([1, 0], dtype=np.int32),
expected=np.array([[1 + 1j, 3 + 3j], [2 - 2j, 4 - 4j]], dtype=dtype))
def testCross(self):
for dtype in self.float_types:
self._testBinary(
gen_math_ops.cross,
np.zeros((4, 3), dtype=dtype),
np.zeros((4, 3), dtype=dtype),
expected=np.zeros((4, 3), dtype=dtype))
self._testBinary(
gen_math_ops.cross,
np.array([1, 2, 3], dtype=dtype),
np.array([4, 5, 6], dtype=dtype),
expected=np.array([-3, 6, -3], dtype=dtype))
self._testBinary(
gen_math_ops.cross,
np.array([[1, 2, 3], [10, 11, 12]], dtype=dtype),
np.array([[4, 5, 6], [40, 50, 60]], dtype=dtype),
expected=np.array([[-3, 6, -3], [60, -120, 60]], dtype=dtype))
def testBroadcastArgs(self):
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([1], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([1], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([5], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([3, 5], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([3, 1], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([3, 1], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 1, 5], dtype=np.int32),
np.array([3, 1], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([3, 1], dtype=np.int32),
np.array([2, 1, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"Incompatible shapes"):
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([1, 2, 3], dtype=np.int32),
np.array([4, 5, 6], dtype=np.int32),
expected=None)
def testBroadcastTo(self):
for dtype in self.all_types:
x = np.random.randint(0, high=100, size=[2, 3])
self._testBinary(
array_ops.broadcast_to,
x,
np.array([2, 3], dtype=np.int32),
expected=x)
self._testBinary(
array_ops.broadcast_to,
x,
np.array([6, 6], dtype=np.int32),
expected=np.tile(x, [3, 2]))
self._testBinary(
array_ops.broadcast_to,
x,
np.array([7, 4, 3], dtype=np.int32),
expected=np.tile(x, [7, 2, 1]))
self._testBinary(
array_ops.broadcast_to,
x,
np.array([7, 0, 3], dtype=np.int32),
expected=np.zeros([7, 0, 3], dtype=dtype))
self._testBinary(
array_ops.broadcast_to,
x,
np.array([7, 1, 2, 9], dtype=np.int32),
expected=np.tile(x, [7, 1, 1, 3]))
self._testBinary(
array_ops.broadcast_to,
np.zeros([2, 0], dtype=dtype),
np.array([4, 0], dtype=np.int32),
expected=np.zeros([4, 0], dtype=dtype))
x = np.arange(3).reshape((3, 1, 1, 1)).astype(dtype)
self._testBinary(
array_ops.broadcast_to,
x,
np.array((3, 7, 8, 9), dtype=np.int32),
expected=np.tile(x, (1, 7, 8, 9)))
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/binary_ops_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.svd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test
class SvdOpTest(xla_test.XLATestCase, parameterized.TestCase):
def _compute_usvt(self, s, u, v):
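    """Reconstructs u * diag(s) * v^T, trimming u or v to the thin factor."""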
m = u.shape[-1]
n = v.shape[-1]
if m <= n:
v = v[..., :m]
else:
u = u[..., :n]
return np.matmul(u * s[..., None, :], np.swapaxes(v, -1, -2))
def _testSvdCorrectness(self, dtype, shape):
np.random.seed(1)
x_np = np.random.uniform(low=-1.0, high=1.0, size=shape).astype(dtype)
m, n = shape[-2], shape[-1]
_, s_np, _ = np.linalg.svd(x_np)
with self.session() as sess:
x_tf = array_ops.placeholder(dtype)
with self.test_scope():
s, u, v = linalg_ops.svd(x_tf, full_matrices=True)
s_val, u_val, v_val = sess.run([s, u, v], feed_dict={x_tf: x_np})
u_diff = np.matmul(u_val, np.swapaxes(u_val, -1, -2)) - np.eye(m)
v_diff = np.matmul(v_val, np.swapaxes(v_val, -1, -2)) - np.eye(n)
# Check u_val and v_val are orthogonal matrices.
self.assertLess(np.linalg.norm(u_diff), 1e-2)
self.assertLess(np.linalg.norm(v_diff), 1e-2)
# Check that the singular values are correct, i.e., close to the ones from
      # numpy.linalg.svd.
self.assertLess(np.linalg.norm(s_val - s_np), 1e-2)
# The tolerance is set based on our tests on numpy's svd. As our tests
# have batch dimensions and all our operations are on float32, we set the
# tolerance a bit larger. Numpy's svd calls LAPACK's svd, which operates
# on double precision.
self.assertLess(
np.linalg.norm(self._compute_usvt(s_val, u_val, v_val) - x_np), 2e-2)
# Check behavior with compute_uv=False. We expect to still see 3 outputs,
# with a sentinel scalar 0 in the last two outputs.
with self.test_scope():
no_uv_s, no_uv_u, no_uv_v = gen_linalg_ops.svd(
x_tf, full_matrices=True, compute_uv=False)
no_uv_s_val, no_uv_u_val, no_uv_v_val = sess.run(
[no_uv_s, no_uv_u, no_uv_v], feed_dict={x_tf: x_np})
self.assertAllClose(no_uv_s_val, s_val, atol=1e-4, rtol=1e-4)
self.assertEqual(no_uv_u_val, 0.0)
self.assertEqual(no_uv_v_val, 0.0)
SIZES = [1, 2, 5, 10, 32, 64]
DTYPES = [np.float32]
PARAMS = itertools.product(SIZES, DTYPES)
@parameterized.parameters(*PARAMS)
def testSvd(self, n, dtype):
for batch_dims in [(), (3,)] + [(3, 2)] * (n < 10):
self._testSvdCorrectness(dtype, batch_dims + (n, n))
self._testSvdCorrectness(dtype, batch_dims + (2 * n, n))
self._testSvdCorrectness(dtype, batch_dims + (n, 2 * n))
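# Editorial sketch (not part of the original test file): _compute_usvt above
# rebuilds the input as u * diag(s) * v^T. The helper below mirrors that check
# with plain NumPy; the function name and the use of np.linalg.svd here are
# editorial assumptions for illustration only, and nothing in this file calls it.
def _numpy_svd_reconstruction_demo(m=4, n=3):
  """Illustrates that u * s @ v^T rebuilds the input matrix."""
  x = np.random.uniform(low=-1.0, high=1.0, size=(m, n)).astype(np.float32)
  u, s, vt = np.linalg.svd(x, full_matrices=False)
  reconstruction = np.matmul(u * s[None, :], vt)
  # Typically a tiny value (float32 round-off), analogous in spirit to the
  # 2e-2 bound used by _testSvdCorrectness above.
  return np.linalg.norm(reconstruction - x)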
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/svd_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple LSTM layer with benchmarks.
This sets up a simple LSTM (Long Short Term Memory) layer, unrolled to a fixed
length sequence. The only deviation from standard LSTM cells is that
activations are clipped, inspired by the GNMT machine translation model.
The GNMT paper has more details: https://arxiv.org/abs/1609.08144
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
def Clip(x):
"""Clips x to the range [-1., 1.]."""
return math_ops.maximum(math_ops.minimum(x, 1.), -1.)
def LSTMCellWeightsShape(num_inputs, num_nodes):
"""Returns the shape of the weights for a single LSTM cell."""
# Dimension 0 accounts for combining x with the previous m state.
# Dimension 1 accounts for the in value and the (in, forget, out) gates.
return [num_inputs + num_nodes, 4 * num_nodes]
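# Editorial example (illustrative): with num_inputs=3 and num_nodes=2 the
# weights shape is [3 + 2, 4 * 2] = [5, 8]; the four column blocks hold the
# in value and the (in, forget, out) gates described above.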
def LSTMCell(weights, m_prev, c_prev, x, pad):
"""Unrolls a single LSTM cell with clipped activations forward by one step.
Args:
weights: Weight matrix with shape LSTMCellWeightsShape.
m_prev: Previous m states with shape [batch_size, num_nodes].
c_prev: Previous c states with shape [batch_size, num_nodes].
x: Input with shape [batch_size, num_inputs].
pad: Padding with shape [batch_size, 1]. Each padding value is either
0 or 1, where 1 indicates padding; i.e. the input is shorter than the
sequence length, and the (m, c) states should simply be passed through
from the previous states.
Returns:
The next (m, c) states, each with shape [batch_size, num_nodes].
"""
# Apply weights to the input and previous hidden state.
# The matmul here is the "big" operation.
xm = array_ops.concat([x, m_prev], 1)
xmw = math_ops.matmul(xm, weights)
# Element-wise ops for the standard LSTM cell, with clipped activations.
# XLA can fuse these operations into a single loop.
in_value, in_gate, forget_gate, out_gate = array_ops.split(
value=xmw, num_or_size_splits=4, axis=1)
in_value = math_ops.tanh(in_value)
in_gate = math_ops.sigmoid(in_gate)
forget_gate = math_ops.sigmoid(forget_gate)
out_gate = math_ops.sigmoid(out_gate)
c_next = Clip(Clip(forget_gate * c_prev) + Clip(in_gate * in_value))
m_next = Clip(out_gate * c_next)
# Account for padding.
c_next = c_prev * pad + c_next * (1.0 - pad)
m_next = m_prev * pad + m_next * (1.0 - pad)
return m_next, c_next
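# Editorial sketch (not part of the original benchmark): a plain-NumPy mirror
# of the clipped-cell math in LSTMCell above, handy for checking the equations
# by hand. The function name and the local numpy import are editorial
# assumptions; nothing in this file calls it.
def _NumpyLSTMCellSketch(weights, m_prev, c_prev, x, pad):
  """NumPy mirror of LSTMCell above (illustrative only)."""
  import numpy as np  # Local import; the benchmark itself does not use numpy.

  def _clip(v):
    return np.maximum(np.minimum(v, 1.), -1.)

  def _sigmoid(v):
    return 1. / (1. + np.exp(-v))

  xm = np.concatenate([x, m_prev], axis=1)
  xmw = np.matmul(xm, weights)
  in_value, in_gate, forget_gate, out_gate = np.split(xmw, 4, axis=1)
  c_next = _clip(_clip(_sigmoid(forget_gate) * c_prev) +
                 _clip(_sigmoid(in_gate) * np.tanh(in_value)))
  m_next = _clip(_sigmoid(out_gate) * c_next)
  # Padded steps pass the previous states straight through.
  c_next = c_prev * pad + c_next * (1.0 - pad)
  m_next = m_prev * pad + m_next * (1.0 - pad)
  return m_next, c_next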
def LSTMLayer(cell_name, weights, m, c, x_seq, pad_seq):
"""Unrolls a layer of LSTM cells forward by the sequence length.
The sequence length is determined by the length of x_seq and pad_seq, which
must be the same.
Args:
cell_name: Base name of each cell.
weights: Weight matrix with shape LSTMCellWeightsShape.
m: Initial m states with shape [batch_size, num_nodes].
c: Initial c states with shape [batch_size, num_nodes].
x_seq: List of inputs, each with shape [batch_size, num_inputs].
The length of the list is the sequence length.
pad_seq: List of paddings, each with shape [batch_size, 1].
The length of the list is the sequence length.
Each padding value is either 0 or 1, where 1 indicates padding;
i.e. the input is shorter than the sequence length.
Returns:
List of per-sequence-step outputs, each with shape [batch_size, num_nodes].
Raises:
ValueError: If len(x_seq) != len(pad_seq).
"""
if len(x_seq) != len(pad_seq):
raise ValueError('length of x_seq(%d) != pad_seq(%d)' %
(len(x_seq), len(pad_seq)))
out_seq = []
for seq in range(len(x_seq)):
with ops.name_scope('%s_%d' % (cell_name, seq)):
m, c = LSTMCell(weights, m, c, x_seq[seq], pad_seq[seq])
out_seq.append(array_ops.identity(m, name='out'))
return out_seq
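# Editorial note (illustrative): the loop above simply threads (m, c) through
# len(x_seq) LSTMCell calls, so out_seq holds one [batch_size, num_nodes]
# tensor per sequence step, each created under a name scope such as
# "lstm_0", "lstm_1", ... when cell_name is "lstm".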
def RandomVar(shape, name=None):
"""Returns a variable of the given shape initialized to random values."""
return variables.VariableV1(
random_ops.random_uniform(shape), dtype=dtypes.float32, name=name)
def RandomInputs(batch_size, seq_length, num_inputs):
"""Returns randomly initialized (x_seq, pad_seq) sequences."""
x_seq = []
pad_seq = []
with ops.name_scope('inputs'):
for seq in range(seq_length):
x_seq.append(RandomVar([batch_size, num_inputs], name='x_seq_%d' % seq))
# Real padding values are always a sequence of 0 followed by a
# sequence of 1, but random values are fine for benchmarking.
pad_seq.append(RandomVar([batch_size, 1], name='pad_seq_%d' % seq))
return x_seq, pad_seq
def BuildLSTMLayer(batch_size, seq_length, num_inputs, num_nodes):
"""Builds a single LSTM layer with random weights and inputs.
Args:
batch_size: Inputs are fed in batches of this size.
seq_length: The sequence length to unroll the LSTM layer.
num_inputs: Dimension of inputs that are fed into each LSTM cell.
num_nodes: The number of nodes in each LSTM cell.
Returns:
(out_seq, weights) pair. The out_seq is a list of per-sequence-step
outputs, each with shape [batch_size, num_nodes]. The weights are a list of
weight variables that may be trained.
"""
weights = RandomVar(
LSTMCellWeightsShape(num_inputs, num_nodes), name='weights')
m = array_ops.zeros([batch_size, num_nodes], name='init_m')
c = array_ops.zeros([batch_size, num_nodes], name='init_c')
x_seq, pad_seq = RandomInputs(batch_size, seq_length, num_inputs)
out_seq = LSTMLayer('lstm', weights, m, c, x_seq, pad_seq)
return out_seq, [weights]
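# Editorial usage sketch (not part of the original file): one way to evaluate a
# forward pass of the layer under a TF1-style graph and session. The helper
# name, the tiny sizes, and the session import are editorial assumptions.
def _RunLSTMLayerSketch():
  """Builds a small layer and runs one forward pass (illustrative only)."""
  from tensorflow.python.client import session as session_lib
  with ops.Graph().as_default():
    out_seq, _ = BuildLSTMLayer(
        batch_size=2, seq_length=3, num_inputs=4, num_nodes=5)
    with session_lib.Session() as sess:
      sess.run(variables.global_variables_initializer())
      # Returns a list of 3 arrays, each shaped [2, 5].
      return sess.run(out_seq)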
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/lstm.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the LSTM cell and layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import numpy as np
from tensorflow.compiler.tests import lstm
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _DumpGraph(graph, basename):
if FLAGS.dump_graph_dir:
name = os.path.join(FLAGS.dump_graph_dir, basename + '.pbtxt')
with open(name, 'w') as f:
f.write(str(graph.as_graph_def()))
def _Sigmoid(x):
return 1. / (1. + np.exp(-x))
def _Clip(x):
return np.maximum(np.minimum(x, 1.), -1.)
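# Editorial note (illustrative): with all-zero weights the cell input x is 0,
# so _Sigmoid(x) = 0.5 and np.tanh(x) = 0; the reference math below then gives
# c = 0.5 * c_prev and m = 0.25 * c_prev, which is why testLSTMCell expects
# m = 0.25 and c = 0.5 when c_prev = 1.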
class LSTMTest(test.TestCase):
def setUp(self):
# The tests for a single LSTM cell and LSTM layer use these values as
# inputs. We always set the dimensionality of num_inputs=1; thus batch_size
# actually represents the different input cases.
self._inputs = np.array([[-1.], [-.5], [0.], [.5], [1.]], np.float32)
self._batch_size = len(self._inputs)
def _NextC(self, inputs, weight, m_prev, c_prev):
"""Returns the next c states of an LSTM cell."""
x = (inputs + m_prev) * weight
return _Clip(_Clip(_Sigmoid(x) * c_prev) + _Clip(_Sigmoid(x) * np.tanh(x)))
def _NextM(self, inputs, weight, m_prev, c_prev):
"""Returns the next m states of an LSTM cell."""
x = (inputs + m_prev) * weight
return _Clip(_Sigmoid(x) * self._NextC(inputs, weight, m_prev, c_prev))
def _RunLSTMCell(self, basename, init_weights, m_prev_scalar, c_prev_scalar,
pad_scalar):
with self.session() as sess:
num_inputs = 1
num_nodes = 1
weights = init_weights(lstm.LSTMCellWeightsShape(num_inputs, num_nodes))
m_prev = constant_op.constant([[m_prev_scalar]] * self._batch_size)
c_prev = constant_op.constant([[c_prev_scalar]] * self._batch_size)
x = constant_op.constant(self._inputs)
pad = constant_op.constant([[pad_scalar]] * self._batch_size)
m, c = lstm.LSTMCell(weights, m_prev, c_prev, x, pad)
_DumpGraph(sess.graph, 'lstm_cell_%s_%d_%d_%d' %
(basename, m_prev_scalar, c_prev_scalar, pad_scalar))
# Initialize variables and run the unrolled LSTM step.
self.evaluate(variables.global_variables_initializer())
return self.evaluate([m, c])
def testLSTMCell(self):
# Run with all-0 weights, no padding.
m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 0., 0., 0.)
self.assertAllClose(m, [[0.]] * self._batch_size)
self.assertAllClose(c, [[0.]] * self._batch_size)
m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 0., 1., 0.)
self.assertAllClose(m, [[.25]] * self._batch_size)
self.assertAllClose(c, [[.5]] * self._batch_size)
m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 1., 0., 0.)
self.assertAllClose(m, [[.0]] * self._batch_size)
self.assertAllClose(c, [[.0]] * self._batch_size)
m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 1., 1., 0.)
self.assertAllClose(m, [[.25]] * self._batch_size)
self.assertAllClose(c, [[.5]] * self._batch_size)
# Run with all-1 weights, no padding.
for m_prev in [0., 1.]:
for c_prev in [0., 1.]:
m, c = self._RunLSTMCell('ones',
init_ops.ones_initializer(), m_prev, c_prev,
0.)
self.assertAllClose(m, self._NextM(self._inputs, 1., m_prev, c_prev))
self.assertAllClose(c, self._NextC(self._inputs, 1., m_prev, c_prev))
# Run with random weights.
for weight in np.random.rand(3):
weight_tf = constant_op.constant(weight, dtypes.float32)
random_weight = lambda shape, w=weight_tf: array_ops.fill(shape, w)
# No padding.
for m_prev in [0., 1.]:
for c_prev in [0., 1.]:
m, c = self._RunLSTMCell('random', random_weight, m_prev, c_prev, 0.)
self.assertAllClose(m,
self._NextM(self._inputs, weight, m_prev, c_prev))
self.assertAllClose(c,
self._NextC(self._inputs, weight, m_prev, c_prev))
# Set padding.
for m_prev in [0., 1.]:
for c_prev in [0., 1.]:
m, c = self._RunLSTMCell('random', random_weight, m_prev, c_prev, 1.)
self.assertAllClose(m, [[m_prev]] * self._batch_size)
self.assertAllClose(c, [[c_prev]] * self._batch_size)
def testLSTMLayerErrors(self):
num_inputs = 1
num_nodes = 1
seq_length = 3
weights = array_ops.zeros(lstm.LSTMCellWeightsShape(num_inputs, num_nodes))
m = constant_op.constant([[0.]] * self._batch_size)
c = constant_op.constant([[0.]] * self._batch_size)
x_seq = [constant_op.constant(self._inputs)] * seq_length
pad = constant_op.constant([[0.]] * self._batch_size)
with self.assertRaisesWithPredicateMatch(ValueError, 'length of x_seq'):
lstm.LSTMLayer('lstm', weights, m, c, x_seq, [pad])
with self.assertRaisesWithPredicateMatch(ValueError, 'length of x_seq'):
lstm.LSTMLayer('lstm', weights, m, c, x_seq, [pad] * 2)
with self.assertRaisesWithPredicateMatch(ValueError, 'length of x_seq'):
lstm.LSTMLayer('lstm', weights, m, c, x_seq, [pad] * 4)
def _RunLSTMLayer(self, basename, init_weights, m_init_scalar, c_init_scalar,
pad_scalar):
with self.session() as sess:
num_inputs = 1
num_nodes = 1
seq_length = 3
weights = init_weights(lstm.LSTMCellWeightsShape(num_inputs, num_nodes))
m_init = constant_op.constant([[m_init_scalar]] * self._batch_size)
c_init = constant_op.constant([[c_init_scalar]] * self._batch_size)
x_seq = [constant_op.constant(self._inputs)] * seq_length
pad_seq = [constant_op.constant([[pad_scalar]] * self._batch_size)
] * seq_length
out_seq = lstm.LSTMLayer('lstm', weights, m_init, c_init, x_seq, pad_seq)
_DumpGraph(sess.graph, 'lstm_layer_%s_%d_%d_%d' %
(basename, m_init_scalar, c_init_scalar, pad_scalar))
# Initialize variables and run the unrolled LSTM layer.
self.evaluate(variables.global_variables_initializer())
return self.evaluate(out_seq)
def testLSTMLayer(self):
# Run with all-0 weights, no padding.
o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 0., 0., 0.)
self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 0., 1., 0.)
self.assertAllClose(o, [[[.25]] * self._batch_size,
[[.125]] * self._batch_size,
[[.0625]] * self._batch_size])
o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 1., 0., 0.)
self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 1., 1., 0.)
self.assertAllClose(o, [[[.25]] * self._batch_size,
[[.125]] * self._batch_size,
[[.0625]] * self._batch_size])
# Run with all-1 weights, no padding.
weight1 = 1.
for m_init in [0., 1.]:
for c_init in [0., 1.]:
o = self._RunLSTMLayer('ones',
init_ops.ones_initializer(), m_init, c_init, 0.)
m0 = self._NextM(self._inputs, weight1, m_init, c_init)
c0 = self._NextC(self._inputs, weight1, m_init, c_init)
self.assertAllClose(o[0], m0)
m1 = self._NextM(self._inputs, weight1, m0, c0)
c1 = self._NextC(self._inputs, weight1, m0, c0)
self.assertAllClose(o[1], m1)
m2 = self._NextM(self._inputs, weight1, m1, c1)
self.assertAllClose(o[2], m2)
# Run with random weights.
for weight in np.random.rand(3):
weight_tf = constant_op.constant(weight, dtypes.float32)
random_weight = lambda shape, w=weight_tf: array_ops.fill(shape, w)
# No padding.
for m_init in [0., 1.]:
for c_init in [0., 1.]:
o = self._RunLSTMLayer('random', random_weight, m_init, c_init, 0.)
m0 = self._NextM(self._inputs, weight, m_init, c_init)
c0 = self._NextC(self._inputs, weight, m_init, c_init)
self.assertAllClose(o[0], m0)
m1 = self._NextM(self._inputs, weight, m0, c0)
c1 = self._NextC(self._inputs, weight, m0, c0)
self.assertAllClose(o[1], m1)
m2 = self._NextM(self._inputs, weight, m1, c1)
self.assertAllClose(o[2], m2)
# Set padding.
o = self._RunLSTMLayer('random', random_weight, 0., 0., 1.)
self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
o = self._RunLSTMLayer('random', random_weight, 0., 1., 1.)
self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
o = self._RunLSTMLayer('random', random_weight, 1., 0., 1.)
self.assertAllClose(o, [[[1.]] * self._batch_size] * 3)
o = self._RunLSTMLayer('random', random_weight, 1., 1., 1.)
self.assertAllClose(o, [[[1.]] * self._batch_size] * 3)
class LSTMBenchmark(test.Benchmark):
"""Mcro-benchmarks for a single layer of LSTM cells."""
def _LayerBuilder(self, do_training):
out_seq, weights = lstm.BuildLSTMLayer(FLAGS.batch_size, FLAGS.seq_length,
FLAGS.num_inputs, FLAGS.num_nodes)
name, fetches = ('lstm_layer_inference', out_seq)
if do_training:
# Not a real loss function, but good enough for benchmarking backprop.
loss = math_ops.reduce_sum(math_ops.add_n(out_seq))
dw = gradients_impl.gradients(loss, weights)
name, fetches = ('lstm_layer_training', dw)
_DumpGraph(ops.get_default_graph(),
'%s_%d_%d_%d_%d' % (name, FLAGS.batch_size, FLAGS.seq_length,
FLAGS.num_inputs, FLAGS.num_nodes))
return name, fetches
def benchmarkLayerInference(self):
xla_test.Benchmark(self, lambda: self._LayerBuilder(False), False,
FLAGS.device)
def benchmarkLayerInferenceXLA(self):
xla_test.Benchmark(self, lambda: self._LayerBuilder(False), True,
FLAGS.device)
def benchmarkLayerTraining(self):
xla_test.Benchmark(self, lambda: self._LayerBuilder(True), False,
FLAGS.device)
def benchmarkLayerTrainingXLA(self):
xla_test.Benchmark(self, lambda: self._LayerBuilder(True), True,
FLAGS.device)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--batch_size',
type=int,
default=128,
help="""\
Inputs are fed in batches of this size, for both inference and training.
Larger values cause the matmul in each LSTM cell to have higher
dimensionality.\
"""
)
parser.add_argument(
'--seq_length',
type=int,
default=60,
help="""\
      Length of the unrolled sequence of LSTM cells in a layer. Larger values
cause more LSTM matmuls to be run.\
"""
)
parser.add_argument(
'--num_inputs',
type=int,
default=1024,
help='Dimension of inputs that are fed into each LSTM cell.'
)
parser.add_argument(
'--num_nodes',
type=int,
default=1024,
help='Number of nodes in each LSTM cell.'
)
parser.add_argument(
'--device',
type=str,
default='gpu',
help="""\
TensorFlow device to assign ops to, e.g. "gpu", "cpu". For details see
documentation for tf.Graph.device.\
"""
)
parser.add_argument(
'--dump_graph_dir',
type=str,
default='',
help='If non-empty, dump graphs in *.pbtxt format to this directory.'
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
test.main(argv=[sys.argv[0]] + unparsed)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/lstm_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for XLA Concat Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class ConcatTest(xla_test.XLATestCase):
def testHStack(self):
with self.session():
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
with self.test_scope():
c = array_ops.concat([p1, p2], 0)
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
}
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:4, :], params[p1])
self.assertAllEqual(result[4:, :], params[p2])
def testVStack(self):
with self.session():
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
with self.test_scope():
c = array_ops.concat([p1, p2], 1)
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
}
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:, :4], params[p1])
self.assertAllEqual(result[:, 4:], params[p2])
def testInt32(self):
with self.session():
p1 = np.random.rand(2, 3).astype("i")
p2 = np.random.rand(2, 3).astype("i")
x1 = constant_op.constant(p1)
x2 = constant_op.constant(p2)
with self.test_scope():
c = array_ops.concat([x1, x2], 0)
result = self.evaluate(c)
self.assertAllEqual(result[:2, :], p1)
self.assertAllEqual(result[2:, :], p2)
def _testRandom(self, dtype):
# Random dims of rank 5
shape = np.random.randint(1, 5, size=5)
# Random number of tensors, but always > 1.
num_tensors = np.random.randint(2, 10)
# Random dim to concat on
concat_dim = np.random.randint(5)
params = {}
if dtype == dtypes.bfloat16:
dtype_feed = dtypes.float32
else:
dtype_feed = dtype
with self.session():
p = []
for i in np.arange(num_tensors):
input_shape = shape
input_shape[concat_dim] = np.random.randint(1, 5)
placeholder = array_ops.placeholder(dtype_feed, shape=input_shape)
p.append(placeholder)
t = dtype_feed.as_numpy_dtype
params[placeholder] = np.random.rand(*input_shape).astype(t)
if dtype != dtype_feed:
concat_inputs = [math_ops.cast(p_i, dtype) for p_i in p]
else:
concat_inputs = p
with self.test_scope():
c = array_ops.concat(concat_inputs, concat_dim)
if dtype != dtype_feed:
c = math_ops.cast(c, dtype_feed)
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
cur_offset = 0
for i in np.arange(num_tensors):
# The index into the result is the ':' along all dimensions
# except the concat_dim. slice(0, size) is used for ':', and
# a list of slices is used to index into result.
ind = [slice(0, params[p[i]].shape[j]) for j in np.arange(5)]
ind[concat_dim] = slice(cur_offset,
cur_offset + params[p[i]].shape[concat_dim])
cur_offset += params[p[i]].shape[concat_dim]
if dtype == dtype_feed:
self.assertAllEqual(result[ind], params[p[i]])
else:
self.assertAllClose(result[ind], params[p[i]], 0.01)
def testRandom(self):
self._testRandom(dtypes.float32)
self._testRandom(dtypes.int32)
def _testGradientsSimple(self):
with self.session():
inp = []
inp_tensors = []
with self.test_scope():
for x in [1, 2, 6]:
shape = [10, x, 2]
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, 1)
output_shape = [10, 9, 2]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, 1)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
def testGradientsSimpleAll(self):
self._testGradientsSimple()
def _testGradientsFirstDim(self):
with self.session():
inp = []
inp_tensors = []
with self.test_scope():
for x in [1, 2, 6]:
shape = [x, 10, 2]
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, 0)
output_shape = [9, 10, 2]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, 0)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
def testGradientsFirstDimAll(self):
self._testGradientsFirstDim()
def _testGradientsLastDim(self):
with self.session():
inp = []
inp_tensors = []
with self.test_scope():
for x in [1, 2, 6]:
shape = [10, 2, x]
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, 2)
output_shape = [10, 2, 9]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, 2)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
def testGradientsLastDimAll(self):
self._testGradientsLastDim()
def _RunAndVerifyGradientsRandom(self):
# Random dims of rank 5
input_shape = np.random.randint(1, 5, size=5)
# Random number of tensors
num_tensors = np.random.randint(1, 10)
# Random dim to concat on
concat_dim = np.random.randint(5)
concat_dim_sizes = np.random.randint(1, 5, size=num_tensors)
with self.session():
inp = []
inp_tensors = []
with self.test_scope():
for x in concat_dim_sizes:
shape = input_shape
shape[concat_dim] = x
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, concat_dim)
output_shape = input_shape
output_shape[concat_dim] = concat_dim_sizes.sum()
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, concat_dim)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
def testGradientsRandom(self):
for _ in range(5):
self._RunAndVerifyGradientsRandom()
# Re-enable once zero-element Retvals are handled correctly.
def DISABLED_testZeroSize(self):
# Verify that concat doesn't crash and burn for zero size inputs
np.random.seed(7)
with self.session():
with self.test_scope():
for shape0 in (), (2,):
axis = len(shape0)
for shape1 in (), (3,):
for n0 in 0, 1, 2:
for n1 in 0, 1, 2:
x0 = np.random.randn(*(shape0 + (n0,) + shape1))
x1 = np.random.randn(*(shape0 + (n1,) + shape1))
correct = np.concatenate([x0, x1], axis=axis)
# TODO(irving): Make tf.concat handle map, then drop list().
xs = list(map(constant_op.constant, [x0, x1]))
c = array_ops.concat(xs, axis)
self.assertAllEqual(c.eval(), correct)
# Check gradients
dc = np.random.randn(*c.get_shape().as_list())
dxs = self.evaluate(gradients_impl.gradients(c, xs, dc))
self.assertAllEqual(dc, np.concatenate(dxs, axis=axis))
def testConcatTuple(self):
c1 = np.random.rand(4, 4).astype(np.float32)
c2 = np.random.rand(4, 4).astype(np.float32)
with self.session():
with self.test_scope():
concat_list_t = array_ops.concat([c1, c2], 0)
concat_tuple_t = array_ops.concat((c1, c2), 0)
self.assertAllEqual(concat_list_t.eval(), self.evaluate(concat_tuple_t))
def testConcatNoScalars(self):
with self.session():
with self.test_scope():
scalar = constant_op.constant(7)
dim = array_ops.placeholder(dtypes.int32)
with self.assertRaisesRegexp(
ValueError, r"Can't concatenate scalars \(use tf\.stack instead\)"):
array_ops.concat([scalar, scalar, scalar], dim)
# The purpose of this is to ensure that XLA on GPU will not run out of memory
# with too many arguments.
def testConcatLargeNumberOfTensors(self):
if "CPU" in self.device:
self.skipTest("This test can time out on CPU, so we will just allow "
"other backends to catch this specific error.")
with self.session():
with self.test_scope():
for concat_dim in range(2):
params = {}
p = []
shape = np.array([7, 13])
num_tensors = 1001
for i in np.arange(num_tensors):
input_shape = shape
placeholder = array_ops.placeholder(
dtypes.float32, shape=input_shape)
p.append(placeholder)
params[placeholder] = np.random.rand(*input_shape).astype(
np.float32)
concat_inputs = p
c = array_ops.concat(concat_inputs, concat_dim)
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
cur_offset = 0
for i in np.arange(num_tensors):
# The index into the result is the ':' along all dimensions
# except the concat_dim. slice(0, size) is used for ':', and
# a list of slices is used to index into result.
index = [slice(0, params[p[i]].shape[j]) for j in np.arange(2)]
index[concat_dim] = slice(
cur_offset, cur_offset + params[p[i]].shape[concat_dim])
cur_offset += params[p[i]].shape[concat_dim]
self.assertAllEqual(result[index], params[p[i]])
class ConcatOffsetTest(xla_test.XLATestCase):
def testBasic(self):
with self.session():
with self.test_scope():
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
s2 = constant_op.constant([2, 20, 5], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
ans = self.evaluate(off)
self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
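# Editorial note (illustrative): along cdim=1 the offsets accumulate the sizes
# of the preceding inputs, i.e. 0, then 3, then 3 + 7 = 10, which matches the
# expected [[0, 0, 0], [0, 3, 0], [0, 10, 0]] above.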
class PackTest(xla_test.XLATestCase):
def testBasic(self):
with self.session():
with self.test_scope():
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
s2 = constant_op.constant([2, 20, 5], dtypes.int32)
packed = array_ops.stack([s0, s1, s2])
ans = self.evaluate(packed)
self.assertAllEqual(ans, [[2, 3, 5], [2, 7, 5], [2, 20, 5]])
def testScalars(self):
with self.session():
with self.test_scope():
s0 = constant_op.constant(2, dtypes.int32)
s1 = constant_op.constant(3, dtypes.int32)
s2 = constant_op.constant(5, dtypes.int32)
packed = array_ops.stack([s0, s1, s2])
ans = self.evaluate(packed)
self.assertAllEqual(ans, [2, 3, 5])
def testEmpty(self):
with self.session():
with self.test_scope():
s0 = constant_op.constant([[]], dtypes.int32)
s1 = constant_op.constant([[]], dtypes.int32)
s2 = constant_op.constant([[]], dtypes.int32)
packed = array_ops.stack([s0, s1, s2])
ans = self.evaluate(packed)
self.assertAllEqual(ans, [[[]], [[]], [[]]])
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/concat_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for XLA TensorArray Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.compiler.xla import xla
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _make_converter(dtype):
def _converter(x):
return np.asarray(x).astype(dtype.as_numpy_dtype)
return _converter
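# Editorial example (illustrative): _make_converter(dtypes.float32)([1, 2])
# returns np.array([1., 2.], dtype=np.float32).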
# This lets me define `fn` repeatedly to pass to xla.compile.
#
# pylint: disable=function-redefined
@test_util.with_control_flow_v2
class TensorArrayTest(xla_test.XLATestCase):
@test_util.disable_control_flow_v2("Tries to evaluate flow")
def testTensorArrayWriteRead(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0, 3.0]])
w2 = w1.write(2, [[7.0, -8.5]])
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
flow = w2.flow
return [r0, r1, r2, flow]
d0, d1, d2, flow_val = self.evaluate(xla.compile(fn))
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0, 3.0]], d1)
self.assertAllEqual([[7.0, -8.5]], d2)
self.assertAllEqual([], flow_val.shape)
def _testTensorArrayWritePack(self, tf_dtype):
with self.session(), self.test_scope():
convert = _make_converter(tf_dtype)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
w0 = ta.write(0, convert([[4.0, 5.0]]))
w1 = w0.write(1, convert([[6.0, 7.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
return w2.stack()
self.assertAllEqual(
convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]),
self.evaluate(xla.compile(fn)[0]))
def testTensorArrayWritePack(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayWritePack(dtype)
def testEmptyTensorArrayPack(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
empty_element = np.zeros((0, 1), dtype=np.float32)
w0 = ta.write(0, empty_element)
w1 = w0.write(1, empty_element)
w2 = w1.write(2, empty_element)
return w2.stack()
self.assertAllEqual([3, 0, 1], self.evaluate(xla.compile(fn)[0]).shape)
def _testTensorArrayWriteConcat(self, tf_dtype):
with self.session(), self.test_scope():
convert = _make_converter(tf_dtype)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0]]))
w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
w2 = w1.write(2, convert([[8.0, 9.0], [204.0, 205.0]]))
return w2.concat()
self.assertAllEqual(
convert([[4.0, 5.0], [104.0, 105.0], [6.0, 7.0], [106.0, 107.0],
[8.0, 9.0], [204.0, 205.0]]),
self.evaluate(xla.compile(fn)[0]))
@test_util.disable_control_flow_v2("b/122315751 (concat)")
def testTensorArrayWriteConcat(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayWriteConcat(dtype)
def _testTensorArrayUnpackRead(self, tf_dtype):
with self.session() as session, self.test_scope():
convert = _make_converter(tf_dtype)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Unpack a vector into scalars
w0 = ta.unstack(convert([1.0, 2.0, 3.0]))
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
return [r0, r1, r2]
d0, d1, d2 = self.evaluate(xla.compile(fn))
self.assertAllEqual(convert(1.0), d0)
self.assertAllEqual(convert(2.0), d1)
self.assertAllEqual(convert(3.0), d2)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Unpack a matrix into vectors.
w1 = ta.unstack(
convert([[1.0, 1.03125], [2.0, 2.03125], [3.0, 3.03125]]))
r0 = w1.read(0)
r1 = w1.read(1)
r2 = w1.read(2)
return [r0, r1, r2]
d0, d1, d2 = self.evaluate(xla.compile(fn))
self.assertAllEqual(convert([1.0, 1.03125]), d0)
self.assertAllEqual(convert([2.0, 2.03125]), d1)
self.assertAllEqual(convert([3.0, 3.03125]), d2)
def fn():
# Reset ta because we're going to change the shape, else shape
# inference will throw an error.
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Try unpacking an empty matrix, which should not cause an error.
w2 = ta.unstack(convert([[], [], []]))
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
return [r0, r1, r2]
d0, d1, d2 = self.evaluate(xla.compile(fn))
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
def _testTensorArrayUnpackReadMaybeLegacy(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayUnpackRead(dtype)
def testTensorArrayUnpackRead(self):
self._testTensorArrayUnpackReadMaybeLegacy()
def _testTensorArraySplitRead(self, tf_dtype):
with self.session() as session, self.test_scope():
convert = _make_converter(tf_dtype)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Split an empty vector.
lengths = constant_op.constant([0, 0, 0])
w0 = ta.split(convert([]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
return [r0, r1, r2]
d0, d1, d2 = self.evaluate(xla.compile(fn))
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
def fn():
# Split a vector.
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
lengths = constant_op.constant([1, 1, 1])
w0 = ta.split(convert([1.0, 2.0, 3.0]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
return [r0, r1, r2]
d0, d1, d2 = self.evaluate(xla.compile(fn))
self.assertAllEqual(convert([1.0]), d0)
self.assertAllEqual(convert([2.0]), d1)
self.assertAllEqual(convert([3.0]), d2)
def fn():
# Split a matrix.
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
lengths = constant_op.constant([1, 1, 1])
w0 = ta.split(
convert([[1.0, 101.0], [2.0, 201.0], [3.0, 301.0]]),
lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
return [r0, r1, r2]
d0, d1, d2 = self.evaluate(xla.compile(fn))
self.assertAllEqual(convert([[1.0, 101.0]]), d0)
self.assertAllEqual(convert([[2.0, 201.0]]), d1)
self.assertAllEqual(convert([[3.0, 301.0]]), d2)
@test_util.disable_control_flow_v2("b/122315872 (split)")
def testTensorArraySplitRead(self):
for dtype in self.numeric_tf_types:
self._testTensorArraySplitRead(dtype)
@test_util.disable_control_flow_v2("TensorArray.grad is not supported in v2")
def testTensorGradArrayWriteRead(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, [[-3.0]])
g_ta = w2.grad("grad")
g_w0 = g_ta.write(0, [[5.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, [[-2.0]])
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
return [r0, r1, r2, g_r0, g_r1, g_r2]
d0, d1, d2, g_d0, g_d1, g_d2 = self.evaluate(xla.compile(fn))
self.assertAllEqual([[4.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual([[-3.0]], d2)
self.assertAllEqual([[5.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual([[-2.0]], g_d2)
@test_util.disable_control_flow_v2("TensorArray.grad is not supported in v2")
def testTensorGradArrayDynamicWriteRead(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, [[-3.0]])
g_ta = w2.grad("grad") # Get gradient array here so we know the shape
s = w2.size()
g_s = g_ta.size()
g_w0 = g_ta.write(0, [[5.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, [[-2.0]])
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
return [r0, r1, r2, g_r0, g_r1, g_r2, s, g_s]
d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = self.evaluate(xla.compile(fn))
self.assertAllEqual([[4.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual([[-3.0]], d2)
self.assertAllEqual([[5.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual([[-2.0]], g_d2)
self.assertAllEqual(3, vs)
self.assertAllEqual(3, g_vs)
@test_util.disable_control_flow_v2("TensorArray.grad is not supported in v2")
def testTensorGradAccessTwiceReceiveSameObject(self):
with self.session() as session, self.test_scope():
ta_out = {}
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
element_shape=[1, 2])
g_ta_0 = ta.grad("grad")
g_ta_1 = ta.grad("grad")
ta_out[0] = g_ta_0.handle
ta_out[1] = g_ta_1.handle
with ops.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
# Write with one gradient handle, read with another copy of it
r1_0 = g_ta_1.read(0)
with ops.control_dependencies([g_ta_0.handle.op, g_ta_1.handle.op]):
return [r1_0]
[d_r1_0] = self.evaluate(xla.compile(fn))
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
# Can't assert this because adding a side output like we have here fails
# as follows:
#
# ValueError: Operation u'TensorArrayGrad/TensorArrayGradV3' has been
# marked as not fetchable.
#
# On the other hand, legitimately returning the handle from the
# xla.compile function fails because we don't support DT_RESOURCE outputs
# from XLA clusters.
#
# self.assertAllEqual(ta_out[0], ta_out[1])
@test_util.disable_control_flow_v2("b/124334470")
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
return ta.write(-1, constant_op.constant(7)).flow
# Test writing the wrong datatype.
# TODO(b/129870929): Remove InvalidArgumentError/second regexp after all
# callers provide proper init dtype.
with self.assertRaisesRegexp(
(ValueError, errors.InvalidArgumentError),
r"("
r"conversion requested dtype float32 for Tensor with dtype int32"
r"|"
r"TensorArray dtype is float but op has dtype int32"
r")"):
xla.compile(fn)[0].eval()
@test_util.disable_control_flow_v2("b/124334096 verify dtype")
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
# Find two different floating point types, create an array of
# the first type, but try to read the other type.
if len(self.float_types) > 1:
dtype1, dtype2 = list(self.float_types)[:2]
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtype1, tensor_array_name="foo", size=3)
w0 = ta.write(0, math_ops.cast([[4.0, 5.0]], dtype1))
# Test reading wrong datatype.
return gen_data_flow_ops.tensor_array_read_v3(
handle=w0.handle, index=0, dtype=dtype2, flow_in=w0.flow)
with self.assertRaisesOpError("TensorArray dtype is "):
self.evaluate(xla.compile(fn))
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtype1, tensor_array_name="foo", size=3)
w0 = ta.write(0, math_ops.cast([[4.0, 5.0]], dtype1))
# Test reading from a different index than the one we wrote to
with ops.control_dependencies([w0.read(1)]):
return 1.0
xla.compile(fn)[0].eval()
@test_util.disable_control_flow_v2("b/122315872 (split)")
def testTensorArraySplitIncompatibleShapesFails(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
return ta.split([1.0, 2.0, 3.0], 1).flow
with self.assertRaisesWithPredicateMatch(
ValueError, r"Shape must be rank 1 but is rank 0"):
xla.compile(fn)[0].eval()
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
return ta.split([1.0, 2.0, 3.0], [1, 2, 3]).flow
with self.assertRaisesOpError(
r"lengths must be equal: 1 vs. 2"):
xla.compile(fn)[0].eval()
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
return ta.split(1.0, [1]).flow
with self.assertRaisesOpError(
r"value must have rank >= 1"):
xla.compile(fn)[0].eval()
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
infer_shape=False)
return ta.split([1.0], [1]).flow
with self.assertRaisesOpError(
r"TensorArray's size is not equal to the size of lengths "
r"\(1 vs. 2\)"):
xla.compile(fn)[0].eval()
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
with self.session(), self.test_scope():
c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
w0 = ta.write(2, c(3.0))
w1 = w0.write(2, c(4.0))
ta_grad = w1.grad("grad")
w0_grad = ta_grad.write(2, c(3.0))
w1_grad = w0_grad.write(2, c(4.0))
w2_grad = w1_grad.write(2, c(5.0))
return w2_grad.read(2)
# Assert that aggregation works correctly
self.assertAllEqual(c(12.00), xla.compile(fn)[0].eval())
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
w0 = ta.write(2, c(3.0))
w1 = w0.write(2, c(4.0))
ta_grad = w1.grad("grad")
# Using differing shapes causes an exception
wb0_grad = ta_grad.write(1, c(1.0))
wb1_grad = wb0_grad.write(1, c([1.0]))
return wb1_grad.flow
with self.assertRaisesOpError(
r"Mismatched TensorArray sizes"):
xla.compile(fn)[0].eval()
@test_util.disable_control_flow_v2("TensorArray.grad is not supported in v2")
def testTensorArrayWriteGradientAddMultipleAdds(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
def testMultiTensorArray(self):
with self.session(), self.test_scope():
def fn():
h1 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="foo")
w1 = h1.write(0, 4.0)
r1 = w1.read(0)
h2 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="bar")
w2 = h2.write(0, 5.0)
r2 = w2.read(0)
return r1 + r2
self.assertAllClose(9.0, self.evaluate(xla.compile(fn)[0]))
def _testTensorArrayGradientWriteReadType(self, dtype):
with self.session() as session, self.test_scope():
c = lambda x: np.array(x, dtype=dtype)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.as_dtype(dtype),
tensor_array_name="foo",
size=3,
infer_shape=False)
value_0 = constant_op.constant(c([[4.0, 5.0]]))
value_1 = constant_op.constant(c([[3.0, 3.5]]))
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
r0 = w1.read(0)
r1 = w1.read(1)
r0_2 = w1.read(0)
# Test individual components' gradients
grad_just_r0 = gradients_impl.gradients(
ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
grad_r0_r0_2 = gradients_impl.gradients(
ys=[r0, r0_2],
xs=[value_0],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
grad_just_r1 = gradients_impl.gradients(
ys=[r1], xs=[value_1], grad_ys=[c([[-2.0, -4.0]])])
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r0, r0_2, r1],
xs=[value_0, value_1],
grad_ys=[c([[2.0, 3.0]]),
c([[1.0, -1.0]]),
c([[-2.0, -10.0]])])
return [grad_just_r0, grad_r0_r0_2, grad_just_r1, grad]
[grad_just_r0_vals, grad_r0_r0_2_vals, grad_just_r1_vals,
grad_vals] = self.evaluate(xla.compile(fn))
self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
self.assertAllEqual(c([[-2.0, -4.0]]), grad_just_r1_vals[0])
self.assertEqual(len(grad_vals), 2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_vals[0])
self.assertAllEqual(c([[-2.0, -10.0]]), grad_vals[1])
def testTensorArrayGradientWriteRead(self):
for dtype in self.float_types:
self._testTensorArrayGradientWriteReadType(dtype)
for dtype in self.complex_types:
self._testTensorArrayGradientWriteReadType(dtype)
def _testTensorArrayGradientWritePackConcatAndRead(self):
with self.session() as sess, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value_0 = constant_op.constant([-1.0, 1.0])
value_1 = constant_op.constant([-10.0, 10.0])
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
p0 = w1.stack()
r0 = w1.read(0)
s0 = w1.concat()
# Test gradient accumulation between read(0), pack(), and concat().
with ops.control_dependencies([p0, r0, s0]):
return gradients_impl.gradients(
ys=[p0, r0, s0],
xs=[value_0, value_1],
grad_ys=[
[[2.0, 3.0], [4.0, 5.0]], # stack gradient
[-0.5, 1.5], # read(0) gradient
[20.0, 30.0, 40.0, 50.0], # concat gradient
])
grad_vals = self.evaluate(xla.compile(fn)) # 2 + 2 entries
self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
@test_util.disable_control_flow_v2("b/122315751 (concat)")
def testTensorArrayGradientWritePackConcatAndRead(self):
self._testTensorArrayGradientWritePackConcatAndRead()
def testTensorArrayReadTwice(self):
with self.session(), self.test_scope():
def fn():
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
ta_readtwice = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
w_readtwice = ta_readtwice.unstack(value)
r0_readtwice = w_readtwice.read(0)
with ops.control_dependencies([r0_readtwice]):
r1_readtwice = w_readtwice.read(0)
return [r0_readtwice, r1_readtwice]
self.assertAllEqual([1.0, -1.0], self.evaluate(xla.compile(fn))[0])
def _testTensorArrayGradientUnpackRead(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
r0_1 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0).
return gradients_impl.gradients(
ys=[r0, r0_1, r1],
xs=[value],
grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
grad_vals = self.evaluate(xla.compile(fn))
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])
def testTensorArrayGradientUnpackRead(self):
self._testTensorArrayGradientUnpackRead()
@test_util.disable_control_flow_v2("b/122315751(concat), b/122315872(split)")
def testTensorArrayGradientSplitConcat(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=2)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0],
[100.0, -100.0], [1000.0, -1000.0]])
w = ta.split(value, [2, 2])
r = w.concat()
# Test combined gradients
return gradients_impl.gradients(
ys=[r],
xs=[value],
grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0],
[2000.0, -2000.0]]])
grad_vals = self.evaluate(xla.compile(fn))
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, -2.0], [20.0, -20.0], [200.0, -200.0],
[2000.0, -2000.0]],
grad_vals[0])
def testCloseTensorArray(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
with ops.control_dependencies([ta.close()]):
return 1.0
self.evaluate(xla.compile(fn)[0])
def testSizeTensorArray(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
return ta.size()
self.assertAllEqual(3, self.evaluate(xla.compile(fn))[0])
def testWriteCloseTensorArray(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[3.0, 1.0]])
with ops.control_dependencies([w1.close()]):
return 1.0
self.evaluate(xla.compile(fn))
# TODO(phawkins): implement while loops.
# def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
# np_dtype = dtype.as_numpy_dtype
# with self.session() as session, self.test_scope():
# v0 = array_ops.identity(np.arange(3 * 5, dtype=np_dtype).reshape(3, 5))
# var = variables.Variable(np.arange(100, 105, dtype=np_dtype))
# state0 = array_ops.identity(np.array([1] * 5, dtype=np_dtype))
# ta = tensor_array_ops.TensorArray(
# dtype=dtype,
# tensor_array_name="foo",
# size=0 if dynamic_size else 3,
# dynamic_size=dynamic_size)
# time_0 = array_ops.identity(0)
# def body(time, ta_t, state):
# sliced = array_ops.slice(
# v0, begin=array_ops.stack([time, 0]), size=[1, -1])
# sliced = array_ops.squeeze(sliced)
# out = sliced + var + state
# state += sliced
# ta_t = ta_t.write(time, out)
# return (time + 1, ta_t, state)
# (unused_0, h_final, unused_2) = control_flow_ops.while_loop(
# cond=lambda time, unused_1, unused_2: time < 3,
# body=body,
# loop_vars=(time_0, ta, state0),
# shape_invariants=(time_0.get_shape(), tensor_shape.unknown_shape(),
# tensor_shape.unknown_shape()),
# parallel_iterations=3)
# vout = h_final.stack()
# grad_val = -np.arange(3 * 5, dtype=np_dtype).reshape(3, 5)
# v0_grad = gradients_impl.gradients([vout], [v0], [grad_val])[0]
# state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]
# var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]
# variables.global_variables_initializer().run()
# state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
# self.evaluate([state0, var, v0, vout, v0_grad, var_grad, state0_grad])
# )
# just_v0_grad_t, = self.evaluate([v0_grad])
# # state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]
# # vout = [ v0[0] + var + state[0] |
# # v0[1] + var + state[1] |
# # v0[2] + var + state[2] ]
# # = [ v0[0] + var + state0 |
# # v0[1] + var + state0 + v0[0] |
# # v0[2] + var + state0 + v0[0] + v0[1] ]
# #
# # d(vout[0])/d(v0) = [1 | 0 | 0 ]
# # d(vout[1])/d(v0) = [1 | 1 | 0 ]
# # d(vout[2])/d(v0) = [1 | 1 | 1 ]
# # d(vout)/d(var) = [1 | 1 | 1]
# # d(vout)/d(state0) = [ 1 | 1 | 1 ]
# state_per_time = np.array(
# [state0_t, state0_t + v0_t[0, :],
# state0_t + v0_t[0, :] + v0_t[1, :]])
# # Compare forward prop
# self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
# # Compare backward prop
# expected_v0_grad_t = np.array([
# grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
# grad_val[1, :] + grad_val[2, :], grad_val[2, :]
# ])
# self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
# self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
# self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
# self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
# def testWhileLoopWritePackGradients(self):
# self._testWhileLoopWritePackGradients(
# dynamic_size=False, dtype=dtypes.float32)
# # TODO(ebrevdo): re-enable when While supports non-float32 gradients.
# # self._testWhileLoopWritePackGradients(
# # dynamic_size=False, dtype=tf.int64)
# def testWhileLoopDynamicWritePackGradients(self):
# self._testWhileLoopWritePackGradients(
# dynamic_size=True, dtype=dtypes.float32)
# def testGradSerialTwoLoops(self):
# with self.session(), self.test_scope():
# num_steps = 100
# acc = tensor_array_ops.TensorArray(
# dtype=dtypes.float32,
# size=num_steps,
# clear_after_read=False,
# element_shape=tensor_shape.scalar())
# i = constant_op.constant(0, name="i")
# x = constant_op.constant(2.0, name="x")
# c = lambda i, acc: i < 5
# def b(i, acc):
# x1 = control_flow_ops.cond(
# math_ops.equal(i, 0), lambda: x,
# lambda: math_ops.multiply(acc.read(i - 1), 2.0))
# return i + 1, acc.write(i, x1)
# i1, acc1 = control_flow_ops.while_loop(c, b, [i, acc])
# z = constant_op.constant(0.0)
# def fn(i, acc):
# return i + 1, acc.write(i, z)
# _, acc2 = control_flow_ops.while_loop(lambda i, acc: i < num_steps, fn,
# [i1, acc1])
# r = acc2.stack()
# grad = gradients_impl.gradients(r, [x])[0]
# self.assertAllClose(31.0, self.evaluate(grad))
def testSumOfTwoReadVariablesWithoutRepeatGrad(self):
with self.session() as session, self.test_scope():
g0 = -(np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
def fn():
a = array_ops.identity(
np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
b = array_ops.identity(
np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1 + 3 * 5)
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
ta = ta.write(0, a, name="write_a")
ta = ta.write(1, b, name="write_b")
c = (
ta.read(0, name="read_a_0") + # a + b
ta.read(1, name="read_b_0"))
grad_a = gradients_impl.gradients([c], [a], [g0])[0] # d(a+b)/da = 1
grad_b = gradients_impl.gradients([c], [b], [g0])[0] # d(a+b)/db = 1
return [grad_a, grad_b]
grad_a, grad_b = xla.compile(fn)
# Test gradients calculated individually
grad_a_t, = self.evaluate([grad_a])
self.assertAllEqual(grad_a_t, g0)
grad_b_t, = self.evaluate([grad_b])
self.assertAllEqual(grad_b_t, g0)
# Test gradients calculated jointly.
joint_grad_a_t, joint_grad_b_t = self.evaluate([grad_a, grad_b])
self.assertAllEqual(joint_grad_a_t, g0)
self.assertAllEqual(joint_grad_b_t, g0)
def testWriteShape(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c0 = constant_op.constant([4.0, 5.0])
w0 = ta.write(0, c0)
r0 = w0.read(0)
return [c0, r0]
c0, r0 = xla.compile(fn)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c1 = constant_op.constant([6.0, 7.0])
w0 = ta.write(0, c0)
w1 = w0.write(1, c1)
r0 = w1.read(0)
r1 = w1.read(1)
return [r0, c1, r1]
[r0, c1, r1] = xla.compile(fn)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
self.assertAllEqual(c1.get_shape(), r1.get_shape())
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, c0)
c2 = constant_op.constant([4.0, 5.0, 6.0])
return w0.write(0, c2).flow
with self.assertRaises(ValueError):
self.evaluate(xla.compile(fn))
def _testGradientWhenNotAllComponentsRead(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
x = constant_op.constant([2.0, 3.0])
w = ta.unstack(x)
r0 = w.read(0)
        # Calculate (dr0/dx0, dr0/dx1). Since r0 = x0, the gradients are (1, 0).
return gradients_impl.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
grad_r0_vals = self.evaluate(xla.compile(fn))[0]
self.assertAllEqual(grad_r0_vals, [1.0, 0.0])
def testGradientWhenNotAllComponentsRead(self):
self._testGradientWhenNotAllComponentsRead()
def _testTensorArrayEvalEmpty(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, infer_shape=False)
return ta.stack()
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError, "Uninitialized TensorArray passed to "
"TensorArrayStack/TensorArrayGatherV3"):
xla.compile(fn)[0].eval()
@test_util.disable_control_flow_v2("b/124335246")
def testTensorArrayEvalEmpty(self):
self._testTensorArrayEvalEmpty()
def _testTensorArrayEvalEmptyWithDefault(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, infer_shape=True)
size = ta.size()
ta = ta.unstack(array_ops.zeros([0, 3, 5]))
return [size, ta.stack()]
[size, stack] = self.evaluate(xla.compile(fn))
self.assertEqual(0, size)
self.assertAllEqual([0, 3, 5], stack.shape)
# Concatenating zero tensors along their first dimension gives a
# first dimension of zero
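      # For example, unstacking zeros([0, 3, 5]) yields zero elements of shape
      # [3, 5], so concatenating them along the first dimension should produce
      # shape [0, 5], as the assertion below checks.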
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, infer_shape=True)
ta = ta.unstack(array_ops.zeros([0, 3, 5]))
return ta.concat()
# TODO(b/122315751): Enable this.
self.assertAllEqual([0, 5], self.evaluate(xla.compile(fn))[0].shape)
def testTensorArrayEvalEmptyWithDefault(self):
self._testTensorArrayEvalEmptyWithDefault()
def _testTensorArrayScatterRead(self, tf_dtype):
with self.session() as session, self.test_scope():
convert = _make_converter(tf_dtype)
id0 = array_ops.placeholder(dtypes.int32)
id1 = array_ops.placeholder(dtypes.int32)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=10)
indices = constant_op.constant([1, 8])
value = constant_op.constant(convert([[1.0, -1.0], [10.0, -10.0]]))
w = ta.scatter(indices, value)
r0 = w.read(id0)
r1 = w.read(id1)
return [r0, r1]
# Test aggregation of read
read_vals = session.run(xla.compile(fn), feed_dict={id0: 1, id1: 8})
self.assertAllEqual(convert([1.0, -1.0]), read_vals[0])
self.assertAllEqual(convert([10.0, -10.0]), read_vals[1])
@test_util.disable_control_flow_v2("b/122315734 (scatter)")
def testTensorArrayScatterRead(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayScatterRead(dtype)
self._testTensorArrayScatterRead(dtypes.bool)
@test_util.disable_control_flow_v2("b/122315734 (scatter)")
def testTensorArrayScatterReadAndGradients(self):
with self.session() as session, self.test_scope():
id0 = array_ops.placeholder(dtypes.int32)
id1 = array_ops.placeholder(dtypes.int32)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=10)
indices = constant_op.constant([1, 8])
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.scatter(indices, value)
r0 = w.read(id0)
r1 = w.read(id1)
        # Test combined gradients + aggregation of the two reads.
grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
return [[r0, r1], grad]
read_vals, grad_vals = session.run(
xla.compile(fn), feed_dict={
id0: 1,
id1: 8
})
self.assertEqual(len(read_vals), 2)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([1.0, -1.0], read_vals[0])
self.assertAllEqual([10.0, -10.0], read_vals[1])
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
@test_util.disable_control_flow_v2("b/122315378 (gather)")
def testTensorArrayWriteGatherAndGradients(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=10)
values = constant_op.constant([[1.0 * x, -1.0 * x] for x in range(10)])
indices = constant_op.constant([1, 8])
w = ta.unstack(values)
g = w.gather(indices)
        # Test combined gradients through the gather of two components.
grad = gradients_impl.gradients(
ys=[g], xs=[values], grad_ys=[[[2.0, 3.0], [4.0, 5.0]]])
return [[g], grad]
g_vals, grad_vals = self.evaluate(xla.compile(fn))
      # Gradients for the 8 unread components (out of 10) are zero.
expected_grad = np.zeros((10, 2))
expected_grad[1] = [2.0, 3.0]
expected_grad[8] = [4.0, 5.0]
self.assertEqual(len(g_vals), 1)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[1.0, -1.0], [8.0, -8.0]], g_vals[0])
self.assertAllEqual(expected_grad, grad_vals[0])
def testTensorArrayIdentity(self):
with self.session() as session, self.test_scope():
tensor_arrays = {}
v0 = resource_variable_ops.ResourceVariable(0.0)
v1 = resource_variable_ops.ResourceVariable(0.0)
def fn():
ta0 = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, infer_shape=False)
ta1 = tensor_array_ops.TensorArray(
dtype=dtypes.int32, size=4, infer_shape=True)
ta0 = ta0.write(0, 0.)
ta1 = ta1.write(0, 1)
with ops.control_dependencies([v0.assign_add(1.0)]):
ta0 = ta0.identity()
with ops.control_dependencies([v1.assign_add(1.0)]):
ta1 = ta1.identity()
read0 = ta0.read(0)
read1 = ta1.read(0)
size0 = ta0.size()
size1 = ta1.size()
tensor_arrays[0] = ta0
tensor_arrays[1] = ta1
return [read0, read1, size0, size1, v0, v1]
variables.global_variables_initializer().run()
read0_v, read1_v, size0_v, size1_v, v0, v1 = self.evaluate(
xla.compile(fn))
# Tests correct properties on new TensorArrays.
self.assertEqual(dtypes.float32, tensor_arrays[0].dtype)
self.assertEqual(dtypes.int32, tensor_arrays[1].dtype)
      # Tests that the control dependencies were added and executed.
self.assertEqual(1.0, v0)
self.assertEqual(1.0, v1)
      # Tests that the TensorArray reads and sizes are correct.
self.assertEqual(read0_v, 0)
self.assertEqual(read1_v, 1)
self.assertEqual(size0_v, 2)
self.assertEqual(size1_v, 4)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/tensor_array_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import math
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test
def GenerateNumpyRandomRGB(shape):
# Only generate floating points that are fractions like n / 256, since they
# are RGB pixels. Some low-precision floating point types in this test can't
# handle arbitrary precision floating points well.
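  # For instance, 37/256 needs only a few mantissa bits and should survive a
  # round trip through bfloat16 unchanged, while an arbitrary float generally
  # does not.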
return np.random.randint(0, 256, shape) / 256.
class RGBToHSVTest(xla_test.XLATestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in self.float_types:
inp = GenerateNumpyRandomRGB(shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.session() as sess:
batch0 = array_ops.placeholder(nptype, shape=shape)
with self.test_scope():
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
with self.test_scope():
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2],
{batch0: inp})
# Verify that processing batch elements together is the same as separate
self.assertAllCloseAccordingToType(batch1, join1, half_rtol=0.000002)
self.assertAllCloseAccordingToType(batch2, join2, half_rtol=0.000002)
self.assertAllCloseAccordingToType(
batch2, inp, bfloat16_atol=0.03, half_rtol=0.02)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in self.float_types:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.session():
placeholder = array_ops.placeholder(nptype)
with self.test_scope():
hsv = image_ops.rgb_to_hsv(placeholder)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = rgb.eval(feed_dict={placeholder: rgb_np})
self.assertAllCloseAccordingToType(rgb_tf, rgb_np, bfloat16_atol=0.03)
def testRGBToHSVNumpy(self):
"""Tests the RGB to HSV conversion matches a reference implementation."""
for nptype in self.float_types:
rgb_flat = GenerateNumpyRandomRGB((64, 3)).astype(nptype)
rgb_np = rgb_flat.reshape(4, 4, 4, 3)
hsv_np = np.array([
colorsys.rgb_to_hsv(
r.astype(np.float64), g.astype(np.float64), b.astype(np.float64))
for r, g, b in rgb_flat
])
hsv_np = hsv_np.reshape(4, 4, 4, 3)
with self.session():
placeholder = array_ops.placeholder(nptype)
with self.test_scope():
hsv_op = image_ops.rgb_to_hsv(placeholder)
hsv_tf = hsv_op.eval(feed_dict={placeholder: rgb_np})
self.assertAllCloseAccordingToType(hsv_tf, hsv_np)
class AdjustContrastTest(xla_test.XLATestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.session():
x = array_ops.placeholder(x_np.dtype, shape=x_np.shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = image_ops.adjust_contrast(flt_x, contrast_factor)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllClose(y_tf, y_np, 1e-6)
def testFloatContrast(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testBatchContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
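    # Reference implementation: y = mean + contrast_factor * (x - mean), with
    # the mean taken over the spatial axes (per image and per channel). For
    # example, pixel values [10, 30] (mean 20) with contrast_factor=2 map to
    # [0, 40].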
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
def _adjustContrastTf(self, x_np, contrast_factor):
with self.session():
x = array_ops.placeholder(np.float32)
with self.test_scope():
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = y.eval({x: x_np})
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
class AdjustHueTest(xla_test.XLATestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(flt_x, delta)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(flt_x, delta)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def testBatchAdjustHue(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(flt_x, delta)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def _adjustHueNp(self, x_np, delta_h):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
h += delta_h
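      # Adding 10.0 before fmod keeps the shifted hue non-negative (the deltas
      # used in these tests lie in [-1, 1]), so the result wraps into [0, 1).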
h = math.fmod(h + 10.0, 1.0)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def _adjustHueTf(self, x_np, delta_h):
with self.session():
x = array_ops.placeholder(dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(x, delta_h)
y_tf = y.eval({x: x_np})
return y_tf
def testAdjustRandomHue(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_np = self._adjustHueNp(x_np, delta_h)
y_tf = self._adjustHueTf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-4)
def testInvalidShapes(self):
fused = False
if not fused:
# The tests are known to pass with the fused adjust_hue. We will enable
# them when the fused implementation is the default.
return
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"):
self._adjustHueTf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError("input must have 3 channels"):
self._adjustHueTf(x_np, delta_h)
class AdjustSaturationTest(xla_test.XLATestCase):
def _adjust_saturation(self, image, saturation_factor):
image = ops.convert_to_tensor(image, name="image")
orig_dtype = image.dtype
flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
with self.test_scope():
saturation_adjusted_image = gen_image_ops.adjust_saturation(
flt_image, saturation_factor)
return image_ops.convert_image_dtype(saturation_adjusted_image, orig_dtype)
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_rgb_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_rgb_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_rgb_data, dtype=np.uint8).reshape(x_shape)
with self.session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def _adjustSaturationNp(self, x_np, scale):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
s *= scale
s = min(1.0, max(0.0, s))
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def testAdjustRandomSaturation(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
with self.session():
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand()
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_baseline = self._adjustSaturationNp(x_np, scale)
x = array_ops.placeholder(dtypes.float32, shape=x_shape)
with self.test_scope():
y_fused = self._adjust_saturation(x,
scale).eval(feed_dict={x: x_np})
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class ResizeNearestNeighborTest(xla_test.XLATestCase):
# TODO(ilch): Wrap each test with `for dtype in self.float_types:`
# Some work to understand how that should be done was presented here:
# cl/227850213
def _assertForwardOpMatchesExpected(self,
image_np,
target_shape,
expected=None,
large_tolerance=False,
align_corners=True):
if expected is None:
self.fail("expected must be specified")
with self.session() as sess, self.test_scope():
image = array_ops.placeholder(image_np.dtype)
resized = gen_image_ops.resize_nearest_neighbor(
image, target_shape, align_corners=align_corners)
out = sess.run(resized, {image: image_np[np.newaxis, :, :, np.newaxis]})
if large_tolerance:
self.assertAllClose(
expected[np.newaxis, :, :, np.newaxis], out, rtol=2e-4, atol=2e-4)
else:
self.assertAllClose(expected[np.newaxis, :, :, np.newaxis], out)
def testAlignCorners2x2To1x1(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2], [3, 4]], dtype=np.float32), [1, 1],
expected=np.array([[1]], dtype=np.float32))
def testAlignCorners1x1To2x2(self):
self._assertForwardOpMatchesExpected(
np.array([[1]], dtype=np.float32), [2, 2],
expected=np.array([[1, 1], [1, 1]], dtype=np.float32))
def testAlignCorners1x1To3x3(self):
self._assertForwardOpMatchesExpected(
np.array([[1]], dtype=np.float32), [3, 3],
expected=np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.float32))
def testAlignCorners2x2To3x3(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2], [3, 4]], dtype=np.float32), [3, 3],
expected=np.array([[1, 2, 2], [3, 4, 4], [3, 4, 4]], dtype=np.float32))
def testAlignCorners2x2To4x4(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2], [3, 4]], dtype=np.float32), [4, 4],
expected=np.array(
[[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]],
dtype=np.float32), large_tolerance=True)
def testAlignCorners3x3To2x2(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [2, 2],
expected=np.array([[1, 3], [7, 9]], dtype=np.float32))
def testAlignCorners4x4To3x3(self):
self._assertForwardOpMatchesExpected(
np.array(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]],
dtype=np.float32), [3, 3],
expected=np.array([[1, 3, 4], [9, 11, 12], [13, 15, 16]],
dtype=np.float32))
def testAlignCorners3x3To4x4(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [4, 4],
expected=np.array(
[[1, 2, 2, 3], [4, 5, 5, 6], [4, 5, 5, 6], [7, 8, 8, 9]],
dtype=np.float32))
def testAlignCorners3x3To6x6(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [6, 6],
expected=np.array(
[[1, 1, 2, 2, 3, 3], [1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6],
[4, 4, 5, 5, 6, 6], [7, 7, 8, 8, 9, 9], [7, 7, 8, 8, 9, 9]],
dtype=np.float32))
def testAlignCorners3x3To9x9(self):
    # The expected matrix might look uneven in terms of how many of each number
    # there are, but this is an artifact of doing the dilation and convolution
    # iteratively. The behavior is less esoteric in the 3x3To12x12 case below.
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [9, 9],
expected=np.array(
[[1, 1, 2, 2, 2, 2, 3, 3, 3], [1, 1, 2, 2, 2, 2, 3, 3, 3],
[4, 4, 5, 5, 5, 5, 6, 6, 6], [4, 4, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 5, 5, 5, 5, 6, 6, 6], [4, 4, 5, 5, 5, 5, 6, 6, 6],
[7, 7, 8, 8, 8, 8, 9, 9, 9], [7, 7, 8, 8, 8, 8, 9, 9, 9],
[7, 7, 8, 8, 8, 8, 9, 9, 9]],
dtype=np.float32))
def testAlignCorners3x3To12x12(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [12, 12],
expected=np.array([[1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],
[1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],
[1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9],
[7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9],
[7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9]],
dtype=np.float32))
def testAlignCorners3x3To12x12_uint8(self):
# TODO(b/72099414): enable the test for TPU when the issue is fixed.
    if self.device not in ["XLA_GPU", "XLA_CPU"]:
return
# Ensure that resize with convolution works on XLA/GPU for integer types
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.uint8), [12, 12],
expected=np.array([[1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],
[1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],
[1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9],
[7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9],
[7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9]],
dtype=np.uint8))
class ResizeBilinearTest(parameterized.TestCase, xla_test.XLATestCase):
def _assertForwardOpMatchesExpected(self,
image_np,
target_shape,
expected=None,
large_tolerance=False,
align_corners=True):
if expected is None:
self.fail("expected must be specified")
with self.session() as sess, self.test_scope():
image = array_ops.placeholder(image_np.dtype)
resized = gen_image_ops.resize_bilinear(
image, target_shape, align_corners=align_corners)
out = sess.run(resized, {image: image_np[np.newaxis, :, :, np.newaxis]})
if large_tolerance:
self.assertAllClose(
expected[np.newaxis, :, :, np.newaxis], out, rtol=0.1, atol=0.01)
else:
self.assertAllClose(expected[np.newaxis, :, :, np.newaxis], out)
@parameterized.named_parameters(
("1x2To3x3", 1, 2, 3, 3),
("2x2To1x1", 2, 2, 1, 1),
("2x2To3x3", 2, 2, 3, 3),
("3x3To2x2", 3, 3, 2, 2),
("4x4To3x3", 4, 4, 3, 3),
("3x3To9x9", 3, 3, 9, 9),
("4x4To8x8", 4, 4, 8, 8),
("8x8To16x16", 8, 8, 16, 16),
("64x64To512x512", 64, 64, 512, 512),
("80x80To512x512", 80, 80, 512, 512),
("96x96To512x512", 96, 96, 512, 512),
("112x112To512x512", 112, 112, 512, 512),
("256x48To2048x384", 256, 48, 2048, 384),
("320x60To2048x384", 320, 60, 2048, 384),
("448x84To2048x384", 448, 84, 2048, 384),
("69x69To545x545", 69, 69, 545, 545),
("86x86To545x545", 86, 86, 545, 545),
("103x103To545x545", 103, 103, 545, 545),
("120x120To545x545", 120, 120, 545, 545),
("57x57To456x456", 57, 57, 456, 456),
("72x72To456x456", 72, 72, 456, 456),
("86x86To456x456", 86, 86, 456, 456),
("100x100To456x456", 100, 100, 456, 456),
("64x64To224x224", 64, 64, 224, 224),
("128x128To224x224", 128, 128, 224, 224),
("256x256To224x224", 256, 256, 224, 224),
("512x512To224x224", 512, 512, 224, 224),
("64x64To299x299", 64, 64, 299, 299),
("128x128To299x299", 128, 128, 299, 299),
("256x256To299x299", 256, 256, 299, 299),
      # Disabled due to OOM on TitanV: nvbug 2838964
#("512x512To299x299", 512, 512, 299, 299),
("224x224To224x224", 224, 224, 224, 224),
# This test is disabled because it is very slow. It is slow because
# 383 is prime, 383 and 2047 are coprime, and 2048 is large.
# ("Disabled_384x72To2048x384", 384, 72, 2048, 384),
)
def test(self, src_y, src_x, dst_y, dst_x):
self.skipTest("Disabled because it runs out of memory")
max_y = max(src_y - 1, 1) * (dst_y - 1) + 1
max_x = max(src_x - 1, 1) * (dst_x - 1) + 1
input_data = [
range(y * max_x, (y + 1) * max_x, max(dst_x - 1, 1))
for y in range(0, max_y, max(dst_y - 1, 1))
]
result = [
range(y * max_x, (y + 1) * max_x, max(src_x - 1, 1))
for y in range(0, max_y, max(src_y - 1, 1))
]
self._assertForwardOpMatchesExpected(
np.array(input_data, dtype=np.float32), [dst_y, dst_x],
expected=np.array(result, dtype=np.float32),
large_tolerance=True)
class ResizeBilinearGradTest(parameterized.TestCase, xla_test.XLATestCase):
def _assertBackwardOpMatchesExpected(self,
grads_np,
input_shape=None,
dtype=None,
expected=None,
large_tolerance=False):
if input_shape is None:
self.fail("input_shape must be specified")
if expected is None:
self.fail("expected must be specified")
with self.session() as sess, self.test_scope():
dtype = dtype or np.float32
grads = array_ops.placeholder(np.float32)
resized = gen_image_ops.resize_bilinear_grad(
grads,
np.zeros([1, input_shape[0], input_shape[1], 1], dtype=dtype),
align_corners=True)
out = sess.run(resized, {grads: grads_np[np.newaxis, :, :, np.newaxis]})
if large_tolerance:
self.assertAllClose(
expected[np.newaxis, :, :, np.newaxis], out, rtol=0.1, atol=0.01)
else:
self.assertAllCloseAccordingToType(
expected[np.newaxis, :, :, np.newaxis], out)
@parameterized.named_parameters(
("1x3To1x3", 1, 2, 1, 3),
("1x2To3x2", 1, 2, 3, 2),
("1x2To3x3", 1, 2, 3, 3),
("1x1To4x1", 1, 1, 4, 1),
("1x1To5x1", 1, 1, 5, 1),
("2x2To1x1", 2, 2, 1, 1),
("2x2To3x3", 2, 2, 3, 3),
("3x3To2x2", 3, 3, 2, 2),
("4x4To3x3", 4, 4, 3, 3),
("3x3To9x9", 3, 3, 9, 9),
("4x4To8x8", 4, 4, 8, 8),
("8x8To16x16", 8, 8, 16, 16),
("2x64To2x512", 2, 64, 2, 512),
("64x64To512x512", 64, 64, 512, 512),
("80x80To512x512", 80, 80, 512, 512),
("96x96To512x512", 96, 96, 512, 512),
("112x112To512x512", 112, 112, 512, 512),
# ("Disabled_256x48To2048x384", 256, 48, 2048, 384),
# ("Disabled_320x60To2048x384", 320, 60, 2048, 384),
# ("Disabled_448x84To2048x384", 448, 84, 2048, 384),
("69x69To545x545", 69, 69, 545, 545),
("86x86To545x545", 86, 86, 545, 545),
("103x103To545x545", 103, 103, 545, 545),
("120x120To545x545", 120, 120, 545, 545),
("57x57To456x456", 57, 57, 456, 456),
("72x72To456x456", 72, 72, 456, 456),
("86x86To456x456", 86, 86, 456, 456),
("100x100To456x456", 100, 100, 456, 456),
# This test is disabled because it is very slow. It is slow because
# 383 is prime, 383 and 2047 are coprime, and 2048 is large.
# ("Disabled_384x72To2048x384", 384, 72, 2048, 384),
)
def test(self, src_y, src_x, dst_y, dst_x):
def GetRow(src, dst):
if src == 1:
return np.array([[max(dst**2 - dst, 1)]])
row = [0] * src
for i in range(0, (dst - 1) * max(src - 1, 1) + 1, src - 1):
prev = int(math.floor(i / max(dst - 1, 1)))
row[prev] += max(dst - 1, 1) - i % max(dst - 1, 1)
if prev + 1 < src:
row[prev + 1] += i % max(dst - 1, 1)
return np.array([row])
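    # GetRow builds the per-axis gradient weights for an align_corners resize.
    # As a sanity check, in the 2x2To3x3 case GetRow(2, 3) is [[3, 3]], so with
    # a 3x3 gradient of 2*2 = 4 everywhere the expected backward output is
    # [[3, 3]] * [[3], [3]] = [[9, 9], [9, 9]], and both sides sum to 36.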
input_element = max(dst_x - 1, 1) * max(dst_y - 1, 1)
input_data = [[input_element] * dst_x] * dst_y
result = GetRow(src_x, dst_x) * np.transpose(GetRow(src_y, dst_y))
self._assertBackwardOpMatchesExpected(
np.array(input_data, dtype=np.float32), [src_y, src_x],
expected=np.array(result, dtype=np.float32),
large_tolerance=True)
class ResizeBilinearNonAlignCornersTest(xla_test.XLATestCase):
def _assertForwardOpMatchesExpected(self,
image_np,
target_shape,
expected=None,
large_tolerance=False,
align_corners=True):
if expected is None:
self.fail("expected must be specified")
with self.session() as sess, self.test_scope():
image = array_ops.placeholder(image_np.dtype)
resized = gen_image_ops.resize_bilinear(
image, target_shape, align_corners=align_corners)
out = sess.run(resized, {image: image_np[np.newaxis, :, :, np.newaxis]})
if large_tolerance:
self.assertAllClose(
expected[np.newaxis, :, :, np.newaxis], out, rtol=0.1, atol=0.01)
else:
self.assertAllClose(expected[np.newaxis, :, :, np.newaxis], out)
def testNonAlignCorners3x2To6x4(self):
input_data = [[64, 32], [32, 64], [50, 100]]
expected_data = [[64.0, 48.0, 32.0, 32.0], [48.0, 48.0, 48.0, 48.0],
[32.0, 48.0, 64.0, 64.0], [41.0, 61.5, 82.0, 82.0],
[50.0, 75.0, 100.0, 100.0], [50.0, 75.0, 100.0, 100.0]]
for dtype in self.float_types:
self._assertForwardOpMatchesExpected(
np.array(input_data, dtype=dtype), [6, 4],
expected=np.array(expected_data, dtype=np.float32),
align_corners=False)
def testNonAlignCorners6x4To3x2(self):
input_data = [[127, 127, 64, 64], [127, 127, 64, 64], [64, 64, 127, 127],
[64, 64, 127, 127], [50, 50, 100, 100], [50, 50, 100, 100]]
expected_data = [[127, 64], [64, 127], [50, 100]]
for dtype in self.float_types:
self._assertForwardOpMatchesExpected(
np.array(input_data, dtype=dtype), [3, 2],
expected=np.array(expected_data, dtype=dtype),
align_corners=False)
def testNonAlignCorners3x2To6x4Batch2(self):
input_data = [[[64, 32], [32, 64], [50, 100]], [[32, 16], [16, 32],
[25, 50]]]
expected_data = [[[64.0, 48.0, 32.0, 32.0], [48.0, 48.0, 48.0, 48.0],
[32.0, 48.0, 64.0, 64.0], [41.0, 61.5, 82.0, 82.0],
[50.0, 75.0, 100.0, 100.0], [50.0, 75.0, 100.0, 100.0]],
[[32.0, 24.0, 16.0, 16.0], [24.0, 24.0, 24.0, 24.0],
[16.0, 24.0, 32.0, 32.0], [20.5, 30.75, 41.0, 41.0],
[25.0, 37.5, 50.0, 50.0], [25.0, 37.5, 50.0, 50.0]]]
for dtype in self.float_types:
input_image = np.array(input_data, dtype=dtype)
expected = np.array(expected_data, dtype=dtype)
with self.session() as sess, self.test_scope():
image = array_ops.placeholder(input_image.dtype)
resized = gen_image_ops.resize_bilinear(
image, [6, 4], align_corners=False)
out = sess.run(resized, {image: input_image[:, :, :, np.newaxis]})
self.assertAllClose(expected[:, :, :, np.newaxis], out)
class NonMaxSuppressionTest(xla_test.XLATestCase):
def testNMS128From1024(self):
num_boxes = 1024
boxes_np = np.random.normal(50, 10, (num_boxes, 4)).astype("f4")
scores_np = np.random.normal(0.5, 0.1, (num_boxes,)).astype("f4")
max_output_size = 128
iou_threshold_np = np.array(0.5, dtype=np.float32)
score_threshold_np = np.array(0.0, dtype=np.float32)
with self.session() as sess:
boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
iou_threshold_np.shape)
score_threshold = array_ops.placeholder(score_threshold_np.dtype,
score_threshold_np.shape)
with self.test_scope():
selected_indices = image_ops.non_max_suppression_padded(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_to_max_output_size=True)
inputs_feed = {
boxes: boxes_np,
scores: scores_np,
score_threshold: score_threshold_np,
iou_threshold: iou_threshold_np
}
(indices_tf, _) = sess.run(selected_indices, feed_dict=inputs_feed)
self.assertEqual(indices_tf.size, max_output_size)
def testNMS3From6Boxes(self):
# Three boxes are selected based on IOU.
boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
boxes_np = np.array(boxes_data, dtype=np.float32)
scores_data = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
scores_np = np.array(scores_data, dtype=np.float32)
max_output_size = 3
iou_threshold_np = np.array(0.5, dtype=np.float32)
score_threshold_np = np.array(0.0, dtype=np.float32)
with self.session() as sess:
boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
iou_threshold_np.shape)
score_threshold = array_ops.placeholder(score_threshold_np.dtype,
score_threshold_np.shape)
with self.test_scope():
selected_indices = image_ops.non_max_suppression_padded(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_to_max_output_size=True)
inputs_feed = {
boxes: boxes_np,
scores: scores_np,
score_threshold: score_threshold_np,
iou_threshold: iou_threshold_np
}
(indices_tf, num_valid) = sess.run(
selected_indices, feed_dict=inputs_feed)
self.assertEqual(indices_tf.size, max_output_size)
self.assertEqual(num_valid, 3)
self.assertAllClose(indices_tf[:num_valid], [3, 0, 5])
def testNMS3Then2WithScoreThresh(self):
# Three boxes are selected based on IOU.
# One is filtered out by score threshold.
boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
boxes_np = np.array(boxes_data, dtype=np.float32)
scores_data = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
scores_np = np.array(scores_data, dtype=np.float32)
max_output_size = 3
iou_threshold_np = np.array(0.5, dtype=np.float32)
score_threshold_np = np.array(0.4, dtype=np.float32)
with self.session() as sess:
boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
iou_threshold_np.shape)
score_threshold = array_ops.placeholder(score_threshold_np.dtype,
score_threshold_np.shape)
with self.test_scope():
selected_indices = image_ops.non_max_suppression_padded(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_to_max_output_size=True)
inputs_feed = {
boxes: boxes_np,
scores: scores_np,
iou_threshold: iou_threshold_np,
score_threshold: score_threshold_np
}
(indices_tf, num_valid) = sess.run(
selected_indices, feed_dict=inputs_feed)
self.assertEqual(indices_tf.size, max_output_size)
self.assertEqual(num_valid, 2)
self.assertAllClose(indices_tf[:num_valid], [3, 0])
def testNMS3Then1WithScoreMaxThresh(self):
# Three boxes are selected based on IOU.
# One is filtered out by score threshold.
# One is filtered out by max_output_size.
boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
boxes_np = np.array(boxes_data, dtype=np.float32)
scores_data = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
scores_np = np.array(scores_data, dtype=np.float32)
max_output_size = 1
iou_threshold_np = np.array(0.5, dtype=np.float32)
score_threshold_np = np.array(0.4, dtype=np.float32)
with self.session() as sess:
boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
iou_threshold_np.shape)
score_threshold = array_ops.placeholder(score_threshold_np.dtype,
score_threshold_np.shape)
with self.test_scope():
selected_indices = image_ops.non_max_suppression_padded(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_to_max_output_size=True)
inputs_feed = {
boxes: boxes_np,
scores: scores_np,
iou_threshold: iou_threshold_np,
score_threshold: score_threshold_np
}
(indices_tf, num_valid) = sess.run(
selected_indices, feed_dict=inputs_feed)
self.assertEqual(indices_tf.size, max_output_size)
self.assertEqual(num_valid, 1)
self.assertAllClose(indices_tf[:num_valid], [3])
def testSelectFromContinuousOverLap(self):
# Tests that a suppressed box does not itself suppress other boxes.
boxes_data = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 3]]
boxes_np = np.array(boxes_data, dtype=np.float32)
scores_data = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
scores_np = np.array(scores_data, dtype=np.float32)
max_output_size = 3
iou_threshold_np = np.array(0.5, dtype=np.float32)
score_threshold_np = np.array(0.1, dtype=np.float32)
with self.session() as sess:
boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
iou_threshold_np.shape)
score_threshold = array_ops.placeholder(score_threshold_np.dtype,
score_threshold_np.shape)
with self.test_scope():
selected_indices = image_ops.non_max_suppression_padded(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_to_max_output_size=True)
inputs_feed = {
boxes: boxes_np,
scores: scores_np,
iou_threshold: iou_threshold_np,
score_threshold: score_threshold_np
}
(indices_tf, num_valid) = sess.run(
selected_indices, feed_dict=inputs_feed)
self.assertEqual(indices_tf.size, max_output_size)
self.assertEqual(num_valid, 3)
self.assertAllClose(indices_tf[:num_valid], [0, 2, 4])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/image_ops_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.self_adjoint_eig."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test
class SelfAdjointEigOpTest(xla_test.XLATestCase, parameterized.TestCase):
def _test(self, dtype, shape):
np.random.seed(1)
x_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype)
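    # Adding the transpose of the last two axes symmetrizes the matrix, so the
    # input to self_adjoint_eig is genuinely self-adjoint.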
x_np = x_np + np.swapaxes(x_np, -1, -2)
n = shape[-1]
e_np, _ = np.linalg.eigh(x_np)
with self.session() as sess:
x_tf = array_ops.placeholder(dtype)
with self.test_scope():
e, v = linalg_ops.self_adjoint_eig(x_tf)
e_val, v_val = sess.run([e, v], feed_dict={x_tf: x_np})
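      # The eigenvectors should be orthonormal: matmul(v, v^T) should be close
      # to the identity, so v_diff below should be near zero.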
v_diff = np.matmul(v_val, np.swapaxes(v_val, -1, -2)) - np.eye(n)
self.assertAlmostEqual(np.mean(v_diff**2), 0.0, delta=1e-6)
self.assertAlmostEqual(np.mean((e_val - e_np)**2), 0.0, delta=1e-6)
SIZES = [1, 2, 5, 10, 32]
DTYPES = [np.float32]
PARAMS = itertools.product(SIZES, DTYPES)
@parameterized.parameters(*PARAMS)
def testSelfAdjointEig(self, n, dtype):
for batch_dims in [(), (3,)] + [(3, 2)] * (n < 10):
self._test(dtype, batch_dims + (n, n))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/self_adjoint_eig_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.dynamic_stitch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import googletest
class DynamicStitchTest(xla_test.XLATestCase):
def _AssertDynamicStitchResultIs(self, indices, data, expected):
with self.session() as session:
index_placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype)) for arg in indices
]
data_placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype)) for arg in data
]
with self.test_scope():
output = data_flow_ops.dynamic_stitch(index_placeholders,
data_placeholders)
feed_dict = {}
for placeholder, value in zip(index_placeholders, indices):
feed_dict[placeholder] = value
for placeholder, value in zip(data_placeholders, data):
feed_dict[placeholder] = value
result = session.run(output, feed_dict=feed_dict)
self.assertAllClose(expected, result, rtol=1e-3)
def testSimpleEmpty(self):
idx1 = np.array([0, 2], dtype=np.int32)
idx2 = np.array([[1], [3]], dtype=np.int32)
val1 = np.array([[], []], dtype=np.int32)
val2 = np.array([[[]], [[]]], dtype=np.int32)
self._AssertDynamicStitchResultIs(
[idx1, idx2], [val1, val2],
expected=np.array([[], [], [], []], np.int32))
def testEmptyIndex(self):
idx1 = np.array([], dtype=np.int32)
idx2 = np.array([[], []], dtype=np.int32)
val1 = np.ndarray(shape=(0, 9), dtype=np.int32)
val2 = np.ndarray(shape=(2, 0, 9), dtype=np.int32)
self._AssertDynamicStitchResultIs([idx1, idx2], [val1, val2],
expected=np.ndarray(
shape=(0, 9), dtype=np.int32))
def testSimple1D(self):
val1 = np.array([0, 4, 7], dtype=np.int32)
val2 = np.array([1, 6, 2, 3, 5], dtype=np.int32)
val3 = np.array([0, 40, 70], dtype=np.float32)
val4 = np.array([10, 60, 20, 30, 50], dtype=np.float32)
expected = np.array([0, 10, 20, 30, 40, 50, 60, 70], dtype=np.float32)
self._AssertDynamicStitchResultIs(
[val1, val2], [val3, val4], expected=expected)
def testSimple2D(self):
val1 = np.array([0, 4, 7], dtype=np.int32)
val2 = np.array([1, 6], dtype=np.int32)
val3 = np.array([2, 3, 5], dtype=np.int32)
val4 = np.array([[0, 1], [40, 41], [70, 71]], dtype=np.float32)
val5 = np.array([[10, 11], [60, 61]], dtype=np.float32)
val6 = np.array([[20, 21], [30, 31], [50, 51]], dtype=np.float32)
expected = np.array(
[[0, 1], [10, 11], [20, 21], [30, 31], [40, 41], [50, 51], [60, 61],
[70, 71]],
dtype=np.float32)
self._AssertDynamicStitchResultIs(
[val1, val2, val3], [val4, val5, val6], expected=expected)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/dynamic_stitch_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateful random-number generation ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.client import device_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.kernel_tests.random import util as \
random_test_util
from tensorflow.python.ops import gen_stateful_random_ops
from tensorflow.python.ops import stateful_random_ops as \
random
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def xla_device():
devices = device_lib.list_local_devices()
def find_type(device_type):
for d in devices:
if d.device_type == device_type:
return d
return None
d = find_type("TPU") or find_type("XLA_GPU") or find_type("XLA_CPU")
if d is None:
raise ValueError(
"Can't find any XLA device. Available devices:\n%s" % devices)
return d
def xla_device_name():
return str(xla_device().name)
ALGS = [random.RNG_ALG_PHILOX, random.RNG_ALG_THREEFRY]
INTS = [dtypes.int32, dtypes.uint32, dtypes.int64, dtypes.uint64]
FLOATS = [dtypes.bfloat16, dtypes.float32, dtypes.float64]
class StatefulRandomOpsTest(xla_test.XLATestCase, parameterized.TestCase):
"""Test cases for stateful random-number generator operators."""
@parameterized.parameters(ALGS)
def testSimple(self, alg):
"""A simple test."""
with ops.device(xla_device_name()):
gen = random.Generator.from_seed(seed=0, alg=alg)
gen.normal(shape=(3,))
gen.uniform(shape=(3,), minval=0, maxval=10, dtype=dtypes.uint32)
gen.uniform_full_int(shape=(3,))
@parameterized.parameters(ALGS)
def testDefun(self, alg):
"""Test for defun."""
with ops.device(xla_device_name()):
gen = random.Generator.from_seed(seed=0, alg=alg)
@def_function.function
def f():
x = gen.normal(shape=(3,))
y = gen.uniform(shape=(3,), minval=0, maxval=10, dtype=dtypes.uint32)
z = gen.uniform_full_int(shape=(3,))
return (x, y, z)
f()
def _compareToKnownOutputs(self, g, counter, key, expect):
"""Compares against known outputs for specific counter and key inputs."""
def uint32s_to_uint64(a, b):
return b << 32 | a
def uint32s_to_uint64s(ls):
return [uint32s_to_uint64(ls[2 * i], ls[2 * i + 1])
for i in range(len(ls) // 2)]
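    # Packing is little-endian in 32-bit words: uint32s_to_uint64(a, b) places
    # a in the low 32 bits and b in the high 32 bits, e.g.
    # uint32s_to_uint64(0x6b200159, 0x99ba4efe) == 0x99ba4efe6b200159.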
ctr_len = len(counter)
counter = uint32s_to_uint64s(counter)
key = uint32s_to_uint64s(key)
state = counter + key
g.reset(state)
got = g.uniform_full_int(shape=(ctr_len,), dtype=dtypes.uint32)
self.assertAllEqual(expect, got)
g.reset(state)
got = g.uniform_full_int(shape=(ctr_len // 2,), dtype=dtypes.uint64)
self.assertAllEqual(uint32s_to_uint64s(expect), got)
def testThreefry2x32(self):
"""Tests ThreeFry2x32 conforms to known results.
"""
# Based on
# https://github.com/google/jax/blob/8565a3486adf16beb388b2364c9cd930d7a0d92d/tests/random_test.py#L65-L85
# which is in turn based on
# https://github.com/DEShawResearch/Random123-Boost/blob/65e3d874b67aa7b3e02d5ad8306462f52d2079c0/libs/random/test/test_threefry.cpp#L30-L32
with ops.device(xla_device_name()):
g = random.Generator.from_seed(seed=0, alg=random.RNG_ALG_THREEFRY)
self._compareToKnownOutputs(
g,
[0x00000000, 0x00000000], [0x00000000, 0x00000000],
[0x6b200159, 0x99ba4efe])
self._compareToKnownOutputs(
g,
[0xffffffff, 0xffffffff], [0xffffffff, 0xffffffff],
[0x1cb996fc, 0xbb002be7])
self._compareToKnownOutputs(
g,
[0x243f6a88, 0x85a308d3], [0x13198a2e, 0x03707344],
[0xc4923a9c, 0x483df7a0])
def testPhilox4x32(self):
"""Tests Philox4x32 conforms to known results.
"""
# Based on
# https://github.com/DEShawResearch/Random123-Boost/blob/65e3d874b67aa7b3e02d5ad8306462f52d2079c0/libs/random/test/test_philox.cpp#L50-L52
with ops.device(xla_device_name()):
g = random.Generator.from_seed(seed=0, alg=random.RNG_ALG_PHILOX)
self._compareToKnownOutputs(
g,
[0x00000000, 0x00000000, 0x00000000, 0x00000000],
[0x00000000, 0x00000000],
[0x6627e8d5, 0xe169c58d, 0xbc57ac4c, 0x9b00dbd8])
self._compareToKnownOutputs(
g,
[0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff],
[0xffffffff, 0xffffffff],
[0x408f276d, 0x41c83b0e, 0xa20bc7c6, 0x6d5451fd])
self._compareToKnownOutputs(
g,
[0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344],
[0xa4093822, 0x299f31d0],
[0xd16cfe09, 0x94fdcceb, 0x5001e420, 0x24126ea1])
def testNewStateThreeFry(self):
"""Tests that the new state is correct (for ThreeFry).
"""
with ops.device(xla_device_name()):
counter = 57
key = 0x1234
size = 46
state = [counter, key]
gen = random.Generator(state=state, alg=random.RNG_ALG_THREEFRY)
gen.uniform_full_int(shape=(size,), dtype=dtypes.uint32)
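      # ThreeFry2x32 emits two 32-bit words per counter value, so 46 uint32
      # samples advance the counter by (46 + 1) // 2 = 23, while each uint64
      # sample consumes a full counter step, as the assertions below check.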
self.assertAllEqual([counter+(size+1)//2, key], gen.state.read_value())
gen.reset(state)
gen.uniform_full_int(shape=(size,), dtype=dtypes.uint64)
self.assertAllEqual([counter+size, key], gen.state.read_value())
def testNewStatePhilox(self):
"""Tests that the new state is correct (for Philox).
"""
with ops.device(xla_device_name()):
counter_low = 57
counter_high = 283
key = 0x1234
size = 47
state = [counter_low, counter_high, key]
gen = random.Generator(state=state, alg=random.RNG_ALG_PHILOX)
gen.uniform_full_int(shape=(size,), dtype=dtypes.uint32)
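      # Philox4x32 emits four 32-bit words per counter value, so 47 uint32
      # samples advance the low counter word by (47 + 3) // 4 = 12, and 47
      # uint64 samples (two words each) advance it by (47 + 1) // 2 = 24.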
self.assertAllEqual([counter_low+(size+3)//4, counter_high, key],
gen.state.read_value())
gen.reset(state)
gen.uniform_full_int(shape=(size,), dtype=dtypes.uint64)
self.assertAllEqual([counter_low+(size+1)//2, counter_high, key],
gen.state.read_value())
      # Tests that a large counter_low correctly overflows into counter_high.
counter_low = -1 # same as 0xffffffffffffffff
counter_high = 283
size = 47
state = [counter_low, counter_high, key]
gen = random.Generator(state=state, alg=random.RNG_ALG_PHILOX)
gen.uniform_full_int(shape=(size,), dtype=dtypes.uint32)
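      # counter_low starts at 2**64 - 1 here, so advancing it by 12 (or 24 for
      # the uint64 draw) wraps past 2**64 and carries one into counter_high.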
self.assertAllEqual([(size+3)//4-1, counter_high+1, key],
gen.state.read_value())
gen.reset(state)
gen.uniform_full_int(shape=(size,), dtype=dtypes.uint64)
self.assertAllEqual([(size+1)//2-1, counter_high+1, key],
gen.state.read_value())
@parameterized.parameters(INTS)
def testXLAEqualsCPU(self, dtype):
"""Tests that XLA and CPU kernels generate the same integers."""
seed = 1234
shape = [315, 49]
with ops.device("/device:CPU:0"):
cpu = (random.Generator.from_seed(seed=seed, alg=random.RNG_ALG_PHILOX)
.uniform_full_int(shape=shape, dtype=dtype))
with ops.device(xla_device_name()):
xla = (random.Generator.from_seed(seed=seed, alg=random.RNG_ALG_PHILOX)
.uniform_full_int(shape=shape, dtype=dtype))
self.assertAllEqual(cpu, xla)
def _testRngIsNotConstant(self, rng, dtype):
    # Tests that 'rng' does not always return the same value.
    # If the generator is working correctly, two independent draws should be
    # identical only with very low probability.
x = rng(dtype).numpy()
y = rng(dtype).numpy()
self.assertFalse(np.array_equal(x, y))
def check_dtype(self, dtype):
device = xla_device()
if device.device_type == "TPU" and dtype == dtypes.float64:
self.skipTest("TPU doesn't support float64.")
@parameterized.parameters(list(itertools.product(ALGS, INTS + FLOATS)))
def testUniformIsNotConstant(self, alg, dtype):
self.check_dtype(dtype)
with ops.device(xla_device_name()):
gen = random.Generator.from_seed(seed=1234, alg=alg)
def rng(dtype):
maxval = dtype.max
return gen.uniform(shape=[2], dtype=dtype, maxval=maxval)
self._testRngIsNotConstant(rng, dtype)
@parameterized.parameters(list(itertools.product(ALGS, FLOATS)))
def testNormalIsNotConstant(self, alg, dtype):
self.check_dtype(dtype)
with ops.device(xla_device_name()):
gen = random.Generator.from_seed(seed=1234, alg=alg)
def rng(dtype):
return gen.normal(shape=[2], dtype=dtype)
self._testRngIsNotConstant(rng, dtype)
@parameterized.parameters(list(itertools.product(ALGS, INTS + FLOATS)))
def testUniformIsInRange(self, alg, dtype):
self.check_dtype(dtype)
minval = 2
maxval = 33
size = 1000
with ops.device(xla_device_name()):
gen = random.Generator.from_seed(seed=1234, alg=alg)
x = gen.uniform(
shape=[size], dtype=dtype, minval=minval, maxval=maxval).numpy()
self.assertTrue(np.all(x >= minval))
self.assertTrue(np.all(x <= maxval))
@parameterized.parameters(list(itertools.product(ALGS, FLOATS)))
def testNormalIsFinite(self, alg, dtype):
self.check_dtype(dtype)
with ops.device(xla_device_name()):
gen = random.Generator.from_seed(seed=1234, alg=alg)
x = gen.normal(shape=[10000], dtype=dtype).numpy()
self.assertTrue(np.all(np.isfinite(x)))
@parameterized.parameters(list(itertools.product(ALGS, INTS + FLOATS)))
def testDistributionOfUniform(self, alg, dtype):
"""Use Pearson's Chi-squared test to test for uniformity."""
self.check_dtype(dtype)
with ops.device(xla_device_name()):
n = 1000
seed = 12
gen = random.Generator.from_seed(seed=seed, alg=alg)
maxval = 1
if dtype.is_integer:
maxval = 100
t = gen.uniform(shape=[n], maxval=maxval, dtype=dtype)
x = t.numpy().astype(float)
if maxval > 1:
        # Normalize x to the range [0, 1).
x = x / maxval
# Tests that the values are distributed amongst 10 bins with equal
# probability. 16.92 is the Chi^2 value for 9 degrees of freedom with
# p=0.05. This test is probabilistic and would be flaky if the random
# seed were not fixed.
val = random_test_util.chi_squared(x, 10)
self.assertLess(val, 16.92)
@parameterized.parameters(list(itertools.product(ALGS, FLOATS)))
def testDistributionOfNormal(self, alg, dtype):
"""Use Anderson-Darling test to test distribution appears normal."""
self.check_dtype(dtype)
with ops.device(xla_device_name()):
n = 1000
gen = random.Generator.from_seed(seed=1234, alg=alg)
x = gen.normal(shape=[n], dtype=dtype).numpy()
# The constant 2.492 is the 5% critical value for the Anderson-Darling
# test where the mean and variance are known. This test is probabilistic
# so to avoid flakiness the seed is fixed.
self.assertLess(
random_test_util.anderson_darling(x.astype(float)), 2.492)
@parameterized.parameters(list(itertools.product(ALGS, FLOATS)))
def testTruncatedNormal(self, alg, dtype):
self.check_dtype(dtype)
with ops.device(xla_device_name()):
gen = random.Generator.from_seed(seed=123, alg=alg)
n = 100000
y = gen.truncated_normal(shape=[n], dtype=dtype).numpy()
random_test_util.test_truncated_normal(
self.assertEqual, self.assertAllClose, n, y,
mean_atol=2e-3, median_atol=4e-3,
variance_rtol=1e-2 if dtype == dtypes.bfloat16 else 5e-3)
def testErrors(self):
"""Tests that proper errors are raised.
"""
shape = [2, 3]
with ops.device(xla_device_name()):
gen = random.Generator.from_seed(seed=1234, alg=random.RNG_ALG_THREEFRY)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
r"algorithm must be of shape \[\], not"):
gen_stateful_random_ops.stateful_standard_normal_v2(
gen.state.handle, [0, 0], shape)
with self.assertRaisesWithPredicateMatch(
TypeError, "EagerTensor of dtype int64"):
gen_stateful_random_ops.stateful_standard_normal_v2(
gen.state.handle, 1.1, shape)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
"Unsupported algorithm id"):
gen_stateful_random_ops.stateful_standard_normal_v2(
gen.state.handle, 123, shape)
var = variables.Variable([0, 0], dtype=dtypes.uint32)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
"Type mismatch for read of variable .* Expected int64; got"):
gen_stateful_random_ops.stateful_standard_normal_v2(
var.handle, random.RNG_ALG_THREEFRY, shape)
var = variables.Variable([[0]], dtype=dtypes.int64)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
"RNG state must have one and only one dimension, not"):
gen_stateful_random_ops.stateful_standard_normal_v2(
var.handle, random.RNG_ALG_THREEFRY, shape)
var = variables.Variable([0], dtype=dtypes.int64)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
"The size of the state must be at least"):
gen_stateful_random_ops.stateful_standard_normal_v2(
var.handle, random.RNG_ALG_THREEFRY, shape)
var = variables.Variable([0, 0], dtype=dtypes.int64)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
"The size of the state must be at least"):
gen_stateful_random_ops.stateful_standard_normal_v2(
var.handle, random.RNG_ALG_PHILOX, shape)
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/stateful_random_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multinomial generation ops in the XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.platform import googletest
# TODO(srvasude): Merge this with
# third_party/tensorflow/python/kernel_tests/random/multinomial_op_test.py.
class CategoricalTest(xla_test.XLATestCase):
"""Test cases for random-number generating operators."""
def output_dtypes(self):
return set(self.int_types).intersection([np.int32, np.int64])
def _chi2(self, expected, actual):
"""Returns Chi2 GOF statistic."""
actual = np.asarray(actual)
expected = np.asarray(expected)
diff = actual - expected
chi2 = np.sum(diff * diff / expected)
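    # For example, expected=[0.5, 0.5] and actual=[0.6, 0.4] give
    # chi2 = 0.1**2 / 0.5 + 0.1**2 / 0.5 = 0.04.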
return chi2
def _do_sampling(self, logits, num_samples):
"""Categorical samples from given input.
Args:
logits: Numpy ndarray of shape [batch_size, num_classes].
num_samples: Int; number of samples to draw.
Returns:
Frequencies from sampled classes; shape [batch_size, num_classes].
"""
with self.session(), self.test_scope():
random_seed.set_random_seed(1618)
op = random_ops.multinomial(logits, num_samples,
output_dtype=dtypes.int32)
d = self.evaluate(op)
batch_size, num_classes = logits.shape
freqs_mat = []
for i in range(batch_size):
cnts = dict(collections.Counter(d[i, :]))
        # Require that the drawn class labels are in range.
self.assertLess(max(cnts.keys()), num_classes)
self.assertGreaterEqual(min(cnts.keys()), 0)
freqs = [(cnts[k] * 1. / num_samples if k in cnts else 0)
for k in range(num_classes)]
freqs_mat.append(freqs)
return freqs_mat
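      # For example, uniform logits such as [[0., 0., 0.]] should yield
      # frequencies close to [[1/3, 1/3, 1/3]] once num_samples is large.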
def _testRngIsNotConstant(self, rng, dtype, output_dtype):
# Tests that 'rng' does not always return the same value.
with self.session():
with self.test_scope():
x = rng(dtype, output_dtype)
      # If the random-number generator is working correctly, the probability
      # that it produces the same output multiple times is very low.
y = self.evaluate(x)
z = self.evaluate(x)
w = self.evaluate(x)
# We use exact equality here. If the random-number generator is producing
# deterministic output, all three outputs will be bitwise identical.
self.assertTrue((not np.array_equal(y, z)) or
(not np.array_equal(z, w)) or
(not np.array_equal(y, w)))
def testCategoricalIsNotConstant(self):
def rng(dtype, output_dtype):
return random_ops.multinomial(np.array([[1., 1., 1.]], dtype=dtype), 10,
output_dtype=output_dtype)
dtype = np.float32
for output_dtype in self.output_dtypes():
self._testRngIsNotConstant(rng, dtype, output_dtype)
def testCategoricalIsInRange(self):
for dtype in self.float_types:
for output_dtype in self.output_dtypes():
with self.session():
with self.test_scope():
x = random_ops.multinomial(
array_ops.ones(shape=[1, 20], dtype=dtype), 1000,
output_dtype=output_dtype)
y = self.evaluate(x)
self.assertTrue((y >= 0).sum() == 1000)
self.assertTrue((y < 20).sum() == 1000)
def testSamplingCorrectness(self):
np.random.seed(1618) # Make it reproducible.
num_samples = 40000
rand_probs = np.random.dirichlet([1., 1., 2., 3.])
rand_probs2 = np.random.dirichlet([1., 4., 5.], size=3) # batched
for probs in [[.5, .5], [.85, .05, .1], rand_probs, rand_probs2]:
probs = np.asarray(probs)
if len(probs.shape) == 1:
probs = probs.reshape(1, probs.size) # singleton batch
logits = np.log(probs).astype(np.float32)
freqs = self._do_sampling(logits, num_samples)
# the test here is similar to
# python/kernel_tests/random/multinomial_op_test.py
# Note that df >= 1 in all these cases. Choosing a cutoff of 1e-3
# corresponds to an alpha value of 2.5% for df = 1, and smaller for larger
# df.
chi2 = self._chi2(probs, freqs)
self.assertLess(chi2, 1e-3)
def testStatelessMultinomialIsInRange(self):
for dtype in self.float_types.intersection(
[dtypes.float32, dtypes.bfloat16]):
for output_dtype in self.output_dtypes():
with self.session() as sess:
with self.test_scope():
seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
x = stateless_random_ops.stateless_multinomial(
array_ops.ones(shape=[1, 20], dtype=dtype),
1000,
seed_t,
output_dtype=output_dtype)
y = sess.run(x, {seed_t: [0x12345678, 0xabcdef12]})
self.assertTrue((y >= 0).sum() == 1000)
self.assertTrue((y < 20).sum() == 1000)
def testDeterminismMultinomial(self):
# Stateless values should be equal iff the seeds are equal (roughly)
num_samples = 10
with self.session(), self.test_scope():
seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
seeds = [(x, y) for x in range(5) for y in range(5)] * 3
for logits in ([[0.1, 0.25, 0.5, 0.15]], [[0.5, 0.5], [0.8, 0.2],
[0.25, 0.75]]):
pure = stateless_random_ops.stateless_multinomial(
logits, num_samples, seed=seed_t)
values = [(seed, pure.eval(feed_dict={seed_t: seed})) for seed in seeds]
for s0, v0 in values:
for s1, v1 in values:
self.assertEqual(s0 == s1, np.all(v0 == v1))
def testEmpty(self):
with self.session():
with self.test_scope():
x = random_ops.multinomial(
array_ops.zeros([42, 40]), 0, output_dtype=dtypes.int32)
y = self.evaluate(x)
self.assertEqual(y.shape, (42, 0))
def testEmptyStateless(self):
with self.session() as sess:
with self.test_scope():
seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
x = stateless_random_ops.stateless_multinomial(
array_ops.zeros([42, 40]),
0,
seed=seed_t,
output_dtype=dtypes.int32)
y = sess.run(x, {seed_t: [0x12345678, 0xabcdef1]})
self.assertEqual(y.shape, (42, 0))
if __name__ == '__main__':
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/categorical_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.sparse_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
def _SparseToDense(sparse_indices,
output_size,
sparse_values,
default_value,
validate_indices=True):
feed_sparse_indices = array_ops.placeholder(dtypes.int32)
feed_dict = {feed_sparse_indices: sparse_indices}
return sparse_ops.sparse_to_dense(
feed_sparse_indices,
output_size,
sparse_values,
default_value=default_value,
validate_indices=validate_indices).eval(feed_dict=feed_dict)
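# A minimal pure-numpy sketch (illustrative only; _numpy_sparse_to_dense is not
# part of the TensorFlow API or of the tests below) of the behavior that
# sparse_to_dense is expected to have for the simple scalar- and vector-valued
# cases exercised here.
def _numpy_sparse_to_dense(sparse_indices, output_shape, sparse_values,
                           default_value):
  dense = np.full(output_shape, default_value)
  indices = np.reshape(np.asarray(sparse_indices), (-1, len(output_shape)))
  for i, index in enumerate(indices):
    dense[tuple(index)] = (
        sparse_values if np.isscalar(sparse_values) else sparse_values[i])
  return dense
# For example, _numpy_sparse_to_dense([1, 3], [5], [1, 2], -1) returns
# array([-1, 1, -1, 2, -1]), matching testSetValue below.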
class SparseToDenseTest(xla_test.XLATestCase):
def testInt(self):
with self.session(), self.test_scope():
tf_ans = _SparseToDense([1, 3], [5], 1, 0)
np_ans = np.array([0, 1, 0, 1, 0]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
def testFloat(self):
with self.session(), self.test_scope():
tf_ans = _SparseToDense([1, 3], [5], 1.0, 0.0)
np_ans = np.array([0, 1, 0, 1, 0]).astype(np.float32)
self.assertAllClose(np_ans, tf_ans)
def testSetValue(self):
with self.session(), self.test_scope():
tf_ans = _SparseToDense([1, 3], [5], [1, 2], -1)
np_ans = np.array([-1, 1, -1, 2, -1]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
def testSetSingleValue(self):
with self.session(), self.test_scope():
tf_ans = _SparseToDense([1, 3], [5], 1, -1)
np_ans = np.array([-1, 1, -1, 1, -1]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
def test2d(self):
# pylint: disable=bad-whitespace
with self.session(), self.test_scope():
tf_ans = _SparseToDense([[1, 3], [2, 0]], [3, 4], 1, -1)
np_ans = np.array([[-1, -1, -1, -1],
[-1, -1, -1, 1],
[ 1, -1, -1, -1]]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
def testZeroDefault(self):
with self.session():
x = sparse_ops.sparse_to_dense(2, [4], 7).eval()
self.assertAllEqual(x, [0, 0, 7, 0])
def test3d(self):
with self.session(), self.test_scope():
tf_ans = _SparseToDense([[1, 3, 0], [2, 0, 1]], [3, 4, 2], 1, -1)
np_ans = np.ones((3, 4, 2), dtype=np.int32) * -1
np_ans[1, 3, 0] = 1
np_ans[2, 0, 1] = 1
self.assertAllClose(np_ans, tf_ans)
def testDegenerateIndexMatrix(self):
with self.session(), self.test_scope():
tf_ans = _SparseToDense([[2], [3], [4], [5], [6], [7], [8], [9]], [10],
[1, 2, 3, 4, 5, 6, 7, 8], -1)
self.assertAllClose([-1, -1, 1, 2, 3, 4, 5, 6, 7, 8], tf_ans)
def testBadShape(self):
with self.session(), self.test_scope():
with self.assertRaisesWithPredicateMatch(ValueError, "must be rank 1"):
_SparseToDense([1, 3], [[5], [3]], 1, -1)
def testBadValue(self):
with self.session(), self.test_scope():
with self.assertRaisesOpError(
r"sparse_values has incorrect shape \[2,1\], "
r"should be \[\] or \[2\]"):
_SparseToDense([1, 3], [5], [[5], [3]], -1)
def testBadNumValues(self):
with self.session(), self.test_scope():
with self.assertRaisesOpError(
r"sparse_values has incorrect shape \[3\], should be \[\] or \[2\]"):
_SparseToDense([1, 3], [5], [1, 2, 3], -1)
def testBadDefault(self):
with self.session(), self.test_scope():
with self.assertRaisesOpError("default_value should be a scalar"):
_SparseToDense([1, 3], [5], [1, 2], [0])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/sparse_to_dense_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for operators with no arguments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import googletest
class NullaryOpsTest(xla_test.XLATestCase):
def _testNullary(self, op, expected):
with self.session() as session:
with self.test_scope():
output = op()
result = session.run(output)
self.assertAllClose(result, expected, rtol=1e-3)
def testNoOp(self):
with self.session():
with self.test_scope():
output = control_flow_ops.no_op()
# This should not crash.
output.run()
def testConstants(self):
for dtype in self.numeric_types:
constants = [
dtype(42),
np.array([], dtype=dtype),
np.array([1, 2], dtype=dtype),
np.array([7, 7, 7, 7, 7], dtype=dtype),
np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array([[[1, 2], [3, 4], [5, 6]], [[10, 20], [30, 40], [50, 60]]],
dtype=dtype),
np.array([[[]], [[]]], dtype=dtype),
np.array([[[[1]]]], dtype=dtype),
]
for c in constants:
self._testNullary(lambda c=c: constant_op.constant(c), expected=c)
def testComplexConstants(self):
for dtype in self.complex_types:
constants = [
dtype(42 + 3j),
np.array([], dtype=dtype),
np.ones([50], dtype=dtype) * (3 + 4j),
np.array([1j, 2 + 1j], dtype=dtype),
np.array([[1, 2j, 7j], [4, 5, 6]], dtype=dtype),
np.array([[[1, 2], [3, 4 + 6j], [5, 6]],
[[10 + 7j, 20], [30, 40], [50, 60]]],
dtype=dtype),
np.array([[[]], [[]]], dtype=dtype),
np.array([[[[1 + 3j]]]], dtype=dtype),
]
for c in constants:
self._testNullary(lambda c=c: constant_op.constant(c), expected=c)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/nullary_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
def nhwc_to_format(x, data_format):
"""Converts a numpy array from NHWC format to `data_format`."""
rank = len(x.shape)
if data_format == "NCHW":
return np.transpose(x, [0, rank - 1] + list(range(1, rank - 1)))
elif data_format == "NHWC":
return x
else:
raise ValueError("Unknown format {}".format(data_format))
class UnaryOpsTest(xla_test.XLATestCase):
"""Test cases for unary operators."""
def _assertOpOutputMatchesExpected(self,
op,
inp,
expected,
equality_test=None,
rtol=1e-3,
atol=1e-5):
"""Verifies that 'op' produces 'expected' when fed input 'inp' .
Args:
op: operator to test
inp: numpy input array to use as input to 'op'.
expected: numpy array representing the expected output of 'op'.
equality_test: either None, or a function that tests two numpy arrays for
equality. If None, self.assertAllClose is used.
rtol: relative tolerance for equality test.
atol: absolute tolerance for equality test.
"""
with self.session() as session:
with self.test_scope():
pinp = array_ops.placeholder(
dtypes.as_dtype(inp.dtype), inp.shape, name="a")
output = op(pinp)
result = session.run(output, {pinp: inp})
if equality_test is None:
self.assertEqual(output.dtype, expected.dtype)
self.assertAllCloseAccordingToType(
expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03)
else:
equality_test(result, expected, rtol=rtol, atol=atol)
def ListsAreClose(self, result, expected, rtol, atol):
"""Tests closeness of two lists of floats."""
self.assertEqual(len(result), len(expected))
for i in xrange(len(result)):
self.assertAllClose(result[i], expected[i], rtol, atol)
def testAllTypeOps(self):
for dtype in self.numeric_types - {np.int8, np.uint8}:
self._assertOpOutputMatchesExpected(
array_ops.diag, np.array([1, 2, 3, 4], dtype=dtype),
np.array(
[[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.diag_part,
np.arange(36).reshape([2, 3, 2, 3]).astype(dtype),
np.array([[0, 7, 14], [21, 28, 35]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.diag, np.array([[1, 2], [3, 4]], dtype=dtype),
np.array(
[[[[1, 0], [0, 0]], [[0, 2], [0, 0]]], [[[0, 0], [3, 0]],
[[0, 0], [0, 4]]]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.identity,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.prevent_gradient,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[[[]]]]], dtype=dtype),
expected=np.array([], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1], [2]]], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1]], [[2]]], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
expected=np.array([[1, 2], [3, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.stop_gradient,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
def testFloatOps(self):
for dtype in self.float_types:
x = np.arange(-0.90, 0.90, 0.25)
self._assertOpOutputMatchesExpected(
math_ops.acos, x.astype(dtype), expected=np.arccos(x).astype(dtype))
self._assertOpOutputMatchesExpected(
math_ops.asin, x.astype(dtype), expected=np.arcsin(x).astype(dtype))
x = np.arange(-3, 3).reshape(1, 3, 2)
self._assertOpOutputMatchesExpected(
math_ops.atan, x.astype(dtype), expected=np.arctan(x).astype(dtype))
self._assertOpOutputMatchesExpected(
math_ops.acosh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[0, 1.3169579, 1.76274717, 2.06343707], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.asinh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[0.88137359, 1.44363548, 1.81844646, 2.09471255], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.atanh,
np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype),
expected=np.array(
[0.10033535, 0.20273255, 0.3095196, 0.42364893], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.ceil,
np.array([[-1.7, 1.2]], dtype=dtype),
expected=np.array([[-1, 2]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.cosh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[1.54308063, 3.76219569, 10.067662, 27.30823284], dtype=dtype))
# Disable float16 testing for now
if dtype != np.float16:
x = np.arange(-10, 10, 1).astype(dtype)
with self.session() as session:
erf_x = session.run(math_ops.erf(x))
erfc_x = session.run(math_ops.erfc(x))
self._assertOpOutputMatchesExpected(math_ops.erf, x, expected=erf_x)
self._assertOpOutputMatchesExpected(math_ops.erfc, x, expected=erfc_x)
self._assertOpOutputMatchesExpected(
math_ops.exp,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[0.36787945, 2.7182817]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.expm1,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-0.63212056, 1.71828183]], dtype=dtype),
rtol=1e-5)
self._assertOpOutputMatchesExpected(
math_ops.floor,
np.array([[-1.7, 1.2]], dtype=dtype),
expected=np.array([[-2, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.is_finite,
np.array(
[[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype),
expected=np.array([[0, 1, 1, 1, 1, 1, 1, 0, 0]], dtype=np.bool))
# Tests for tf.nn ops.
self._assertOpOutputMatchesExpected(
nn_ops.l2_loss, np.array([[[]]], dtype=dtype), expected=dtype(0))
self._assertOpOutputMatchesExpected(nn_ops.l2_loss, dtype(4), dtype(8))
self._assertOpOutputMatchesExpected(
nn_ops.l2_loss, np.array([[-2, 4]], dtype=dtype), expected=dtype(10))
self._assertOpOutputMatchesExpected(
math_ops.reciprocal,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[1, 0.5]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.log,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0, 0.69314718]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sin,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0.841478, 0.909302]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.cos,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0.540297, -0.41614]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.log1p,
np.array([[1e-14, 1e-15, 0.6]], dtype=dtype),
expected=np.log1p(np.array([[1e-14, 1e-15, 0.6]],
dtype=dtype)).astype(dtype),
rtol=1e-4,
atol=1e-6)
self._assertOpOutputMatchesExpected(
math_ops.rint,
np.array(
[[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
[0.5, 1.5, 2.5, 3.5]],
dtype=dtype),
expected=np.array(
[[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.round,
np.array(
[[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
[0.5, 1.5, 2.5, 3.5]],
dtype=dtype),
expected=np.array(
[[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.rsqrt,
np.array([[4, 16]], dtype=dtype),
expected=np.array([[0.5, 0.25]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid,
np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[0.7310586, 0.7310586, 0.7310586, 0.7310586],
[0.7310586, 0.880797, 0.95257413, 0.98201376]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid,
np.array([-300, -150, 0, 150, 300], dtype=dtype),
expected=np.array([0, 0, 0.5, 1, 1], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sinh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[1.17520119, 3.62686041, 10.01787493, 27.2899172], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sqrt,
np.array([[4, 9]], dtype=dtype),
expected=np.array([[2, 3]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.tan,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[1.55740772, -2.18503986, -0.14254654, 1.15782128], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.tanh,
np.array(
[[1, 2, 3, 4], [np.inf, -np.inf, np.nan, 20], [19, -19, 22, -22]],
dtype=dtype),
expected=np.array([[0.76159418, 0.96402758, 0.99505478, 0.99932933],
[1.0, -1.0, np.nan, 1.0], [1.0, -1.0, 1.0, -1.0]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.log_softmax,
np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[-1.3862944, -1.3862944, -1.3862944, -1.3862944],
[-3.4401896, -2.4401896, -1.4401897, -0.44018969]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.elu,
np.array([[-1, 0, 1, -1e-6]], dtype=dtype),
expected=np.array([[-0.63212056, 0, 1, -9.999995e-07]], dtype=dtype),
rtol=1e-5,
atol=1e-6)
self._assertOpOutputMatchesExpected(
nn_ops.selu,
np.array([[-1, 0, 1, -1e-5]], dtype=dtype),
expected=np.array(
[[-1.11133074, 0., 1.05070099, -1.758090550379974e-05]],
dtype=dtype),
rtol=1e-5,
atol=1e-6)
self._assertOpOutputMatchesExpected(
nn_ops.relu,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[0, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.relu6,
np.array([[-0.05, 6.05, 5]], dtype=dtype),
expected=np.array([[0, 6, 5]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.leaky_relu,
np.array([[-2, -1, 0, 1, 2]], dtype=dtype),
expected=np.array([[-0.4, -0.2, 0.0, 1.0, 2.0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softmax,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array([0.032058604, 0.087144323, 0.23688284, 0.64391428],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softmax,
np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[0.25, 0.25, 0.25, 0.25],
[0.032058604, 0.087144323, 0.23688284, 0.64391428]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softmax,
np.array([[[1, 1], [1, 1]], [[1, 2], [3, 4]]], dtype=dtype),
expected=np.array(
[[[0.5, 0.5], [0.5, 0.5]],
[[0.26894142, 0.73105858], [0.26894142, 0.73105858]]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softsign,
np.array([[-2, -1, 0, 1, 2]], dtype=dtype),
expected=np.array(
[[-0.66666669, -0.5, 0, 0.5, 0.66666669]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sign,
np.array([[-2.0, -1.0, -0.0, +0.0, 1.0, 2.0]], dtype=dtype),
expected=np.array([[-1.0, -1.0, -0.0, +0.0, 1.0, 1.0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.is_finite,
np.array(
[[42, float("inf"), -123], [float("nan"), 0, -0.0]], dtype=dtype),
expected=np.array(
[[True, False, True], [False, True, True]], dtype=np.bool))
self._assertOpOutputMatchesExpected(
math_ops.lgamma,
np.array(0.5, dtype=dtype),
expected=np.array(np.log(np.pi) / 2, dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.lgamma,
np.array(
[[1, 2, 3], [4, 5, 6], [1 / 2, 3 / 2, 5 / 2],
[-3 / 2, -7 / 2, -11 / 2]],
dtype=dtype),
expected=np.array(
[
[0, 0, np.log(2.0)],
[np.log(6.0), np.log(24.0),
np.log(120)],
[
np.log(np.pi) / 2,
np.log(np.pi) / 2 - np.log(2),
np.log(np.pi) / 2 - np.log(4) + np.log(3)
],
[
np.log(np.pi) / 2 - np.log(3) + np.log(4),
np.log(np.pi) / 2 - np.log(105) + np.log(16),
np.log(np.pi) / 2 - np.log(10395) + np.log(64),
],
],
dtype=dtype))
# The actual result is complex. Take the real part.
self._assertOpOutputMatchesExpected(
math_ops.lgamma,
np.array([-1 / 2, -5 / 2, -9 / 2], dtype=dtype),
expected=np.array(
[
np.log(np.pi) / 2 + np.log(2),
np.log(np.pi) / 2 - np.log(15) + np.log(8),
np.log(np.pi) / 2 - np.log(945) + np.log(32),
],
dtype=dtype),
atol=1e-4)
self._assertOpOutputMatchesExpected(
math_ops.digamma,
np.array(
[[1.0, 0.5, 1 / 3.0], [0.25, 1 / 6.0, 0.125], [2.0, 3.0, 4.0],
[6.0, 8.0, 9.0]],
dtype=dtype),
expected=np.array(
[
[
-np.euler_gamma, -2 * np.log(2) - np.euler_gamma,
-np.pi / 2 / np.sqrt(3) - 3 * np.log(3) / 2 -
np.euler_gamma
],
[
-np.pi / 2 - 3 * np.log(2) - np.euler_gamma,
-np.pi * np.sqrt(3) / 2 - 2 * np.log(2) -
3 * np.log(3) / 2 - np.euler_gamma,
-np.pi / 2 - 4 * np.log(2) -
(np.pi + np.log(2 + np.sqrt(2)) - np.log(2 - np.sqrt(2)))
/ np.sqrt(2) - np.euler_gamma
],
[
1 - np.euler_gamma, 1.5 - np.euler_gamma,
11 / 6.0 - np.euler_gamma
],
[
137 / 60.0 - np.euler_gamma, 363 / 140.0 - np.euler_gamma,
761 / 280.0 - np.euler_gamma
],
],
dtype=dtype))
def quantize_and_dequantize_v2(x):
return array_ops.quantize_and_dequantize_v2(
x, -127, 127, signed_input=True, num_bits=8)
self._assertOpOutputMatchesExpected(
quantize_and_dequantize_v2,
np.array([-1, -0.5, 0, 0.3], dtype=dtype),
expected=np.array([-1., -0.5, 0., 0.296875], dtype=dtype))
def quantize_and_dequantize_v2_round_half_up(x):
return array_ops.quantize_and_dequantize_v2(
x,
-1,
1.0,
signed_input=True,
num_bits=8,
range_given=True,
round_mode="HALF_UP")
self._assertOpOutputMatchesExpected(
quantize_and_dequantize_v2_round_half_up,
np.array([-0.8, -0.5, 0, 0.3, 0.8, -2, 33], dtype=dtype),
expected=np.array([
-102.0 / 127,
-63.0 / 127,
0,
38.0 / 127,
102.0 / 127,
-128.0 / 127,
1,
],
dtype=dtype))
def quantize_and_dequantize_v2_round_half_to_even(x):
return array_ops.quantize_and_dequantize_v2(
x,
-1.0,
1.0,
signed_input=True,
num_bits=8,
range_given=True,
round_mode="HALF_TO_EVEN")
self._assertOpOutputMatchesExpected(
quantize_and_dequantize_v2_round_half_to_even,
np.array(
[
-0.8,
# The -0.5 should become -63.5 after scaling and with
# rounding this should become -64. But with the test
                  # unary_ops_test_cpu_ondemand, this fails because the scaled
                  # value comes out as -63.499996 and gets rounded to -63.
                  # TODO(sreenik): Someone more familiar with this test needs
                  # to take a look and resolve this. It works on all other
                  # platform variants, such as cpu and gpu.
# -0.5,
0,
0.3,
0.8,
-2,
33
],
dtype=dtype),
expected=np.array(
[
-102.0 / 127,
# -64.0 / 127,
0,
38.0 / 127,
102.0 / 127,
-128.0 / 127,
1,
],
dtype=dtype))
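      # Worked example of the rounding difference, assuming the op maps the
      # given range [-1, 1] onto the signed 8-bit range [-127, 127]: an input
      # of -0.5 scales to -63.5, which HALF_UP rounds to -63 (hence the
      # -63.0 / 127 entry in the HALF_UP test above) while HALF_TO_EVEN rounds
      # to the even value -64 (the -64.0 / 127 entry commented out above).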
def quantize_and_dequantize_v3(x):
return array_ops.quantize_and_dequantize_v3(
x, -127, 127, num_bits=8, signed_input=True, range_given=False)
self._assertOpOutputMatchesExpected(
quantize_and_dequantize_v3,
np.array([-1, -0.5, 0, 0.3], dtype=dtype),
expected=np.array([-1., -0.5, 0., 0.296875], dtype=dtype))
def testComplexOps(self):
for dtype in self.complex_types:
self._assertOpOutputMatchesExpected(
math_ops.acosh,
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
expected=np.arccosh(
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.asinh,
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
expected=np.arcsinh(
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.atanh,
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
expected=np.arctanh(
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.cosh,
np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype),
expected=np.cosh(np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.sinh,
np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
expected=np.sinh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.exp,
np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype),
expected=np.exp(np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.expm1,
np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype),
expected=np.expm1(np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype)),
rtol=1e-6,
atol=1e-6)
      # Check inputs whose real part is close to zero, or whose imaginary part
      # is close to a multiple of pi.
self._assertOpOutputMatchesExpected(
math_ops.expm1,
np.array([[1e-11 + 1j, -1e-11 - 1j, 1. + 1e-11j,
-1. - 1e-11j, 1e-13j + 1e-13j]], dtype=dtype),
# TODO(srvasude): Use numpy as the source of truth after we depend on
# latest numpy with this pull request:
# https://github.com/numpy/numpy/pull/15110.
# The numbers below were generated by scipy.special.expm1.
expected=np.array([[
-4.59697694e-01+8.41470985e-01j,
-4.59697694e-01-8.41470985e-01j,
1.71828183e+00+2.71828183e-11j,
-6.32120559e-01-3.67879441e-12j,
-2.00000000e-26+2.00000000e-13j]], dtype=dtype),
rtol=1e-09,
atol=1e-20)
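      # A quick Taylor-series check of the last entry: for small x,
      # expm1(x) ~= x + x**2 / 2, so x = 2e-13j gives approximately
      # 2e-13j + (2e-13j)**2 / 2 = -2e-26 + 2e-13j, matching the expected
      # value above.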
self._assertOpOutputMatchesExpected(
math_ops.reciprocal,
np.array([[1, 2j, 2 + 3j]], dtype=dtype),
expected=1.0 / np.array([[1, 2j, 2 + 3j]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.log,
np.array([[5j, 3 - 2j]], dtype=dtype),
expected=np.log(np.array([[5j, 3 - 2j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.sin,
np.array([[5j, 3 - 2j]], dtype=dtype),
expected=np.sin(np.array([[5j, 3 - 2j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.cos,
np.array([[5j, 3 - 2j]], dtype=dtype),
expected=np.cos(np.array([[5j, 3 - 2j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.log1p,
np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype),
expected=np.log1p(
np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype)),
rtol=1e-4,
atol=1e-6)
val = np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)
self._assertOpOutputMatchesExpected(
math_ops.rsqrt, val, expected=1 / np.sqrt(val))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid, val, expected=1 / (1 + np.exp(-val)))
self._assertOpOutputMatchesExpected(
math_ops.sqrt, val, expected=np.sqrt(val))
self._assertOpOutputMatchesExpected(
math_ops.tanh,
np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
expected=np.tanh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.tan,
np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
expected=np.tan(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))
ctypes = {np.complex64: np.float32, np.complex128: np.float64}
# Disabled because some AMD processors produce [[5, 1, nan]]
#self._assertOpOutputMatchesExpected(
# math_ops.abs,
# np.array([[3 - 4j, -1j, np.inf]], dtype=dtype),
# expected=np.array([[5, 1, np.inf]], dtype=ctypes[dtype]))
self._assertOpOutputMatchesExpected(
math_ops.negative,
np.array([[-1 + 2j, -3j]], dtype=dtype),
expected=np.array([[1 - 2j, 3j]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.square,
np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype),
expected=np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype)**2)
self._assertOpOutputMatchesExpected(
array_ops.zeros_like,
np.array([[4j, 3 - 2j], [2, -1j]], dtype=dtype),
expected=np.array([[0, 0], [0, 0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.ones_like,
np.array([[-4j, 3 + 2j], [2, -1j]], dtype=dtype),
expected=np.array([[1, 1], [1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.angle,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.angle(np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.conj,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.array([1 - 3j, -4 - 7j, 2.7, 3j], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.imag,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.array([3, 7, 0, -3], dtype=ctypes[dtype]))
self._assertOpOutputMatchesExpected(
math_ops.real,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.array([1, -4, 2.7, 0], dtype=ctypes[dtype]))
def testIntOps(self):
for dtype in self.int_types:
self._assertOpOutputMatchesExpected(
bitwise_ops.invert,
np.array([0, -1, 1, 16, 42], dtype=dtype),
expected=np.array([-1, 0, -2, -17, -43], dtype=dtype))
def testNumericOps(self):
for dtype in self.numeric_types - {np.int8, np.uint8}:
self._assertOpOutputMatchesExpected(
math_ops.abs,
np.array([[2, -1]], dtype=dtype),
expected=np.array([[2, 1]], dtype=np.real(dtype(0)).dtype))
self._assertOpOutputMatchesExpected(
math_ops.negative,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[1, -1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.square,
np.array([[-2, 3]], dtype=dtype),
expected=np.array([[4, 9]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.zeros_like,
np.array([[4, 3], [2, 1]], dtype=dtype),
expected=np.array([[0, 0], [0, 0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.ones_like,
np.array([[4, 3], [2, 1]], dtype=dtype),
expected=np.array([[1, 1], [1, 1]], dtype=dtype))
# TODO(phawkins): these tests fail unless fastmath optimizations
# are disabled. Use more robust IsInf/IsNaN detection and enable these
# tests.
@unittest.skip("test case fails in fast-math mode")
def testIsInfAndIsNan(self):
for dtype in self.float_types:
self._assertOpOutputMatchesExpected(
math_ops.is_inf,
np.array(
[[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype),
expected=np.array([[1, 0, 0, 0, 0, 0, 0, 1, 0]], dtype=np.bool))
self._assertOpOutputMatchesExpected(
math_ops.is_nan,
np.array(
[[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype),
expected=np.array([[0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype=np.bool))
self._assertOpOutputMatchesExpected(
math_ops.sign,
np.array([[np.nan]], dtype=dtype),
expected=np.array([[0.0]], dtype=dtype))
def testLogicalOps(self):
self._assertOpOutputMatchesExpected(
math_ops.logical_not,
np.array([[True, False], [False, True]], dtype=np.bool),
expected=np.array([[False, True], [True, False]], dtype=np.bool))
def testBiasAddGrad(self):
self._assertOpOutputMatchesExpected(
gen_nn_ops.bias_add_grad,
np.array([[1., 2.], [3., 4.]], dtype=np.float32),
expected=np.array([4., 6.], dtype=np.float32))
self._assertOpOutputMatchesExpected(
lambda x: gen_nn_ops.bias_add_grad(x, data_format="NCHW"),
np.array(
[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]], dtype=np.float32),
expected=np.array([14., 22.], dtype=np.float32))
def testCast(self):
shapes = [[], [4], [2, 3], [2, 0, 4]]
types = (
set([dtypes.bool, dtypes.int32, dtypes.float32])
| self.complex_tf_types)
for shape in shapes:
for src_type in types:
for dst_type in types:
src = np.arange(np.prod(shape)).astype(src_type.as_numpy_dtype)
if src_type in self.complex_tf_types:
src += (np.arange(np.prod(shape)) * 2j).astype(
src_type.as_numpy_dtype)
src = src.reshape(shape)
dst = src.astype(dst_type.as_numpy_dtype)
self._assertOpOutputMatchesExpected(
lambda x, dst_type=dst_type: math_ops.cast(x, dst_type),
src,
expected=dst)
def testBitcast(self):
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.int32),
np.array([1, 0x3f800000], np.int32),
expected=np.array([1, 0x3f800000], np.int32))
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.float32),
np.array([1, 0x3f800000], np.int32),
expected=np.array([1e-45, 1.0], np.float32))
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.int32),
np.array([1e-45, 1.0], np.float32),
expected=np.array([1, 0x3f800000], np.int32))
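    # The constants above follow from the IEEE-754 binary32 encoding: the bit
    # pattern 0x3f800000 represents 1.0, and the pattern 0x00000001 is the
    # smallest positive float32 denormal, which the test writes as 1e-45.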
if np.int64 in self.numeric_types:
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.int64),
np.array([1, 0x100000003f800000], np.uint64),
expected=np.array([1, 0x100000003f800000], np.int64))
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.uint64),
np.array([1, 0x100000003f800000], np.int64),
expected=np.array([1, 0x100000003f800000], np.uint64))
def testInvertPermutation(self):
self._assertOpOutputMatchesExpected(
array_ops.invert_permutation,
np.array([1, 2, 0], np.int32),
expected=np.array([2, 0, 1], dtype=np.int32))
def testInvertPermutationTwiceIsNoop(self):
self._assertOpOutputMatchesExpected(
lambda x: array_ops.invert_permutation(array_ops.invert_permutation(x)),
np.array([1, 2, 0], np.int32),
expected=np.array([1, 2, 0], dtype=np.int32))
def testRank(self):
rank_op = lambda x: array_ops.rank_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
rank_op, dtype(7), expected=np.int32(0))
self._assertOpOutputMatchesExpected(
rank_op, np.array([[], []], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
rank_op, np.array([-1, 1], dtype=dtype), expected=np.int32(1))
self._assertOpOutputMatchesExpected(
rank_op, np.array([[-1, 1]], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
rank_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int32(2))
def testShape(self):
shape_op = lambda x: array_ops.shape_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
shape_op, dtype(7), expected=np.array([], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[], []], dtype=dtype),
expected=np.array([2, 0], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([-1, 1], dtype=dtype),
expected=np.array([2], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([1, 2], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.array([3, 1], dtype=np.int32))
def testSize(self):
size_op = lambda x: array_ops.size_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
size_op, dtype(7), expected=np.int32(1))
self._assertOpOutputMatchesExpected(
size_op, np.array([[], []], dtype=dtype), expected=np.int32(0))
self._assertOpOutputMatchesExpected(
size_op, np.array([-1, 1], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
size_op, np.array([[-1, 1]], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
size_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int32(3))
def testSizeWithInt64OutType(self):
def size_op(x):
return array_ops.size_internal(x, optimize=False, out_type=np.int64)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
size_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int64(3))
def testUnpack(self):
self._assertOpOutputMatchesExpected(
array_ops.unstack,
np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
expected=[
np.array([1., 2.], dtype=np.float32),
np.array([3., 4.], dtype=np.float32),
np.array([5., 6.], dtype=np.float32),
],
equality_test=self.ListsAreClose)
self._assertOpOutputMatchesExpected(
lambda x: array_ops.unstack(x, axis=1),
np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
expected=[
np.array([1., 3., 5.], dtype=np.float32),
np.array([2., 4., 6.], dtype=np.float32),
],
equality_test=self.ListsAreClose)
def testDepthToSpace(self):
def make_op(data_format):
def op(x):
return array_ops.depth_to_space(
x, block_size=2, data_format=data_format)
return op
for dtype in self.numeric_types:
for data_format in ["NCHW", "NHWC"]:
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array([[[[1, 2, 3, 4]]]], dtype=dtype), data_format),
expected=nhwc_to_format(
np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]], dtype=dtype),
data_format),
expected=nhwc_to_format(
np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]],
dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
[13, 14, 15, 16]]]],
dtype=dtype), data_format),
expected=nhwc_to_format(
np.array(
[[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]],
dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op("NCHW_VECT_C"),
np.arange(32, dtype=dtype).reshape((1, 8, 1, 1, 4)),
expected=np.array([[[[[0, 1], [8, 9]], [[16, 17], [24, 25]]],
[[[2, 3], [10, 11]], [[18, 19], [26, 27]]],
[[[4, 5], [12, 13]], [[20, 21], [28, 29]]],
[[[6, 7], [14, 15]], [[22, 23], [30, 31]]]]],
dtype=dtype))
def testSpaceToDepth(self):
def make_op(data_format):
def op(x):
return array_ops.space_to_depth(
x, block_size=2, data_format=data_format)
return op
for dtype in self.numeric_types:
for data_format in ["NCHW", "NHWC"]:
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype), data_format),
expected=nhwc_to_format(
np.array([[[[1, 2, 3, 4]]]], dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]],
dtype=dtype), data_format),
expected=nhwc_to_format(
np.array(
[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]], dtype=dtype),
data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]],
dtype=dtype), data_format),
expected=nhwc_to_format(
np.array(
[[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
[13, 14, 15, 16]]]],
dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op("NCHW_VECT_C"),
np.arange(32, dtype=dtype).reshape((1, 2, 2, 2, 4)),
expected=np.array([[[[[0, 1, 2, 3, 16, 17, 18, 19]]],
[[[4, 5, 6, 7, 20, 21, 22, 23]]],
[[[8, 9, 10, 11, 24, 25, 26, 27]]],
[[[12, 13, 14, 15, 28, 29, 30, 31]]]]],
dtype=dtype))
def _assertSoftplusMatchesExpected(self, features, dtype):
features = np.array(features, dtype=dtype)
zero = np.asarray(0).astype(dtype)
expected = np.logaddexp(zero, features).astype(dtype)
self._assertOpOutputMatchesExpected(
nn_ops.softplus, features, expected=expected, rtol=1e-6, atol=9.1e-6)
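    # The reference above uses the identity softplus(x) = log(1 + exp(x))
    # = np.logaddexp(0, x); for example softplus(0) = log(2) ~= 0.693.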
def testSoftplus(self):
for dtype in self.float_types:
self._assertSoftplusMatchesExpected([[-2, 0, 8]], dtype)
self._assertSoftplusMatchesExpected(
[[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]], dtype)
if dtype == dtypes.bfloat16.as_numpy_dtype:
log_eps = np.log(np.finfo(np.float32).eps)
else:
log_eps = np.log(np.finfo(dtype).eps)
one = dtype(1)
ten = dtype(10)
self._assertSoftplusMatchesExpected([
log_eps, log_eps - one, log_eps + one, log_eps - ten, log_eps + ten,
-log_eps, -log_eps - one, -log_eps + one, -log_eps - ten,
-log_eps + ten
], dtype)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/unary_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for XLA Gather Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
class GatherTest(xla_test.XLATestCase):
def _buildParams(self, data, dtype):
data = data.astype(dtype.as_numpy_dtype)
    # For complex types, add a value-dependent imaginary component so we can
    # tell that we gathered the right values.
if dtype.is_complex:
return data + 10j * data
return data
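    # For example, the row [0, 1, 2] becomes [0+0j, 1+10j, 2+20j] for complex
    # dtypes, so gathering the wrong element is easy to spot in the comparison.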
def testScalar1D(self):
with self.session() as session, self.test_scope():
data = np.array([0, 1, 2, 3, 7, 5])
for dtype in self.all_tf_types:
for indices in 4, [4], [1, 2, 2, 4, 5]:
params_np = self._buildParams(data, dtype)
params = array_ops.placeholder(dtype=dtype)
indices_tf = constant_op.constant(indices)
gather_t = array_ops.gather(params, indices_tf)
gather_val = session.run(gather_t, feed_dict={params: params_np})
np_val = constant_op.constant(params_np[indices])
self.assertAllEqual(np_val, gather_val)
def testScalar2D(self):
with self.session() as session, self.test_scope():
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14]])
for dtype in self.all_tf_types:
for axis in 0, 1, -1:
params_np = self._buildParams(data, dtype)
params = array_ops.placeholder(dtype=dtype)
indices = constant_op.constant(2)
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = session.run(gather_t, feed_dict={params: params_np})
expected = constant_op.constant(
np.take(params_np, 2, axis=axis), dtype)
self.assertAllEqual(expected, gather_val)
def testSimpleTwoD32(self):
with self.session() as session, self.test_scope():
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14]])
for dtype in self.all_tf_types:
for axis in 0, 1, -1:
params_np = self._buildParams(data, dtype)
params = array_ops.placeholder(dtype=dtype)
# The indices must be in bounds for any axis.
indices = constant_op.constant([0, 1, 0, 2])
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = session.run(gather_t, feed_dict={params: params_np})
expected = constant_op.constant(
np.take(params_np, [0, 1, 0, 2], axis=axis), dtype)
self.assertAllEqual(expected, gather_val)
def testSimpleTwoD32_Int64Indices(self):
if np.int64 not in self.int_types:
return
with self.session() as session, self.test_scope():
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14]])
# The indices must be in bounds for any axis.
indices_np = np.array([0, 1, 0, 2])
for dtype in self.all_tf_types:
for axis in 0, 1, -1:
params_np = self._buildParams(data, dtype)
params = array_ops.placeholder(dtype=dtype)
indices = array_ops.placeholder(dtype=dtypes.int64)
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = session.run(
gather_t, feed_dict={
params: params_np,
indices: indices_np
})
expected = constant_op.constant(
np.take(params_np, [0, 1, 0, 2], axis=axis), dtype)
self.assertAllEqual(expected, gather_val)
def testHigherRank(self):
"""Check that scalar and empty indices shapes work as well."""
shape = (2, 1, 3, 2)
for indices_shape in (), (0,), (2, 0), (2, 3):
for dtype in self.all_tf_types:
for axis in 0, 1, 2, 3, -1, -2:
params = self._buildParams(np.random.randn(*shape), dtype)
indices = np.random.randint(shape[axis], size=indices_shape)
with self.session() as sess, self.test_scope():
tf_params = array_ops.placeholder(dtype=dtype)
tf_indices = constant_op.constant(indices, dtype=dtypes.int32)
gather = array_ops.gather(tf_params, tf_indices, axis=axis)
gather_value = sess.run(gather, feed_dict={tf_params: params})
gather_np = constant_op.constant(
np.take(params, indices, axis=axis), dtype)
self.assertAllEqual(gather_np, gather_value)
def testIndicesWithDifferentDimensions(self):
with self.session():
for dtype in self.numeric_tf_types:
params = array_ops.placeholder(dtype=dtype)
indices = array_ops.placeholder(dtype=np.int32)
with self.test_scope():
gather = array_ops.gather(params, indices)
self.assertAllEqual(
7, gather.eval(feed_dict={params: [4, 7, 2], indices: 1}))
self.assertAllEqual(
[7], gather.eval(feed_dict={params: [4, 7, 2], indices: [1]}))
self.assertAllEqual(
[[7]], gather.eval(feed_dict={params: [4, 7, 2], indices: [[1]]}))
def testGatherPrecision(self):
with self.session() as session, self.test_scope():
data = np.array([[0, 0, 0, 0], [0, 2 * (1 + np.exp2(-8)), 0, 0],
[0, 0, 0, 0], [0.015789, 0.0985, 0.55789, 0.3842]])
indices = np.array([1, 2, 3, 1])
dtype = dtypes.float32
params_np = self._buildParams(data, dtype)
params = array_ops.placeholder(dtype=dtype)
indices_tf = constant_op.constant(indices)
gather_t = array_ops.gather(params, indices_tf)
gather_val = session.run(gather_t, feed_dict={params: params_np})
np_val = params_np[indices]
self.assertAllEqual(np_val, gather_val)
class GatherBenchmark(test.Benchmark):
"""Microbenchmarks for the gather op."""
def _benchmarkGather(self, name, axis, gather_indices, use_xla_jit):
def BuilderFn():
inputs = variables.Variable(
array_ops.zeros([100, 100, 10, 100, 50], dtype=dtypes.float32),
dtype=dtypes.float32,
name='input')
indices = variables.Variable(
gather_indices, dtype=dtypes.int32, name='indices')
gather_t = array_ops.gather(inputs, indices, axis=axis)
return '%s.axis%d' % (name, axis), [gather_t]
xla_test.Benchmark(self, BuilderFn, use_xla_jit=use_xla_jit, device='cpu')
def _benchmarkSliceGather(self, axis, use_xla_jit):
"""Benchmarks a gather op that's really a dynamic slice."""
self._benchmarkGather('slice_gather', axis, [1], use_xla_jit)
def _benchmarkNontrivialGather(self, axis, use_xla_jit):
self._benchmarkGather('nontrivial_gather', axis, [9, 1, 0, 2] * 4,
use_xla_jit)
def benchmarkSliceGatherAxis0(self):
self._benchmarkSliceGather(axis=0, use_xla_jit=False)
def benchmarkSliceGatherAxis0XLA(self):
self._benchmarkSliceGather(axis=0, use_xla_jit=True)
def benchmarkSliceGatherAxis1(self):
self._benchmarkSliceGather(axis=1, use_xla_jit=False)
def benchmarkSliceGatherAxis1XLA(self):
self._benchmarkSliceGather(axis=1, use_xla_jit=True)
def benchmarkSliceGatherAxis4(self):
self._benchmarkSliceGather(axis=4, use_xla_jit=False)
def benchmarkSliceGatherAxis4XLA(self):
self._benchmarkSliceGather(axis=4, use_xla_jit=True)
def benchmarkNontrivialGatherAxis0(self):
self._benchmarkNontrivialGather(axis=0, use_xla_jit=False)
def benchmarkNontrivialGatherAxis0XLA(self):
self._benchmarkNontrivialGather(axis=0, use_xla_jit=True)
def benchmarkNontrivialGatherAxis1(self):
self._benchmarkNontrivialGather(axis=1, use_xla_jit=False)
def benchmarkNontrivialGatherAxis1XLA(self):
self._benchmarkNontrivialGather(axis=1, use_xla_jit=True)
def benchmarkNontrivialGatherAxis4(self):
self._benchmarkNontrivialGather(axis=4, use_xla_jit=False)
def benchmarkNontrivialGatherAxis4XLA(self):
self._benchmarkNontrivialGather(axis=4, use_xla_jit=True)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/gather_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for eager execution using XLA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import convolutional
from tensorflow.python.layers import pooling
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import googletest
from tensorflow.python.training import adam
class EagerTest(xla_test.XLATestCase):
def testBasic(self):
with self.test_scope():
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
def testGradientTape(self):
with self.test_scope():
x = constant_op.constant(1.0)
y = constant_op.constant(10.0)
with backprop.GradientTape(persistent=True) as tape:
tape.watch(x)
tape.watch(y)
a = x + y + x * y
da_dx = tape.gradient(a, x)
da_dy = tape.gradient(a, y)
self.assertEqual(11.0, da_dx.numpy())
self.assertEqual(2.0, da_dy.numpy())
def testExecuteListOutputLen0(self):
with self.test_scope():
empty = constant_op.constant([], dtype=dtypes.float32)
result = array_ops.unstack(empty, 0)
self.assertTrue(isinstance(result, list))
self.assertEqual(0, len(result))
def testExecuteListOutputLen1(self):
with self.test_scope():
split_dim = constant_op.constant(1)
value = constant_op.constant([[0., 1., 2.], [3., 4., 5.]])
result = array_ops.split(value, 1, axis=split_dim)
self.assertTrue(isinstance(result, list))
self.assertEqual(1, len(result))
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], result[0])
def testExecuteListOutputLen3(self):
with self.test_scope():
split_dim = constant_op.constant(1)
value = constant_op.constant([[0., 1., 2.], [3., 4., 5.]])
result = array_ops.split(value, 3, axis=split_dim)
self.assertTrue(isinstance(result, list))
self.assertEqual(3, len(result))
self.assertAllEqual([[0], [3]], result[0])
self.assertAllEqual([[1], [4]], result[1])
self.assertAllEqual([[2], [5]], result[2])
def testBasicGraph(self):
# Run some ops eagerly
with self.test_scope():
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
    # Run the same ops in graph mode
with context.graph_mode(), self.session():
with self.test_scope():
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, self.evaluate(product))
def testDegenerateSlices(self):
with self.test_scope():
npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
t = constant_op.constant(npt)
# degenerate by offering a forward interval with a negative stride
self.assertAllEqual(npt[0:-1:-1, :, :], t[0:-1:-1, :, :])
# degenerate with a reverse interval with a positive stride
self.assertAllEqual(npt[-1:0, :, :], t[-1:0, :, :])
# empty interval in every dimension
self.assertAllEqual(npt[-1:0, 2:2, 2:3:-1], t[-1:0, 2:2, 2:3:-1])
def testIdentity(self):
with self.test_scope():
self.assertAllEqual(2, array_ops.identity(2))
def testRandomOps(self):
with self.test_scope():
tensor = gen_random_ops.random_uniform((2, 2), dtypes.float32)
row0 = tensor[0].numpy()
row1 = tensor[1].numpy()
      # It should be very unlikely for the rng to generate two equal rows.
self.assertFalse((row0 == row1).all())
def testIdentityOnVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(True)
i = array_ops.identity(v)
self.assertAllEqual(True, i.numpy())
def testAssignAddVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
v.assign_add(2.0)
self.assertEqual(3.0, v.numpy())
def testReadAssignRead(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
val1 = v.read_value()
v.assign_add(2.0)
val2 = v.read_value()
self.assertEqual(1.0, val1.numpy())
self.assertEqual(3.0, val2.numpy())
def testGradient(self):
def f(x):
return x
with self.test_scope():
grad_fn = backprop.gradients_function(f)
self.assertAllEqual(2., grad_fn(1., dy=2.)[0])
def testVariableGradient(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(1.0)
def f():
x = v0 * v0
return x
grads = backprop.implicit_grad(f)()
self.assertEqual(2., grads[0][0].numpy())
def testMultipleVariableReads(self):
# This test makes sure consecutive variable reads don't copy
# the underlying memory.
with self.test_scope():
      # Create a 128 MiB variable (32 * 1024 * 1024 float32 values).
var = resource_variable_ops.ResourceVariable(
array_ops.ones([32, 1024, 1024]))
# Read the same variable 100 times. If the underlying tensor
# is not copied, this is a trivial operation. If it is copied,
# this will eat over 13GB and OOM.
values = []
for _ in range(100):
values.append(var.value())
  # The shape, shape_n, size, and rank ops are tested here because their
  # execution kernels are distinct from their compilation-only tf2xla
  # kernels.
def testShape(self):
def const(value):
return array_ops.shape(
constant_op.constant(value)).numpy()
def ones(value):
return array_ops.shape(
array_ops.ones(value)).numpy()
with self.test_scope():
# Shapes of directly constructed tensors
self.assertAllEqual([], const(3))
self.assertAllEqual([3], const([1.0, 2.0, 3.0]))
self.assertAllEqual([2, 2], const([[1.0, 2.0], [3.0, 4.0]]))
self.assertAllEqual([2, 1, 2], const([[[1.0, 2.0]], [[3.0, 4.0]]]))
# Shapes of tensors created by op running on device
# We make this distinction because directly constructed tensors
# are treated differently in a few places that can influence shape:
# - they always have on_host_tensor
# - they and their shapes can be cached
# - they end up on device via a copy, instead of as program output
self.assertAllEqual([], ones([]))
self.assertAllEqual([3], ones([3]))
self.assertAllEqual([2, 2], ones([2, 2]))
self.assertAllEqual([2, 1, 2], ones([2, 1, 2]))
def testShapeN(self):
with self.test_scope():
# Shapes of directly constructed tensors
shapes = array_ops.shape_n([
constant_op.constant(1.0),
constant_op.constant([1.0, 2.0, 3.0]),
constant_op.constant([[1.0, 2.0], [3.0, 4.0]])])
self.assertAllEqual(
[[], [3], [2, 2]],
[x.numpy().tolist() for x in shapes])
# Shapes of tensors created by op running on device
shapes = array_ops.shape_n([
array_ops.ones([]),
array_ops.ones([3]),
array_ops.ones([2, 2])])
self.assertAllEqual(
[[], [3], [2, 2]],
[x.numpy().tolist() for x in shapes])
def testSize(self):
with self.test_scope():
self.assertEqual(
1, array_ops.size(constant_op.constant(1.0)).numpy())
self.assertEqual(
3, array_ops.size(constant_op.constant([1.0, 2.0, 3.0])).numpy())
self.assertEqual(
4, array_ops.size(
constant_op.constant([[1.0, 2.0], [3.0, 4.0]])).numpy())
def testRank(self):
with self.test_scope():
self.assertEqual(
0, array_ops.rank(constant_op.constant(1.0)).numpy())
self.assertEqual(
1, array_ops.rank(constant_op.constant([1.0, 2.0, 3.0])).numpy())
self.assertEqual(
2, array_ops.rank(
constant_op.constant([[1.0, 2.0], [3.0, 4.0]])).numpy())
def testAdam(self):
with self.test_scope():
optimizer = adam.AdamOptimizer(0.1)
x = resource_variable_ops.ResourceVariable(10.0)
with backprop.GradientTape() as tape:
y = x * x
dy_dx = tape.gradient(y, x)
optimizer.apply_gradients([(dy_dx, x)])
self.assertAlmostEqual(9.9, x.numpy(), places=3)
def testAdamSparse(self):
with ops.device('/cpu:0'):
      # Create a 2-D embedding for 3 objects on the CPU because sparse/sliced
      # updates are not implemented on TPU.
embedding_matrix = resource_variable_ops.ResourceVariable(
array_ops.ones([3, 2]))
with self.test_scope():
with backprop.GradientTape() as tape:
embedding = embedding_ops.embedding_lookup(embedding_matrix, [1])
y = math_ops.reduce_sum(embedding)
dy_dx = tape.gradient(y, embedding_matrix)
self.assertIsInstance(dy_dx, ops.IndexedSlices)
optimizer = adam.AdamOptimizer(0.1)
# The gradient application operations will run on CPU because optimizer
# updates are always collocated with the variable.
optimizer.apply_gradients([(dy_dx, embedding_matrix)])
# This assign_add will run on CPU because when an input to an
# operation is a resource, this operation is placed on the resource's
# device by the eager runtime.
embedding_matrix.assign_add(array_ops.ones([3, 2]))
self.assertAllClose([[2.0, 2.0],
[1.9, 1.9],
[2.0, 2.0]], embedding_matrix.numpy())
class EagerFunctionTest(xla_test.XLATestCase):
def testBasic(self):
with self.test_scope():
matmul = function.defun(math_ops.matmul)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t, transpose_a=True)
self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20])
def testConv(self):
if 'GPU' in self.device:
# TODO(b/32333178)
self.skipTest('Current implementation of RandomStandardNormal kernel '
'is very slow on GPU, and has been blacklisted.')
with self.test_scope():
data_format = 'channels_last'
conv = convolutional.Conv2D(
filters=1, kernel_size=2, padding='VALID',
data_format=data_format, activation=nn_ops.relu,
kernel_initializer=init_ops.ones_initializer(),
bias_initializer=init_ops.zeros_initializer())
pool = pooling.MaxPooling2D(2, 2, data_format=data_format)
def model(x):
x = conv(x)
return pool(x)
model = function.defun(model)
x = array_ops.ones([1, 4, 4, 1])
y = model(x)
self.assertAllEqual(y.numpy(), [[[[4.]]]])
def testReadVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def f():
return v.read_value()
var = f()
self.assertEqual(1.0, var.numpy())
def testResourceVariableNoInlineReadWrite(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
w = resource_variable_ops.ResourceVariable(0.0)
@function.defun_with_attributes(attributes={'_noinline': True})
def g(x):
w.assign(w.read_value() + x)
return v.read_value() + x * w.read_value()
@function.defun_with_attributes(attributes={'_noinline': True})
def f():
return g(1.0) + g(2.0) + g(3.0) + g(4.0) + g(5.0)
# 1 + 1*1 + 1 + 2*3 + 1 + 3*6 + 1 + 4*10 + 1 + 5*15
self.assertEqual(145.0, f().numpy())
self.assertEqual(15.0, w.read_value().numpy())
def testResourceVariableNoInlineReadOnly(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(10.0)
@function.defun_with_attributes(attributes={'_noinline': True})
def g():
return v.read_value()
@function.defun_with_attributes(attributes={'_noinline': True})
def f():
return g() + g() + g() + g() + g()
self.assertEqual(50.0, f().numpy())
def testResourceVariableNoInlineWriteOnly(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(0.0)
@function.defun_with_attributes(attributes={'_noinline': True})
def g(x):
v.assign(x)
@function.defun_with_attributes(attributes={'_noinline': True})
def f():
g(1.0)
g(2.0)
g(3.0)
g(4.0)
g(5.0)
f()
self.assertEqual(5.0, v.read_value().numpy())
def testUpdateVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
def f(v):
v.assign_add(1.0)
return v
f = function.defun(f)
var = f(v)
self.assertEqual(2.0, var.numpy())
def testReturnResourceHandle(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable([[1.0, 2.0], [3.0, 4.0]])
def f(v):
return v.handle
f = function.defun(f)
handle = f(v)
self.assertAllEqual(v.numpy(),
resource_variable_ops.read_variable_op(
handle, dtypes.float32).numpy())
def testReturnMultipleResourceHandles(self):
with self.test_scope():
v1 = resource_variable_ops.ResourceVariable(1.25)
v2 = resource_variable_ops.ResourceVariable(2.0)
def f(v):
return v.handle, 3.0 * v, v2.handle, v + v2
f = function.defun(f)
v1_handle, v1_times_3, v2_handle, variable_sum = f(v1)
self.assertAllEqual(v1.numpy(),
resource_variable_ops.read_variable_op(
v1_handle, dtypes.float32).numpy())
self.assertEqual(3.75, v1_times_3.numpy())
self.assertAllEqual(v2.numpy(),
resource_variable_ops.read_variable_op(
v2_handle, dtypes.float32).numpy())
self.assertEqual(3.25, variable_sum.numpy())
def testAllArgumentKinds(self):
"""Test a complex function that takes different argument kinds.
    The tf2xla machinery that translates, compiles, and runs defuns
    classifies arguments into compile-time constants, regular tensors,
    and resources. This test creates a function with a mix of all these
    kinds, and the order of the function arguments is intentionally mixed up.
    It also covers the case where the same argument is used both as a
    compile-time constant and as an input to an operation that expects its
    inputs in device memory - addition in this case.
"""
with self.test_scope():
def foo(c1, r1, v1, c2, v2, r2):
# c1 and c2 are compile-time constants
# r1 and r2 are regular tensors
# v1 and v2 are resource variables
a = c1 + r1
b = math_ops.cast(c2, dtypes.float32) + v2
c = array_ops.slice(v1, c1, c2)
d = r2 * v2
return a, b, c, d
foo = function.defun(foo)
c1 = [0, 0]
c2 = array_ops.ones([2], dtype=dtypes.int32)
r1 = array_ops.ones([2])
r2 = [[2., 2.], [3., 3.]]
v1 = resource_variable_ops.ResourceVariable([[1., 2.], [3., 4.]])
v2 = resource_variable_ops.ResourceVariable([[10., 20.], [30., 40.]])
a, b, c, d = foo(c1, r1, v1, c2, v2, r2)
self.assertAllEqual([1, 1], a.numpy())
self.assertAllEqual([[11., 21.], [31., 41.]], b.numpy())
self.assertAllEqual([[1.]], c.numpy())
self.assertAllEqual([[20., 40.], [90., 120.]], d.numpy())
def testDefunInGradientTape(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def f(x):
x = v0 * v0 * x
return x
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
y = f(x)
dy = tape.gradient(y, v0)
self.assertEqual(75, y.numpy())
self.assertEqual(30, dy.numpy())
def testGradientTapeInDefun(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def f():
x = constant_op.constant(1.0)
with backprop.GradientTape() as tape:
y = v0 * x
dy = tape.gradient(y, v0)
return dy
dy = f()
self.assertEqual(1.0, dy.numpy())
def testSliceInDefun(self):
with self.test_scope():
@function.defun
def f(x, y):
return x[0::2, y:, ...]
x = array_ops.ones([2, 3, 4], dtype=dtypes.float32)
y = array_ops.ones([], dtype=dtypes.int32)
with backprop.GradientTape() as tape:
tape.watch(x)
tape.watch(y)
z = f(x, y)
dz = tape.gradient(z, x)
self.assertAllEqual(np.ones([1, 2, 4]), z.numpy())
self.assertAllEqual((2, 3, 4), dz.shape.as_list())
def testNestedDefun(self):
with self.test_scope():
@function.defun
def times_two(x):
return 2. * x
@function.defun
def two_x_plus_1(x):
return times_two(x) + 1.
x = constant_op.constant([2., 3., 4.])
y = two_x_plus_1(x)
self.assertAllEqual([5., 7., 9.], y.numpy())
def testNestedDefunWithVariable(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def g(x):
x = v0 * x
return x
@function.defun
def f(x):
x = g(v0 * x)
return x
x = constant_op.constant(3.0)
y = f(x)
self.assertEqual(75.0, y.numpy())
def testNestedDefunInGradientTape(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def g(x):
x = v0 * x
return x
@function.defun
def f(x):
x = g(v0 * x)
return x
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
y = f(x)
dy = tape.gradient(y, v0)
self.assertEqual(75, y.numpy())
self.assertEqual(30, dy.numpy())
def testNestedDefunInGradientTapeDifferentVars(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
v1 = resource_variable_ops.ResourceVariable(3.0)
@function.defun
def g(x):
x = v1 * x
return x
@function.defun
def f(x):
x = g(v0 * x)
return x
x = constant_op.constant(3.0)
with backprop.GradientTape(persistent=True) as tape:
y = f(x)
dy_v0 = tape.gradient(y, v0)
dy_v1 = tape.gradient(y, v1)
self.assertEqual(45, y.numpy())
self.assertEqual(9, dy_v0.numpy())
self.assertEqual(15, dy_v1.numpy())
def testWhileInDefun(self):
with self.test_scope():
@def_function.function
def f(start):
c = lambda x: math_ops.less(x, 13.0)
b = lambda x: math_ops.add(x, 1.0)
return control_flow_ops.while_loop(c, b, [start])
y = f(constant_op.constant(3.0))
self.assertEqual(13.0, y.numpy())
def testAutoGraphWhileInDefun(self):
with self.test_scope():
@def_function.function
def f(start):
x = start
while x < 13.0:
x += 1.0
return x
y = f(constant_op.constant(3.0))
self.assertEqual(13.0, y.numpy())
def testCondInDefun(self):
with self.test_scope():
@def_function.function
def f(pred, value):
fn1 = lambda: math_ops.add(value, 1.0)
fn2 = lambda: math_ops.subtract(value, 1.0)
return control_flow_ops.cond(pred, fn1, fn2)
plus_one = f(constant_op.constant(True), constant_op.constant(10.0))
minus_one = f(constant_op.constant(False), constant_op.constant(10.0))
self.assertEqual(11.0, plus_one.numpy())
self.assertEqual(9.0, minus_one.numpy())
def testAutoGraphCondInDefun(self):
with self.test_scope():
@def_function.function
def f(pred, value):
if pred:
return value + 1.0
else:
return value - 1.0
plus_one = f(constant_op.constant(True), constant_op.constant(10.0))
minus_one = f(constant_op.constant(False), constant_op.constant(10.0))
self.assertEqual(11.0, plus_one.numpy())
self.assertEqual(9.0, minus_one.numpy())
def testScanInDefun(self):
with self.test_scope():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name='data')
v = constant_op.constant(2.0, name='v')
@def_function.function
def f(y):
# pylint: disable=unnecessary-lambda
return functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), y, initializer=v)
# pylint: enable=unnecessary-lambda
r = f(elems)
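      # scan keeps a running product seeded with v = 2:
      # 2*1=2, 2*2=4, 4*3=12, 12*4=48, 48*5=240, 240*6=1440.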
self.assertAllEqual([2., 4., 12., 48., 240., 1440.], self.evaluate(r))
def testFeedDeviceMemoryToOpExpectingHostMemory(self):
@function.defun
def f(dims, value):
return array_ops.fill(dims, value)
with self.test_scope():
x = constant_op.constant([4], dtype=dtypes.int64)
y = f(x, 3)
self.assertAllEqual([3, 3, 3, 3], y)
def testRequestNotToCompile(self):
with self.test_scope():
def f(x):
with ops.device('device:CPU:0'):
y = 2.0 * x
return x, y
wholly_compiled_f = def_function.function(f)
op_by_op_f = def_function.function(f, experimental_compile=False)
x = constant_op.constant([0.0, 2.0], name='data')
# When function is wholly compiled, all outputs will be on the
# device on which it is run.
r_x, r_y = wholly_compiled_f(x)
self.assertAllEqual([0.0, 2.0], r_x)
self.assertAllEqual([0.0, 4.0], r_y)
if context.executing_eagerly():
# backing_device is only available for eager tensors.
self.assertRegexpMatches(r_x.backing_device, self.device)
self.assertRegexpMatches(r_y.backing_device, self.device)
# When function is executed op-by-op, requested devices will be
# respected.
r_x, r_y = op_by_op_f(x)
self.assertAllEqual([0.0, 2.0], r_x)
self.assertAllEqual([0.0, 4.0], r_y)
if context.executing_eagerly():
# backing_device is only available for eager tensors.
self.assertRegexpMatches(r_x.backing_device, self.device)
self.assertRegexpMatches(r_y.backing_device, 'device:CPU:0')
class ExcessivePaddingTest(xla_test.XLATestCase):
"""Test that eager execution works with TPU flattened tensors.
Tensors that would normally be excessively padded when written
to TPU memory are reshaped to 1-D flat tensors.
This test case verifies that such tensors work with eager execution.
The flattening currently only happens on TPU, but tests should work
fine with all backends as flattening is transparent.
"""
def testFromConstant(self):
with self.test_scope():
# Create constant of shape [100, 2, 1]. This tensor would be
# excessively padded on TPU.
tensor = constant_op.constant(100 * [[[10.0], [2.0]]])
      # Use reduce_sum since it requires handling a particular dimension
      # correctly.
reduced = math_ops.reduce_sum(tensor, axis=1)
self.assertAllEqual(100 * [[12.0]], reduced)
def testFromOperation(self):
with self.test_scope():
tensor = array_ops.ones([3, 100, 2, 2])
reduced = math_ops.reduce_sum(tensor, axis=[0, 2, 3])
self.assertAllEqual(100 * [12.0], reduced)
def testAsFunctionInput(self):
with self.test_scope():
@function.defun
def f(x):
return math_ops.reduce_sum(x, axis=2)
tensor = constant_op.constant(100 * [[[10.0, 2.0]]])
reduced = f(tensor)
self.assertAllEqual(100 * [[12.0]], reduced)
def testAsFunctionOutput(self):
with self.test_scope():
@function.defun
def f(x):
return x * constant_op.constant(100 * [[[10.0, 2.0]]])
y = f(3)
reduced = math_ops.reduce_sum(y, axis=2)
self.assertAllEqual(100 * [[36.0]], reduced)
def multiple_tpus():
devices = context.context().devices()
return len([d for d in devices if 'device:TPU:' in d]) > 1
class MultiDeviceTest(xla_test.XLATestCase):
"""Test running TPU computation on more than one core."""
def testBasic(self):
if not multiple_tpus():
self.skipTest('MultiDeviceTest requires multiple TPU devices.')
# Compute 10 on TPU core 0
with ops.device('device:TPU:0'):
two = constant_op.constant(2)
five = constant_op.constant(5)
ten = two * five
self.assertAllEqual(10, ten)
# Compute 6 on TPU core 1
with ops.device('device:TPU:1'):
two = constant_op.constant(2)
three = constant_op.constant(3)
six = two * three
self.assertAllEqual(6, six)
# Copy 10 and 6 to CPU and sum them
self.assertAllEqual(16, ten + six)
if __name__ == '__main__':
ops.enable_eager_execution(
config=config_pb2.ConfigProto(log_device_placement=True))
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/eager_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
def NHWCToNCHW(input_tensor):
"""Convert the input from NHWC format to NCHW.
Args:
input_tensor: a 4-D tensor, or a 4-element array representing the same.
Returns:
the converted tensor or a shape array
"""
if isinstance(input_tensor, ops.Tensor):
return array_ops.transpose(input_tensor, [0, 3, 1, 2])
else:
return [input_tensor[0], input_tensor[3], input_tensor[1], input_tensor[2]]
def NCHWToNHWC(input_tensor):
"""Convert the input from NCHW format to NHWC.
Args:
input_tensor: a 4-D tensor, or a 4-element array representing the same.
Returns:
the converted tensor or a shape array
"""
if isinstance(input_tensor, ops.Tensor):
return array_ops.transpose(input_tensor, [0, 2, 3, 1])
else:
return [input_tensor[0], input_tensor[2], input_tensor[3], input_tensor[1]]
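# For reference, on shape lists these helpers are plain permutations, e.g.
#   NHWCToNCHW([1, 4, 4, 3]) == [1, 3, 4, 4]
#   NCHWToNHWC([1, 3, 4, 4]) == [1, 4, 4, 3]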
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs
"""
test_configs = ["NHWC", "NCHW"]
return test_configs
class PoolingTest(xla_test.XLATestCase):
def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,
data_format, expected):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Function to be called, e.g. nn_ops.max_pool or
        nn_ops.avg_pool.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
expected: An array containing the expected operation outputs.
"""
total_size = np.prod(input_sizes)
    # Initializes the input tensor with an array containing incrementing
# numbers from 1.
x = np.array([f * 1.0 for f in range(1, total_size + 1)], dtype=np.float32)
x = x.reshape(input_sizes)
with self.session() as sess:
with self.test_scope():
inputs = array_ops.placeholder(dtypes.float32)
t = inputs
if data_format == "NCHW":
t = NHWCToNCHW(t)
ksize = NHWCToNCHW(ksize)
strides = NHWCToNCHW(strides)
t = pool_func(t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
t = NCHWToNHWC(t)
actual = sess.run(t, {inputs: x})
self.assertAllClose(expected, actual.flatten(), rtol=1e-5, atol=1e-6)
def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
expected):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Function to be called, e.g. nn_ops.max_pool or
        nn_ops.avg_pool.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
for data_format in GetTestConfigs():
self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding,
data_format, expected)
def testMaxPoolValidPadding(self):
expected_output = [13.0, 14.0, 15.0]
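    # The 1x3x3x3 input is filled with 1..27, so a 2x2 VALID window with
    # stride 2 keeps only the maximum at spatial position (1, 1), i.e.
    # values 13, 14, 15 (one per channel).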
self._VerifyValues(nn_ops.max_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output)
def testMaxPoolSamePadding(self):
expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
self._VerifyValues(nn_ops.max_pool,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
def testMaxPoolSamePaddingNonSquareWindow(self):
    # Input is:
    #   [[1.0, 2.0],
    #    [3.0, 4.0]]
    #
    # A 1x2 window (ksize=[1, 1, 2, 1]) with SAME padding should produce:
    #   [max(1.0, 2.0), max(2.0, pad),
    #    max(3.0, 4.0), max(4.0, pad)]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 2.0, 4.0, 4.0])
def testMaxPoolValidPaddingUnevenStride(self):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0])
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0])
def testMaxPoolSamePaddingFilter4(self):
expected_output = [
21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,
61.0, 62.0, 63.0, 64.0
]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
def testMaxPoolSamePaddingFilter8(self):
expected_output = [
145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0, 161.0, 162.0,
163.0, 164.0, 165.0, 166.0, 167.0, 168.0, 177.0, 178.0, 179.0, 180.0,
181.0, 182.0, 183.0, 184.0, 185.0, 186.0, 187.0, 188.0, 189.0, 190.0,
191.0, 192.0, 273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,
289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0, 305.0, 306.0,
307.0, 308.0, 309.0, 310.0, 311.0, 312.0, 313.0, 314.0, 315.0, 316.0,
317.0, 318.0, 319.0, 320.0, 401.0, 402.0, 403.0, 404.0, 405.0, 406.0,
407.0, 408.0, 417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,
433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0, 441.0, 442.0,
443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 465.0, 466.0, 467.0, 468.0,
469.0, 470.0, 471.0, 472.0, 481.0, 482.0, 483.0, 484.0, 485.0, 486.0,
487.0, 488.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,
505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0
]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
  # Tests for depthwise max pooling.
def testDepthwiseMaxPool1x1DepthWindow1(self):
# input is:
# [1.0, ..., 10.0] along depth,
#
# We maxpool by depth in patches of 2.
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 1, 1, 10],
ksize=[1, 1, 1, 2],
strides=[1, 1, 1, 2],
padding="SAME",
expected=[2.0, 4.0, 6.0, 8.0, 10.0])
def testDepthwiseMaxPool2x2DepthWindow3(self):
# input is:
#
# a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
# output. Each node has contiguous values, so the depthwise max
# should be multiples of 3.0.
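    # For example, the first spatial position holds depth values 1..6, so its
    # two depth windows yield max(1, 2, 3) = 3.0 and max(4, 5, 6) = 6.0.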
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 2, 6],
ksize=[1, 1, 1, 3],
strides=[1, 1, 1, 3],
padding="SAME",
expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0])
def testKernelSmallerThanStrideValid(self):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID",
expected=[9, 12, 30, 33])
def testKernelSmallerThanStrideSame(self):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 7, 9])
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 9, 11])
# Average pooling
def testAvgPoolValidPadding(self):
expected_output = [7, 8, 9]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output)
def testAvgPoolSamePadding(self):
expected_output = [7., 8., 9., 11.5, 12.5, 13.5]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
class PoolGradTest(xla_test.XLATestCase):
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
def _VerifyOneTest(self,
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
data_format,
pool_grad_grad_func=None):
"""Verifies the output values of the pooling gradient function.
Args:
pool_func: Forward pooling function
pool_grad_func: Pooling gradient function for pool_grad_func
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
pool_grad_grad_func: Second-order gradient function, if available.
"""
total_size = np.prod(input_sizes)
# TODO(b/73062247): MaxPoolGradGrad can confuse gradients when x is equally
# maximal at 16 bits. Switch to np.random.randn when resolved.
x = np.arange(1, total_size + 1, dtype=np.float32)
x *= (np.random.randint(2, size=total_size) * 2 - 1) # Flip signs randomly
# Verify some specifically interesting values...
x[np.random.choice(total_size)] = np.inf
x[np.random.choice(total_size)] = -np.inf
# TODO(b/74222344): Fix nan handling for max pool grad.
# x[np.random.choice(total_size)] = np.nan
x = x.reshape(input_sizes)
with self.session() as sess:
# Use the forward pool function to compute some corresponding outputs
# (needed for the CPU device, and we need the shape in both cases).
with ops.device(self.CPU_DEVICE):
inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)
outputs = pool_func(
inputs,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
output_vals = np.array(sess.run(outputs, {inputs: x}))
output_gradient_vals = np.arange(
1, output_vals.size + 1, dtype=np.float32)
output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)
output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)
output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)
# Use the Tensorflow CPU pooling gradient to compute the expected input
# gradients.
with ops.device(self.CPU_DEVICE):
output_gradients = array_ops.placeholder(
dtypes.float32, shape=output_vals.shape)
expected_input_gradients = pool_grad_func(
inputs,
outputs,
output_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
expected_input_gradient_vals = sess.run(
expected_input_gradients,
{inputs: x,
output_gradients: output_gradient_vals})
output_grad_gradients = array_ops.placeholder(
dtypes.float32, shape=expected_input_gradient_vals.shape)
if pool_grad_grad_func is not None:
expected_grad_gradients = pool_grad_grad_func(
inputs,
outputs,
output_grad_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
expected_grad_gradients_vals = sess.run(expected_grad_gradients, {
inputs: x,
output_grad_gradients: output_grad_grad_vals
})
# Run the gradient op on the XLA device
with self.test_scope():
outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
xla_inputs = inputs
xla_outputs = outputs
xla_output_gradients = output_gradients
xla_output_grad_gradients = output_grad_gradients
xla_ksize = ksize
xla_strides = strides
if data_format == "NCHW":
xla_inputs = NHWCToNCHW(inputs)
xla_outputs = NHWCToNCHW(outputs)
xla_output_gradients = NHWCToNCHW(output_gradients)
xla_output_grad_gradients = NHWCToNCHW(output_grad_gradients)
xla_ksize = NHWCToNCHW(ksize)
xla_strides = NHWCToNCHW(strides)
actual_input_gradients = pool_grad_func(
xla_inputs,
xla_outputs,
xla_output_gradients,
ksize=xla_ksize,
strides=xla_strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
actual_input_gradients = NCHWToNHWC(actual_input_gradients)
if pool_grad_grad_func is not None:
actual_grad_gradients = pool_grad_grad_func(
xla_inputs,
xla_outputs,
xla_output_grad_gradients,
ksize=xla_ksize,
strides=xla_strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
actual_grad_gradients = NCHWToNHWC(actual_grad_gradients)
actual_input_gradients_vals = sess.run(actual_input_gradients, {
inputs: x,
outputs: output_vals,
output_gradients: output_gradient_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_input_gradient_vals,
actual_input_gradients_vals,
rtol=1e-4,
atol=1e-6)
self.assertShapeEqual(actual_input_gradients_vals, inputs)
if pool_grad_grad_func is not None:
actual_grad_gradients_vals = sess.run(
actual_grad_gradients, {
inputs: x,
outputs: output_vals,
output_grad_gradients: output_grad_grad_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_grad_gradients_vals,
actual_grad_gradients_vals,
rtol=1e-4,
atol=1e-6)
self.assertShapeEqual(actual_grad_gradients_vals, outputs)
def _VerifyValues(self,
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
pool_grad_grad_func=None):
"""Verifies the output values of the pooling function.
Args:
pool_func: Pooling function to be called, e.g., tf.nn.max_pool2d
pool_grad_func: Corresponding pooling gradient function.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
pool_grad_grad_func: Second-order gradient function, if available.
"""
for data_format in GetTestConfigs():
self._VerifyOneTest(
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
data_format,
pool_grad_grad_func=pool_grad_grad_func)
def _TestPooling(self, forward_op, backward_op, pool_grad_grad_func=None):
# VALID padding
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding, non square window
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
# VALID padding, uneven stride
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
pool_grad_grad_func=pool_grad_grad_func)
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding, size 4 input
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding, size 8 input
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
def testMaxPool(self):
self._TestPooling(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
pool_grad_grad_func=gen_nn_ops.max_pool_grad_grad)
def testAvgPool(self):
# Wrapper around AvgPoolGrad that ignores extra arguments needed by
# MaxPoolGrad.
def AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding,
data_format):
del outputs # Unused by average-pooling gradients.
return gen_nn_ops.avg_pool_grad(
inputs.get_shape().as_list(),
output_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
self._TestPooling(nn_ops.avg_pool, AvgPoolGrad)
# The CPU implementation of AvgPoolGrad doesn't accept kernels smaller than
# the stride size, so we only run the following tests on MaxPoolGrad.
def testMaxPoolKernelSmallerThanStrideValid(self):
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID")
def testMaxPoolKernelSmallerThanStrideSame(self):
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME")
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME")
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/pooling_ops_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA matrix diag ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.compat import compat
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
# Test cases shared by MatrixDiagV2, MatrixDiagPartV2, and MatrixSetDiagV2.
# Copied from //third_party/tensorflow/python/kernel_tests/diag_op_test.py
def square_cases():
# pyformat: disable
mat = np.array([[[1, 2, 3, 4, 5],
[6, 7, 8, 9, 1],
[3, 4, 5, 6, 7],
[8, 9, 1, 2, 3],
[4, 5, 6, 7, 8]],
[[9, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 1],
[2, 3, 4, 5, 6]]])
tests = dict()
  # tests[d_lower, d_upper] = (compact_diagonals, padded_diagonals)
tests[-1, -1] = (np.array([[6, 4, 1, 7],
[5, 2, 8, 5]]),
np.array([[[0, 0, 0, 0, 0],
[6, 0, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 7, 0]],
[[0, 0, 0, 0, 0],
[5, 0, 0, 0, 0],
[0, 2, 0, 0, 0],
[0, 0, 8, 0, 0],
[0, 0, 0, 5, 0]]]))
tests[-4, -3] = (np.array([[[8, 5],
[4, 0]],
[[6, 3],
[2, 0]]]),
np.array([[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[8, 0, 0, 0, 0],
[4, 5, 0, 0, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[6, 0, 0, 0, 0],
[2, 3, 0, 0, 0]]]))
tests[-2, 1] = (np.array([[[2, 8, 6, 3, 0],
[1, 7, 5, 2, 8],
[6, 4, 1, 7, 0],
[3, 9, 6, 0, 0]],
[[1, 7, 4, 1, 0],
[9, 6, 3, 9, 6],
[5, 2, 8, 5, 0],
[1, 7, 4, 0, 0]]]),
np.array([[[1, 2, 0, 0, 0],
[6, 7, 8, 0, 0],
[3, 4, 5, 6, 0],
[0, 9, 1, 2, 3],
[0, 0, 6, 7, 8]],
[[9, 1, 0, 0, 0],
[5, 6, 7, 0, 0],
[1, 2, 3, 4, 0],
[0, 7, 8, 9, 1],
[0, 0, 4, 5, 6]]]))
tests[2, 4] = (np.array([[[5, 0, 0],
[4, 1, 0],
[3, 9, 7]],
[[4, 0, 0],
[3, 9, 0],
[2, 8, 5]]]),
np.array([[[0, 0, 3, 4, 5],
[0, 0, 0, 9, 1],
[0, 0, 0, 0, 7],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 0, 2, 3, 4],
[0, 0, 0, 8, 9],
[0, 0, 0, 0, 5],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]]))
# pyformat: enable
return (mat, tests)
def tall_cases():
# pyformat: disable
mat = np.array([[[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[9, 8, 7],
[6, 5, 4]],
[[3, 2, 1],
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[9, 8, 7]]])
tests = dict()
  # tests[d_lower, d_upper] = (compact_diagonals, padded_diagonals)
tests[0, 0] = (np.array([[1, 5, 9],
[3, 2, 6]]),
np.array([[[1, 0, 0],
[0, 5, 0],
[0, 0, 9],
[0, 0, 0]],
[[3, 0, 0],
[0, 2, 0],
[0, 0, 6],
[0, 0, 0]]]))
tests[-4, -3] = (np.array([[[9, 5],
[6, 0]],
[[7, 8],
[9, 0]]]),
np.array([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[9, 0, 0],
[6, 5, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[7, 0, 0],
[9, 8, 0]]]))
tests[-2, -1] = (np.array([[[4, 8, 7],
[7, 8, 4]],
[[1, 5, 9],
[4, 8, 7]]]),
np.array([[[0, 0, 0],
[4, 0, 0],
[7, 8, 0],
[0, 8, 7],
[0, 0, 4]],
[[0, 0, 0],
[1, 0, 0],
[4, 5, 0],
[0, 8, 9],
[0, 0, 7]]]))
tests[-2, 1] = (np.array([[[2, 6, 0],
[1, 5, 9],
[4, 8, 7],
[7, 8, 4]],
[[2, 3, 0],
[3, 2, 6],
[1, 5, 9],
[4, 8, 7]]]),
np.array([[[1, 2, 0],
[4, 5, 6],
[7, 8, 9],
[0, 8, 7],
[0, 0, 4]],
[[3, 2, 0],
[1, 2, 3],
[4, 5, 6],
[0, 8, 9],
[0, 0, 7]]]))
tests[1, 2] = (np.array([[[3, 0],
[2, 6]],
[[1, 0],
[2, 3]]]),
np.array([[[0, 2, 3],
[0, 0, 6],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 2, 1],
[0, 0, 3],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]]))
# pyformat: enable
return (mat, tests)
def fat_cases():
# pyformat: disable
mat = np.array([[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 1, 2, 3]],
[[4, 5, 6, 7],
[8, 9, 1, 2],
[3, 4, 5, 6]]])
tests = dict()
  # tests[d_lower, d_upper] = (compact_diagonals, padded_diagonals)
tests[0, 0] = (np.array([[1, 6, 2],
[4, 9, 5]]),
np.array([[[1, 0, 0, 0],
[0, 6, 0, 0],
[0, 0, 2, 0]],
[[4, 0, 0, 0],
[0, 9, 0, 0],
[0, 0, 5, 0]]]))
tests[2, 2] = (np.array([[3, 8],
[6, 2]]),
np.array([[[0, 0, 3, 0],
[0, 0, 0, 8],
[0, 0, 0, 0]],
[[0, 0, 6, 0],
[0, 0, 0, 2],
[0, 0, 0, 0]]]))
tests[-2, 0] = (np.array([[[1, 6, 2],
[5, 1, 0],
[9, 0, 0]],
[[4, 9, 5],
[8, 4, 0],
[3, 0, 0]]]),
np.array([[[1, 0, 0, 0],
[5, 6, 0, 0],
[9, 1, 2, 0]],
[[4, 0, 0, 0],
[8, 9, 0, 0],
[3, 4, 5, 0]]]))
tests[-1, 1] = (np.array([[[2, 7, 3],
[1, 6, 2],
[5, 1, 0]],
[[5, 1, 6],
[4, 9, 5],
[8, 4, 0]]]),
np.array([[[1, 2, 0, 0],
[5, 6, 7, 0],
[0, 1, 2, 3]],
[[4, 5, 0, 0],
[8, 9, 1, 0],
[0, 4, 5, 6]]]))
tests[0, 3] = (np.array([[[4, 0, 0],
[3, 8, 0],
[2, 7, 3],
[1, 6, 2]],
[[7, 0, 0],
[6, 2, 0],
[5, 1, 6],
[4, 9, 5]]]),
np.array([[[1, 2, 3, 4],
[0, 6, 7, 8],
[0, 0, 2, 3]],
[[4, 5, 6, 7],
[0, 9, 1, 2],
[0, 0, 5, 6]]]))
# pyformat: enable
return (mat, tests)
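# In all of the cases above, tests[d_lower, d_upper] describes the band of
# diagonals between offsets d_lower and d_upper (inclusive), stored both
# compactly and as a padded matrix. As an illustrative sanity check against
# numpy (not executed by the tests):
#   mat, tests = fat_cases()
#   np.diagonal(mat[0], offset=0)  # -> [1, 6, 2], matching tests[0, 0][0][0]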
class MatrixDiagTest(xla_test.XLATestCase):
def _assertOpOutputMatchesExpected(self,
params,
solution,
rtol=1e-3,
atol=1e-5):
"""Verifies that matrix_diag produces `solution` when fed `params`.
Args:
params: dictionary containing input parameters to matrix_diag.
solution: numpy array representing the expected output of matrix_diag.
rtol: relative tolerance for equality test.
atol: absolute tolerance for equality test.
"""
diagonal = params["diagonal"]
with self.session() as session:
for dtype in self.numeric_types - {np.int8, np.uint8}:
expected = solution.astype(dtype)
with self.test_scope():
params["diagonal"] = array_ops.placeholder(
dtype, diagonal.shape, name="diagonal")
output = array_ops.matrix_diag(**params)
result = session.run(output,
{params["diagonal"]: diagonal.astype(dtype)})
self.assertEqual(output.dtype, expected.dtype)
self.assertAllCloseAccordingToType(
expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03)
# Generic tests applicable to both v1 and v2 ops.
# Originally from unary_ops_tests.py.
def testV1(self):
# pyformat: disable
vecs1 = np.array([[1, 2],
[3, 4]])
solution1 = np.array([[[1, 0], [0, 2]],
[[3, 0], [0, 4]]])
vecs2 = np.array([1, 2, 3, 4])
solution2 = np.array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
vecs3 = np.array([[[1, 2, 3],
[4, 5, 6]],
[[7, 8, 9], # pylint: disable=bad-whitespace
[10, 11, 12]]])
solution3 = np.array([[[[1, 0, 0],
[0, 2, 0],
[0, 0, 3]],
[[4, 0, 0],
[0, 5, 0],
[0, 0, 6]]],
[[[7, 0, 0],
[0, 8, 0],
[0, 0, 9]],
[[10, 0, 0],
[0, 11, 0],
[0, 0, 12]]]])
# pyformat: enable
self._assertOpOutputMatchesExpected({"diagonal": vecs1}, solution1)
self._assertOpOutputMatchesExpected({"diagonal": vecs2}, solution2)
self._assertOpOutputMatchesExpected({"diagonal": vecs3}, solution3)
# From here onwards are v2-only tests.
def testSquare(self):
# LINT.IfChange
if compat.forward_compatible(2019, 8, 31):
# LINT.ThenChange(//tensorflow/python/ops/array_ops.py)
for _, tests in [square_cases()]:
for diag_index, (vecs, solution) in tests.items():
self._assertOpOutputMatchesExpected(
{
"diagonal": vecs[0],
"k": diag_index
}, solution[0])
def testSquareBatch(self):
# LINT.IfChange
if compat.forward_compatible(2019, 8, 31):
# LINT.ThenChange(//tensorflow/python/ops/array_ops.py)
for _, tests in [square_cases()]:
for diag_index, (vecs, solution) in tests.items():
self._assertOpOutputMatchesExpected(
{
"diagonal": vecs,
"k": diag_index
}, solution)
def testRectangularBatch(self):
# LINT.IfChange
if not compat.forward_compatible(2019, 8, 31):
# LINT.ThenChange(//tensorflow/python/ops/array_ops.py)
return
# Stores expected num_rows and num_cols (when the other is given).
# expected[(d_lower, d_upper)] = (expected_num_rows, expected_num_cols)
test_list = list()
# Square cases:
expected = {
(-1, -1): (5, 4),
(-4, -3): (5, 2),
(-2, 1): (5, 5),
(2, 4): (3, 5),
}
test_list.append((expected, square_cases()))
# Tall cases
expected = {
(0, 0): (3, 3),
(-4, -3): (5, 2),
(-2, -1): (4, 3),
(-2, 1): (3, 3),
(1, 2): (2, 3)
}
test_list.append((expected, tall_cases()))
# Fat cases
expected = {
(2, 2): (2, 4),
(-2, 0): (3, 3),
(-1, 1): (3, 3),
(0, 3): (3, 3)
}
test_list.append((expected, fat_cases()))
# Giving both num_rows and num_cols
for _, tests in [tall_cases(), fat_cases()]:
for diag_index, (vecs, solution) in tests.items():
self._assertOpOutputMatchesExpected(
{
"diagonal": vecs,
"k": diag_index,
"num_rows": solution.shape[-2],
"num_cols": solution.shape[-1]
}, solution)
# Giving just num_rows or num_cols.
for expected, (_, tests) in test_list:
for diag_index, (new_num_rows, new_num_cols) in expected.items():
vecs, solution = tests[diag_index]
solution_given_num_rows = solution.take(
indices=range(new_num_cols), axis=-1)
self._assertOpOutputMatchesExpected(
{
"diagonal": vecs,
"k": diag_index,
"num_rows": solution_given_num_rows.shape[-2]
}, solution_given_num_rows)
solution_given_num_cols = solution.take(
indices=range(new_num_rows), axis=-2)
self._assertOpOutputMatchesExpected(
{
"diagonal": vecs,
"k": diag_index,
"num_cols": solution_given_num_cols.shape[-1]
}, solution_given_num_cols)
def testPadding(self):
# LINT.IfChange
if compat.forward_compatible(2019, 8, 31):
# LINT.ThenChange(//tensorflow/python/ops/array_ops.py)
for padding_value in [555, -11]:
for _, tests in [square_cases(), tall_cases(), fat_cases()]:
for diag_index, (vecs, solution) in tests.items():
mask = (solution == 0)
solution = solution + (mask * padding_value)
self._assertOpOutputMatchesExpected(
{
"diagonal": vecs,
"k": diag_index,
"num_rows": solution.shape[-2],
"num_cols": solution.shape[-1],
"padding_value": padding_value
}, solution)
class MatrixSetDiagTest(xla_test.XLATestCase):
def _assertOpOutputMatchesExpected(self,
params,
solution,
rtol=1e-3,
atol=1e-5):
"""Verifies that matrix_set_diag produces `solution` when fed `params`.
Args:
params: dictionary containing input parameters to matrix_set_diag.
solution: numpy array representing the expected output of matrix_set_diag.
rtol: relative tolerance for equality test.
atol: absolute tolerance for equality test.
"""
input = params["input"] # pylint: disable=redefined-builtin
diagonal = params["diagonal"]
with self.session() as session:
for dtype in self.numeric_types - {np.int8, np.uint8}:
expected = solution.astype(dtype)
with self.test_scope():
params["input"] = array_ops.placeholder(
dtype, input.shape, name="input")
params["diagonal"] = array_ops.placeholder(
dtype, diagonal.shape, name="diagonal")
output = array_ops.matrix_set_diag(**params)
result = session.run(
output, {
params["input"]: input.astype(dtype),
params["diagonal"]: diagonal.astype(dtype)
})
self.assertEqual(output.dtype, expected.dtype)
self.assertAllCloseAccordingToType(
expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03)
# Generic tests applicable to both v1 and v2 ops.
# Originally from binary_ops_tests.py.
def testV1(self):
test_cases = list()
# pyformat: disable
# pylint: disable=bad-whitespace
# Square cases.
input = np.array([[0, 1, 0], # pylint: disable=redefined-builtin
[1, 0, 1],
[1, 1, 1]])
diag = np.array([1, 2, 3])
solution = np.array([[1, 1, 0],
[1, 2, 1],
[1, 1, 3]])
test_cases.append(({"input": input, "diagonal": diag}, solution))
input = np.array([[[1, 0, 3],
[0, 2, 0],
[1, 0, 3]],
[[4, 0, 4],
[0, 5, 0],
[2, 0, 6]]])
diag = np.array([[-1, 0, -3],
[-4, -5, -6]])
solution = np.array([[[-1, 0, 3],
[ 0, 0, 0],
[ 1, 0, -3]],
[[-4, 0, 4],
[ 0, -5, 0],
[ 2, 0, -6]]])
test_cases.append(({"input": input, "diagonal": diag}, solution))
# Rectangular cases.
input = np.array([[0, 1, 0],
[1, 0, 1]])
diag = np.array([3, 4])
solution = np.array([[3, 1, 0],
[1, 4, 1]])
test_cases.append(({"input": input, "diagonal": diag}, solution))
input = np.array([[0, 1],
[1, 0],
[1, 1]])
diag = np.array([3, 4])
solution = np.array([[3, 1],
[1, 4],
[1, 1]])
test_cases.append(({"input": input, "diagonal": diag}, solution))
input = np.array([[[1, 0, 3],
[0, 2, 0]],
[[4, 0, 4],
[0, 5, 0]]])
diag = np.array([[-1, -2], [-4, -5]])
solution = np.array([[[-1, 0, 3],
[ 0, -2, 0]],
[[-4, 0, 4],
[ 0, -5, 0]]])
test_cases.append(({"input": input, "diagonal": diag}, solution))
# pylint: enable=bad-whitespace
# pyformat: enable
for test in test_cases:
self._assertOpOutputMatchesExpected(test[0], test[1])
# From here onwards are v2-only tests.
def testSingleMatrix(self):
# LINT.IfChange
if compat.forward_compatible(2019, 8, 31):
# LINT.ThenChange(//tensorflow/python/ops/array_ops.py)
for _, tests in [square_cases(), tall_cases(), fat_cases()]:
for diag_index, (vecs, banded_mat) in tests.items():
mask = (banded_mat[0] == 0)
input_mat = np.random.randint(10, size=mask.shape)
solution = input_mat * mask + banded_mat[0]
self._assertOpOutputMatchesExpected(
{
"input": input_mat,
"diagonal": vecs[0],
"k": diag_index
}, solution)
def testBatch(self):
# LINT.IfChange
if compat.forward_compatible(2019, 8, 31):
# LINT.ThenChange(//tensorflow/python/ops/array_ops.py)
for _, tests in [square_cases(), tall_cases(), fat_cases()]:
for diag_index, (vecs, banded_mat) in tests.items():
mask = (banded_mat == 0)
input_mat = np.random.randint(10, size=mask.shape)
solution = input_mat * mask + banded_mat
self._assertOpOutputMatchesExpected(
{
"input": input_mat,
"diagonal": vecs,
"k": diag_index
}, solution)
class MatrixDiagPartTest(xla_test.XLATestCase):
def _assertOpOutputMatchesExpected(self,
params,
solution,
rtol=1e-3,
atol=1e-5):
"""Verifies that matrix_diag_part produces `solution` when fed `params`.
Args:
params: dictionary containing input parameters to matrix_diag_part.
solution: numpy array representing the expected output.
rtol: relative tolerance for equality test.
atol: absolute tolerance for equality test.
"""
input = params["input"] # pylint: disable=redefined-builtin
with self.session() as session:
for dtype in self.numeric_types - {np.int8, np.uint8}:
expected = solution.astype(dtype)
with self.test_scope():
params["input"] = array_ops.placeholder(
dtype, input.shape, name="input")
output = array_ops.matrix_diag_part(**params)
result = session.run(output, {
params["input"]: input.astype(dtype),
})
self.assertEqual(output.dtype, expected.dtype)
self.assertAllCloseAccordingToType(
expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03)
# Generic tests applicable to both v1 and v2 ops.
# Originally from unary_ops_tests.py.
def testV1(self):
matrices = np.arange(3 * 2 * 4).reshape([3, 2, 4])
solution = np.array([[0, 5], [8, 13], [16, 21]])
self._assertOpOutputMatchesExpected({"input": matrices}, solution)
# From here onwards are v2-only tests.
def testSingleMatrix(self):
# LINT.IfChange
if compat.forward_compatible(2019, 8, 31):
# LINT.ThenChange(//tensorflow/python/ops/array_ops.py)
for mat, tests in [square_cases(), tall_cases(), fat_cases()]:
for diag_index, (solution, _) in tests.items():
self._assertOpOutputMatchesExpected({
"input": mat[0],
"k": diag_index
}, solution[0])
def testBatch(self):
# LINT.IfChange
if compat.forward_compatible(2019, 8, 31):
# LINT.ThenChange(//tensorflow/python/ops/array_ops.py)
for mat, tests in [square_cases(), tall_cases(), fat_cases()]:
for diag_index, (solution, _) in tests.items():
self._assertOpOutputMatchesExpected({
"input": mat,
"k": diag_index
}, solution)
def testPadding(self):
# LINT.IfChange
if compat.forward_compatible(2019, 8, 31):
# LINT.ThenChange(//tensorflow/python/ops/array_ops.py)
for padding_value in [555, -11]:
for mat, tests in [square_cases(), tall_cases(), fat_cases()]:
for diag_index, (solution, _) in tests.items():
mask = (solution == 0)
solution = solution + (mask * padding_value)
self._assertOpOutputMatchesExpected(
{
"input": mat,
"k": diag_index,
"padding_value": padding_value
}, solution)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/matrix_diag_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ExtractImagePatches op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ExtractImagePatches(xla_test.XLATestCase):
"""Functional tests for ExtractImagePatches op."""
def _VerifyValues(self, image, ksizes, strides, rates, padding, patches):
"""Tests input-output pairs for the ExtractImagePatches op.
Args:
image: Input tensor with shape: [batch, in_rows, in_cols, depth].
ksizes: Patch size specified as: [ksize_rows, ksize_cols].
strides: Output strides, specified as [stride_rows, stride_cols].
rates: Atrous rates, specified as [rate_rows, rate_cols].
padding: Padding type.
patches: Expected output.
"""
ksizes = [1] + ksizes + [1]
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.session():
image_placeholder = array_ops.placeholder(dtypes.float32)
with self.test_scope():
out_tensor = array_ops.extract_image_patches(
image_placeholder,
ksizes=ksizes,
strides=strides,
rates=rates,
padding=padding,
name="im2col")
feed_dict = {image_placeholder: image}
self.assertAllClose(patches, out_tensor.eval(feed_dict=feed_dict))
def testKsize1x1Stride1x1Rate1x1(self):
"""Verifies that for 1x1 kernel the output equals the input."""
# [2, 3, 4, 5]
image = np.reshape(range(120), [2, 3, 4, 5])
# [2, 3, 4, 5]
patches = np.reshape(range(120), [2, 3, 4, 5])
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1],
strides=[1, 1],
rates=[1, 1],
padding=padding,
patches=patches)
def testKsize1x1Stride2x3Rate1x1(self):
"""Test for 1x1 kernel and strides."""
# [2, 4, 5, 3]
image = np.reshape(range(120), [2, 4, 5, 3])
# [2, 2, 2, 3]
patches = image[:, ::2, ::3, :]
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1],
strides=[2, 3],
rates=[1, 1],
padding=padding,
patches=patches)
def testKsize2x2Stride1x1Rate1x1Valid(self):
"""Test for 2x2 kernel with VALID padding."""
# [1, 2, 2, 1]
image = [[[[1], [2]], [[3], [4]]]]
# [1, 1, 1, 4]
patches = [[[[1, 2, 3, 4]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
patches=patches)
def testKsize2x2Stride1x1Rate1x1Same(self):
"""Test for 2x2 kernel with SAME padding."""
# [1, 2, 2, 1]
image = [[[[1], [2]], [[3], [4]]]]
# [1, 2, 2, 4]
patches = [[[[1, 2, 3, 4], [2, 0, 4, 0]], [[3, 4, 0, 0], [4, 0, 0, 0]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
patches=patches)
def testKsize2x2Stride1x1Rate2x2Valid(self):
"""Test for 2x2 kernel with 2x2 dilation."""
    # [1, 4, 4, 1]
image = np.arange(16).reshape(1, 4, 4, 1).astype(np.float32)
# [1, 2, 2, 4]
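    # With rate 2 the 2x2 kernel spans 3 pixels per dimension, so the first
    # patch samples pixels (0, 0), (0, 2), (2, 0), (2, 2) -> 0, 2, 8, 10.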
patches = [[[[0, 2, 8, 10], [1, 3, 9, 11]],
[[4, 6, 12, 14], [5, 7, 13, 15]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[2, 2],
padding="VALID",
patches=patches)
def testKsize2x2Stride1x1Rate1x1ValidDepth2(self):
"""Test for 2x2 kernel with VALID padding."""
# [1, 2, 2, 2]
image = [[[[1, 5], [2, 6]], [[3, 7], [4, 8]]]]
# [1, 1, 1, 8]
patches = [[[[1, 5, 2, 6, 3, 7, 4, 8]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
patches=patches)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/extract_image_patches_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for while loops in XLA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class WhileTest(xla_test.XLATestCase):
def testSingletonLoopHandrolled(self):
# Define a function for the loop body
@function.Defun(dtypes.int32)
def loop_body(step):
step_out = step + constant_op.constant(1, dtype=dtypes.int32)
return step_out
# Define a function for the loop condition
@function.Defun(dtypes.int32)
def loop_cond(step):
return step < 10
with self.session() as sess:
init_index = array_ops.placeholder(dtypes.int32, [])
with self.test_scope():
loop_outputs = xla.while_loop([init_index], loop_cond, loop_body)
result = sess.run(loop_outputs, {init_index: 0})
self.assertAllClose(result, [10], rtol=1e-3)
def testCountingLoopHandrolled(self):
# Define a function for the loop body
@function.Defun(dtypes.int32, dtypes.float32)
def loop_body(step, rsum):
step_out = step + constant_op.constant(1, dtype=dtypes.int32)
sum_out = rsum + constant_op.constant(1.5, dtype=dtypes.float32)
return step_out, sum_out
# Define a function for the loop condition
@function.Defun(dtypes.int32, dtypes.float32)
def loop_cond(step, rsum):
del rsum
return step < 10
with self.session() as sess:
init_index = array_ops.placeholder(dtypes.int32, [])
init_sum = array_ops.placeholder(dtypes.float32, [])
with self.test_scope():
loop_outputs = xla.while_loop([init_index, init_sum], loop_cond,
loop_body)
result = sess.run(loop_outputs, {init_index: 0, init_sum: 0.0})
self.assertAllClose(result, [10, 15.0], rtol=1e-3)
no_iters_result = sess.run(loop_outputs, {init_index: 10, init_sum: 0.0})
self.assertAllClose(no_iters_result, [10, 0.0], rtol=1e-3)
def testCountingLoopHandrolledC64(self):
# Define a function for the loop body
@function.Defun(dtypes.int32, dtypes.complex64)
def loop_body(step, rsum):
step_out = step + constant_op.constant(1, dtype=dtypes.int32)
sum_out = rsum + constant_op.constant(1.5 + 2j, dtype=dtypes.complex64)
return step_out, sum_out
# Define a function for the loop condition
@function.Defun(dtypes.int32, dtypes.complex64)
def loop_cond(step, rsum):
del rsum
return step < 10
with self.session() as sess:
init_index = array_ops.placeholder(dtypes.int32, [])
init_sum = array_ops.placeholder(dtypes.complex64, [])
with self.test_scope():
loop_outputs = xla.while_loop([init_index, init_sum], loop_cond,
loop_body)
result = sess.run(loop_outputs, {init_index: 0, init_sum: 0.0})
self.assertAllClose(result[1], np.complex64(15 + 20j), rtol=1e-3)
no_iters_result = sess.run(loop_outputs, {init_index: 10, init_sum: 0.0})
self.assertAllClose(no_iters_result[1], np.complex64(0), rtol=1e-3)
def testLoopWithConstantOutput(self):
# Define a function for the loop body
@function.Defun(dtypes.int32, dtypes.int32)
def loop_body(step, x):
del x
step_out = step + constant_op.constant(1, dtype=dtypes.int32)
return (step_out, 7)
# Define a function for the loop condition
@function.Defun(dtypes.int32, dtypes.int32)
def loop_cond(step, x):
del x
return step < 10
with self.session() as sess:
init_index = array_ops.placeholder(dtypes.int32, [])
with self.test_scope():
loop_outputs = xla.while_loop([init_index, 42], loop_cond, loop_body)
result = sess.run(loop_outputs, {init_index: 0})
self.assertAllClose(result, [10, 7], rtol=1e-3)
def _testMaxItersSimple(self):
if is_compile_on_demand():
self.skipTest("list_ops are not supported in cpu_ondemand")
with self.session() as sess, self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
v = constant_op.constant(1.0)
p = array_ops.placeholder(dtype=dtypes.int32)
def create_while_loop():
iterations = array_ops.size(p, name="iterations")
r = control_flow_ops.while_loop(
lambda *_: True,
lambda i, x: (i + 1, v * x), (0, 1.0),
maximum_iterations=iterations,
name="outer")
return array_ops.identity(r[1])
output = create_while_loop()
output = gradients_impl.gradients(output, v)[0]
result = sess.run(output, feed_dict={p: [0, 0, 0]})
print(result)
xla_context.Exit()
def testMaxItersSimple(self):
self.skipTest("Fails with v1 control flow")
    # This fails with old (v1) control flow.
# self._testMaxItersSimple()
@test_util.enable_control_flow_v2
def testMaxItersSimpleV2(self):
self._testMaxItersSimple()
def _testNestedWhileLoopWithMaxItersFromOuterContext(self):
if is_compile_on_demand():
self.skipTest("list_ops are not supported in cpu_ondemand")
with self.session() as sess, self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
v = constant_op.constant(1.0)
p = array_ops.placeholder(dtype=dtypes.int32)
def mid_body_builder(iterations):
def mid_body(i, x):
r = control_flow_ops.while_loop(
lambda *_: True,
lambda i, x: (i + 1, v * x), (0, x),
maximum_iterations=iterations,
name="inner")
return (i + 1, gradients_impl.gradients(x + r[1], v)[0])
return mid_body
def outer_body(i, x):
iterations = array_ops.size(p, name="iterations")
return (i + 1, x + control_flow_ops.while_loop(
lambda *_: True,
mid_body_builder(iterations), (0, x),
maximum_iterations=iterations,
name="mid")[1])
def create_while_loop():
r = control_flow_ops.while_loop(
lambda *_: True,
outer_body, (0, 1.0),
maximum_iterations=5,
name="outer")
return array_ops.identity(r[1])
# p:placeholder
# j = 0
# i, x = 0, 1.
# while j++ < 5:
# i1, x1 = 0, x
# while i1++ < len(p):
# i2, x2 = 0, x1
# while i2++ < len(p):
# x2 = v * x2
# x1 = grad(x1 + x2, v)
# x = x1
# output = x
output = create_while_loop()
sess.run(output, feed_dict={p: [0, 0, 0]})
xla_context.Exit()
def testNestedWhileLoopWithMaxItersFromOuterContext(self):
self._testNestedWhileLoopWithMaxItersFromOuterContext()
@test_util.enable_control_flow_v2
def testNestedWhileLoopWithMaxItersFromOuterContextV2(self):
self._testNestedWhileLoopWithMaxItersFromOuterContext()
@test_util.enable_control_flow_v2
def testMap(self):
if is_compile_on_demand():
self.skipTest("list_ops are not supported in cpu_ondemand")
with self.session(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
nums = [1, 2, 3, 4, 5, 6]
elems = constant_op.constant(nums, name="data")
r = map_fn.map_fn(lambda x: math_ops.multiply(math_ops.add(x, 3), 2),
elems)
self.assertAllEqual(r, np.array([(x + 3) * 2 for x in nums]))
xla_context.Exit()
def is_compile_on_demand():
return ("TF_XLA_FLAGS" in os.environ and
"tf_xla_compile_on_demand" in os.environ["TF_XLA_FLAGS"])
if __name__ == "__main__":
os.environ["TF_XLA_FLAGS"] = ("--tf_xla_min_cluster_size=2 " +
os.environ.get("TF_XLA_FLAGS", ""))
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/while_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ArgMin and ArgMax Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ArgMinMaxTest(xla_test.XLATestCase):
def _assertOpOutputMatchesExpected(self, op, axis, output_type, op_input,
expected):
"""Verifies that 'op' produces 'expected' when fed input 'op_input' .
Args:
op: argmin or argmax operator to test.
axis: integer axis to reduce across.
output_type: numpy datatype of the output to produce.
op_input: numpy input array to use as input to 'op'.
expected: numpy array representing the expected output of 'op'.
"""
with self.session() as session:
with self.test_scope():
pinp = array_ops.placeholder(
dtypes.as_dtype(op_input.dtype), op_input.shape, name="a")
output = op(pinp, axis=axis, output_type=output_type)
result = session.run(output, {pinp: op_input})
self.assertAllEqual(result, expected)
def testArgMinMax(self):
# Complex numbers do not support argmin/argmax.
minmax_types = self.all_types & {np.int32, np.int64}
for dtype in self.int_types | self.float_types:
# output_type is a numpy data type that is used to specify the desired
# output type of the op as well as to convert the Python number to the
# array scalar of the type.
for output_type in minmax_types:
self._assertOpOutputMatchesExpected(
math_ops.argmax,
axis=0,
output_type=output_type,
op_input=np.array([1, 10, 27, 3, 3, 4], dtype=dtype),
expected=output_type(2))
self._assertOpOutputMatchesExpected(
math_ops.argmax,
axis=0,
output_type=output_type,
op_input=np.array([[4, 1, 7], [3, 2, 4]], dtype=dtype),
expected=np.array([0, 1, 0], dtype=output_type))
self._assertOpOutputMatchesExpected(
math_ops.argmax,
axis=1,
output_type=output_type,
op_input=np.array([[4, 1], [3, 2]], dtype=dtype),
expected=np.array([0, 0], dtype=output_type))
self._assertOpOutputMatchesExpected(
math_ops.argmin,
axis=0,
output_type=output_type,
op_input=np.array([3, 10, 27, 3, 2, 4], dtype=dtype),
expected=output_type(4))
self._assertOpOutputMatchesExpected(
math_ops.argmin,
axis=0,
output_type=output_type,
op_input=np.array([[4, 1, 7], [3, 2, 4]], dtype=dtype),
expected=np.array([1, 0, 1], dtype=output_type))
self._assertOpOutputMatchesExpected(
math_ops.argmin,
axis=1,
output_type=output_type,
op_input=np.array([[4, 1], [3, 2]], dtype=dtype),
expected=np.array([1, 1], dtype=output_type))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tests/argminmax_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental library that exposes XLA operations directly in TensorFlow.
It is sometimes useful to be able to build HLO programs directly from
TensorFlow. This file provides TensorFlow operators that mirror the semantics of
HLO operators as closely as possible.
Note: There is no promise of backward or forward compatibility for operators
defined in this module. This is primarily because the underlying HLO operators
do not promise backward or forward compatibility.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tf2xla.ops import gen_xla_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# TODO(phawkins): provide wrappers for all XLA operators. Currently the missing
# ops include:
# infeed/outfeed (available via tf.contrib.tpu)
# collectives, e.g., cross-replica-sum (available via tf.contrib.tpu)
# conditional
# gather/scatter
# collapse
# This file reuses builtin names (following XLA's names, so we can call things
# like xla.max), so we capture the builtin versions here.
# pylint: disable=redefined-builtin
_max = max
_min = min
_slice = slice # pylint: disable=invalid-name
constant = constant_op.constant
# Unary operators.
# For most arithmetic operators there is a TensorFlow operator
# that exactly corresponds to each XLA operator. Rather than defining
# XLA-specific variants, we reuse the corresponding TensorFlow operator.
# TODO(phawkins): It would be even better to have TensorFlow operators that 1:1
# wrap every HLO operator, because that would allow us to be confident that the
# semantics match.
def _unary_op(fn):
"""Wrapper that restricts `fn` to have the correct signature."""
def unary_op_wrapper(x, name=None):
return fn(x, name=name)
return unary_op_wrapper
abs = _unary_op(math_ops.abs)
# TODO(phawkins): implement clz.
conj = _unary_op(math_ops.conj)
cos = _unary_op(math_ops.cos)
ceil = _unary_op(math_ops.ceil)
digamma = _unary_op(math_ops.digamma)
erf = _unary_op(math_ops.erf)
erfc = _unary_op(math_ops.erfc)
# TODO(phawkins): implement erfinv
exp = _unary_op(math_ops.exp)
expm1 = _unary_op(math_ops.expm1)
floor = _unary_op(math_ops.floor)
imag = _unary_op(math_ops.imag)
is_finite = _unary_op(math_ops.is_finite)
lgamma = _unary_op(math_ops.lgamma)
log = _unary_op(math_ops.log)
log1p = _unary_op(math_ops.log1p)
logical_not = _unary_op(math_ops.logical_not)
neg = _unary_op(math_ops.neg)
real = _unary_op(math_ops.real)
# TODO(phawkins): unlike xla::Round, this rounds to even instead of zero for
# numbers halfway between two integers.
round = _unary_op(math_ops.round)
sin = _unary_op(math_ops.sin)
sign = _unary_op(math_ops.sign)
tanh = _unary_op(math_ops.tanh)
# Binary operators
# The main difference between TensorFlow and XLA binary ops is the broadcasting
# semantics. TensorFlow uses Numpy-style broadcasting semantics, whereas XLA
# requires an explicit specification of which dimensions to broadcast if the
# arguments have different ranks.
def _broadcasting_binary_op(fn):
"""Wraps a binary Tensorflow operator and performs XLA-style broadcasting."""
def broadcasting_binary_op_wrapper(x, y, broadcast_dims=None, name=None):
"""Inner wrapper function."""
broadcast_dims = broadcast_dims or []
broadcast_dims = ops.convert_to_tensor(broadcast_dims, dtypes.int64)
# Rather than relying on having static shape information in the TensorFlow
# graph, we use an XlaBroadcastHelper op that can compute the correct shapes
# at JIT compilation time.
x, y = gen_xla_ops.xla_broadcast_helper(x, y, broadcast_dims)
return fn(x, y, name=name)
return broadcasting_binary_op_wrapper
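# Illustrative sketch (assumed shapes, not exercised anywhere in this module):
# with XLA-style broadcasting the caller states which dimension of the
# higher-rank operand the lower-rank operand lines up with, instead of relying
# on NumPy-style trailing-dimension alignment.
#
#   x = constant([[1., 2.], [3., 4.]])   # shape [2, 2]
#   y = constant([10., 20.])             # shape [2]
#   z = add(x, y, broadcast_dims=[0])    # y lines up with dimension 0 of x
#   # z == [[11., 12.], [23., 24.]]      # rows, not columns, are offset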
# Map from TF signed types to TF unsigned types.
_SIGNED_TO_UNSIGNED_TABLE = {
dtypes.int8: dtypes.uint8,
dtypes.int16: dtypes.uint16,
dtypes.int32: dtypes.uint32,
dtypes.int64: dtypes.uint64,
}
# Map from TF unsigned types to TF signed types.
_UNSIGNED_TO_SIGNED_TABLE = {
dtypes.uint8: dtypes.int8,
dtypes.uint16: dtypes.int16,
dtypes.uint32: dtypes.int32,
dtypes.uint64: dtypes.int64,
}
def _shift_right_logical_helper(x, y, name=None):
"""Performs an integer right logical shift irrespective of input type."""
assert y.dtype == x.dtype
dtype = x.dtype
signed = dtype in _SIGNED_TO_UNSIGNED_TABLE
if signed:
unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[dtype]
x = math_ops.cast(x, unsigned_dtype)
y = math_ops.cast(y, unsigned_dtype)
output = bitwise_ops.right_shift(x, y, name=name)
if signed:
output = math_ops.cast(output, dtype)
return output
def _shift_right_arithmetic_helper(x, y, name=None):
"""Performs an integer right arithmetic shift irrespective of input type."""
assert y.dtype == x.dtype
dtype = x.dtype
unsigned = dtype in _UNSIGNED_TO_SIGNED_TABLE
if unsigned:
signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[dtype]
x = math_ops.cast(x, signed_dtype)
y = math_ops.cast(y, signed_dtype)
output = bitwise_ops.right_shift(x, y, name=name)
if unsigned:
output = math_ops.cast(output, dtype)
return output
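# For intuition only (assumed values, not used by this module): the cast trick
# above is the usual way to recover the "other" shift semantics from a
# fixed-signedness shift. A NumPy sketch:
#
#   import numpy as np
#   x = np.array([-2], dtype=np.int8)          # bit pattern 0b11111110
#   (x.astype(np.uint8) >> 1).astype(np.int8)  # logical shift    -> [127]
#   x >> 1                                     # arithmetic shift -> [-1]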
add = _broadcasting_binary_op(math_ops.add)
sub = _broadcasting_binary_op(math_ops.sub)
mul = _broadcasting_binary_op(math_ops.mul)
div = _broadcasting_binary_op(math_ops.div)
rem = _broadcasting_binary_op(gen_math_ops.mod)
max = _broadcasting_binary_op(math_ops.maximum)
min = _broadcasting_binary_op(math_ops.minimum)
atan2 = _broadcasting_binary_op(math_ops.atan2)
complex = _broadcasting_binary_op(math_ops.complex)
logical_and = _broadcasting_binary_op(math_ops.logical_and)
logical_or = _broadcasting_binary_op(math_ops.logical_or)
logical_xor = _broadcasting_binary_op(math_ops.logical_xor)
eq = _broadcasting_binary_op(math_ops.equal)
ne = _broadcasting_binary_op(math_ops.not_equal)
ge = _broadcasting_binary_op(math_ops.greater_equal)
gt = _broadcasting_binary_op(math_ops.greater)
le = _broadcasting_binary_op(math_ops.less_equal)
lt = _broadcasting_binary_op(math_ops.less)
pow = _broadcasting_binary_op(math_ops.pow)
shift_left = _broadcasting_binary_op(bitwise_ops.left_shift)
shift_right_logical = _broadcasting_binary_op(_shift_right_logical_helper)
shift_right_arithmetic = _broadcasting_binary_op(_shift_right_arithmetic_helper)
def _binary_op(fn):
"""Wrapper that restricts `fn` to have the correct signature."""
def binary_op_wrapper(x, y, name=None):
return fn(x, y, name=name)
return binary_op_wrapper
transpose = _binary_op(array_ops.transpose)
rev = _binary_op(array_ops.reverse)
bitcast_convert_type = array_ops.bitcast
def broadcast(x, dims, name=None):
x = ops.convert_to_tensor(x)
shape = array_ops.concat([constant_op.constant(dims),
array_ops.shape(x)],
axis=0)
return array_ops.broadcast_to(x, shape, name=name)
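# Illustrative note (assumed shapes): `dims` are prepended to the existing
# shape, matching XLA's Broadcast, so broadcast(t, [2]) on a tensor t of shape
# [3] yields a [2, 3] tensor whose rows are copies of t.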
def clamp(a, x, b, name=None):
return min(max(a, x, name=name), b, name=name)
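# Illustrative note (assumed values): the argument order is (min, operand, max)
# as in xla::Clamp, so clamp(0., t, 6.) limits the values of t to [0, 6].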
concatenate = array_ops.concat
def conv(lhs,
rhs,
window_strides,
padding,
lhs_dilation,
rhs_dilation,
dimension_numbers,
feature_group_count=1,
precision_config=None,
name=None):
"""Wraps the XLA ConvGeneralDilated operator.
ConvGeneralDilated is the most general form of XLA convolution and is
documented at
https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
Args:
lhs: the input tensor
rhs: the kernel tensor
window_strides: the inter-window strides
    padding: the padding to apply at the start and end of each input dimension
lhs_dilation: dilation to apply between input elements
rhs_dilation: dilation to apply between kernel elements
dimension_numbers: a `ConvolutionDimensionNumbers` proto.
feature_group_count: number of feature groups for grouped convolution.
precision_config: a `xla.PrecisionConfig` proto.
name: an optional name for the operator
Returns:
A tensor representing the output of the convolution.
"""
precision_config_proto = ""
if precision_config:
precision_config_proto = precision_config.SerializeToString()
return gen_xla_ops.xla_conv(
lhs,
rhs,
window_strides=window_strides,
padding=padding,
lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation,
feature_group_count=feature_group_count,
dimension_numbers=dimension_numbers.SerializeToString(),
precision_config=precision_config_proto,
name=name)
convert_element_type = math_ops.cast
def dot(lhs, rhs, name=None):
return math_ops.tensordot(lhs, rhs, axes=1, name=name)
def dot_general(lhs, rhs, dimension_numbers, precision_config=None, name=None):
precision_config_proto = ""
if precision_config:
precision_config_proto = precision_config.SerializeToString()
return gen_xla_ops.xla_dot(
lhs,
rhs,
dimension_numbers=dimension_numbers.SerializeToString(),
precision_config=precision_config_proto,
name=name)
def self_adjoint_eig(a, lower, max_iter, epsilon):
return gen_xla_ops.xla_self_adjoint_eig(a, lower, max_iter, epsilon)
def svd(a, max_iter, epsilon, precision_config=None):
precision_config_proto = ""
if precision_config:
precision_config_proto = precision_config.SerializeToString()
return gen_xla_ops.xla_svd(a, max_iter, epsilon, precision_config_proto)
dynamic_slice = gen_xla_ops.xla_dynamic_slice
dynamic_update_slice = gen_xla_ops.xla_dynamic_update_slice
einsum = gen_xla_ops.xla_einsum
# TODO(phawkins): generalize tf.pad to support interior padding, and then remove
# the XLA-specific pad operator.
pad = gen_xla_ops.xla_pad
def random_normal(mu, sigma, dims, name=None):
mu = ops.convert_to_tensor(mu)
return random_ops.random_normal(
dims, mean=mu, stddev=sigma, dtype=mu.dtype, name=name)
def random_uniform(minval, maxval, dims, name=None):
minval = ops.convert_to_tensor(minval)
return random_ops.random_uniform(
dims, minval, maxval, dtype=minval.dtype, name=name)
recv = gen_xla_ops.xla_recv
reduce = gen_xla_ops.xla_reduce
def reduce_window(operand,
init,
reducer,
window_dimensions,
window_strides=None,
base_dilations=None,
window_dilations=None,
padding=None,
name=None):
"""Wraps the XLA ReduceWindow operator.
ReduceWindow is documented at
https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow .
Args:
operand: the input tensor
init: a scalar tensor representing the initial value for the reduction
reducer: a reduction function that combines a pair of scalars.
window_dimensions: shape of the window, as a list of integers
window_strides: inter-window strides, as a list of integers. Optional; if
      omitted, defaults to strides of 1.
    base_dilations: dilation factors to apply to the input ('base') elements,
      as a list of integers. Optional; if omitted, defaults to no dilation.
    window_dilations: dilation factors to apply to the window elements, as a
      list of integers. Optional; if omitted, defaults to no dilation.
    padding: padding to apply to 'operand'. List of (low, high) pairs of
integers that specify the padding to apply before and after each
dimension. Optional; if omitted, defaults to no padding.
name: the operator name, or None.
Returns:
A tensor that represents the output of the reduce_window operator.
"""
window_strides = window_strides or [1] * len(window_dimensions)
base_dilations = base_dilations or [1] * len(window_dimensions)
window_dilations = window_dilations or [1] * len(window_dimensions)
padding = padding or [(0, 0)] * len(window_dimensions)
return gen_xla_ops.xla_reduce_window(
input=operand,
init_value=init,
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=base_dilations,
window_dilations=window_dilations,
padding=padding,
computation=reducer,
name=name)
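# For intuition only (assumed values; padding and dilation ignored): with a max
# reducer, ReduceWindow behaves like a strided sliding-window reduction. A
# NumPy sketch over a rank-1 operand:
#
#   import numpy as np
#   operand = np.array([1, 5, 2, 8, 3, 4])
#   window, stride = 2, 2
#   [operand[i:i + window].max()
#    for i in range(0, len(operand) - window + 1, stride)]   # -> [5, 8, 4]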
replica_id = gen_xla_ops.xla_replica_id
def reshape(x, new_sizes, dimensions=None, name=None):
if dimensions is not None:
x = array_ops.transpose(x, dimensions)
x = array_ops.reshape(x, new_sizes, name=name)
return x
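# Illustrative note (assumed values): when `dimensions` is given, the operand
# is transposed into that order before reshaping. With x = [[1, 2], [3, 4]]:
#
#   reshape(x, [4], dimensions=[1, 0])   # -> [1, 3, 2, 4] (column-major read)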
def select(condition, x, y, name=None):
return array_ops.where(condition, x, y, name)
select_and_scatter = gen_xla_ops.xla_select_and_scatter
send = gen_xla_ops.xla_send
def slice(x, start_dims, limit_dims, strides):
spec = [
_slice(start, limit, stride)
for (start, limit, stride) in zip(start_dims, limit_dims, strides)
]
return x[tuple(spec)]
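# Illustrative note (assumed values): this is ordinary Python slicing with one
# (start, limit, stride) triple per dimension, e.g.
#
#   slice(x, [0, 1], [2, 3], [1, 1])   # same as x[0:2:1, 1:3:1]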
sort = gen_xla_ops.xla_sort
key_value_sort = gen_xla_ops.xla_key_value_sort
while_loop = gen_xla_ops.xla_while
dequantize = gen_xla_ops.xla_dequantize
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/tf2xla/python/xla.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lit runner configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import platform
import sys
import lit.formats
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
# Lint for undefined variables is disabled as config is not defined inside this
# file; instead, config is injected by way of evaluating runlit.cfg.py from
# runlit.site.cfg.py, which in turn is evaluated by lit.py. The structure is
# common for lit tests and intended to only persist temporarily (b/136126535).
# pylint: disable=undefined-variable
# Configuration file for the 'lit' test runner.
# name: The name of this test suite.
config.name = 'MLIR ' + os.path.basename(config.mlir_test_dir)
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = ['.cc', '.hlo', '.hlotxt', '.mlir', '.pbtxt', '.py']
# test_source_root: The root path where tests are located.
config.test_source_root = config.mlir_test_dir
# test_exec_root: The root path where tests should be run.
config.test_exec_root = os.environ['RUNFILES_DIR']
if platform.system() == 'Windows':
tool_patterns = [
ToolSubst('FileCheck.exe', unresolved='fatal'),
# Handle these specially as they are strings searched for during testing.
ToolSubst('count.exe', unresolved='fatal'),
ToolSubst('not.exe', unresolved='fatal')]
llvm_config.config.substitutions.append(
('%python', '"%s"' % (sys.executable)))
llvm_config.add_tool_substitutions(
tool_patterns, [llvm_config.config.llvm_tools_dir])
else:
llvm_config.use_default_substitutions()
# Tweak the PATH to include the tools dir.
llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
tool_dirs = config.mlir_tf_tools_dirs + [
config.mlir_tools_dir, config.llvm_tools_dir
]
tool_names = [
'mlir-opt', 'mlir-translate', 'tf-opt', 'tf_tfl_translate',
'flatbuffer_to_string', 'flatbuffer_translate', 'tf-mlir-translate',
'mlir-tflite-runner', 'hlo_to_llvm_ir'
]
tools = [ToolSubst(s, unresolved='ignore') for s in tool_names]
llvm_config.add_tool_substitutions(tools, tool_dirs)
# pylint: enable=undefined-variable
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/runlit.cfg.py
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lit runner site configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import platform
import lit.llvm
# Handle the test srcdir for platforms. On Windows, things are weird with Bazel.
if platform.system() == 'Windows':
srcdir = os.environ['TEST_SRCDIR']
real_test_srcdir = srcdir[:srcdir.find('tensorflow/compiler/mlir')]
external_srcdir = os.path.join(real_test_srcdir, 'external')
else:
real_test_srcdir = os.environ['TEST_SRCDIR']
external_srcdir = real_test_srcdir
# Lint for undefined variables is disabled as config is not defined inside this
# file; instead, config is injected by lit.py. The structure is common for lit
# tests and intended to only persist temporarily (b/136126535).
# pylint: disable=undefined-variable
config.llvm_tools_dir = os.path.join(external_srcdir, 'llvm-project', 'llvm')
config.mlir_obj_root = os.path.join(real_test_srcdir)
config.mlir_tools_dir = os.path.join(external_srcdir, 'llvm-project', 'mlir')
# TODO(jpienaar): Replace with suffixes in build rule.
config.suffixes = ['.td', '.mlir', '.pbtxt']
mlir_tf_tools_dirs = [
'tensorflow/compiler/mlir',
'tensorflow/compiler/mlir/lite',
'tensorflow/compiler/mlir/tensorflow',
'tensorflow/compiler/mlir/xla',
'tensorflow/compiler/xla/service/gpu/tests',
]
config.mlir_tf_tools_dirs = [
os.path.join(real_test_srcdir, os.environ['TEST_WORKSPACE'], s)
for s in mlir_tf_tools_dirs
]
test_dir = os.environ['TEST_TARGET']
test_dir = test_dir.strip('/').rsplit(':', 1)[0]
config.mlir_test_dir = os.path.join(real_test_srcdir,
os.environ['TEST_WORKSPACE'], test_dir)
if platform.system() == 'Windows':
# Configure this to work with msys2, TF's preferred windows bash.
config.lit_tools_dir = '/usr/bin'
lit.llvm.initialize(lit_config, config)
# Let the main config do the real work.
lit_config.load_config(
config,
os.path.join(
os.path.join(real_test_srcdir, os.environ['TEST_WORKSPACE'],
'tensorflow/compiler/mlir/runlit.cfg.py')))
# pylint: enable=undefined-variable
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/runlit.site.cfg.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test file to display the error message and verify it with FileCheck."""
# RUN: %p/saved_model_error | FileCheck %s
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from absl import app
import tensorflow.compat.v2 as tf
if hasattr(tf, 'enable_v2_behavior'):
tf.enable_v2_behavior()
class TestModule(tf.Module):
"""The test model has unsupported op."""
@tf.function(input_signature=[tf.TensorSpec(shape=[3, 3], dtype=tf.float32)])
def model(self, x):
y = tf.math.betainc(x, 0.5, 1.0) # Not supported
return y + y
class TestGraphDebugInfo(object):
"""Test stack trace can be displayed."""
def testSavedModelDebugInfo(self):
"""Save a saved model with unsupported ops, and then load and convert it."""
    # Save the model.
test_model = TestModule()
saved_model_path = '/tmp/test.saved_model'
save_options = tf.saved_model.SaveOptions(save_debug_info=True)
tf.saved_model.save(test_model, saved_model_path, options=save_options)
    # Load the model and convert it.
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path)
converter.experimental_new_converter = True
converter.convert()
# pylint: disable=line-too-long
# CHECK-LABEL: testSavedModelDebugInfo
# CHECK: error: 'tf.Betainc' op is neither a custom op nor a flex op
# CHECK: attrs=attr_protos, op_def=op_def)
# CHECK: ^
# CHECK: {{.*tensorflow/python/ops/gen_math_ops.py:[0-9]+:[0-9]+: note: called from}}
# CHECK: "Betainc", a=a, b=b, x=x, name=name)
# CHECK: ^
# CHECK: {{.*tensorflow/compiler/mlir/lite/tests/debuginfo/saved_model_error.py:[0-9]+:[0-9]+: note: called from}}
# CHECK: y = tf.math.betainc(x, 0.5, 1.0) # Not supported
# CHECK: ^
# CHECK: <unknown>:0: error: failed while converting: 'main'
# pylint: enable=line-too-long
def main(argv):
"""test driver method writes the error message to stdout."""
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
try:
TestGraphDebugInfo().testSavedModelDebugInfo()
except Exception as e: # pylint: disable=broad-except
sys.stdout.write('testSavedModelDebugInfo')
sys.stdout.write(str(e))
if __name__ == '__main__':
app.run(main)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/lite/tests/debuginfo/saved_model_error.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test file to display the error message and verify it with FileCheck."""
# RUN: %p/concrete_function_error | FileCheck %s
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from absl import app
import tensorflow.compat.v2 as tf
if hasattr(tf, 'enable_v2_behavior'):
tf.enable_v2_behavior()
class TestGraphDebugInfo(object):
"""Test stack trace can be displayed."""
def testConcreteFunctionDebugInfo(self):
"""Create a concrete func with unsupported ops, and convert it."""
@tf.function(
input_signature=[tf.TensorSpec(shape=[3, 3], dtype=tf.float32)])
def model(x):
y = tf.math.betainc(x, 0.5, 1.0) # Not supported
return y + y
func = model.get_concrete_function()
converter = tf.lite.TFLiteConverter.from_concrete_functions([func])
converter.experimental_new_converter = True
converter.convert()
# pylint: disable=line-too-long
# CHECK-LABEL: testConcreteFunctionDebugInfo
# CHECK: error: 'tf.Betainc' op is neither a custom op nor a flex op
# CHECK: attrs=attr_protos, op_def=op_def)
# CHECK: ^
# CHECK: {{.*tensorflow/python/ops/gen_math_ops.py:[0-9]+:[0-9]+: note: called from}}
# CHECK: "Betainc", a=a, b=b, x=x, name=name)
# CHECK: ^
# CHECK: {{.*tensorflow/compiler/mlir/lite/tests/debuginfo/concrete_function_error.py:[0-9]+:[0-9]+: note: called from}}
# CHECK: y = tf.math.betainc(x, 0.5, 1.0) # Not supported
# CHECK: ^
# CHECK: <unknown>:0: error: failed while converting: 'main'
# pylint: enable=line-too-long
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
try:
TestGraphDebugInfo().testConcreteFunctionDebugInfo()
except Exception as e: # pylint: disable=broad-except
sys.stdout.write('testConcreteFunctionDebugInfo')
sys.stdout.write(str(e))
if __name__ == '__main__':
app.run(main)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/lite/tests/debuginfo/concrete_function_error.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/structured_output | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
# The fNNNN name prefixes in this file are such that the sorted order of the
  # functions in the resulting MLIR output matches the order in the source file,
# allowing us to conveniently co-locate the CHECK's with the code they are
# checking.
#
# Note: CHECK-DAG doesn't work with CHECK-SAME/CHECK-NEXT.
# Check index paths for results.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = []})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0000_single_return"]
@tf.function(input_signature=[])
def f0000_single_return(self):
return tf.constant(1.0, shape=[1])
# Check index paths for results with multiple return values.
# Note that semantically in Python, multiple return values are equivalent
# to returning a tuple/list.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0001_multiple_results_no_punctuation"]
@tf.function(input_signature=[])
def f0001_multiple_results_no_punctuation(self):
return tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])
# Check index paths for results written explicitly with parentheses.
# This is semantically equivalent to the earlier test without parentheses,
# but this test serves as documentation of this behavior for the purposes
# of tf_saved_model users.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0002_multiple_results_parentheses"]
@tf.function(input_signature=[])
def f0002_multiple_results_parentheses(self):
return (tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2]))
# Check index paths for results written explicitly with brackets.
# This is semantically equivalent to the earlier test without parentheses,
# but this test serves as documentation of this behavior for the purposes
# of tf_saved_model users.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0003_multiple_results_brackets"]
@tf.function(input_signature=[])
def f0003_multiple_results_brackets(self):
return [tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])]
# Check index paths for lists.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0, 0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [0, 1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0004_list_2_elements"]
@tf.function(input_signature=[])
def f0004_list_2_elements(self):
return [[tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])]]
# Check index paths for dicts.
# Keys are linearized in sorted order, matching `tf.nest.flatten`.
# More thorough testing of this is in structured_input.py. The underlying code
# path for linearization is shared, so no need to replicate that testing here.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = ["x"]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = ["y"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0005_dict_2_keys"]
@tf.function(input_signature=[])
def f0005_dict_2_keys(self):
return {
'x': tf.constant(1.0, shape=[1]),
'y': tf.constant(1.0, shape=[2]),
}
# Check index paths for outputs are correctly handled in the presence of
# multiple return statements.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<f32> {tf_saved_model.index_path = [0]}
# CHECK-SAME: ) -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = ["x"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0006_multiple_return_statements"]
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def f0006_multiple_return_statements(self, x):
if x > 3.:
return {'x': tf.constant(1.0, shape=[1])}
else:
return {'x': tf.constant(1.0, shape=[1])}
if __name__ == '__main__':
common.do_test(TestModule)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/structured_output.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/dag_object_graph | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class Child(tf.Module):
def __init__(self):
super(Child, self).__init__()
self.my_variable = tf.Variable(3.)
# Creates a DAG object graph.
# There is only one instance of `Child`, but it is reachable via two names.
# Thus, self.my_variable is reachable via two paths.
class TestModule(tf.Module):
def __init__(self):
super(TestModule, self).__init__()
self.child1 = Child()
self.child2 = self.child1
# CHECK: tf_saved_model.global_tensor
# CHECK-SAME: tf_saved_model.exported_names = ["child1.my_variable", "child2.my_variable"]
if __name__ == '__main__':
common.do_test(TestModule)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/dag_object_graph.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/duplicate_method_names_v1 | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1
# Tests different SignatureDefs with identical method_name strings.
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: {{.*}})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["key"]
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: {{.*}})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["key2"]
def Test():
x = tf.constant(1.0, shape=(3, 3))
y = tf.constant(1.0, shape=(3, 3))
s = tf.transpose(x)
t = tf.transpose(y)
tensor_info_s = tf.compat.v1.saved_model.utils.build_tensor_info(s)
tensor_info_t = tf.compat.v1.saved_model.utils.build_tensor_info(t)
signature_def = tf.saved_model.signature_def_utils.build_signature_def(
inputs=None, outputs={'s': tensor_info_s}, method_name='some_function')
signature_def2 = tf.saved_model.signature_def_utils.build_signature_def(
inputs=None, outputs={'t': tensor_info_t}, method_name='some_function')
  # Return two signatures (under different keys) with the same method_name.
return {'key': signature_def, 'key2': signature_def2}
if __name__ == '__main__':
common_v1.set_tf_options()
common_v1.do_test(Test())
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/duplicate_method_names_v1.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/cyclic_object_graph | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class ReferencesParent(tf.Module):
def __init__(self, parent):
super(ReferencesParent, self).__init__()
self.parent = parent
# CHECK: tf_saved_model.global_tensor
# CHECK-SAME: tf_saved_model.exported_names = ["child.my_variable"]
self.my_variable = tf.Variable(3.)
# Creates a cyclic object graph.
class TestModule(tf.Module):
def __init__(self):
super(TestModule, self).__init__()
self.child = ReferencesParent(self)
if __name__ == '__main__':
common.do_test(TestModule)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/cyclic_object_graph.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/structured_input | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
# The fNNNN name prefixes in this file are such that the sorted order of the
  # functions in the resulting MLIR output matches the order in the source file,
# allowing us to conveniently co-locate the CHECK's with the code they are
# checking.
#
# Note: CHECK-DAG doesn't work with CHECK-SAME/CHECK-NEXT.
# Check index paths for arguments.
# The outer layer of the index path indexes into the arguments.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: %arg1: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0000_function_arity"]
@tf.function(input_signature=[
tf.TensorSpec([1], tf.float32),
tf.TensorSpec([2], tf.float32)
])
def f0000_function_arity(self, x, y):
return
# Check index paths for lists.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<f32> {tf_saved_model.index_path = [0, 0]},
# CHECK-SAME: %arg1: tensor<f32> {tf_saved_model.index_path = [0, 1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0001_list_2_elements"]
@tf.function(input_signature=[[
tf.TensorSpec([], tf.float32),
tf.TensorSpec([], tf.float32),
]])
def f0001_list_2_elements(self, l):
return
# Check index paths for dicts.
# Keys are linearized in sorted order, matching `tf.nest.flatten`.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<1xf32> {tf_saved_model.index_path = [0, "x"]},
# CHECK-SAME: %arg1: tensor<2xf32> {tf_saved_model.index_path = [0, "y"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0002_dict_2_keys"]
@tf.function(input_signature=[{
'x': tf.TensorSpec([1], tf.float32),
'y': tf.TensorSpec([2], tf.float32),
}])
def f0002_dict_2_keys(self, d):
return
# Check index paths for dicts, where the keys are not in sorted order.
# The index path should be insensitive to the key order.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<1xf32> {tf_saved_model.index_path = [0, "x"]},
# CHECK-SAME: %arg1: tensor<2xf32> {tf_saved_model.index_path = [0, "y"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0003_dict_2_keys_out_of_order"]
@tf.function(input_signature=[{
'y': tf.TensorSpec([2], tf.float32),
'x': tf.TensorSpec([1], tf.float32),
}])
def f0003_dict_2_keys_out_of_order(self, d):
return
# Slightly stronger stress test of multiple dict keys.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<1xf32> {tf_saved_model.index_path = [0, "a"]},
# CHECK-SAME: %arg1: tensor<2xf32> {tf_saved_model.index_path = [0, "b"]},
# CHECK-SAME: %arg2: tensor<3xf32> {tf_saved_model.index_path = [0, "c"]},
# CHECK-SAME: %arg3: tensor<4xf32> {tf_saved_model.index_path = [0, "x"]},
# CHECK-SAME: %arg4: tensor<5xf32> {tf_saved_model.index_path = [0, "y"]},
# CHECK-SAME: %arg5: tensor<6xf32> {tf_saved_model.index_path = [0, "z"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0004_dict_many_keys"]
@tf.function(input_signature=[{
'x': tf.TensorSpec([4], tf.float32),
'y': tf.TensorSpec([5], tf.float32),
'z': tf.TensorSpec([6], tf.float32),
'a': tf.TensorSpec([1], tf.float32),
'b': tf.TensorSpec([2], tf.float32),
'c': tf.TensorSpec([3], tf.float32),
}])
def f0004_dict_many_keys(self, d):
return
# Check a slightly more complex recursive structure.
  # Note that list elements can have heterogeneous types.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<1xf32> {tf_saved_model.index_path = [0, "x", 0]},
# CHECK-SAME: %arg1: tensor<2xf32> {tf_saved_model.index_path = [0, "x", 1]},
# CHECK-SAME: %arg2: tensor<3xf32> {tf_saved_model.index_path = [0, "y"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0005_more_complex_recursive_structure"]
@tf.function(input_signature=[{
'x': [tf.TensorSpec([1], tf.float32),
tf.TensorSpec([2], tf.float32)],
'y': tf.TensorSpec([3], tf.float32),
}])
def f0005_more_complex_recursive_structure(self, d):
return
if __name__ == '__main__':
common.do_test(TestModule)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/structured_input.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/keras | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
def mnist_model():
"""Creates a MNIST model."""
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
return model
class TestModule(tf.Module):
def __init__(self):
super(TestModule, self).__init__()
self.model = mnist_model()
# CHECK: func {{@[a-zA-Z_0-9]+}}(%arg0: tensor<1x28x28x1xf32> {tf_saved_model.index_path = [0]}
# CHECK: attributes {{.*}} tf_saved_model.exported_names = ["my_predict"]
@tf.function(input_signature=[
tf.TensorSpec([1, 28, 28, 1], tf.float32),
])
def my_predict(self, x):
return self.model(x)
if __name__ == '__main__':
common.do_test(TestModule, exported_names=['my_predict'])
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/keras.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Serves as a common "main" function for all the SavedModel tests.
There is a fair amount of setup needed to initialize tensorflow and get it
into a proper TF2 execution mode. This hides that boilerplate.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from tensorflow.python import pywrap_mlir # pylint: disable=g-direct-tensorflow-import
# Use /tmp to make debugging the tests easier (see README.md)
flags.DEFINE_string('save_model_path', '', 'Path to save the model to.')
FLAGS = flags.FLAGS
def set_tf_options():
  # By default, TF1.x uses reference variables, which are not supported by the
  # SavedModel V1 importer. To use the SavedModel V1 importer, resource
  # variables must be enabled.
tf.enable_resource_variables()
tf.compat.v1.disable_eager_execution()
# This function needs to take a "create_module_fn", as opposed to just the
# module itself, because the creation of the module has to be delayed until
# after absl and tensorflow have run various initialization steps.
def do_test(signature_def_map, show_debug_info=False):
"""Runs test.
1. Performs absl and tf "main"-like initialization that must run before almost
anything else.
  2. Converts signature_def_map to SavedModel V1.
  3. Converts SavedModel V1 to MLIR.
4. Prints the textual MLIR to stdout (it is expected that the caller will have
FileCheck checks in its file to check this output).
This is only for use by the MLIR SavedModel importer tests.
Args:
signature_def_map: A map from string key to signature_def. The key will be
used as function name in the resulting MLIR.
show_debug_info: If true, shows debug locations in the resulting MLIR.
"""
# Make LOG(ERROR) in C++ code show up on the console.
# All `Status` passed around in the C++ API seem to eventually go into
# `LOG(ERROR)`, so this makes them print out by default.
logging.set_stderrthreshold('error')
def app_main(argv):
"""Function passed to absl.app.run."""
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if FLAGS.save_model_path:
save_model_path = FLAGS.save_model_path
else:
save_model_path = tempfile.mkdtemp(suffix='.saved_model')
sess = tf.Session()
sess.run(tf.initializers.global_variables())
builder = tf.saved_model.builder.SavedModelBuilder(save_model_path)
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map,
strip_default_attrs=True)
builder.save()
logging.info('Saved model to: %s', save_model_path)
mlir = pywrap_mlir.experimental_convert_saved_model_v1_to_mlir(
save_model_path, ','.join([tf.saved_model.tag_constants.SERVING]),
show_debug_info)
# We don't strictly need this, but it serves as a handy sanity check
# for that API, which is otherwise a bit annoying to test.
# The canonicalization shouldn't affect these tests in any way.
mlir = pywrap_mlir.experimental_run_pass_pipeline(mlir,
'tf-standard-pipeline',
show_debug_info)
print(mlir)
app.run(app_main)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/common_v1.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/debug_info | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec([], tf.float32),
tf.TensorSpec([], tf.float32)
])
def some_function(self, x, y):
return x + y
# Basic check that the debug info file is being correctly saved and loaded.
#
# CHECK: "tf.AddV2"{{.*}}callsite("{{[^"]*}}/debug_info.py":{{[0-9]+}}:{{[0-9]+}}
if __name__ == '__main__':
common.do_test(TestModule, show_debug_info=True)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/debug_info.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/partially_shaped_variables | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
def __init__(self):
super(TestModule, self).__init__()
# CHECK: "tf_saved_model.global_tensor"() {is_mutable, {{.*}} tf_saved_model.exported_names = ["v0"], type = tensor<*xf32>, value = dense<0.000000e+00> : tensor<1xf32>} : () -> ()
# CHECK: "tf_saved_model.global_tensor"() {is_mutable, {{.*}} tf_saved_model.exported_names = ["v1"], type = tensor<?xf32>, value = dense<[0.000000e+00, 1.000000e+00]> : tensor<2xf32>} : () -> ()
self.v0 = tf.Variable([0.], shape=tf.TensorShape(None))
self.v1 = tf.Variable([0., 1.], shape=[None])
if __name__ == '__main__':
common.do_test(TestModule, exported_names=[])
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/partially_shaped_variables.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/shapes_for_arguments | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
# Check that we get shapes annotated on function arguments.
#
# Besides checking the shape on the function input argument, this test also
# checks that the shape on the input argument is propagated to the return
# value.
# We eventually want to move the shape inference to a pass separate from
# the initial import, in which case that aspect of this test doesn't make much
  # sense and will be superseded by MLIR->MLIR shape inference tests.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(%arg0: tensor<f32> {{.*}}) -> (tensor<f32> {{.*}})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["some_function"]
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def some_function(self, x):
return x
if __name__ == '__main__':
common.do_test(TestModule)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/shapes_for_arguments.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: (! %p/exported_python_args 2>&1) | FileCheck %s
# pylint: disable=missing-docstring,line-too-long,dangerous-default-value
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def some_function(self, x):
return self.callee(x)
# CHECK: While importing SavedModel function 'callee': in input signature:
# CHECK-SAME: Unhandled structured value kind {{.*}} at index path: <value>.1.foo
@tf.function
def callee(self, x, n={'foo': 42}):
return x
if __name__ == '__main__':
common.do_test(TestModule)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/exported_python_args.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Serves as a common "main" function for all the SavedModel tests.
There is a fair amount of setup needed to initialize tensorflow and get it
into a proper TF2 execution mode. This hides that boilerplate.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
from tensorflow.python import pywrap_mlir # pylint: disable=g-direct-tensorflow-import
# Use /tmp to make debugging the tests easier (see README.md)
flags.DEFINE_string('save_model_path', '',
'Path to save the model to.')
FLAGS = flags.FLAGS
# This function needs to take a "create_module_fn", as opposed to just the
# module itself, because the creation of the module has to be delayed until
# after absl and tensorflow have run various initialization steps.
def do_test(create_module_fn, exported_names=None, show_debug_info=False):
"""Runs test.
1. Performs absl and tf "main"-like initialization that must run before almost
anything else.
2. Converts `tf.Module` to SavedModel
3. Converts SavedModel to MLIR
4. Prints the textual MLIR to stdout (it is expected that the caller will have
FileCheck checks in its file to check this output).
This is only for use by the MLIR SavedModel importer tests.
Args:
create_module_fn: A callable taking no arguments, which returns the
`tf.Module` to be converted and printed.
exported_names: A set of exported names for the MLIR converter (default is
"export all").
show_debug_info: If true, shows debug locations in the resulting MLIR.
"""
if exported_names is None:
exported_names = []
# Make LOG(ERROR) in C++ code show up on the console.
# All `Status` passed around in the C++ API seem to eventually go into
# `LOG(ERROR)`, so this makes them print out by default.
logging.set_stderrthreshold('error')
# In true TF2 releases, v2 behavior is enabled as part of module __init__. In
# TF1 builds, it must be enabled manually. If you get an error here,
# it means that TF was used in V1 mode prior to calling this.
tf.enable_v2_behavior()
def app_main(argv):
"""Function passed to absl.app.run."""
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if FLAGS.save_model_path:
save_model_path = FLAGS.save_model_path
else:
save_model_path = tempfile.mkdtemp(suffix='.saved_model')
save_options = tf.saved_model.SaveOptions(save_debug_info=show_debug_info)
tf.saved_model.save(
create_module_fn(), save_model_path, options=save_options)
logging.info('Saved model to: %s', save_model_path)
mlir = pywrap_mlir.experimental_convert_saved_model_to_mlir(
save_model_path, ','.join(exported_names), show_debug_info)
# We don't strictly need this, but it serves as a handy sanity check
# for that API, which is otherwise a bit annoying to test.
# The canonicalization shouldn't affect these tests in any way.
mlir = pywrap_mlir.experimental_run_pass_pipeline(mlir, 'canonicalize',
show_debug_info)
print(mlir)
app.run(app_main)
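# Illustrative only: a minimal, hypothetical sketch of the kind of module a
# caller passes in (it mirrors basic.py further below); nothing in this module
# uses it.
class _ExampleModule(tf.Module):
  @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
  def some_function(self, x):
    return x
# A test file would then call, e.g., do_test(_ExampleModule).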
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/common.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/basic_v1 | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1
# Verify that the tf.versions attribute exists. It is difficult to enforce
# contents, since the version numbers change over time. The conversion logic
# itself is verified in the common graphdef converter, so here we just assert
# that it is being invoked.
# CHECK: module
# CHECK-SAME: tf.versions
# CHECK-SAME: bad_consumers
# CHECK-SAME: min_consumer
# CHECK-SAME: producer
# CHECK: "tf_saved_model.global_tensor"() {is_mutable, sym_name = "[[VAR:[a-zA-Z_0-9]+]]", type = tensor<1x3xf32>, value = {{.*}} : tensor<1x3xf32>} : () -> ()
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: [[ARG0:%.*]]: tensor<3x1xf32> {tf_saved_model.index_path = ["x"]},
# CHECK-SAME: [[ARG1:%.*]]: tensor<!tf.resource<tensor<1x3xf32>>> {tf_saved_model.bound_input = @[[VAR]]})
# CHECK-SAME: -> (tensor<3x3xf32> {tf_saved_model.index_path = ["r"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["key"]
# CHECK-NEXT: [[R0:%.*]] = "tf.ReadVariableOp"([[ARG1]]) {{{.*}}} : (tensor<!tf.resource<tensor<1x3xf32>>>) -> tensor<1x3xf32>
# CHECK-NEXT: [[R1:%.*]] = "tf.MatMul"([[ARG0]], [[R0]]) {{{.*}}} : (tensor<3x1xf32>, tensor<1x3xf32>) -> tensor<3x3xf32>
# CHECK-NEXT: return [[R1]] : tensor<3x3xf32>
def Test():
x = tf.constant([[1.0], [1.0], [1.0]])
y = tf.compat.v1.get_variable(
name='y',
shape=(1, 3),
initializer=tf.random_normal_initializer(),
trainable=True)
r = tf.matmul(x, y)
tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x)
tensor_info_r = tf.compat.v1.saved_model.utils.build_tensor_info(r)
return {
'key': (tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
inputs={'x': tensor_info_x},
outputs={'r': tensor_info_r},
method_name='some_function'))
}
if __name__ == '__main__':
common_v1.set_tf_options()
common_v1.do_test(Test())
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/basic_v1.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/basic | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
# Verify that the tf.versions attribute exists. It is difficult to enforce
# contents, since the version numbers change over time. The conversion logic
# itself is verified in the common graphdef converter, so here we just assert
# that it is being invoked.
# CHECK: module
# CHECK-SAME: tf.versions
# CHECK-SAME: bad_consumers
# CHECK-SAME: min_consumer
# CHECK-SAME: producer
class TestModule(tf.Module):
def __init__(self):
super(TestModule, self).__init__()
self.v42 = tf.Variable(42.0)
self.c43 = tf.constant(43.0)
# CHECK: "tf_saved_model.global_tensor"() {is_mutable, sym_name = "[[VAR:[a-zA-Z_0-9]+]]", tf_saved_model.exported_names = ["v42"], type = tensor<f32>, value = dense<4.200000e+01> : tensor<f32>} : () -> ()
# CHECK: "tf_saved_model.global_tensor"() {sym_name = "[[CONST:[a-zA-Z_0-9]+]]", tf_saved_model.exported_names = [], type = tensor<f32>, value = dense<4.300000e+01> : tensor<f32>} : () -> ()
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<f32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: %arg1: tensor<!tf.resource<tensor<f32>>> {tf_saved_model.bound_input = @[[VAR]]},
# CHECK-SAME: %arg2: tensor<!tf.resource<tensor<f32>>> {tf_saved_model.bound_input = @[[CONST]]}) -> (
# CHECK-SAME: tensor<f32> {tf_saved_model.index_path = []})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["some_function"]
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def some_function(self, x):
return x + self.v42 + self.c43
if __name__ == '__main__':
common.do_test(TestModule)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/basic.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/shapes_for_variables | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
# Check that we get shapes for variables used in the graph.
# In this case, what we are testing is that the return type of the function is
# correctly inferred, which requires understanding the shape of the variable
# (in particular, the ReadVariableOp that reads it and returns a tensor).
#
# We eventually want to move the shape inference to a pass separate from
# the initial import, in which case this test doesn't make much sense and
  # will be superseded by MLIR->MLIR shape inference tests.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}({{.*}}) -> (tensor<f32> {{.*}})
# CHECK: tf_saved_model.exported_names = ["some_function"]
def __init__(self):
super(TestModule, self).__init__()
self.my_variable = tf.Variable(42.)
@tf.function(input_signature=[])
def some_function(self):
return self.my_variable
if __name__ == '__main__':
common.do_test(TestModule)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/shapes_for_variables.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/call_to_exported | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
def __init__(self):
super(TestModule, self).__init__()
self.v = tf.Variable(42.0)
# We guarantee that there are no calls to exported functions from inside the
# module.
#
# If there is a call to an exported function, we create a wrapper function
# that forwards to the other function and put the tf_saved_model attributes on
# the wrapper.
#
# The reason for doing this is so that later interprocedural passes don't have
# to worry about what to do with these attributes.
# An example of where this would happen is when converting to XLA, which
# requires eliminating mutable variables (and is thus sort of like an
# interprocedural SSA formation, which in particular will
# modify signatures interprocedurally).
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<f32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: %arg1: tensor<!tf.resource<{{.*}}>> {tf_saved_model.bound_input = {{@[a-zA-Z_0-9]+}}}
# CHECK-SAME: ) -> (
# CHECK-SAME: tensor<f32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<f32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes{{.*}}tf_saved_model.exported_names = ["callee"]
# CHECK: "tf.StatefulPartitionedCall"{{.*}}f = @[[CALLEE_INTERNAL:[a-zA-Z_0-9]+]]
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<f32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: %arg1: tensor<!tf.resource<{{.*}}>> {tf_saved_model.bound_input = {{@[a-zA-Z_0-9]+}}}
# CHECK-SAME: ) -> (
# CHECK-SAME: tensor<f32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<*xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes{{.*}}tf_saved_model.exported_names = ["caller"]
# CHECK: "tf.StatefulPartitionedCall"{{.*}}f = @[[CALLEE_INTERNAL]]
#
# CHECK: func @[[CALLEE_INTERNAL]]
# CHECK-NOT: tf_saved_model.exported_names
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def callee(self, x):
return x, self.v
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def caller(self, x):
return self.callee(x)
if __name__ == '__main__':
common.do_test(TestModule)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/call_to_exported.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/multi_variables_v1 | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1
# CHECK: "tf_saved_model.global_tensor"() {is_mutable, sym_name = "[[VAR0:[a-zA-Z_0-9]+]]", type = tensor<5x3xf32>, value = {{.*}} : tensor<5x3xf32>} : () -> ()
# CHECK: "tf_saved_model.global_tensor"() {is_mutable, sym_name = "[[VAR1:[a-zA-Z_0-9]+]]", type = tensor<3x5xf32>, value = {{.*}} : tensor<3x5xf32>} : () -> ()
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: [[ARG0:%.*]]: tensor<!tf.resource<tensor<5x3xf32>>> {tf_saved_model.bound_input = @[[VAR0]]},
# CHECK-SAME: [[ARG1:%.*]]: tensor<!tf.resource<tensor<3x5xf32>>> {tf_saved_model.bound_input = @[[VAR1]]})
# CHECK-SAME: -> (tensor<5x5xf32> {{{.*}}})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["key"]
# CHECK-NEXT: [[R0:%.*]] = "tf.ReadVariableOp"([[ARG0]]) {{{.*}}} : (tensor<!tf.resource<tensor<5x3xf32>>>) -> tensor<5x3xf32>
# CHECK-NEXT: [[R1:%.*]] = "tf.ReadVariableOp"([[ARG1]]) {{{.*}}} : (tensor<!tf.resource<tensor<3x5xf32>>>) -> tensor<3x5xf32>
# CHECK-NEXT: [[R2:%.*]] = "tf.MatMul"([[R0]], [[R1]]) {{{.*}}} : (tensor<5x3xf32>, tensor<3x5xf32>) -> tensor<5x5xf32>
def Test():
x = tf.compat.v1.get_variable(
name='x',
shape=(5, 3),
initializer=tf.random_normal_initializer(),
trainable=True)
y = tf.compat.v1.get_variable(
name='y',
shape=(3, 5),
initializer=tf.random_normal_initializer(),
trainable=True)
z = tf.matmul(x, y)
tensor_info_z = tf.compat.v1.saved_model.utils.build_tensor_info(z)
return {
'key': (tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
inputs=None,
outputs={'z': tensor_info_z},
method_name='some_function'))
}
if __name__ == '__main__':
common_v1.set_tf_options()
common_v1.do_test(Test())
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/multi_variables_v1.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/multi_arguments_v1 | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1
# Tests multiple inputs with index paths.
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: [[ARG0:%.*]]: tensor<5x3xf32> {tf_saved_model.index_path = ["x"]},
# CHECK-SAME: [[ARG1:%.*]]: tensor<3x5xf32> {tf_saved_model.index_path = ["y"]})
# CHECK-SAME: -> (tensor<5x5xf32> {tf_saved_model.index_path = ["s"]},
# CHECK-SAME: tensor<3x3xf32> {tf_saved_model.index_path = ["t"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["key"]
def Test():
x = tf.constant(1.0, shape=(5, 3))
y = tf.constant(1.0, shape=(3, 5))
s = tf.matmul(x, y)
t = tf.matmul(y, x)
tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x)
tensor_info_y = tf.compat.v1.saved_model.utils.build_tensor_info(y)
tensor_info_s = tf.compat.v1.saved_model.utils.build_tensor_info(s)
tensor_info_t = tf.compat.v1.saved_model.utils.build_tensor_info(t)
return {
'key': (tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
inputs={
'x': tensor_info_x,
'y': tensor_info_y
},
outputs={
's': tensor_info_s,
't': tensor_info_t
},
method_name='some_function'))
}
if __name__ == '__main__':
common_v1.set_tf_options()
common_v1.do_test(Test())
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/multi_arguments_v1.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/shared_variable_v1 | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1
# CHECK: "tf_saved_model.global_tensor"() {is_mutable, sym_name = "[[VAR:[a-zA-Z_0-9]+]]", type = tensor<1x3xf32>, value = {{.*}} : tensor<1x3xf32>} : () -> ()
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: [[ARG0:%.*]]: tensor<3x1xf32> {tf_saved_model.index_path = ["x"]},
# CHECK-SAME: [[ARG1:%.*]]: tensor<!tf.resource<tensor<1x3xf32>>> {tf_saved_model.bound_input = @[[VAR]]})
# CHECK-SAME: -> (tensor<3x3xf32> {tf_saved_model.index_path = ["r"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["key"]
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: [[ARG2:%.*]]: tensor<3x1xf32> {tf_saved_model.index_path = ["x"]},
# CHECK-SAME: [[ARG3:%.*]]: tensor<!tf.resource<tensor<1x3xf32>>> {tf_saved_model.bound_input = @[[VAR]]})
# CHECK-SAME: -> (tensor<3x3xf32> {tf_saved_model.index_path = ["r"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["key2"]
def Test():
x = tf.constant([[1.0], [1.0], [1.0]])
y = tf.get_variable(
name='y',
shape=(1, 3),
initializer=tf.random_normal_initializer(),
trainable=True)
r = tf.matmul(x, y)
tensor_info_x = tf.saved_model.utils.build_tensor_info(x)
tensor_info_r = tf.saved_model.utils.build_tensor_info(r)
signature_def = tf.saved_model.signature_def_utils.build_signature_def(
inputs={'x': tensor_info_x},
outputs={'r': tensor_info_r},
method_name='some_function')
signature_def2 = tf.saved_model.signature_def_utils.build_signature_def(
inputs={'x': tensor_info_x},
outputs={'r': tensor_info_r},
method_name='some_other_function')
# Create two signatures that share the same variable.
return {'key': signature_def, 'key2': signature_def2}
if __name__ == '__main__':
common_v1.set_tf_options()
common_v1.do_test(Test())
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/shared_variable_v1.py
|
"""Gradients for XLA ops."""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
@ops.RegisterGradient("XlaClusterOutput")
def _XlaClusterOutputGrad(_, grad):
del grad # unused
  raise RuntimeError("Gradient computation of graph in xla.compile() is "
                     "prohibited because it can cause performance degradation. "
                     "Please move gradient computation inside xla.compile().")
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/jit/ops/xla_ops_grad.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate tensorflow graphs for testing tfcompile."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import app
from tensorflow.python.training import saver as saver_lib
FLAGS = None
def tfadd(_):
x = constant_op.constant([1], name='x_const')
y = constant_op.constant([2], name='y_const')
math_ops.add(x, y, name='x_y_sum')
def tfadd_with_ckpt(out_dir):
x = array_ops.placeholder(dtypes.int32, name='x_hold')
y = variables.VariableV1(constant_op.constant([0]), name='y_saved')
math_ops.add(x, y, name='x_y_sum')
init_op = variables.global_variables_initializer()
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
with session.Session() as sess:
sess.run(init_op)
sess.run(y.assign(y + 42))
# Without the checkpoint, the variable won't be set to 42.
ckpt = os.path.join(out_dir, 'test_graph_tfadd_with_ckpt.ckpt')
saver.save(sess, ckpt)
def tfadd_with_ckpt_saver(out_dir):
x = array_ops.placeholder(dtypes.int32, name='x_hold')
y = variables.VariableV1(constant_op.constant([0]), name='y_saved')
math_ops.add(x, y, name='x_y_sum')
init_op = variables.global_variables_initializer()
saver = saver_lib.Saver(name='abcprefix', write_version=saver_pb2.SaverDef.V1)
with session.Session() as sess:
sess.run(init_op)
sess.run(y.assign(y + 42))
# Without the checkpoint, the variable won't be set to 42.
ckpt_file = os.path.join(out_dir, 'test_graph_tfadd_with_ckpt_saver.ckpt')
saver.save(sess, ckpt_file)
# Without the SaverDef, the restore op won't be named correctly.
saver_file = os.path.join(out_dir, 'test_graph_tfadd_with_ckpt_saver.saver')
with open(saver_file, 'wb') as f:
f.write(saver.as_saver_def().SerializeToString())
def tfassert_eq(_):
x = array_ops.placeholder(dtypes.int32, name='x_hold')
y = array_ops.placeholder(dtypes.int32, name='y_hold')
control_flow_ops.Assert(
math_ops.equal(x, y), ['Expected x == y.'], name='assert_eq')
math_ops.add(x, math_ops.negative(y), name='x_y_diff')
def tfcond(_):
p = array_ops.placeholder(dtypes.bool, name='p_hold')
x = array_ops.placeholder(dtypes.int32, name='x_hold')
y = array_ops.placeholder(dtypes.int32, name='y_hold')
z = control_flow_ops.cond(p, lambda: x, lambda: y)
array_ops.identity(z, name='result')
def tfgather(_):
params = array_ops.placeholder(dtypes.float32, name='params')
indices = array_ops.placeholder(dtypes.int32, name='indices')
array_ops.gather(params, indices, name='gather_output')
def tfmatmul(_):
x = array_ops.placeholder(dtypes.float32, name='x_hold')
y = array_ops.placeholder(dtypes.float32, name='y_hold')
math_ops.matmul(x, y, name='x_y_prod')
def tfmatmulandadd(_):
# This tests multiple outputs.
x = array_ops.placeholder(dtypes.float32, name='x_hold')
y = array_ops.placeholder(dtypes.float32, name='y_hold')
math_ops.matmul(x, y, name='x_y_prod')
math_ops.add(x, y, name='x_y_sum')
def tffunction(_):
@function.Defun(dtypes.int32, dtypes.int32)
def test_func(a, b):
return a + b
x = constant_op.constant([1], name='x_const')
y = constant_op.constant([2], name='y_const')
test_func(x, y, name='func_call') # pylint: disable=unexpected-keyword-arg
def tfsplits(_):
"""A more complex graph, including splits."""
x = array_ops.placeholder(dtypes.float32, shape=[2, 2], name='x')
y = array_ops.placeholder(dtypes.float32, shape=[2, 2], name='y')
for _ in range(3):
x0, x1 = array_ops.split(x, 2, 0)
y0, y1 = array_ops.split(y, 2, 0)
x0 += 1
y0 += 1
z = math_ops.matmul(x, y, name='x_y_prod')
a = array_ops.concat([x0, y1], axis=0, name='concat_x0_y1')
b = array_ops.concat([y0, x1], axis=0, name='concat_y0_x1')
x = math_ops.matmul(a, b, name='a_b')
y = math_ops.add(x, z)
array_ops.identity(y, name='result')
def tftop_k(_):
x = array_ops.placeholder(dtypes.int32, shape=[5], name='x')
output = nn_ops.top_k(x, 2, name='values')
array_ops.identity(output[1], name='indices')
def tfvariable(_):
x = variables.Variable(1000.0, name='x')
old_x = x.value()
with ops.control_dependencies([old_x]):
new_x = x.assign_add(42.0)
array_ops.stack([old_x, new_x], name='result')
def tfvariable_sequential_updates(_):
x = variables.Variable(1.0, name='x')
y = variables.Variable(1.0, name='y')
updates = control_flow_ops.no_op()
for _ in range(3):
with ops.control_dependencies([updates]):
x_val = x.read_value() + y
updates = x.assign_sub(0.1 * x_val)
array_ops.identity(updates, name='result')
def write_graph(build_graph, out_dir):
"""Build a graph using build_graph and write it out."""
g = ops.Graph()
with g.as_default():
build_graph(out_dir)
filename = os.path.join(out_dir, 'test_graph_%s.pb' % build_graph.__name__)
with open(filename, 'wb') as f:
f.write(g.as_graph_def().SerializeToString())
def main(_):
write_graph(tfadd, FLAGS.out_dir)
write_graph(tfadd_with_ckpt, FLAGS.out_dir)
write_graph(tfadd_with_ckpt_saver, FLAGS.out_dir)
write_graph(tfassert_eq, FLAGS.out_dir)
write_graph(tfcond, FLAGS.out_dir)
write_graph(tffunction, FLAGS.out_dir)
write_graph(tfgather, FLAGS.out_dir)
write_graph(tfmatmul, FLAGS.out_dir)
write_graph(tfmatmulandadd, FLAGS.out_dir)
write_graph(tfsplits, FLAGS.out_dir)
write_graph(tftop_k, FLAGS.out_dir)
write_graph(tfvariable, FLAGS.out_dir)
write_graph(tfvariable_sequential_updates, FLAGS.out_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--out_dir',
type=str,
default='',
help='Output directory for graphs, checkpoints and savers.')
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
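# Example invocation (hypothetical output path): this writes the
# test_graph_*.pb files plus the checkpoint and saver artifacts consumed by
# the tfcompile AOT tests.
#   python make_test_graphs.py --out_dir=/tmp/tfcompile_test_graphs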
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compiler/aot/tests/make_test_graphs.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""Pretrain Retro."""
from functools import partial
import torch
from megatron import get_args, get_retro_args
from megatron import get_timers
from megatron import get_tokenizer
from megatron import print_rank_0
from megatron.core import mpu, tensor_parallel
from megatron.core.enums import ModelType
from megatron.model import GPTModel
from megatron.training import pretrain
from megatron.utils import get_ltor_masks_and_position_ids
from tools.retro.query.retro_dataset import get_retro_datasets
from pretrain_gpt import (
loss_func,
model_provider,
train_valid_test_datasets_provider as standard_datasets_provider,
)
def get_batch(data_iterator):
"""Generate a batch"""
args = get_args()
retro_args = get_retro_args()
tokenizer = get_tokenizer()
# Items and their type.
keys = ['text']
datatype = torch.int64
if args.retro_add_retriever:
        keys += ['neighbor_tokens']
# Broadcast data.
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
data_b = tensor_parallel.broadcast_data(keys, data, datatype)
# Unpack.
tokens_ = data_b['text'].long()
labels = tokens_[:, 1:].contiguous()
tokens = tokens_[:, :-1].contiguous()
if args.retro_add_retriever:
        # Flatten retrieved neighbors to [batch * chunks * k, retrieved_length];
        # each retrieved sequence is a neighbor chunk plus its continuation chunk.
        neighbor_tokens = data_b['neighbor_tokens'] \
            .view(-1, retro_args.retro_gpt_retrieved_length).long()
    # Get the masks and position ids.
attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
tokens,
tokenizer.eod,
args.reset_position_ids,
args.reset_attention_mask,
args.eod_mask_loss)
if args.retro_add_retriever:
_, _, neighbor_position_ids = get_ltor_masks_and_position_ids(
neighbor_tokens,
tokenizer.eod,
args.reset_position_ids,
args.reset_attention_mask,
args.eod_mask_loss)
neighbor_attention_mask = None
return tokens, labels, loss_mask, attention_mask, position_ids, \
neighbor_tokens, neighbor_attention_mask, neighbor_position_ids
else:
return tokens, labels, loss_mask, attention_mask, position_ids
def forward_step(data_iterator, model):
"""Forward step."""
args = get_args()
timers = get_timers()
# Get the batch.
timers('batch-generator').start()
if args.retro_add_retriever:
tokens, labels, loss_mask, attention_mask, position_ids, \
neighbor_tokens, neighbor_attention_mask, neighbor_position_ids = \
get_batch(data_iterator)
else:
tokens, labels, loss_mask, attention_mask, position_ids = get_batch(
data_iterator)
neighbor_tokens, neighbor_attention_mask, neighbor_position_ids = \
None, None, None
timers('batch-generator').stop()
output_tensor = model(tokens, position_ids, attention_mask,
retriever_input_ids=neighbor_tokens,
retriever_position_ids=neighbor_position_ids,
retriever_attn_mask=neighbor_attention_mask,
labels=labels)
return output_tensor, partial(loss_func, loss_mask)
def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid, and test datasets."""
args = get_args()
if args.retro_add_retriever:
return get_retro_datasets()
else:
return standard_datasets_provider(train_val_test_num_samples)
if __name__ == "__main__":
pretrain(train_valid_test_datasets_provider,
model_provider,
ModelType.retro_decoder,
forward_step,
args_defaults={'tokenizer_type': 'GPT2BPETokenizer',
'retro_add_retriever': True})
|
Megatron-LM-master
|
pretrain_retro.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Pretrain VIT"""
import torch
import torch.nn.functional as F
from functools import partial
from megatron import get_args, get_timers, print_rank_0, print_rank_last
from megatron.core.enums import ModelType
from megatron.data.vit_dataset import build_train_valid_datasets
from megatron.model.vision.inpainting import VitInpaintingModel
from megatron.model.vision.inpainting import MitInpaintingModel
from megatron.training import pretrain
from megatron.utils import average_losses_across_data_parallel_group
from tasks.vision.segmentation.metrics import SSIM, PSNR
from megatron.arguments import core_transformer_config_from_args
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
args = get_args()
config = core_transformer_config_from_args(args)
if args.vision_backbone_type == 'vit':
model = VitInpaintingModel(config=config,
pre_process=pre_process,
post_process=post_process)
elif args.vision_backbone_type == 'mit':
model = MitInpaintingModel(config=config,
pre_process=pre_process,
post_process=post_process)
else:
raise Exception('{} vision backbone is not supported.'.format(
args.vision_backbone_type))
return model
def get_batch(data_iterator):
"""Build the batch."""
data = next(data_iterator)
# only data parallelism; no need for broadcast
images = data[0][0].cuda()
masks = data[0][1].cuda()
return images, masks
def loss_func(images, masks, masked_images, outputs, non_loss_data=False):
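    """Masked-region reconstruction loss (MSE over the inpainted pixels),
    reported together with PSNR/SSIM; in non-loss mode, returns composited
    images and the metrics instead.
    """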
outputs = outputs.contiguous().float()
    masks_flip = 1 - masks
flip_masked_outputs = outputs.masked_fill(masks_flip.bool(), 0)
flip_masked_images = images.masked_fill(masks_flip.bool(), 0)
ssim_fun = SSIM()
psnr_fun = PSNR()
if not non_loss_data:
mask_count = torch.count_nonzero(masks)
loss = F.mse_loss(
flip_masked_outputs,
flip_masked_images.float(),
reduction="sum"
)
        loss = loss / mask_count
ssim = ssim_fun(flip_masked_outputs, flip_masked_images.float())
psnr = psnr_fun(flip_masked_outputs, flip_masked_images.float())
averaged_loss = average_losses_across_data_parallel_group(
[loss, psnr, ssim]
)
return loss, {"loss": averaged_loss[0],
"psnr": averaged_loss[1],
'ssim': averaged_loss[2]}
else:
synth_images = masked_images.float() + flip_masked_outputs
ssim = ssim_fun(synth_images, images.float())
psnr = psnr_fun(synth_images, images.float())
return torch.cat((images, masked_images, synth_images), dim=2), ssim, psnr
def forward_step(data_iterator, model):
"""Forward step."""
timers = get_timers()
# Get the batch.
timers("batch-generator", log_level=2).start()
(
images,
masks,
) = get_batch(data_iterator)
timers("batch-generator").stop()
masked_images = images.masked_fill(masks.bool(), 0)
outputs = model(masked_images)
    # Hand back the raw model outputs and a loss function bound to this batch.
return outputs, partial(loss_func, images, masks, masked_images)
def process_non_loss_data(data, iteration, writer):
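    """Write validation image grids and averaged PSNR/SSIM to TensorBoard."""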
psnr_sum = 0
ssim_sum = 0
for (output_tb, ssim, psnr) in data:
output_tb[output_tb < 0] = 0
output_tb[output_tb > 1] = 1
writer.add_images("gt-input-output-vald", output_tb,
global_step=iteration, walltime=None,
dataformats='NCHW')
psnr_sum = psnr_sum + psnr.item()
ssim_sum = ssim_sum + ssim.item()
    psnr = psnr_sum / len(data)
    ssim = ssim_sum / len(data)
writer.add_scalar('PSNR generate value-validation', psnr, iteration)
writer.add_scalar('SSIM generate value-validation', ssim, iteration)
def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid, and test datasets."""
args = get_args()
print_rank_0(
"> building train, validation, and test datasets " "for VIT ..."
)
train_ds, valid_ds = build_train_valid_datasets(
data_path=args.data_path,
image_size=(args.img_h, args.img_w)
)
print_rank_0("> finished creating VIT datasets ...")
return train_ds, valid_ds, None
if __name__ == "__main__":
pretrain(
train_valid_test_datasets_provider,
model_provider,
ModelType.encoder_or_decoder,
forward_step,
process_non_loss_data,
args_defaults={'dataloader_type': 'cyclic', 'vision_pretraining': True}
)
|
Megatron-LM-master
|
pretrain_vision_inpaint.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""Pretrain GPT"""
import os
import torch
from functools import partial
from megatron import get_args
from megatron import print_rank_0
from megatron import get_timers
from megatron import get_tokenizer
from megatron.core import tensor_parallel
from megatron.core.enums import ModelType
from megatron.data.gpt_dataset import build_train_valid_test_datasets
from megatron.model import GPTModel
from megatron.training import pretrain
from megatron.utils import get_ltor_masks_and_position_ids
from megatron.utils import average_losses_across_data_parallel_group
from megatron.arguments import core_transformer_config_from_args
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
args = get_args()
print_rank_0('building GPT model ...')
config = core_transformer_config_from_args(get_args())
model = GPTModel(
config,
num_tokentypes=0,
parallel_output=True,
pre_process=pre_process,
post_process=post_process
)
return model
def get_batch(data_iterator):
"""Generate a batch"""
args = get_args()
tokenizer = get_tokenizer()
# Items and their type.
keys = ['text']
datatype = torch.int64
# Broadcast data.
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
data_b = tensor_parallel.broadcast_data(keys, data, datatype)
# Unpack.
tokens_ = data_b['text'].long()
labels = tokens_[:, 1:].contiguous()
tokens = tokens_[:, :-1].contiguous()
    # Get the masks and position ids.
attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
tokens,
tokenizer.eod,
args.reset_position_ids,
args.reset_attention_mask,
args.eod_mask_loss)
return tokens, labels, loss_mask, attention_mask, position_ids
def loss_func(loss_mask, output_tensor):
losses = output_tensor.float()
loss_mask = loss_mask.view(-1).float()
loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
# Check individual rank losses are not NaN prior to DP all-reduce.
args = get_args()
if args.check_for_nan_in_loss_and_grad:
global_rank = torch.distributed.get_rank()
assert not loss.isnan(), (
f'Rank {global_rank}: found NaN in local forward loss calculation. '
f'Device: {torch.cuda.current_device()}, node: {os.uname()[1]}'
)
# Reduce loss for logging.
averaged_loss = average_losses_across_data_parallel_group([loss])
return loss, {'lm loss': averaged_loss[0]}
def forward_step(data_iterator, model):
"""Forward step."""
args = get_args()
timers = get_timers()
# Get the batch.
timers('batch-generator', log_level=2).start()
tokens, labels, loss_mask, attention_mask, position_ids = get_batch(
data_iterator)
timers('batch-generator').stop()
output_tensor = model(tokens, position_ids, attention_mask,
labels=labels)
return output_tensor, partial(loss_func, loss_mask)
def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid, and test datasets."""
args = get_args()
print_rank_0('> building train, validation, and test datasets '
'for GPT ...')
train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
data_prefix=args.data_path,
splits_string=args.split,
train_valid_test_num_samples=train_val_test_num_samples,
seq_length=args.seq_length,
seed=args.seed,
skip_warmup=(not args.mmap_warmup),
train_data_prefix=args.train_data_path,
valid_data_prefix=args.valid_data_path,
test_data_prefix=args.test_data_path,
data_cache_path=args.data_cache_path)
print_rank_0("> finished creating GPT datasets ...")
return train_ds, valid_ds, test_ds
if __name__ == "__main__":
pretrain(train_valid_test_datasets_provider,
model_provider,
ModelType.encoder_or_decoder,
forward_step,
args_defaults={'tokenizer_type': 'GPT2BPETokenizer'})
|
Megatron-LM-master
|
pretrain_gpt.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Pretrain BERT"""
from functools import partial
import torch
import torch.nn.functional as F
from megatron import get_args
from megatron import print_rank_0
from megatron import get_timers
from megatron.core import tensor_parallel
from megatron.core.enums import ModelType
from megatron.data.dataset_utils import build_train_valid_test_datasets
from megatron.model import BertModel
from megatron.training import pretrain
from megatron.utils import average_losses_across_data_parallel_group
from megatron.arguments import core_transformer_config_from_args
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
print_rank_0('building BERT model ...')
args = get_args()
config = core_transformer_config_from_args(args)
num_tokentypes = 2 if args.bert_binary_head else 0
model = BertModel(
config=config,
num_tokentypes=num_tokentypes,
add_binary_head=args.bert_binary_head,
parallel_output=True,
pre_process=pre_process,
post_process=post_process)
return model
def get_batch(data_iterator):
"""Build the batch."""
# Items and their type.
keys = ['text', 'types', 'labels', 'is_random', 'loss_mask', 'padding_mask']
datatype = torch.int64
# Broadcast data.
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
data_b = tensor_parallel.broadcast_data(keys, data, datatype)
# Unpack.
tokens = data_b['text'].long()
types = data_b['types'].long()
sentence_order = data_b['is_random'].long()
loss_mask = data_b['loss_mask'].float()
lm_labels = data_b['labels'].long()
padding_mask = data_b['padding_mask'].long()
return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask
def loss_func(loss_mask, sentence_order, output_tensor):
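    """Masked-LM loss plus, when the binary head is enabled, the
    sentence-order-prediction loss; losses are averaged across the
    data-parallel group for logging.
    """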
lm_loss_, sop_logits = output_tensor
lm_loss_ = lm_loss_.float()
loss_mask = loss_mask.float()
lm_loss = torch.sum(
lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
if sop_logits is not None:
sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(),
sentence_order.view(-1),
ignore_index=-1)
sop_loss = sop_loss.float()
loss = lm_loss + sop_loss
averaged_losses = average_losses_across_data_parallel_group(
[lm_loss, sop_loss])
return loss, {'lm loss': averaged_losses[0],
'sop loss': averaged_losses[1]}
else:
loss = lm_loss
averaged_losses = average_losses_across_data_parallel_group(
[lm_loss])
return loss, {'lm loss': averaged_losses[0]}
def forward_step(data_iterator, model):
"""Forward step."""
args = get_args()
timers = get_timers()
# Get the batch.
timers('batch-generator', log_level=2).start()
tokens, types, sentence_order, loss_mask, lm_labels, padding_mask = get_batch(
data_iterator)
timers('batch-generator').stop()
if not args.bert_binary_head:
types = None
# Forward pass through the model.
output_tensor = model(tokens, padding_mask, tokentype_ids=types,
lm_labels=lm_labels)
return output_tensor, partial(loss_func, loss_mask, sentence_order)
def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid, and test datasets."""
args = get_args()
print_rank_0('> building train, validation, and test datasets '
'for BERT ...')
train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
data_prefix=args.data_path,
splits_string=args.split,
train_valid_test_num_samples=train_val_test_num_samples,
max_seq_length=args.seq_length,
seed=args.seed,
skip_warmup=(not args.mmap_warmup),
binary_head=args.bert_binary_head)
print_rank_0("> finished creating BERT datasets ...")
return train_ds, valid_ds, test_ds
if __name__ == "__main__":
pretrain(train_valid_test_datasets_provider, model_provider,
ModelType.encoder_or_decoder,
forward_step, args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'})
|
Megatron-LM-master
|
pretrain_bert.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Pretrain BERT for Inverse Cloze Task"""
from functools import partial
import math
import torch
import torch.distributed as dist
import torch.nn.functional as F
from megatron import get_args
from megatron import print_rank_0
from megatron import get_timers
from megatron.core import mpu
from megatron.core.enums import ModelType
from megatron.data.biencoder_dataset_utils import get_ict_batch
from megatron.data.dataset_utils import build_train_valid_test_datasets
from megatron.model.biencoder_model import biencoder_model_provider
from megatron.training import pretrain
from megatron.utils import average_losses_across_data_parallel_group
def pretrain_ict_model_provider(pre_process=True, post_process=True):
args = get_args()
model = biencoder_model_provider(
only_context_model=False,
only_query_model=False,
biencoder_shared_query_context_model=\
args.biencoder_shared_query_context_model,
pre_process=pre_process, post_process=post_process)
return model
def get_group_world_size_rank():
group = mpu.get_data_parallel_group()
rank = torch.distributed.get_rank(group=group)
world_size = torch.distributed.get_world_size(group=group)
return group, rank, world_size
class AllgatherFromDataParallelRegion(torch.autograd.Function):
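    """All-gather a 2-D tensor across the data-parallel group.
    Forward concatenates every rank's input along dim 0; backward returns only
    the gradient slice that corresponds to this rank's original input.
    """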
@staticmethod
def forward(ctx, input_):
assert input_.dim() == 2
group, rank, world_size = get_group_world_size_rank()
tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
tensor_list[rank] = input_
torch.distributed.all_gather(tensor_list, input_, group=group)
output = torch.cat(tensor_list, dim=0).contiguous()
return output
@staticmethod
def backward(ctx, grad_output):
group, rank, world_size = get_group_world_size_rank()
assert grad_output.shape[0] % world_size == 0
dim_size = grad_output.shape[0] // world_size
output_list = torch.split(grad_output, dim_size, dim=0)
# get chunk from this rank
output = output_list[rank].contiguous()
return output
def loss_func(output_tensor):
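    """In-batch retrieval loss for ICT: NLL over query/context inner-product
    scores gathered across data-parallel ranks, plus top-k retrieval
    accuracies for reporting.
    """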
args = get_args()
query_logits, context_logits = output_tensor
micro_batch_size = query_logits.shape[0]
# recall we assert that tensor_model_parallel_size == 1
assert mpu.get_tensor_model_parallel_world_size() == 1, \
"Model parallel size > 1 not supported for ICT"
global_batch_size = dist.get_world_size() * micro_batch_size
all_query_logits = AllgatherFromDataParallelRegion.apply(query_logits)
all_context_logits = AllgatherFromDataParallelRegion.apply(context_logits)
# scores are inner products between query and context embeddings
retrieval_scores = torch.matmul(all_query_logits,
torch.transpose(all_context_logits, 0, 1))
# scaling the retriever scores
if args.retriever_score_scaling:
retrieval_scores = retrieval_scores / math.sqrt(args.hidden_size)
softmax_scores = F.log_softmax(retrieval_scores, dim=1)
sorted_vals, sorted_indices = torch.topk(softmax_scores,
k=softmax_scores.shape[1], sorted=True)
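    # Top-k accuracy: fraction of queries whose own context (column i for
    # query i) appears among its k highest-scoring retrieved contexts.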
def topk_accuracy(k):
return torch.cuda.FloatTensor([sum([int(i in sorted_indices[i, :k]) \
for i in range(global_batch_size)]) / global_batch_size])
topk_accs = [topk_accuracy(int(k)) for k in args.retriever_report_topk_accuracies]
labels = torch.arange(global_batch_size).long().cuda()
loss = F.nll_loss(softmax_scores, labels, reduction='mean')
reduced_losses = average_losses_across_data_parallel_group([loss, *topk_accs])
# Scale the retrieval loss
loss = loss * mpu.get_data_parallel_world_size()
# create stats_dict with retrieval loss and all specified top-k accuracies
topk_acc_dict = {'top{}_acc'.format(k): v * 100 for k, v in \
zip(args.retriever_report_topk_accuracies, reduced_losses[1:])}
stats_dict = dict(loss=reduced_losses[0], **topk_acc_dict)
return loss, stats_dict
def forward_step(data_iterator, model):
"""Forward step."""
args = get_args()
timers = get_timers()
# Get the batch.
timers('batch-generator', log_level=2).start()
query_tokens, query_mask, \
context_tokens, context_mask, context_indices = get_ict_batch(data_iterator)
timers('batch-generator').stop()
# Query and Context Types
query_types = torch.cuda.LongTensor(*query_tokens.shape).fill_(0)
context_types = torch.cuda.LongTensor(*context_tokens.shape).fill_(0)
# Forward model.
output_tensor = model(query_tokens, query_mask, query_types, context_tokens,
context_mask, context_types)
return output_tensor, partial(loss_func)
def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid and test datasets."""
args = get_args()
print_rank_0('> building train, validation, and test datasets '
'for BERT ICT...')
train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
data_prefix=args.data_path,
splits_string=args.split,
train_valid_test_num_samples=train_val_test_num_samples,
max_seq_length=args.seq_length,
masked_lm_prob=args.mask_prob,
short_seq_prob=args.short_seq_prob,
seed=args.seed,
skip_warmup=(not args.mmap_warmup),
binary_head=False,
dataset_type='ict')
print_rank_0("> finished creating BERT ICT datasets ...")
return train_ds, valid_ds, test_ds
if __name__ == "__main__":
pretrain(train_valid_test_datasets_provider,
pretrain_ict_model_provider,
ModelType.encoder_or_decoder,
forward_step,
args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'})
|
Megatron-LM-master
|
pretrain_ict.py
|
"""Setup for pip package."""
import importlib.util
import os
import setuptools
spec = importlib.util.spec_from_file_location('package_info', 'megatron/core/package_info.py')
package_info = importlib.util.module_from_spec(spec)
spec.loader.exec_module(package_info)
__contact_emails__ = package_info.__contact_emails__
__contact_names__ = package_info.__contact_names__
__description__ = package_info.__description__
__download_url__ = package_info.__download_url__
__homepage__ = package_info.__homepage__
__keywords__ = package_info.__keywords__
__license__ = package_info.__license__
__package_name__ = package_info.__package_name__
__repository_url__ = package_info.__repository_url__
__version__ = package_info.__version__
if os.path.exists('megatron/core/README.md'):
with open("megatron/core/README.md", "r", encoding='utf-8') as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
else:
long_description = 'See ' + __homepage__
long_description_content_type = "text/plain"
###############################################################################
# Dependency Loading #
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
def req_file(filename, folder="megatron/core"):
with open(os.path.join(folder, filename), encoding='utf-8') as f:
content = f.readlines()
# Strip whitespace characters, e.g. the `\n` at the end of each line.
return [x.strip() for x in content]
install_requires = req_file("requirements.txt")
###############################################################################
setuptools.setup(
name=__package_name__,
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=__version__,
description=__description__,
long_description=long_description,
long_description_content_type=long_description_content_type,
# The project's main homepage.
url=__repository_url__,
download_url=__download_url__,
# Author details
author=__contact_names__,
author_email=__contact_emails__,
# maintainer Details
maintainer=__contact_names__,
maintainer_email=__contact_emails__,
# The licence under which the project is released
license=__license__,
classifiers=[
# How mature is this project? Common values are
# 1 - Planning
# 2 - Pre-Alpha
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
# 6 - Mature
# 7 - Inactive
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Information Technology',
# Indicate what your project relates to
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Supported python versions
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
# Additional Setting
'Environment :: Console',
'Natural Language :: English',
'Operating System :: OS Independent',
],
packages=['megatron.core', 'megatron.core.pipeline_parallel', 'megatron.core.tensor_parallel'],
install_requires=install_requires,
# Add in any packaged data.
include_package_data=True,
# PyPI package information.
keywords=__keywords__,
)
|
Megatron-LM-master
|
setup.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import torch.distributed as dist
from functools import partial
from megatron import get_args, get_timers, print_rank_0
from megatron.core.enums import ModelType
from megatron.data.vit_dataset import build_train_valid_datasets
from megatron.model.vision.dino import DINOPretrainModel
from megatron.model.vision.knn_monitor import knn_predict, get_feature_bank
from megatron.training import pretrain
from megatron.utils import average_losses_across_data_parallel_group, unwrap_model
from megatron.arguments import core_transformer_config_from_args
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
config = core_transformer_config_from_args(get_args())
return DINOPretrainModel(config, pre_process=pre_process, post_process=post_process)
def get_batch(data_iterator):
"""Build the batch."""
data = next(data_iterator)
# only data parallelism; no need for broadcast
if isinstance(data[0], list):
images = [aug.cuda() for aug in data[0]]
else:
images = data[0].cuda()
labels = data[1].cuda()
return images, labels
def loss_func(model, labels, output_tensor, collect_data=False):
args = get_args()
model = unwrap_model(model)
if model.training:
student_output, teacher_output = output_tensor
loss = model.dino_loss(student_output, teacher_output, args.curr_iteration)
averaged_loss = average_losses_across_data_parallel_group([loss])
return loss, {"loss": averaged_loss[0]}
else:
_, teacher_feature = output_tensor
feature_bank, feature_labels, classes = get_feature_bank()
feature = F.normalize(teacher_feature.float(), dim=1)
knn_accs = []
for k in [10, 20, 100, 200]:
pred_labels = knn_predict(feature, feature_bank,
feature_labels, classes, k, 0.07)
knn_acc = (pred_labels[:, 0] == labels).float().mean()
knn_accs.append(knn_acc)
averaged_loss = average_losses_across_data_parallel_group(knn_accs)
return 0, {"knn_acc_10": averaged_loss[0],
"knn_acc_20": averaged_loss[1],
"knn_acc_100": averaged_loss[2],
"knn_acc_200": averaged_loss[3]}
def forward_step(data_iterator, model):
"""Forward step."""
timers = get_timers()
# Get the batch.
timers("batch-generator", log_level=2).start()
(
images,
labels,
) = get_batch(data_iterator)
timers("batch-generator").stop()
return model(images), partial(loss_func, model, labels)
def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid, and test datasets."""
args = get_args()
print_rank_0(
"> building train, validation, and test datasets " "for VIT ..."
)
train_ds, valid_ds = build_train_valid_datasets(
data_path=args.data_path,
image_size=(args.img_h, args.img_w)
)
print_rank_0("> finished creating VIT datasets ...")
return train_ds, valid_ds, None
if __name__ == "__main__":
pretrain(
train_valid_test_datasets_provider,
model_provider,
ModelType.encoder_or_decoder,
forward_step,
args_defaults={'dataloader_type': 'cyclic', 'vision_pretraining': True}
)
|
Megatron-LM-master
|
pretrain_vision_dino.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Pretrain GPT"""
import torch
from functools import partial
from megatron import get_args
from megatron.arguments import core_transformer_config_from_args
from megatron import print_rank_0
from megatron import get_timers
from megatron import get_tokenizer
from megatron.core import tensor_parallel
from megatron.core.enums import ModelType
from megatron.data.gpt_dataset import build_train_valid_test_datasets
from megatron.core.models.gpt import GPTModel
from megatron.training import pretrain
from megatron.utils import get_ltor_masks_and_position_ids
from megatron.utils import average_losses_across_data_parallel_group
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
args = get_args()
config = core_transformer_config_from_args(args)
print_rank_0('building GPT model ...')
model = GPTModel(
config=config,
vocab_size=args.padded_vocab_size,
max_sequence_length=args.max_position_embeddings,
pre_process=pre_process,
post_process=post_process,
fp16_lm_cross_entropy=args.fp16_lm_cross_entropy,
parallel_output=True,
share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights,
position_embedding_type=args.position_embedding_type,
rotary_percent=args.rotary_percent
)
return model
def get_batch(data_iterator):
"""Generate a batch"""
args = get_args()
tokenizer = get_tokenizer()
# Items and their type.
keys = ['text']
datatype = torch.int64
# Broadcast data.
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
data_b = tensor_parallel.broadcast_data(keys, data, datatype)
# Unpack.
tokens_ = data_b['text'].long()
labels = tokens_[:, 1:].contiguous()
tokens = tokens_[:, :-1].contiguous()
# Get the masks and position ids.
attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
tokens,
tokenizer.eod,
args.reset_position_ids,
args.reset_attention_mask,
args.eod_mask_loss)
return tokens, labels, loss_mask, attention_mask, position_ids
def loss_func(loss_mask, output_tensor):
losses = output_tensor.float()
loss_mask = loss_mask.view(-1).float()
loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
# Reduce loss for logging.
averaged_loss = average_losses_across_data_parallel_group([loss])
return loss, {'lm loss': averaged_loss[0]}
def forward_step(data_iterator, model):
"""Forward step."""
args = get_args()
timers = get_timers()
# Get the batch.
timers('batch-generator', log_level=2).start()
tokens, labels, loss_mask, attention_mask, position_ids = get_batch(
data_iterator)
timers('batch-generator').stop()
output_tensor = model(tokens, position_ids, attention_mask,
labels=labels)
return output_tensor, partial(loss_func, loss_mask)
def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid, and test datasets."""
args = get_args()
print_rank_0('> building train, validation, and test datasets '
'for GPT ...')
train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
data_prefix=args.data_path,
splits_string=args.split,
train_valid_test_num_samples=train_val_test_num_samples,
seq_length=args.seq_length,
seed=args.seed,
skip_warmup=(not args.mmap_warmup),
train_data_prefix=args.train_data_path,
valid_data_prefix=args.valid_data_path,
test_data_prefix=args.test_data_path,
data_cache_path=args.data_cache_path)
print_rank_0("> finished creating GPT datasets ...")
return train_ds, valid_ds, test_ds
if __name__ == "__main__":
pretrain(train_valid_test_datasets_provider, model_provider,
ModelType.encoder_or_decoder,
forward_step,
args_defaults={'tokenizer_type': 'GPT2BPETokenizer'}
)
|
Megatron-LM-master
|
pretrain_gpt_core.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Pretrain VIT"""
import torch
import torch.nn.functional as F
from functools import partial
from megatron import get_args, get_timers, print_rank_0
from megatron.core.enums import ModelType
from megatron.data.vit_dataset import build_train_valid_datasets
from megatron.model.vision.classification import VitClassificationModel
from megatron.model.vision.classification import MitClassificationModel
from megatron.training import pretrain
from megatron.utils import average_losses_across_data_parallel_group
from megatron.arguments import core_transformer_config_from_args
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
args = get_args()
config = core_transformer_config_from_args(args)
if args.vision_backbone_type == 'vit':
print_rank_0("building VIT model ...")
model = VitClassificationModel(config=config,
num_classes=args.num_classes,
pre_process=pre_process,
post_process=post_process)
elif args.vision_backbone_type == 'mit':
print_rank_0("building MIT model ...")
model = MitClassificationModel(num_classes=args.num_classes,
pre_process=pre_process,
post_process=post_process)
else:
raise Exception('{} vision backbone is not supported.'.format(
args.vision_backbone_type))
return model
def get_batch(data_iterator):
"""Build the batch."""
data = next(data_iterator)
# only data parallelism; no need for broadcast
images = data[0].cuda()
labels = data[1].cuda()
return images, labels
def loss_func(labels, output_tensor):
logits = output_tensor.contiguous().float()
loss = F.cross_entropy(logits, labels)
outputs = torch.argmax(logits, -1)
correct = (outputs == labels).float()
accuracy = torch.mean(correct)
averaged_loss = average_losses_across_data_parallel_group([loss, accuracy])
return loss, {"loss": averaged_loss[0], "accuracy": averaged_loss[1]}
def forward_step(data_iterator, model):
"""Forward step."""
timers = get_timers()
# Get the batch.
timers("batch-generator", log_level=2).start()
(
images,
labels,
) = get_batch(data_iterator)
timers("batch-generator").stop()
# Forward model.
output_tensor = model(images)
return output_tensor, partial(loss_func, labels)
def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid, and test datasets."""
args = get_args()
print_rank_0(
"> building train, validation, and test datasets " "for VIT ..."
)
train_ds, valid_ds = build_train_valid_datasets(
data_path=args.data_path,
image_size=(args.img_h, args.img_w)
)
print_rank_0("> finished creating VIT datasets ...")
return train_ds, valid_ds, None
if __name__ == "__main__":
pretrain(
train_valid_test_datasets_provider,
model_provider,
ModelType.encoder_or_decoder,
forward_step,
args_defaults={'dataloader_type': 'cyclic', 'vision_pretraining': True}
)
|
Megatron-LM-master
|
pretrain_vision_classify.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Pretrain T5"""
from functools import partial
import torch
from megatron import (
get_args,
get_timers,
print_rank_0
)
from megatron.core import tensor_parallel
from megatron.core.enums import ModelType
from megatron.data.dataset_utils import build_train_valid_test_datasets
from megatron.model import T5Model
from megatron.training import pretrain
from megatron.utils import average_losses_across_data_parallel_group
from megatron.arguments import core_transformer_config_from_args
"""
Pipeline parallelism for T5
===========================
T5 is a model architecture with both encoder and decoder blocks.
Consequently, pipeline parallelism is implemented slightly differently
compared to architectures like GPT and BERT.
In particular, when pipeline_model_parallel_world_size > 1, each stage
either executes an encoder block or a decoder block. The
--pipeline-model-parallel-split-rank argument controls the rank at which
the split happens: all ranks lower than this argument execute the
encoder block, and all ranks equal to or higher than this argument value
execute the decoder block.
In the encoder section of the model, only one tensor is sent downstream:
the intermediate encoder_hidden_state. In the decoder section of the
model, two tensors are sent downstream in the forward pass: the fully
computed encoder_hidden_state, and the intermediate decoder_hidden_state.
In particular, these are the shapes of the tensors sent between
different workers:
If rank is in decoder section:
intermediate decoder_hidden_state (pre-transpose),
complete encoder_hidden_state (post-transpose).
If rank is at boundary between encoder and decoder sections:
complete encoder_hidden_state (post-transpose).
If rank is in encoder section:
intermediate encoder_hidden_state (pre-transpose).
Additionally, we have code in the backward_step function in schedules.py
to accumulate the encoder_hidden_state gradient across skip connections
(encoder_hidden_state fed in as input to each layer in the decoder).
"""
def model_provider(pre_process=True, post_process=True,
add_encoder=True, add_decoder=True):
"""Build the model."""
print_rank_0('building T5 model ...')
config = core_transformer_config_from_args(get_args())
model = T5Model(config=config,
num_tokentypes=0,
parallel_output=True,
pre_process=pre_process,
post_process=post_process,
add_encoder=add_encoder,
add_decoder=add_decoder)
return model
def get_batch(data_iterator):
"""Build the batch."""
keys = ['text_enc', 'text_dec', 'labels', 'loss_mask',
'enc_mask', 'dec_mask', 'enc_dec_mask']
datatype = torch.int64
# Broadcast data.
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
data_b = tensor_parallel.broadcast_data(keys, data, datatype)
# Unpack.
tokens_enc = data_b['text_enc'].long()
tokens_dec = data_b['text_dec'].long()
labels = data_b['labels'].long()
loss_mask = data_b['loss_mask'].float()
enc_mask = (data_b['enc_mask'] < 0.5)
dec_mask = (data_b['dec_mask'] < 0.5)
enc_dec_mask = (data_b['enc_dec_mask'] < 0.5)
return tokens_enc, tokens_dec, loss_mask, labels, \
enc_mask, dec_mask, enc_dec_mask
def loss_func(loss_mask, output_tensor):
lm_loss_ = output_tensor.float()
lm_loss = torch.sum(
lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
loss = lm_loss
averaged_losses = average_losses_across_data_parallel_group([lm_loss])
return loss, {'lm loss': averaged_losses[0]}
def forward_step(data_iterator, model):
"""Forward step."""
args = get_args()
timers = get_timers()
# Get the batch.
timers('batch generator', log_level=2).start()
tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask \
= get_batch(data_iterator)
timers('batch generator').stop()
# Forward model with lm_labels.
output_tensor = model(tokens_enc,
tokens_dec,
enc_mask,
dec_mask,
enc_dec_mask,
tokentype_ids=None,
lm_labels=lm_labels)
return output_tensor, partial(loss_func, loss_mask)
def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid, and test datasets."""
args = get_args()
print_rank_0('> building train, validation, and test datasets '
'for T5 ...')
train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
data_prefix=args.data_path,
splits_string=args.split,
train_valid_test_num_samples=train_val_test_num_samples,
max_seq_length=args.encoder_seq_length,
max_seq_length_dec=args.decoder_seq_length,
seed=args.seed,
skip_warmup=(not args.mmap_warmup),
dataset_type='t5')
print_rank_0("> finished creating T5 datasets ...")
return train_ds, valid_ds, test_ds
if __name__ == "__main__":
pretrain(train_valid_test_datasets_provider, model_provider, ModelType.encoder_and_decoder,
forward_step, args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'})
|
Megatron-LM-master
|
pretrain_t5.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Finetune utilities."""
from functools import partial
import sys
import torch
from megatron import get_args, get_num_microbatches
from megatron import print_rank_0
from megatron import get_timers
from megatron.core import mpu
from megatron.core.enums import ModelType
from megatron.checkpointing import load_checkpoint
from megatron.checkpointing import save_checkpoint
from megatron.training import evaluate_and_print_results
from megatron.training import setup_model_and_optimizer
from megatron.training import train_step
from megatron.training import training_log
from megatron.utils import average_losses_across_data_parallel_group
from megatron.utils import calc_params_l2_norm
from megatron.utils import check_adlr_autoresume_termination
def process_batch(batch):
"""Process batch and produce inputs for the model."""
args = get_args()
tokens = batch['text'].long().cuda().contiguous()
types = batch['types'].long().cuda().contiguous()
labels = batch['label'].long().cuda().contiguous()
attention_mask = batch['padding_mask'].float().cuda().contiguous()
if args.fp16:
attention_mask = attention_mask.half()
return tokens, types, labels, attention_mask
def cross_entropy_loss_func(labels, output_tensor):
logits = output_tensor
# Cross-entropy loss.
loss_func = torch.nn.CrossEntropyLoss()
loss = loss_func(logits.contiguous().float(), labels)
# Reduce loss for logging.
averaged_loss = average_losses_across_data_parallel_group([loss])
return loss, {'lm loss': averaged_loss[0]}
def _cross_entropy_forward_step(batch, model):
"""Simple forward step with cross-entropy loss."""
timers = get_timers()
# Get the batch.
timers('batch-generator', log_level=2).start()
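# `batch` may be an iterator over batches or an already-materialized batch
# dict; the try/except below handles both cases.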
try:
batch_ = next(batch)
except BaseException:
batch_ = batch
tokens, types, labels, attention_mask = process_batch(batch_)
timers('batch-generator').stop()
# Forward model.
output_tensor = model(tokens, attention_mask, tokentype_ids=types)
return output_tensor, partial(cross_entropy_loss_func, labels)
def build_data_loader(dataset, micro_batch_size, num_workers, drop_last,
task_collate_fn=None):
"""Data loader. Note that batch-size is the local (per GPU) batch-size."""
# Sampler.
world_size = mpu.get_data_parallel_world_size()
rank = mpu.get_data_parallel_rank()
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, num_replicas=world_size, rank=rank)
# Data loader. Note that batch size is the per GPU batch size.
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=micro_batch_size,
sampler=sampler,
shuffle=False,
num_workers=num_workers,
drop_last=drop_last,
pin_memory=True,
collate_fn=task_collate_fn)
return data_loader
def _build_infinite_size_dataloader(dataloader):
"""Build a looped dataloader with infinite size."""
iterator = dataloader.__iter__()
while True:
try:
yield iterator.__next__()
except StopIteration:
iterator = dataloader.__iter__()
def _build_train_valid_dataloaders(train_dataset, valid_dataset,
task_collate_fn=None):
"""Traing and validation dataloaders."""
args = get_args()
print_rank_0('building train and validation dataloaders ...')
# Training dataset.
train_dataloader = build_data_loader(train_dataset, args.micro_batch_size,
args.num_workers, not args.keep_last,
task_collate_fn)
# Set the training iterations.
args.train_iters_per_epoch = len(train_dataloader)
args.train_iters = args.epochs * args.train_iters_per_epoch
# Validation dataset. For this dataset, we do not need to set up
# shuffling so we can just use a simple infinite loop.
valid_dataloader_ = build_data_loader(valid_dataset, args.micro_batch_size,
args.num_workers, not args.keep_last,
task_collate_fn)
valid_dataloader = _build_infinite_size_dataloader(valid_dataloader_)
# Now that we've built the data loaders, set batch_size arguments
# to the actual batch size the model will see for this dataset.
# This is necessary so pipeline transfers know what size they are
# and the LR schedule, which is based on samples seen, gets set
# correctly.
args.orig_micro_batch_size = args.micro_batch_size
args.orig_global_batch_size = args.global_batch_size
if hasattr(train_dataset, 'sample_multiplier'):
# If our dataset has a sample_multiplier attribute, it means each
# "sample" from the dataset actually contains multiple samples that
# will collapse into the batch dimension (for example, the RACE
# dataset has several options per question), so we need to account
# for that when setting the micro batch size.
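# For example (hypothetical numbers): with a sample multiplier of 4, a
# micro batch size of 8 becomes 32 sequences per micro batch.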
args.micro_batch_size *= train_dataset.sample_multiplier
args.global_batch_size *= train_dataset.sample_multiplier
return train_dataloader, valid_dataloader
def _train(model, optimizer, opt_param_scheduler, forward_step,
train_dataloader, valid_dataloader, end_of_epoch_callback):
"""Train the model."""
args = get_args()
timers = get_timers()
assert get_num_microbatches() == 1, "finetuning with gradient accumulation doesn't currently work"
# Turn on training mode which enables dropout.
for m in model:
m.train()
# Tracking loss.
losses_dict_sum = {}
# Starting epoch and iteration
start_epoch = args.iteration // args.train_iters_per_epoch
start_iteration = args.iteration % args.train_iters_per_epoch
iteration = args.iteration
# Memory reporting flag.
report_memory_flag = True
# For each remaining epoch
timers('interval-time', log_level=0).start(barrier=True)
for epoch in range(start_epoch, args.epochs):
print_rank_0('working on epoch {} ...'.format(epoch + 1))
# Set the data loader epoch to shuffle the index iterator.
train_dataloader.sampler.set_epoch(args.seed + epoch)
# For all the batches in the dataset.
for iteration_, batch in enumerate(train_dataloader):
# Ignore the iterations before starting value
if iteration_ < start_iteration:
continue
# Set to zero so the next epoch does not skip any batches.
start_iteration = 0
# Train for one step.
out = train_step(forward_step, batch, model, optimizer, opt_param_scheduler)
losses_dict, skipped_iter, grad_norm, num_zeros_in_grad = out
iteration += 1
# Logging.
params_norm = None
if args.log_params_norm:
params_norm = calc_params_l2_norm(model)
report_memory_flag = training_log(losses_dict, losses_dict_sum,
optimizer.param_groups[0]['lr'],
iteration,
optimizer.get_loss_scale().item(),
report_memory_flag, skipped_iter,
grad_norm, params_norm, num_zeros_in_grad)
# Autoresume
if args.adlr_autoresume and \
(iteration % args.adlr_autoresume_interval == 0):
check_adlr_autoresume_termination(iteration, model,
optimizer, opt_param_scheduler)
# Checkpointing
saved_checkpoint = False
if args.save and args.save_interval and \
iteration % args.save_interval == 0:
save_checkpoint(iteration, model, optimizer, opt_param_scheduler)
saved_checkpoint = True
# Evaluation
if args.eval_interval and iteration % args.eval_interval == 0:
prefix = 'iteration {}'.format(iteration)
evaluate_and_print_results(prefix, forward_step,
valid_dataloader, model,
iteration, None, False)
# Exiting based on iterations
if args.exit_interval and iteration % args.exit_interval == 0:
if not saved_checkpoint:
save_checkpoint(iteration, model, optimizer, opt_param_scheduler)
torch.distributed.barrier()
print_rank_0('exiting program at iteration {}'.format(iteration))
sys.exit()
# Checkpointing at the end of each epoch.
if args.save:
save_checkpoint(iteration, model, optimizer, opt_param_scheduler)
# Callback at the end of each epoch.
if end_of_epoch_callback is not None:
end_of_epoch_callback(model, epoch)
def finetune(train_valid_datasets_provider, model_provider,
model_type=ModelType.encoder_or_decoder,
forward_step=_cross_entropy_forward_step,
end_of_epoch_callback_provider=None,
task_collate_fn=None):
"""Main finetune function used across all tasks."""
args = get_args()
timers = get_timers()
assert args.rampup_batch_size is None, \
'batch size scaling is not supported for finetuning'
# Train and validation data loaders.
timers('train/valid/test dataset/dataloader', log_level=0).start()
if args.epochs > 0:
train_dataset, valid_dataset = train_valid_datasets_provider()
train_dataloader, valid_dataloader = _build_train_valid_dataloaders(
train_dataset, valid_dataset, task_collate_fn)
else:
args.train_iters = 0
timers('train/valid/test dataset/dataloader').stop()
# Build callback function.
timers('callback function', log_level=0).start()
end_of_epoch_callback = None
if end_of_epoch_callback_provider is not None:
end_of_epoch_callback = end_of_epoch_callback_provider()
timers('callback function').stop()
# Build model, optimizer and learning rate scheduler.
timers('model and optimizer', log_level=0).start()
model, optimizer, opt_param_scheduler = setup_model_and_optimizer(model_provider, model_type)
timers('model and optimizer').stop()
# If pretrained checkpoint is provided and we have not trained for
# any iteration (i.e., iteration is zero), then load the pretrained
# checkpoint.
timers('pretrained checkpoint', log_level=0).start(barrier=True)
if args.iteration == 0 and args.pretrained_checkpoint is not None:
original_load = args.load
args.load = args.pretrained_checkpoint
original_rng = args.no_load_rng
args.no_load_rng = True
_ = load_checkpoint(model, None, None)
args.load = original_load
args.no_load_rng = original_rng
# This is critical when only the model is loaded. We should make sure
# the main parameters are also updated.
optimizer.reload_model_params()
timers('pretrained checkpoint').stop()
# Print setup timing.
print_rank_0('done with setups ...')
timers.log(['train/valid/test dataset/dataloader', 'callback function',
'model and optimizer', 'pretrained checkpoint'], barrier=True)
print_rank_0('training ...')
# Finetune the model.
if args.epochs > 0:
_train(model, optimizer, opt_param_scheduler, forward_step,
train_dataloader, valid_dataloader, end_of_epoch_callback)
# Or just evaluate.
else:
if end_of_epoch_callback is not None:
print_rank_0('evaluation only mode, setting epoch to -1')
end_of_epoch_callback(model, epoch=-1, output_predictions=True)
print_rank_0('done :-)')
|
Megatron-LM-master
|
tasks/finetune_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
""" Tasks data utility."""
import re
import numpy as np
def clean_text(text):
"""Remove new lines and multiple spaces and adjust end of sentence dot."""
text = text.replace("\n", " ")
text = re.sub(r'\s+', ' ', text)
for _ in range(3):
text = text.replace(' . ', '. ')
return text
def build_sample(ids, types, paddings, label, unique_id):
"""Convert to numpy and return a sample consumed by the batch producer."""
ids_np = np.array(ids, dtype=np.int64)
types_np = np.array(types, dtype=np.int64)
paddings_np = np.array(paddings, dtype=np.int64)
sample = ({'text': ids_np,
'types': types_np,
'padding_mask': paddings_np,
'label': int(label),
'uid': int(unique_id)})
return sample
def build_tokens_types_paddings_from_text(text_a, text_b,
tokenizer, max_seq_length):
"""Build token types and paddings, trim if needed, and pad if needed."""
text_a_ids = tokenizer.tokenize(text_a)
text_b_ids = None
if text_b is not None:
text_b_ids = tokenizer.tokenize(text_b)
return build_tokens_types_paddings_from_ids(text_a_ids, text_b_ids,
max_seq_length, tokenizer.cls,
tokenizer.sep, tokenizer.pad)
def build_tokens_types_paddings_from_ids(text_a_ids, text_b_ids, max_seq_length,
cls_id, sep_id, pad_id):
"""Build token types and paddings, trim if needed, and pad if needed."""
ids = []
types = []
paddings = []
# [CLS].
ids.append(cls_id)
types.append(0)
paddings.append(1)
# A.
len_text_a = len(text_a_ids)
ids.extend(text_a_ids)
types.extend([0] * len_text_a)
paddings.extend([1] * len_text_a)
# [SEP].
ids.append(sep_id)
types.append(0)
paddings.append(1)
# B.
if text_b_ids is not None:
len_text_b = len(text_b_ids)
ids.extend(text_b_ids)
types.extend([1] * len_text_b)
paddings.extend([1] * len_text_b)
# Cap the size.
trimmed = False
if len(ids) >= max_seq_length:
max_seq_length_m1 = max_seq_length - 1
ids = ids[0:max_seq_length_m1]
types = types[0:max_seq_length_m1]
paddings = paddings[0:max_seq_length_m1]
trimmed = True
# [SEP].
if (text_b_ids is not None) or trimmed:
ids.append(sep_id)
if text_b_ids is None:
types.append(0)
else:
types.append(1)
paddings.append(1)
# Padding.
padding_length = max_seq_length - len(ids)
if padding_length > 0:
ids.extend([pad_id] * padding_length)
types.extend([pad_id] * padding_length)
paddings.extend([0] * padding_length)
return ids, types, paddings
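if __name__ == "__main__":
    # Usage sketch (not part of the original file): toy ids illustrating the
    # [CLS] A [SEP] B [SEP] + padding layout built above. The id values
    # (101 for [CLS], 102 for [SEP], 0 for [PAD]) are hypothetical.
    ids, types, paddings = build_tokens_types_paddings_from_ids(
        [7, 8], [9], max_seq_length=8, cls_id=101, sep_id=102, pad_id=0)
    assert ids == [101, 7, 8, 102, 9, 102, 0, 0]
    assert types == [0, 0, 0, 0, 1, 1, 0, 0]
    assert paddings == [1, 1, 1, 1, 1, 1, 0, 0]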
|
Megatron-LM-master
|
tasks/data_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Main tasks functionality."""
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
from megatron import get_args
from megatron.initialize import initialize_megatron
def get_tasks_args(parser):
"""Provide extra arguments required for tasks."""
group = parser.add_argument_group(title='tasks')
group.add_argument('--task', type=str, required=True,
help='Task name.')
group.add_argument('--epochs', type=int, default=None,
help='Number of finetuning epochs. Zero results in '
'evaluation only.')
group.add_argument('--pretrained-checkpoint', type=str, default=None,
help='Pretrained checkpoint used for finetuning.')
group.add_argument('--keep-last', action='store_true',
help='Keep the last batch (maybe incomplete) in '
'the data loader')
group.add_argument('--train-data', nargs='+', default=None,
help='Whitespace separated paths or corpora names '
'for training.')
group.add_argument('--valid-data', nargs='*', default=None,
help='path(s) to the validation data.')
group.add_argument('--overlapping-eval', type=int, default=32,
help='Sliding window for overlapping evaluation.')
group.add_argument('--strict-lambada', action='store_true',
help='Use more difficult formulation of lambada.')
# Retriever args
group.add_argument('--qa-data-dev', type=str, default=None,
help='Path to the QA dataset dev file.')
group.add_argument('--qa-data-test', type=str, default=None,
help='Path to the QA dataset test file.')
# Faiss arguments for retriever
group.add_argument('--faiss-use-gpu', action='store_true',
help='Whether to create the FaissMIPSIndex on GPU')
group.add_argument('--faiss-match', type=str, default='string',
choices=['regex', 'string'],
help='Answer matching logic type')
group.add_argument('--faiss-topk-retrievals', type=int, default=100,
help='Number of blocks to use as top-k during retrieval')
# finetune for retriever
group.add_argument('--eval-micro-batch-size', type=int, default=None,
help='Eval Batch size per model instance (local batch '
'size). Global batch size is local batch size '
'times data parallel size.')
group.add_argument('--train-with-neg', action='store_true',
help='Whether to use negative examples during model '
'training')
group.add_argument('--train-hard-neg', type=int, default=0,
help='Number of hard negative examples to use during '
'training')
# parameters for Av.rank validation method
# Following options/arguments have been taken directly from DPR codebase
group.add_argument('--val-av-rank-hard-neg', type=int, default=30,
help='Av.rank validation: how many hard negatives to'
' take from each question pool')
group.add_argument('--val-av-rank-other-neg', type=int, default=30,
help='Av.rank validation: how many other negatives to'
' take from each question pool')
return parser
if __name__ == '__main__':
initialize_megatron(extra_args_provider=get_tasks_args)
args = get_args()
if args.num_layers_per_virtual_pipeline_stage is not None:
print("Interleaved pipeline schedule is not yet supported for downstream tasks.")
exit()
if args.task == 'RACE':
from race.finetune import main
elif args.task in ['MNLI', 'QQP']:
from glue.finetune import main
elif args.task in ['LAMBADA', 'WIKITEXT103']:
from zeroshot_gpt.evaluate import main
elif args.task in ['ICT-ZEROSHOT-NQ', 'RETRIEVER-EVAL']:
from orqa.evaluate_orqa import main
elif args.task in ['RET-FINETUNE-NQ']:
from orqa.supervised.finetune import main
else:
raise NotImplementedError('Task {} is not implemented.'.format(
args.task))
main()
|
Megatron-LM-master
|
tasks/main.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Evaluation utilities."""
import os
import time
from functools import partial
import torch
from megatron import get_args
from megatron import print_rank_last, is_last_rank
from megatron.core import mpu
from megatron.schedules import get_forward_backward_func
from tasks.finetune_utils import build_data_loader
from tasks.finetune_utils import process_batch
def accuracy_func_provider(single_dataset_provider):
"""Provide function that calculates accuracies."""
args = get_args()
# Build dataloaders.
datapaths = args.valid_data
dataloaders = []
for datapath in datapaths:
dataset = single_dataset_provider(datapath)
dataloader = build_data_loader(
dataset, args.orig_micro_batch_size, num_workers=args.num_workers,
drop_last=(mpu.get_data_parallel_world_size() > 1))
dataloaders.append((dataset.dataset_name, dataloader))
def metrics_func(model, epoch, output_predictions=False):
print_rank_last('calculating metrics ...')
correct = 0
total = 0
if output_predictions:
assert mpu.get_data_parallel_world_size() == 1
named_predictions = []
names = 'predictions'
for name, dataloader in dataloaders:
output = calculate_correct_answers(name, model, dataloader,
epoch, output_predictions)
if not output_predictions:
correct_ans, total_count = output
else:
correct_ans, total_count, predictions = output
named_predictions.append((name, predictions))
names += '_' + name
correct += correct_ans
total += total_count
if is_last_rank():
percent = float(correct) * 100.0 / float(total)
print(' >> |epoch: {}| overall: correct / total = {} / {} = '
'{:.4f} %'.format(epoch, correct, total, percent))
if output_predictions and is_last_rank():
assert args.load is not None
filename = os.path.join(args.load, names + '.pt')
torch.save(named_predictions, filename)
return metrics_func
def calculate_correct_answers(name, model, dataloader,
epoch, output_predictions):
"""Calculate correct over total answers and return prediction if the
`output_predictions` is true."""
args = get_args()
forward_backward_func = get_forward_backward_func()
start_time = time.time()
for m in model:
m.eval()
saved_micro_batch_size = args.micro_batch_size
saved_global_batch_size = args.global_batch_size
ds = dataloader.dataset
if hasattr(ds, 'sample_multiplier'):
# If our dataset has a sample_multiplier attribute, it means each
# "sample" from the dataset actually contains multiple samples that
# will collapse into the batch dimension (for example, the RACE
# dataset has several options per question), so we need to account
# for that when setting the micro batch size.
sample_multiplier = ds.sample_multiplier
else:
sample_multiplier = 1
micro_batch_size_times_data_parallel = args.orig_micro_batch_size * args.data_parallel_size
num_micro_batches = args.orig_global_batch_size // micro_batch_size_times_data_parallel
def loss_func(output_predictions, labels, output_tensor):
logits = output_tensor
loss_dict = {}
# Add output predictions.
if output_predictions:
assert False
loss_dict['softmaxes'] = torch.nn.Softmax(dim=-1)(
logits.float()).data.cpu().numpy().tolist()
loss_dict['labels'] = labels.data.cpu().numpy().tolist()
loss_dict['ids'] = batch['uid'].cpu().numpy().tolist()
# Compute the correct answers.
predicted = torch.argmax(logits, dim=-1)
corrects = (predicted == labels)
# Add to the counters.
loss_dict['total'] = labels.size(0)
loss_dict['correct'] = corrects.sum().item()
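# During evaluation only the counters (and optional predictions) matter,
# so a dummy loss of 0 is returned.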
return 0, loss_dict
# defined inside to capture output_predictions
def correct_answers_forward_step(batch, model):
try:
batch_ = next(batch)
except BaseException:
batch_ = batch
tokens, types, labels, attention_mask = process_batch(batch_)
# Forward model.
args = get_args()
output_tensor = model(tokens, attention_mask, tokentype_ids=types)
return output_tensor, partial(loss_func, output_predictions, labels)
with torch.no_grad():
# For all the batches in the dataset.
total = 0
correct = 0
if output_predictions:
# This option is only possible when data parallel size is 1.
assert mpu.get_data_parallel_world_size() == 1
softmaxes = []
labels = []
ids = []
for _, batch in enumerate(dataloader):
# For evaluation-only mode we use drop_last = False to get all the
# samples, which means we might not have a full batch, so we adjust
# the batch size here to the actual batch size of the data
actual_batch_size = len(batch['label'])
# ... applying sample_multiplier if necessary
args.micro_batch_size = actual_batch_size * sample_multiplier
args.global_batch_size = actual_batch_size * sample_multiplier * num_micro_batches
loss_dicts = forward_backward_func(correct_answers_forward_step, batch, model,
optimizer=None, timers=None, forward_only=True)
for loss_dict in loss_dicts:
if output_predictions:
softmaxes.extend(loss_dict['softmaxes'])
labels.extend(loss_dict['labels'])
ids.extend(loss_dict['ids'])
total += loss_dict['total']
correct += loss_dict['correct']
for m in model:
m.train()
args.micro_batch_size = saved_micro_batch_size
args.global_batch_size = saved_global_batch_size
# Reduce.
if mpu.is_pipeline_last_stage():
unreduced = torch.cuda.LongTensor([correct, total])
torch.distributed.all_reduce(unreduced,
group=mpu.get_data_parallel_group())
# Print on screen.
correct_ans = unreduced[0].item()
total_count = unreduced[1].item()
percent = float(correct_ans) * 100.0 / float(total_count)
elapsed_time = time.time() - start_time
print_rank_last(' > |epoch: {}| metrics for {}: correct / total '
'= {} / {} = {:.4f} %, elapsed time (sec): {:.3f}'.format(
epoch, name, correct_ans, total_count,
percent, elapsed_time))
if output_predictions:
return correct_ans, total_count, (softmaxes, labels, ids)
return correct_ans, total_count
if output_predictions:
return 0, 0, ()
return 0, 0
|
Megatron-LM-master
|
tasks/eval_utils.py
|
import os
import argparse
import collections
import numpy as np
import torch
def process_files(args):
all_predictions = collections.OrderedDict()
all_labels = collections.OrderedDict()
all_uid = collections.OrderedDict()
for path in args.paths:
path = os.path.join(path, args.prediction_name)
try:
data = torch.load(path)
for dataset in data:
name, d = dataset
predictions, labels, uid = d
if name not in all_predictions:
all_predictions[name] = np.array(predictions)
if args.labels is None:
args.labels = [i for i in range(all_predictions[name].shape[1])]
if args.eval:
all_labels[name] = np.array(labels)
all_uid[name] = np.array(uid)
else:
all_predictions[name] += np.array(predictions)
assert np.allclose(all_uid[name], np.array(uid))
except Exception as e:
print(e)
continue
return all_predictions, all_labels, all_uid
def get_threshold(all_predictions, all_labels, one_threshold=False):
if one_threshold:
all_predictions = {'combined': np.concatenate(list(all_predictions.values()))}
all_labels = {'combined': np.concatenate(list(all_labels.values()))}
out_thresh = []
for dataset in all_predictions:
preds = all_predictions[dataset]
labels = all_labels[dataset]
out_thresh.append(calc_threshold(preds, labels))
return out_thresh
def calc_threshold(p, l):
trials = [i / 100. for i in range(100)]
best_acc = float('-inf')
best_thresh = 0
for t in trials:
acc = ((apply_threshold(p, t).argmax(-1) == l).astype(float)).mean()
if acc > best_acc:
best_acc = acc
best_thresh = t
return best_thresh
def apply_threshold(preds, t):
assert (np.allclose(preds.sum(-1), np.ones(preds.shape[0])))
prob = preds[:, -1]
thresholded = (prob >= t).astype(int)
preds = np.zeros_like(preds)
preds[np.arange(len(thresholded)), thresholded.reshape(-1)] = 1
return preds
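# Illustrative note (not part of the original file): for two-class softmax
# rows, apply_threshold(np.array([[0.7, 0.3]]), t=0.25) marks the row as
# class 1 because its last-column probability (0.3) >= 0.25.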
def threshold_predictions(all_predictions, threshold):
if len(threshold) != len(all_predictions):
threshold = threshold + [threshold[-1]] * (len(all_predictions) - len(threshold))
for i, dataset in enumerate(all_predictions):
thresh = threshold[i]
preds = all_predictions[dataset]
all_predictions[dataset] = apply_threshold(preds, thresh)
return all_predictions
def postprocess_predictions(all_predictions, all_labels, args):
for d in all_predictions:
all_predictions[d] = all_predictions[d] / len(args.paths)
if args.calc_threshold:
args.threshold = get_threshold(all_predictions, all_labels, args.one_threshold)
print('threshold', args.threshold)
if args.threshold is not None:
all_predictions = threshold_predictions(all_predictions, args.threshold)
return all_predictions, all_labels
def write_predictions(all_predictions, all_labels, all_uid, args):
all_correct = 0
count = 0
for dataset in all_predictions:
preds = all_predictions[dataset]
preds = np.argmax(preds, -1)
if args.eval:
correct = (preds == all_labels[dataset]).sum()
num = len(all_labels[dataset])
accuracy = correct / num
count += num
all_correct += correct
print(accuracy)
if not os.path.exists(os.path.join(args.outdir, dataset)):
os.makedirs(os.path.join(args.outdir, dataset))
outpath = os.path.join(
args.outdir, dataset, os.path.splitext(
args.prediction_name)[0] + '.tsv')
with open(outpath, 'w') as f:
f.write('id\tlabel\n')
f.write('\n'.join(str(uid) + '\t' + str(args.labels[p])
for uid, p in zip(all_uid[dataset], preds.tolist())))
if args.eval:
print(all_correct / count)
def ensemble_predictions(args):
all_predictions, all_labels, all_uid = process_files(args)
all_predictions, all_labels = postprocess_predictions(all_predictions, all_labels, args)
write_predictions(all_predictions, all_labels, all_uid, args)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--paths', required=True, nargs='+',
help='paths to checkpoint directories used in ensemble')
parser.add_argument('--eval', action='store_true',
help='compute accuracy metrics against labels (dev set)')
parser.add_argument('--outdir',
help='directory to place ensembled predictions in')
parser.add_argument('--prediction-name', default='test_predictions.pt',
help='name of predictions in checkpoint directories')
parser.add_argument('--calc-threshold', action='store_true',
help='calculate threshold classification')
parser.add_argument('--one-threshold', action='store_true',
help='use one threshold for all subdatasets')
parser.add_argument('--threshold', nargs='+', default=None, type=float,
help='user supplied threshold for classification')
parser.add_argument('--labels', nargs='+', default=None,
help='whitespace separated list of label names')
args = parser.parse_args()
ensemble_predictions(args)
if __name__ == '__main__':
main()
|
Megatron-LM-master
|
tasks/ensemble_classifier.py
|
# The following code is adapted from
# https://github.com/facebookresearch/ParlAI/blob/master/parlai/core/metrics.py,
# which is licensed under the MIT license. More details on the license can be
# found at https://github.com/facebookresearch/ParlAI/blob/master/LICENSE.
"""Provides standard metric evaluations for dialog."""
from collections import Counter
from typing import List
import numpy as np
import re
re_art = re.compile(r'\b(a|an|the)\b')
re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')
def normalize_answer(s):
"""
Lower text and remove punctuation, articles and extra whitespace.
"""
s = s.lower()
s = re_punc.sub(' ', s)
s = re_art.sub(' ', s)
s = ' '.join(s.split())
return s
class F1Metric:
"""
Helper class which computes token-level F1.
"""
@staticmethod
def _prec_recall_f1_score(pred_items, gold_items):
"""
Compute precision, recall and f1 given a set of gold and prediction items.
:param pred_items: iterable of predicted values
:param gold_items: iterable of gold values
:return: tuple (p, r, f1) for precision, recall, f1
"""
common = Counter(gold_items) & Counter(pred_items)
num_same = sum(common.values())
if num_same == 0:
return 0, 0, 0
precision = 1.0 * num_same / len(pred_items)
recall = 1.0 * num_same / len(gold_items)
f1 = (2 * precision * recall) / (precision + recall)
return precision, recall, f1
@staticmethod
def compute_each_pair(guess: str, answer: str):
if answer == "":
return None, None, None
if guess == "":
return 0, 0, 0
g_tokens = normalize_answer(guess).split()
a_tokens = normalize_answer(answer).split()
precision, recall, f1 = F1Metric._prec_recall_f1_score(g_tokens, a_tokens)
return precision, recall, f1
@staticmethod
def compute_all_pairs(guesses: List[str], answers: List[str]):
# sanity check: expect one guess per answer
assert len(guesses) == len(answers)
precision_list, recall_list, f1_list = [], [], []
for guess, answer in zip(guesses, answers):
precision, recall, f1 = F1Metric.compute_each_pair(guess, answer)
if precision is None or recall is None or f1 is None:
continue
precision_list.append(precision)
recall_list.append(recall)
f1_list.append(f1)
return np.mean(precision_list), np.mean(recall_list), np.mean(f1_list)
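if __name__ == "__main__":
    # Usage sketch (not part of the original file): token-level F1 over a
    # single toy guess/answer pair; the strings are made up for illustration.
    p, r, f1 = F1Metric.compute_all_pairs(
        ["the cat sat on the mat"], ["a cat sat on the mat today"])
    print("precision={:.3f} recall={:.3f} f1={:.3f}".format(p, r, f1))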
|
Megatron-LM-master
|
tasks/msdp/metrics.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Preprocessing for Wizard of Wikipedia and Wizard of Internet datasets"""
import torch
import argparse
from nltk import word_tokenize
from tqdm import tqdm
import numpy as np
import json
def get_args():
parser = argparse.ArgumentParser(description="Preprocessing")
parser.add_argument("--func", type=str, default=None,
help="choose to run which function")
parser.add_argument("--raw_file", type=str, default=None,
help="path of the input file")
parser.add_argument("--processed_file", type=str, default=None,
help="path of the output file")
parser.add_argument("--knwl_ref_file", type=str, default=None,
help="path of the knowledge reference file")
parser.add_argument("--resp_ref_file", type=str, default=None,
help="path of the knowledge reference file")
parser.add_argument("--knwl_gen_file", type=str, default=None,
help="path of the generated knowledge file")
parser.add_argument("--test_file", type=str, default=None,
help="path of the test file")
parser.add_argument("--train_file", type=str, default=None,
help="path of the train file")
parser.add_argument("--model_file", type=str, default=None,
help="path of the model file")
parser.add_argument("--data_type", type=str, default=None,
help="data types, choose one out of three types: \
wow_seen, wow_unseen, and woi")
parser.add_argument("--seed", type=int, default=1234,
help="random seed")
args = parser.parse_args()
return args
def process_wow_dataset(raw_file, processed_file, knwl_ref_file, resp_ref_file):
"""
This is a function used for processing the wizard of wikipedia (wow) dataset
Expected processed format:
topic \t dialogue context \t golden knowledge \t golden response
"""
# loading the raw data
print("> Loading data from %s" % raw_file)
with open(raw_file, "r") as fr:
dialog_data = json.load(fr)
print("> Processing data ...")
fproc = open(processed_file, "w")
fknwl = open(knwl_ref_file, "w") if knwl_ref_file else None
fresp = open(resp_ref_file, "w") if resp_ref_file else None
for i, sample in enumerate(tqdm(dialog_data)):
# get all the dialog data for a single dialog sample
dialog = sample["dialog"]
turn_list = [] # collect the dialog history
# processing for each single dialog sample
for j, turn in enumerate(dialog):
# text of each turn
text = turn["text"]
if not (text.endswith("?") or text.endswith(".") or text.endswith("!")):
text = text + "."
if j == 0:
# first turn
turn_list.append(text)
continue
speaker = turn["speaker"].lower()
if "wizard" in speaker:
checked_sentence = list(turn["checked_sentence"].values()) # knowledge
checked_passage = list(turn["checked_passage"].values()) # topic
assert len(checked_sentence) <= 1
# get the ground truth knowledge
if len(checked_sentence) > 0:
checked_sentence = checked_sentence[0]
else:
checked_sentence = "no_passages_used"
if len(checked_passage) == 1:
checked_passage = checked_passage[0]
else:
checked_passage = "no_passages_used"
# get the topic
if checked_passage != "no_passages_used":
topic = checked_passage
else:
topic = sample["chosen_topic"]
dialog_context = " [SEP] ".join(turn_list)
knowledge = checked_sentence
response = text
# add the response into the dialog history
turn_list.append(response)
# write to the output files
fproc.write(topic + "\t" + dialog_context + "\t" + \
knowledge + "\t" + response + "\n")
if fknwl:
fknwl.write(knowledge + "\n")
if fresp:
# tokenize for evaluation
response = " ".join(word_tokenize(response))
fresp.write(response + "\n")
else:
assert "apprentice" in speaker
turn_list.append(text)
fproc.close()
if fknwl:
fknwl.close()
if fresp:
fresp.close()
def process_woi_dataset(raw_file, processed_file, knwl_ref_file, resp_ref_file):
"""
This is a function used for processing the wizard of internet (woi) dataset
Expected processed format:
topic \t dialogue context \t golden knowledge \t golden response
"""
print("> Processing %s" % raw_file)
fproc = open(processed_file, "w")
fknwl = open(knwl_ref_file, "w") if knwl_ref_file else None
fresp = open(resp_ref_file, "w") if resp_ref_file else None
with open(raw_file, "r") as fr:
for i, line in tqdm(enumerate(fr)):
# read line by line, each line uses json format
line = line.strip()
item_dict = json.loads(line)
# item_dict is a dictionary
# its key is the data id, and its value contains all the data content
item_dict = item_dict.values()
item_dict = list(item_dict)[0] # len(item_dict) == 1
# get the whole dialog data for a single dialog sample
dialog_data = item_dict['dialog_history']
length = len(dialog_data)
turn_list = [] # collect the dialog history
search_text = ""
for i in range(length):
item = dialog_data[i]
action = item['action']
if action == "Wizard => SearchAgent":
search_text = item['text']
elif action == "Wizard => Apprentice":
if len(turn_list) == 0:
# first turn
turn = item['text']
turn_list.append(turn)
continue
# get the relevant content
contents = item["context"]["contents"]
selects = item["context"]["selected_contents"]
flag = selects[0][0]
selects = selects[1:]
assert len(selects) == len(contents)
# get the topic
if flag:
# no knowledge sentence is used for the response
topic = "no_topic"
knwl_sent = "no_passages_used"
else:
# we consider the search text as the topic
topic = search_text
# get the knowledge sentence
knwl_sent = ""
for content, select in zip(contents, selects):
content = content['content']
assert len(content) == len(select)
for c, s in zip(content, select):
if s:
knwl_sent = c
break
if knwl_sent == "":
# no knowledge is used for the response
topic = "no_topic"
knwl_sent = "no_passages_used"
# get dialogue context, knowledge, and response
dialog_context = " [SEP] ".join(turn_list)
response = item['text']
# processing
topic = topic.replace("\n", "").replace("\r", \
"").replace("\t", "")
dialog_context = dialog_context.replace("\n", "").replace("\r", \
"").replace("\t", "")
knwl_sent = knwl_sent.replace("\n", "").replace("\r", \
"").replace("\t", "")
response = response.replace("\n", "").replace("\r", \
"").replace("\t", "")
if topic != "no_topic":
# write to the output files
fproc.write(topic + "\t" + dialog_context + "\t" + \
knwl_sent + "\t" + response + "\n")
if fknwl:
fknwl.write(knwl_sent + "\n")
if fresp:
# tokenize for evaluation
response = " ".join(word_tokenize(response))
fresp.write(response + "\n")
turn_list.append(response)
elif action == "Apprentice => Wizard":
turn = item['text']
turn_list.append(turn)
else:
assert action == "SearchAgent => Wizard", \
"Please check whether you have used the correct data!"
fproc.close()
if fknwl:
fknwl.close()
if fresp:
fresp.close()
def get_database(test_datapath, train_datapath, data_type):
"""Get the database by topics"""
assert data_type in ["wow_seen", "wow_unseen", "woi"], \
"Please input a correct data type!!"
# get test data topic dictionary
print("> reading test data from %s" % test_datapath)
test_topics = {}
with open(test_datapath, "r") as f:
for i, line in enumerate(f):
line = line.strip()
splits = line.split("\t")
topic = splits[0]
test_topics[topic] = True
print("> reading data from %s" % train_datapath)
train_data_by_topic = {}
dialog_data_by_topic = {}
dialog_examples = []
with open(train_datapath, "r") as f:
for i, line in enumerate(f):
line = line.strip()
splits = line.split("\t")
topic = splits[0]
turns = splits[1].split(" [SEP] ")[-3:]
knowledge = splits[2]
response = splits[3]
# filtering data samples
if knowledge == "no_passages_used":
# when no knowledge is used
continue
if data_type != "wow_seen" and ("(" in knowledge or ")" in knowledge):
# when bracket exists in the knowledge
continue
if data_type != "wow_seen" and topic not in knowledge:
# when topic does not exist in the knowledge
continue
# get the instance
last_turn = turns[-1]
instance = "( " + last_turn + " ) " + topic + " => " + knowledge
# construct dialog example
dialog_example = ""
if data_type != "wow_seen":
dialog_example += "( " + topic + " ) "
for i, turn in enumerate(turns):
if i != 0:
dialog_example += " "
dialog_example += turn
# check overlaps
if topic in test_topics:
if topic not in train_data_by_topic:
train_data_by_topic[topic] = [instance]
else:
train_data_by_topic[topic].append(instance)
if topic not in dialog_data_by_topic:
dialog_data_by_topic[topic] = [dialog_example]
else:
dialog_data_by_topic[topic].append(dialog_example)
else:
# filtering data samples
if len(knowledge.split()) > 20:
# knowledge is too long
continue
if knowledge.startswith("It") or knowledge.startswith("it") or \
knowledge.startswith("This") or knowledge.startswith("this"):
continue
# append all the data into dialogue examples list
dialog_examples.append((topic, dialog_example, instance))
return train_data_by_topic, dialog_data_by_topic, dialog_examples
emb_dict = {}
def select_prompts_based_on_similarity(
query, dialog_list, prompt_list, topic, tokenizer, encoder, topk):
"""Select samples based on the similarity"""
with torch.no_grad():
# get the query embeddings
query_ids = tokenizer.encode(query)
query_ids = torch.LongTensor([query_ids]).cuda()
query_emb = encoder(input_ids=query_ids).pooler_output
query_emb = query_emb[0]
# calculate embeddings for the samples in the database
if topic in emb_dict:
example_embeddings = emb_dict[topic]
example_embeddings = example_embeddings.cuda()
else:
for idx, example in enumerate(dialog_list):
example_ids = tokenizer.encode(example)
example_ids = torch.LongTensor([example_ids]).cuda()
example_emb = encoder(input_ids=example_ids).pooler_output
if idx == 0:
example_embeddings = example_emb
else:
example_embeddings = torch.cat(
(example_embeddings, example_emb), dim=0)
emb_dict[topic] = example_embeddings.cpu()
# compare the similarity and select the topk samples
similarity_list = example_embeddings.matmul(query_emb)
_, indices = torch.topk(similarity_list, k=topk)
indices = indices.tolist()
indices = indices[::-1] # reverse the order
selected_prompts = []
for index in indices:
# index = index.item()
selected_prompts.append(prompt_list[index])
return selected_prompts
def prompt_selection_for_knowledge_generation(
        test_datapath, train_datapath, model_path, output_prompt_path, data_type):
    """Selecting prompts for the knowledge generation"""

    print("> Selecting prompts for the knowledge generation")
    train_data_by_topic, dialog_data_by_topic, dialog_examples = \
        get_database(test_datapath, train_datapath, data_type)

    from transformers import DPRQuestionEncoderTokenizer
    print("> loading tokenizer and encoder")
    tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
        'facebook/dpr-question_encoder-single-nq-base')
    encoder = torch.load(model_path).cuda()

    print("> getting dialog embeddings")
    with torch.no_grad():
        for idx, example in tqdm(enumerate(dialog_examples)):
            dialog = example[1]
            dialog_ids = tokenizer.encode(dialog)
            dialog_ids = torch.LongTensor([dialog_ids]).cuda()
            dialog_emb = encoder(input_ids=dialog_ids).pooler_output
            if idx == 0:
                dialog_embeddings = dialog_emb
            else:
                dialog_embeddings = torch.cat((dialog_embeddings, dialog_emb), dim=0)

    print("> reading test data from %s" % test_datapath)
    prompt_list_for_each_sample = []
    with open(test_datapath, "r") as f:
        for i, line in tqdm(enumerate(f)):
            line = line.strip()
            splits = line.split("\t")
            topic = splits[0]
            turns = splits[1].split(" [SEP] ")[-3:]

            # get the query sentence
            query_sent = ""
            if data_type != "seen":
                query_sent += "( " + topic + " ) "
            for i, turn in enumerate(turns):
                if i != 0:
                    query_sent += " "
                query_sent += turn

            if topic not in train_data_by_topic:
                # get the query embedding
                query_ids = tokenizer.encode(query_sent)
                query_ids = torch.LongTensor([query_ids]).cuda()
                query_emb = encoder(input_ids=query_ids).pooler_output
                query_emb = query_emb[0]

                # calculate the similarity
                similarity_list = dialog_embeddings.matmul(query_emb)
                _, indices = torch.sort(similarity_list)
                indices = indices.tolist()

                selected_topics = {}
                selected_prompts = []
                num_prompt = 0
                for index in indices:
                    example = dialog_examples[index]
                    topic_temp = example[0]
                    if topic_temp not in selected_topics:
                        selected_topics[topic_temp] = True
                        selected_prompts.append(example[2])
                        num_prompt += 1
                        if num_prompt == 10:
                            break

                # get the selected samples
                example_list = selected_prompts[::-1]
                key = topic + " " + turns[-1]
                prompt_list_for_each_sample.append({key: example_list})

            else:
                num_data_sample = min(len(train_data_by_topic[topic]), 10)
                total_example_list = train_data_by_topic[topic]
                dialog_list = dialog_data_by_topic[topic]
                assert len(dialog_list) == len(train_data_by_topic[topic])

                # calculate the similarity
                example_list = select_prompts_based_on_similarity(
                    query_sent, dialog_list, total_example_list,
                    topic, tokenizer, encoder, topk=num_data_sample)

                key = topic + " " + turns[-1]
                prompt_list_for_each_sample.append({key: example_list})

    print("writing to %s" % output_prompt_path)
    with open(output_prompt_path, "w") as f:
        for instance in tqdm(prompt_list_for_each_sample):
            json.dump(instance, f)
            f.write("\n")
def prompt_selection_for_response_generation(input_path, output_path, seed):
    """Selecting prompts for the response generation"""

    print("> Selecting prompts for the response generation")
    print("> set random seed")
    np.random.seed(seed)

    prompt_example_list = []
    print("> reading data from %s" % input_path)
    with open(input_path, "r") as f:
        for i, line in tqdm(enumerate(f)):
            line = line.strip()
            splits = line.split("\t")

            # get the topic, context, knowledge and response
            topic = splits[0]
            dialog_context = splits[1]
            knowledge = splits[2]
            response = splits[3]
            turns = dialog_context.split(" [SEP] ")[-3:]
            if knowledge == "no_passages_used":
                continue

            # calculate the overlap ratio
            from nltk import word_tokenize
            knowledge_sent_token_list = word_tokenize(knowledge)
            knowledge_sent_token_dict = {token: True for token in knowledge_sent_token_list}
            knowledge_len = len(knowledge_sent_token_list)
            response_token_list = word_tokenize(response)
            response_len = len(response_token_list)
            num_overlap_token = 0
            accumulator = 0
            for token in response_token_list:
                if token in knowledge_sent_token_dict:
                    accumulator += 1
                else:
                    if accumulator >= 10:
                        num_overlap_token += accumulator
                    accumulator = 0
            if accumulator >= 10:
                num_overlap_token += accumulator

            # filtering the data based on the ratio
            if num_overlap_token > response_len * 0.9 or num_overlap_token < response_len * 0.6:
                continue
            if num_overlap_token < knowledge_len * 0.8:
                continue

            last_turn = " ".join(word_tokenize(turns[-1]))
            knowledge = " ".join(word_tokenize(knowledge))
            response = " ".join(word_tokenize(response))

            prompt_example = ""
            # add dialog context
            prompt_example += "Topic: " + topic + ". "
            prompt_example += "User says: " + last_turn + " "
            prompt_example += "We know that: " + knowledge + " "
            prompt_example += "System replies: " + response

            prompt_example_list.append(prompt_example)

    # shuffle the prompt examples
    np.random.shuffle(prompt_example_list)

    print("> writing to %s" % output_path)
    with open(output_path, "w") as f:
        # f.write("Generate the System's response based on the knowledge sentence:\n")
        for i in tqdm(range(20)):
            example = prompt_example_list[i]
            f.write(example + "\n")
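
# ----------------------------------------------------------------------
# Worked example of the overlap filter above (added for clarity, not part
# of the original file). Suppose the tokenized response has 20 tokens and
# 15 consecutive response tokens each appear in the knowledge token set
# (runs shorter than 10 tokens are ignored), so num_overlap_token = 15.
# The sample is kept only if
#     0.6 * response_len <= num_overlap_token <= 0.9 * response_len,
#     i.e. 12 <= 15 <= 18, which is satisfied, and
#     num_overlap_token >= 0.8 * knowledge_len,
#     i.e. the knowledge sentence may have at most 18 tokens (15 / 0.8 = 18.75).
# ----------------------------------------------------------------------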
def prepare_input_for_response_generation(test_file, knwl_gen_file, processed_file):
    """Preparing inputs for the response generation"""

    print("> Reading knowledge file from %s" % knwl_gen_file)
    # get the knowledge list
    with open(knwl_gen_file, "r") as f:
        knowledge_list = f.readlines()

    print("> Processing ...")
    with open(test_file, "r") as fr:
        with open(processed_file, "w") as fw:
            for line_num, line in enumerate(tqdm(fr)):
                line = line.strip()
                splits = line.split("\t")

                # prepare topic, context, knowledge and response
                topic = splits[0]
                dialog_context = splits[1]
                response = splits[3]
                knowledge = knowledge_list[line_num]
                knowledge = knowledge.strip()
                if "<|endoftext|>" in knowledge:
                    knowledge = knowledge.replace("<|endoftext|>", "")

                # write to the output file
                fw.write(topic + "\t" + dialog_context + "\t" \
                         + knowledge + "\t" + response + "\n")


if __name__ == "__main__":

    args = get_args()

    if args.func == "process_wow_dataset":
        process_wow_dataset(args.raw_file, args.processed_file, args.knwl_ref_file, args.resp_ref_file)

    elif args.func == "process_woi_dataset":
        process_woi_dataset(args.raw_file, args.processed_file, args.knwl_ref_file, args.resp_ref_file)

    elif args.func == "get_knwl_gen_prompts":
        prompt_selection_for_knowledge_generation(
            args.test_file, args.train_file, args.model_file,
            args.processed_file, args.data_type)

    elif args.func == "get_resp_gen_prompts":
        prompt_selection_for_response_generation(
            args.train_file, args.processed_file, args.seed)

    elif args.func == "prepare_input":
        prepare_input_for_response_generation(
            args.test_file, args.knwl_gen_file, args.processed_file)
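
# ----------------------------------------------------------------------
# Format sketch (added for clarity, not part of the original file). The
# selection functions above assume each processed line is tab-separated:
#     topic \t dialog_context \t knowledge \t response
# with dialogue turns joined by " [SEP] ". The hypothetical line below
# shows how a knowledge-generation prompt instance is assembled from it.
# ----------------------------------------------------------------------
# line = "Blue\tI like blue. [SEP] Why do you like it?\tBlue is one of the three primary colours.\tBecause it is a primary colour."
# splits = line.split("\t")
# topic = splits[0]                           # "Blue"
# last_turn = splits[1].split(" [SEP] ")[-1]  # "Why do you like it?"
# knowledge = splits[2]
# instance = "( " + last_turn + " ) " + topic + " => " + knowledge
# # => "( Why do you like it? ) Blue => Blue is one of the three primary colours."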
|
Megatron-LM-master
|
tasks/msdp/preprocessing.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Model evaluation"""
from megatron import get_args
from megatron import print_rank_0
from tasks.msdp.metrics import F1Metric
from tqdm import tqdm
def evaluate_f1(guess_file, answer_file):
    """Evaluating F1 Score"""

    guess_list = []
    print_rank_0('reading %s' % guess_file)
    with open(guess_file, "r") as f:
        for i, line in enumerate(tqdm(f)):
            line = line.strip()
            if "<|endoftext|>" in line:
                line = line.replace("<|endoftext|>", "")
            guess_list.append(line)

    answer_list = []
    print_rank_0('reading %s' % answer_file)
    with open(answer_file, "r") as f:
        for i, line in enumerate(tqdm(f)):
            line = line.strip()
            if line == "no_passages_used":
                line = ""
            answer_list.append(line)

    assert len(guess_list) == len(answer_list), \
        "lengths of guess and answer are different!"

    precision, recall, f1 = F1Metric.compute_all_pairs(guess_list, answer_list)
    print_rank_0('Precision: %.4f; recall: %.4f; f1: %.4f' % (precision, recall, f1))

    print_rank_0('done :-)')


def main():
    args = get_args()

    evaluate_f1(args.guess_file, args.answer_file)
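
# ----------------------------------------------------------------------
# Minimal sketch (added for clarity, not part of the original file),
# assuming F1Metric.compute_all_pairs averages a token-overlap F1 of this
# kind over all guess/answer pairs. The helper below is hypothetical and
# is never called by this module.
# ----------------------------------------------------------------------
from collections import Counter

def _unigram_f1_sketch(guess, answer):
    """Unigram precision/recall/F1 for a single guess/answer pair (illustrative only)."""
    guess_tokens, answer_tokens = guess.split(), answer.split()
    # count tokens shared between guess and answer (with multiplicity)
    common = Counter(guess_tokens) & Counter(answer_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0, 0.0, 0.0
    precision = num_same / len(guess_tokens)
    recall = num_same / len(answer_tokens)
    f1 = 2 * precision * recall / (precision + recall)
    return precision, recall, f1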
|
Megatron-LM-master
|
tasks/msdp/evaluate.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Prompting the pretrained language model to generate knowledge/response"""
import json
import torch
import requests
from nltk import word_tokenize
from megatron import get_args
from megatron import print_rank_0
from megatron import get_tokenizer
from megatron.core import mpu
from megatron.model import GPTModel
from megatron.training import get_model
from megatron.checkpointing import load_checkpoint
from megatron.initialize import initialize_megatron
from megatron.text_generation import generate_and_post_process
def call_model_api(inputs, tokens_to_generate):
    """Calling the model api to get the output generations"""

    args = get_args()

    # The following is an example of using the Megatron API
    # You can also implement your own API function to place this part
    headers = {'Content-Type': 'application/json; charset=UTF-8'}
    data = {"prompts": [inputs], "tokens_to_generate": tokens_to_generate, "top_k": 1}
    data_json = json.dumps(data)
    outputs = requests.put(args.megatron_api_url, headers=headers, data=data_json).json()["text"][0]

    input_len = len(inputs)
    outputs = outputs[input_len:]
    outputs = outputs.split("\n")[0].strip()

    return outputs
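
# ----------------------------------------------------------------------
# Request/response sketch (added for clarity, not part of the original
# file). call_model_api above PUTs a JSON body of the form
#     {"prompts": ["<prompt text>"], "tokens_to_generate": 64, "top_k": 1}
# to args.megatron_api_url and reads response.json()["text"][0], which
# contains the prompt followed by the generation; the function then strips
# the prompt prefix and keeps only the first generated line.
# ----------------------------------------------------------------------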
def read_prompts(prompt_path, prompt_type, n_example):
    """Read prompt data"""

    if prompt_type == "knowledge":
        # prompts for the knowledge generation
        prompt_examples_dict = {}
        # read prompt_path
        with open(prompt_path, "r") as f:
            for i, line in enumerate(f):
                line = line.strip()
                line_dict = json.loads(line)
                key = list(line_dict.keys())[0]

                if key not in prompt_examples_dict:
                    prompt_examples = line_dict[key]
                    prompt = ""
                    for instance in prompt_examples:
                        instance = instance.strip()
                        prompt += instance + " \n"
                    prompt_examples_dict[key] = prompt

        return prompt_examples_dict

    else:
        # prompts for the response generation
        # read prompt_path
        prompt = ""
        with open(prompt_path, "r") as f:
            prompt_examples = f.readlines()
            prompt_examples = prompt_examples[:n_example]
            for instance in prompt_examples:
                instance = instance.strip()
                prompt += instance + " \n"

        return prompt
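
# ----------------------------------------------------------------------
# Prompt-file format sketch (added for clarity, not part of the original
# file). For prompt_type == "knowledge", each line of the prompt file is a
# JSON object mapping "topic + last user turn" to a list of prompt
# instances, as written by tasks/msdp/preprocessing.py, e.g. the
# hypothetical line:
#     {"Blue Why do you like it?": ["( Why do you like it? ) Blue => ...", "..."]}
# For prompt_type == "response", the file is plain text with one prompt
# example per line, and only the first n_example lines are used.
# ----------------------------------------------------------------------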
def generate_samples_by_calling_api():
    """Generate outputs by calling the model API"""
    args = get_args()
    assert args.prompt_type in ["knowledge", "response"], \
        "Please input a correct prompt type!"

    if args.prompt_type == "knowledge":
        # read knowledge generation prompts
        knwl_gen_prompt_dict = read_prompts(
            args.prompt_file, args.prompt_type, args.num_prompt_examples)
    else:
        resp_gen_prompt = read_prompts(
            args.prompt_file, args.prompt_type, args.num_prompt_examples)

    # read the test data
    fname = open(args.sample_input_file, "r")
    test_sample_list = fname.readlines()
    # create output file
    fname_out = open(args.sample_output_file, "w")

    # call the api to get the output generations
    for test_sample in test_sample_list:
        test_sample = test_sample.strip()
        splits = test_sample.split("\t")
        topic = splits[0]

        # prepare the inputs for the api
        if args.prompt_type == "knowledge":
            ## inputs = prompt + current test
            # get the prompt
            turns = splits[1].split(" [SEP] ")
            last_turn = turns[-1]
            key = topic + " " + last_turn
            inputs = knwl_gen_prompt_dict[key]

            # add current test
            inputs += "( " + last_turn + " ) " + topic + " =>"

        else:
            # inputs = prompt + current test
            # get the prompt
            inputs = resp_gen_prompt

            # add current test
            turns = splits[1].split(" [SEP] ")
            knowledge = splits[2]
            last_turn = turns[-1]
            last_turn = " ".join(word_tokenize(last_turn))
            knowledge = " ".join(word_tokenize(knowledge))
            knowledge = knowledge.strip()
            last_turn = last_turn.strip()
            inputs += "Topic: " + topic + ". "
            inputs += "User says: " + last_turn + " "
            inputs += "We know that: " + knowledge + " "
            inputs += "System replies:"

        # get the output generations from the api,
        # and write to the output file
        generations = call_model_api(inputs, args.out_seq_length)
        fname_out.write(generations)
        fname_out.write("\n")

    fname.close()
    fname_out.close()
def model_provider(pre_process=True, post_process=True):
    """Build the model."""

    print_rank_0('building GPT model ...')
    model = GPTModel(
        num_tokentypes=0,
        parallel_output=True,
        pre_process=pre_process,
        post_process=post_process
    )
    return model
def generate_samples_by_prompting_input_from_file(model):
    """Prompt a pretrained language model to generate knowledge/response"""

    # get tokenizer
    args = get_args()
    tokenizer = get_tokenizer()

    # Read the sample file and open the output file.
    assert args.sample_input_file is not None, \
        'sample input file is not provided.'
    if mpu.is_pipeline_first_stage() and mpu.get_tensor_model_parallel_rank() == 0:
        fname = open(args.sample_input_file, "r")
        all_raw_text = fname.readlines()
        input_count = len(all_raw_text)
        if args.sample_output_file is None:
            sample_output_file = args.sample_input_file + ".out"
            print('`sample-output-file` not specified, setting '
                  'it to {}'.format(sample_output_file))
        else:
            sample_output_file = args.sample_output_file
        fname_out = open(sample_output_file, "w")

    # only two prompt types (i.e., knowledge and response) are allowed
    assert args.prompt_type in ["knowledge", "response"], \
        "Please input a correct prompt type!"

    # Read the prompt file
    if args.prompt_type == "knowledge":
        # read the prompts for the knowledge generation
        prompt_examples_dict = {}
        with open(args.prompt_file, "r") as f:
            for i, line in enumerate(f):
                line = line.strip()
                line_dict = json.loads(line)
                key = list(line_dict.keys())[0]

                # get the prompt examples based on the key
                if key not in prompt_examples_dict:
                    prompt_examples = line_dict[key]
                    prompt = ""
                    for instance in prompt_examples:
                        instance = instance.strip()
                        prompt += instance + " \n"
                    prompt_examples_dict[key] = prompt

    else:
        # read the prompts for the response generation
        # prompts are fixed for all test samples
        with open(args.prompt_file, "r") as f:
            prompt_examples = f.readlines()
            prompt_examples = prompt_examples[:args.num_prompt_examples]

            prompt = ""
            for instance in prompt_examples:
                instance = instance.strip()
                prompt += instance + " \n"

    input_pos = 0
    model.eval()
    # perform prompting
    with torch.no_grad():
        while True:
            raw_text_len = 0
            if mpu.is_pipeline_first_stage() \
               and mpu.get_tensor_model_parallel_rank() == 0:
                input_str = all_raw_text[input_pos]
                input_str = input_str.strip()
                splits = input_str.split("\t")
                topic = splits[0]

                if args.prompt_type == "knowledge":
                    # first add the prompt into the raw_text
                    turns = splits[1].split(" [SEP] ")
                    last_turn = turns[-1]
                    key = topic + " " + last_turn
                    raw_text = prompt_examples_dict[key]

                    # construct inputs for knowledge generation
                    # then add the constructed inputs into the raw_text
                    raw_text += "( " + last_turn + " ) " + topic + " =>"

                else:
                    # first add the prompt into the raw_text
                    raw_text = prompt

                    # construct inputs for response generation
                    # then add the constructed inputs into the raw_text
                    turns = splits[1].split(" [SEP] ")
                    knowledge = splits[2]
                    last_turn = turns[-1]
                    last_turn = " ".join(word_tokenize(last_turn))
                    knowledge = " ".join(word_tokenize(knowledge))
                    knowledge = knowledge.strip()
                    last_turn = last_turn.strip()
                    raw_text += "Topic: " + topic + ". "
                    raw_text += "User says: " + last_turn + " "
                    raw_text += "We know that: " + knowledge + " "
                    raw_text += "System replies:"

                input_pos += 1
                raw_text_len = len(raw_text)

            else:
                raw_text = "EMPTY TEXT"

            if input_pos % 100 == 0:
                print_rank_0("input_pos: %d" % input_pos)

            outputs = generate_and_post_process(
                model=model,
                prompts=[raw_text],
                tokens_to_generate=args.out_seq_length,
                top_k_sampling=1)
            prompts_plus_generations = outputs[0]
            prompts_plus_generations = prompts_plus_generations[0]

            # write the generated output to the output file
            if mpu.get_tensor_model_parallel_rank() == 0:
                if mpu.is_pipeline_first_stage():
                    generations = prompts_plus_generations[raw_text_len:]
                    generations = generations.split("\n")[0]
                    generations = generations.strip()
                    fname_out.write(generations)
                    fname_out.write("\n")

            raw_text = None
            if input_pos == input_count:
                return
def main():
    args = get_args()

    if args.api_prompt:
        # obtain the generations by calling the api
        generate_samples_by_calling_api()
        return

    if args.num_layers_per_virtual_pipeline_stage is not None:
        print("Interleaved pipeline schedule is not yet supported for text generation.")
        exit()

    # Set up model and load checkpoint.
    model = get_model(model_provider, wrap_with_ddp=False)

    if args.load is not None:
        _ = load_checkpoint(model, None, None)

    assert len(model) == 1, "Above condition should have caught this"
    model = model[0]

    # perform the prompting
    generate_samples_by_prompting_input_from_file(model)
|
Megatron-LM-master
|
tasks/msdp/prompt.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Run multi-stage dialogue prompting (MSDP)."""
import os
import sys
sys.path.append(os.path.abspath(os.path.join(
os.path.join(os.path.dirname(__file__), os.path.pardir), os.path.pardir)))
from megatron import get_args
from megatron.initialize import initialize_megatron
def get_tasks_args(parser):
    """Provide extra arguments required for tasks."""
    group = parser.add_argument_group(title='tasks')

    # parameters for the knowledgeable dialogue generation
    group.add_argument('--task', type=str, required=True,
                       help='Task name.')
    group.add_argument("--sample-input-file", type=str, default=None,
                       help='Get input from file instead of interactive mode, '
                       'each line is an input.')
    group.add_argument("--sample-output-file", type=str, default=None,
                       help='Output file got from --sample-input-file')
    group.add_argument('--prompt-file', type=str, default=None,
                       help='prompting file')
    group.add_argument('--prompt-type', type=str, default=None,
                       choices=['knowledge', 'response'],
                       help='prompt type (knowledge or response)')
    group.add_argument('--num-prompt-examples', type=int, default=10,
                       help='number of prompt examples')
    group.add_argument('--guess-file', type=str, default=None,
                       help='datapath for generated sentences')
    group.add_argument('--answer-file', type=str, default=None,
                       help='datapath for golden sentences')
    group.add_argument('--out-seq-length', type=int, default=100,
                       help='output sequence length')
    group.add_argument('--api-prompt', default=False, action="store_true",
                       help='setup model api for prompting')
    group.add_argument('--megatron-api-url', type=str, default=None,
                       help='url of the megatron api')

    return parser


if __name__ == '__main__':

    initialize_megatron(extra_args_provider=get_tasks_args)

    args = get_args()

    if args.num_layers_per_virtual_pipeline_stage is not None:
        print("Interleaved pipeline schedule is not yet supported for downstream tasks.")
        exit()

    if args.task == 'MSDP-PROMPT':
        from tasks.msdp.prompt import main

    elif args.task == 'MSDP-EVAL-F1':
        from tasks.msdp.evaluate import main

    else:
        raise NotImplementedError('Task {} is not implemented.'.format(
            args.task))

    main()
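
# ----------------------------------------------------------------------
# Invocation sketch (added for clarity, not part of the original file).
# A knowledge-generation run might look roughly like the following; the
# usual Megatron model, tokenizer and checkpoint arguments are omitted and
# all paths are placeholders.
# ----------------------------------------------------------------------
# python tasks/msdp/main.py \
#     --task MSDP-PROMPT \
#     --prompt-type knowledge \
#     --prompt-file <knwl_gen_prompts.json> \
#     --sample-input-file <test_samples.txt> \
#     --sample-output-file <generated_knowledge.txt> \
#     --num-prompt-examples 10 \
#     --out-seq-length 64 \
#     <megatron model/checkpoint arguments ...>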
|
Megatron-LM-master
|
tasks/msdp/main.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""QQP dataset."""
from megatron import print_rank_0
from tasks.data_utils import clean_text
from .data import GLUEAbstractDataset
LABELS = [0, 1]
class QQPDataset(GLUEAbstractDataset):

    def __init__(self, name, datapaths, tokenizer, max_seq_length,
                 test_label=0):
        self.test_label = test_label
        super().__init__('QQP', name, datapaths,
                         tokenizer, max_seq_length)

    def process_samples_from_single_path(self, filename):
        """Implement abstract method."""
        print_rank_0(' > Processing {} ...'.format(filename))

        samples = []
        total = 0
        first = True
        is_test = False
        with open(filename, 'r') as f:
            for line in f:
                row = line.strip().split('\t')
                if first:
                    first = False
                    if len(row) == 3:
                        is_test = True
                        print_rank_0(' reading {}, {}, and {} columns and '
                                     'setting labels to {}'.format(
                                         row[0].strip(), row[1].strip(),
                                         row[2].strip(), self.test_label))
                    else:
                        assert len(row) == 6
                        print_rank_0(' reading {}, {}, {}, and {} columns'
                                     ' ...'.format(
                                         row[0].strip(), row[3].strip(),
                                         row[4].strip(), row[5].strip()))
                    continue

                if is_test:
                    assert len(row) == 3, 'expected length 3: {}'.format(row)
                    uid = int(row[0].strip())
                    text_a = clean_text(row[1].strip())
                    text_b = clean_text(row[2].strip())
                    label = self.test_label
                    assert len(text_a) > 0
                    assert len(text_b) > 0
                else:
                    if len(row) == 6:
                        uid = int(row[0].strip())
                        text_a = clean_text(row[3].strip())
                        text_b = clean_text(row[4].strip())
                        label = int(row[5].strip())
                    else:
                        print_rank_0('***WARNING*** index error, '
                                     'skipping: {}'.format(row))
                        continue
                    if len(text_a) == 0:
                        print_rank_0('***WARNING*** zero length a, '
                                     'skipping: {}'.format(row))
                        continue
                    if len(text_b) == 0:
                        print_rank_0('***WARNING*** zero length b, '
                                     'skipping: {}'.format(row))
                        continue

                assert label in LABELS
                assert uid >= 0

                sample = {'uid': uid,
                          'text_a': text_a,
                          'text_b': text_b,
                          'label': label}
                total += 1
                samples.append(sample)

                if total % 50000 == 0:
                    print_rank_0(' > processed {} so far ...'.format(total))

        print_rank_0(' >> processed {} samples.'.format(len(samples)))
        return samples
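
# ----------------------------------------------------------------------
# Input-format sketch (added for clarity, not part of the original file).
# The parser above expects QQP-style TSV rows after a header line:
#   test rows, 3 columns:       id \t question1 \t question2
#   train/dev rows, 6 columns:  only columns 0, 3, 4 and 5 are read here
#                               (id, question1, question2, label); the two
#                               middle columns are presumably the question
#                               ids from the standard QQP release.
# ----------------------------------------------------------------------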
|
Megatron-LM-master
|
tasks/glue/qqp.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""GLUE finetuning/evaluation."""
from megatron import get_args
from megatron import print_rank_0
from megatron import get_tokenizer
from megatron.model.classification import Classification
from tasks.eval_utils import accuracy_func_provider
from tasks.finetune_utils import finetune
from megatron.arguments import core_transformer_config_from_args
def glue_classification(num_classes, Dataset,
                        name_from_datapath_func):

    def train_valid_datasets_provider():
        """Build train and validation dataset."""
        args = get_args()
        tokenizer = get_tokenizer()

        train_dataset = Dataset('training', args.train_data,
                                tokenizer, args.seq_length)
        valid_dataset = Dataset('validation', args.valid_data,
                                tokenizer, args.seq_length)

        return train_dataset, valid_dataset

    def model_provider(pre_process=True, post_process=True):
        """Build the model."""
        args = get_args()
        config = core_transformer_config_from_args(args)

        print_rank_0('building classification model for {} ...'.format(
            args.task))
        model = Classification(config=config, num_classes=num_classes, num_tokentypes=2,
                               pre_process=pre_process, post_process=post_process)

        return model

    def metrics_func_provider():
        """Provide metrics callback function."""
        def single_dataset_provider(datapath):
            args = get_args()
            tokenizer = get_tokenizer()

            name = name_from_datapath_func(datapath)

            return Dataset(name, [datapath], tokenizer, args.seq_length)
        return accuracy_func_provider(single_dataset_provider)

    """Finetune/evaluate."""
    finetune(train_valid_datasets_provider, model_provider,
             end_of_epoch_callback_provider=metrics_func_provider)


def main():
    args = get_args()

    if args.task == 'MNLI':
        num_classes = 3
        from tasks.glue.mnli import MNLIDataset as Dataset

        def name_from_datapath(datapath):
            return datapath.split('MNLI')[-1].strip(
                '.tsv').strip('/').replace('_', '-')

    elif args.task == 'QQP':
        num_classes = 2
        from tasks.glue.qqp import QQPDataset as Dataset

        def name_from_datapath(datapath):
            return datapath.split('QQP')[-1].strip(
                '.tsv').strip('/').replace('_', '-')

    else:
        raise NotImplementedError('GLUE task {} is not implemented.'.format(
            args.task))

    glue_classification(num_classes, Dataset, name_from_datapath)
|
Megatron-LM-master
|
tasks/glue/finetune.py
|