# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Relu and ReluGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import tf2
from tensorflow.python.compat import compat
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
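# Analytic second derivative of ELU, used by EluTest.testGradGrad below:
# ELU(x) = exp(x) - 1 for x < 0 and x for x >= 0, so the first derivative is
# exp(x) (x < 0) or 1 (x >= 0), and the second derivative is exp(x) for x < 0
# and 0 elsewhere.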
def _elu_grad_grad(activation):
if activation < 0:
return np.exp(activation)
return 0
class ReluTest(test.TestCase):
def _npRelu(self, np_features):
return np.maximum(np_features, np.zeros(np_features.shape))
def testNpRelu(self):
self.assertAllClose(
np.array([[0.0, 0.7, 0.0, 0.3, 0.0], [0.1, 0.0, 0.5, 0.0, 0.9]]),
self._npRelu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
0.9]])))
def _testRelu(self, np_features):
np_relu = self._npRelu(np_features)
tf_relu = nn_ops.relu(np_features)
self.assertAllClose(np_relu, tf_relu)
self.assertShapeEqual(np_relu, tf_relu)
def testNumbersCPU(self):
for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
# Force execution on CPU even if a GPU kernel is available for the type.
with ops.device("/device:CPU:0"):
self._testRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testNumbersGPU(self):
if not test.is_gpu_available():
self.skipTest("No GPU available")
for t in [np.float16, np.float32, np.float64]:
self._testRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
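# The CUDA Relu kernel for qint8 processes packed groups of four int8 values,
# so the tensor size must be a multiple of 4; the tests below cover a valid
# size and the error raised when the size is not a multiple of 4.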
def testReluInt8x4GoodShape(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest("No GPU available")
inputs = np.array([[-50, 7, 23, 0], [-1, -5, 6, 11]])
np_relu = self._npRelu(inputs)
tf_relu = nn_ops.relu(constant_op.constant(inputs, dtypes.qint8))
self.assertAllClose(np_relu, tf_relu)
self.assertShapeEqual(np_relu, tf_relu)
@test_util.disable_xla("b/123338077")  # Relu<qint8> passes with XLA, so the expected size error is never raised.
def testReluInt8x4BadShape(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest("No GPU available")
inputs = constant_op.constant(
np.array([[-50, 7, 23], [0, 1, -5], [6, -2, 11]]), dtypes.qint8)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Tensor size must be a multiple of 4 for Relu<qint8>. Got 9"):
self.evaluate(nn_ops.relu(inputs))
inputs = constant_op.constant(
np.array([1, -2, 3, -4, 5, -6, 7, -8, 9, -8, 7, -6, 5, -4, 3, -2, 1]),
dtypes.qint8)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Tensor size must be a multiple of 4 for Relu<qint8>. Got 17"):
self.evaluate(nn_ops.relu(inputs))
def testNoElement(self):
self._testRelu(np.array([[], []], dtype=np.float32))
# The gradient test for ReLU is a bit tricky because the derivative is not
# well defined at zero, so we keep the test inputs away from zero.
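# For example, a centered numeric estimate (f(x + d) - f(x - d)) / (2 * d)
# taken at x = 0 straddles the kink and would not match the analytic
# gradient; the inputs below stay at least 0.1 away from zero.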
def testGradientFloat32(self):
with self.cached_session():
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.relu, [x]))
print("relu (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
# The numeric gradient for fp16 is inaccurate due to its low precision, so
# we compare the fp16 analytical gradient against its fp32 counterpart.
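# The test below draws random shapes and values, computes
# d(l2_loss(relu(x)))/dx with a GradientTape in both precisions, and requires
# agreement within atol=3e-4 over 100 trials.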
def testGradientFloat16(self):
def grad(x):
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.l2_loss(nn_ops.relu(x))
return tape.gradient(y, x)
def f():
with test_util.use_gpu():
# Randomly construct a 1D shape from [1, 40)
shape = random_ops.random_uniform([1],
minval=1,
maxval=40,
dtype=dtypes.int32)
x32 = random_ops.random_uniform(shape, minval=-1, maxval=1)
x16 = math_ops.cast(x32, dtype=dtypes.float16)
return grad(x32), grad(x16)
# We're going to ensure that the fp16 and fp32 gradients
# are "close" to each other for ~100 random values.
#
# In TensorFlow 1.x, invoking f() (without eager execution enabled)
# would construct a graph. Instead of constructing a graph with O(100) nodes,
# we construct a single graph to be executed ~100 times in a Session.
if not tf2.enabled():
d32_tensor, d16_tensor = f()
with self.cached_session() as sess:
f = lambda: sess.run([d32_tensor, d16_tensor])
# Repeat the experiment 100 times. The tensor shapes and values are randomly
# generated for each run.
for _ in xrange(100):
d32, d16 = f()
self.assertAllClose(d32, d16, atol=3e-4)
def testGradientFloat64(self):
with self.cached_session():
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.relu, [x]))
print("relu (float64) gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradGradFloat32(self):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float32
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.relu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("relu (float32) gradient of gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradGradFloat64(self):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float64
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.relu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("relu (float64) gradient of gradient err = ", err)
self.assertLess(err, 1e-10)
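# One gradient-descent step on loss = relu(x)**2 starting from x = 100 with
# learning rate 0.25: d(loss)/dx = 2 * relu(x) = 200, so x becomes
# 100 - 0.25 * 200 = 50.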
def testGradientScalar(self):
x = variables.Variable(100.)
def loss():
return nn_ops.relu(x)**2
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.25)
self.evaluate(variables.global_variables_initializer())
self.evaluate(optimizer.minimize(loss))
self.assertAllClose(x.read_value(), 50.0)
def testGradientNoElement(self):
with self.cached_session():
def f(x):
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.relu(x)
return tape.gradient(y, x)
x = np.asarray([[], []], dtype=np.float32)
z = list(gradient_checker_v2.compute_gradient(f, [x]))[0][0]
self.assertAllEqual(z, np.reshape(x, (0, 0)))
class Relu6Test(test.TestCase):
def _npRelu6(self, np_features):
sixes = np.copy(np_features)
sixes.fill(6.0)
return np.minimum(
np.maximum(np_features, np.zeros(np_features.shape)), sixes)
def testNpRelu6(self):
self.assertAllClose(
np.array([[0.0, 0.7, 0.0, 0.3, 6.0], [0.1, 0.0, 6.0, 0.0, 0.9]]),
self._npRelu6(
np.array([[-0.9, 0.7, -0.5, 0.3, 6.0], [0.1, -0.3, 6.5, -0.7,
0.9]])))
def _testRelu6(self, np_features):
np_relu6 = self._npRelu6(np_features)
tf_relu6 = nn_ops.relu6(np_features)
self.assertAllClose(np_relu6, tf_relu6)
self.assertShapeEqual(np_relu6, tf_relu6)
def testNumbersCPU(self):
for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
# Force execution on CPU even if a GPU kernel is available for the type.
with ops.device("/device:CPU:0"):
self._testRelu6(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testNumbersGPU(self):
if not test.is_gpu_available():
self.skipTest("No GPU available")
for t in [np.float16, np.float32, np.float64]:
self._testRelu6(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
# The gradient test for ReLU6 is a bit tricky because the derivative is not
# well defined at zero and at six, so we keep the test inputs away from both
# points.
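# relu6(x) = min(max(x, 0), 6), so its derivative is 1 on (0, 6) and 0
# outside that interval.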
def testGradientFloat32(self):
with self.cached_session():
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
dtype=np.float32,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.relu6, [x]))
print("relu6 (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.cached_session():
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
dtype=np.float64,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.relu6, [x]))
print("relu6 (float64) gradient err = ", err)
self.assertLess(err, 1e-10)
class LeakyReluTest(test.TestCase):
def _npLeakyRelu(self, np_features, alpha=0.1):
return np.maximum(np_features, alpha * np_features)
def testNpLeakyRelu(self):
self.assertAllClose(
np.array([[-0.09, 0.7, -0.05, 0.3, -0.01],
[0.1, -0.03, 0.5, -0.07, 0.9]]),
self._npLeakyRelu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
0.9]]),
alpha=0.1))
def _testLeakyRelu(self, np_features, alpha):
np_leaky_relu = self._npLeakyRelu(np_features, alpha)
tf_leaky_relu = nn_ops.leaky_relu(np_features, alpha)
self.assertAllClose(np_leaky_relu, tf_leaky_relu)
self.assertShapeEqual(np_leaky_relu, tf_leaky_relu)
def testNumbersCPU(self):
for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
# Force execution on CPU even if a GPU kernel is available for the type.
with ops.device("/device:CPU:0"):
self._testLeakyRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
alpha=0.2)
def testNumbersGPU(self):
if not test.is_gpu_available():
self.skipTest("No GPU available")
for t in [np.float16, np.float32, np.float64]:
self._testLeakyRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
alpha=0.1)
# The gradient test for Leaky ReLU is a bit tricky because the derivative is
# not well defined at zero, so we keep the test inputs away from zero.
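# leaky_relu is linear on each side of zero (slope 1 for x > 0 and alpha for
# x < 0), so zero is the only point the gradient check needs to avoid.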
def testGradientFloat32(self):
with self.cached_session():
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.leaky_relu, [x]))
print("leaky_relu (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.cached_session():
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.leaky_relu, [x]))
print("leaky_relu (float64) gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradGradFloat32(self):
with compat.forward_compatibility_horizon(2018, 11, 2):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float32
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.leaky_relu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("leaky_relu (float32) gradient of gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradGradFloat64(self):
with compat.forward_compatibility_horizon(2018, 11, 2):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float64
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.leaky_relu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("leaky_relu (float64) gradient of gradient err = ", err)
self.assertLess(err, 1e-10)
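# One gradient-descent step on loss = leaky_relu(x, 0.05)**2 starting from
# x = -100 with learning rate 0.2: leaky_relu(-100, 0.05) = -5, so
# d(loss)/dx = 2 * (-5) * 0.05 = -0.5 and x becomes -100 - 0.2 * (-0.5) = -99.9.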
def testGradientScalar(self):
x = variables.Variable(-100.)
def loss():
return nn_ops.leaky_relu(x, 0.05)**2
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.2)
self.evaluate(variables.global_variables_initializer())
self.evaluate(optimizer.minimize(loss))
self.assertAllClose(x.read_value(), -99.9)
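# The numpy reference above, np.maximum(x, alpha * x), only matches
# leaky_relu for 0 <= alpha <= 1, so the expected values below are
# hard-coded: leaky_relu scales every negative input by alpha even when
# alpha is negative or greater than one.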
def testUnexpectedAlphaValue(self):
self.assertAllClose(
np.array([[-9.0, 0.7, -5.0, 0.3, -0.1], [0.1, -3.0, 0.5, -27.0, 0.9]]),
nn_ops.leaky_relu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.01],
[0.1, -0.3, 0.5, -2.7, 0.9]]),
alpha=10))
self.assertAllClose(
np.array([[9.0, 0.7, 5.0, 0.3, 0.1], [0.1, 3.0, 0.5, 27.0, 0.9]]),
nn_ops.leaky_relu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.01],
[0.1, -0.3, 0.5, -2.7, 0.9]]),
alpha=-10))
class EluTest(test.TestCase):
def _npElu(self, np_features):
return np.where(np_features < 0, np.exp(np_features) - 1, np_features)
def testNpElu(self):
self.assertAllClose(
np.array([[-0.59343034025, 0.7, -0.39346934028, 0.3, -0.09516258196],
[0.1, -0.25918177931, 0.5, -0.5034146962, 0.9]]),
self._npElu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
0.9]])))
def _testElu(self, np_features):
np_elu = self._npElu(np_features)
tf_elu = nn_ops.elu(np_features)
self.assertAllCloseAccordingToType(np_elu, tf_elu)
self.assertShapeEqual(np_elu, tf_elu)
def testNumbersCPU(self):
for t in [np.float16, np.float32, np.float64]:
# Force execution on CPU even if a GPU kernel is available for the type.
with ops.device("/device:CPU:0"):
self._testElu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testNumbersGPU(self):
if not test.is_gpu_available():
self.skipTest("No GPU available")
for t in [np.float16, np.float32, np.float64]:
self._testElu(np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testGradientFloat32(self):
with self.cached_session():
x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
x = np.asarray(x_val, dtype=np.float32, order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.elu, [x]))
print("elu (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.cached_session():
x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
x = np.asarray(x_val, dtype=np.float64, order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.elu, [x]))
print("elu (float64) gradient err = ", err)
self.assertLess(err, 1e-6)
def testGradGrad(self):
with self.cached_session():
def f(x):
with backprop.GradientTape(persistent=True) as tape:
tape.watch(x)
y = nn_ops.elu(x)
dy = tape.gradient(y, x)
return tape.gradient(dy, x)
for x in [-1., -0.5, 0.5, 1.]:
got = self.evaluate(f(constant_op.constant(x)))
want = _elu_grad_grad(x)
err = np.abs(got - want)
self.assertLess(err, 1e-4)
def testGradGradFloat32(self):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float32
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.elu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("elu (float32) gradient of gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradGradFloat64(self):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float64
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.elu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("elu (float64) gradient of gradient err = ", err)
self.assertLess(err, 1e-6)
class SeluTest(test.TestCase):
def _npSelu(self, np_features):
scale = 1.0507009873554804934193349852946
scale_alpha = 1.7580993408473768599402175208123
return np.where(np_features < 0, scale_alpha * (np.exp(np_features) - 1),
scale * np_features)
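# scale and scale_alpha are the self-normalizing constants from Klambauer et
# al. (2017), "Self-Normalizing Neural Networks": scale ~= 1.0507,
# alpha ~= 1.6733, and scale_alpha = scale * alpha.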
def testNpSelu(self):
self.assertAllClose(
np.array([[-1.0433095, 0.73549069, -0.6917582, 0.3152103, -0.16730527],
[0.1050701, -0.45566732, 0.5253505, -0.88505305, 0.9456309]]),
self._npSelu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
0.9]])))
def _testSelu(self, np_features):
np_selu = self._npSelu(np_features)
tf_selu = nn_ops.selu(np_features)
self.assertAllCloseAccordingToType(np_selu, tf_selu)
self.assertShapeEqual(np_selu, tf_selu)
def testNumbers(self):
for t in [np.float16, np.float32, np.float64]:
self._testSelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
# Force execution on CPU even if GPU kernels are available.
with ops.device("/device:CPU:0"):
self._testSelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testGradientFloat32(self):
with self.cached_session():
x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
x = np.asarray(x_val, dtype=np.float32, order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.selu, [x]))
print("selu (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.cached_session():
x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
x = np.asarray(x_val, dtype=np.float64, order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.selu, [x]))
print("selu (float64) gradient err = ", err)
self.assertLess(err, 1e-6)
def testGradGradFloat32(self):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float32
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.selu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("selu (float32) gradient of gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradGradFloat64(self):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float64
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.selu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("selu (float64) gradient of gradient err = ", err)
self.assertLess(err, 1e-6)
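# CReLU concatenates relu(x) and relu(-x) along the last (or a given) axis,
# so the output doubles that dimension, e.g. [50, 5, 7, 10] -> [50, 5, 7, 20].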
class CreluTest(test.TestCase):
def testCreluShape(self):
f = random_ops.random_normal([50, 5, 7, 10])
t = nn_ops.crelu(f)
self.assertEqual([50, 5, 7, 20], t.get_shape())
def _testCrelu(self, np_features):
np_relu = np.maximum(np_features, np.zeros_like(np_features))
np_neg_relu = np.maximum(-np_features, np.zeros_like(np_features))
np_crelu = np.concatenate((np_relu, np_neg_relu),
len(np_features.shape) - 1)
tf_crelu = nn_ops.crelu(np_features)
self.assertAllClose(np_crelu, tf_crelu)
self.assertShapeEqual(np_crelu, tf_crelu)
def testNumbersCPU(self):
for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
# Force execution on CPU even if a GPU kernel is available for the type.
with ops.device("/device:CPU:0"):
self._testCrelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testNumbersGPU(self):
if not test.is_gpu_available():
self.skipTest("No GPU available")
for t in [np.float16, np.float32, np.float64]:
self._testCrelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testNumbersWithAxis0(self):
tf_crelu = nn_ops.crelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]), axis=0)
np_crelu = np.array([[0, 7, 0, 3, 0], [1, 0, 5, 0, 9], [9, 0, 5, 0, 1],
[0, 3, 0, 7, 0]])
self.assertAllEqual(np_crelu, tf_crelu)
def testNumbersWithAxis1(self):
tf_crelu = nn_ops.crelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]), axis=1)
np_crelu = np.array([[0, 7, 0, 3, 0, 9, 0, 5, 0, 1],
[1, 0, 5, 0, 9, 0, 3, 0, 7, 0]])
self.assertAllEqual(np_crelu, tf_crelu)
if __name__ == "__main__":
test.main()
# End of tensorflow/python/kernel_tests/relu_op_test.py (tensorflow-master)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for binary coefficient-wise operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x**y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), x_values
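# For example, with the default thresh=0.5, _sparsify(np.array([[0.2, 0.9],
# [0.7, 0.1]])) zeroes the two small entries and returns a SparseTensor with
# indices [[0, 1], [1, 0]], values [0.9, 0.7] and dense_shape (2, 2), along
# with the values array [0.9, 0.7].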
def _default_tolerance(dtype):
"""Returns a sensible default tolerance for comparing results of a given type.
Args:
dtype: A datatype.
"""
if dtype == np.float16:
return 5e-3
elif dtype in (np.float32, np.complex64):
return 1e-3
elif dtype in (np.float64, np.complex128):
return 1e-5
else:
return None # Fail fast for unexpected types
class BinaryOpTest(test.TestCase):
def _compareCpu(self, x, y, np_func, tf_func, also_compare_variables=False):
np_ans = np_func(x, y)
with test_util.force_cpu():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = self.evaluate(out)
# Test that the op takes precedence over numpy operators.
np_left = self.evaluate(tf_func(x, iny))
np_right = self.evaluate(tf_func(inx, y))
if also_compare_variables:
var_x = variables.Variable(x)
var_y = variables.Variable(y)
self.evaluate(variables.global_variables_initializer())
print(type(x), type(y), type(var_x), type(var_y))
print(type(tf_func(x, var_y)), type(tf_func(var_x, y)))
np_var_left = self.evaluate(tf_func(x, var_y))
np_var_right = self.evaluate(tf_func(var_x, y))
if np_ans.dtype != object:
self.assertAllClose(np_ans, tf_cpu)
self.assertAllClose(np_ans, np_left)
self.assertAllClose(np_ans, np_right)
if also_compare_variables:
self.assertAllClose(np_ans, np_var_left)
self.assertAllClose(np_ans, np_var_right)
self.assertShapeEqual(np_ans, out)
_GRAD_TOL = {
dtypes_lib.float16: 1e-3,
dtypes_lib.float32: 1e-3,
dtypes_lib.complex64: 1e-2,
dtypes_lib.float64: 1e-5,
dtypes_lib.complex128: 1e-4
}
def _compareGradientX(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, out, zs, x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inxf, xs, outf, zs, x_init_value=xf, delta=1e-3)
jacob_n = jacob_n.astype(x.dtype)
tol = self._GRAD_TOL[dtypes_lib.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGradientY(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, ys, out, zs, x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inyf, ys, outf, zs, x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
tol = self._GRAD_TOL[dtypes_lib.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with test_util.use_gpu():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def _compareBoth(self, x, y, np_func, tf_func, also_compare_variables=False):
self._compareCpu(x, y, np_func, tf_func, also_compare_variables)
if x.dtype in (np.float16, np.float32, np.float64, np.complex64,
np.complex128):
if tf_func not in (_FLOORDIV, math_ops.floordiv, math_ops.zeta,
math_ops.polygamma):
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
if tf_func in (math_ops.zeta, math_ops.polygamma):
# These methods only support gradients in the second parameter
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
@test_util.run_deprecated_v1
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(x, y, np.add, math_ops.add, also_compare_variables=True)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.arctan2, math_ops.atan2)
x1 = np.random.randn(5, 6).astype(np.float32)
x2 = np.random.randn(5, 6).astype(np.float32)
# Remove tiny values--atan2 gradients are flaky near the origin.
x1[np.abs(x1) < 0.05] = 0.05 * np.sign(x1[np.abs(x1) < 0.05])
x2[np.abs(x2) < 0.05] = 0.05 * np.sign(x2[np.abs(x2) < 0.05])
self._compareBoth(x1, x2, np.arctan2, math_ops.atan2)
try:
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(a_pos_small, x_pos_small, special.gammainc,
math_ops.igamma)
self._compareBoth(a_pos_small, x_pos_small, special.gammaincc,
math_ops.igammac)
# Need x > 1
self._compareBoth(x_pos_small + 1, a_pos_small, special.zeta,
math_ops.zeta)
n_small = np.arange(0, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(n_small, x_pos_small, special.polygamma,
math_ops.polygamma)
except ImportError as e:
tf_logging.warn("Cannot test special functions: %s" % str(e))
@test_util.run_deprecated_v1
def testFloatDifferentShapes(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.float32)
y = np.array([1, 2]).reshape(2, 1).astype(np.float32)
with self.cached_session() as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
s = math_ops.reduce_sum(inx * iny)
gx, gy = sess.run(gradients_impl.gradients(s, [inx, iny]))
# gx is simply the broadcasted y
self.assertAllEqual(gx,
np.array([1, 1, 2, 2]).reshape(2, 2).astype(np.float32))
# gy is x summed across its columns (a per-row sum), matching y's shape
self.assertAllEqual(gy, np.array([3, 7]).reshape(2, 1).astype(np.float32))
def testFloatVariableOverload(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.int32)
y = np.array([1, 2]).reshape(2, 1).astype(np.int32)
var_x = variables.Variable(x)
var_y = variables.Variable(y)
with self.cached_session() as sess:
self.evaluate([var_x.initializer, var_y.initializer])
left_result = self.evaluate(var_x * y)
right_result = self.evaluate(x * var_y)
np_result = x * y
self.assertAllEqual(np_result, left_result)
self.assertAllEqual(np_result, right_result)
@test_util.run_deprecated_v1
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.arctan2, math_ops.atan2)
x1 = np.random.randn(7, 4).astype(np.float64)
x2 = np.random.randn(7, 4).astype(np.float64)
# Remove tiny values--atan2 gradients are flaky near the origin.
x1[np.abs(x1) < 0.5] = 0.5 * np.sign(x1[np.abs(x1) < 0.5])
x2[np.abs(x2) < 0.5] = 0.5 * np.sign(x2[np.abs(x2) < 0.5])
self._compareBoth(x1, x2, np.arctan2, math_ops.atan2)
try:
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(a_pos_small, x_pos_small, special.gammainc,
math_ops.igamma)
self._compareBoth(a_pos_small, x_pos_small, special.gammaincc,
math_ops.igammac)
except ImportError as e:
tf_logging.warn("Cannot test special functions: %s" % str(e))
def testUint8Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.uint8)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.uint8)
self._compareBoth(x, y, np.add, math_ops.add)
def testInt8Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int8)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int8)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.multiply, _MUL)
def testInt16Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int16)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int16)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.multiply, _MUL)
def testUint16Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.uint16)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.uint16)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
def testInt32Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.mod, math_ops.mod)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
# _compareBoth tests on GPU only for floating point types, so test
# _MOD for int32 on GPU by calling _compareGpu
self._compareGpu(x, y, np.mod, _MOD)
def testInt64Basic(self):
x = np.arange(1 << 40, 13 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int64)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.mod, math_ops.mod)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
@test_util.run_deprecated_v1
def testComplex64Basic(self):
x = complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
np.complex64)
y = complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
np.complex64)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
@test_util.run_deprecated_v1
def testComplex128Basic(self):
x = complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
np.complex128)
y = complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
np.complex128)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
def testStringComparison(self):
x = np.array([["abc", "bh"], ["c", ""]])
y = np.array([["abc", "bh"], ["def", "hi"]])
with test_util.force_cpu():
cmp_eq = math_ops.equal(x, y)
cmp_not_eq = math_ops.not_equal(x, y)
values = self.evaluate([cmp_eq, cmp_not_eq])
self.assertAllEqual([[True, True], [False, False]], values[0])
self.assertAllEqual([[False, False], [True, True]], values[1])
def testString(self):
x = np.array([["x_0_0", "x_0_1", "x_0_2"], ["x_1_0", "x_1_1", "x_1_2"],
["x_2_0", "x_2_1", "x_2_2"]],
dtype=object)
y = np.array([["y_0_0", "y_0_1", "y_0_2"], ["y_1_0", "y_1_1", "y_1_2"],
["y_2_0", "y_2_1", "y_2_2"]],
dtype=object)
z = np.array([["z_0", "z_1", "z_2"]], dtype=object)
w = np.array("w", dtype=object)
self._compareCpu(x, y, _ADD, _ADD)
self._compareCpu(x, z, _ADD, _ADD)
self._compareCpu(x, w, _ADD, _ADD)
self._compareCpu(z, w, _ADD, _ADD)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
if dtype in (np.complex64, np.complex128):
x = (1 + np.linspace(0, 2 + 3j, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 2 - 2j, np.prod(ys))).astype(dtype).reshape(ys)
else:
x = (1 + np.linspace(0, 5, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 5, np.prod(ys))).astype(dtype).reshape(ys)
self._compareCpu(x, y, np_func, tf_func)
if x.dtype in (np.float16, np.float32, np.float64):
# TODO(aselle): Make the test work for dtypes:
# (np.complex64, np.complex128).
if tf_func not in (_FLOORDIV, math_ops.floordiv):
if x.dtype == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(x, y, np_func, tf_func, np.float32)
self._compareGradientY(x, y, np_func, tf_func, np.float32)
else:
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
# TODO(josh11b,vrv): Refactor this to use parameterized tests.
def _testBCastByFunc(self, funcs, xs, ys):
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
np.complex64,
np.complex128,
]
for dtype in dtypes:
for (np_func, tf_func) in funcs:
if (dtype in (np.complex64, np.complex128) and
tf_func in (_FLOORDIV, math_ops.floordiv)):
continue # floordiv makes no sense for complex numbers
self._compareBCast(xs, ys, dtype, np_func, tf_func)
self._compareBCast(ys, xs, dtype, np_func, tf_func)
def _testBCastA(self, xs, ys):
funcs = [
(np.add, math_ops.add),
(np.add, _ADD),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastB(self, xs, ys):
funcs = [
(np.subtract, math_ops.subtract),
(np.subtract, _SUB),
(np.power, math_ops.pow),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastC(self, xs, ys):
funcs = [
(np.multiply, math_ops.multiply),
(np.multiply, _MUL),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastD(self, xs, ys):
funcs = [
(np.true_divide, math_ops.truediv),
(np.floor_divide, math_ops.floordiv),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
]
self._testBCastByFunc(funcs, xs, ys)
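# Each testBCast_N{A,B,C,D} case below pairs a left-hand shape with a
# right-hand shape that broadcasts against it (including zero-sized
# dimensions and mismatched ranks) and runs the add/sub/mul/div function
# groups defined above in both argument orders.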
@test_util.run_deprecated_v1
def testBCast_0A(self):
self._testBCastA([1, 3, 2], [1])
@test_util.run_deprecated_v1
def testBCast_0B(self):
self._testBCastB([1, 3, 2], [1])
@test_util.run_deprecated_v1
def testBCast_0C(self):
self._testBCastC([1, 3, 2], [1])
@test_util.run_deprecated_v1
def testBCast_0D(self):
self._testBCastD([1, 3, 2], [1])
@test_util.run_deprecated_v1
def testBCast_1A(self):
self._testBCastA([1, 3, 2], [2])
@test_util.run_deprecated_v1
def testBCast_1B(self):
self._testBCastB([1, 3, 2], [2])
@test_util.run_deprecated_v1
def testBCast_1C(self):
self._testBCastC([1, 3, 2], [2])
@test_util.run_deprecated_v1
def testBCast_1D(self):
self._testBCastD([1, 3, 2], [2])
@test_util.run_deprecated_v1
def testBCast_2A(self):
self._testBCastA([1, 3, 2], [3, 2])
@test_util.run_deprecated_v1
def testBCast_2B(self):
self._testBCastB([1, 3, 2], [3, 2])
@test_util.run_deprecated_v1
def testBCast_2C(self):
self._testBCastC([1, 3, 2], [3, 2])
@test_util.run_deprecated_v1
def testBCast_2D(self):
self._testBCastD([1, 3, 2], [3, 2])
@test_util.run_deprecated_v1
def testBCast_3A(self):
self._testBCastA([1, 3, 2], [3, 1])
@test_util.run_deprecated_v1
def testBCast_3B(self):
self._testBCastB([1, 3, 2], [3, 1])
@test_util.run_deprecated_v1
def testBCast_3C(self):
self._testBCastC([1, 3, 2], [3, 1])
@test_util.run_deprecated_v1
def testBCast_3D(self):
self._testBCastD([1, 3, 2], [3, 1])
@test_util.run_deprecated_v1
def testBCast_4A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_4B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_4C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_4D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_5A(self):
self._testBCastA([1, 3, 2], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_5B(self):
self._testBCastB([1, 3, 2], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_5C(self):
self._testBCastC([1, 3, 2], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_5D(self):
self._testBCastD([1, 3, 2], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_6A(self):
self._testBCastA([1, 3, 2], [2, 1, 1])
@test_util.run_deprecated_v1
def testBCast_6B(self):
self._testBCastB([1, 3, 2], [2, 1, 1])
@test_util.run_deprecated_v1
def testBCast_6C(self):
self._testBCastC([1, 3, 2], [2, 1, 1])
@test_util.run_deprecated_v1
def testBCast_6D(self):
self._testBCastD([1, 3, 2], [2, 1, 1])
@test_util.run_deprecated_v1
def testBCast_7A(self):
self._testBCastA([1, 3, 2], [1, 3, 1])
@test_util.run_deprecated_v1
def testBCast_7B(self):
self._testBCastB([1, 3, 2], [1, 3, 1])
@test_util.run_deprecated_v1
def testBCast_7C(self):
self._testBCastC([1, 3, 2], [1, 3, 1])
@test_util.run_deprecated_v1
def testBCast_7D(self):
self._testBCastD([1, 3, 2], [1, 3, 1])
@test_util.run_deprecated_v1
def testBCast_8A(self):
self._testBCastA([2, 1, 5], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_8B(self):
self._testBCastB([2, 1, 5], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_8C(self):
self._testBCastC([2, 1, 5], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_8D(self):
self._testBCastD([2, 1, 5], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_9A(self):
self._testBCastA([2, 0, 5], [2, 0, 1])
@test_util.run_deprecated_v1
def testBCast_9B(self):
self._testBCastB([2, 0, 5], [2, 0, 1])
@test_util.run_deprecated_v1
def testBCast_9C(self):
self._testBCastC([2, 0, 5], [2, 0, 1])
@test_util.run_deprecated_v1
def testBCast_9D(self):
self._testBCastD([2, 0, 5], [2, 0, 1])
@test_util.run_deprecated_v1
def testBCast_10A(self):
self._testBCastA([2, 3, 0], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_10B(self):
self._testBCastB([2, 3, 0], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_10C(self):
self._testBCastC([2, 3, 0], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_10D(self):
self._testBCastD([2, 3, 0], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_11A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_11B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_11C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_11D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_12A(self):
self._testBCastA([1, 1, 1, 1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_12B(self):
self._testBCastB([1, 1, 1, 1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_12C(self):
self._testBCastC([1, 1, 1, 1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_12D(self):
self._testBCastD([1, 1, 1, 1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_13A(self):
self._testBCastA([1, 3, 2, 1, 1], [1])
@test_util.run_deprecated_v1
def testBCast_13B(self):
self._testBCastB([1, 3, 2, 1, 1], [1])
@test_util.run_deprecated_v1
def testBCast_13C(self):
self._testBCastC([1, 3, 2, 1, 1], [1])
@test_util.run_deprecated_v1
def testBCast_13D(self):
self._testBCastD([1, 3, 2, 1, 1], [1])
@test_util.run_deprecated_v1
def testBCast_14A(self):
self._testBCastA([2, 3, 1, 1, 5], [1])
@test_util.run_deprecated_v1
def testBCast_14B(self):
self._testBCastB([2, 3, 1, 1, 5], [1])
@test_util.run_deprecated_v1
def testBCast_14C(self):
self._testBCastC([2, 3, 1, 1, 5], [1])
@test_util.run_deprecated_v1
def testBCast_14D(self):
self._testBCastD([2, 3, 1, 1, 5], [1])
@test_util.run_deprecated_v1
def testBCast_15A(self):
self._testBCastA([10, 3, 1, 2], [3, 1, 2])
@test_util.run_deprecated_v1
def testBCast_15B(self):
self._testBCastB([10, 3, 1, 2], [3, 1, 2])
@test_util.run_deprecated_v1
def testBCast_15C(self):
self._testBCastC([10, 3, 1, 2], [3, 1, 2])
@test_util.run_deprecated_v1
def testBCast_15D(self):
self._testBCastD([10, 3, 1, 2], [3, 1, 2])
@test_util.run_deprecated_v1
def testMismatchedDimensions(self):
for func in [
math_ops.add, math_ops.subtract, math_ops.multiply, math_ops.div, _ADD,
_SUB, _MUL, _TRUEDIV, _FLOORDIV
]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
func(
ops.convert_to_tensor([10.0, 20.0, 30.0]),
ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
@test_util.run_deprecated_v1
def testZeroPowGrad(self):
with self.cached_session():
for dtype in (np.float16, np.float32, np.float64, np.complex64,
np.complex128):
x = constant_op.constant(0.0, dtype=dtype)
y = constant_op.constant(2.0, dtype=dtype)
z = math_ops.pow(x, y)
error = gradient_checker.compute_gradient_error(y, [], z, [])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testComplexPowGrad(self):
with self.cached_session():
for dtype in np.complex64, np.complex128:
for base in 2.0, -2.0:
x = constant_op.constant(base, dtype=dtype)
y = constant_op.constant(2.0, dtype=dtype)
z = math_ops.pow(x, y)
error = gradient_checker.compute_gradient_error(y, [], z, [])
self.assertLess(error, 2e-4)
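# atan2 has well-defined results for signed zeros and for infinite arguments
# under IEEE 754, so those corner cases are compared directly against
# np.arctan2 on both CPU and GPU.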
def testAtan2SpecialValues(self):
x1l, x2l = zip((+0.0, +0.0), (+0.0, -0.0), (-0.0, +0.0), (-0.0, -0.0),
(1.2345, float("inf")), (1.2345, -float("inf")),
(-4.321, float("inf")), (-4.125, -float("inf")),
(float("inf"), float("inf")), (float("inf"), -float("inf")),
(-float("inf"), float("inf")),
(-float("inf"), -float("inf")))
for dtype in np.float32, np.float64:
x1 = np.array(x1l).astype(dtype)
x2 = np.array(x2l).astype(dtype)
self._compareCpu(x1, x2, np.arctan2, math_ops.atan2)
self._compareGpu(x1, x2, np.arctan2, math_ops.atan2)
def testPowNegativeExponent(self):
for dtype in [np.int32, np.int64]:
with test_util.force_cpu():
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = np.array([-2, 3]).astype(dtype)
self.evaluate(math_ops.pow(x, y))
with test_util.force_cpu():
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = np.array([2, -3]).astype(dtype)
self.evaluate(math_ops.pow(x, y))
with test_util.force_cpu():
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = -3
self.evaluate(math_ops.pow(x, y))
class ComparisonOpTest(test.TestCase):
def _compareScalar(self, func, x, y, dtype):
with test_util.use_gpu():
out = func(
ops.convert_to_tensor(np.array([x]).astype(dtype)),
ops.convert_to_tensor(np.array([y]).astype(dtype)))
ret = self.evaluate(out)
return ret[0]
def testScalarCompareScalar(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
data = [-1, 0, 1]
for t in dtypes:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.less, x, y, t), x < y)
self.assertEqual(
self._compareScalar(math_ops.less_equal, x, y, t), x <= y)
self.assertEqual(
self._compareScalar(math_ops.greater, x, y, t), x > y)
self.assertEqual(
self._compareScalar(math_ops.greater_equal, x, y, t), x >= y)
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
for t in [np.complex64, np.complex128]:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
def _compare(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with test_util.use_gpu():
out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
def testTensorCompareTensor(self):
x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
y = np.linspace(20, -10, 6).reshape(1, 3, 2)
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(xt, yt, np.less, math_ops.less)
self._compare(xt, yt, np.less_equal, math_ops.less_equal)
self._compare(xt, yt, np.greater, math_ops.greater)
self._compare(xt, yt, np.greater_equal, math_ops.greater_equal)
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
# Complex types do not support ordering but do support equality tests.
for t in [np.complex64, np.complex128]:
xt = x.astype(t)
xt -= 1j * xt
yt = y.astype(t)
yt -= 1j * yt
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
if dtype in (np.complex64, np.complex128):
x -= 1j * x
y -= 1j * y
self._compare(x, y, np_func, tf_func)
self._compare(y, x, np_func, tf_func)
def _testBCastByFunc(self, np_func, tf_func, include_complex=False):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
]
if include_complex:
dtypes.extend([np.complex64, np.complex128])
for (xs, ys) in shapes:
for dtype in dtypes:
self._compareBCast(xs, ys, dtype, np_func, tf_func)
def testBCastLess(self):
self._testBCastByFunc(np.less, math_ops.less)
def testBCastLessEqual(self):
self._testBCastByFunc(np.less_equal, math_ops.less_equal)
def testBCastGreater(self):
self._testBCastByFunc(np.greater, math_ops.greater)
def testBCastGreaterEqual(self):
self._testBCastByFunc(np.greater_equal, math_ops.greater_equal)
def testBCastEqual(self):
self._testBCastByFunc(np.equal, math_ops.equal, include_complex=True)
def testBCastNotEqual(self):
self._testBCastByFunc(
np.not_equal, math_ops.not_equal, include_complex=True)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
funcs = [
math_ops.less, math_ops.less_equal, math_ops.greater,
math_ops.greater_equal, math_ops.equal, math_ops.not_equal
]
x = np.arange(0, 10).reshape([2, 5])
y = np.arange(0, 10).reshape([5, 2])
for t in dtypes:
for f in funcs:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
f(x.astype(t), y.astype(t))
if __name__ == "__main__":
test.main()
# End of tensorflow/python/kernel_tests/cwise_ops_binary_test.py (tensorflow-master)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Softsign and SoftsignGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
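# Softsign(x) = x / (1 + |x|) squashes its input smoothly into (-1, 1),
# similar to tanh but with polynomial rather than exponential tails; its
# derivative is 1 / (1 + |x|)**2.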
class SoftsignTest(test.TestCase):
def _npSoftsign(self, np_features):
return np_features / (1 + np.abs(np_features))
def _testSoftsign(self, np_features, use_gpu=False):
np_softsign = self._npSoftsign(np_features)
with self.cached_session(use_gpu=use_gpu):
softsign = nn_ops.softsign(np_features)
tf_softsign = self.evaluate(softsign)
self.assertAllClose(np_softsign, tf_softsign)
self.assertShapeEqual(np_softsign, softsign)
def testNumbers(self):
for t in [np.float32, np.float64]:
self._testSoftsign(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=False)
self._testSoftsign(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
@test_util.run_deprecated_v1
def testGradient(self):
with self.cached_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softsign(x, name="softsign")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("softsign (float) gradient err = ", err)
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testNoInts(self):
with self.cached_session():
with self.assertRaisesRegexp(
TypeError,
"'features' has DataType int32 not in list of allowed values"):
nn_ops.softsign(constant_op.constant(7)).eval()
if __name__ == "__main__":
test.main()
# End of tensorflow/python/kernel_tests/softsign_op_test.py (tensorflow-master)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fractional max pool operation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class FractionalMaxPoolTest(test.TestCase):
# Random number generator with a fixed seed for reproducibility.
_PRNG = np.random.RandomState(341261)
_SEED = 123456
def _MaxPoolAlongRows(self, input_matrix, row_seq, overlapping):
"""Perform max pool along row of a 2-D matrix based on row_seq.
Args:
input_matrix: A 2-D matrix.
row_seq: Cumulative pooling sequence along row.
overlapping: Whether or not to use overlapping when pooling.
Returns:
A 2-D matrix, with
* num_rows = len(row_seq)-1
* num_cols = input_matrix.num_cols.
"""
output_image = np.zeros(input_matrix.shape[1])
row_max = row_seq[-1]
for i in range(row_seq.shape[0] - 1):
row_start = row_seq[i]
row_end = row_seq[i + 1] + 1 if overlapping else row_seq[i + 1]
row_end = min(row_end, row_max)
output_image = np.vstack((output_image, np.amax(
input_matrix[row_start:row_end, :], axis=0))) # axis 0 is along row
# remove the sentinel row
return output_image[1:, :]
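# For example, with row_seq = [0, 2, 5] the pooling regions are rows [0, 2)
# and [2, 5); with overlapping=True they become [0, 3) and [2, 5), i.e. the
# boundary row is shared by adjacent regions.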
def _MaxPoolAlongCols(self, input_matrix, col_seq, overlapping):
"""Perform max pool along column of a 2-D matrix based on col_seq.
Args:
input_matrix: A 2-D matrix.
col_seq: Cumulative pooling sequence along column.
overlapping: Whether or not to use overlapping when pooling.
Returns:
A 2-D matrix, with
* num_rows = input_matrix.num_rows
* num_cols = len(col_seq)-1.
"""
input_matrix = input_matrix.transpose()
output_matrix = self._MaxPoolAlongRows(input_matrix, col_seq, overlapping)
return output_matrix.transpose()
def _GetExpectedFractionalMaxPoolResult(self, input_tensor, row_seq, col_seq,
overlapping):
"""Get expected fractional max pool result.
row_seq and col_seq together define the fractional pooling regions.
Args:
input_tensor: Original input tensor, assuming it is a 4-D tensor, with
dimension as [batch, height/row, width/column, channels/depth].
row_seq: Cumulative pooling sequence along row.
col_seq: Cumulative pooling sequence along column.
overlapping: Use overlapping when doing pooling.
Returns:
A 4-D tensor that is the result of max pooling on input_tensor based on
pooling region defined by row_seq and col_seq, conditioned on whether or
not overlapping is used.
"""
input_shape = input_tensor.shape
output_shape = (input_shape[0], len(row_seq) - 1, len(col_seq) - 1,
input_shape[3])
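    # A pooling sequence with n entries defines n - 1 pooling regions, so the
    # output spatial dimensions are len(row_seq) - 1 and len(col_seq) - 1.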
output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)
for batch in range(input_shape[0]):
for channel in range(input_shape[3]):
two_dim_slice = input_tensor[batch, :, :, channel]
tmp = self._MaxPoolAlongRows(two_dim_slice, row_seq, overlapping)
output_tensor[batch, :, :, channel] = self._MaxPoolAlongCols(
tmp, col_seq, overlapping)
return output_tensor
def _ValidateFractionalMaxPoolResult(self, input_tensor, pooling_ratio,
pseudo_random, overlapping):
"""Validate FractionalMaxPool's result against expected.
Expected result is computed given input_tensor, and pooling region defined
by row_seq and col_seq.
Args:
input_tensor: A tensor or numpy ndarray.
      pooling_ratio: A list or tuple of length 4; the first and last
        elements must be 1.
pseudo_random: Use pseudo random method to generate pooling sequence.
overlapping: Use overlapping when pooling.
Returns:
None
"""
with self.cached_session() as sess:
p, r, c = nn_ops.fractional_max_pool_v2(
input_tensor,
pooling_ratio,
pseudo_random,
overlapping,
seed=self._SEED)
actual, row_seq, col_seq = self.evaluate([p, r, c])
expected = self._GetExpectedFractionalMaxPoolResult(input_tensor, row_seq,
col_seq, overlapping)
self.assertShapeEqual(expected, p)
self.assertAllClose(expected, actual)
def _testVisually(self):
"""Manual test by printing out intermediate result of a small random tensor.
Since _GetExpectedFractionalMaxPoolResult is 'automated', it feel safer to
have a test case that you can see what's happening.
This test will generate a small, random, int 2D matrix, and feed it to
FractinalMaxPool and _GetExpectedFractionalMaxPoolResult.
"""
num_rows = 6
num_cols = 6
tensor_shape = (1, num_rows, num_cols, 1)
pseudo_random = False
for overlapping in True, False:
print("-" * 70)
print("Testing FractionalMaxPool with overlapping = {}".format(
overlapping))
rand_mat = self._PRNG.randint(10, size=tensor_shape)
pooling_ratio = [1, math.sqrt(2), math.sqrt(2), 1]
with self.cached_session() as sess:
p, r, c = nn_ops.fractional_max_pool_v2(
rand_mat,
pooling_ratio,
pseudo_random,
overlapping,
seed=self._SEED)
tensor_output, row_seq, col_seq = self.evaluate([p, r, c])
expected_result = self._GetExpectedFractionalMaxPoolResult(rand_mat,
row_seq,
col_seq,
overlapping)
print("row sequence:")
print(row_seq)
print("column sequence:")
print(col_seq)
print("Input:")
# Print input with pooling region marked.
for i in range(num_rows):
row_to_print = []
for j in range(num_cols):
if j in col_seq:
row_to_print.append("|")
row_to_print.append(str(rand_mat[0, i, j, 0]))
row_to_print.append("|")
if i in row_seq:
print("-" * 2 * len(row_to_print))
print(" ".join(row_to_print))
print("-" * 2 * len(row_to_print))
print("Output from FractionalMaxPool:")
print(tensor_output[0, :, :, 0])
print("Expected result:")
print(expected_result[0, :, :, 0])
def testAllInputOptions(self):
"""Try all possible input options for fractional_max_pool.
"""
num_batches = 5
num_channels = 3
num_rows = 20
num_cols = 30
for pseudo_random in True, False:
for overlapping in True, False:
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalMaxPoolResult(
rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,
overlapping)
def testIntegerTensorInput(self):
"""Test it works fine when input tensor is integer type.
"""
num_batches = 5
num_channels = 3
num_rows = 20
num_cols = 30
pseudo_random = True
overlapping = True
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
rand_mat = self._PRNG.randint(1000, size=tensor_shape)
self._ValidateFractionalMaxPoolResult(rand_mat,
[1, math.sqrt(3), math.sqrt(2), 1],
pseudo_random, overlapping)
def testDifferentTensorShapes(self):
"""Test different shapes of input tensor.
Mainly test different combinations of num_rows and num_cols.
"""
pseudo_random = True
overlapping = True
for num_batches in [1, 3]:
for num_channels in [1, 3]:
for num_rows in [10, 20, 50]:
for num_cols in [10, 20, 50]:
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalMaxPoolResult(
rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,
overlapping)
def testLargePoolingRatio(self):
"""Test when pooling ratio is not within [1, 2).
"""
pseudo_random = True
overlapping = True
num_batches = 3
num_channels = 3
num_rows = 30
num_cols = 50
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
for row_ratio in [math.sqrt(11), math.sqrt(37)]:
for col_ratio in [math.sqrt(11), math.sqrt(27)]:
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalMaxPoolResult(rand_mat,
[1, row_ratio, col_ratio, 1],
pseudo_random, overlapping)
def testDivisiblePoolingRatio(self):
"""Test when num of rows/cols can evenly divide pooling ratio.
This is a case regular max pooling can handle. Should be handled by
fractional pooling as well.
"""
pseudo_random = True
overlapping = True
num_batches = 3
num_channels = 3
num_rows = 30
num_cols = 50
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalMaxPoolResult(rand_mat, [1, 2, 2, 1], pseudo_random,
overlapping)
@test_util.run_deprecated_v1
def testDifferentInputTensorShape(self):
"""Runs the operation in one session with different input tensor shapes."""
with self.cached_session() as sess:
input_holder = array_ops.placeholder(dtypes.float32,
[None, None, None, 3])
pooling_ratio = [1, 1.5, 1.5, 1]
pseudo_random = False
overlapping = False
p, r, c = nn_ops.fractional_max_pool_v2(
input_holder,
pooling_ratio,
pseudo_random,
overlapping,
seed=self._SEED)
# First run.
input_a = np.zeros([3, 32, 32, 3])
actual, row_seq, col_seq = sess.run([p, r, c], {input_holder: input_a})
expected = self._GetExpectedFractionalMaxPoolResult(
input_a, row_seq, col_seq, overlapping)
self.assertSequenceEqual(expected.shape, actual.shape)
# Second run.
input_b = np.zeros([4, 45, 45, 3])
actual, row_seq, col_seq = sess.run([p, r, c], {input_holder: input_b})
expected = self._GetExpectedFractionalMaxPoolResult(
input_b, row_seq, col_seq, overlapping)
self.assertSequenceEqual(expected.shape, actual.shape)
class FractionalMaxPoolGradTest(test.TestCase):
"""Tests for FractionalMaxPoolGrad.
Two types of tests for FractionalMaxPoolGrad.
1) Test fractional_max_pool_grad() directly.
    This type of test relies on gen_nn_ops.max_pool_grad() returning the
    correct result. For example:
* input_tensor_shape = (1, 10, 10, 1)
* window_size = (1, 2, 2, 1)
* stride_size = (1, 2, 2, 1)
    * padding: not really important, since 10 is evenly divisible by 2
max pooling should generate the same result as fractional max pooling with:
* row_sequence = [0, 2, 4, 6, 8, 10]
* col_sequence = [0, 2, 4, 6, 8, 10]
* overlapping = False
    This also means their gradients in such cases will be the same.
Similarly, when
* input_tensor_shape = (1, 7, 7, 1)
* window_size = (1, 3, 3, 1)
* stride_size = (1, 2, 2, 1)
* padding: not important
max pooling should generate the same result as fractional max pooling with:
* row_sequence = [0, 2, 4, 7]
* col_sequence = [0, 2, 4, 7]
* overlapping = True
2) Test through compute_gradient_error()
"""
_PRNG = np.random.RandomState(341261)
_SEED = 123456
def _GenerateUniqueRandomInputTensor(self, shape):
"""Generate 'unqiue' random input tensor.
'Unique' means there's no collision values in the tensor, all elements are
different. This is done by generating sequence of integers with step of 1
and then randomly shuffle these integers.
Args:
shape: Shape of the tensor desired.
Returns:
A numpy ndarray with size = shape and dtype = numpy.float32.
"""
num_elements = 1
for size in shape:
num_elements *= size
x = np.arange(num_elements, dtype=np.float32)
self._PRNG.shuffle(x)
return x.reshape(shape)
def testDirectNotUseOverlapping(self):
for num_batches in [1, 3]:
for row_window_size in [2, 5]:
for col_window_size in [2, 4]:
num_rows = row_window_size * 5
num_cols = col_window_size * 7
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.cached_session() as _:
input_tensor = constant_op.constant(
self._GenerateUniqueRandomInputTensor(input_shape))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size, col_window_size, 1]
padding = "VALID"
output_tensor = nn_ops.max_pool(input_tensor, window_size,
stride_size, padding)
output_data = self.evaluate(output_tensor)
output_backprop = self._PRNG.randint(100, size=output_data.shape)
input_backprop_tensor = gen_nn_ops.max_pool_grad(
input_tensor, output_tensor, output_backprop, window_size,
stride_size, padding)
input_backprop = self.evaluate(input_backprop_tensor)
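              # With evenly spaced pooling sequences, fractional max pooling is
              # identical to the regular max pooling above (see the class
              # docstring), so the gradients computed by both ops must match.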
row_seq = list(range(0, num_rows + 1, row_window_size))
col_seq = list(range(0, num_cols + 1, col_window_size))
fmp_input_backprop_tensor = gen_nn_ops.fractional_max_pool_grad(
input_tensor,
output_tensor,
output_backprop,
row_seq,
col_seq,
overlapping=False)
fmp_input_backprop = self.evaluate(fmp_input_backprop_tensor)
self.assertShapeEqual(input_backprop, fmp_input_backprop_tensor)
self.assertAllClose(input_backprop, fmp_input_backprop)
def testDirectUseOverlapping(self):
for num_batches in [1, 3]:
for row_window_size in [2, 5]:
for col_window_size in [2, 4]:
num_rows = (row_window_size - 1) * 5 + 1
num_cols = (col_window_size - 1) * 7 + 1
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.cached_session() as _:
input_tensor = constant_op.constant(
self._GenerateUniqueRandomInputTensor(input_shape))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size - 1, col_window_size - 1, 1]
padding = "VALID"
output_tensor = nn_ops.max_pool(input_tensor, window_size,
stride_size, padding)
output_data = self.evaluate(output_tensor)
output_backprop = self._PRNG.randint(100, size=output_data.shape)
input_backprop_tensor = gen_nn_ops.max_pool_grad(
input_tensor, output_tensor, output_backprop, window_size,
stride_size, padding)
input_backprop = self.evaluate(input_backprop_tensor)
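              # Build pooling sequences equivalent to max pooling with stride
              # window_size - 1. The last entry is bumped so the final region
              # ends at num_rows/num_cols; e.g. row_window_size = 3 gives
              # num_rows = 11 and row_seq = [0, 2, 4, 6, 8, 11].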
row_seq = list(range(0, num_rows, row_window_size - 1))
col_seq = list(range(0, num_cols, col_window_size - 1))
row_seq[-1] += 1
col_seq[-1] += 1
fmp_input_backprop_tensor = gen_nn_ops.fractional_max_pool_grad(
input_tensor,
output_tensor,
output_backprop,
row_seq,
col_seq,
overlapping=True)
fmp_input_backprop = self.evaluate(fmp_input_backprop_tensor)
self.assertShapeEqual(input_backprop, fmp_input_backprop_tensor)
self.assertAllClose(input_backprop, fmp_input_backprop)
@test_util.run_deprecated_v1
def testAllInputOptionsThroughGradientError(self):
input_shape = (1, 7, 13, 1)
input_data = self._GenerateUniqueRandomInputTensor(input_shape)
# Add some randomness to make input_data not so 'integer'
input_data += self._PRNG.random_sample(input_shape)
pooling_ratio = [1, math.sqrt(2), math.sqrt(3), 1]
for pseudo_random in True, False:
for overlapping in True, False:
with self.cached_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_max_pool_v2(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
seed=self._SEED)
output_data = self.evaluate(output_tensor)
output_shape = output_data.shape
# error_margin and delta setting is similar to max_pool_grad.
error_margin = 1e-3
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
@test_util.run_deprecated_v1
def testDifferentTensorShapesThroughGradientError(self):
pseudo_random = True
overlapping = True
pooling_ratio = [1, math.sqrt(3), math.sqrt(2), 1]
for num_batches in [1, 2]:
for num_rows in [5, 13]:
for num_cols in [5, 11]:
for num_channels in [1, 3]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
input_data = self._GenerateUniqueRandomInputTensor(input_shape)
# Add some randomness to make input_data not so 'integer'
input_data += self._PRNG.random_sample(input_shape)
with self.cached_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_max_pool_v2(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
seed=self._SEED)
output_data = self.evaluate(output_tensor)
output_shape = output_data.shape
# error_margin and delta setting is similar to max_pool_grad.
error_margin = 1e-3
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
@test_util.run_deprecated_v1
def testLargePoolingRatioThroughGradientError(self):
input_shape = (1, 17, 23, 1)
input_data = self._GenerateUniqueRandomInputTensor(input_shape)
# Add some randomness to make input_data not so 'integer'
input_data += self._PRNG.random_sample(input_shape)
pooling_ratio = (1, math.sqrt(13), math.sqrt(7), 1)
output_shape = [int(a / b) for a, b in zip(input_shape, pooling_ratio)]
overlapping = True
pseudo_random = False
with self.cached_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_max_pool_v2(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
seed=self._SEED)
# error_margin and delta setting is similar to max_pool_grad.
error_margin = 1e-3
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
def testWhenRepeatedMaxValueInPoolingRegion(self):
"""Test when there's repeating value in pooling region.
There's no formal definition for what the gradient should be when there're
multiple max value within a pooling cell. Such as
| 1 5 |
| 5 3 |
The expected result depends heavily on implementation, if someone swap the
order of a nested for loop when walking through the tensor, result would be
very different.
The goal of this test is to alert when someone else change the
implementation. Current implementation scans row-by-row.
"""
input_data = [5.0, 4.0, 6.0, 7.0,
3.0, 5.0, 9.0, 6.0,
8.0, 8.0, 9.0, 5.0,
7.0, 4.0, 0.0, 0.0] # pyformat: disable
input_size = [1, 4, 4, 1]
output_backprop = [12.0, 15.0,
17.0, -5.0,
6.0, 21.0] # pyformat: disable
row_seq = [0, 1, 3, 4]
col_seq = [0, 2, 4]
output_data_not_overlapping = [5.0, 7.0,
8.0, 9.0,
7.0, 0.0] # pyformat: disable
output_data_overlapping = [9.0, 9.0,
9.0, 9.0,
7.0, 0.0] # pyformat: disable
output_size = [1, 3, 2, 1]
expected_input_backprop_not_overlapping = np.reshape(
[12.0, 0.0, 0.0, 15.0,
0.0, 0.0, -5.0, 0.0,
17.0, 0.0, 0.0, 0.0,
6.0, 0.0, 21.0, 0.0],
input_size) # pyformat: disable
expected_input_backprop_overlapping = np.reshape(
[0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 39.0, 0.0,
0.0, 0.0, 0.0, 0.0,
6.0, 0.0, 21.0, 0.0],
input_size) # pyformat: disable
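    # In the overlapping case, every pooling region in the first two output
    # rows has its max at the 9 in position (1, 2), so that element
    # accumulates all four gradients: 12 + 15 + 17 - 5 = 39.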
with self.cached_session() as _:
# Test when overlapping is False
input_tensor = constant_op.constant(input_data, shape=input_size)
output_tensor = constant_op.constant(
output_data_not_overlapping, shape=output_size)
grad = constant_op.constant(output_backprop, shape=output_size)
r = gen_nn_ops.fractional_max_pool_grad(
input_tensor,
output_tensor,
grad,
row_seq,
col_seq,
overlapping=False)
input_backprop_not_overlapping = self.evaluate(r)
self.assertShapeEqual(
np.reshape(expected_input_backprop_not_overlapping, input_size), r)
self.assertAllClose(expected_input_backprop_not_overlapping,
input_backprop_not_overlapping)
# Test when overlapping is True
output_tensor = constant_op.constant(
output_data_overlapping, shape=output_size)
r = gen_nn_ops.fractional_max_pool_grad(
input_tensor, output_tensor, grad, row_seq, col_seq, overlapping=True)
input_backprop_overlapping = self.evaluate(r)
self.assertShapeEqual(
np.reshape(expected_input_backprop_overlapping, input_size), r)
self.assertAllClose(expected_input_backprop_overlapping,
input_backprop_overlapping)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/fractional_max_pool_op_test.py
|
# -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_upper_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringUpperOpTest(test.TestCase):
"""Test cases for tf.strings.upper."""
def test_string_upper(self):
strings = ["Pigs on The Wing", "aNimals"]
with self.cached_session():
output = string_ops.string_upper(strings)
output = self.evaluate(output)
self.assertAllEqual(output, [b"PIGS ON THE WING", b"ANIMALS"])
def test_string_upper_2d(self):
strings = [["pigS on THE wIng", "aniMals"], [" hello ", "\n\tWorld! \r \n"]]
with self.cached_session():
output = string_ops.string_upper(strings)
output = self.evaluate(output)
self.assertAllEqual(output, [[b"PIGS ON THE WING", b"ANIMALS"],
[b" HELLO ", b"\n\tWORLD! \r \n"]])
def test_string_upper_unicode(self):
strings = [["óósschloë"]]
with self.cached_session():
output = string_ops.string_upper(strings, encoding="utf-8")
output = self.evaluate(output)
# output: "ÓÓSSCHLOË"
self.assertAllEqual(output, [[b"\xc3\x93\xc3\x93SSCHLO\xc3\x8b"]])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/string_upper_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SoftmaxCrossEntropyWithLogits op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class XentTest(test.TestCase):
def _npXent(self, features, labels, dim=-1):
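    # NumPy reference implementation: probs = softmax(features) along `dim`,
    # backprop = probs - labels, and loss = -sum(labels * log(probs)) along
    # `dim`.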
if dim == -1:
dim = len(features.shape) - 1
one_only_on_dim = list(features.shape)
one_only_on_dim[dim] = 1
e = np.exp(
features - np.reshape(np.amax(features, axis=dim), one_only_on_dim))
probs = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
bp = (probs - labels)
l = -np.sum(labels * np.log(probs + 1.0e-20), axis=dim)
return l, bp
# TODO(b/123860949): The values are constant folded for XLA, so placeholders
# are needed.
def _testXent(self,
np_features,
np_labels,
use_gpu=False,
with_placeholders=False):
np_loss, np_backprop = self._npXent(np_features, np_labels)
with self.cached_session(use_gpu=use_gpu) as sess:
if with_placeholders:
features_placeholder = array_ops.placeholder(np_features.dtype)
labels_placeholder = array_ops.placeholder(np_labels.dtype)
loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
labels=labels_placeholder, features=features_placeholder)
tf_loss, tf_backprop = sess.run([loss, backprop],
feed_dict={
labels_placeholder: np_labels,
features_placeholder: np_features
})
else:
loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
np_features, np_labels)
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def _testXentWrapper(self, np_features, np_labels, dim=-1, use_gpu=False):
np_loss, _ = self._npXent(np_features, np_labels, dim=dim)
with self.cached_session(use_gpu=use_gpu) as sess:
loss = nn_ops.softmax_cross_entropy_with_logits(
labels=np_labels, logits=np_features, dim=dim)
tf_loss = self.evaluate(loss)
print("np_loss:", np_loss)
print("tf_loss:", tf_loss)
self.assertAllCloseAccordingToType(np_loss, tf_loss)
# TODO(b/123860949): The values are constant folded for XLA, so placeholders
# are needed.
def _testAll(self, features, labels, with_placeholders=False):
self._testXent(
features, labels, use_gpu=False, with_placeholders=with_placeholders)
self._testXent(
features, labels, use_gpu=True, with_placeholders=with_placeholders)
def _testSingleClass(self, use_gpu=False):
for dtype in np.float16, np.float32:
with self.cached_session(use_gpu=use_gpu) as sess:
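        # With a single class the softmax probability is always 1, so the loss
        # is -label * log(1) = 0 and the backprop is 1 - label.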
loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
np.array([[1.], [-1.], [0.]]).astype(dtype),
np.array([[-1.], [0.], [1.]]).astype(dtype))
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
self.assertAllClose([[2.0], [1.0], [0.0]], tf_backprop)
def testSingleClass(self):
self._testSingleClass(True)
self._testSingleClass(False)
@test_util.run_deprecated_v1
def testRankTooLarge(self):
for dtype in np.float16, np.float32:
np_features = np.array([[[1., 1., 1., 1.]], [[1., 2., 3.,
4.]]]).astype(dtype)
np_labels = np.array([[[0., 0., 0., 1.]], [[0., .5, .5,
0.]]]).astype(dtype)
self.assertRaisesRegexp(ValueError, "rank 2, but is rank 3",
gen_nn_ops.softmax_cross_entropy_with_logits,
np_features, np_labels)
def testNpXent(self):
# We create 2 batches of logits for testing.
# batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.
# batch 1 has a bit of difference: 1, 2, 3, 4, with soft targets (1, 2).
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
labels = [[0., 0., 0., 1.], [0., .5, .5, 0.]]
# For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25
# With a hard target 3, the backprop is [0.25, 0.25, 0.25, -0.75]
# The loss for this batch is -log(0.25) = 1.386
#
# For batch 1, we have:
# exp(0) = 1
# exp(1) = 2.718
# exp(2) = 7.389
# exp(3) = 20.085
# SUM = 31.192
# So we have as probabilities:
# exp(0) / SUM = 0.032
# exp(1) / SUM = 0.087
# exp(2) / SUM = 0.237
# exp(3) / SUM = 0.644
# With a soft target (1, 2), the backprop is
# [0.032, 0.087 - 0.5 = -0.413, 0.237 - 0.5 = -0.263, 0.644]
    # The loss for this batch is 0.5 * -log(0.087) + 0.5 * -log(0.237) = 1.9401,
    # giving a per-example loss vector of [1.3862, 1.9401].
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, -0.75], [0.0321, -0.4129, -0.2632,
0.6439]]),
np_backprop,
rtol=1.e-3,
atol=1.e-3)
self.assertAllClose(
np.array([1.3862, 1.9401]), np_loss, rtol=1.e-3, atol=1.e-3)
def testShapeBroadcast(self):
np_f = np.array([[1., 2., 3., 4.],
[1., 2., 3., 4.]]).astype(np.float32)
np_l = np.array([[0., 0., 0., 1.],
[0., .5, .5, 0.]]).astype(np.float32)
np_loss, np_backprop = self._npXent(np_f, np_l)
tf_f = constant_op.constant(
np.array([[1., 2., 3., 4.]]).astype(np.float32))
tf_l = constant_op.constant(
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float32))
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu) as sess:
loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
tf_f, tf_l)
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
# TODO(b/123860949): The values are constant folded for XLA, so placeholders
# are needed.
@test_util.run_deprecated_v1
def testFeatureBroadcast(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([[0., 0., 0., 1.]]).astype(np.float16),
with_placeholders=True)
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([[0.], [2.]]).astype(np.float16),
with_placeholders=True)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
with self.cached_session():
with self.assertRaises(ValueError):
gen_nn_ops.softmax_cross_entropy_with_logits(
[[0., 1.], [2., 3.]], [[0., 1., 0.], [1., 0., 0.]])
@test_util.run_deprecated_v1
def testNotMatrix(self):
with self.cached_session():
with self.assertRaises(ValueError):
gen_nn_ops.softmax_cross_entropy_with_logits([0., 1., 2., 3.],
[0., 1., 0., 1.])
def testHalf(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float16))
def testFloat(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32),
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float32))
def testDouble(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float64))
@test_util.run_deprecated_v1
def testGradient(self):
with self.cached_session() as sess:
l = constant_op.constant(
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5],
shape=[3, 4],
dtype=dtypes.float64,
name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
x = nn_ops.softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent")
err = gradient_checker.compute_gradient_error(f, [3, 4], x, [3])
      # Check that no extra computation is performed. When only the first
      # derivative is requested, the second derivative must not be computed,
      # so there should be no `BatchMatMul` op in the graph.
op_names = [
op.op_def.name for op in sess.graph.get_operations() if op.op_def
]
self.assertNotIn("BatchMatMul", op_names)
print("cross entropy gradient err = ", err)
self.assertLess(err, 5e-8)
@test_util.run_deprecated_v1
def testGradientLabelWithV2(self):
with self.cached_session():
l = constant_op.constant(
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5],
shape=[3, 4],
dtype=dtypes.float64,
name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
x = nn_ops.softmax_cross_entropy_with_logits_v2(
labels=l, logits=f, name="xent")
err = gradient_checker.compute_gradient_error(l, [3, 4], x, [3])
self.assertLess(err, 5e-8)
@test_util.run_deprecated_v1
def testSecondGradient(self):
with self.cached_session() as sess:
l = constant_op.constant(
[
0.0, 0.0, 1.0 / 3, 0.0, 1.0 / 3, 0.0, 0.0, 0.0, 0.0, 0.5 / 3, 0.0,
0.5 / 3
],
shape=[12],
dtype=dtypes.float64,
name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[12],
dtype=dtypes.float64,
name="f")
x = nn_ops.softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent")
loss = math_ops.reduce_sum(x)
gradients = gradients_impl.gradients(loss, [f])[0]
err = gradient_checker.compute_gradient_error(f, [12], gradients, [12])
      # Check that the second derivative is calculated. Due to the
      # implementation of the xent gradient, its presence shows up as a
      # `BatchMatMul` op in the graph.
op_names = [
op.op_def.name for op in sess.graph.get_operations() if op.op_def
]
if compat.forward_compatible(2019, 4, 25):
self.assertIn("BatchMatMulV2", op_names)
else:
self.assertIn("BatchMatMul", op_names)
print("cross entropy hessian err = ", err)
self.assertLess(err, 5e-8)
def testWrapper(self):
features = np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(
np.float32)
labels = np.array([[[0., 0., 0., 1.], [0., 1., 0., 0.]],
[[0., 0.5, 0.5, 0.], [0.5, 0.5, 0., 0.]],
[[0., 1., 0., 0.], [0., 0., 1., 0.]]]).astype(
np.float32)
self._testXentWrapper(features, labels, dim=0, use_gpu=False)
self._testXentWrapper(features, labels, dim=0, use_gpu=True)
self._testXentWrapper(features, labels, dim=1, use_gpu=False)
self._testXentWrapper(features, labels, dim=1, use_gpu=True)
self._testXentWrapper(features, labels, dim=-1, use_gpu=False)
self._testXentWrapper(features, labels, dim=-1, use_gpu=True)
def testZeroDimension(self):
features = np.zeros([0, 2, 4]).astype(np.float32)
labels = np.zeros([0, 2, 4]).astype(np.float32)
np_loss, _ = self._npXent(features, labels)
with self.session(use_gpu=True) as sess:
loss = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=features)
tf_loss = self.evaluate(loss)
self.assertAllEqual(np_loss, tf_loss)
class XentBenchmark(test.Benchmark):
def benchmarkZeroDimension(self):
for (m, n, p, use_gpu) in itertools.product(
[128],
[10, 100, 1000, 10000, 100000],
[0.001, 0.01, 0.5, 0.99, 1.0],
[False]):
k = int(p * n)
if k == 0:
continue
name = "zero_dimension_m_%d_n_%d_k_%g_use_gpu_%s" % (m, n, k, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
labels = array_ops.zeros([0, 2, 4], dtype=dtypes.float32)
logits = array_ops.zeros([0, 2, 4], dtype=dtypes.float32)
op = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
with session.Session() as sess:
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
gb_processed_input = m * n / 1.0e9
throughput = gb_processed_input / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
def benchmarkSingleClass(self):
for (m, n, p, use_gpu) in itertools.product(
[128],
[10, 100, 1000, 10000, 100000],
[0.001, 0.01, 0.5, 0.99, 1.0],
[False]):
k = int(p * n)
if k == 0:
continue
name = "single_class_m_%d_n_%d_k_%g_use_gpu_%s" % (m, n, k, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
labels = constant_op.constant([[1.], [-1.], [0.]],
dtype=dtypes.float32)
logits = constant_op.constant([[-1.], [0.], [1.]],
dtype=dtypes.float32)
op = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
with session.Session() as sess:
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
gb_processed_input = m * n / 1.0e9
throughput = gb_processed_input / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/xent_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CandidateSamplerOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import candidate_sampling_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class RangeSamplerOpsTest(test.TestCase):
BATCH_SIZE = 3
NUM_TRUE = 2
RANGE = 5
NUM_SAMPLED = RANGE
TRUE_LABELS = [[1, 2], [0, 4], [3, 3]]
@test_util.run_deprecated_v1
def testTrueCandidates(self):
with self.cached_session() as sess:
indices = constant_op.constant([0, 0, 1, 1, 2, 2])
true_candidates_vec = constant_op.constant([1, 2, 0, 4, 3, 3])
true_candidates_matrix = array_ops.reshape(
true_candidates_vec, [self.BATCH_SIZE, self.NUM_TRUE])
indices_val, true_candidates_val = sess.run(
[indices, true_candidates_matrix])
self.assertAllEqual(indices_val, [0, 0, 1, 1, 2, 2])
self.assertAllEqual(true_candidates_val, self.TRUE_LABELS)
def testSampledCandidates(self):
with self.cached_session():
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
sampled_candidates, _, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
result = self.evaluate(sampled_candidates)
expected_ids = [0, 1, 2, 3, 4]
self.assertAllEqual(result, expected_ids)
self.assertEqual(sampled_candidates.get_shape(), [self.NUM_SAMPLED])
def testTrueLogExpectedCount(self):
with self.cached_session():
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
_, true_expected_count, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
true_log_expected_count = math_ops.log(true_expected_count)
result = self.evaluate(true_log_expected_count)
self.assertAllEqual(result, [[0.0] * self.NUM_TRUE] * self.BATCH_SIZE)
self.assertEqual(true_expected_count.get_shape(),
[self.BATCH_SIZE, self.NUM_TRUE])
self.assertEqual(true_log_expected_count.get_shape(),
[self.BATCH_SIZE, self.NUM_TRUE])
def testSampledLogExpectedCount(self):
with self.cached_session():
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
_, _, sampled_expected_count = candidate_sampling_ops.all_candidate_sampler( # pylint: disable=line-too-long
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
sampled_log_expected_count = math_ops.log(sampled_expected_count)
result = self.evaluate(sampled_log_expected_count)
self.assertAllEqual(result, [0.0] * self.NUM_SAMPLED)
self.assertEqual(sampled_expected_count.get_shape(), [self.NUM_SAMPLED])
self.assertEqual(sampled_log_expected_count.get_shape(), [self.NUM_SAMPLED])
def testAccidentalHits(self):
with self.cached_session() as sess:
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
sampled_candidates, _, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
accidental_hits = candidate_sampling_ops.compute_accidental_hits(
true_classes, sampled_candidates, self.NUM_TRUE)
indices, ids, weights = self.evaluate(accidental_hits)
self.assertEqual(1, accidental_hits[0].get_shape().ndims)
self.assertEqual(1, accidental_hits[1].get_shape().ndims)
self.assertEqual(1, accidental_hits[2].get_shape().ndims)
for index, id_, weight in zip(indices, ids, weights):
self.assertTrue(id_ in self.TRUE_LABELS[index])
self.assertLess(weight, -1.0e37)
@test_util.run_deprecated_v1
def testSeed(self):
def draw(seed):
with self.cached_session():
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
sampled, _, _ = candidate_sampling_ops.log_uniform_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True, 5, seed=seed)
return self.evaluate(sampled)
# Non-zero seed. Repeatable.
for seed in [1, 12, 123, 1234]:
self.assertAllEqual(draw(seed), draw(seed))
# Seed=0 means random seeds.
num_same = 0
for _ in range(10):
if np.allclose(draw(None), draw(None)):
num_same += 1
# Accounts for the fact that the same random seed may be picked
# twice very rarely.
self.assertLessEqual(num_same, 2)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/candidate_sampler_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for new version of accumulate_n op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class AccumulateNV2EagerTest(test_util.TensorFlowTestCase):
"""Tests of the new, differentiable version of accumulate_n."""
def testMinimalEagerMode(self):
forty = constant_op.constant(40)
two = constant_op.constant(2)
answer = math_ops.accumulate_n([forty, two])
self.assertEqual(42, answer.numpy())
def testFloat(self):
np.random.seed(12345)
x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
tf_x = ops.convert_n_to_tensor(x)
self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x))
self.assertAllClose(x[0] * 5,
math_ops.accumulate_n([tf_x[0]] * 5))
def testGrad(self):
np.random.seed(42)
num_inputs = 3
input_vars = [
resource_variable_ops.ResourceVariable(10.0 * np.random.random(),
name="t%d" % i)
for i in range(0, num_inputs)
]
def fn(first, second, third):
return math_ops.accumulate_n([first, second, third])
grad_fn = backprop.gradients_function(fn)
grad = grad_fn(input_vars[0], input_vars[1], input_vars[2])
self.assertAllEqual(np.repeat(1.0, num_inputs), # d/dx (x + y + ...) = 1
[elem.numpy() for elem in grad])
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/accumulate_n_eager_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ctc_ops.ctc_loss_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import zip_longest
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import ctc_ops
from tensorflow.python.platform import test
def grouper(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks."""
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
def flatten(list_of_lists):
"""Flatten one level of nesting."""
return itertools.chain.from_iterable(list_of_lists)
class CTCGreedyDecoderTest(test.TestCase):
def _testCTCDecoder(self,
decoder,
inputs,
seq_lens,
log_prob_truth,
decode_truth,
expected_err_re=None,
**decoder_args):
inputs_t = [ops.convert_to_tensor(x) for x in inputs]
# convert inputs_t into a [max_time x batch_size x depth] tensor
# from a len time python list of [batch_size x depth] tensors
inputs_t = array_ops.stack(inputs_t)
with self.cached_session(use_gpu=False) as sess:
decoded_list, log_probability = decoder(
inputs_t, sequence_length=seq_lens, **decoder_args)
decoded_unwrapped = list(
flatten([(st.indices, st.values, st.dense_shape) for st in
decoded_list]))
if expected_err_re is None:
outputs = sess.run(decoded_unwrapped + [log_probability])
# Group outputs into (ix, vals, shape) tuples
output_sparse_tensors = list(grouper(outputs[:-1], 3))
output_log_probability = outputs[-1]
# Check the number of decoded outputs (top_paths) match
self.assertEqual(len(output_sparse_tensors), len(decode_truth))
# For each SparseTensor tuple, compare (ix, vals, shape)
for out_st, truth_st, tf_st in zip(output_sparse_tensors, decode_truth,
decoded_list):
self.assertAllEqual(out_st[0], truth_st[0]) # ix
self.assertAllEqual(out_st[1], truth_st[1]) # vals
self.assertAllEqual(out_st[2], truth_st[2]) # shape
# Compare the shapes of the components with the truth. The
# `None` elements are not known statically.
self.assertEqual([None, truth_st[0].shape[1]],
tf_st.indices.get_shape().as_list())
self.assertEqual([None], tf_st.values.get_shape().as_list())
self.assertShapeEqual(truth_st[2], tf_st.dense_shape)
# Make sure decoded probabilities match
self.assertAllClose(output_log_probability, log_prob_truth, atol=1e-6)
else:
with self.assertRaisesOpError(expected_err_re):
sess.run(decoded_unwrapped + [log_probability])
@test_util.run_deprecated_v1
def testCTCGreedyDecoder(self):
"""Test two batch entries - best path decoder."""
max_time_steps = 6
# depth == 4
seq_len_0 = 4
input_prob_matrix_0 = np.asarray(
[[1.0, 0.0, 0.0, 0.0], # t=0
[0.0, 0.0, 0.4, 0.6], # t=1
[0.0, 0.0, 0.4, 0.6], # t=2
[0.0, 0.9, 0.1, 0.0], # t=3
[0.0, 0.0, 0.0, 0.0], # t=4 (ignored)
[0.0, 0.0, 0.0, 0.0]], # t=5 (ignored)
dtype=np.float32)
input_log_prob_matrix_0 = np.log(input_prob_matrix_0)
seq_len_1 = 5
# dimensions are time x depth
input_prob_matrix_1 = np.asarray(
[
[0.1, 0.9, 0.0, 0.0], # t=0
[0.0, 0.9, 0.1, 0.0], # t=1
[0.0, 0.0, 0.1, 0.9], # t=2
[0.0, 0.9, 0.1, 0.1], # t=3
[0.9, 0.1, 0.0, 0.0], # t=4
[0.0, 0.0, 0.0, 0.0]
], # t=5 (ignored)
dtype=np.float32)
input_log_prob_matrix_1 = np.log(input_prob_matrix_1)
# len max_time_steps array of batch_size x depth matrices
inputs = [
np.vstack(
[input_log_prob_matrix_0[t, :], input_log_prob_matrix_1[t, :]])
for t in range(max_time_steps)
]
# batch_size length vector of sequence_lengths
seq_lens = np.array([seq_len_0, seq_len_1], dtype=np.int32)
# batch_size length vector of negative log probabilities
log_prob_truth = np.array([
np.sum(-np.log([1.0, 0.6, 0.6, 0.9])),
np.sum(-np.log([0.9, 0.9, 0.9, 0.9, 0.9]))
], np.float32)[:, np.newaxis]
# decode_truth: one SparseTensor (ix, vals, shape)
decode_truth = [
(
np.array(
[
[0, 0], # batch 0, 2 outputs
[0, 1],
[1, 0], # batch 1, 3 outputs
[1, 1],
[1, 2]
],
dtype=np.int64),
np.array(
[
0,
1, # batch 0
1,
1,
0
], # batch 1
dtype=np.int64),
# shape is batch x max_decoded_length
np.array(
[2, 3], dtype=np.int64)),
]
self._testCTCDecoder(ctc_ops.ctc_greedy_decoder, inputs, seq_lens,
log_prob_truth, decode_truth)
@test_util.run_deprecated_v1
def testCTCDecoderBeamSearch(self):
"""Test one batch, two beams - hibernating beam search."""
# max_time_steps == 8
depth = 6
seq_len_0 = 5
input_prob_matrix_0 = np.asarray(
[
[0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],
[0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],
[0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],
[0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],
[0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],
# Random entry added in at time=5
[0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]
],
dtype=np.float32)
# Add arbitrary offset - this is fine
input_prob_matrix_0 = input_prob_matrix_0 + 2.0
# len max_time_steps array of batch_size x depth matrices
inputs = ([
input_prob_matrix_0[t, :][np.newaxis, :] for t in range(seq_len_0)
] # Pad to max_time_steps = 8
+ 2 * [np.zeros(
(1, depth), dtype=np.float32)])
# batch_size length vector of sequence_lengths
seq_lens = np.array([seq_len_0], dtype=np.int32)
# batch_size length vector of log probabilities
log_prob_truth = np.array(
[
-5.811451, # output beam 0
-6.63339 # output beam 1
],
np.float32)[np.newaxis, :]
# decode_truth: two SparseTensors, (ix, values, shape)
decode_truth = [
# beam 0, batch 0, two outputs decoded
(np.array(
[[0, 0], [0, 1]], dtype=np.int64), np.array(
[1, 0], dtype=np.int64), np.array(
[1, 2], dtype=np.int64)),
# beam 1, batch 0, one output decoded
(np.array(
[[0, 0]], dtype=np.int64), np.array(
[1], dtype=np.int64), np.array(
[1, 1], dtype=np.int64)),
]
# Test correct decoding.
self._testCTCDecoder(
ctc_ops.ctc_beam_search_decoder,
inputs,
seq_lens,
log_prob_truth,
decode_truth,
beam_width=2,
top_paths=2)
# Requesting more paths than the beam width allows.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
(".*requested more paths than the beam "
"width.*")):
self._testCTCDecoder(
ctc_ops.ctc_beam_search_decoder,
inputs,
seq_lens,
log_prob_truth,
decode_truth,
beam_width=2,
top_paths=3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/ctc_decoder_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for PrecisionOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class InTopKTest(test.TestCase):
def _validateInTopK(self, predictions, target, k, expected):
np_ans = np.array(expected)
with self.cached_session(use_gpu=True):
precision = nn_ops.in_top_k(predictions, target, k)
out = self.evaluate(precision)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, precision)
def testInTop1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
target = [3, 2]
self._validateInTopK(predictions, target, 1, [True, False])
def testInTop2(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
target = [2, 2]
self._validateInTopK(predictions, target, 2, [False, True])
def testInTop2Tie(self):
# Class 2 and 3 tie for 2nd, so both are considered in top 2.
predictions = [[0.1, 0.3, 0.2, 0.2], [0.1, 0.3, 0.2, 0.2]]
target = [2, 3]
self._validateInTopK(predictions, target, 2, [True, True])
def testInTop2_int64Target(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
target = np.asarray([0, 2]).astype(np.int64)
self._validateInTopK(predictions, target, 2, [False, True])
def testInTopNan(self):
predictions = [[0.1, float("nan"), 0.2, 0.4], [0.1, 0.2, 0.3, float("inf")]]
target = [1, 3]
self._validateInTopK(predictions, target, 2, [False, False])
def testBadTarget(self):
predictions = [[0.1, 0.3, 0.2, 0.2], [0.1, 0.3, 0.2, 0.2]]
target = [2, 4] # must return False for invalid target
self._validateInTopK(predictions, target, 2, [True, False])
def testTensorK(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
target = [0, 2]
k = constant_op.constant(3)
np_ans = np.array([False, True])
with self.cached_session():
precision = nn_ops.in_top_k(predictions, target, k)
out = self.evaluate(precision)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, precision)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/in_topk_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ParameterizedTruncatedNormalOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import timeit
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _get_stddev_inside_bounds_before_using_randn(gpu):
# The boundary where the randn sampler is used varies between CPU and GPU.
if gpu:
return 1.3
else:
return 1.7
class TruncatedNormalMoments(object):
memoized_moments = None
mean = None
stddev = None
minval = None
maxval = None
def __init__(self, mean, stddev, minval, maxval):
self.memoized_moments = [1.0] # 0th moment
self.mean = np.double(mean)
self.stddev = np.double(stddev)
# NOTE(ringwalt): The formula doesn't handle infinite values.
self.minval = np.double(max(-10, minval))
self.maxval = np.double(min(10, maxval))
def __getitem__(self, moment):
"""Calculates the truncated normal moments.
Args:
      moment: The order of the moment to compute.
Returns:
The value for the given moment.
Uses the recurrence relation described in:
http://www.smp.uq.edu.au/people/YoniNazarathy/teaching_projects
/studentWork/EricOrjebin_TruncatedNormalMoments.pdf
"""
assert moment > 0
# The test case must ensure it can import scipy.stats before this point.
import scipy.stats # pylint: disable=g-import-not-at-top
dist = scipy.stats.norm(loc=self.mean, scale=self.stddev)
for k in range(len(self.memoized_moments), moment + 1):
m_k_minus_2 = self.memoized_moments[k - 2] if k > 1 else np.double(0.0)
m_k_minus_1 = self.memoized_moments[k - 1]
numerator = (np.power(self.maxval, k - 1) * dist.pdf(self.maxval) -
np.power(self.minval, k - 1) * dist.pdf(self.minval))
denominator = dist.cdf(self.maxval) - dist.cdf(self.minval)
m = ((k - 1) * self.stddev**2 * m_k_minus_2 + self.mean * m_k_minus_1 -
self.stddev * numerator / denominator)
assert abs(m) < 1e50 # ensure numerical accuracy
self.memoized_moments.append(m)
return self.memoized_moments[moment]
def calculate_moments(samples, max_moment):
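  # Computes the empirical raw moments E[x**k] of `samples` for
  # k = 0..max_moment by accumulating successive powers of each sample and
  # averaging over the sample count.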
moments = [0.0] * (max_moment + 1)
for sample in samples:
value = 1.0
for k in range(len(moments)):
moments[k] += value
value *= sample
for i in range(len(moments)):
moments[i] /= len(samples)
return moments
def z_test(real, expected, i, num_samples):
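  # Returns the z-score of the observed i-th moment against its expected
  # value, where the variance estimate combines the sampling variance of the
  # i-th moment with a small per-operation numerical error term.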
numerical_error = 1e-6 # per-operation error
moment_mean = expected[i]
moment_squared = expected[2 * i]
moment_var = moment_squared - moment_mean * moment_mean
error_per_moment = i * numerical_error
total_variance = moment_var / float(num_samples) + error_per_moment
return abs((real[i] - moment_mean) / math.sqrt(total_variance))
class ParameterizedTruncatedNormalTest(test.TestCase):
z_limit = 6.0
# Stop at moment 10 to avoid numerical errors in the theoretical moments.
max_moment = 10
def validateMoments(self, shape, mean, stddev, minval, maxval, seed=1618):
try:
# TruncatedNormalMoments requires scipy.stats.
# Give up early if we are unable to import it.
import scipy.stats # pylint: disable=g-import-not-at-top,unused-variable
random_seed.set_random_seed(seed)
with self.cached_session(use_gpu=True):
samples = random_ops.parameterized_truncated_normal(shape, mean, stddev,
minval,
maxval).eval()
assert (~np.isnan(samples)).all()
moments = calculate_moments(samples, self.max_moment)
expected_moments = TruncatedNormalMoments(mean, stddev, minval, maxval)
num_samples = functools.reduce(lambda x, y: x * y, shape, 1)
for i in range(1, len(moments)):
self.assertLess(
z_test(moments, expected_moments, i, num_samples), self.z_limit)
except ImportError as e:
tf_logging.warn("Cannot test truncated normal op: %s" % str(e))
def validateKolmogorovSmirnov(self,
shape,
mean,
stddev,
minval,
maxval,
seed=1618):
try:
import scipy.stats # pylint: disable=g-import-not-at-top
random_seed.set_random_seed(seed)
with self.cached_session(use_gpu=True):
samples = random_ops.parameterized_truncated_normal(shape, mean, stddev,
minval,
maxval).eval()
assert (~np.isnan(samples)).all()
minval = max(mean - stddev * 10, minval)
maxval = min(mean + stddev * 10, maxval)
dist = scipy.stats.norm(loc=mean, scale=stddev)
cdf_min = dist.cdf(minval)
cdf_max = dist.cdf(maxval)
def truncated_cdf(x):
return np.clip((dist.cdf(x) - cdf_min) / (cdf_max - cdf_min), 0.0, 1.0)
pvalue = scipy.stats.kstest(samples, truncated_cdf)[1]
self.assertGreater(pvalue, 1e-10)
except ImportError as e:
tf_logging.warn("Cannot test truncated normal op: %s" % str(e))
@test_util.run_deprecated_v1
def testDefaults(self):
self.validateMoments([10**5], 0.0, 1.0, -2.0, 2.0)
@test_util.run_deprecated_v1
def testShifted(self):
self.validateMoments([10**5], -1.0, 1.0, -2.0, 2.0)
@test_util.run_deprecated_v1
def testRightTail(self):
self.validateMoments([10**5], 0.0, 1.0, 4.0, np.infty)
@test_util.run_deprecated_v1
def testLeftTail(self):
self.validateMoments([10**5], 0.0, 1.0, -np.infty, -4.0)
@test_util.run_deprecated_v1
def testLeftTailTwoSidedBounds(self):
self.validateMoments([10**5], 0.0, 1.0, -6.0, -3.0)
@test_util.run_deprecated_v1
def testTwoSidedLeftTailShifted(self):
self.validateKolmogorovSmirnov([10**5], 6.0, 1.0, -1.0, 1.0)
@test_util.run_deprecated_v1
def testRightTailShifted(self):
self.validateMoments([10**5], -5.0, 1.0, 2.0, np.infty)
@test_util.run_deprecated_v1
def testSmallStddev(self):
self.validateKolmogorovSmirnov([10**5], 0.0, 0.1, 0.05, 0.10)
@test_util.run_deprecated_v1
def testSamplingWithSmallStdDevFarFromBound(self):
sample_op = random_ops.parameterized_truncated_normal(
shape=(int(1e5),), means=0.8, stddevs=0.05, minvals=-1., maxvals=1.)
with self.session(use_gpu=True) as sess:
samples = sess.run(sample_op)
# 0. is more than 16 standard deviations from the mean, and
# should have a likelihood < 1e-57.
assert (~np.isnan(samples)).all()
no_neg_samples = np.sum(samples < 0.)
self.assertEqual(no_neg_samples, 0.)
@test_util.run_deprecated_v1
def testSamplingAtRandnSwitchover(self):
# The randn sampler is used as the bounds are moved farther from the mean,
# and the probability of accepting a sample increases the farther the
# bounds are from the mean.
# This test asserts that at the point of switchover, both samplers are
# working (not raising an error or returning nan) and returning the
# expected moments.
use_gpu = test.is_gpu_available()
stddev_inside_bounds_before_using_randn = (
_get_stddev_inside_bounds_before_using_randn(use_gpu))
epsilon = 0.001
self.validateMoments(
shape=[10**6],
mean=0.,
stddev=1.0,
minval=-epsilon,
maxval=stddev_inside_bounds_before_using_randn - epsilon)
self.validateMoments(
shape=[10**6],
mean=0.,
stddev=1.0,
minval=-epsilon,
maxval=stddev_inside_bounds_before_using_randn + epsilon)
# Benchmarking code
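# parameterized_vs_naive times ParameterizedTruncatedNormalOp against the
# plain TruncatedNormalOp on the same shape, with graph optimizations
# disabled so neither op is rewritten away.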
def parameterized_vs_naive(shape, num_iters, use_gpu=False):
np.random.seed(1618) # Make it reproducible.
# No CSE/CF.
optimizer_options = config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0)
config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
optimizer_options=optimizer_options))
with session.Session(config=config) as sess:
with ops.device("/cpu:0" if not use_gpu else None):
param_op = control_flow_ops.group(
random_ops.parameterized_truncated_normal(shape))
naive_op = control_flow_ops.group(random_ops.truncated_normal(shape))
# Burn-in to avoid session setup costs in the timing.
sess.run(param_op)
sess.run(param_op)
param_dt = timeit.timeit(lambda: sess.run(param_op), number=num_iters)
sess.run(naive_op)
sess.run(naive_op)
naive_dt = timeit.timeit(lambda: sess.run(naive_op), number=num_iters)
return param_dt, naive_dt
def randn_sampler_switchover(shape, num_iters, use_gpu=False):
# Benchmark samplers constructed with bounds just above and just below the
# threshold at which the randn rejection sampler takes over, to check that
# this threshold is set correctly.
# The uniform and randn samplers should have about the same performance
# at this point.
stddev_inside_bounds_before_using_randn = (
_get_stddev_inside_bounds_before_using_randn(use_gpu))
epsilon = 0.001
np.random.seed(1618) # Make it reproducible.
# No CSE/CF.
optimizer_options = config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0)
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=optimizer_options))
with session.Session(config=config) as sess:
with ops.device("/cpu:0" if not use_gpu else "/gpu:0"):
uniform_sampler_op = control_flow_ops.group(
random_ops.parameterized_truncated_normal(
shape,
means=0.,
stddevs=1.0,
minvals=-stddev_inside_bounds_before_using_randn + epsilon,
maxvals=0.01))
randn_sampler_op = control_flow_ops.group(
random_ops.parameterized_truncated_normal(
shape,
means=0.,
stddevs=1.0,
minvals=-stddev_inside_bounds_before_using_randn - epsilon,
maxvals=0.01))
# Burn-in to avoid session setup costs in the timing.
sess.run(uniform_sampler_op)
sess.run(uniform_sampler_op)
uniform_dt = timeit.timeit(
lambda: sess.run(uniform_sampler_op), number=num_iters)
sess.run(randn_sampler_op)
sess.run(randn_sampler_op)
randn_dt = timeit.timeit(
lambda: sess.run(randn_sampler_op), number=num_iters)
return randn_dt, uniform_dt
class TruncatedNormalBenchmark(test.Benchmark):
def benchmarkParameterizedOpVsNaiveOpCpu(self):
self._benchmarkParameterizedOpVsNaiveOp(False)
def benchmarkParameterizedOpVsNaiveOpGpu(self):
self._benchmarkParameterizedOpVsNaiveOp(True)
def _benchmarkParameterizedOpVsNaiveOp(self, use_gpu):
num_iters = 50
print(("Composition of new ParameterizedTruncatedNormalOp vs. "
"naive TruncatedNormalOp [%d iters]") % num_iters)
print("Shape\tsec(parameterized)\tsec(naive)\tspeedup")
for shape in [[10000, 100], [1000, 1000], [1000000], [100, 100, 100],
[20, 20, 20, 20]]:
p_dt, n_dt = parameterized_vs_naive(shape, num_iters, use_gpu)
print("%s\t%.3f\t%.3f\t%.2f" % (shape, p_dt, n_dt, p_dt / n_dt))
shape_str = "-".join(map(str, shape))
self.report_benchmark(
name="parameterized_shape" + shape_str,
iters=num_iters,
wall_time=p_dt)
self.report_benchmark(
name="naive_shape" + shape_str, iters=num_iters, wall_time=n_dt)
def benchmarkRandnSamplerCPU(self):
self._benchmarkRandnSampler(False)
def benchmarkRandnSamplerGPU(self):
self._benchmarkRandnSampler(True)
def _benchmarkRandnSampler(self, use_gpu):
num_iters = 100
shape = [int(1e6)]
randn_dt, uniform_dt = randn_sampler_switchover(shape, num_iters, use_gpu)
print(("Randn Sampler vs uniform samplers [%d iters]\t%.4f\t%.4f") %
(num_iters, randn_dt, uniform_dt))
gpu_str = "_gpu" if use_gpu else "_cpu"
self.report_benchmark(
name="randn_sampler" + gpu_str, iters=num_iters, wall_time=randn_dt)
self.report_benchmark(
name="uniform_sampler" + gpu_str, iters=num_iters, wall_time=uniform_dt)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/parameterized_truncated_normal_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test as test_lib
def _AddTest(test, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
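# For example, _AddTest(MatrixBandPartTest, "MatrixBandPart", "float32_2_7")
# attaches a test method named "test_MatrixBandPart_float32_2_7".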
class MatrixBandPartTest(test_lib.TestCase):
pass # Filled in below
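# matrix_band_part is expected to keep `lower` subdiagonals and `upper`
# superdiagonals of each innermost matrix (a negative bound keeps that whole
# triangle). The numpy reference below builds the same band from a matrix of
# ones using np.triu and np.tril.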
def _GetMatrixBandPartTest(dtype_, batch_shape_, shape_):
@test_util.run_v1_only("b/120545219")
def Test(self):
mat = np.ones(shape_).astype(dtype_)
batch_mat = np.tile(mat, batch_shape_ + (1, 1))
for lower in -1, 0, 1, shape_[-2] - 1:
for upper in -1, 0, 1, shape_[-1] - 1:
band_np = mat
if lower >= 0:
band_np = np.triu(band_np, -lower)
if upper >= 0:
band_np = np.tril(band_np, upper)
if batch_shape_ != ():
band_np = np.tile(band_np, batch_shape_ + (1, 1))
for index_dtype in [dtypes_lib.int32, dtypes_lib.int64]:
with self.cached_session(use_gpu=False):
band = array_ops.matrix_band_part(
batch_mat,
constant_op.constant(lower, index_dtype),
constant_op.constant(upper, index_dtype))
self.assertAllEqual(band_np, self.evaluate(band))
return Test
class MatrixBandPartGradTest(test_lib.TestCase):
pass # Filled in below
def _GetMatrixBandPartGradTest(dtype_, batch_shape_, shape_):
@test_util.run_v1_only("b/120545219")
def Test(self):
shape = batch_shape_ + shape_
x = constant_op.constant(np.random.rand(*shape), dtype=dtype_)
with self.session(use_gpu=False):
for lower in -1, 0, 1, shape_[-2] - 1:
for upper in -1, 0, 1, shape_[-1] - 1:
y = array_ops.matrix_band_part(x, lower, upper)
error = gradient_checker.compute_gradient_error(
x, x.get_shape().as_list(), y, y.get_shape().as_list())
self.assertLess(error, 1e-4)
return Test
class MatrixBandPartBenchmark(test_lib.Benchmark):
shapes = [
(10, 16, 16),
(10, 101, 101),
(10, 256, 256),
(10, 1000, 1000),
(10, 1024, 1024),
(10, 2048, 2048),
(10, 10, 4, 4),
(10, 10, 10, 10),
(10, 10, 16, 16),
(10, 10, 101, 101),
(10, 10, 256, 256),
(10, 10, 1000, 1000),
(10, 10, 1024, 1024),
(10, 10, 2048, 2048),
]
def benchmarkMatrixBandPartOp(self):
for shape_ in self.shapes:
for limits in (-1, -1), (-1, 0), (0, -1), (2, 2):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = variables.Variable(array_ops.ones(shape_))
band = array_ops.matrix_band_part(matrix, limits[0], limits[1])
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(band),
min_iters=10,
name="matrix_band_part_cpu_{shape}_{limits}".format(
shape=shape_, limits=limits))
if test_lib.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/gpu:0"):
matrix = variables.Variable(array_ops.ones(shape_))
band = array_ops.matrix_band_part(matrix, limits[0], limits[1])
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(band),
min_iters=10,
name="matrix_band_part_gpu_{shape}_{limits}".format(
shape=shape_, limits=limits))
if __name__ == "__main__":
dtypes = (np.bool, np.int32, np.int64, np.float16,
dtypes_lib.bfloat16.as_numpy_dtype, np.float32, np.float64,
np.complex64, np.complex128)
for dtype in dtypes:
for batch_shape in ((), (2,), (1, 3, 2)):
for rows in 1, 2, 7, 23:
for cols in 1, 2, 7, 23:
shape = (rows, cols)
name = "%s_%s" % (dtype.__name__,
"_".join(map(str, batch_shape + shape)))
_AddTest(MatrixBandPartTest, "MatrixBandPart", name,
_GetMatrixBandPartTest(dtype, batch_shape, shape))
for dtype in (np.float32, np.float64):
for batch_shape in ((), (2,)):
for rows in 1, 2, 7:
for cols in 1, 2, 7:
shape = (rows, cols)
name = "%s_%s" % (dtype.__name__,
"_".join(map(str, batch_shape + shape)))
_AddTest(MatrixBandPartGradTest, "MatrixBandPartGrad", name,
_GetMatrixBandPartGradTest(dtype, batch_shape, shape))
test_lib.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/matrix_band_part_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ctc_ops.ctc_decoder_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import ctc_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
def SimpleSparseTensorFrom(x):
"""Create a very simple SparseTensor with dimensions (batch, time).
Args:
x: a list of lists of type int
Returns:
A rank-2 SparseTensor built from the indices and values of x.
"""
x_ix = []
x_val = []
for batch_i, batch in enumerate(x):
for time, val in enumerate(batch):
x_ix.append([batch_i, time])
x_val.append(val)
x_shape = [len(x), np.asarray(x_ix).max(0)[1] + 1]
x_ix = constant_op.constant(x_ix, dtypes.int64)
x_val = constant_op.constant(x_val, dtypes.int32)
x_shape = constant_op.constant(x_shape, dtypes.int64)
return sparse_tensor.SparseTensor(x_ix, x_val, x_shape)
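# For example, SimpleSparseTensorFrom([[0, 1], [2]]) yields indices
# [[0, 0], [0, 1], [1, 0]], values [0, 1, 2] and dense_shape [2, 2].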
def _ctc_loss_v2(labels, inputs, sequence_length,
preprocess_collapse_repeated=False,
ctc_merge_repeated=True,
ignore_longer_outputs_than_inputs=False,
time_major=True):
"""Call ctc_loss_v2 with v1 args."""
assert not preprocess_collapse_repeated
assert ctc_merge_repeated
assert not ignore_longer_outputs_than_inputs
return ctc_ops.ctc_loss_v2(
labels=labels,
logits=inputs,
logit_length=sequence_length,
label_length=None,
blank_index=-1,
logits_time_major=time_major)
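# Passing blank_index=-1 makes the last class the blank label, matching the
# convention of the v1 ctc_loss that these tests compare against.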
class CTCLossTest(test.TestCase):
def _testCTCLoss(self,
inputs,
seq_lens,
labels,
loss_truth,
grad_truth,
expected_err_re=None):
self.assertEqual(len(inputs), len(grad_truth))
inputs_t = constant_op.constant(inputs)
with self.cached_session(use_gpu=False) as sess:
loss = _ctc_loss_v2(
inputs=inputs_t, labels=labels, sequence_length=seq_lens)
grad = gradients_impl.gradients(loss, [inputs_t])[0]
self.assertShapeEqual(loss_truth, loss)
self.assertShapeEqual(grad_truth, grad)
if expected_err_re is None:
(tf_loss, tf_grad) = self.evaluate([loss, grad])
self.assertAllClose(tf_loss, loss_truth, atol=1e-6)
self.assertAllClose(tf_grad, grad_truth, atol=1e-6)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate([loss, grad])
@test_util.run_v1_only("b/120545219")
def testBasic(self):
"""Test two batch entries."""
# Input and ground truth from Alex Graves' implementation.
#
#### Batch entry 0 #####
# targets: 0 1 2 1 0
# outputs:
# 0 0.633766 0.221185 0.0917319 0.0129757 0.0142857 0.0260553
# 1 0.111121 0.588392 0.278779 0.0055756 0.00569609 0.010436
# 2 0.0357786 0.633813 0.321418 0.00249248 0.00272882 0.0037688
# 3 0.0663296 0.643849 0.280111 0.00283995 0.0035545 0.00331533
# 4 0.458235 0.396634 0.123377 0.00648837 0.00903441 0.00623107
# alpha:
# 0 -3.64753 -0.456075 -inf -inf -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -inf -inf -0.986437 -inf -inf -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -inf -inf -2.12145 -inf -inf -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -inf -inf -2.56174 -inf -inf -inf
# 4 -inf -inf -inf -inf -inf -inf -inf -inf -inf -3.34211 -inf
# beta:
# 0 -inf -2.88604 -inf -inf -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -inf -inf -2.35568 -inf -inf -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -inf -inf -1.22066 -inf -inf -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -inf -inf -0.780373 -inf -inf -inf
# 4 -inf -inf -inf -inf -inf -inf -inf -inf -inf 0 0
# prob: -3.34211
# outputDerivs:
# 0 -0.366234 0.221185 0.0917319 0.0129757 0.0142857 0.0260553
# 1 0.111121 -0.411608 0.278779 0.0055756 0.00569609 0.010436
# 2 0.0357786 0.633813 -0.678582 0.00249248 0.00272882 0.0037688
# 3 0.0663296 -0.356151 0.280111 0.00283995 0.0035545 0.00331533
# 4 -0.541765 0.396634 0.123377 0.00648837 0.00903441 0.00623107
#
#### Batch entry 1 #####
#
# targets: 0 1 1 0
# outputs:
# 0 0.30176 0.28562 0.0831517 0.0862751 0.0816851 0.161508
# 1 0.24082 0.397533 0.0557226 0.0546814 0.0557528 0.19549
# 2 0.230246 0.450868 0.0389607 0.038309 0.0391602 0.202456
# 3 0.280884 0.429522 0.0326593 0.0339046 0.0326856 0.190345
# 4 0.423286 0.315517 0.0338439 0.0393744 0.0339315 0.154046
# alpha:
# 0 -1.8232 -1.19812 -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -2.19315 -2.83037 -2.1206 -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -2.03268 -3.71783 -inf -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -4.56292 -inf -inf -inf
# 4 -inf -inf -inf -inf -inf -inf -inf -5.42262 -inf
# beta:
# 0 -inf -4.2245 -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -inf -inf -3.30202 -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -inf -1.70479 -0.856738 -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -0.859706 -0.859706 -0.549337 -inf
# 4 -inf -inf -inf -inf -inf -inf -inf 0 0
# prob: -5.42262
# outputDerivs:
# 0 -0.69824 0.28562 0.0831517 0.0862751 0.0816851 0.161508
# 1 0.24082 -0.602467 0.0557226 0.0546814 0.0557528 0.19549
# 2 0.230246 0.450868 0.0389607 0.038309 0.0391602 -0.797544
# 3 0.280884 -0.570478 0.0326593 0.0339046 0.0326856 0.190345
# 4 -0.576714 0.315517 0.0338439 0.0393744 0.0339315 0.154046
# max_time_steps == 7
depth = 6
# seq_len_0 == 5
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
# dimensions are time x depth
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
input_log_prob_matrix_0 = np.log(input_prob_matrix_0)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
# seq_len_1 == 5
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
# dimensions are time x depth
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
input_log_prob_matrix_1 = np.log(input_prob_matrix_1)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
# len max_time_steps array of 2 x depth matrices
inputs = [
np.vstack(
[input_log_prob_matrix_0[t, :], input_log_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, depth), np.float32)]
# convert inputs into a [max_time x batch_size x depth] Tensor
inputs = np.asarray(inputs, dtype=np.float32)
# len batch_size array of label vectors
labels = SimpleSparseTensorFrom([targets_0, targets_1])
# batch_size length vector of sequence_lengths
seq_lens = np.array([5, 5], dtype=np.int32)
# output: batch_size length vector of negative log probabilities
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
# output: len max_time_steps array of 2 x depth matrices
grad_truth = [
np.vstack([gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, depth), np.float32)]
# convert grad_truth into [max_time x batch_size x depth] Tensor
grad_truth = np.asarray(grad_truth, dtype=np.float32)
self._testCTCLoss(inputs, seq_lens, labels, loss_truth, grad_truth)
def test_time_major(self):
"""Testing time_major param.
testing if transposing and setting time_major=False will result in the same
loss
"""
# [max_time x batch_size x depth] tensor
inputs = np.random.randn(2, 2, 3).astype(np.float32)
labels = SimpleSparseTensorFrom([[0, 1], [1, 0]])
seq_lens = np.array([2, 2], dtype=np.int32)
inputs_t = constant_op.constant(inputs)
# Transpose the tensor to [batch_size x max_time x depth]
inputs_t_transposed = constant_op.constant(inputs.transpose(1, 0, 2))
with self.session(use_gpu=False) as sess:
loss = _ctc_loss_v2(
inputs=inputs_t, labels=labels, sequence_length=seq_lens)
loss_transposed = _ctc_loss_v2(
inputs=inputs_t_transposed,
labels=labels,
sequence_length=seq_lens,
time_major=False)
(tf_loss, tf_loss_transposed) = self.evaluate([loss, loss_transposed])
self.assertAllEqual(tf_loss, tf_loss_transposed)
@test_util.run_v1_only("b/120545219")
def testInvalidSecondGradient(self):
inputs = np.random.randn(2, 2, 3).astype(np.float32)
inputs_t = constant_op.constant(inputs)
labels = SimpleSparseTensorFrom([[0, 1], [1, 0]])
seq_lens = np.array([2, 2], dtype=np.int32)
v = [1.0]
with self.session(use_gpu=False):
loss = _ctc_loss_v2(
inputs=inputs_t, labels=labels, sequence_length=seq_lens)
# Taking the second gradient should fail, since it is not
# yet supported.
with self.assertRaisesRegexp(LookupError,
"explicitly disabled"):
_ = gradients_impl._hessian_vector_product(loss, [inputs_t], v)
@test_util.run_v1_only("b/120545219")
def testEmptyBatch(self):
inputs = constant_op.constant([], dtype=dtypes.float32, shape=(1, 0, 2))
sequence_lengths = constant_op.constant([], dtype=dtypes.int32)
labels = sparse_tensor.SparseTensor(
indices=constant_op.constant([], shape=(0, 2), dtype=dtypes.int64),
values=constant_op.constant([], shape=(0,), dtype=dtypes.int32),
dense_shape=[5, 5])
with self.session(use_gpu=False) as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"batch_size must not be 0"):
sess.run(_ctc_loss_v2(labels, inputs, sequence_lengths))
class CTCLossTestV2(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testCtcLossV2(self):
random_seed.set_random_seed(5)
batch_size = 8
num_labels = 6
max_label_length = 5
num_frames = 12
labels = random_ops.random_uniform(
[batch_size, max_label_length], minval=1, maxval=num_labels,
dtype=dtypes.int64)
logits = random_ops.random_uniform([num_frames, batch_size, num_labels])
label_length = random_ops.random_uniform(
[batch_size], minval=2, maxval=max_label_length, dtype=dtypes.int64)
label_mask = array_ops.sequence_mask(
label_length, maxlen=max_label_length, dtype=label_length.dtype)
labels *= label_mask
logit_length = [num_frames] * batch_size
with backprop.GradientTape() as t:
t.watch(logits)
ref_loss = ctc_ops.ctc_loss_v2(
labels=labels,
logits=logits,
label_length=label_length,
logit_length=logit_length)
ref_grad = t.gradient(ref_loss, [logits])
sparse_labels = ctc_ops.dense_labels_to_sparse(labels, label_length)
def assert_same_loss_and_grads(loss):
if context.executing_eagerly():
return
with self.cached_session():
self.assertAllClose(*self.evaluate([loss, ref_loss]))
grad = gradients_impl.gradients(loss, [logits])
self.assertAllClose(
*self.evaluate([grad, ref_grad]), rtol=2e-06, atol=2e-06)
assert_same_loss_and_grads(
ctc_ops.ctc_loss_v2(
labels=sparse_labels,
logits=logits,
label_length=label_length,
logit_length=logit_length,
blank_index=0))
@test_util.run_v1_only("b/120545219")
def testCtcLossDenseIsSameAsCtcLoss(self):
with ops.device("/GPU:0" if test.is_gpu_available() else "/CPU:0"):
random_seed.set_random_seed(5)
batch_size = 8
num_labels = 6
label_length = 5
num_frames = 12
logits = random_ops.random_uniform([num_frames, batch_size, num_labels])
labels = random_ops.random_uniform(
[batch_size, label_length], minval=1, maxval=num_labels,
dtype=dtypes.int64)
label_lengths = random_ops.random_uniform(
[batch_size], minval=2, maxval=label_length, dtype=dtypes.int64)
label_mask = array_ops.sequence_mask(
label_lengths, maxlen=label_length, dtype=label_lengths.dtype)
labels *= label_mask
logit_lengths = [num_frames] * batch_size
ctc_loss = ctc_ops.ctc_loss_dense(
labels=labels,
logits=logits,
label_length=label_lengths,
logit_length=logit_lengths)
ctc_loss_grads = gradients_impl.gradients(ctc_loss, [logits])[0]
# Shift labels down by one (move blank from 0 to num_labels - 1).
tf_ctc_loss_labels = math_ops.cast(labels, dtypes.int32) - 1
tf_nn_ctc_logits = array_ops.concat([
logits[:, :, 1:],
logits[:, :, 0:1],
], axis=2)
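# ctc_loss_dense treats class 0 as the blank by default, whereas the v1
# ctc_loss expects the blank at index num_labels - 1, so the blank logit
# column is rolled to the end to match the shifted labels above.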
tf_ctc_loss_labels = ctc_ops.dense_labels_to_sparse(
tf_ctc_loss_labels, label_lengths)
tf_nn_ctc_loss = ctc_ops.ctc_loss(
labels=tf_ctc_loss_labels,
inputs=tf_nn_ctc_logits,
sequence_length=logit_lengths,
time_major=True)
tf_nn_ctc_grads = gradients_impl.gradients(tf_nn_ctc_loss, [logits])[0]
with self.cached_session() as sess:
for _ in range(32):
self.assertAllClose(*self.evaluate([ctc_loss, tf_nn_ctc_loss]))
self.assertAllClose(
*self.evaluate([ctc_loss_grads, tf_nn_ctc_grads]),
rtol=2e-06,
atol=2e-06)
@test_util.run_v1_only("b/120545219")
def testCtcLossDenseUniqueFastPathIsSameAsCtcLoss(self):
random_seed.set_random_seed(5)
batch_size = 8
num_labels = 6
label_length = 5
num_frames = 12
logits = random_ops.random_uniform([num_frames, batch_size, num_labels])
labels = random_ops.random_uniform(
[batch_size, label_length], minval=1, maxval=num_labels,
dtype=dtypes.int64)
label_lengths = random_ops.random_uniform(
[batch_size], minval=2, maxval=label_length, dtype=dtypes.int64)
label_mask = array_ops.sequence_mask(
label_lengths, maxlen=label_length, dtype=label_lengths.dtype)
labels *= label_mask
logit_lengths = [num_frames] * batch_size
ctc_loss = ctc_ops.ctc_loss_dense(
labels=labels,
logits=logits,
label_length=label_lengths,
logit_length=logit_lengths,
unique=ctc_ops.ctc_unique_labels(labels))
ctc_loss_grads = gradients_impl.gradients(ctc_loss, [logits])[0]
# Shift labels down by one (move blank from 0 to num_labels - 1).
tf_ctc_loss_labels = math_ops.cast(labels, dtypes.int32) - 1
tf_nn_ctc_logits = array_ops.concat([
logits[:, :, 1:],
logits[:, :, 0:1],
], axis=2)
tf_ctc_loss_labels = ctc_ops.dense_labels_to_sparse(
tf_ctc_loss_labels, label_lengths)
tf_nn_ctc_loss = ctc_ops.ctc_loss(
labels=tf_ctc_loss_labels,
inputs=tf_nn_ctc_logits,
sequence_length=logit_lengths,
time_major=True)
tf_nn_ctc_grads = gradients_impl.gradients(tf_nn_ctc_loss, [logits])[0]
with self.cached_session() as sess:
for _ in range(32):
self.assertAllClose(*self.evaluate([ctc_loss, tf_nn_ctc_loss]))
self.assertAllClose(
*self.evaluate([ctc_loss_grads, tf_nn_ctc_grads]),
rtol=2e-06,
atol=2e-06)
@test_util.run_v1_only("b/120545219")
def testCtcLossDenseWithBlankIndexIsSameAsCtcLoss(self):
random_seed.set_random_seed(5)
batch_size = 8
num_labels = 6
label_length = 5
num_frames = 12
logits = random_ops.random_uniform([num_frames, batch_size, num_labels])
labels = random_ops.random_uniform(
[batch_size, label_length], minval=0, maxval=num_labels-1,
dtype=dtypes.int64)
label_lengths = random_ops.random_uniform(
[batch_size], minval=2, maxval=label_length, dtype=dtypes.int64)
label_mask = array_ops.sequence_mask(
label_lengths, maxlen=label_length, dtype=label_lengths.dtype)
labels *= label_mask
logit_lengths = [num_frames] * batch_size
tf_ctc_loss_labels = math_ops.cast(labels, dtypes.int32)
tf_ctc_loss_labels = ctc_ops.dense_labels_to_sparse(
tf_ctc_loss_labels, label_lengths)
tf_nn_ctc_loss = ctc_ops.ctc_loss(
labels=tf_ctc_loss_labels,
inputs=logits,
sequence_length=logit_lengths,
time_major=True)
tf_nn_ctc_grads = gradients_impl.gradients(tf_nn_ctc_loss, [logits])[0]
# Shift the blank logits/labels to be somewhere in the middle.
blank_index = 2
shifted_logits = array_ops.concat([
logits[:, :, :blank_index],
logits[:, :, -1:],
logits[:, :, blank_index:-1],
], axis=2)
shifted_labels = array_ops.where(labels < blank_index, labels, labels + 1)
ctc_loss = ctc_ops.ctc_loss_dense(
labels=shifted_labels,
logits=shifted_logits,
label_length=label_lengths,
logit_length=logit_lengths,
blank_index=blank_index)
ctc_loss_grads = gradients_impl.gradients(ctc_loss, [logits])[0]
with self.cached_session() as sess:
for _ in range(32):
self.assertAllClose(*self.evaluate([ctc_loss, tf_nn_ctc_loss]))
self.assertAllClose(
*self.evaluate([ctc_loss_grads, tf_nn_ctc_grads]),
rtol=2e-06,
atol=2e-06)
@test_util.run_v1_only("b/120545219")
def testCtcLossDenseWithNegativeBlankIndexIsSameAsCtcLoss(self):
with ops.device("/GPU:0" if test.is_gpu_available() else "/CPU:0"):
random_seed.set_random_seed(5)
batch_size = 8
num_labels = 6
label_length = 5
num_frames = 12
logits = random_ops.random_uniform([num_frames, batch_size, num_labels])
labels = random_ops.random_uniform(
[batch_size, label_length], minval=0, maxval=num_labels-1,
dtype=dtypes.int64)
label_lengths = random_ops.random_uniform(
[batch_size], minval=2, maxval=label_length, dtype=dtypes.int64)
label_mask = array_ops.sequence_mask(
label_lengths, maxlen=label_length, dtype=label_lengths.dtype)
labels *= label_mask
logit_lengths = [num_frames] * batch_size
ctc_loss = ctc_ops.ctc_loss_dense(
labels=labels,
logits=logits,
label_length=label_lengths,
logit_length=logit_lengths,
blank_index=-1)
ctc_loss_grads = gradients_impl.gradients(ctc_loss, [logits])[0]
tf_ctc_loss_labels = math_ops.cast(labels, dtypes.int32)
tf_ctc_loss_labels = ctc_ops.dense_labels_to_sparse(
tf_ctc_loss_labels, label_lengths)
tf_nn_ctc_loss = ctc_ops.ctc_loss(
labels=tf_ctc_loss_labels,
inputs=logits,
sequence_length=logit_lengths,
time_major=True)
tf_nn_ctc_grads = gradients_impl.gradients(tf_nn_ctc_loss, [logits])[0]
with self.cached_session() as sess:
for _ in range(32):
self.assertAllClose(*self.evaluate([ctc_loss, tf_nn_ctc_loss]))
self.assertAllClose(
*self.evaluate([ctc_loss_grads, tf_nn_ctc_grads]),
rtol=2e-06,
atol=2e-06)
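# collapse_repeated merges consecutive duplicate labels within each row's
# seq_length, right-pads the result with zeros, and returns the new lengths.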
def testCollapseRepeated(self):
collapsed, new_seq_lengths = ctc_ops.collapse_repeated(
labels=[[1, 3, 3, 3, 0],
[1, 4, 4, 4, 0],
[4, 2, 2, 9, 4]],
seq_length=[4, 5, 5])
self.assertAllEqual(new_seq_lengths, [2, 3, 4])
self.assertAllEqual(
collapsed,
[[1, 3, 0, 0],
[1, 4, 0, 0],
[4, 2, 9, 4]])
def testCollapseRepeatedPreservesDtypes(self):
collapsed, new_seq_lengths = ctc_ops.collapse_repeated(
labels=constant_op.constant(
[[1, 3, 3, 3, 0],
[1, 4, 4, 4, 0],
[4, 2, 2, 9, 4]],
dtype=dtypes.int64),
seq_length=constant_op.constant([4, 5, 5], dtype=dtypes.int64))
self.assertEqual(new_seq_lengths.dtype, dtypes.int64)
self.assertEqual(collapsed.dtype, dtypes.int64)
self.assertAllEqual(new_seq_lengths, [2, 3, 4])
self.assertAllEqual(
collapsed,
[[1, 3, 0, 0],
[1, 4, 0, 0],
[4, 2, 9, 4]])
def testCollapseRepeatedExtraPadding(self):
collapsed, new_seq_lengths = ctc_ops.collapse_repeated(
labels=[[1, 3, 3, 3, 0, 0, 0],
[1, 4, 4, 4, 0, 1, 2],
[4, 2, 2, 9, 4, 0, 0]],
seq_length=[4, 5, 5])
self.assertAllEqual(new_seq_lengths, [2, 3, 4])
self.assertAllEqual(
collapsed,
[[1, 3, 0, 0],
[1, 4, 0, 0],
[4, 2, 9, 4]])
def testCollapseRepeatedFrontRepeats(self):
collapsed, new_seq_lengths = ctc_ops.collapse_repeated(
labels=[[1, 1, 1, 2, 2],
[1, 1, 1, 2, 2],
[1, 1, 1, 2, 2]],
seq_length=[5, 4, 3])
self.assertAllEqual(new_seq_lengths, [2, 2, 1])
self.assertAllEqual(
collapsed,
[[1, 2],
[1, 2],
[1, 0]])
def testCollapseRepeatedAllLabelsTheSame(self):
collapsed, new_seq_lengths = ctc_ops.collapse_repeated(
labels=[[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]],
seq_length=[4, 5, 1])
self.assertAllEqual(new_seq_lengths, [1, 1, 1])
self.assertAllEqual(
collapsed,
[[1],
[1],
[1]])
def testDenseSequencesToSparse(self):
labels = [[1, 3, 3, 3, 0],
[1, 4, 4, 4, 0],
[4, 2, 2, 9, 4]]
length = [4, 5, 5]
sparse = ctc_ops.dense_labels_to_sparse(labels, length)
new_dense = sparse_ops.sparse_tensor_to_dense(sparse)
self.assertAllEqual(labels, new_dense)
padded_labels = [[1, 3, 3, 3, 0, 0, 0, 0],
[1, 4, 4, 4, 0, 0, 0, 0],
[4, 2, 2, 9, 4, 0, 0, 0]]
length = [4, 5, 5]
sparse = ctc_ops.dense_labels_to_sparse(padded_labels, length)
padded_dense = sparse_ops.sparse_tensor_to_dense(sparse)
self.assertAllEqual(padded_dense, new_dense)
def testUnique(self):
labels = [
[3, 4, 4, 3],
[1, 1, 1, 0],
]
unique, idx = ctc_ops.ctc_unique_labels(labels)
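# `unique` holds each row's distinct labels (zero-padded) and `idx` maps
# every original label position to its slot in that row's unique list, as
# the expected values below show.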
self.assertAllEqual([
[3, 4, 0, 0],
[1, 0, 0, 0],
], unique)
self.assertAllEqual([
[0, 1, 1, 0],
[0, 0, 0, 1],
], idx)
def testSumStates(self):
idx = [
[0, 1, 0, 1],
[0, 0, 0, 1],
]
states = math_ops.log([
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0]],
[[0.1, 0.2, 0.3, 0.4],
[0.5, 0.6, 0.7, 0.8]],
])
sum_of_states = math_ops.exp(ctc_ops._sum_states(idx, states))
self.assertAllClose([
[[4.0, 6.0, 0.0, 0.0],
[18.0, 8.0, 0.0, 0.0]],
[[0.4, 0.6, 0.0, 0.0],
[1.8, 0.8, 0.0, 0.0]]
], sum_of_states)
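# _state_to_olabel folds per-state probabilities back onto output labels:
# blank states are accumulated into label 0 and repeated occurrences of the
# same label are summed, which is what the expected values below encode.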
def testStateToOlabel(self):
labels = [
[3, 4, 3, 4],
[1, 1, 1, 0],
]
num_labels = 8
# 3 frames, 2 batch, 10 states (5 label, 5 blank).
states = [
[[0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.20],
[0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.30]],
[[1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0],
[2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3.0]],
[[11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0],
[21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0]],
]
labels = ops.convert_to_tensor(labels)
states = math_ops.log(states)
olabel = ctc_ops._state_to_olabel(labels, num_labels, states)
olabel = math_ops.exp(olabel)
blank = olabel[:, :, 0]
self.assertAllClose(blank, [
[0.16 + 0.17 + 0.18 + 0.19 + 0.20,
0.26 + 0.27 + 0.28 + 0.29 + 0.30],
[1.6 + 1.7 + 1.8 + 1.9 + 2.0,
2.6 + 2.7 + 2.8 + 2.9 + 3.0],
[16.0 + 17.0 + 18.0 + 19.0 + 20.0,
26.0 + 27.0 + 28.0 + 29.0 + 30.0]
])
self.assertAllClose(olabel[:, :, 1:], [
[[0.0, 0.0, 0.12 + 0.14, 0.13 + 0.15, 0.0, 0.0, 0.0],
[0.22 + 0.23 + 0.24, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 1.2 + 1.4, 1.3 + 1.5, 0.0, 0.0, 0.0],
[2.2 + 2.3 + 2.4, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 12.0 + 14.0, 13.0 + 15.0, 0.0, 0.0, 0.0],
[22.0 + 23.0 + 24.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
])
def testStateToOlabelUnique(self):
labels = [
[3, 4, 3, 4],
[1, 1, 1, 0],
]
num_labels = 8
# 3 frames, 2 batch, 10 states (5 label, 5 blank).
states = [
[[0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.20],
[0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.30]],
[[1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0],
[2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3.0]],
[[11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0],
[21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0]],
]
labels = ops.convert_to_tensor(labels)
states = math_ops.log(states)
olabel = ctc_ops._state_to_olabel_unique(
labels, num_labels, states, ctc_ops.ctc_unique_labels(labels))
olabel = math_ops.exp(olabel)
blank = olabel[:, :, 0]
self.assertAllClose(blank, [
[0.16 + 0.17 + 0.18 + 0.19 + 0.20,
0.26 + 0.27 + 0.28 + 0.29 + 0.30],
[1.6 + 1.7 + 1.8 + 1.9 + 2.0,
2.6 + 2.7 + 2.8 + 2.9 + 3.0],
[16.0 + 17.0 + 18.0 + 19.0 + 20.0,
26.0 + 27.0 + 28.0 + 29.0 + 30.0]])
self.assertAllClose(olabel[:, :, 1:], [
[[0.0, 0.0, 0.12 + 0.14, 0.13 + 0.15, 0.0, 0.0, 0.0],
[0.22 + 0.23 + 0.24, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 1.2 + 1.4, 1.3 + 1.5, 0.0, 0.0, 0.0],
[2.2 + 2.3 + 2.4, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 12.0 + 14.0, 13.0 + 15.0, 0.0, 0.0, 0.0],
[22.0 + 23.0 + 24.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
])
@test_util.run_deprecated_v1
def testScan(self):
with ops.device("/GPU:0" if test.is_gpu_available() else "/CPU:0"):
out = ctc_ops._scan(
lambda accum, elem: accum + elem,
constant_op.constant([1.0, 2.0, 3.0]), 23.0)
self.assertAllEqual([24.0, 26.0, 29.0], out)
out = ctc_ops._scan(
lambda a, e: a + e,
constant_op.constant([1.0, 2.0, 3.0]), 23.0,
inclusive=True)
self.assertAllEqual([23.0, 24.0, 26.0, 29.0], out)
out = ctc_ops._scan(
lambda a, e: a + e,
constant_op.constant([1.0, 2.0, 3.0]), 23.0,
reverse=True)
self.assertAllEqual([29.0, 28.0, 26.0], out)
out = ctc_ops._scan(
lambda a, e: a + e,
constant_op.constant([1.0, 2.0, 3.0]), 23.0,
reverse=True,
inclusive=True)
self.assertAllEqual([29.0, 28.0, 26.0, 23.0], out)
out = ctc_ops._scan(
lambda a, e: a + e,
constant_op.constant([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]),
constant_op.constant([23.0, 24.0]))
self.assertAllEqual([[23.0, 25.0], [25.0, 28.0], [29.0, 33.0]], out)
@test_util.run_deprecated_v1
def testScanCapturesVariables(self):
with self.cached_session() as sess:
x = random_ops.random_uniform([])
fn = lambda accum, elem: accum + x * elem
out = ctc_ops._scan(fn, constant_op.constant([0.0, 1.0, 2.0]), 23.0)
self.assertAllClose(*sess.run([
[23.0 + x * 0.0, 23.0 + x * 1.0, 23.0 + x * 3.0], out
]))
@test_util.run_deprecated_v1
def testScanMultipleAccumulators(self):
with ops.device("/GPU:0" if test.is_gpu_available() else "/CPU:0"):
def fn(accum, elem):
accum_a, accum_b = accum
return accum_a + elem, accum_b * elem
out = ctc_ops._scan(
fn, constant_op.constant([1.0, 2.0, 3.0]),
(23.0, constant_op.constant([1.0, 2.0])))
a, b = out
self.assertAllEqual([24.0, 26.0, 29.0], a)
self.assertAllEqual([[1.0, 2.0], [2.0, 4.0], [6.0, 12.0]], b)
@test_util.run_deprecated_v1
def testScanMultipleElements(self):
with ops.device("/GPU:0" if test.is_gpu_available() else "/CPU:0"):
def fn(accum, elem):
elem_a, elem_b = elem
return accum + (elem_a * elem_b)
elems_a = constant_op.constant([1.0, 2.0, 3.0])
elems_b = constant_op.constant([[1.0, 2.0], [2.0, 3.0], [3.0, 4.0]])
out = ctc_ops._scan(
fn, (elems_a, elems_b),
initial=constant_op.constant([0.0, 0.0]))
self.assertAllEqual(
[[1.0, 2.0], [5.0, 8.0], [14.0, 20.0]], out)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/ctc_loss_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.{,parallel_}dynamic_stitch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import gradients_impl
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class DynamicStitchTestBase(object):
def __init__(self, stitch_op):
self.stitch_op = stitch_op
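# dynamic_stitch scatters each data[m][i] into position indices[m][i] of the
# merged result, so the output's leading dimension is
# max(flatten(indices)) + 1.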
def testScalar(self):
with test_util.use_gpu():
indices = [constant_op.constant(0), constant_op.constant(1)]
data = [constant_op.constant(40), constant_op.constant(60)]
for step in -1, 1:
stitched_t = self.stitch_op(indices[::step], data)
stitched_val = self.evaluate(stitched_t)
self.assertAllEqual([40, 60][::step], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([2], stitched_t.get_shape().as_list())
@test_util.run_deprecated_v1
def testShapeInferenceForScalarWithNonConstantIndices(self):
with test_util.use_gpu():
indices = [
array_ops.placeholder(dtype=dtypes.int32),
constant_op.constant(1)
]
data = [constant_op.constant(40), constant_op.constant(60)]
for step in -1, 1:
stitched_t = self.stitch_op(indices[::step], data)
# Dimension 0 is max(flatten(indices))+1, but the first indices input is
# not a constant tensor, so we can only infer it as a vector of unknown
# length.
self.assertEqual([None], stitched_t.get_shape().as_list())
def testSimpleOneDimensional(self):
# Test various datatypes in the simple case to ensure that the op was
# registered under those types.
dtypes_to_test = [
dtypes.float32, dtypes.qint8, dtypes.quint8, dtypes.qint32
]
for dtype in dtypes_to_test:
indices = [
constant_op.constant([0, 4, 7]),
constant_op.constant([1, 6, 2, 3, 5])
]
data = [
math_ops.cast(constant_op.constant([0, 40, 70]), dtype=dtype),
math_ops.cast(
constant_op.constant([10, 60, 20, 30, 50]), dtype=dtype)
]
stitched_t = self.stitch_op(indices, data)
stitched_val = self.evaluate(stitched_t)
self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([8], stitched_t.get_shape().as_list())
def testOneListOneDimensional(self):
indices = [constant_op.constant([1, 6, 2, 3, 5, 0, 4, 7])]
data = [constant_op.constant([10, 60, 20, 30, 50, 0, 40, 70])]
stitched_t = self.stitch_op(indices, data)
stitched_val = self.evaluate(stitched_t)
self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([8], stitched_t.get_shape().as_list())
def testSimpleTwoDimensional(self):
indices = [
constant_op.constant([0, 4, 7]),
constant_op.constant([1, 6]),
constant_op.constant([2, 3, 5])
]
data = [
constant_op.constant([[0, 1], [40, 41], [70, 71]]),
constant_op.constant([[10, 11], [60, 61]]),
constant_op.constant([[20, 21], [30, 31], [50, 51]])
]
stitched_t = self.stitch_op(indices, data)
stitched_val = self.evaluate(stitched_t)
self.assertAllEqual([[0, 1], [10, 11], [20, 21], [30, 31], [40, 41],
[50, 51], [60, 61], [70, 71]], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([8, 2], stitched_t.get_shape().as_list())
def testZeroSizeTensor(self):
indices = [
constant_op.constant([0, 4, 7]),
constant_op.constant([1, 6]),
constant_op.constant([2, 3, 5]),
array_ops.zeros([0], dtype=dtypes.int32)
]
data = [
constant_op.constant([[0, 1], [40, 41], [70, 71]]),
constant_op.constant([[10, 11], [60, 61]]),
constant_op.constant([[20, 21], [30, 31], [50, 51]]),
array_ops.zeros([0, 2], dtype=dtypes.int32)
]
stitched_t = self.stitch_op(indices, data)
stitched_val = self.evaluate(stitched_t)
self.assertAllEqual([[0, 1], [10, 11], [20, 21], [30, 31], [40, 41],
[50, 51], [60, 61], [70, 71]], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([8, 2], stitched_t.get_shape().as_list())
@test_util.run_deprecated_v1
def testHigherRank(self):
indices = [
constant_op.constant(6),
constant_op.constant([4, 1]),
constant_op.constant([[5, 2], [0, 3]])
]
data = [
constant_op.constant([61., 62.]),
constant_op.constant([[41., 42.], [11., 12.]]),
constant_op.constant([[[51., 52.], [21., 22.]],
[[1., 2.], [31., 32.]]])
]
stitched_t = self.stitch_op(indices, data)
stitched_val = self.evaluate(stitched_t)
correct = 10. * np.arange(7)[:, None] + [1., 2.]
self.assertAllEqual(correct, stitched_val)
self.assertEqual([7, 2], stitched_t.get_shape().as_list())
# Test gradients
stitched_grad = 7. * stitched_val
grads = gradients_impl.gradients(stitched_t, indices + data,
stitched_grad)
self.assertEqual(grads[:3], [None] * 3) # Indices have no gradients
for datum, grad in zip(data, self.evaluate(grads[3:])):
self.assertAllEqual(7. * self.evaluate(datum), grad)
@test_util.run_deprecated_v1
def testErrorIndicesMultiDimensional(self):
indices = [
constant_op.constant([0, 4, 7]),
constant_op.constant([[1, 6, 2, 3, 5]])
]
data = [
constant_op.constant([[0, 40, 70]]),
constant_op.constant([10, 60, 20, 30, 50])
]
with self.assertRaises(ValueError):
self.stitch_op(indices, data)
@test_util.run_deprecated_v1
def testErrorDataNumDimsMismatch(self):
indices = [
constant_op.constant([0, 4, 7]),
constant_op.constant([1, 6, 2, 3, 5])
]
data = [
constant_op.constant([0, 40, 70]),
constant_op.constant([[10, 60, 20, 30, 50]])
]
with self.assertRaises(ValueError):
self.stitch_op(indices, data)
@test_util.run_deprecated_v1
def testErrorDataDimSizeMismatch(self):
indices = [
constant_op.constant([0, 4, 5]),
constant_op.constant([1, 6, 2, 3])
]
data = [
constant_op.constant([[0], [40], [70]]),
constant_op.constant([[10, 11], [60, 61], [20, 21], [30, 31]])
]
with self.assertRaises(ValueError):
self.stitch_op(indices, data)
@test_util.run_deprecated_v1
def testErrorDataAndIndicesSizeMismatch(self):
indices = [
constant_op.constant([0, 4, 7]),
constant_op.constant([1, 6, 2, 3, 5])
]
data = [
constant_op.constant([0, 40, 70]),
constant_op.constant([10, 60, 20, 30])
]
with self.assertRaises(ValueError):
self.stitch_op(indices, data)
class DynamicStitchTest(DynamicStitchTestBase, test.TestCase):
def __init__(self, *test_case_args):
test.TestCase.__init__(self, *test_case_args)
DynamicStitchTestBase.__init__(self, data_flow_ops.dynamic_stitch)
class ParallelDynamicStitchTest(DynamicStitchTestBase, test.TestCase):
def __init__(self, *test_case_args):
test.TestCase.__init__(self, *test_case_args)
DynamicStitchTestBase.__init__(self, data_flow_ops.parallel_dynamic_stitch)
def testScalar(self):
with test_util.use_gpu():
indices = [constant_op.constant(0), constant_op.constant(1)]
data = [constant_op.constant(40.0), constant_op.constant(60.0)]
for step in -1, 1:
stitched_t = data_flow_ops.dynamic_stitch(indices[::step], data)
stitched_val = self.evaluate(stitched_t)
self.assertAllEqual([40.0, 60.0][::step], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([2], stitched_t.get_shape().as_list())
@test_util.run_deprecated_v1
def testHigherRank(self):
indices = [
constant_op.constant(6),
constant_op.constant([4, 1]),
constant_op.constant([[5, 2], [0, 3]])
]
data = [
constant_op.constant([61, 62], dtype=dtypes.float32),
constant_op.constant([[41, 42], [11, 12]], dtype=dtypes.float32),
constant_op.constant(
[[[51, 52], [21, 22]], [[1, 2], [31, 32]]], dtype=dtypes.float32)
]
stitched_t = data_flow_ops.dynamic_stitch(indices, data)
stitched_val = self.evaluate(stitched_t)
correct = 10 * np.arange(7)[:, None] + [1.0, 2.0]
self.assertAllEqual(correct, stitched_val)
self.assertEqual([7, 2], stitched_t.get_shape().as_list())
# Test gradients
stitched_grad = 7 * stitched_val
grads = gradients_impl.gradients(stitched_t, indices + data,
stitched_grad)
self.assertEqual(grads[:3], [None] * 3) # Indices have no gradients
for datum, grad in zip(data, self.evaluate(grads[3:])):
self.assertAllEqual(7.0 * self.evaluate(datum), grad)
# GPU version unit tests
def testScalarGPU(self):
indices = [constant_op.constant(0), constant_op.constant(1)]
data = [constant_op.constant(40.0), constant_op.constant(60.0)]
for step in -1, 1:
stitched_t = data_flow_ops.dynamic_stitch(indices[::step], data)
stitched_val = self.evaluate(stitched_t)
self.assertAllEqual([40.0, 60.0][::step], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([2], stitched_t.get_shape().as_list())
@test_util.run_deprecated_v1
def testHigherRankGPU(self):
indices = [
constant_op.constant(6),
constant_op.constant([4, 1]),
constant_op.constant([[5, 2], [0, 3]])
]
data = [
constant_op.constant([61, 62], dtype=dtypes.float32),
constant_op.constant([[41, 42], [11, 12]], dtype=dtypes.float32),
constant_op.constant(
[[[51, 52], [21, 22]], [[1, 2], [31, 32]]], dtype=dtypes.float32)
]
stitched_t = data_flow_ops.dynamic_stitch(indices, data)
stitched_val = self.evaluate(stitched_t)
correct = 10 * np.arange(7)[:, None] + [1.0, 2.0]
self.assertAllEqual(correct, stitched_val)
self.assertEqual([7, 2], stitched_t.get_shape().as_list())
# Test gradients
stitched_grad = 7 * stitched_val
grads = gradients_impl.gradients(stitched_t, indices + data,
stitched_grad)
self.assertEqual(grads[:3], [None] * 3) # Indices have no gradients
for datum, grad in zip(data, self.evaluate(grads[3:])):
self.assertAllEqual(7.0 * self.evaluate(datum), grad)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/dynamic_stitch_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.gen_array_ops import broadcast_gradient_args
from tensorflow.python.platform import test
class GPUBinaryOpsTest(test.TestCase):
def _compareGPU(self, x, y, np_func, tf_func):
with self.cached_session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
with self.cached_session(use_gpu=False) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = self.evaluate(out)
self.assertAllClose(tf_cpu, tf_gpu)
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareGPU(x, y, np.power, math_ops.pow)
def testFloatWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
class MathBuiltinUnaryTest(test.TestCase):
def _compare(self, x, np_func, tf_func, use_gpu):
np_out = np_func(x)
with self.cached_session(use_gpu=use_gpu) as sess:
inx = ops.convert_to_tensor(x)
ofunc = tf_func(inx)
tf_out = self.evaluate(ofunc)
self.assertAllClose(np_out, tf_out)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _testDtype(self, dtype, use_gpu):
data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
data_gt_1 = data + 2 # for x > 1
self._compare(data, np.abs, math_ops.abs, use_gpu)
self._compare(data, np.arccos, math_ops.acos, use_gpu)
self._compare(data, np.arcsin, math_ops.asin, use_gpu)
self._compare(data, np.arcsinh, math_ops.asinh, use_gpu)
self._compare(data_gt_1, np.arccosh, math_ops.acosh, use_gpu)
self._compare(data, np.arctan, math_ops.atan, use_gpu)
self._compare(data, np.ceil, math_ops.ceil, use_gpu)
self._compare(data, np.cos, math_ops.cos, use_gpu)
self._compare(data, np.cosh, math_ops.cosh, use_gpu)
self._compare(data, np.exp, math_ops.exp, use_gpu)
self._compare(data, np.floor, math_ops.floor, use_gpu)
self._compare(data, np.log, math_ops.log, use_gpu)
self._compare(data, np.log1p, math_ops.log1p, use_gpu)
self._compare(data, np.negative, math_ops.negative, use_gpu)
self._compare(data, self._rsqrt, math_ops.rsqrt, use_gpu)
self._compare(data, np.sin, math_ops.sin, use_gpu)
self._compare(data, np.sinh, math_ops.sinh, use_gpu)
self._compare(data, np.sqrt, math_ops.sqrt, use_gpu)
self._compare(data, np.square, math_ops.square, use_gpu)
self._compare(data, np.tan, math_ops.tan, use_gpu)
self._compare(data, np.tanh, math_ops.tanh, use_gpu)
self._compare(data, np.arctanh, math_ops.atanh, use_gpu)
def testTypes(self):
for dtype in [np.float32]:
self._testDtype(dtype, use_gpu=True)
def testFloorDivide(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
np_out = np.floor_divide(x, y + 0.1)
with self.session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y + 0.1)
ofunc = inx / iny
out_func2 = math_ops.floor(ofunc)
tf_out = self.evaluate(out_func2)
self.assertAllClose(np_out, tf_out)
class BroadcastSimpleTest(test.TestCase):
def _GetGradientArgs(self, xs, ys):
with self.cached_session(use_gpu=True) as sess:
return sess.run(broadcast_gradient_args(xs, ys))
@test_util.run_deprecated_v1
def testBroadcast(self):
r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
_GRAD_TOL = {dtypes.float32: 1e-3}
def _compareGradientX(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, out, zs, x_init_value=x)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGradientY(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, ys, out, zs, x_init_value=y)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
@test_util.run_deprecated_v1
def testGradient(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
self._compareGradientX(x, y, np.true_divide, math_ops.truediv)
self._compareGradientY(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y + 0.1, np.floor_divide, math_ops.floordiv)
class GpuMultiSessionMemoryTest(test_util.TensorFlowTestCase):
"""Tests concurrent sessions executing on the same GPU."""
def _run_session(self, session, results):
n_iterations = 500
with session as s:
data = variables.Variable(1.0)
with ops.device('/device:GPU:0'):
random_seed.set_random_seed(1)
matrix1 = variables.Variable(
random_ops.truncated_normal([1024, 1]), name='matrix1')
matrix2 = variables.Variable(
random_ops.truncated_normal([1, 1024]), name='matrix2')
x1 = math_ops.multiply(data, matrix1, name='x1')
x3 = math_ops.matmul(x1, math_ops.matmul(matrix2, matrix1))
x4 = math_ops.matmul(array_ops.transpose(x3), x3, name='x4')
s.run(variables.global_variables_initializer())
for _ in xrange(n_iterations):
value = s.run(x4)
results.add(value.flat[0])
if len(results) != 1:
break
@test_util.run_v1_only('b/126596827 needs graph mode in multiple threads')
def testConcurrentSessions(self):
n_threads = 4
threads = []
results = []
for _ in xrange(n_threads):
session = self.session(graph=ops.Graph(), use_gpu=True)
results.append(set())
args = (session, results[-1])
threads.append(threading.Thread(target=self._run_session, args=args))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
flat_results = set(itertools.chain(*results))
self.assertEqual(1,
len(flat_results),
'Expected single value, got %r' % flat_results)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/basic_gpu_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.fingerprint_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
# Fingerprint op has C++ tests. This simple test case tests that fingerprint
# function is accessible via Python API.
class FingerprintTest(test.TestCase):
def test_default_values(self):
data = np.arange(10)
data = np.expand_dims(data, axis=0)
fingerprint0 = self.evaluate(array_ops.fingerprint(data))
fingerprint1 = self.evaluate(array_ops.fingerprint(data[:, 1:]))
self.assertEqual(fingerprint0.ndim, 2)
self.assertTupleEqual(fingerprint0.shape, fingerprint1.shape)
self.assertTrue(np.any(fingerprint0 != fingerprint1))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/fingerprint_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for partitioned_variables.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
class PartitionerCreatorsTest(test.TestCase):
def testFixedSizePartitioner(self):
with self.cached_session():
partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable(
"v0", dtype=dtypes.float32, shape=(10, 10))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), 5)
self.assertAllEqual(v0_part, (5, 1))
def testFixedSizePartitionerInt64(self):
with self.cached_session():
partitioner = partitioned_variables.fixed_size_partitioner(4, axis=0)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable("v0", dtype=dtypes.int64, shape=[20])
v0_list = v0._get_variable_list()
self.assertEqual(len(v0_list), 4)
def testResourceFixedSizePartitioner(self):
with self.cached_session():
partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
with variable_scope.variable_scope(
"root", partitioner=partitioner, use_resource=True):
v0 = variable_scope.get_variable(
"v0", dtype=dtypes.float32, shape=(10, 10))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), 5)
self.assertAllEqual(v0_part, (5, 1))
def _testVariableAxisSizePartitioner(self,
name,
axis,
max_shard_bytes,
expected_axis_shards,
expected_partitions,
max_shards=None):
partitioner = partitioned_variables.variable_axis_size_partitioner(
axis=axis, max_shard_bytes=max_shard_bytes, max_shards=max_shards)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable(
name, dtype=dtypes.float32, shape=(4, 8, 16, 32))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), expected_axis_shards)
self.assertAllEqual(v0_part, expected_partitions)
def testVariableAxisSizePartitioner(self):
with self.cached_session():
      # Create a partitioned variable of shape (4, 8, 16, 32) and type float32.
# Bytes per slice along the given axes:
# 8 * 16 * 32 * sizeof(float32) = 16384 / slice on axis 0
# 4 * 16 * 32 * sizeof(float32) = 8192 / slice on axis 1
# 4 * 8 * 32 * sizeof(float32) = 4096 / slice on axis 2
# 4 * 8 * 16 * sizeof(float32) = 2048 / slice on axis 3
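      # Roughly, the partitioner should end up choosing
      #   axis_shards ~= ceil(total_bytes / max_shard_bytes),
      # capped by the length of the chosen axis and by max_shards when given;
      # e.g. 65536 total bytes with max_shard_bytes=32768 should give 2 shards.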
# Now partition it in different ways...
# No need to slice: bytes_per_slice * dim0 = 65536 < max_shard_bytes
self._testVariableAxisSizePartitioner(
"v0",
axis=0,
max_shard_bytes=131072,
expected_axis_shards=1,
expected_partitions=(1, 1, 1, 1))
# Slice exactly once: bytes_per_slice * dim1 = 65536 = max_shard_bytes
self._testVariableAxisSizePartitioner(
"v1",
axis=1,
max_shard_bytes=65536,
expected_axis_shards=1,
expected_partitions=(1, 1, 1, 1))
# Slice into 2 parts:
# bytes_per_slice = 4096
# slices_per_shard = 32768 / 4096 = 8
# axis_shards = 16 / 8 = 2
self._testVariableAxisSizePartitioner(
"v2",
axis=2,
max_shard_bytes=32768,
expected_axis_shards=2,
expected_partitions=(1, 1, 2, 1))
# This partitioner makes sure we maximize the number of shards along
# axis 3. Slice it into 32 parts:
# bytes_per_slice = 2048
# slices_per_shard = 2048 / 2048 = 1
# axis_shards = 32 / 1 = 32
self._testVariableAxisSizePartitioner(
"v3a",
axis=3,
max_shard_bytes=2048,
expected_axis_shards=32,
expected_partitions=(1, 1, 1, 32))
# This partitioner makes sure we do not go past the bound of allowable
# number of shards along axis 3.
# Slice into 32 parts:
# bytes_per_slice = 2048
# slices_per_shard = max(1, 1024 / 2048) = 1
# axis_shards = 32 / 1 = 32
# Slice into max of 32 parts because: max_shard_bytes < bytes_per_slice
self._testVariableAxisSizePartitioner(
"v3b",
axis=3,
max_shard_bytes=1024,
expected_axis_shards=32,
expected_partitions=(1, 1, 1, 32))
# Specify max_shards so that it won't affect sharding.
self._testVariableAxisSizePartitioner(
"v3c",
axis=3,
max_shard_bytes=1024,
expected_axis_shards=32,
expected_partitions=(1, 1, 1, 32),
max_shards=33)
# Specify max_shards so that it will affect sharding.
self._testVariableAxisSizePartitioner(
"v3d",
axis=3,
max_shard_bytes=1024,
expected_axis_shards=2,
expected_partitions=(1, 1, 1, 2),
max_shards=2)
# Use the partitioner with strings
partitioner_axis3_str = partitioned_variables.variable_axis_size_partitioner( # pylint: disable=line-too-long
axis=3,
max_shard_bytes=32768,
bytes_per_string_element=8)
with variable_scope.variable_scope(
"root", partitioner=partitioner_axis3_str):
v3str = variable_scope.get_variable(
"v3str",
initializer=np.array([""] * 4 * 8 * 16 * 32).reshape(4, 8, 16, 32),
dtype=dtypes.string,
shape=(4, 8, 16, 32))
v3str_list = v3str._get_variable_list()
v3str_part = v3str._get_partitions()
      # Now the estimated bytes_per_slice = 4*8*16*bytes_per_string_element,
      # which is equal to 4096. With a max_shard_bytes of 32768,
      # we should get a split of 4.
# Slice into 4 parts:
# bytes_per_slice = 4096
# slices_per_shard = 32768 / 4096 = 8
# axis_shards = 32 / 8 = 4
self.assertEqual(len(v3str_list), 4)
self.assertAllEqual(v3str_part, (1, 1, 1, 4))
def _testMinMaxVariablePartitioner(self, max_partitions, axis, min_slice_size,
var_name, var_shape, expected_axis_shards,
expected_partitions):
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=max_partitions, axis=axis, min_slice_size=min_slice_size)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable(
var_name, dtype=dtypes.float32, shape=var_shape)
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), expected_axis_shards)
self.assertAllEqual(v0_part, expected_partitions)
def testMinMaxVariablePartitioner(self):
with self.cached_session():
# Partitioning a variable of shape=[2048] with a minimum of 2K per slice.
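      # Rough arithmetic for this case: 2048 float32 elements * 4 bytes
      # = 8192 bytes; 8192 / (2 << 10) = 4 slices, which is well under
      # max_partitions=100, so we expect 4 shards.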
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=2 << 10,
var_name="v0_0",
var_shape=[2048],
expected_axis_shards=4,
expected_partitions=[4])
# Partitioning a variable of shape=[2048, 1024] with a minimum of 256K per
# slice.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=256 << 10,
var_name="v0",
var_shape=[2048, 1024],
expected_axis_shards=32,
expected_partitions=[32, 1])
# max_partitions restricts partitioning of the variable.
self._testMinMaxVariablePartitioner(
max_partitions=16,
axis=0,
min_slice_size=256 << 10,
var_name="v1_max",
var_shape=[2048, 1024],
expected_axis_shards=16,
expected_partitions=[16, 1])
self._testMinMaxVariablePartitioner(
max_partitions=1,
axis=0,
min_slice_size=256 << 10,
var_name="v2_max",
var_shape=[2048, 1024],
expected_axis_shards=1,
expected_partitions=[1, 1])
# Reducing/Increasing min_slice_size proportionately increases/reduces the
# number of partitions.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=128 << 10,
var_name="v3_slice",
var_shape=[2048, 1024],
expected_axis_shards=64,
expected_partitions=[64, 1])
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=512 << 10,
var_name="v4_slice",
var_shape=[2048, 1024],
expected_axis_shards=16,
expected_partitions=[16, 1])
# Partitioning the variable along a different axis.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=1,
min_slice_size=256 << 10,
var_name="v5_axis",
var_shape=[64, 1024, 1, 3],
expected_axis_shards=3,
expected_partitions=[1, 3, 1, 1])
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=3,
min_slice_size=256 << 10,
var_name="v6_axis",
var_shape=[64, 1024, 1, 3],
expected_axis_shards=3,
expected_partitions=[1, 1, 1, 3])
      # Cannot partition the variable into more shards than its shape allows.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=256 << 10,
var_name="v7_shape",
var_shape=[16, 128, 1024],
expected_axis_shards=16,
expected_partitions=[16, 1, 1])
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=256 << 10,
var_name="v8_shape",
var_shape=[4, 512, 1024],
expected_axis_shards=4,
expected_partitions=[4, 1, 1])
def _IotaInitializer(shape, dtype=dtypes.float32, partition_info=None):
assert dtype == dtypes.float32
if len(shape) == 1:
return range(shape[0])
else:
val = _IotaInitializer(shape[1:], dtype)
return [[(10**i) * v for v in val] for i in range(shape[0])]
class PartitionedVariablesTestCase(test.TestCase):
def _TestSaveSpec(self, slices, expected_specs):
self.assertEqual(len(expected_specs), len(slices))
for i in xrange(len(expected_specs)):
      self.assertEqual(expected_specs[i], slices[i]._save_slice_info.spec)
def testVecConstantInit(self):
with self.cached_session():
rnd_par = constant_op.constant([1, 2, 3, 4])
vs = partitioned_variables.create_partitioned_variables([4], [4], rnd_par)
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 0)
rnd = self.evaluate(rnd_par)
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 4, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, ["4 0,1", "4 1,1", "4 2,1", "4 3,1"])
def testConstantInit(self):
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
vs = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 1)
rnd = self.evaluate(rnd_par)
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 2, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, ["2 4 0,2:0,2", "2 4 0,2:2,2"])
def _testNameHelper(self, use_resource=False):
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with variable_scope.variable_scope("hi", use_resource=use_resource):
vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
self.evaluate(variables.global_variables_initializer())
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hi/PartitionedVariable", var1_name)
self.assertEqual("hi/PartitionedVariable_1", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
# Test same variable.
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with variable_scope.variable_scope(
"hola", use_resource=use_resource) as vs:
vs1 = partitioned_variables.create_partitioned_variables(
[2, 4], [1, 2], rnd_par, dtype=dtypes.int32)
with variable_scope.variable_scope(
vs, reuse=True, use_resource=use_resource):
vs2 = partitioned_variables.create_partitioned_variables(
[2, 4], [1, 2], rnd_par, dtype=dtypes.int32)
self.evaluate(variables.global_variables_initializer())
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hola/PartitionedVariable", var1_name)
self.assertEqual("hola/PartitionedVariable", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
# Test name_scope
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with ops.name_scope("ola"):
vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
self.evaluate(variables.global_variables_initializer())
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
# Currently, the name scope 'ola' has no effect.
self.assertEqual("PartitionedVariable", var1_name)
self.assertEqual("PartitionedVariable_1", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
@test_util.run_deprecated_v1
def testName(self):
self._testNameHelper(use_resource=False)
def testResourceName(self):
self._testNameHelper(use_resource=True)
def testRandomInitValue(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([200, 40]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 10], rnd.initialized_value())
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 1)
rnd = self.evaluate(rnd)
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.float32] * 10, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, [
"200 40 0,200:0,4", "200 40 0,200:4,4", "200 40 0,200:8,4",
"200 40 0,200:12,4", "200 40 0,200:16,4", "200 40 0,200:20,4",
"200 40 0,200:24,4", "200 40 0,200:28,4", "200 40 0,200:32,4",
"200 40 0,200:36,4"
])
def testRandomInitUnevenPartitions(self):
with self.cached_session():
rnd = variables.Variable(
random_ops.random_uniform([20, 43], dtype=dtypes.float64))
var_lists = [
partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, i], rnd.initialized_value())
for i in xrange(1, 10)
]
self.evaluate(variables.global_variables_initializer())
rnd_val = self.evaluate(rnd)
      # Only check the slice save specs for the first 5 variable lists.
save_specs = [
# One slice
["20 43 0,20:0,43"],
# Two slices
["20 43 0,20:0,22", "20 43 0,20:22,21"],
# Three slices
["20 43 0,20:0,15", "20 43 0,20:15,14", "20 43 0,20:29,14"],
# Four slices
[
"20 43 0,20:0,11", "20 43 0,20:11,11", "20 43 0,20:22,11",
"20 43 0,20:33,10"
],
# Five slices
[
"20 43 0,20:0,9", "20 43 0,20:9,9", "20 43 0,20:18,9",
"20 43 0,20:27,8", "20 43 0,20:35,8"
]
]
for i, vs in enumerate(var_lists):
var_val = array_ops.concat(vs, 1)
self.assertAllClose(rnd_val, var_val)
self.assertEqual([dtypes.float64] * len(vs),
[v.dtype.base_dtype for v in vs])
if i < len(save_specs):
self._TestSaveSpec(vs, save_specs[i])
def testDegenerate(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 1], rnd.initialized_value())
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 0)
rnd = self.evaluate(rnd)
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, ["10 43 0,10:0,43"])
def testSliceSizeOne(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [10, 1], rnd.initialized_value())
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 0)
rnd = self.evaluate(rnd)
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, [
"10 43 0,1:0,43", "10 43 1,1:0,43", "10 43 2,1:0,43",
"10 43 3,1:0,43", "10 43 4,1:0,43", "10 43 5,1:0,43",
"10 43 6,1:0,43", "10 43 7,1:0,43", "10 43 8,1:0,43", "10 43 9,1:0,43"
])
def testIotaInitializer(self):
self.assertAllClose([0., 1., 2., 3.], _IotaInitializer([4]))
self.assertAllClose([[0., 1.], [0., 10.], [0., 100.], [0., 1000.]],
_IotaInitializer([4, 2]))
with self.cached_session():
vs = partitioned_variables.create_partitioned_variables([13, 5], [3, 1],
_IotaInitializer)
self.evaluate(variables.global_variables_initializer())
slice0 = _IotaInitializer([5, 5])
slice1 = _IotaInitializer([4, 5])
slice2 = _IotaInitializer([4, 5])
val = array_ops.concat(vs, 0)
self.assertAllClose(slice0 + slice1 + slice2, val)
self._TestSaveSpec(vs, ["13 5 0,5:0,5", "13 5 5,4:0,5", "13 5 9,4:0,5"])
@test_util.run_deprecated_v1
def testRandomInitializer(self):
    # Sanity check that the slices use different seeds when using a random
    # initializer function.
with self.cached_session():
var0, var1 = partitioned_variables.create_partitioned_variables(
[20, 12], [1, 2], init_ops.random_uniform_initializer())
self.evaluate(variables.global_variables_initializer())
val0, val1 = self.evaluate(var0).flatten(), self.evaluate(var1).flatten()
self.assertTrue(np.linalg.norm(val0 - val1) > 1e-6)
# Negative test that proves that slices have the same values if
# the random initializer uses a seed.
with self.cached_session():
var0, var1 = partitioned_variables.create_partitioned_variables(
[20, 12], [1, 2], init_ops.random_uniform_initializer(seed=201))
self.evaluate(variables.global_variables_initializer())
val0, val1 = self.evaluate(var0).flatten(), self.evaluate(var1).flatten()
self.assertAllClose(val0, val1)
def testSomeErrors(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10], [1, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 20], [1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [1, 2, 3], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [11, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [20, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [1, 50], rnd.initialized_value())
@test_util.run_deprecated_v1
def testControlDepsNone(self):
with self.cached_session() as session:
c = constant_op.constant(1.0)
with ops.control_dependencies([c]):
        # d gets the control dependency.
d = constant_op.constant(2.0)
# Partitioned variables do not.
var_x = variable_scope.get_variable(
"x",
shape=[2],
initializer=init_ops.ones_initializer(),
partitioner=partitioned_variables.variable_axis_size_partitioner(4))
ops_before_read = session.graph.get_operations()
var_x.as_tensor() # Caches the ops for subsequent reads.
reading_ops = [
op for op in session.graph.get_operations()
if op not in ops_before_read
]
self.assertEqual([c.op], d.op.control_inputs)
      # Tests that no control dependencies are added when reading a partitioned
      # variable, which matches the behavior of reading a regular variable.
for op in reading_ops:
self.assertEqual([], op.control_inputs)
@test_util.run_deprecated_v1
def testConcat(self):
with self.cached_session() as session:
var_x = variable_scope.get_variable(
"x",
initializer=constant_op.constant([1., 2.]),
partitioner=partitioned_variables.variable_axis_size_partitioner(4))
c = constant_op.constant(1.0)
with ops.control_dependencies([c]):
ops_before_concat = session.graph.get_operations()
value = var_x._concat() # pylint: disable=protected-access
concat_ops = [
op for op in session.graph.get_operations()
if op not in ops_before_concat
]
concat_control_inputs = [
ci for op in concat_ops for ci in op.control_inputs
]
self.assertTrue(
c.op in concat_control_inputs,
"var_x._concat() should get control dependencies from its scope.")
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(value, var_x.as_tensor())
def testMetaGraphSaveLoad(self):
save_prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph) as session:
partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable(
"v0", dtype=dtypes.float32, shape=(10, 10))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), 5)
self.assertAllEqual(v0_part, (5, 1))
self.evaluate(variables.global_variables_initializer())
save_graph.get_collection_ref("partvar").append(v0)
saver = saver_lib.Saver()
save_graph.finalize()
save_path = saver.save(sess=session, save_path=save_prefix)
previous_value = session.run(
save_graph.get_tensor_by_name(v0.name + ":0"))
restore_graph = ops.Graph()
with restore_graph.as_default(), self.session(
graph=restore_graph) as session:
saver = saver_lib.import_meta_graph(save_path + ".meta")
saver.restore(sess=session, save_path=save_path)
v0, = save_graph.get_collection_ref("partvar")
self.assertIsInstance(v0, variables.PartitionedVariable)
self.assertAllEqual(
previous_value,
session.run(restore_graph.get_tensor_by_name(v0.name + ":0")))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/partitioned_variables_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_split_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class StringSplitOpTest(test.TestCase):
def testStringSplit(self):
strings = ["pigs on the wing", "animals"]
with self.cached_session():
tokens = string_ops.string_split(strings)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [0, 3], [1, 0]])
self.assertAllEqual(values, [b"pigs", b"on", b"the", b"wing", b"animals"])
self.assertAllEqual(shape, [2, 4])
@test_util.run_deprecated_v1
def testStringSplitEmptyDelimiter(self):
strings = ["hello", "hola", b"\xF0\x9F\x98\x8E"] # Last string is U+1F60E
with self.cached_session():
tokens = string_ops.string_split(strings, delimiter="")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
[1, 0], [1, 1], [1, 2], [1, 3], [2, 0],
[2, 1], [2, 2], [2, 3]])
expected = np.array(
[
"h", "e", "l", "l", "o", "h", "o", "l", "a", b"\xf0", b"\x9f",
b"\x98", b"\x8e"
],
dtype="|S1")
self.assertAllEqual(values.tolist(), expected)
self.assertAllEqual(shape, [3, 5])
def testStringSplitEmptyToken(self):
strings = ["", " a", "b ", " c", " ", " d ", " e", "f ", " g ", " "]
with self.cached_session():
tokens = string_ops.string_split(strings)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(
indices,
[[1, 0], [2, 0], [3, 0], [5, 0], [6, 0], [7, 0], [8, 0]])
self.assertAllEqual(values, [b"a", b"b", b"c", b"d", b"e", b"f", b"g"])
self.assertAllEqual(shape, [10, 1])
def testStringSplitOnSetEmptyToken(self):
strings = ["", " a", "b ", " c", " ", " d ", ". e", "f .", " .g. ", " ."]
with self.cached_session():
tokens = string_ops.string_split(strings, delimiter=" .")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(
indices,
[[1, 0], [2, 0], [3, 0], [5, 0], [6, 0], [7, 0], [8, 0]])
self.assertAllEqual(values, [b"a", b"b", b"c", b"d", b"e", b"f", b"g"])
self.assertAllEqual(shape, [10, 1])
@test_util.run_deprecated_v1
def testStringSplitWithDelimiter(self):
strings = ["hello|world", "hello world"]
with self.cached_session():
self.assertRaises(
ValueError, string_ops.string_split, strings, delimiter=["|", ""])
self.assertRaises(
ValueError, string_ops.string_split, strings, delimiter=["a"])
tokens = string_ops.string_split(strings, delimiter="|")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0]])
self.assertAllEqual(values, [b"hello", b"world", b"hello world"])
self.assertAllEqual(shape, [2, 2])
tokens = string_ops.string_split(strings, delimiter="| ")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0], [1, 1]])
self.assertAllEqual(values, [b"hello", b"world", b"hello", b"world"])
self.assertAllEqual(shape, [2, 2])
@test_util.run_deprecated_v1
def testStringSplitWithDelimiterTensor(self):
strings = ["hello|world", "hello world"]
with self.cached_session() as sess:
delimiter = array_ops.placeholder(dtypes.string)
tokens = string_ops.string_split(strings, delimiter=delimiter)
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a", "b"]})
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a"]})
indices, values, shape = sess.run(tokens, feed_dict={delimiter: "|"})
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0]])
self.assertAllEqual(values, [b"hello", b"world", b"hello world"])
self.assertAllEqual(shape, [2, 2])
@test_util.run_deprecated_v1
def testStringSplitWithDelimitersTensor(self):
strings = ["hello.cruel,world", "hello cruel world"]
with self.cached_session() as sess:
delimiter = array_ops.placeholder(dtypes.string)
tokens = string_ops.string_split(strings, delimiter=delimiter)
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a", "b"]})
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a"]})
indices, values, shape = sess.run(tokens, feed_dict={delimiter: ".,"})
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [1, 0]])
self.assertAllEqual(values,
[b"hello", b"cruel", b"world", b"hello cruel world"])
self.assertAllEqual(shape, [2, 3])
def testStringSplitWithNoSkipEmpty(self):
strings = ["#a", "b#", "#c#"]
with self.cached_session():
tokens = string_ops.string_split(strings, "#", skip_empty=False)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1],
[1, 0], [1, 1],
[2, 0], [2, 1], [2, 2]])
self.assertAllEqual(values, [b"", b"a", b"b", b"", b"", b"c", b""])
self.assertAllEqual(shape, [3, 3])
with self.cached_session():
tokens = string_ops.string_split(strings, "#")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(values, [b"a", b"b", b"c"])
self.assertAllEqual(indices, [[0, 0], [1, 0], [2, 0]])
self.assertAllEqual(shape, [3, 1])
class StringSplitV2OpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.named_parameters([
{"testcase_name": "Simple",
"input": [b"pigs on the wing", b"animals"],
"expected": [[b"pigs", b"on", b"the", b"wing"], [b"animals"]]},
{"testcase_name": "MultiCharSeparator",
"input": [b"1<>2<>3", b"<><>4<>5<><>6<>"],
"sep": b"<>",
"expected": [[b"1", b"2", b"3"],
[b"", b"", b"4", b"5", b"", b"6", b""]]},
{"testcase_name": "SimpleSeparator",
"input": [b"1,2,3", b"4,5,,6,"],
"sep": b",",
"expected": [[b"1", b"2", b"3"], [b"4", b"5", b"", b"6", b""]]},
{"testcase_name": "EmptySeparator",
"input": [b"1 2 3", b" 4 5 6 "],
"expected": [[b"1", b"2", b"3"], [b"4", b"5", b"6"]]},
{"testcase_name": "EmptySeparatorEmptyInputString",
"input": [b""],
"expected": [[]]},
{"testcase_name": "EmptyInputVector",
"input": [],
"expected": []},
{"testcase_name": "SimpleSeparatorMaxSplit",
"input": [b"1,2,3", b"4,5,,6,"],
"sep": b",",
"maxsplit": 1,
"expected": [[b"1", b"2,3"], [b"4", b"5,,6,"]]},
{"testcase_name": "EmptySeparatorMaxSplit",
"input": [b"1 2 3", b" 4 5 6 "],
"maxsplit": 1,
"expected": [[b"1", b"2 3"], [b"4", b"5 6 "]]},
{"testcase_name": "ScalarInput",
"input": b"1,2,3",
"sep": b",",
"expected": [b"1", b"2", b"3"]},
{"testcase_name": "Dense2DInput",
"input": [[b"1,2,3", b"4"], [b"5,6", b"7,8,9"]],
"sep": b",",
"expected": [[[b"1", b"2", b"3"], [b"4"]],
[[b"5", b"6"], [b"7", b"8", b"9"]]]},
{"testcase_name": "Ragged2DInput",
"input": [[b"1,2,3", b"4"], [b"5,6"]],
"input_is_ragged": True,
"sep": b",",
"expected": [[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]]},
{"testcase_name": "Ragged3DInput",
"input": [[[b"1,2,3", b"4"], [b"5,6"]], [[b"7,8,9"]]],
"input_is_ragged": True,
"sep": b",",
"expected": [[[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]],
[[[b"7", b"8", b"9"]]]]},
{"testcase_name": "Ragged4DInput",
"input": [[[[b"1,2,3", b"4"], [b"5,6"]], [[b"7,8,9"]]], [[[b""]]]],
"input_is_ragged": True,
"sep": b",",
"expected": [[[[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]],
[[[b"7", b"8", b"9"]]]], [[[[b""]]]]]},
{"testcase_name": "Ragged4DInputEmptySeparator",
"input": [[[[b"1 2 3", b"4"], [b"5 6"]], [[b"7 8 9"]]], [[[b""]]]],
"input_is_ragged": True,
"expected": [[[[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]],
[[[b"7", b"8", b"9"]]]], [[[[]]]]]},
]) # pyformat: disable
def testSplitV2(self,
input,
expected,
input_is_ragged=False,
**kwargs): # pylint: disable=redefined-builtin
# Check that we are matching the behavior of Python's str.split:
self.assertEqual(expected, self._py_split(input, **kwargs))
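    # For example, plain Python gives b"4,5,,6,".split(b",") ==
    # [b"4", b"5", b"", b"6", b""], matching the "SimpleSeparator" case above.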
# Prepare the input tensor.
if input_is_ragged:
input = ragged_factory_ops.constant(input, dtype=dtypes.string)
else:
input = constant_op.constant(input, dtype=dtypes.string)
# Check that the public version (which returns a RaggedTensor) works
# correctly.
expected_ragged = ragged_factory_ops.constant(
expected, ragged_rank=input.shape.ndims)
actual_ragged_v1 = ragged_string_ops.strings_split_v1(
input, result_type="RaggedTensor", **kwargs)
actual_ragged_v1_input_kwarg = ragged_string_ops.strings_split_v1(
input=input, result_type="RaggedTensor", **kwargs)
actual_ragged_v1_source_kwarg = ragged_string_ops.strings_split_v1(
source=input, result_type="RaggedTensor", **kwargs)
actual_ragged_v2 = ragged_string_ops.string_split_v2(input, **kwargs)
actual_ragged_v2_input_kwarg = ragged_string_ops.string_split_v2(
input=input, **kwargs)
self.assertRaggedEqual(expected_ragged, actual_ragged_v1)
self.assertRaggedEqual(expected_ragged, actual_ragged_v1_input_kwarg)
self.assertRaggedEqual(expected_ragged, actual_ragged_v1_source_kwarg)
self.assertRaggedEqual(expected_ragged, actual_ragged_v2)
self.assertRaggedEqual(expected_ragged, actual_ragged_v2_input_kwarg)
# Check that the internal version (which returns a SparseTensor) works
    # correctly. Note: the internal version only supports vector inputs.
if input.shape.ndims == 1:
expected_sparse = self.evaluate(expected_ragged.to_sparse())
actual_sparse_v1 = ragged_string_ops.strings_split_v1(
input, result_type="SparseTensor", **kwargs)
actual_sparse_v2 = string_ops.string_split_v2(input, **kwargs)
for actual_sparse in [actual_sparse_v1, actual_sparse_v2]:
self.assertEqual(expected_sparse.indices.tolist(),
self.evaluate(actual_sparse.indices).tolist())
self.assertEqual(expected_sparse.values.tolist(),
self.evaluate(actual_sparse.values).tolist())
self.assertEqual(expected_sparse.dense_shape.tolist(),
self.evaluate(actual_sparse.dense_shape).tolist())
def _py_split(self, strings, **kwargs):
if isinstance(strings, compat.bytes_or_text_types):
# Note: str.split doesn't accept keyword args.
if "maxsplit" in kwargs:
return strings.split(kwargs.get("sep", None), kwargs["maxsplit"])
else:
return strings.split(kwargs.get("sep", None))
else:
return [self._py_split(s, **kwargs) for s in strings]
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/string_split_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodePngOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class DecodePngOpTest(test.TestCase):
def test16bit(self):
img_bytes = [[0, 255], [1024, 1024 + 255]]
# Encoded PNG bytes resulting from encoding the above img_bytes
# using go's image/png encoder.
encoded_bytes = [
137, 80, 78, 71, 13, 10, 26, 10, 0, 0, 0, 13, 73, 72, 68, 82, 0, 0, 0,
2, 0, 0, 0, 2, 16, 0, 0, 0, 0, 7, 77, 142, 187, 0, 0, 0, 21, 73, 68, 65,
84, 120, 156, 98, 98, 96, 96, 248, 207, 194, 2, 36, 1, 1, 0, 0, 255,
255, 6, 60, 1, 10, 68, 160, 26, 131, 0, 0, 0, 0, 73, 69, 78, 68, 174,
66, 96, 130
]
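    # The IHDR chunk above declares a 2x2 image with bit depth 16 and color
    # type 0 (grayscale), which is why the decode below requests
    # dtype=dtypes.uint16.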
byte_string = bytes(bytearray(encoded_bytes))
img_in = constant_op.constant(byte_string, dtype=dtypes.string)
decode = array_ops.squeeze(
image_ops.decode_png(
img_in, dtype=dtypes.uint16))
with self.cached_session():
decoded = self.evaluate(decode)
self.assertAllEqual(decoded, img_bytes)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/decode_png_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 summary ops from summary_ops_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import six
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine.sequential import Sequential
from tensorflow.python.keras.engine.training import Model
from tensorflow.python.keras.layers.core import Activation
from tensorflow.python.keras.layers.core import Dense
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2 as summary_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class SummaryOpsCoreTest(test_util.TensorFlowTestCase):
def testWrite(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
output = summary_ops.write('tag', 42, step=12)
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
value = events[1].summary.value[0]
self.assertEqual('tag', value.tag)
self.assertEqual(42, to_numpy(value))
def testWrite_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
return summary_ops.write('tag', 42, step=12)
output = f()
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
value = events[1].summary.value[0]
self.assertEqual('tag', value.tag)
self.assertEqual(42, to_numpy(value))
def testWrite_metadata(self):
logdir = self.get_temp_dir()
metadata = summary_pb2.SummaryMetadata()
metadata.plugin_data.plugin_name = 'foo'
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('obj', 0, 0, metadata=metadata)
summary_ops.write('bytes', 0, 0, metadata=metadata.SerializeToString())
m = constant_op.constant(metadata.SerializeToString())
summary_ops.write('string_tensor', 0, 0, metadata=m)
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(metadata, events[1].summary.value[0].metadata)
self.assertEqual(metadata, events[2].summary.value[0].metadata)
self.assertEqual(metadata, events[3].summary.value[0].metadata)
def testWrite_name(self):
@def_function.function
def f():
output = summary_ops.write('tag', 42, step=12, name='anonymous')
self.assertTrue(output.name.startswith('anonymous'))
f()
def testWrite_ndarray(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', [[1, 2], [3, 4]], step=12)
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual([[1, 2], [3, 4]], to_numpy(value))
def testWrite_tensor(self):
logdir = self.get_temp_dir()
with context.eager_mode():
t = constant_op.constant([[1, 2], [3, 4]])
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', t, step=12)
expected = t.numpy()
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual(expected, to_numpy(value))
def testWrite_tensor_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f(t):
with writer.as_default():
summary_ops.write('tag', t, step=12)
t = constant_op.constant([[1, 2], [3, 4]])
f(t)
expected = t.numpy()
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual(expected, to_numpy(value))
def testWrite_stringTensor(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', [b'foo', b'bar'], step=12)
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual([b'foo', b'bar'], to_numpy(value))
@test_util.run_gpu_only
def testWrite_gpuDeviceContext(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer(logdir).as_default():
with ops.device('/GPU:0'):
value = constant_op.constant(42.0)
step = constant_op.constant(12, dtype=dtypes.int64)
summary_ops.write('tag', value, step=step).numpy()
empty_metadata = summary_pb2.SummaryMetadata()
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
self.assertEqual(42, to_numpy(events[1].summary.value[0]))
self.assertEqual(empty_metadata, events[1].summary.value[0].metadata)
@test_util.also_run_as_tf_function
def testWrite_noDefaultWriter(self):
# Use assertAllEqual instead of assertFalse since it works in a defun.
self.assertAllEqual(False, summary_ops.write('tag', 42, step=0))
@test_util.also_run_as_tf_function
def testWrite_noStep_okayIfAlsoNoDefaultWriter(self):
# Use assertAllEqual instead of assertFalse since it works in a defun.
self.assertAllEqual(False, summary_ops.write('tag', 42))
@test_util.also_run_as_tf_function
def testWrite_noStep(self):
logdir = self.get_temp_dir()
with summary_ops.create_file_writer(logdir).as_default():
with self.assertRaisesRegex(ValueError, 'No step set'):
summary_ops.write('tag', 42)
def testWrite_usingDefaultStep(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
with summary_ops.create_file_writer(logdir).as_default():
summary_ops.set_step(1)
summary_ops.write('tag', 1.0)
summary_ops.set_step(2)
summary_ops.write('tag', 1.0)
mystep = variables.Variable(10, dtype=dtypes.int64)
summary_ops.set_step(mystep)
summary_ops.write('tag', 1.0)
mystep.assign_add(1)
summary_ops.write('tag', 1.0)
events = events_from_logdir(logdir)
self.assertEqual(5, len(events))
self.assertEqual(1, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(10, events[3].step)
self.assertEqual(11, events[4].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepConstant_fromFunction(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
writer = summary_ops.create_file_writer(logdir)
@def_function.function
def f():
with writer.as_default():
summary_ops.write('tag', 1.0)
summary_ops.set_step(1)
f()
summary_ops.set_step(2)
f()
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual(1, events[1].step)
# The step value will still be 1 because the value was captured at the
# time the function was first traced.
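        # (Passing a tf.Variable as the default step instead, as in the next
        # test, makes the step be read at call time rather than captured when
        # the function is traced.)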
self.assertEqual(1, events[2].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepVariable_fromFunction(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
writer = summary_ops.create_file_writer(logdir)
@def_function.function
def f():
with writer.as_default():
summary_ops.write('tag', 1.0)
mystep = variables.Variable(0, dtype=dtypes.int64)
summary_ops.set_step(mystep)
f()
mystep.assign_add(1)
f()
mystep.assign(10)
f()
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(1, events[2].step)
self.assertEqual(10, events[3].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepConstant_fromLegacyGraph(self):
logdir = self.get_temp_dir()
try:
with context.graph_mode():
writer = summary_ops.create_file_writer(logdir)
summary_ops.set_step(1)
with writer.as_default():
write_op = summary_ops.write('tag', 1.0)
summary_ops.set_step(2)
with self.cached_session() as sess:
sess.run(writer.init())
sess.run(write_op)
sess.run(write_op)
sess.run(writer.flush())
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual(1, events[1].step)
# The step value will still be 1 because the value was captured at the
# time the graph was constructed.
self.assertEqual(1, events[2].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepVariable_fromLegacyGraph(self):
logdir = self.get_temp_dir()
try:
with context.graph_mode():
writer = summary_ops.create_file_writer(logdir)
mystep = variables.Variable(0, dtype=dtypes.int64)
summary_ops.set_step(mystep)
with writer.as_default():
write_op = summary_ops.write('tag', 1.0)
first_assign_op = mystep.assign_add(1)
second_assign_op = mystep.assign(10)
with self.cached_session() as sess:
sess.run(writer.init())
sess.run(mystep.initializer)
sess.run(write_op)
sess.run(first_assign_op)
sess.run(write_op)
sess.run(second_assign_op)
sess.run(write_op)
sess.run(writer.flush())
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(1, events[2].step)
self.assertEqual(10, events[3].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_recordIf_constant(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
self.assertTrue(summary_ops.write('default', 1, step=0))
with summary_ops.record_if(True):
self.assertTrue(summary_ops.write('set_on', 1, step=0))
with summary_ops.record_if(False):
self.assertFalse(summary_ops.write('set_off', 1, step=0))
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual('default', events[1].summary.value[0].tag)
self.assertEqual('set_on', events[2].summary.value[0].tag)
def testWrite_recordIf_constant_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
# Use assertAllEqual instead of assertTrue since it works in a defun.
self.assertAllEqual(summary_ops.write('default', 1, step=0), True)
with summary_ops.record_if(True):
self.assertAllEqual(summary_ops.write('set_on', 1, step=0), True)
with summary_ops.record_if(False):
self.assertAllEqual(summary_ops.write('set_off', 1, step=0), False)
f()
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual('default', events[1].summary.value[0].tag)
self.assertEqual('set_on', events[2].summary.value[0].tag)
def testWrite_recordIf_callable(self):
logdir = self.get_temp_dir()
with context.eager_mode():
step = variables.Variable(-1, dtype=dtypes.int64)
def record_fn():
step.assign_add(1)
return int(step % 2) == 0
with summary_ops.create_file_writer_v2(logdir).as_default():
with summary_ops.record_if(record_fn):
self.assertTrue(summary_ops.write('tag', 1, step=step))
self.assertFalse(summary_ops.write('tag', 1, step=step))
self.assertTrue(summary_ops.write('tag', 1, step=step))
self.assertFalse(summary_ops.write('tag', 1, step=step))
self.assertTrue(summary_ops.write('tag', 1, step=step))
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(4, events[3].step)
def testWrite_recordIf_callable_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
step = variables.Variable(-1, dtype=dtypes.int64)
@def_function.function
def record_fn():
step.assign_add(1)
return math_ops.equal(step % 2, 0)
@def_function.function
def f():
with writer.as_default():
with summary_ops.record_if(record_fn):
return [
summary_ops.write('tag', 1, step=step),
summary_ops.write('tag', 1, step=step),
summary_ops.write('tag', 1, step=step)]
self.assertAllEqual(f(), [True, False, True])
self.assertAllEqual(f(), [False, True, False])
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(4, events[3].step)
def testWrite_recordIf_tensorInput_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[], dtype=dtypes.int64)])
def f(step):
with writer.as_default():
with summary_ops.record_if(math_ops.equal(step % 2, 0)):
return summary_ops.write('tag', 1, step=step)
self.assertTrue(f(0))
self.assertFalse(f(1))
self.assertTrue(f(2))
self.assertFalse(f(3))
self.assertTrue(f(4))
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(4, events[3].step)
def testWriteRawPb(self):
logdir = self.get_temp_dir()
pb = summary_pb2.Summary()
pb.value.add().simple_value = 42.0
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
output = summary_ops.write_raw_pb(pb.SerializeToString(), step=12)
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
self.assertProtoEquals(pb, events[1].summary)
def testWriteRawPb_fromFunction(self):
logdir = self.get_temp_dir()
pb = summary_pb2.Summary()
pb.value.add().simple_value = 42.0
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
return summary_ops.write_raw_pb(pb.SerializeToString(), step=12)
output = f()
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
self.assertProtoEquals(pb, events[1].summary)
def testWriteRawPb_multipleValues(self):
logdir = self.get_temp_dir()
pb1 = summary_pb2.Summary()
pb1.value.add().simple_value = 1.0
pb1.value.add().simple_value = 2.0
pb2 = summary_pb2.Summary()
pb2.value.add().simple_value = 3.0
pb3 = summary_pb2.Summary()
pb3.value.add().simple_value = 4.0
pb3.value.add().simple_value = 5.0
pb3.value.add().simple_value = 6.0
pbs = [pb.SerializeToString() for pb in (pb1, pb2, pb3)]
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
output = summary_ops.write_raw_pb(pbs, step=12)
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
expected_pb = summary_pb2.Summary()
for i in range(6):
expected_pb.value.add().simple_value = i + 1.0
self.assertProtoEquals(expected_pb, events[1].summary)
def testWriteRawPb_invalidValue(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
with self.assertRaisesRegex(
errors.DataLossError,
'Bad tf.compat.v1.Summary binary proto tensor string'):
summary_ops.write_raw_pb('notaproto', step=12)
@test_util.also_run_as_tf_function
def testGetSetStep(self):
try:
self.assertIsNone(summary_ops.get_step())
summary_ops.set_step(1)
# Use assertAllEqual instead of assertEqual since it works in a defun.
self.assertAllEqual(1, summary_ops.get_step())
summary_ops.set_step(constant_op.constant(2))
self.assertAllEqual(2, summary_ops.get_step())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testGetSetStep_variable(self):
with context.eager_mode():
try:
mystep = variables.Variable(0)
summary_ops.set_step(mystep)
self.assertAllEqual(0, summary_ops.get_step().read_value())
mystep.assign_add(1)
self.assertAllEqual(1, summary_ops.get_step().read_value())
# Check that set_step() properly maintains reference to variable.
del mystep
self.assertAllEqual(1, summary_ops.get_step().read_value())
summary_ops.get_step().assign_add(1)
self.assertAllEqual(2, summary_ops.get_step().read_value())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testGetSetStep_variable_fromFunction(self):
with context.eager_mode():
try:
@def_function.function
def set_step(step):
summary_ops.set_step(step)
return summary_ops.get_step()
@def_function.function
def get_and_increment():
summary_ops.get_step().assign_add(1)
return summary_ops.get_step()
mystep = variables.Variable(0)
self.assertAllEqual(0, set_step(mystep))
self.assertAllEqual(0, summary_ops.get_step().read_value())
self.assertAllEqual(1, get_and_increment())
self.assertAllEqual(2, get_and_increment())
# Check that set_step() properly maintains reference to variable.
del mystep
self.assertAllEqual(3, get_and_increment())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.also_run_as_tf_function
def testSummaryScope(self):
with summary_ops.summary_scope('foo') as (tag, scope):
self.assertEqual('foo', tag)
self.assertEqual('foo/', scope)
with summary_ops.summary_scope('bar') as (tag, scope):
self.assertEqual('foo/bar', tag)
self.assertEqual('foo/bar/', scope)
with summary_ops.summary_scope('with/slash') as (tag, scope):
self.assertEqual('foo/with/slash', tag)
self.assertEqual('foo/with/slash/', scope)
with ops.name_scope(None):
with summary_ops.summary_scope('unnested') as (tag, scope):
self.assertEqual('unnested', tag)
self.assertEqual('unnested/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_defaultName(self):
with summary_ops.summary_scope(None) as (tag, scope):
self.assertEqual('summary', tag)
self.assertEqual('summary/', scope)
with summary_ops.summary_scope(None, 'backup') as (tag, scope):
self.assertEqual('backup', tag)
self.assertEqual('backup/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_handlesCharactersIllegalForScope(self):
with summary_ops.summary_scope('f?o?o') as (tag, scope):
self.assertEqual('f?o?o', tag)
self.assertEqual('foo/', scope)
    # If none of the characters are legal for a scope name, use the default name.
with summary_ops.summary_scope('???', 'backup') as (tag, scope):
self.assertEqual('???', tag)
self.assertEqual('backup/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_nameNotUniquifiedForTag(self):
constant_op.constant(0, name='foo')
with summary_ops.summary_scope('foo') as (tag, _):
self.assertEqual('foo', tag)
with summary_ops.summary_scope('foo') as (tag, _):
self.assertEqual('foo', tag)
with ops.name_scope('with'):
constant_op.constant(0, name='slash')
with summary_ops.summary_scope('with/slash') as (tag, _):
self.assertEqual('with/slash', tag)
def testAllV2SummaryOps(self):
logdir = self.get_temp_dir()
def define_ops():
result = []
# TF 2.0 summary ops
result.append(summary_ops.write('write', 1, step=0))
result.append(summary_ops.write_raw_pb(b'', step=0, name='raw_pb'))
# TF 1.x tf.contrib.summary ops
result.append(summary_ops.generic('tensor', 1, step=1))
result.append(summary_ops.scalar('scalar', 2.0, step=1))
result.append(summary_ops.histogram('histogram', [1.0], step=1))
result.append(summary_ops.image('image', [[[[1.0]]]], step=1))
result.append(summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1))
return result
with context.graph_mode():
ops_without_writer = define_ops()
with summary_ops.create_file_writer_v2(logdir).as_default():
with summary_ops.record_if(True):
ops_recording_on = define_ops()
with summary_ops.record_if(False):
ops_recording_off = define_ops()
# We should be collecting all ops defined with a default writer present,
# regardless of whether recording was set on or off, but not those defined
# without a writer at all.
del ops_without_writer
expected_ops = ops_recording_on + ops_recording_off
self.assertCountEqual(expected_ops, summary_ops.all_v2_summary_ops())
class SummaryWriterTest(test_util.TensorFlowTestCase):
def testCreate_withInitAndClose(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=1000, flush_millis=1000000)
get_total = lambda: len(events_from_logdir(logdir))
self.assertEqual(1, get_total()) # file_version Event
# Calling init() again while writer is open has no effect
writer.init()
self.assertEqual(1, get_total())
with writer.as_default():
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
# Calling .close() should do an implicit flush
writer.close()
self.assertEqual(2, get_total())
def testCreate_fromFunction(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
# Returned SummaryWriter must be stored in a non-local variable so it
# lives throughout the function execution.
if not hasattr(f, 'writer'):
f.writer = summary_ops.create_file_writer_v2(logdir)
with context.eager_mode():
f()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
def testCreate_graphTensorArgument_raisesError(self):
logdir = self.get_temp_dir()
with context.graph_mode():
logdir_tensor = constant_op.constant(logdir)
with context.eager_mode():
with self.assertRaisesRegex(
ValueError, 'Invalid graph Tensor argument.*logdir'):
summary_ops.create_file_writer_v2(logdir_tensor)
self.assertEmpty(gfile.Glob(os.path.join(logdir, '*')))
def testCreate_fromFunction_graphTensorArgument_raisesError(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
summary_ops.create_file_writer_v2(constant_op.constant(logdir))
with context.eager_mode():
with self.assertRaisesRegex(
ValueError, 'Invalid graph Tensor argument.*logdir'):
f()
self.assertEmpty(gfile.Glob(os.path.join(logdir, '*')))
def testCreate_fromFunction_unpersistedResource_raisesError(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
with summary_ops.create_file_writer_v2(logdir).as_default():
pass # Calling .as_default() is enough to indicate use.
with context.eager_mode():
# TODO(nickfelt): change this to a better error
with self.assertRaisesRegex(
errors.NotFoundError, 'Resource.*does not exist'):
f()
# Even though we didn't use it, an event file will have been created.
self.assertEqual(1, len(gfile.Glob(os.path.join(logdir, '*'))))
def testCreate_immediateSetAsDefault_retainsReference(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
summary_ops.create_file_writer_v2(logdir).set_as_default()
summary_ops.flush()
finally:
# Ensure we clean up no matter how the test executes.
context.context().summary_writer_resource = None
def testCreate_immediateAsDefault_retainsReference(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.flush()
def testNoSharing(self):
# Two writers with the same logdir should not share state.
logdir = self.get_temp_dir()
with context.eager_mode():
writer1 = summary_ops.create_file_writer_v2(logdir)
with writer1.as_default():
summary_ops.write('tag', 1, step=1)
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
file1 = event_files[0]
writer2 = summary_ops.create_file_writer_v2(logdir)
with writer2.as_default():
summary_ops.write('tag', 1, step=2)
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(2, len(event_files))
event_files.remove(file1)
file2 = event_files[0]
# Extra writes to ensure interleaved usage works.
with writer1.as_default():
summary_ops.write('tag', 1, step=1)
with writer2.as_default():
summary_ops.write('tag', 1, step=2)
events = iter(events_from_file(file1))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(1, next(events).step)
self.assertEqual(1, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
events = iter(events_from_file(file2))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(2, next(events).step)
self.assertEqual(2, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
def testNoSharing_fromFunction(self):
logdir = self.get_temp_dir()
@def_function.function
def f1():
if not hasattr(f1, 'writer'):
f1.writer = summary_ops.create_file_writer_v2(logdir)
with f1.writer.as_default():
summary_ops.write('tag', 1, step=1)
@def_function.function
def f2():
if not hasattr(f2, 'writer'):
f2.writer = summary_ops.create_file_writer_v2(logdir)
with f2.writer.as_default():
summary_ops.write('tag', 1, step=2)
with context.eager_mode():
f1()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
file1 = event_files[0]
f2()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(2, len(event_files))
event_files.remove(file1)
file2 = event_files[0]
# Extra writes to ensure interleaved usage works.
f1()
f2()
events = iter(events_from_file(file1))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(1, next(events).step)
self.assertEqual(1, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
events = iter(events_from_file(file2))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(2, next(events).step)
self.assertEqual(2, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
def testMaxQueue(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(
logdir, max_queue=1, flush_millis=999999).as_default():
get_total = lambda: len(events_from_logdir(logdir))
# Note: First tf.compat.v1.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
# Should flush after second summary since max_queue = 1
summary_ops.write('tag', 1, step=0)
self.assertEqual(3, get_total())
def testWriterFlush(self):
logdir = self.get_temp_dir()
get_total = lambda: len(events_from_logdir(logdir))
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=1000, flush_millis=1000000)
self.assertEqual(1, get_total()) # file_version Event
with writer.as_default():
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
writer.flush()
self.assertEqual(2, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(2, get_total())
# Exiting the "as_default()" should do an implicit flush
self.assertEqual(3, get_total())
def testFlushFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=999999, flush_millis=999999)
with writer.as_default():
get_total = lambda: len(events_from_logdir(logdir))
# Note: First tf.compat.v1.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.write('tag', 1, step=0)
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
summary_ops.flush()
self.assertEqual(3, get_total())
# Test "writer" parameter
summary_ops.write('tag', 1, step=0)
self.assertEqual(3, get_total())
summary_ops.flush(writer=writer)
self.assertEqual(4, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(4, get_total())
summary_ops.flush(writer=writer._resource) # pylint:disable=protected-access
self.assertEqual(5, get_total())
@test_util.assert_no_new_tensors
def testNoMemoryLeak_graphMode(self):
logdir = self.get_temp_dir()
with context.graph_mode(), ops.Graph().as_default():
summary_ops.create_file_writer_v2(logdir)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testNoMemoryLeak_eagerMode(self):
logdir = self.get_temp_dir()
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', 1, step=0)
def testClose_preventsLaterUse(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
writer.close()
writer.close() # redundant close() is a no-op
writer.flush() # redundant flush() is a no-op
with self.assertRaisesRegex(RuntimeError, 'already closed'):
writer.init()
with self.assertRaisesRegex(RuntimeError, 'already closed'):
with writer.as_default():
self.fail('should not get here')
with self.assertRaisesRegex(RuntimeError, 'already closed'):
writer.set_as_default()
def testClose_closesOpenFile(self):
try:
import psutil # pylint: disable=g-import-not-at-top
except ImportError:
raise unittest.SkipTest('test requires psutil')
proc = psutil.Process()
get_open_filenames = lambda: set(info[0] for info in proc.open_files())
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(files))
eventfile = files[0]
self.assertIn(eventfile, get_open_filenames())
writer.close()
self.assertNotIn(eventfile, get_open_filenames())
def testDereference_closesOpenFile(self):
try:
import psutil # pylint: disable=g-import-not-at-top
except ImportError:
raise unittest.SkipTest('test requires psutil')
proc = psutil.Process()
get_open_filenames = lambda: set(info[0] for info in proc.open_files())
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(files))
eventfile = files[0]
self.assertIn(eventfile, get_open_filenames())
del writer
self.assertNotIn(eventfile, get_open_filenames())
class SummaryOpsTest(test_util.TensorFlowTestCase):
def tearDown(self):
summary_ops.trace_off()
def run_metadata(self, *args, **kwargs):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
with writer.as_default():
summary_ops.run_metadata(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
return events[1]
def run_metadata_graphs(self, *args, **kwargs):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
with writer.as_default():
summary_ops.run_metadata_graphs(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
return events[1]
def create_run_metadata(self):
step_stats = step_stats_pb2.StepStats(dev_stats=[
step_stats_pb2.DeviceStepStats(
device='cpu:0',
node_stats=[step_stats_pb2.NodeExecStats(node_name='hello')])
])
return config_pb2.RunMetadata(
function_graphs=[
config_pb2.RunMetadata.FunctionGraphs(
pre_optimization_graph=graph_pb2.GraphDef(
node=[node_def_pb2.NodeDef(name='foo')]))
],
step_stats=step_stats)
def keras_model(self, *args, **kwargs):
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
with writer.as_default():
summary_ops.keras_model(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
# The first event contains no summary values. The written content goes to
# the second event.
return events[1]
def run_trace(self, f, step=1):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
summary_ops.trace_on(graph=True, profiler=False)
with writer.as_default():
f()
summary_ops.trace_export(name='foo', step=step)
writer.close()
events = events_from_logdir(logdir)
return events[1]
@test_util.run_v2_only
def testRunMetadata_usesNameAsTag(self):
meta = config_pb2.RunMetadata()
with ops.name_scope('foo'):
event = self.run_metadata(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
self.assertEqual('foo/my_name', first_val.tag)
@test_util.run_v2_only
def testRunMetadata_summaryMetadata(self):
expected_summary_metadata = """
plugin_data {
plugin_name: "graph_run_metadata"
content: "1"
}
"""
meta = config_pb2.RunMetadata()
event = self.run_metadata(name='my_name', data=meta, step=1)
actual_summary_metadata = event.summary.value[0].metadata
self.assertProtoEquals(expected_summary_metadata, actual_summary_metadata)
@test_util.run_v2_only
def testRunMetadata_wholeRunMetadata(self):
expected_run_metadata = """
step_stats {
dev_stats {
device: "cpu:0"
node_stats {
node_name: "hello"
}
}
}
function_graphs {
pre_optimization_graph {
node {
name: "foo"
}
}
}
"""
meta = self.create_run_metadata()
event = self.run_metadata(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
self.assertProtoEquals(expected_run_metadata, actual_run_metadata)
@test_util.run_v2_only
def testRunMetadata_usesDefaultStep(self):
meta = config_pb2.RunMetadata()
try:
summary_ops.set_step(42)
event = self.run_metadata(name='my_name', data=meta)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testRunMetadataGraph_usesNameAsTag(self):
meta = config_pb2.RunMetadata()
with ops.name_scope('foo'):
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
self.assertEqual('foo/my_name', first_val.tag)
@test_util.run_v2_only
def testRunMetadataGraph_summaryMetadata(self):
expected_summary_metadata = """
plugin_data {
plugin_name: "graph_run_metadata_graph"
content: "1"
}
"""
meta = config_pb2.RunMetadata()
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
actual_summary_metadata = event.summary.value[0].metadata
self.assertProtoEquals(expected_summary_metadata, actual_summary_metadata)
@test_util.run_v2_only
def testRunMetadataGraph_runMetadataFragment(self):
expected_run_metadata = """
function_graphs {
pre_optimization_graph {
node {
name: "foo"
}
}
}
"""
meta = self.create_run_metadata()
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
self.assertProtoEquals(expected_run_metadata, actual_run_metadata)
@test_util.run_v2_only
def testRunMetadataGraph_usesDefaultStep(self):
meta = config_pb2.RunMetadata()
try:
summary_ops.set_step(42)
event = self.run_metadata_graphs(name='my_name', data=meta)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testKerasModel(self):
model = Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
event = self.keras_model(name='my_name', data=model, step=1)
first_val = event.summary.value[0]
self.assertEqual(model.to_json(), first_val.tensor.string_val[0].decode())
@test_util.run_v2_only
def testKerasModel_usesDefaultStep(self):
model = Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
try:
summary_ops.set_step(42)
event = self.keras_model(name='my_name', data=model)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testKerasModel_subclass(self):
class SimpleSubclass(Model):
def __init__(self):
super(SimpleSubclass, self).__init__(name='subclass')
self.dense = Dense(10, input_shape=(100,))
self.activation = Activation('relu', name='my_relu')
def call(self, inputs):
x = self.dense(inputs)
return self.activation(x)
model = SimpleSubclass()
with test.mock.patch.object(logging, 'warn') as mock_log:
self.assertFalse(
summary_ops.keras_model(name='my_name', data=model, step=1))
self.assertRegexpMatches(
str(mock_log.call_args), 'Model failed to serialize as JSON.')
@test_util.run_v2_only
def testKerasModel_otherExceptions(self):
model = Sequential()
with test.mock.patch.object(model, 'to_json') as mock_to_json:
with test.mock.patch.object(logging, 'warn') as mock_log:
mock_to_json.side_effect = Exception('oops')
self.assertFalse(
summary_ops.keras_model(name='my_name', data=model, step=1))
self.assertRegexpMatches(
str(mock_log.call_args),
'Model failed to serialize as JSON. Ignoring... oops')
@test_util.run_v2_only
def testTrace(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
event = self.run_trace(f)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
    # The content of function_graphs is large and details such as the device
    # can change between runs, so only its presence is checked here.
self.assertTrue(hasattr(actual_run_metadata, 'function_graphs'))
@test_util.run_v2_only
def testTrace_cannotEnableTraceInFunction(self):
@def_function.function
def f():
summary_ops.trace_on(graph=True, profiler=False)
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
with test.mock.patch.object(logging, 'warn') as mock_log:
f()
self.assertRegexpMatches(
str(mock_log.call_args), 'Cannot enable trace inside a tf.function.')
@test_util.run_v2_only
def testTrace_cannotEnableTraceInGraphMode(self):
with test.mock.patch.object(logging, 'warn') as mock_log:
with context.graph_mode():
summary_ops.trace_on(graph=True, profiler=False)
self.assertRegexpMatches(
str(mock_log.call_args), 'Must enable trace in eager mode.')
@test_util.run_v2_only
def testTrace_cannotExportTraceWithoutTrace(self):
with six.assertRaisesRegex(self, ValueError,
'Must enable trace before export.'):
summary_ops.trace_export(name='foo', step=1)
@test_util.run_v2_only
def testTrace_cannotExportTraceInFunction(self):
summary_ops.trace_on(graph=True, profiler=False)
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
summary_ops.trace_export(name='foo', step=1)
return x**y
with test.mock.patch.object(logging, 'warn') as mock_log:
f()
self.assertRegexpMatches(
str(mock_log.call_args),
'Cannot export trace inside a tf.function.')
@test_util.run_v2_only
def testTrace_cannotExportTraceInGraphMode(self):
with test.mock.patch.object(logging, 'warn') as mock_log:
with context.graph_mode():
summary_ops.trace_export(name='foo', step=1)
self.assertRegexpMatches(
str(mock_log.call_args),
'Can only export trace while executing eagerly.')
@test_util.run_v2_only
def testTrace_usesDefaultStep(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
try:
summary_ops.set_step(42)
event = self.run_trace(f, step=None)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
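# Illustrative usage sketch (comment only; the path below is hypothetical):
#   events = events_from_file('/tmp/logdir/events.out.tfevents.example')
#   events[0].file_version  # always the 'brain.Event:2' marker written first
#   events[1:]              # the Events carrying the actual summary values
# This mirrors the assertions in SummaryWriterTest.testNoSharing above.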
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert gfile.Exists(logdir)
files = gfile.ListDirectory(logdir)
  assert len(files) == 1, 'Expected one file in logdir, found: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
def to_numpy(summary_value):
return tensor_util.MakeNdarray(summary_value.tensor)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/summary_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for set_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sets
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import googletest
_DTYPES = set([
dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8,
dtypes.uint16, dtypes.string
])
def _values(values, dtype):
return np.array(
values,
dtype=(np.unicode if (dtype == dtypes.string) else dtype.as_numpy_dtype))
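# Note: np.unicode is the NumPy alias for the Python str type; it was
# deprecated in NumPy 1.20 and removed in 1.24, where np.str_ is the modern
# spelling. Illustrative results of _values (shown as comments only):
#   _values([1, 2], dtypes.int32)       # -> np.array([1, 2], dtype=np.int32)
#   _values(['a', 'b'], dtypes.string)  # -> np.array(['a', 'b'], dtype='<U1')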
def _constant(values, dtype):
return constant_op.constant(_values(values, dtype), dtype=dtype)
def _dense_to_sparse(dense, dtype):
indices = []
values = []
max_row_len = 0
for row in dense:
max_row_len = max(max_row_len, len(row))
shape = [len(dense), max_row_len]
row_ix = 0
for row in dense:
col_ix = 0
for cell in row:
indices.append([row_ix, col_ix])
values.append(str(cell) if dtype == dtypes.string else cell)
col_ix += 1
row_ix += 1
return sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(values, dtype),
constant_op.constant(shape, dtypes.int64))
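# Illustrative sketch of the conversion above (comments only, not executed):
#   _dense_to_sparse([[1, 9], [1]], dtypes.int32) yields a SparseTensor with
#     indices     = [[0, 0], [0, 1], [1, 0]]
#     values      = [1, 9, 1]
#     dense_shape = [2, 2]   # number of rows x length of the longest row
# For dtypes.string, each cell is first converted with str().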
class SetOpsTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def test_set_size_2d(self):
for dtype in _DTYPES:
self._test_set_size_2d(dtype)
def _test_set_size_2d(self, dtype):
self.assertAllEqual([1], self._set_size(_dense_to_sparse([[1]], dtype)))
self.assertAllEqual([2, 1],
self._set_size(_dense_to_sparse([[1, 9], [1]], dtype)))
self.assertAllEqual(
[3, 0], self._set_size(_dense_to_sparse([[1, 9, 2], []], dtype)))
self.assertAllEqual(
[0, 3], self._set_size(_dense_to_sparse([[], [1, 9, 2]], dtype)))
@test_util.run_deprecated_v1
def test_set_size_duplicates_2d(self):
for dtype in _DTYPES:
self._test_set_size_duplicates_2d(dtype)
def _test_set_size_duplicates_2d(self, dtype):
self.assertAllEqual(
[1], self._set_size(_dense_to_sparse([[1, 1, 1, 1, 1, 1]], dtype)))
self.assertAllEqual([2, 7, 3, 0, 1],
self._set_size(
_dense_to_sparse([[1, 9], [
6, 7, 8, 8, 6, 7, 5, 3, 3, 0, 6, 6, 9, 0, 0, 0
], [999, 1, -1000], [], [-1]], dtype)))
@test_util.run_deprecated_v1
def test_set_size_3d(self):
for dtype in _DTYPES:
self._test_set_size_3d(dtype)
def test_set_size_3d_invalid_indices(self):
for dtype in _DTYPES:
self._test_set_size_3d(dtype, invalid_indices=True)
def _test_set_size_3d(self, dtype, invalid_indices=False):
if invalid_indices:
indices = constant_op.constant([
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
[0, 0, 0], [0, 0, 2], # 0,0
# 2,0
[2, 1, 1] # 2,1
], dtypes.int64)
else:
indices = constant_op.constant([
[0, 0, 0], [0, 0, 2], # 0,0
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
], dtypes.int64)
sp = sparse_tensor_lib.SparseTensor(
indices,
_constant([
1, 9, # 0,0
3, 3, # 0,1
1, # 1,0
9, 7, 8, # 1,1
# 2,0
5 # 2,1
], dtype),
constant_op.constant([3, 2, 3], dtypes.int64))
if invalid_indices:
with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
self._set_size(sp)
else:
self.assertAllEqual([
[2, # 0,0
1], # 0,1
[1, # 1,0
3], # 1,1
[0, # 2,0
1] # 2,1
], self._set_size(sp))
def _set_size(self, sparse_data):
# Validate that we get the same results with or without `validate_indices`.
ops = [
sets.set_size(sparse_data, validate_indices=True),
sets.set_size(sparse_data, validate_indices=False)
]
for op in ops:
self.assertEqual(None, op.get_shape().dims)
self.assertEqual(dtypes.int32, op.dtype)
with self.cached_session() as sess:
results = self.evaluate(ops)
self.assertAllEqual(results[0], results[1])
return results[0]
@test_util.run_deprecated_v1
def test_set_intersection_multirow_2d(self):
for dtype in _DTYPES:
self._test_set_intersection_multirow_2d(dtype)
def _test_set_intersection_multirow_2d(self, dtype):
a_values = [[9, 1, 5], [2, 4, 3]]
b_values = [[1, 9], [1]]
expected_indices = [[0, 0], [0, 1]]
expected_values = _values([1, 9], dtype)
expected_shape = [2, 2]
expected_counts = [2, 0]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
intersection = self._set_intersection(a, sp_b)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
intersection,
dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, sp_b))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
intersection = self._set_intersection(sp_a, sp_b)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
intersection,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_intersection_count(sp_a, sp_b))
@test_util.run_deprecated_v1
def test_dense_set_intersection_multirow_2d(self):
for dtype in _DTYPES:
self._test_dense_set_intersection_multirow_2d(dtype)
def _test_dense_set_intersection_multirow_2d(self, dtype):
a_values = [[9, 1, 5], [2, 4, 3]]
b_values = [[1, 9], [1, 5]]
expected_indices = [[0, 0], [0, 1]]
expected_values = _values([1, 9], dtype)
expected_shape = [2, 2]
expected_counts = [2, 0]
# Dense to dense.
a = _constant(a_values, dtype)
b = _constant(b_values, dtype)
intersection = self._set_intersection(a, b)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
intersection,
dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, b))
@test_util.run_deprecated_v1
def test_set_intersection_duplicates_2d(self):
for dtype in _DTYPES:
self._test_set_intersection_duplicates_2d(dtype)
def _test_set_intersection_duplicates_2d(self, dtype):
a_values = [[1, 1, 3]]
b_values = [[1]]
expected_indices = [[0, 0]]
expected_values = _values([1], dtype)
expected_shape = [1, 1]
expected_counts = [1]
# Dense to dense.
a = _constant(a_values, dtype=dtype)
b = _constant(b_values, dtype=dtype)
intersection = self._set_intersection(a, b)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
intersection,
dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, b))
# Dense to sparse.
sp_b = _dense_to_sparse(b_values, dtype=dtype)
intersection = self._set_intersection(a, sp_b)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
intersection,
dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, sp_b))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
intersection = self._set_intersection(sp_a, sp_b)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
intersection,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_intersection_count(sp_a, sp_b))
@test_util.run_deprecated_v1
def test_set_intersection_3d(self):
for dtype in _DTYPES:
self._test_set_intersection_3d(dtype=dtype)
def test_set_intersection_3d_invalid_indices(self):
for dtype in _DTYPES:
self._test_set_intersection_3d(dtype=dtype, invalid_indices=True)
def _test_set_intersection_3d(self, dtype, invalid_indices=False):
if invalid_indices:
indices = constant_op.constant(
[
[0, 1, 0],
[0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0],
[1, 1, 1],
[1, 1, 2], # 1,1
[0, 0, 0],
[0, 0, 2], # 0,0
# 2,0
[2, 1, 1] # 2,1
# 3,*
],
dtypes.int64)
else:
indices = constant_op.constant(
[
[0, 0, 0],
[0, 0, 2], # 0,0
[0, 1, 0],
[0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0],
[1, 1, 1],
[1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
# 3,*
],
dtypes.int64)
sp_a = sparse_tensor_lib.SparseTensor(
indices,
_constant(
[
1,
9, # 0,0
3,
3, # 0,1
1, # 1,0
9,
7,
8, # 1,1
# 2,0
5 # 2,1
# 3,*
],
dtype),
constant_op.constant([4, 2, 3], dtypes.int64))
sp_b = sparse_tensor_lib.SparseTensor(
constant_op.constant(
[
[0, 0, 0],
[0, 0, 3], # 0,0
# 0,1
[1, 0, 0], # 1,0
[1, 1, 0],
[1, 1, 1], # 1,1
[2, 0, 1], # 2,0
[2, 1, 1], # 2,1
[3, 0, 0], # 3,0
[3, 1, 0] # 3,1
],
dtypes.int64),
_constant(
[
1,
3, # 0,0
# 0,1
3, # 1,0
7,
8, # 1,1
2, # 2,0
5, # 2,1
4, # 3,0
4 # 3,1
],
dtype),
constant_op.constant([4, 2, 4], dtypes.int64))
if invalid_indices:
with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
self._set_intersection(sp_a, sp_b)
else:
expected_indices = [
[0, 0, 0], # 0,0
# 0,1
# 1,0
[1, 1, 0],
[1, 1, 1], # 1,1
# 2,0
[2, 1, 0], # 2,1
# 3,*
]
expected_values = _values(
[
1, # 0,0
# 0,1
# 1,0
7,
8, # 1,1
# 2,0
5, # 2,1
# 3,*
],
dtype)
expected_shape = [4, 2, 2]
expected_counts = [
[
1, # 0,0
0 # 0,1
],
[
0, # 1,0
2 # 1,1
],
[
0, # 2,0
1 # 2,1
],
[
0, # 3,0
0 # 3,1
]
]
# Sparse to sparse.
intersection = self._set_intersection(sp_a, sp_b)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
intersection,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_intersection_count(sp_a, sp_b))
# NOTE: sparse_to_dense doesn't support uint8 and uint16.
if dtype not in [dtypes.uint8, dtypes.uint16]:
# Dense to sparse.
a = math_ops.cast(
sparse_ops.sparse_to_dense(
sp_a.indices,
sp_a.dense_shape,
sp_a.values,
default_value="-1" if dtype == dtypes.string else -1),
dtype=dtype)
intersection = self._set_intersection(a, sp_b)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
intersection,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_intersection_count(a, sp_b))
# Dense to dense.
b = math_ops.cast(
sparse_ops.sparse_to_dense(
sp_b.indices,
sp_b.dense_shape,
sp_b.values,
default_value="-2" if dtype == dtypes.string else -2),
dtype=dtype)
intersection = self._set_intersection(a, b)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
intersection,
dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, b))
def _assert_static_shapes(self, input_tensor, result_sparse_tensor):
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
sparse_shape_dims = input_tensor.dense_shape.get_shape().dims
if sparse_shape_dims is None:
expected_rank = None
else:
expected_rank = sparse_shape_dims[0].value
else:
expected_rank = input_tensor.get_shape().ndims
self.assertAllEqual((None, expected_rank),
result_sparse_tensor.indices.get_shape().as_list())
self.assertAllEqual((None,),
result_sparse_tensor.values.get_shape().as_list())
self.assertAllEqual((expected_rank,),
result_sparse_tensor.dense_shape.get_shape().as_list())
def _run_equivalent_set_ops(self, ops):
"""Assert all ops return the same shapes, and return 1st result."""
# Collect shapes and results for all ops, and assert static shapes match.
dynamic_indices_shape_ops = []
dynamic_values_shape_ops = []
static_indices_shape = None
static_values_shape = None
with self.cached_session() as sess:
for op in ops:
if static_indices_shape is None:
static_indices_shape = op.indices.get_shape()
else:
self.assertAllEqual(
static_indices_shape.as_list(), op.indices.get_shape().as_list())
if static_values_shape is None:
static_values_shape = op.values.get_shape()
else:
self.assertAllEqual(
static_values_shape.as_list(), op.values.get_shape().as_list())
dynamic_indices_shape_ops.append(array_ops.shape(op.indices))
dynamic_values_shape_ops.append(array_ops.shape(op.values))
results = sess.run(
list(ops) + dynamic_indices_shape_ops + dynamic_values_shape_ops)
op_count = len(ops)
op_results = results[0:op_count]
dynamic_indices_shapes = results[op_count:2 * op_count]
dynamic_values_shapes = results[2 * op_count:3 * op_count]
# Assert static and dynamic tensor shapes, and result shapes, are all
# consistent.
static_indices_shape.assert_is_compatible_with(dynamic_indices_shapes[0])
static_values_shape.assert_is_compatible_with(dynamic_values_shapes[0])
self.assertAllEqual(dynamic_indices_shapes[0], op_results[0].indices.shape)
self.assertAllEqual(dynamic_values_shapes[0], op_results[0].values.shape)
# Assert dynamic shapes and values are the same for all ops.
for i in range(1, len(ops)):
self.assertAllEqual(dynamic_indices_shapes[0], dynamic_indices_shapes[i])
self.assertAllEqual(dynamic_values_shapes[0], dynamic_values_shapes[i])
self.assertAllEqual(op_results[0].indices, op_results[i].indices)
self.assertAllEqual(op_results[0].values, op_results[i].values)
self.assertAllEqual(op_results[0].dense_shape, op_results[i].dense_shape)
return op_results[0]
def _set_intersection(self, a, b):
# Validate that we get the same results with or without `validate_indices`,
# and with a & b swapped.
ops = (
sets.set_intersection(
a, b, validate_indices=True),
sets.set_intersection(
a, b, validate_indices=False),
sets.set_intersection(
b, a, validate_indices=True),
sets.set_intersection(
b, a, validate_indices=False),)
for op in ops:
self._assert_static_shapes(a, op)
return self._run_equivalent_set_ops(ops)
def _set_intersection_count(self, a, b):
op = sets.set_size(sets.set_intersection(a, b))
with self.cached_session() as sess:
return self.evaluate(op)
@test_util.run_deprecated_v1
def test_set_difference_multirow_2d(self):
for dtype in _DTYPES:
self._test_set_difference_multirow_2d(dtype)
def _test_set_difference_multirow_2d(self, dtype):
a_values = [[1, 1, 1], [1, 5, 9], [4, 5, 3], [5, 5, 1]]
b_values = [[], [1, 2], [1, 2, 2], []]
# a - b.
expected_indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0],
[3, 1]]
expected_values = _values([1, 5, 9, 3, 4, 5, 1, 5], dtype)
expected_shape = [4, 3]
expected_counts = [1, 2, 3, 2]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
difference = self._set_difference(a, sp_b, True)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, sp_b, True))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
difference = self._set_difference(sp_a, sp_b, True)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, True))
# b - a.
expected_indices = [[1, 0], [2, 0], [2, 1]]
expected_values = _values([2, 1, 2], dtype)
expected_shape = [4, 2]
expected_counts = [0, 1, 2, 0]
# Dense to sparse.
difference = self._set_difference(a, sp_b, False)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, sp_b, False))
# Sparse to sparse.
difference = self._set_difference(sp_a, sp_b, False)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, False))
@test_util.run_deprecated_v1
def test_dense_set_difference_multirow_2d(self):
for dtype in _DTYPES:
self._test_dense_set_difference_multirow_2d(dtype)
def _test_dense_set_difference_multirow_2d(self, dtype):
a_values = [[1, 5, 9], [4, 5, 3]]
b_values = [[1, 2, 6], [1, 2, 2]]
# a - b.
expected_indices = [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]]
expected_values = _values([5, 9, 3, 4, 5], dtype)
expected_shape = [2, 3]
expected_counts = [2, 3]
# Dense to dense.
a = _constant(a_values, dtype=dtype)
b = _constant(b_values, dtype=dtype)
difference = self._set_difference(a, b, True)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts, self._set_difference_count(a, b, True))
# b - a.
expected_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
expected_values = _values([2, 6, 1, 2], dtype)
expected_shape = [2, 2]
expected_counts = [2, 2]
# Dense to dense.
difference = self._set_difference(a, b, False)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, b, False))
@test_util.run_deprecated_v1
def test_sparse_set_difference_multirow_2d(self):
for dtype in _DTYPES:
self._test_sparse_set_difference_multirow_2d(dtype)
def _test_sparse_set_difference_multirow_2d(self, dtype):
sp_a = _dense_to_sparse(
[[], [1, 5, 9], [4, 5, 3, 3, 4, 5], [5, 1]], dtype=dtype)
sp_b = _dense_to_sparse([[], [1, 2], [1, 2, 2], []], dtype=dtype)
# a - b.
expected_indices = [[1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0], [3, 1]]
expected_values = _values([5, 9, 3, 4, 5, 1, 5], dtype)
expected_shape = [4, 3]
expected_counts = [0, 2, 3, 2]
difference = self._set_difference(sp_a, sp_b, True)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, True))
# b - a.
expected_indices = [[1, 0], [2, 0], [2, 1]]
expected_values = _values([2, 1, 2], dtype)
expected_shape = [4, 2]
expected_counts = [0, 1, 2, 0]
difference = self._set_difference(sp_a, sp_b, False)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, False))
@test_util.run_deprecated_v1
def test_set_difference_duplicates_2d(self):
for dtype in _DTYPES:
self._test_set_difference_duplicates_2d(dtype)
def _test_set_difference_duplicates_2d(self, dtype):
a_values = [[1, 1, 3]]
b_values = [[1, 2, 2]]
# a - b.
expected_indices = [[0, 0]]
expected_values = _values([3], dtype)
expected_shape = [1, 1]
expected_counts = [1]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
difference = self._set_difference(a, sp_b, True)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, sp_b, True))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
difference = self._set_difference(sp_a, sp_b, True)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
                        self._set_difference_count(sp_a, sp_b, True))
# b - a.
expected_indices = [[0, 0]]
expected_values = _values([2], dtype)
expected_shape = [1, 1]
expected_counts = [1]
# Dense to sparse.
difference = self._set_difference(a, sp_b, False)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, sp_b, False))
# Sparse to sparse.
difference = self._set_difference(sp_a, sp_b, False)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
                        self._set_difference_count(sp_a, sp_b, False))
@test_util.run_deprecated_v1
def test_sparse_set_difference_3d(self):
for dtype in _DTYPES:
self._test_sparse_set_difference_3d(dtype)
def test_sparse_set_difference_3d_invalid_indices(self):
for dtype in _DTYPES:
self._test_sparse_set_difference_3d(dtype, invalid_indices=True)
def _test_sparse_set_difference_3d(self, dtype, invalid_indices=False):
if invalid_indices:
indices = constant_op.constant(
[
[0, 1, 0],
[0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0],
[1, 1, 1],
[1, 1, 2], # 1,1
[0, 0, 0],
[0, 0, 2], # 0,0
# 2,0
[2, 1, 1] # 2,1
# 3,*
],
dtypes.int64)
else:
indices = constant_op.constant(
[
[0, 0, 0],
[0, 0, 2], # 0,0
[0, 1, 0],
[0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0],
[1, 1, 1],
[1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
# 3,*
],
dtypes.int64)
sp_a = sparse_tensor_lib.SparseTensor(
indices,
_constant(
[
1,
9, # 0,0
3,
3, # 0,1
1, # 1,0
9,
7,
8, # 1,1
# 2,0
5 # 2,1
# 3,*
],
dtype),
constant_op.constant([4, 2, 3], dtypes.int64))
sp_b = sparse_tensor_lib.SparseTensor(
constant_op.constant(
[
[0, 0, 0],
[0, 0, 3], # 0,0
# 0,1
[1, 0, 0], # 1,0
[1, 1, 0],
[1, 1, 1], # 1,1
[2, 0, 1], # 2,0
[2, 1, 1], # 2,1
[3, 0, 0], # 3,0
[3, 1, 0] # 3,1
],
dtypes.int64),
_constant(
[
1,
3, # 0,0
# 0,1
3, # 1,0
7,
8, # 1,1
2, # 2,0
5, # 2,1
4, # 3,0
4 # 3,1
],
dtype),
constant_op.constant([4, 2, 4], dtypes.int64))
if invalid_indices:
with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
self._set_difference(sp_a, sp_b, False)
with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
self._set_difference(sp_a, sp_b, True)
else:
# a-b
expected_indices = [
[0, 0, 0], # 0,0
[0, 1, 0], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], # 1,1
# 2,*
# 3,*
]
expected_values = _values(
[
9, # 0,0
3, # 0,1
1, # 1,0
9, # 1,1
# 2,*
# 3,*
],
dtype)
expected_shape = [4, 2, 1]
expected_counts = [
[
1, # 0,0
1 # 0,1
],
[
1, # 1,0
1 # 1,1
],
[
0, # 2,0
0 # 2,1
],
[
0, # 3,0
0 # 3,1
]
]
difference = self._set_difference(sp_a, sp_b, True)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b))
# b-a
expected_indices = [
[0, 0, 0], # 0,0
# 0,1
[1, 0, 0], # 1,0
# 1,1
[2, 0, 0], # 2,0
# 2,1
[3, 0, 0], # 3,0
[3, 1, 0] # 3,1
]
expected_values = _values(
[
3, # 0,0
# 0,1
3, # 1,0
# 1,1
2, # 2,0
# 2,1
4, # 3,0
4, # 3,1
],
dtype)
expected_shape = [4, 2, 1]
expected_counts = [
[
1, # 0,0
0 # 0,1
],
[
1, # 1,0
0 # 1,1
],
[
1, # 2,0
0 # 2,1
],
[
1, # 3,0
1 # 3,1
]
]
difference = self._set_difference(sp_a, sp_b, False)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, False))
def _set_difference(self, a, b, aminusb=True):
# Validate that we get the same results with or without `validate_indices`,
# and with a & b swapped.
ops = (
sets.set_difference(
a, b, aminusb=aminusb, validate_indices=True),
sets.set_difference(
a, b, aminusb=aminusb, validate_indices=False),
sets.set_difference(
b, a, aminusb=not aminusb, validate_indices=True),
sets.set_difference(
b, a, aminusb=not aminusb, validate_indices=False),)
for op in ops:
self._assert_static_shapes(a, op)
return self._run_equivalent_set_ops(ops)
def _set_difference_count(self, a, b, aminusb=True):
op = sets.set_size(sets.set_difference(a, b, aminusb))
with self.cached_session() as sess:
return self.evaluate(op)
@test_util.run_deprecated_v1
def test_set_union_multirow_2d(self):
for dtype in _DTYPES:
self._test_set_union_multirow_2d(dtype)
def _test_set_union_multirow_2d(self, dtype):
a_values = [[9, 1, 5], [2, 4, 3]]
b_values = [[1, 9], [1]]
expected_indices = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [1, 3]]
expected_values = _values([1, 5, 9, 1, 2, 3, 4], dtype)
expected_shape = [2, 4]
expected_counts = [3, 4]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
union = self._set_union(a, sp_b)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, union, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_union_count(a, sp_b))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
union = self._set_union(sp_a, sp_b)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, union, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_union_count(sp_a, sp_b))
@test_util.run_deprecated_v1
def test_dense_set_union_multirow_2d(self):
for dtype in _DTYPES:
self._test_dense_set_union_multirow_2d(dtype)
def _test_dense_set_union_multirow_2d(self, dtype):
a_values = [[9, 1, 5], [2, 4, 3]]
b_values = [[1, 9], [1, 2]]
expected_indices = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [1, 3]]
expected_values = _values([1, 5, 9, 1, 2, 3, 4], dtype)
expected_shape = [2, 4]
expected_counts = [3, 4]
# Dense to dense.
a = _constant(a_values, dtype=dtype)
b = _constant(b_values, dtype=dtype)
union = self._set_union(a, b)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, union, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_union_count(a, b))
@test_util.run_deprecated_v1
def test_set_union_duplicates_2d(self):
for dtype in _DTYPES:
self._test_set_union_duplicates_2d(dtype)
def _test_set_union_duplicates_2d(self, dtype):
a_values = [[1, 1, 3]]
b_values = [[1]]
expected_indices = [[0, 0], [0, 1]]
expected_values = _values([1, 3], dtype)
expected_shape = [1, 2]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
union = self._set_union(a, sp_b)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, union, dtype=dtype)
self.assertAllEqual([2], self._set_union_count(a, sp_b))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
union = self._set_union(sp_a, sp_b)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, union, dtype=dtype)
self.assertAllEqual([2], self._set_union_count(sp_a, sp_b))
@test_util.run_deprecated_v1
def test_sparse_set_union_3d(self):
for dtype in _DTYPES:
self._test_sparse_set_union_3d(dtype)
def test_sparse_set_union_3d_invalid_indices(self):
for dtype in _DTYPES:
self._test_sparse_set_union_3d(dtype, invalid_indices=True)
def _test_sparse_set_union_3d(self, dtype, invalid_indices=False):
if invalid_indices:
indices = constant_op.constant(
[
[0, 1, 0],
[0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[0, 0, 0],
[0, 0, 2], # 0,0
[1, 1, 0],
[1, 1, 1],
[1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
# 3,*
],
dtypes.int64)
else:
indices = constant_op.constant(
[
[0, 0, 0],
[0, 0, 2], # 0,0
[0, 1, 0],
[0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0],
[1, 1, 1],
[1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
# 3,*
],
dtypes.int64)
sp_a = sparse_tensor_lib.SparseTensor(
indices,
_constant(
[
1,
9, # 0,0
3,
3, # 0,1
1, # 1,0
9,
7,
8, # 1,1
# 2,0
5 # 2,1
# 3,*
],
dtype),
constant_op.constant([4, 2, 3], dtypes.int64))
sp_b = sparse_tensor_lib.SparseTensor(
constant_op.constant(
[
[0, 0, 0],
[0, 0, 3], # 0,0
# 0,1
[1, 0, 0], # 1,0
[1, 1, 0],
[1, 1, 1], # 1,1
[2, 0, 1], # 2,0
[2, 1, 1], # 2,1
[3, 0, 0], # 3,0
[3, 1, 0] # 3,1
],
dtypes.int64),
_constant(
[
1,
3, # 0,0
# 0,1
3, # 1,0
7,
8, # 1,1
2, # 2,0
5, # 2,1
4, # 3,0
4 # 3,1
],
dtype),
constant_op.constant([4, 2, 4], dtypes.int64))
if invalid_indices:
with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
self._set_union(sp_a, sp_b)
else:
expected_indices = [
[0, 0, 0],
[0, 0, 1],
[0, 0, 2], # 0,0
[0, 1, 0], # 0,1
[1, 0, 0],
[1, 0, 1], # 1,0
[1, 1, 0],
[1, 1, 1],
[1, 1, 2], # 1,1
[2, 0, 0], # 2,0
[2, 1, 0], # 2,1
[3, 0, 0], # 3,0
[3, 1, 0], # 3,1
]
expected_values = _values(
[
1,
3,
9, # 0,0
3, # 0,1
1,
3, # 1,0
7,
8,
9, # 1,1
2, # 2,0
5, # 2,1
4, # 3,0
4, # 3,1
],
dtype)
expected_shape = [4, 2, 3]
expected_counts = [
[
3, # 0,0
1 # 0,1
],
[
2, # 1,0
3 # 1,1
],
[
1, # 2,0
1 # 2,1
],
[
1, # 3,0
1 # 3,1
]
]
intersection = self._set_union(sp_a, sp_b)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
intersection,
dtype=dtype)
self.assertAllEqual(expected_counts, self._set_union_count(sp_a, sp_b))
def _set_union(self, a, b):
# Validate that we get the same results with or without `validate_indices`,
# and with a & b swapped.
ops = (
sets.set_union(
a, b, validate_indices=True),
sets.set_union(
a, b, validate_indices=False),
sets.set_union(
b, a, validate_indices=True),
sets.set_union(
b, a, validate_indices=False),)
for op in ops:
self._assert_static_shapes(a, op)
return self._run_equivalent_set_ops(ops)
def _set_union_count(self, a, b):
op = sets.set_size(sets.set_union(a, b))
with self.cached_session() as sess:
return self.evaluate(op)
def _assert_set_operation(self, expected_indices, expected_values,
expected_shape, sparse_tensor_value, dtype):
self.assertAllEqual(expected_indices, sparse_tensor_value.indices)
self.assertAllEqual(len(expected_indices), len(expected_values))
self.assertAllEqual(len(expected_values), len(sparse_tensor_value.values))
expected_set = set()
actual_set = set()
last_indices = None
for indices, expected_value, actual_value in zip(
expected_indices, expected_values, sparse_tensor_value.values):
if dtype == dtypes.string:
actual_value = actual_value.decode("utf-8")
if last_indices and (last_indices[:-1] != indices[:-1]):
self.assertEqual(expected_set, actual_set,
"Expected %s, got %s, at %s." % (expected_set,
actual_set, indices))
expected_set.clear()
actual_set.clear()
expected_set.add(expected_value)
actual_set.add(actual_value)
last_indices = indices
self.assertEqual(expected_set, actual_set,
"Expected %s, got %s, at %s." % (expected_set, actual_set,
last_indices))
self.assertAllEqual(expected_shape, sparse_tensor_value.dense_shape)
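# Comment sketch of the comparison performed by _assert_set_operation above:
# values are grouped by every index dimension except the last, and each group
# is compared as a set, so ordering within a row does not matter. E.g. for
# expected_indices [[0, 0], [0, 1], [1, 0]] and expected_values [1, 9, 1], the
# groups are row [0] -> {1, 9} and row [1] -> {1}.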
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sets_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for summary V1 audio op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
class SummaryV1AudioOpTest(test.TestCase):
def _AsSummary(self, s):
summ = summary_pb2.Summary()
summ.ParseFromString(s)
return summ
def _CheckProto(self, audio_summ, sample_rate, num_channels, length_frames):
"""Verify that the non-audio parts of the audio_summ proto match shape."""
# Only the first 3 sounds are returned.
for v in audio_summ.value:
v.audio.ClearField("encoded_audio_string")
expected = "\n".join("""
value {
tag: "snd/audio/%d"
audio { content_type: "audio/wav" sample_rate: %d
num_channels: %d length_frames: %d }
}""" % (i, sample_rate, num_channels, length_frames) for i in xrange(3))
self.assertProtoEquals(expected, audio_summ)
def testAudioSummary(self):
np.random.seed(7)
for channels in (1, 2, 5, 8):
with self.session(graph=ops.Graph()) as sess:
num_frames = 7
shape = (4, num_frames, channels)
# Generate random audio in the range [-1.0, 1.0).
const = 2.0 * np.random.random(shape) - 1.0
# Summarize
sample_rate = 8000
summ = summary.audio(
"snd", const, max_outputs=3, sample_rate=sample_rate)
value = self.evaluate(summ)
self.assertEqual([], summ.get_shape())
audio_summ = self._AsSummary(value)
# Check the rest of the proto
self._CheckProto(audio_summ, sample_rate, channels, num_frames)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/summary_v1_audio_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.nn_ops.Cross."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class CrossOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testGradientRandomValues(self):
with self.cached_session():
us = [2, 3]
u = array_ops.reshape(
[0.854, -0.616, 0.767, 0.725, -0.927, 0.159], shape=us)
v = array_ops.reshape(
[-0.522, 0.755, 0.407, -0.652, 0.241, 0.247], shape=us)
s = math_ops.cross(u, v)
jacob_u, jacob_v = gradient_checker.compute_gradient([u, v], [us, us], s,
us)
self.assertAllClose(jacob_u[0], jacob_u[1], rtol=1e-3, atol=1e-3)
self.assertAllClose(jacob_v[0], jacob_v[1], rtol=1e-3, atol=1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/cross_grad_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for scalar strictness and scalar leniency."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class ScalarTest(test.TestCase):
def check(self, op, args, error, correct=None):
# Within Google, the switch to scalar strict occurred at version 6.
lenient = []
strict = [5, 6]
# Use placeholders to bypass shape inference, since only the C++
# GraphDef level is ever scalar lenient.
def placeholders(args, feed):
if isinstance(args, tuple):
return [placeholders(x, feed) for x in args]
else:
x = ops.convert_to_tensor(args).eval()
fake = array_ops.placeholder(np.asarray(x).dtype)
feed[fake] = x
return fake
# Test various GraphDef versions
for version in strict + lenient:
with ops.Graph().as_default() as g:
test_util.set_producer_version(g, version)
with self.session(graph=g) as sess:
feed = {}
xs = placeholders(args, feed)
x = op(*xs)
if version in strict:
with self.assertRaisesOpError(error):
sess.run(x, feed_dict=feed)
else:
r = sess.run(x, feed_dict=feed)
if correct is not None:
self.assertAllEqual(r, correct)
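  # Illustrative call pattern for check() above, mirroring testFill below:
  # under a scalar-strict producer version the op must raise an error whose
  # message contains `error`; under a lenient version (none are listed here,
  # so that branch is effectively dormant) it would have to evaluate to
  # `correct`.
  #   self.check(array_ops.fill, (2, 3), 'dims must be a vector', [3, 3])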
def testConcat(self):
self.check(array_ops.concat, (([2], [3], [7]), [0]),
'axis tensor should be a scalar integer', [2, 3, 7])
for data in (2, 3, 7), (2, [3], 7), (2, 3, [7]):
self.check(array_ops.concat, (data, 0),
r'Expected \w+ dimensions in the range \[0, 0\)', [2, 3, 7])
for data in ([2], 3, 7), ([2], [3], 7):
self.check(array_ops.concat, (data, 0),
r'Ranks of all input tensors should match', [2, 3, 7])
def testFill(self):
self.check(array_ops.fill, (2, 3), 'dims must be a vector', [3, 3])
self.check(array_ops.fill, ([2], [3]), 'value must be a scalar', [3, 3])
def testPad(self):
self.check(array_ops.pad, (7, [[1, 2]]),
'The first dimension of paddings must be the rank of inputs',
[0, 7, 0, 0])
def testRandom(self):
self.check(random_ops.random_uniform, (3,), 'shape must be a vector')
def testReshape(self):
self.check(array_ops.reshape, (7, 1), 'sizes input must be 1-D', [7])
def testShardedFilename(self):
self.check(gen_io_ops.sharded_filename, ('foo', 4, [100]),
'must be a scalar', b'foo-00004-of-00100')
def testShardedFilespec(self):
self.check(gen_io_ops.sharded_filespec, ('foo', [100]), 'must be a scalar',
b'foo-?????-of-00100')
def testUnsortedSegmentSum(self):
self.check(math_ops.unsorted_segment_sum, (7, 1, [4]),
'num_segments should be a scalar', [0, 7, 0, 0])
def testRange(self):
self.check(math_ops.range, ([0], 3, 2), 'start must be a scalar', [0, 2])
self.check(math_ops.range, (0, [3], 2), 'limit must be a scalar', [0, 2])
self.check(math_ops.range, (0, 3, [2]), 'delta must be a scalar', [0, 2])
def testSlice(self):
data = np.arange(10)
error = 'Expected begin and size arguments to be 1-D tensors'
self.check(array_ops.slice, (data, 2, 3), error, [2, 3, 4])
self.check(array_ops.slice, (data, [2], 3), error, [2, 3, 4])
self.check(array_ops.slice, (data, 2, [3]), error, [2, 3, 4])
def testSparseToDense(self):
self.check(sparse_ops.sparse_to_dense, (1, 4, 7),
'output_shape should be a vector', [0, 7, 0, 0])
def testTile(self):
self.check(array_ops.tile, ([7], 2), 'Expected multiples to be 1-D', [7, 7])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/scalar_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test as test_lib
_MAXDIM = 5
def _add_test(test, test_name, fn):
test_name = "_".join(["test", test_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
class TensordotTest(test_lib.TestCase):
@test_util.run_v1_only("b/120545219")
def test_invalid_shape(self):
a = [[1, 2], [3, 4]]
b = [[1, 2], [3, 4], [5, 6]]
a_axes = [1]
b_axes = [0]
# Invalid static shapes.
with self.assertRaises(ValueError):
math_ops.tensordot(a, b, (a_axes, b_axes))
# Invalid dynamic shapes.
with self.cached_session() as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Matrix size-incompatible"):
a_ph = array_ops.placeholder(dtypes.float32)
b_ph = array_ops.placeholder(dtypes.float32)
axes_ph = array_ops.placeholder(dtypes.int32)
output = math_ops.tensordot(a_ph, b_ph, axes_ph)
_ = sess.run(
[output], feed_dict={
a_ph: a,
b_ph: b,
axes_ph: (a_axes, b_axes)
})
@test_util.run_v1_only("b/120545219")
def test_invalid_axes(self):
a = [[1, 2], [3, 4]]
b = [[1, 2], [3, 4]]
# Invalid static axes.
for axes_value in -1, 3, [1], [[1]], [[1], [0, 1]]:
with self.assertRaises(ValueError):
math_ops.tensordot(a, b, axes_value)
with self.assertRaises(IndexError):
math_ops.tensordot(a, b, [[0], [7]])
# Invalid dynamic axes.
a_ph = array_ops.placeholder(dtypes.float32)
b_ph = array_ops.placeholder(dtypes.float32)
axes_ph = array_ops.placeholder(dtypes.int32)
output = math_ops.tensordot(a_ph, b_ph, axes_ph)
# Note: We don't support scalar Tensor values for axes.
for axes_value in 1, [1], [0, 1], [[1]], [[0, 1]], [[0], [7]]:
with self.cached_session() as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
_ = sess.run(
[output], feed_dict={
a_ph: a,
b_ph: b,
axes_ph: axes_value
})
# Test case for 11950
def test_valid_axis(self):
for axes_value in [1, 2], [[1], [2]], [[], []], 0:
with self.cached_session():
np_a = np.ones((3, 3))
np_b = np.array([2, 3, 1])[None, None]
np_ans = np.tensordot(np_a, np_b, axes_value)
tf_a = array_ops.ones((3, 3), dtype=dtypes.float32)
tf_b = constant_op.constant([2, 3, 1], dtype=dtypes.float32)[None, None]
tf_ans = math_ops.tensordot(tf_a, tf_b, axes_value)
self.assertAllEqual(tf_ans.shape, np_ans.shape)
self.assertAllEqual(tf_ans, np_ans)
@test_util.run_v1_only("b/120545219")
def test_partial_shape_inference(self):
for axes in ([1], [0]), 1:
a = array_ops.placeholder(dtypes.float32)
b = array_ops.placeholder(dtypes.float32)
output = math_ops.tensordot(a, b, axes)
self.assertEqual(output.get_shape().ndims, None)
a.set_shape([None, 2])
b.set_shape([2, 3])
output = math_ops.tensordot(a, b, axes)
output_shape = output.get_shape()
self.assertEqual(output_shape.ndims, 2)
output_shape = output_shape.as_list()
self.assertEqual(output_shape[0], None)
self.assertEqual(output_shape[1], 3)
a = array_ops.placeholder(dtypes.float32)
b = array_ops.placeholder(dtypes.float32)
a.set_shape([2, 2])
b.set_shape([2, None])
output = math_ops.tensordot(a, b, axes)
output_shape = output.get_shape()
self.assertEqual(output_shape.ndims, 2)
output_shape = output_shape.as_list()
self.assertEqual(output_shape[0], 2)
self.assertEqual(output_shape[1], None)
def _get_tensordot_tests(dtype_, rank_a_, rank_b_, num_dims_, dynamic_shape_):
# Select a random subset of size m from [0, 1, ..., n-1].
def _random_subset(m, n):
assert m <= n
return (np.random.permutation(n)[:m]).astype(np.int32)
def _generate_random_tensors_and_dims():
a_shape = np.random.random_integers(1, _MAXDIM, rank_a_)
b_shape = np.random.random_integers(1, _MAXDIM, rank_b_)
shared_shape = np.random.random_integers(1, _MAXDIM, num_dims_)
a_dims = _random_subset(num_dims_, rank_a_)
b_dims = _random_subset(num_dims_, rank_b_)
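    # Give the contracted axes of a and b identical sizes so that
    # tensordot over (a_dims, b_dims) is well defined.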
for i in range(num_dims_):
a_shape[a_dims[i]] = shared_shape[i]
b_shape[b_dims[i]] = shared_shape[i]
a = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(a_shape)).reshape(a_shape).astype(dtype_)
b = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(b_shape)).reshape(b_shape).astype(dtype_)
return a, b, a_dims, b_dims
def test_tensordot(self):
num_trials = min(30, num_dims_ * num_dims_)
if dtype_ == np.float16:
tol = 0.05
elif dtype_ == np.float32 or dtype_ == np.complex64:
tol = 1e-5
else:
tol = 1e-12
for _ in range(num_trials):
a_np, b_np, a_dims_np, b_dims_np = _generate_random_tensors_and_dims()
np_ans = np.tensordot(a_np, b_np, axes=(a_dims_np, b_dims_np))
with self.cached_session(use_gpu=True) as sess:
if dynamic_shape_:
a = array_ops.placeholder(dtype_)
b = array_ops.placeholder(dtype_)
axes = array_ops.placeholder(dtypes.int32)
c = math_ops.tensordot(a, b, axes)
tf_ans = sess.run(
c, feed_dict={
a: a_np,
b: b_np,
axes: (a_dims_np, b_dims_np)
})
else:
tf_ans = math_ops.tensordot(a_np, b_np, (a_dims_np, b_dims_np))
self.assertAllClose(tf_ans, np_ans, rtol=tol, atol=tol)
self.assertAllEqual(tf_ans.shape, np_ans.shape)
def test_tensordot_scalar_axes(self):
if num_dims_ < 1:
self.skipTest("Not a test")
if dtype_ == np.float16:
tol = 0.05
elif dtype_ == np.float32 or dtype_ == np.complex64:
tol = 1e-5
else:
tol = 1e-12
shape = [5] * num_dims_
a_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype_)
b_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype_)
all_axes = [0, 1]
if a_np.ndim > 2:
all_axes.append(a_np.ndim - 1)
for axes in all_axes:
np_ans = np.tensordot(a_np, b_np, axes=axes)
with self.cached_session(use_gpu=True) as sess:
if dynamic_shape_:
a = array_ops.placeholder(dtype_)
b = array_ops.placeholder(dtype_)
c = math_ops.tensordot(a, b, axes=axes)
tf_ans = sess.run(c, feed_dict={a: a_np, b: b_np})
else:
tf_ans = math_ops.tensordot(a_np, b_np, axes=axes)
self.assertAllClose(tf_ans, np_ans, rtol=tol, atol=tol)
self.assertAllEqual(tf_ans.shape, np_ans.shape)
return [test_tensordot, test_tensordot_scalar_axes]
if __name__ == "__main__":
for dtype in np.float16, np.float32, np.float64, np.complex64, np.complex128:
for rank_a in 1, 2, 4, 5:
for rank_b in 1, 2, 4, 5:
for num_dims in range(0, min(rank_a, rank_b) + 1):
# TF2 does not support placeholders under eager so we skip it
for dynamic_shape in set([False, not tf2.enabled()]):
for testcase in _get_tensordot_tests(dtype, rank_a, rank_b,
num_dims, dynamic_shape):
name = "%s_%s_%s_%s_%s_%s" % (testcase.__name__, dtype.__name__,
rank_a, rank_b, num_dims,
dynamic_shape)
_add_test(TensordotTest, name, testcase)
test_lib.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/tensordot_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseSoftmaxCrossEntropyWithLogits op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import app
from tensorflow.python.platform import test
class SparseXentTest(test.TestCase):
def _npXent(self, features, labels):
features = np.reshape(features, [-1, features.shape[-1]])
labels = np.reshape(labels, [-1])
batch_dim = 0
class_dim = 1
batch_size = features.shape[batch_dim]
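    # Numerically stable softmax: subtract the per-row max before
    # exponentiating, then normalize each row to sum to one.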
e = np.exp(features - np.reshape(
np.amax(
features, axis=class_dim), [batch_size, 1]))
probs = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])
labels_mat = np.zeros_like(probs).astype(probs.dtype)
labels_mat[np.arange(batch_size), labels] = 1.0
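    # Gradient of the cross-entropy loss w.r.t. the logits is
    # softmax(features) - one_hot(labels); the loss itself is
    # -log(probability assigned to the true class).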
bp = (probs - labels_mat)
l = -np.sum(labels_mat * np.log(probs + 1.0e-20), axis=1)
return l, bp
def _testXent(self, np_features, np_labels):
np_loss, np_backprop = self._npXent(np_features, np_labels)
with self.cached_session(use_gpu=True) as sess:
loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
np_features, np_labels)
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def testSingleClass(self):
for label_dtype in np.int32, np.int64:
with self.cached_session(use_gpu=True) as sess:
loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
np.array([[1.], [-1.], [0.]]).astype(np.float32),
np.array([0, 0, 0]).astype(label_dtype))
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
self.assertAllClose([[0.0], [0.0], [0.0]], tf_backprop)
@test_util.run_deprecated_v1
def testInvalidLabel(self):
features = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],
[1., 2., 3., 4.]]
labels = [4, 3, 0, -1]
if test.is_built_with_cuda() and test.is_gpu_available():
with self.session(use_gpu=True) as sess:
loss, backprop = (
gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
features, labels))
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllClose(
[[np.nan] * 4, [0.25, 0.25, 0.25, -0.75],
[-0.968, 0.087, 0.237, 0.6439], [np.nan] * 4],
tf_backprop,
rtol=1e-3,
atol=1e-3)
self.assertAllClose(
[np.nan, 1.3862, 3.4420, np.nan], tf_loss, rtol=1e-3, atol=1e-3)
with self.session(use_gpu=False) as sess:
loss, backprop = (
gen_nn_ops.sparse_softmax_cross_entropy_with_logits(features, labels))
with self.assertRaisesOpError("Received a label value of"):
self.evaluate([loss, backprop])
def testNpXent(self):
# We create 2 batches of logits for testing.
# batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.
# batch 1 has a bit of difference: 1, 2, 3, 4, with target 0.
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
labels = [3, 0]
# For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25
# With a hard target 3, the backprop is [0.25, 0.25, 0.25, -0.75]
# The loss for this batch is -log(0.25) = 1.386
#
# For batch 1, we have:
# exp(0) = 1
# exp(1) = 2.718
# exp(2) = 7.389
# exp(3) = 20.085
# SUM = 31.192
# So we have as probabilities:
# exp(0) / SUM = 0.032
# exp(1) / SUM = 0.087
# exp(2) / SUM = 0.237
# exp(3) / SUM = 0.644
    # With a hard target 0, the backprop is
    # [0.032 - 1.0 = -0.968, 0.087, 0.237, 0.644]
    # The losses for the two batches are [1.0 * -log(0.25), 1.0 * -log(0.032)]
    # = [1.3862, 3.4420]
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, -0.75], [-0.968, 0.087, 0.237, 0.6439]]),
np_backprop,
rtol=1.e-3,
atol=1.e-3)
self.assertAllClose(
np.array([1.3862, 3.4420]), np_loss, rtol=1.e-3, atol=1.e-3)
def testShapeMismatch(self):
with self.session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, ".*Rank mismatch:*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]])
def testScalar(self):
with self.session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, ".*Logits cannot be scalars*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=constant_op.constant(0), logits=constant_op.constant(1.0))
@test_util.run_deprecated_v1
def testLabelsPlaceholderScalar(self):
with self.session(use_gpu=True):
labels = array_ops.placeholder(np.int32)
y = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=[[7.]])
with self.assertRaisesOpError("labels must be 1-D"):
y.eval(feed_dict={labels: 0})
def testVector(self):
with self.session(use_gpu=True):
loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=constant_op.constant(0), logits=constant_op.constant([1.0]))
self.assertAllClose(0.0, self.evaluate(loss))
def testFloat(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32),
np.array([3, 0]).astype(label_dtype))
def testDouble(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
np.array([0, 3]).astype(label_dtype))
def testHalf(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([3, 0]).astype(label_dtype))
def testEmpty(self):
self._testXent(np.zeros((0, 3)), np.zeros((0,), dtype=np.int32))
@test_util.run_deprecated_v1
def testGradient(self):
with self.session(use_gpu=True):
l = constant_op.constant([3, 0, 1], name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
x = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent")
err = gradient_checker.compute_gradient_error(f, [3, 4], x, [3])
print("cross entropy gradient err = ", err)
self.assertLess(err, 5e-8)
@test_util.run_deprecated_v1
def testSecondGradient(self):
images_placeholder = array_ops.placeholder(dtypes.float32, shape=(3, 2))
labels_placeholder = array_ops.placeholder(dtypes.int32, shape=(3))
weights = variables.Variable(random_ops.truncated_normal([2], stddev=1.0))
weights_with_zeros = array_ops.stack([array_ops.zeros([2]), weights],
axis=1)
logits = math_ops.matmul(images_placeholder, weights_with_zeros)
cross_entropy = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels_placeholder, logits=logits)
loss = math_ops.reduce_mean(cross_entropy)
    # Taking the second gradient should fail, since it is not
    # yet supported.
with self.assertRaisesRegexp(LookupError,
"explicitly disabled"):
_ = gradients_impl.hessians(loss, [weights])
def _testHighDim(self, features, labels):
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
# manually reshape loss
np_loss = np.reshape(np_loss, np.array(labels).shape)
with self.cached_session(use_gpu=True) as sess:
loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=features)
backprop = loss.op.inputs[0].op.outputs[1]
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
@test_util.run_deprecated_v1
def testHighDim(self):
features = [[[1., 1., 1., 1.]], [[1., 2., 3., 4.]]]
labels = [[3], [0]]
self._testHighDim(features, labels)
@test_util.run_deprecated_v1
def testHighDim2(self):
features = [[[1., 1., 1., 1.], [2., 2., 2., 2.]],
[[1., 2., 3., 4.], [5., 6., 7., 8.]]]
labels = [[3, 2], [0, 3]]
self._testHighDim(features, labels)
@test_util.run_deprecated_v1
def testScalarHandling(self):
with self.session(use_gpu=False) as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
".*labels must be 1-D.*"):
labels = array_ops.placeholder(dtypes.int32, shape=[None, 1])
logits = array_ops.placeholder(dtypes.float32, shape=[None, 3])
ce = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=array_ops.squeeze(labels), logits=logits)
labels_v2 = np.zeros((1, 1), dtype=np.int32)
logits_v2 = np.random.randn(1, 3)
sess.run([ce], feed_dict={labels: labels_v2, logits: logits_v2})
def _sparse_vs_dense_xent_benchmark_dense(labels, logits):
labels = array_ops.identity(labels)
logits = array_ops.identity(logits)
with ops_lib.device("/cpu:0"): # Sparse-to-dense must be on CPU
batch_size = array_ops.shape(logits)[0]
num_entries = array_ops.shape(logits)[1]
length = batch_size * num_entries
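    # Offset each row's label by row_index * num_entries so sparse_to_dense
    # can scatter 1.0s into a flattened [batch_size * num_entries] vector,
    # which is then reshaped into a dense one-hot target matrix.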
labels += num_entries * math_ops.range(batch_size)
target = sparse_ops.sparse_to_dense(labels,
array_ops.stack([length]), 1.0, 0.0)
target = array_ops.reshape(target, array_ops.stack([-1, num_entries]))
crossent = nn_ops.softmax_cross_entropy_with_logits(
labels=target, logits=logits, name="SequenceLoss/CrossEntropy")
crossent_sum = math_ops.reduce_sum(crossent)
grads = gradients_impl.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
def _sparse_vs_dense_xent_benchmark_sparse(labels, logits):
# Using sparse_softmax_cross_entropy_with_logits
labels = labels.astype(np.int64)
labels = array_ops.identity(labels)
logits = array_ops.identity(logits)
  crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name="SequenceLoss/CrossEntropy")
crossent_sum = math_ops.reduce_sum(crossent)
grads = gradients_impl.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
def sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
config.gpu_options.per_process_gpu_memory_fraction = 0.3
labels = np.random.randint(num_entries, size=batch_size).astype(np.int32)
logits = np.random.randn(batch_size, num_entries).astype(np.float32)
def _timer(sess, ops):
    # Warm-up runs so the timed loop below excludes one-time startup costs.
for _ in range(20):
sess.run(ops)
# Timing run
start = time.time()
for _ in range(20):
sess.run(ops)
end = time.time()
return (end - start) / 20.0 # Average runtime per iteration
# Using sparse_to_dense and softmax_cross_entropy_with_logits
with session.Session(config=config) as sess:
if not use_gpu:
with ops_lib.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
delta_dense = _timer(sess, ops)
# Using sparse_softmax_cross_entropy_with_logits
with session.Session(config=config) as sess:
if not use_gpu:
      with ops_lib.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
delta_sparse = _timer(sess, ops)
print("%d \t %d \t %s \t %f \t %f \t %f" % (batch_size, num_entries, use_gpu,
delta_dense, delta_sparse,
delta_sparse / delta_dense))
def main(_):
print("Sparse Xent vs. SparseToDense + Xent")
print("batch \t depth \t gpu \t dt(dense) \t dt(sparse) "
"\t dt(sparse)/dt(dense)")
for use_gpu in (False, True):
for batch_size in (32, 64, 128):
for num_entries in (100, 1000, 10000):
sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu)
sparse_vs_dense_xent_benchmark(32, 100000, use_gpu)
sparse_vs_dense_xent_benchmark(8, 1000000, use_gpu)
if __name__ == "__main__":
if "--benchmarks" in sys.argv:
sys.argv.remove("--benchmarks")
app.run()
else:
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_xent_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for state updating ops that may have benign race conditions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AssignOpTest(test.TestCase):
  # NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
# contain benign and deliberate data races when multiple threads update
# the same parameters without a lock.
def testParallelUpdateWithoutLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(array_ops.zeros([1024, 1024]))
adds = [
state_ops.assign_add(
p, ones_t, use_locking=False) for _ in range(20)
]
self.evaluate(variables.global_variables_initializer())
def run_add(add_op):
self.evaluate(add_op)
threads = [
self.checkedThread(
target=run_add, args=(add_op,)) for add_op in adds
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
ones = np.ones((1024, 1024)).astype(np.float32)
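      # Without locking, some of the 20 increments may be lost to races, so
      # each element can end up anywhere between 1.0 (at least one add landed)
      # and 20.0 (every add landed).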
self.assertTrue((vals >= ones).all())
self.assertTrue((vals <= ones * 20).all())
def testParallelAssignWithoutLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
ones_t = array_ops.fill([1024, 1024], float(1))
p = variables.Variable(array_ops.zeros([1024, 1024]))
assigns = [
state_ops.assign(p, math_ops.multiply(ones_t, float(i)), False)
for i in range(1, 21)
]
self.evaluate(variables.global_variables_initializer())
def run_assign(assign_op):
self.evaluate(assign_op)
threads = [
self.checkedThread(
target=run_assign, args=(assign_op,)) for assign_op in assigns
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
# Assert every element is taken from one of the assignments.
self.assertTrue((vals > 0).all())
self.assertTrue((vals <= 20).all())
# NOTE(skyewm): We exclude these tests from the TSAN TAP target, because they
# contain non-benign but known data races between the variable assignment and
# returning the output tensors. This issue will be resolved with the new
# resource variables.
def testParallelUpdateWithLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(zeros_t)
adds = [
state_ops.assign_add(
p, ones_t, use_locking=True) for _ in range(20)
]
self.evaluate(p.initializer)
def run_add(add_op):
self.evaluate(add_op)
threads = [
self.checkedThread(
target=run_add, args=(add_op,)) for add_op in adds
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertAllEqual(vals, ones * 20)
def testParallelAssignWithLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(zeros_t)
assigns = [
state_ops.assign(
p, math_ops.multiply(ones_t, float(i)), use_locking=True)
for i in range(1, 21)
]
self.evaluate(p.initializer)
def run_assign(assign_op):
self.evaluate(assign_op)
threads = [
self.checkedThread(
target=run_assign, args=(assign_op,)) for assign_op in assigns
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
# Assert every element is the same, and taken from one of the assignments.
self.assertTrue(vals[0, 0] > 0)
self.assertTrue(vals[0, 0] <= 20)
self.assertAllEqual(vals, np.ones([1024, 1024]) * vals[0, 0])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/dense_update_ops_no_tsan_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
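"""Tests for tensorflow.ops.data_flow_ops.ConditionalAccumulator."""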
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# from functools import reduce
class ConditionalAccumulatorTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.ConditionalAccumulator(dtypes_lib.float32, name="Q")
self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals(
"""
name:'Q' op:'ConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'shape' value { shape { unknown_rank: true} } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
attr { key: 'reduction_type' value {s: 'MEAN'} }
""", q.accumulator_ref.op.node_def)
def testConstructorWithInvalidArg(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError):
data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", reduction_type="Invalid")
def testConstructorWithShape(self):
with ops.Graph().as_default():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1, 5, 2, 8]))
self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals(
"""
name:'Q' op:'ConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'shape' value { shape { dim {size: 1 }
dim {size: 5 }
dim {size: 2 }
dim {size: 8 }
} } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
attr { key: 'reduction_type' value {s: 'MEAN'} }
""", q.accumulator_ref.op.node_def)
@test_util.run_deprecated_v1
def testAccumulatorSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(dtypes_lib.float32, name="Q")
self.assertEqual(q.num_accumulated().eval(), 0)
@test_util.run_deprecated_v1
def testAccumulatorSetGlobalStep(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
set_global_step_op = q.set_global_step(1)
set_global_step_op.run()
@test_util.run_deprecated_v1
def testAccumulatorApplyGradFloat32(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
accum_op.run()
@test_util.run_deprecated_v1
def testDtypes(self):
with self.cached_session() as sess:
dtypes = [dtypes_lib.float16, dtypes_lib.float32, dtypes_lib.float64]
for i in range(len(dtypes)):
dtype = dtypes[i]
q = data_flow_ops.ConditionalAccumulator(
dtype, shape=tensor_shape.TensorShape([1]))
elems = np.arange(10).astype(dtype.as_numpy_dtype)
for e in elems:
q.apply_grad((e,)).run()
result = self.evaluate(q.take_grad(1))
self.assertEqual(sum(elems) / len(elems), result)
@test_util.run_deprecated_v1
def testAccumulatorMultipleAccumulators(self):
with self.cached_session():
q_f32_0 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
q_f32_1 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
q_f16_0 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([1]))
q_f16_1 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([1]))
accums = [q_f16_0, q_f16_1, q_f32_0, q_f32_1]
for i in range(len(accums)):
accums[i].apply_grad((i + 10.0,)).run()
for i in range(len(accums)):
result = accums[i].take_grad(1).eval()
self.assertEqual(result, i + 10.0)
@test_util.run_deprecated_v1
def testAccumulatorApplyAndTakeGradWithShape(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=(3, 2))
elems = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]]]
elems_ave = [[(a + b) / len(elems) for a, b in zip(x, y)]
for x, y in zip(elems[0], elems[1])]
accum_ops = [q.apply_grad(x) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
is_all_equal = True
val = self.evaluate(takeg_t)
for i in range(len(val)):
for j in range(len(val[i])):
is_all_equal &= (val[i][j] == elems_ave[i][j])
self.assertTrue(is_all_equal)
@test_util.run_deprecated_v1
def testAccumulatorApplyGradWithWrongShape(self):
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=(3, 2))
with self.assertRaises(ValueError):
q.apply_grad([[1.0, 2.0], [3.0, 4.0]])
with self.assertRaises(ValueError):
q.apply_grad([[1.0], [2.0], [3.0]])
@test_util.run_deprecated_v1
def testAccumulatorDynamicShape(self):
with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=None)
x = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(x)
elems = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]]]
elems_ave = [[(a + b) / len(elems) for a, b in zip(c, d)]
for c, d in zip(elems[0], elems[1])]
takeg_t = q.take_grad(1)
for elem in elems:
sess.run(accum_op, feed_dict={x: elem})
is_all_equal = True
val = self.evaluate(takeg_t)
for i in range(len(val)):
for j in range(len(val[i])):
is_all_equal &= (val[i][j] == elems_ave[i][j])
self.assertTrue(is_all_equal)
@test_util.run_v1_only("b/120545219")
def testAccumulatorWrongDynamicShape(self):
with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=None)
x = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(x)
# First successful apply_grad determines shape
sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]})
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0]]})
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(accum_op, feed_dict={x: [[1.0], [2.0], [3.0]]})
@test_util.run_deprecated_v1
def testAccumulatorSizeAfterApplyGrad(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
self.assertEqual(q.num_accumulated().eval(), 0)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 1)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 2)
@test_util.run_deprecated_v1
def testAccumulatorSizeAfterApplyGradAndTakeGrad(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
extract_t = q.take_grad(2)
# Applying gradient multiple times to increase size from 0 to 2.
self.assertEqual(q.num_accumulated().eval(), 0)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 1)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 2)
# Extract will reduce size to 0
extract_t.op.run()
self.assertEqual(q.num_accumulated().eval(), 0)
      # Taking gradients always resets the accumulator size to 0 if successful.
accum_op = q.apply_grad((10.0,), local_step=1)
accum_op.run()
accum_op.run()
accum_op.run()
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 4)
extract_t.op.run()
self.assertEqual(q.num_accumulated().eval(), 0)
@test_util.run_deprecated_v1
def testAccumulatorTakeGradMean(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0]
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(15.0, val)
accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
takeg_t = q.take_grad(constant_op.constant(1))
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(15.0, val)
@test_util.run_deprecated_v1
def testAccumulatorTakeGradSum(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1]),
reduction_type="SUM")
elems = [10.0, 20.0]
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(30.0, val)
accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
takeg_t = q.take_grad(constant_op.constant(1))
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(30.0, val)
@test_util.run_deprecated_v1
def testAccumulatorTakeGradInvalidReductionType(self):
with self.assertRaises(ValueError):
data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1]),
reduction_type="Invalid")
@test_util.run_v1_only("b/120545219")
def testAccumulatorInvalidTakeGrad(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0]
accum_ops = [q.apply_grad((x,)) for x in elems]
takeg_t = q.take_grad(-1)
for accum_op in accum_ops:
accum_op.run()
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(takeg_t)
@test_util.run_deprecated_v1
def testAccumulatorRepeatedTakeGradMean(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0]
elems_ave = sum(elems) / len(elems)
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(elems_ave, val)
elems = [20.0, 30.0]
elems_ave = sum(elems) / len(elems)
accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(elems_ave + 0.0, val)
@test_util.run_deprecated_v1
def testAccumulatorRepeatedTakeGradSum(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1]),
reduction_type="SUM")
elems = [10.0, 20.0]
elems_sum = 30.0
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(elems_sum, val)
elems = [20.0, 30.0]
elems_sum = 50.0
accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(elems_sum, val)
@test_util.run_deprecated_v1
def testAccumulatorIncrementGlobalStep(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
global_step = variables.Variable(0, name="global_step")
new_global_step = math_ops.add(global_step, 1)
inc_global_step = state_ops.assign(global_step, new_global_step)
set_global_step_op = q.set_global_step(new_global_step)
self.evaluate(variables.global_variables_initializer())
for _ in range(3):
set_global_step_op.run()
self.evaluate(inc_global_step)
@test_util.run_deprecated_v1
def testAccumulatorSetGlobalStepPreventsAccumulation(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
local_steps = range(1000, 1005)
accum_ops = [q.apply_grad((0.0 + x,), local_step=x) for x in local_steps]
for ls in local_steps:
set_global_step_op = q.set_global_step(ls)
set_global_step_op.run()
for accum_op in accum_ops:
accum_op.run()
takeg_t = q.take_grad(1)
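      # Gradients applied with a local_step older than the accumulator's
      # global step are expected to be dropped, so only the applies whose
      # local_step is >= the last global step (ls) contribute to the mean.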
val = self.evaluate(takeg_t)
self.assertEqual(0.0 + sum(x for x in local_steps
if x >= ls) / sum(1 for x in local_steps
if x >= ls), val)
@test_util.run_v1_only("b/120545219")
def testParallelApplyGrad(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
def apply_grad(accum_op):
self.evaluate(accum_op)
threads = [
self.checkedThread(
target=apply_grad, args=(o,)) for o in accum_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
val = self.evaluate(takeg_t)
self.assertEqual(val, sum(elems) / len(elems))
@test_util.run_v1_only("b/120545219")
def testParallelTakeGrad(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [e for e in range(10)]
accum_ops = [q.apply_grad((np.float32(e),), local_step=e) for e in elems]
takeg_t = q.take_grad(1)
def apply_grad():
for accum_op in accum_ops:
time.sleep(1.0)
self.evaluate(accum_op)
apply_grad_thread = self.checkedThread(target=apply_grad)
results = []
def take_grad():
results.append(self.evaluate(takeg_t))
threads = [self.checkedThread(target=take_grad) for _ in range(10)]
for thread in threads:
thread.start()
apply_grad_thread.start()
for thread in threads:
thread.join()
apply_grad_thread.join()
self.assertItemsEqual(elems, results)
@test_util.run_v1_only("b/120545219")
def testAccumulatorApplyAndBlockingTake(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0, 30.0]
elems_ave = sum(elems) / len(elems)
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(3)
def apply_grad():
time.sleep(1.0)
for accum_op in accum_ops:
self.evaluate(accum_op)
return_array = []
def take_grad():
return_array.append(self.evaluate(takeg_t))
accum_thread = self.checkedThread(target=apply_grad)
takeg_thread = self.checkedThread(target=take_grad)
accum_thread.start()
takeg_thread.start()
accum_thread.join()
takeg_thread.join()
self.assertEqual([elems_ave], return_array)
def _blocking_takeg(self, sess, takeg_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(takeg_op)
@test_util.run_v1_only("b/120545219")
def testAccumulatorCancel(self):
with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
takeg_t = q.take_grad(1)
takeg_thread = self.checkedThread(
self._blocking_takeg, args=(sess, takeg_t))
takeg_thread.start()
time.sleep(1.0)
sess.close() # Will cancel blocked operation
takeg_thread.join()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/conditional_accumulator_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
def _AddTest(test_class, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test_class, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test_class, test_name, fn)
class SvdOpTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testWrongDimensions(self):
# The input to svd should be a tensor of at least rank 2.
scalar = constant_op.constant(1.)
with self.assertRaisesRegexp(ValueError,
"Shape must be at least rank 2 but is rank 0"):
linalg_ops.svd(scalar)
vector = constant_op.constant([1., 2.])
with self.assertRaisesRegexp(ValueError,
"Shape must be at least rank 2 but is rank 1"):
linalg_ops.svd(vector)
@test_util.run_v1_only("b/120545219")
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
all_ops = []
for compute_uv_ in True, False:
for full_matrices_ in True, False:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
if compute_uv_:
s1, u1, v1 = linalg_ops.svd(
matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
s2, u2, v2 = linalg_ops.svd(
matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
all_ops += [s1, u1, v1, s2, u2, v2]
else:
s1 = linalg_ops.svd(
matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
s2 = linalg_ops.svd(
matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
all_ops += [s1, s2]
val = self.evaluate(all_ops)
for i in range(2):
s = 6 * i
self.assertAllEqual(val[s], val[s + 3]) # s1 == s2
self.assertAllEqual(val[s + 1], val[s + 4]) # u1 == u2
self.assertAllEqual(val[s + 2], val[s + 5]) # v1 == v2
for i in range(2):
s = 12 + 2 * i
self.assertAllEqual(val[s], val[s + 1]) # s1 == s2
def _GetSvdOpTest(dtype_, shape_, use_static_shape_, compute_uv_,
full_matrices_):
def CompareSingularValues(self, x, y, tol):
self.assertAllClose(x, y, atol=(x[0] + y[0]) * tol)
def CompareSingularVectors(self, x, y, rank, tol):
# We only compare the first 'rank' singular vectors since the
# remainder form an arbitrary orthonormal basis for the
# (row- or column-) null space, whose exact value depends on
# implementation details. Notice that since we check that the
# matrices of singular vectors are unitary elsewhere, we do
# implicitly test that the trailing vectors of x and y span the
# same space.
x = x[..., 0:rank]
y = y[..., 0:rank]
# Singular vectors are only unique up to sign (complex phase factor for
# complex matrices), so we normalize the sign first.
sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
x *= phases
self.assertAllClose(x, y, atol=2 * tol)
def CheckApproximation(self, a, u, s, v, full_matrices_, tol):
# Tests that a ~= u*diag(s)*transpose(v).
batch_shape = a.shape[:-2]
m = a.shape[-2]
n = a.shape[-1]
diag_s = math_ops.cast(array_ops.matrix_diag(s), dtype=dtype_)
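    # With full_matrices=True, u is [..., m, m] and v is [..., n, n], so
    # diag(s), which has shape [..., min(m, n), min(m, n)], must be
    # zero-padded to [..., m, n] before forming u * diag(s) * v^H.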
if full_matrices_:
if m > n:
zeros = array_ops.zeros(batch_shape + (m - n, n), dtype=dtype_)
diag_s = array_ops.concat([diag_s, zeros], a.ndim - 2)
elif n > m:
zeros = array_ops.zeros(batch_shape + (m, n - m), dtype=dtype_)
diag_s = array_ops.concat([diag_s, zeros], a.ndim - 1)
a_recon = math_ops.matmul(u, diag_s)
a_recon = math_ops.matmul(a_recon, v, adjoint_b=True)
self.assertAllClose(a_recon, a, rtol=tol, atol=tol)
def CheckUnitary(self, x, tol):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
xx = math_ops.matmul(x, x, adjoint_a=True)
identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
self.assertAllClose(identity, xx, atol=tol)
@test_util.run_v1_only("b/120545219")
def Test(self):
is_complex = dtype_ in (np.complex64, np.complex128)
is_single = dtype_ in (np.float32, np.complex64)
tol = 3e-4 if is_single else 1e-12
if test.is_gpu_available():
# The gpu version returns results that are much less accurate.
tol *= 100
np.random.seed(42)
x_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
if is_complex:
x_np += 1j * np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
with self.session(use_gpu=True) as sess:
if use_static_shape_:
x_tf = constant_op.constant(x_np)
else:
x_tf = array_ops.placeholder(dtype_)
if compute_uv_:
s_tf, u_tf, v_tf = linalg_ops.svd(
x_tf, compute_uv=compute_uv_, full_matrices=full_matrices_)
if use_static_shape_:
s_tf_val, u_tf_val, v_tf_val = self.evaluate([s_tf, u_tf, v_tf])
else:
s_tf_val, u_tf_val, v_tf_val = sess.run(
[s_tf, u_tf, v_tf], feed_dict={x_tf: x_np})
else:
s_tf = linalg_ops.svd(
x_tf, compute_uv=compute_uv_, full_matrices=full_matrices_)
if use_static_shape_:
s_tf_val = self.evaluate(s_tf)
else:
s_tf_val = sess.run(s_tf, feed_dict={x_tf: x_np})
if compute_uv_:
u_np, s_np, v_np = np.linalg.svd(
x_np, compute_uv=compute_uv_, full_matrices=full_matrices_)
else:
s_np = np.linalg.svd(
x_np, compute_uv=compute_uv_, full_matrices=full_matrices_)
# We explicitly avoid the situation where numpy eliminates a first
# dimension that is equal to one.
s_np = np.reshape(s_np, s_tf_val.shape)
CompareSingularValues(self, s_np, s_tf_val, tol)
if compute_uv_:
CompareSingularVectors(self, u_np, u_tf_val, min(shape_[-2:]), tol)
CompareSingularVectors(self,
np.conj(np.swapaxes(v_np, -2, -1)), v_tf_val,
min(shape_[-2:]), tol)
CheckApproximation(self, x_np, u_tf_val, s_tf_val, v_tf_val,
full_matrices_, tol)
CheckUnitary(self, u_tf_val, tol)
CheckUnitary(self, v_tf_val, tol)
return Test
class SvdGradOpTest(test.TestCase):
pass # Filled in below
def _NormalizingSvd(tf_a, full_matrices_):
tf_s, tf_u, tf_v = linalg_ops.svd(
tf_a, compute_uv=True, full_matrices=full_matrices_)
  # Singular vectors are only unique up to an arbitrary phase. We normalize
  # the vectors such that the first component of u (if m >= n) or v (if n > m)
  # has phase 0.
m = tf_a.shape[-2]
n = tf_a.shape[-1]
if m >= n:
top_rows = tf_u[..., 0:1, :]
else:
top_rows = tf_v[..., 0:1, :]
if tf_u.dtype.is_complex:
angle = -math_ops.angle(top_rows)
phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
else:
phase = math_ops.sign(top_rows)
tf_u *= phase[..., :m]
tf_v *= phase[..., :n]
return tf_s, tf_u, tf_v
def _GetSvdGradOpTest(dtype_, shape_, compute_uv_, full_matrices_):
@test_util.run_v1_only("b/120545219")
def Test(self):
np.random.seed(42)
a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
if dtype_ in [np.complex64, np.complex128]:
a += 1j * np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
# See Equation (21) in:
# http://www.karenkopecky.net/Teaching/eco613614/Notes_NumericalDifferentiation.pdf
# TODO(rmlarsen): Move step size control to gradient checker.
epsilon = np.finfo(dtype_).eps
delta = 0.1 * epsilon**(1.0 / 3.0)
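    # For float32 (eps ~ 1.2e-7) this gives delta ~ 5e-4; for float64
    # (eps ~ 2.2e-16), delta ~ 6e-7.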
if dtype_ in [np.float32, np.complex64]:
tol = 3e-2
else:
tol = 1e-6
with self.session(use_gpu=True):
tf_a = constant_op.constant(a)
if compute_uv_:
tf_s, tf_u, tf_v = _NormalizingSvd(tf_a, full_matrices_)
outputs = [tf_s, tf_u, tf_v]
else:
tf_s = linalg_ops.svd(tf_a, compute_uv=False)
outputs = [tf_s]
for b in outputs:
x_init = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
if dtype_ in [np.complex64, np.complex128]:
x_init += 1j * np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
theoretical, numerical = gradient_checker.compute_gradient(
tf_a,
tf_a.get_shape().as_list(),
b,
b.get_shape().as_list(),
x_init_value=x_init,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
class SvdGradGradOpTest(test.TestCase):
pass # Filled in below
def _GetSvdGradGradOpTest(dtype_, shape_, compute_uv_, full_matrices_):
@test_util.run_v1_only("b/120545219")
def Test(self):
np.random.seed(42)
a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
if dtype_ in [np.complex64, np.complex128]:
a += 1j * np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
# See Equation (21) in:
# http://www.karenkopecky.net/Teaching/eco613614/Notes_NumericalDifferentiation.pdf
# TODO(rmlarsen): Move step size control to gradient checker.
epsilon = np.finfo(dtype_).eps
delta = 0.1 * epsilon**(1.0 / 3.0)
tol = 1e-5
with self.session(use_gpu=True):
tf_a = constant_op.constant(a)
if compute_uv_:
tf_s, tf_u, tf_v = _NormalizingSvd(tf_a, full_matrices_)
outputs = [tf_s, tf_u, tf_v]
else:
tf_s = linalg_ops.svd(tf_a, compute_uv=False)
outputs = [tf_s]
outputs_sums = [math_ops.reduce_sum(o) for o in outputs]
tf_func_outputs = math_ops.add_n(outputs_sums)
grad = gradients_impl.gradients(tf_func_outputs, tf_a)[0]
x_init = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
if dtype_ in [np.complex64, np.complex128]:
x_init += 1j * np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
theoretical, numerical = gradient_checker.compute_gradient(
tf_a,
tf_a.get_shape().as_list(),
grad,
grad.get_shape().as_list(),
x_init_value=x_init,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
class SVDBenchmark(test.Benchmark):
shapes = [
(4, 4),
(8, 8),
(16, 16),
(101, 101),
(256, 256),
(1024, 1024),
(2048, 2048),
(1, 8, 8),
(10, 8, 8),
(100, 8, 8),
(1, 256, 256),
(10, 256, 256),
(100, 256, 256),
]
def benchmarkSVDOp(self):
for shape_ in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix_value = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(np.float32)
matrix = variables.Variable(matrix_value)
u, s, v = linalg_ops.svd(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(u, s, v),
min_iters=25,
name="SVD_cpu_{shape}".format(shape=shape_))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/device:GPU:0"):
matrix_value = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(np.float32)
matrix = variables.Variable(matrix_value)
u, s, v = linalg_ops.svd(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(u, s, v),
min_iters=25,
name="SVD_gpu_{shape}".format(shape=shape_))
if __name__ == "__main__":
for compute_uv in False, True:
for full_matrices in False, True:
for dtype in np.float32, np.float64, np.complex64, np.complex128:
for rows in 1, 2, 5, 10, 32, 100:
for cols in 1, 2, 5, 10, 32, 100:
for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
shape = batch_dims + (rows, cols)
# TF2 does not support placeholders under eager so we skip it
for use_static_shape in set([True, tf2.enabled()]):
name = "%s_%s_static_shape_%s__compute_uv_%s_full_%s" % (
dtype.__name__, "_".join(map(str, shape)), use_static_shape,
compute_uv, full_matrices)
_AddTest(SvdOpTest, "Svd", name,
_GetSvdOpTest(dtype, shape, use_static_shape,
compute_uv, full_matrices))
for compute_uv in False, True:
for full_matrices in False, True:
dtypes = ([np.float32, np.float64]
+ [np.complex64, np.complex128] * (not compute_uv))
for dtype in dtypes:
mat_shapes = [(10, 11), (11, 10), (11, 11), (2, 2, 2, 3)]
if not full_matrices or not compute_uv:
mat_shapes += [(5, 11), (11, 5)]
for mat_shape in mat_shapes:
for batch_dims in [(), (3,)]:
shape = batch_dims + mat_shape
name = "%s_%s_compute_uv_%s_full_%s" % (
dtype.__name__, "_".join(map(str, shape)), compute_uv,
full_matrices)
_AddTest(SvdGradOpTest, "SvdGrad", name,
_GetSvdGradOpTest(dtype, shape, compute_uv, full_matrices))
            # The results are too inaccurate for float32.
if dtype == np.float64:
_AddTest(
SvdGradGradOpTest, "SvdGradGrad", name,
_GetSvdGradGradOpTest(dtype, shape, compute_uv,
full_matrices))
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/svd_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Substr op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class SubstrOpTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(np.int32, 1, "BYTE"),
(np.int64, 1, "BYTE"),
(np.int32, -4, "BYTE"),
(np.int64, -4, "BYTE"),
(np.int32, 1, "UTF8_CHAR"),
(np.int64, 1, "UTF8_CHAR"),
(np.int32, -4, "UTF8_CHAR"),
(np.int64, -4, "UTF8_CHAR"),
)
def testScalarString(self, dtype, pos, unit):
test_string = {
"BYTE": b"Hello",
"UTF8_CHAR": u"He\xc3\xc3\U0001f604".encode("utf-8"),
}[unit]
expected_value = {
"BYTE": b"ell",
"UTF8_CHAR": u"e\xc3\xc3".encode("utf-8"),
}[unit]
position = np.array(pos, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testScalarString_EdgeCases(self, dtype, unit):
# Empty string
test_string = {
"BYTE": b"",
"UTF8_CHAR": u"".encode("utf-8"),
}[unit]
expected_value = b""
position = np.array(0, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
# Full string
test_string = {
"BYTE": b"Hello",
"UTF8_CHAR": u"H\xc3ll\U0001f604".encode("utf-8"),
}[unit]
position = np.array(0, dtype)
length = np.array(5, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, test_string)
# Full string (Negative)
test_string = {
"BYTE": b"Hello",
"UTF8_CHAR": u"H\xc3ll\U0001f604".encode("utf-8"),
}[unit]
position = np.array(-5, dtype)
length = np.array(5, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, test_string)
# Length is larger in magnitude than a negative position
test_string = {
"BYTE": b"Hello",
"UTF8_CHAR": u"H\xc3ll\U0001f604".encode("utf-8"),
}[unit]
expected_string = {
"BYTE": b"ello",
"UTF8_CHAR": u"\xc3ll\U0001f604".encode("utf-8"),
}[unit]
position = np.array(-4, dtype)
length = np.array(5, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_string)
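# Hedged pure-Python sketch of how a negative position is resolved for the
# BYTE unit (the kernel additionally clips the length to the string end):
#
#   def _py_substr(s, pos, length):
#     start = pos if pos >= 0 else len(s) + pos
#     return s[start:start + length]
#
#   _py_substr(b"Hello", -4, 5)  # -> b"ello", matching the case above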
@parameterized.parameters(
(np.int32, 1, "BYTE"),
(np.int64, 1, "BYTE"),
(np.int32, -4, "BYTE"),
(np.int64, -4, "BYTE"),
(np.int32, 1, "UTF8_CHAR"),
(np.int64, 1, "UTF8_CHAR"),
(np.int32, -4, "UTF8_CHAR"),
(np.int64, -4, "UTF8_CHAR"),
)
def testVectorStrings(self, dtype, pos, unit):
test_string = {
"BYTE": [b"Hello", b"World"],
"UTF8_CHAR": [x.encode("utf-8") for x in [u"H\xc3llo",
u"W\U0001f604rld"]],
}[unit]
expected_value = {
"BYTE": [b"ell", b"orl"],
"UTF8_CHAR": [x.encode("utf-8") for x in [u"\xc3ll", u"\U0001f604rl"]],
}[unit]
position = np.array(pos, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testMatrixStrings(self, dtype, unit):
test_string = {
"BYTE": [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d229\U0001d227n",
u"\xc6\u053c\u025bv\u025bn",
u"tw\u0c1dlv\u025b"]],
[x.encode("utf-8") for x in [u"He\xc3\xc3o",
u"W\U0001f604rld",
u"d\xfcd\xea"]]],
}[unit]
position = np.array(1, dtype)
length = np.array(4, dtype)
expected_value = {
"BYTE": [[b"en", b"leve", b"welv"], [b"hirt", b"ourt", b"ifte"],
[b"ixte", b"even", b"ight"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d227n",
u"\u053c\u025bv\u025b",
u"w\u0c1dlv"]],
[x.encode("utf-8") for x in [u"e\xc3\xc3o",
u"\U0001f604rld",
u"\xfcd\xea"]]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
position = np.array(-3, dtype)
length = np.array(2, dtype)
expected_value = {
"BYTE": [[b"te", b"ve", b"lv"], [b"ee", b"ee", b"ee"],
[b"ee", b"ee", b"ee"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d229\U0001d227",
u"v\u025b", u"lv"]],
[x.encode("utf-8") for x in [u"\xc3\xc3", u"rl",
u"\xfcd"]]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testElementWisePosLen(self, dtype, unit):
test_string = {
"BYTE": [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d229\U0001d227n",
u"\xc6\u053c\u025bv\u025bn",
u"tw\u0c1dlv\u025b"]],
[x.encode("utf-8") for x in [u"He\xc3\xc3o",
u"W\U0001f604rld",
u"d\xfcd\xea"]],
[x.encode("utf-8") for x in [u"sixt\xea\xean",
u"se\U00010299enteen",
u"ei\U0001e920h\x86een"]]],
}[unit]
position = np.array([[1, -4, 3], [1, 2, -4], [-5, 2, 3]], dtype)
length = np.array([[2, 2, 4], [4, 3, 2], [5, 5, 5]], dtype)
expected_value = {
"BYTE": [[b"en", b"ev", b"lve"], [b"hirt", b"urt", b"te"],
[b"xteen", b"vente", b"hteen"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d227n",
u"\u025bv",
u"lv\u025b"]],
[x.encode("utf-8") for x in [u"e\xc3\xc3o",
u"rld",
u"d\xfc"]],
[x.encode("utf-8") for x in [u"xt\xea\xean",
u"\U00010299ente",
u"h\x86een"]]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testBroadcast(self, dtype, unit):
# Broadcast pos/len onto input string
test_string = {
"BYTE": [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"],
[b"nineteen", b"twenty", b"twentyone"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d229\U0001d227n",
u"\xc6\u053c\u025bv\u025bn",
u"tw\u0c1dlv\u025b"]],
[x.encode("utf-8") for x in [u"th\xcdrt\xea\xean",
u"f\U0001f604urt\xea\xean",
u"f\xcd\ua09ctee\ua0e4"]],
[x.encode("utf-8") for x in [u"s\xcdxt\xea\xean",
u"se\U00010299enteen",
u"ei\U0001e920h\x86een"]],
[x.encode("utf-8") for x in [u"nineteen",
u"twenty",
u"twentyone"]]],
}[unit]
position = np.array([1, -4, 3], dtype)
length = np.array([1, 2, 3], dtype)
expected_value = {
"BYTE": [[b"e", b"ev", b"lve"], [b"h", b"te", b"tee"],
[b"i", b"te", b"hte"], [b"i", b"en", b"nty"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d227",
u"\u025bv", u"lv\u025b"]],
[x.encode("utf-8") for x in [u"h", u"t\xea", u"tee"]],
[x.encode("utf-8") for x in [u"\xcd", u"te", u"h\x86e"]],
[x.encode("utf-8") for x in [u"i", u"en", u"nty"]]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
# Broadcast input string onto pos/len
test_string = {
"BYTE": [b"thirteen", b"fourteen", b"fifteen"],
"UTF8_CHAR": [x.encode("utf-8") for x in [u"th\xcdrt\xea\xean",
u"f\U0001f604urt\xea\xean",
u"f\xcd\ua09ctee\ua0e4"]],
}[unit]
position = np.array([[1, -2, 3], [-3, 2, 1], [5, 5, -5]], dtype)
length = np.array([[3, 2, 1], [1, 2, 3], [2, 2, 2]], dtype)
expected_value = {
"BYTE": [[b"hir", b"en", b"t"], [b"e", b"ur", b"ift"],
[b"ee", b"ee", b"ft"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"h\xcdr", u"\xean", u"t"]],
[x.encode("utf-8") for x in [u"\xea", u"ur",
u"\xcd\ua09ct"]],
[x.encode("utf-8") for x in [u"\xea\xea", u"\xea\xea",
u"\ua09ct"]]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
# Test 1D broadcast
test_string = {
"BYTE": b"thirteen",
"UTF8_CHAR": u"th\xcdrt\xea\xean".encode("utf-8"),
}[unit]
position = np.array([1, -4, 7], dtype)
length = np.array([3, 2, 1], dtype)
expected_value = {
"BYTE": [b"hir", b"te", b"n"],
"UTF8_CHAR": [x.encode("utf-8") for x in [u"h\xcdr", u"t\xea", u"n"]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
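# Hedged note: position/length broadcast against the input strings with
# NumPy-style rules, e.g. a [3] position vector applied to a [4, 3] string
# matrix reuses the same per-column position in every row:
#
#   np.broadcast_shapes((3,), (4, 3))  # -> (4, 3)  (NumPy >= 1.20)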
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
@test_util.run_deprecated_v1
def testBadBroadcast(self, dtype, unit):
test_string = [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]]
position = np.array([1, 2, -3, 4], dtype)
length = np.array([1, 2, 3, 4], dtype)
with self.assertRaises(ValueError):
string_ops.substr(test_string, position, length, unit=unit)
@parameterized.parameters(
(np.int32, 6, "BYTE"),
(np.int64, 6, "BYTE"),
(np.int32, -6, "BYTE"),
(np.int64, -6, "BYTE"),
(np.int32, 6, "UTF8_CHAR"),
(np.int64, 6, "UTF8_CHAR"),
(np.int32, -6, "UTF8_CHAR"),
(np.int64, -6, "UTF8_CHAR"),
)
@test_util.run_deprecated_v1
def testOutOfRangeError_Scalar(self, dtype, pos, unit):
# Scalar/Scalar
test_string = {
"BYTE": b"Hello",
"UTF8_CHAR": u"H\xc3ll\U0001f604".encode("utf-8"),
}[unit]
position = np.array(pos, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(substr_op)
@parameterized.parameters(
(np.int32, 4, "BYTE"),
(np.int64, 4, "BYTE"),
(np.int32, -4, "BYTE"),
(np.int64, -4, "BYTE"),
(np.int32, 4, "UTF8_CHAR"),
(np.int64, 4, "UTF8_CHAR"),
(np.int32, -4, "UTF8_CHAR"),
(np.int64, -4, "UTF8_CHAR"),
)
@test_util.run_deprecated_v1
def testOutOfRangeError_VectorScalar(self, dtype, pos, unit):
# Vector/Scalar
test_string = {
"BYTE": [b"good", b"good", b"bad", b"good"],
"UTF8_CHAR": [x.encode("utf-8") for x in [u"g\xc3\xc3d", u"b\xc3d",
u"g\xc3\xc3d"]],
}[unit]
position = np.array(pos, dtype)
length = np.array(1, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(substr_op)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
@test_util.run_deprecated_v1
def testOutOfRangeError_MatrixMatrix(self, dtype, unit):
# Matrix/Matrix
test_string = {
"BYTE": [[b"good", b"good", b"good"], [b"good", b"good", b"bad"],
[b"good", b"good", b"good"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"g\xc3\xc3d", u"g\xc3\xc3d",
u"g\xc3\xc3d"]],
[x.encode("utf-8") for x in [u"g\xc3\xc3d", u"g\xc3\xc3d",
u"b\xc3d"]],
[x.encode("utf-8") for x in [u"g\xc3\xc3d", u"g\xc3\xc3d",
u"g\xc3\xc3d"]]],
}[unit]
position = np.array([[1, 2, 3], [1, 2, 4], [1, 2, 3]], dtype)
length = np.array([[3, 2, 1], [1, 2, 3], [2, 2, 2]], dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(substr_op)
# Matrix/Matrix (with negative)
position = np.array([[1, 2, -3], [1, 2, -4], [1, 2, -3]], dtype)
length = np.array([[3, 2, 1], [1, 2, 3], [2, 2, 2]], dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(substr_op)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
@test_util.run_deprecated_v1
def testOutOfRangeError_Broadcast(self, dtype, unit):
# Broadcast
test_string = {
"BYTE": [[b"good", b"good", b"good"], [b"good", b"good", b"bad"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"g\xc3\xc3d", u"g\xc3\xc3d",
u"g\xc3\xc3d"]],
[x.encode("utf-8") for x in [u"g\xc3\xc3d", u"g\xc3\xc3d",
u"b\xc3d"]]],
}[unit]
position = np.array([1, 2, 4], dtype)
length = np.array([1, 2, 3], dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(substr_op)
# Broadcast (with negative)
position = np.array([-1, -2, -4], dtype)
length = np.array([1, 2, 3], dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(substr_op)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
@test_util.run_deprecated_v1
def testMismatchPosLenShapes(self, dtype, unit):
test_string = {
"BYTE": [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d229\U0001d227n",
u"\xc6\u053c\u025bv\u025bn",
u"tw\u0c1dlv\u025b"]],
[x.encode("utf-8") for x in [u"th\xcdrt\xea\xean",
u"f\U0001f604urt\xea\xean",
u"f\xcd\ua09ctee\ua0e4"]],
[x.encode("utf-8") for x in [u"s\xcdxt\xea\xean",
u"se\U00010299enteen",
u"ei\U0001e920h\x86een"]]],
}[unit]
position = np.array([[1, 2, 3]], dtype)
length = np.array([2, 3, 4], dtype)
# Should fail: position/length have different rank
with self.assertRaises(ValueError):
string_ops.substr(test_string, position, length)
position = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]], dtype)
length = np.array([[2, 3, 4]], dtype)
# Should fail: position/length have mismatched shapes
with self.assertRaises(ValueError):
string_ops.substr(test_string, position, length)
@test_util.run_deprecated_v1
def testWrongDtype(self):
with self.cached_session():
with self.assertRaises(TypeError):
string_ops.substr(b"test", 3.0, 1)
with self.assertRaises(TypeError):
string_ops.substr(b"test", 3, 1.0)
@test_util.run_deprecated_v1
def testInvalidUnit(self):
with self.cached_session():
with self.assertRaises(ValueError):
string_ops.substr(b"test", 3, 1, unit="UTF8")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/substr_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.sparse_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
def _SparseToDense(sparse_indices,
output_size,
sparse_values,
default_value,
validate_indices=True):
return sparse_ops.sparse_to_dense(
sparse_indices,
output_size,
sparse_values,
default_value=default_value,
validate_indices=validate_indices)
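# Illustration drawn from the tests below: sparse_to_dense scatters
# sparse_values into a dense tensor of shape output_size and fills every
# other entry with default_value, e.g.
#
#   _SparseToDense([1, 3], [5], 1, 0)                 # -> [0, 1, 0, 1, 0]
#   _SparseToDense([[1, 3], [2, 0]], [3, 4], 1, -1)   # see test2d below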
class SparseToDenseTest(test.TestCase):
@test_util.run_deprecated_v1
def testInt(self):
with self.session(use_gpu=False):
tf_ans = _SparseToDense([1, 3], [5], 1, 0).eval()
np_ans = np.array([0, 1, 0, 1, 0]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
@test_util.run_deprecated_v1
def testFloat(self):
with self.session(use_gpu=False):
tf_ans = _SparseToDense([1, 3], [5], 1.0, 0.0).eval()
np_ans = np.array([0, 1, 0, 1, 0]).astype(np.float32)
self.assertAllClose(np_ans, tf_ans)
@test_util.run_deprecated_v1
def testString(self):
with self.session(use_gpu=False):
tf_ans = _SparseToDense([1, 3], [5], "a", "b").eval()
np_ans = np.array(["b", "a", "b", "a", "b"]).astype(np.string_)
self.assertAllEqual(np_ans, tf_ans)
@test_util.run_deprecated_v1
def testSetValue(self):
with self.session(use_gpu=False):
tf_ans = _SparseToDense([1, 3], [5], [1, 2], -1).eval()
np_ans = np.array([-1, 1, -1, 2, -1]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
@test_util.run_deprecated_v1
def testSetSingleValue(self):
with self.session(use_gpu=False):
tf_ans = _SparseToDense([1, 3], [5], 1, -1).eval()
np_ans = np.array([-1, 1, -1, 1, -1]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
@test_util.run_deprecated_v1
def test2d(self):
# pylint: disable=bad-whitespace
with self.session(use_gpu=False):
tf_ans = _SparseToDense([[1, 3], [2, 0]], [3, 4], 1, -1).eval()
np_ans = np.array([[-1, -1, -1, -1],
[-1, -1, -1, 1],
[ 1, -1, -1, -1]]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
@test_util.run_deprecated_v1
def testZeroDefault(self):
with self.cached_session():
x = sparse_ops.sparse_to_dense(2, [4], 7).eval()
self.assertAllEqual(x, [0, 0, 7, 0])
@test_util.run_deprecated_v1
def test3d(self):
with self.session(use_gpu=False):
tf_ans = _SparseToDense([[1, 3, 0], [2, 0, 1]], [3, 4, 2], 1, -1).eval()
np_ans = np.ones((3, 4, 2), dtype=np.int32) * -1
np_ans[1, 3, 0] = 1
np_ans[2, 0, 1] = 1
self.assertAllClose(np_ans, tf_ans)
@test_util.run_deprecated_v1
def testBadShape(self):
with self.cached_session():
with self.assertRaisesWithPredicateMatch(ValueError, "must be rank 1"):
_SparseToDense([1, 3], [[5], [3]], 1, -1)
@test_util.run_deprecated_v1
def testBadValue(self):
with self.cached_session():
dense = _SparseToDense([1, 3], [5], [[5], [3]], -1)
with self.assertRaisesOpError(
r"sparse_values has incorrect shape \[2,1\], "
r"should be \[\] or \[2\]"):
self.evaluate(dense)
@test_util.run_deprecated_v1
def testBadNumValues(self):
with self.cached_session():
dense = _SparseToDense([1, 3], [5], [1, 2, 3], -1)
with self.assertRaisesOpError(
r"sparse_values has incorrect shape \[3\], should be \[\] or \[2\]"):
self.evaluate(dense)
@test_util.run_deprecated_v1
def testBadDefault(self):
with self.cached_session():
dense = _SparseToDense([1, 3], [5], [1, 2], [0])
with self.assertRaisesOpError("default_value should be a scalar"):
self.evaluate(dense)
@test_util.run_deprecated_v1
def testOutOfBoundsIndicesWithWithoutValidation(self):
with self.cached_session():
dense = _SparseToDense(
sparse_indices=[[1], [10]],
output_size=[5],
sparse_values=[-1.0, 1.0],
default_value=0.0)
with self.assertRaisesOpError(
r"indices\[1\] = \[10\] is out of bounds: need 0 <= index < \[5\]"):
self.evaluate(dense)
# Disable checks, the allocation should still fail.
with self.assertRaisesOpError("out of bounds"):
dense_without_validation = _SparseToDense(
sparse_indices=[[1], [10]],
output_size=[5],
sparse_values=[-1.0, 1.0],
default_value=0.0,
validate_indices=False)
self.evaluate(dense_without_validation)
@test_util.run_deprecated_v1
def testRepeatingIndicesWithWithoutValidation(self):
with self.cached_session():
dense = _SparseToDense(
sparse_indices=[[1], [1]],
output_size=[5],
sparse_values=[-1.0, 1.0],
default_value=0.0)
with self.assertRaisesOpError(r"indices\[1\] = \[1\] is repeated"):
self.evaluate(dense)
# Disable checks
dense_without_validation = _SparseToDense(
sparse_indices=[[1], [1]],
output_size=[5],
sparse_values=[-1.0, 1.0],
default_value=0.0,
validate_indices=False)
self.evaluate(dense_without_validation)
@test_util.run_deprecated_v1
def testUnsortedIndicesWithWithoutValidation(self):
with self.cached_session():
dense = _SparseToDense(
sparse_indices=[[2], [1]],
output_size=[5],
sparse_values=[-1.0, 1.0],
default_value=0.0)
with self.assertRaisesOpError(r"indices\[1\] = \[1\] is out of order"):
self.evaluate(dense)
# Disable checks
dense_without_validation = _SparseToDense(
sparse_indices=[[2], [1]],
output_size=[5],
sparse_values=[-1.0, 1.0],
default_value=0.0,
validate_indices=False)
self.evaluate(dense_without_validation)
@test_util.run_deprecated_v1
def testShapeInferenceKnownShape(self):
with self.session(use_gpu=False):
indices = array_ops.placeholder(dtypes.int64)
shape = [4, 5, 6]
output = sparse_ops.sparse_to_dense(indices, shape, 1, 0)
self.assertEqual(output.get_shape(), [4, 5, 6])
shape = array_ops.placeholder(dtypes.int64, shape=(3,))
output = sparse_ops.sparse_to_dense(indices, shape, 1, 0)
self.assertEqual(output.get_shape().as_list(), [None, None, None])
@test_util.run_deprecated_v1
def testShapeInferenceUnknownShape(self):
with self.session(use_gpu=False):
indices = array_ops.placeholder(dtypes.int64)
shape = array_ops.placeholder(dtypes.int64)
output = sparse_ops.sparse_to_dense(indices, shape, 1, 0)
self.assertEqual(output.get_shape().ndims, None)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_to_dense_op_py_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sparse_ops.sparse_tensor_dense_matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import app
from tensorflow.python.platform import test
def _maybe_complex(x):
if x.dtype.kind == "c": # complex
return (x + 1j * x) / 2
return x
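# Note on _maybe_complex above: for complex dtypes it returns (x + 1j*x) / 2,
# giving equal real and imaginary parts from the same draw, so the sparsity
# pattern (entries where x == 0) is preserved while still exercising the
# complex code paths, e.g. an input value of 0.5 becomes 0.25+0.25j.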
class SparseTensorDenseMatMulTest(test.TestCase):
def _testMatmul(self,
x,
y,
adjoint_a=False,
adjoint_b=False,
indices_dtype=np.int64):
x_mat = np.matrix(x)
if adjoint_a:
x_mat = x_mat.H
y_mat = np.matrix(y)
if adjoint_b:
y_mat = y_mat.H
np_ans = x_mat * y_mat
x_indices = np.vstack(np.where(x)).astype(indices_dtype).T
x_values = x[np.where(x)]
x_shape = x.shape
with self.cached_session(use_gpu=True):
sp_x_value = sparse_tensor.SparseTensorValue(
indices=x_indices, values=x_values, dense_shape=x_shape)
tf_value_ans = sparse_ops.sparse_tensor_dense_matmul(
sp_x_value, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
tf_tensor_ans = sparse_ops.sparse_tensor_dense_matmul(
sparse_tensor.SparseTensor.from_value(sp_x_value),
y,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
# Ensure that the RHS shape is known at least.
self.assertEqual(tf_value_ans.get_shape()[1], np_ans.shape[1])
self.assertEqual(tf_tensor_ans.get_shape()[1], np_ans.shape[1])
for out in (self.evaluate(tf_value_ans), self.evaluate(tf_tensor_ans)):
if x.dtype == np.float32:
self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-4)
elif x.dtype == np.float64:
self.assertAllClose(np_ans, out, rtol=1e-6, atol=1e-6)
else:
self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-4)
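# Summary of the conversion used in _testMatmul above: a dense NumPy array x
# is split into SparseTensor components via
#
#   x_indices = np.vstack(np.where(x)).astype(indices_dtype).T  # [nnz, ndims]
#   x_values = x[np.where(x)]                                   # [nnz]
#   x_shape = x.shape                                           # dense_shape
#
# so only the nonzero entries survive; the thresholding in _testBasic below
# is what makes the operand sparse in the first place.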
def _testBasic(self, value_dtype, indices_dtype=np.int64):
x = _maybe_complex(np.random.rand(10, 10).astype(value_dtype))
x[np.abs(x) < 0.5] = 0 # Make it sparse
y = _maybe_complex(np.random.randn(10, 20).astype(value_dtype))
self._testMatmul(x, y, indices_dtype=indices_dtype)
@test_util.run_deprecated_v1
def testBasic(self):
np.random.seed(127) # Repeatable results
self._testBasic(np.int32)
self._testBasic(np.float32)
self._testBasic(np.float64)
self._testBasic(np.complex64)
self._testBasic(np.complex128)
self._testBasic(np.int32, indices_dtype=np.int32)
self._testBasic(np.float32, indices_dtype=np.int32)
@test_util.run_deprecated_v1
def testShapeInference(self):
x = np.random.rand(10, 10)
x[np.abs(x) < 0.5] = 0 # Make it sparse
y = np.random.randn(10, 20)
x_indices = np.vstack(np.where(x)).astype(np.int64).T
x_values = x[np.where(x)]
x_shape = x.shape
x_st = sparse_tensor.SparseTensor(x_indices, x_values, x_shape)
result = sparse_ops.sparse_tensor_dense_matmul(x_st, y)
self.assertEqual(result.get_shape(), (10, 20))
x_shape_unknown = array_ops.placeholder(dtype=dtypes.int64, shape=None)
x_st_shape_unknown = sparse_tensor.SparseTensor(x_indices, x_values,
x_shape_unknown)
result_left_shape_unknown = sparse_ops.sparse_tensor_dense_matmul(
x_st_shape_unknown, y)
self.assertEqual(result_left_shape_unknown.get_shape().as_list(),
[None, 20])
x_shape_inconsistent = [10, 15]
x_st_shape_inconsistent = sparse_tensor.SparseTensor(x_indices, x_values,
x_shape_inconsistent)
with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
sparse_ops.sparse_tensor_dense_matmul(x_st_shape_inconsistent, y)
@test_util.deprecated_graph_mode_only
def testInvalidIndicesForSparseTensorDenseMatmul(self):
# Note: use_gpu=False because nice errors are only returned from CPU kernel.
with self.session(use_gpu=False):
indices = np.matrix([[1, 10]]).astype(np.int64)
values = np.array([10]).astype(np.float32)
shape = [3, 2]
sparse_t = sparse_tensor.SparseTensor(indices, values, shape)
# Test multiplying by both a small and large dense matrix, to hit
# both cases in the kernel.
dense_t = np.matrix([[1] * 5, [2] * 5], dtype=np.float32)
with self.assertRaisesOpError(
"k .10. from index.0,1. out of bounds .>=2."):
self.evaluate(sparse_ops.sparse_tensor_dense_matmul(sparse_t, dense_t))
dense_t = np.matrix([[1] * 500, [2] * 500], dtype=np.float32)
with self.assertRaisesOpError(
"k .10. from index.0,1. out of bounds .>=2."):
self.evaluate(sparse_ops.sparse_tensor_dense_matmul(sparse_t, dense_t))
# Repeat with adjoint_a, to get a different error.
dense_t = np.matrix([[1] * 5, [2] * 5, [3] * 5], dtype=np.float32)
with self.assertRaisesOpError(
"m .10. from index.0,1. out of bounds .>=2."):
self.evaluate(
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t, adjoint_a=True))
dense_t = np.matrix([[1] * 500, [2] * 500, [3] * 500], dtype=np.float32)
with self.assertRaisesOpError(
"m .10. from index.0,1. out of bounds .>=2."):
self.evaluate(
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t, adjoint_a=True))
def testInvalidIndicesForSparseTensorDenseMatmulOnGPU(self):
# Note: the GPU kernel does not return nice errors for invalid indices; the
# output instead contains NaN rows or silently dropped contributions, which
# is what this test verifies.
if not test.is_gpu_available():
return
with self.session(use_gpu=True):
indices = np.array([[1, 10]]).astype(np.int64)
values = np.array([10]).astype(np.float32)
shape = [3, 2]
sparse_t = sparse_tensor.SparseTensor(indices, values, shape)
# Test multiplying by both a small and large dense matrix, to hit
# both cases in the kernel.
dense_t = np.matrix([[1] * 5, [2] * 5], dtype=np.float32)
expected_t = np.array([[0] * 5, [np.nan] * 5, [0] * 5], dtype=np.float32)
self.assertAllClose(expected_t,
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t))
dense_t = np.matrix([[1] * 500, [2] * 500], dtype=np.float32)
expected_t = np.array(
[[0] * 500, [np.nan] * 500, [0] * 500], dtype=np.float32)
self.assertAllClose(expected_t,
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t))
# Repeat with adjoint_a; now the sparse index is out of bounds w.r.t. the
# output. The GPU kernel can't do much here, so it just doesn't accumulate.
dense_t = np.matrix([[1] * 5, [2] * 5, [3] * 5], dtype=np.float32)
expected_t = np.array([[0] * 5, [0] * 5], dtype=np.float32)
self.assertAllClose(expected_t,
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t, adjoint_a=True))
dense_t = np.matrix([[1] * 500, [2] * 500, [3] * 500], dtype=np.float32)
expected_t = np.array([[0] * 500, [0] * 500], dtype=np.float32)
self.assertAllClose(expected_t,
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t, adjoint_a=True))
# Tests setting one dimension to be a high value.
def _testLarge(self, np_dtype):
r1 = np.random.randint(6000, 20000)
r2 = np.random.randint(1, 10)
r3 = np.random.randint(1, 10)
for m, k, n in [(r1, r2, r3),
(r2, r1, r3),
(r2, r3, r1)]:
x = _maybe_complex(np.random.rand(m, k).astype(np_dtype))
x[np.abs(x) < 0.8] = 0
y = _maybe_complex(np.random.randn(k, n).astype(np_dtype))
self._testMatmul(x, y, adjoint_a=False, adjoint_b=False)
self._testMatmul(x.transpose(), y, adjoint_a=True, adjoint_b=False)
self._testMatmul(x, y.transpose(), adjoint_a=False, adjoint_b=True)
self._testMatmul(
x.transpose(), y.transpose(), adjoint_a=True, adjoint_b=True)
@test_util.run_deprecated_v1
def testLarge(self):
np.random.seed(127)  # Repeatable results
self._testLarge(np.float32)
self._testLarge(np.float64)
self._testLarge(np.complex64)
self._testLarge(np.complex128)
# Tests random sized matrices.
@test_util.run_deprecated_v1
def testFloatRandom(self):
np.random.seed(127) # Repeatable results
for _ in range(8):
for adjoint_a in [True, False]:
for adjoint_b in [True, False]:
for thresh in [0.0, 0.2, 0.8, 1.0]:
n, k, m = np.random.randint(1, 100, size=3)
x = np.random.rand(n, k).astype(np.float32)
x[x < thresh] = 0 # Make it sparse
y = np.random.randn(k, m).astype(np.float32)
x = x.transpose() if adjoint_a else x
y = y.transpose() if adjoint_b else y
self._testMatmul(x, y, adjoint_a, adjoint_b)
def _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(x, y, adjoint_a,
adjoint_b):
def body(t, prev):
with ops.control_dependencies([prev]):
return (t + 1, math_ops.matmul(
x,
y,
transpose_a=adjoint_a,
transpose_b=adjoint_b,
a_is_sparse=True,
b_is_sparse=False))
t0 = constant_op.constant(0)
v0 = constant_op.constant(0.0)
def _timeit(iterations, _):
(_, final) = control_flow_ops.while_loop(
lambda t, _: t < iterations,
body, (t0, v0),
parallel_iterations=1,
back_prop=False,
shape_invariants=(tensor_shape.TensorShape(()),
tensor_shape.TensorShape(None)))
return [final]
return _timeit
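# Hedged note on the benchmark helpers: each while_loop body chains its
# matmul behind the previous iteration's result via ops.control_dependencies,
# so the iterations execute sequentially and cannot be pruned, and a single
# session.run amortizes launch overhead over all iterations, e.g.
#
#   ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(
#       x_t, y_t, adjoint_a=False, adjoint_b=False)
#   sess.run(ops_fn(200, sess))  # one run executes 200 chained matmuls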
def _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(x_ind, x_val, x_shape,
y, adjoint_a,
adjoint_b):
sp_x = sparse_tensor.SparseTensor(
indices=x_ind, values=x_val, dense_shape=x_shape)
def body(t, prev):
with ops.control_dependencies([prev]):
return (t + 1, sparse_ops.sparse_tensor_dense_matmul(
sp_x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b))
t0 = constant_op.constant(0)
v0 = constant_op.constant(0.0)
def _timeit(iterations, _):
(_, final) = control_flow_ops.while_loop(
lambda t, _: t < iterations,
body, (t0, v0),
parallel_iterations=1,
back_prop=False,
shape_invariants=(tensor_shape.TensorShape(()),
tensor_shape.TensorShape(None)))
return [final]
return _timeit
def sparse_tensor_dense_vs_dense_matmul_benchmark(thresh,
m,
k,
n,
adjoint_a,
adjoint_b,
use_gpu,
skip_dense=False):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Configurable for benchmarking:
# config.intra_op_parallelism_threads = 100
# config.gpu_options.per_process_gpu_memory_fraction = 0.3
np.random.seed([6, 117]) # Reproducibility
x = np.random.rand(m, k).astype(np.float32)
x[x < thresh] = 0
y = np.random.randn(k, n).astype(np.float32)
if adjoint_a:
x = x.T
if adjoint_b:
y = y.T
def _timer(sess, ops_fn, iterations):
# Warm up
sess.run(ops_fn(10, sess))
# Timing run
start = time.time()
sess.run(ops_fn(iterations, sess))
end = time.time()
return (end - start) / (1.0 * iterations) # Average runtime per iteration
# Using regular matmul, marking one of the matrices as dense.
if skip_dense:
delta_dense = float("nan")
else:
with session.Session(config=config, graph=ops.Graph()) as sess:
if not use_gpu:
with ops.device("/cpu:0"):
x_t = constant_op.constant(x)
y_t = constant_op.constant(y)
ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(
x_t, y_t, adjoint_a, adjoint_b)
else:
with ops.device("/device:GPU:0"):
x_t = constant_op.constant(x)
y_t = constant_op.constant(y)
ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(
x_t, y_t, adjoint_a, adjoint_b)
delta_dense = _timer(sess, ops_fn, 200)
# Using sparse_tensor_dense_matmul.
with session.Session("", config=config, graph=ops.Graph()) as sess:
if not use_gpu:
with ops.device("/cpu:0"):
x_ind = constant_op.constant(np.vstack(np.where(x)).astype(np.int64).T)
x_val = constant_op.constant(x[np.where(x)])
x_shape = constant_op.constant(np.array(x.shape).astype(np.int64))
y_t = constant_op.constant(y)
ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(
x_ind, x_val, x_shape, y_t, adjoint_a, adjoint_b)
else:
with ops.device("/device:GPU:0"):
x_ind = constant_op.constant(np.vstack(np.where(x)).astype(np.int64).T)
x_val = constant_op.constant(x[np.where(x)])
x_shape = constant_op.constant(np.array(x.shape).astype(np.int64))
y_t = constant_op.constant(y)
ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(
x_ind, x_val, x_shape, y_t, adjoint_a, adjoint_b)
delta_sparse = _timer(sess, ops_fn, 200)
print("%g \t %d \t %s \t %d \t %d \t %g \t %g \t %g" %
(1 - thresh, n, use_gpu, m, k, delta_dense, delta_sparse,
delta_sparse / delta_dense))
def main(_):
print("DenseDense MatMul (w/ Sparse Flag) vs. SparseTensorDense MatMul")
print("Matrix sizes:")
print(" A sparse [m, k] with % nonzero values between 1% and 80%")
print(" B dense [k, n]")
print("")
print("% nnz \t n \t gpu \t m \t k \t dt(dense) \t dt(sparse) "
"\t dt(sparse)/dt(dense)")
for thresh in (0.99, 0.8, 0.5, 0.2):
for n in (50, 100):
for use_gpu in (True, False):
for m in (100, 1000):
for k in (100, 1000):
sparse_tensor_dense_vs_dense_matmul_benchmark(
thresh, m, k, n, False, False, use_gpu=use_gpu)
# Enable for large-scale benchmarks; these take a long time to run.
#
# for use_gpu in (True, False):
# sparse_tensor_dense_vs_dense_matmul_benchmark(
# thresh=0.99, m=1000000, k=1000, n=100, adjoint_a=False,
# adjoint_b=False, use_gpu=use_gpu, skip_dense=True)
if __name__ == "__main__":
if "--benchmarks" in sys.argv:
sys.argv.remove("--benchmarks")
app.run()
else:
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.logging_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class StringFormatOpTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testFormatOneTensorOneDim(self):
with self.cached_session():
tensor = math_ops.range(10)
format_output = string_ops.string_format("{}", tensor)
out = self.evaluate(format_output)
expected = "[0 1 2 ... 7 8 9]"
self.assertEqual(compat.as_text(out), expected)
with self.cached_session():
tensor = math_ops.range(10)
format_output = string_ops.string_format("{}", [tensor])
out = self.evaluate(format_output)
expected = "[0 1 2 ... 7 8 9]"
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatOneVariableScalar(self):
with self.cached_session():
var = variables.Variable(3.34)
format_output = string_ops.string_format("{}", [var])
if not context.executing_eagerly():
variables.global_variables_initializer().run()
out = self.evaluate(format_output)
expected = "3.34"
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatOneVariableOneDim(self):
with self.cached_session():
var = variables.Variable(math_ops.range(10))
format_output = string_ops.string_format("{}", [var])
if not context.executing_eagerly():
variables.global_variables_initializer().run()
out = self.evaluate(format_output)
expected = "[0 1 2 ... 7 8 9]"
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatTwoVariablesWithAssignAdd(self):
with self.cached_session():
var_one = variables.Variable(2.14)
plus_one = var_one.assign_add(1.0)
var_two = variables.Variable(math_ops.range(10))
format_output = string_ops.string_format("{}, {}", [var_one, var_two])
if not context.executing_eagerly():
variables.global_variables_initializer().run()
self.evaluate(plus_one)
out = self.evaluate(format_output)
expected = "3.14, [0 1 2 ... 7 8 9]"
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatOneTensorOneDimFloat(self):
with self.cached_session():
tensor = constant_op.constant([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7])
format_output = string_ops.string_format("{}", tensor)
out = self.evaluate(format_output)
expected = "[0 0.1 0.2 ... 0.5 0.6 0.7]"
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatOneTensorOneDimMatchesSummarize(self):
with self.cached_session():
tensor = math_ops.range(6)
format_output = string_ops.string_format("{}", tensor, summarize=3)
out = self.evaluate(format_output)
expected = "[0 1 2 3 4 5]"
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatOneTensorOneDimVarySummarize(self):
with self.cached_session():
tensor = math_ops.range(6)
format_output = string_ops.string_format("{}", tensor, summarize=-1)
out = self.evaluate(format_output)
expected = "[0 1 2 3 4 5]"
self.assertEqual(compat.as_text(out), expected)
with self.cached_session():
tensor = math_ops.range(6)
format_output = string_ops.string_format("{}", tensor, summarize=1)
out = self.evaluate(format_output)
expected = "[0 ... 5]"
self.assertEqual(compat.as_text(out), expected)
with self.cached_session():
tensor = math_ops.range(6)
format_output = string_ops.string_format("{}", tensor, summarize=2)
out = self.evaluate(format_output)
expected = "[0 1 ... 4 5]"
self.assertEqual(compat.as_text(out), expected)
with self.cached_session():
tensor = math_ops.range(6)
format_output = string_ops.string_format("{}", tensor, summarize=10)
out = self.evaluate(format_output)
expected = "[0 1 2 3 4 5]"
self.assertEqual(compat.as_text(out), expected)
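# Hedged summary of the cases above: summarize=k keeps the first and last k
# entries of each dimension and elides the middle with "...", summarize=-1
# prints the full tensor, and the default behaves like summarize=3, e.g.
#
#   string_ops.string_format("{}", math_ops.range(6), summarize=2)
#   # -> "[0 1 ... 4 5]"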
@test_util.run_in_graph_and_eager_modes()
def testFormatOneTensorOneDimAlmostSummarize(self):
with self.cached_session():
tensor = math_ops.range(5)
format_output = string_ops.string_format("{}", tensor, summarize=3)
out = self.evaluate(format_output)
expected = "[0 1 2 3 4]"
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatOneTensorTwoDimLessThanSummarize(self):
with self.cached_session():
tensor = array_ops.reshape(math_ops.range(4), [2, 2])
format_output = string_ops.string_format("{}", tensor, summarize=3)
out = self.evaluate(format_output)
expected = ("[[0 1]\n"
" [2 3]]")
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatOneTensorTwoDim(self):
with self.cached_session():
tensor = array_ops.reshape(math_ops.range(100), [10, 10])
format_output = string_ops.string_format("{}", tensor)
out = self.evaluate(format_output)
expected = ("[[0 1 2 ... 7 8 9]\n"
" [10 11 12 ... 17 18 19]\n"
" [20 21 22 ... 27 28 29]\n"
" ...\n"
" [70 71 72 ... 77 78 79]\n"
" [80 81 82 ... 87 88 89]\n"
" [90 91 92 ... 97 98 99]]")
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatOneTensorTwoDimSummarizeTwo(self):
with self.cached_session():
tensor = array_ops.reshape(math_ops.range(100), [10, 10])
format_output = string_ops.string_format("{}", tensor, summarize=2)
out = self.evaluate(format_output)
expected = ("[[0 1 ... 8 9]\n"
" [10 11 ... 18 19]\n"
" ...\n"
" [80 81 ... 88 89]\n"
" [90 91 ... 98 99]]")
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatOneTensorThreeDim(self):
with self.cached_session():
tensor = array_ops.reshape(math_ops.range(1000), [10, 10, 10])
format_output = string_ops.string_format("{}", tensor)
out = self.evaluate(format_output)
expected = ("[[[0 1 2 ... 7 8 9]\n"
" [10 11 12 ... 17 18 19]\n"
" [20 21 22 ... 27 28 29]\n"
" ...\n"
" [70 71 72 ... 77 78 79]\n"
" [80 81 82 ... 87 88 89]\n"
" [90 91 92 ... 97 98 99]]\n"
"\n"
" [[100 101 102 ... 107 108 109]\n"
" [110 111 112 ... 117 118 119]\n"
" [120 121 122 ... 127 128 129]\n"
" ...\n [170 171 172 ... 177 178 179]\n"
" [180 181 182 ... 187 188 189]\n"
" [190 191 192 ... 197 198 199]]\n"
"\n"
" [[200 201 202 ... 207 208 209]\n"
" [210 211 212 ... 217 218 219]\n"
" [220 221 222 ... 227 228 229]\n"
" ...\n"
" [270 271 272 ... 277 278 279]\n"
" [280 281 282 ... 287 288 289]\n"
" [290 291 292 ... 297 298 299]]\n"
"\n"
" ...\n"
"\n"
" [[700 701 702 ... 707 708 709]\n"
" [710 711 712 ... 717 718 719]\n"
" [720 721 722 ... 727 728 729]\n"
" ...\n"
" [770 771 772 ... 777 778 779]\n"
" [780 781 782 ... 787 788 789]\n"
" [790 791 792 ... 797 798 799]]\n"
"\n"
" [[800 801 802 ... 807 808 809]\n"
" [810 811 812 ... 817 818 819]\n"
" [820 821 822 ... 827 828 829]\n"
" ...\n"
" [870 871 872 ... 877 878 879]\n"
" [880 881 882 ... 887 888 889]\n"
" [890 891 892 ... 897 898 899]]\n"
"\n"
" [[900 901 902 ... 907 908 909]\n"
" [910 911 912 ... 917 918 919]\n"
" [920 921 922 ... 927 928 929]\n"
" ...\n"
" [970 971 972 ... 977 978 979]\n"
" [980 981 982 ... 987 988 989]\n"
" [990 991 992 ... 997 998 999]]]")
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatOneTensorTemplatePrefix(self):
with self.cached_session():
tensor = array_ops.reshape(math_ops.range(100), [10, 10])
format_output = string_ops.string_format("tensor summary: {}", tensor)
out = self.evaluate(format_output)
expected = ("tensor summary: [[0 1 2 ... 7 8 9]\n"
" [10 11 12 ... 17 18 19]\n"
" [20 21 22 ... 27 28 29]\n"
" ...\n"
" [70 71 72 ... 77 78 79]\n"
" [80 81 82 ... 87 88 89]\n"
" [90 91 92 ... 97 98 99]]")
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatOneTensorTemplatePrefixAndSuffix(self):
with self.cached_session():
tensor = array_ops.reshape(math_ops.range(100), [10, 10])
format_output = string_ops.string_format("tensor summary: {}, suffix",
tensor)
out = self.evaluate(format_output)
expected = ("tensor summary: [[0 1 2 ... 7 8 9]\n"
" [10 11 12 ... 17 18 19]\n"
" [20 21 22 ... 27 28 29]\n"
" ...\n"
" [70 71 72 ... 77 78 79]\n"
" [80 81 82 ... 87 88 89]\n"
" [90 91 92 ... 97 98 99]], suffix")
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatOneTensorTemplateSuffix(self):
with self.cached_session():
tensor = array_ops.reshape(math_ops.range(100), [10, 10])
format_output = string_ops.string_format("{}, suffix", tensor)
out = self.evaluate(format_output)
expected = ("[[0 1 2 ... 7 8 9]\n"
" [10 11 12 ... 17 18 19]\n"
" [20 21 22 ... 27 28 29]\n"
" ...\n"
" [70 71 72 ... 77 78 79]\n"
" [80 81 82 ... 87 88 89]\n"
" [90 91 92 ... 97 98 99]], suffix")
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatNoTensor(self):
with self.cached_session():
format_output = string_ops.string_format("No tensor.", ())
out = self.evaluate(format_output)
expected = "No tensor."
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatMultiTensor(self):
with self.cached_session():
tensor_one = array_ops.reshape(math_ops.range(100), [10, 10])
tensor_two = tensor_one * 10
format_output = string_ops.string_format("One: {},\nTwo: {}",
(tensor_one, tensor_two))
out = self.evaluate(format_output)
expected = ("One: [[0 1 2 ... 7 8 9]\n"
" [10 11 12 ... 17 18 19]\n"
" [20 21 22 ... 27 28 29]\n"
" ...\n"
" [70 71 72 ... 77 78 79]\n"
" [80 81 82 ... 87 88 89]\n"
" [90 91 92 ... 97 98 99]],\n"
"Two: [[0 10 20 ... 70 80 90]\n"
" [100 110 120 ... 170 180 190]\n"
" [200 210 220 ... 270 280 290]\n"
" ...\n"
" [700 710 720 ... 770 780 790]\n"
" [800 810 820 ... 870 880 890]\n"
" [900 910 920 ... 970 980 990]]")
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatSummarizeOne(self):
with self.cached_session():
tensor = array_ops.reshape(math_ops.range(100), [10, 10])
format_output = string_ops.string_format("tensor summary: {}", tensor,
summarize=1)
out = self.evaluate(format_output)
expected = ("tensor summary: [[0 ... 9]\n"
" ...\n"
" [90 ... 99]]")
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatSummarizeTwo(self):
with self.cached_session():
tensor = array_ops.reshape(math_ops.range(100), [10, 10])
format_output = string_ops.string_format("tensor summary: {}", tensor,
summarize=2)
out = self.evaluate(format_output)
expected = ("tensor summary: [[0 1 ... 8 9]\n"
" [10 11 ... 18 19]\n"
" ...\n"
" [80 81 ... 88 89]\n"
" [90 91 ... 98 99]]")
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testFormatPlaceholder(self):
with self.cached_session():
tensor = array_ops.reshape(math_ops.range(100), [10, 10])
format_output = string_ops.string_format("tensor summary: %t%", tensor,
placeholder="%t%")
out = self.evaluate(format_output)
expected = ("tensor summary: [[0 1 2 ... 7 8 9]\n"
" [10 11 12 ... 17 18 19]\n"
" [20 21 22 ... 27 28 29]\n"
" ...\n"
" [70 71 72 ... 77 78 79]\n"
" [80 81 82 ... 87 88 89]\n"
" [90 91 92 ... 97 98 99]]")
self.assertEqual(compat.as_text(out), expected)
@test_util.run_in_graph_and_eager_modes()
def testTensorCountMustMatchPlaceholderCount(self):
with self.cached_session():
with self.assertRaisesRegexp(
ValueError, r"2 placeholder\(s\) in template does not match 1 "
r"tensor\(s\) provided as input"):
tensor = math_ops.range(10)
format_output = string_ops.string_format("{} {}", tensor)
self.evaluate(format_output)
with self.cached_session():
with self.assertRaisesRegexp(
ValueError, r"2 placeholder\(s\) in template does not match 1 "
r"tensor\(s\) provided as input"):
tensor = math_ops.range(10)
format_output = string_ops.string_format("{} {}", [tensor])
self.evaluate(format_output)
with self.cached_session():
with self.assertRaisesRegexp(
ValueError, r"1 placeholder\(s\) in template does not match 2 "
r"tensor\(s\) provided as input"):
tensor = math_ops.range(10)
format_output = string_ops.string_format("{}", (tensor, tensor))
self.evaluate(format_output)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/string_format_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.logging_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import string
import sys
import tempfile
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class LoggingOpsTest(test.TestCase):
@test_util.run_deprecated_v1
def testAssertDivideByZero(self):
with self.cached_session() as sess:
epsilon = ops.convert_to_tensor(1e-20)
x = ops.convert_to_tensor(0.0)
y = ops.convert_to_tensor(1.0)
z = ops.convert_to_tensor(2.0)
# assert(epsilon < y)
# z / y
with sess.graph.control_dependencies([
control_flow_ops.Assert(
math_ops.less(epsilon, y), ["Divide-by-zero"])
]):
out = math_ops.div(z, y)
self.assertAllEqual(2.0, self.evaluate(out))
# assert(epsilon < x)
# z / x
#
# This tests printing out multiple tensors
with sess.graph.control_dependencies([
control_flow_ops.Assert(
math_ops.less(epsilon, x), ["Divide-by-zero", "less than x"])
]):
out = math_ops.div(z, x)
with self.assertRaisesOpError("less than x"):
self.evaluate(out)
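# Hedged sketch of the guard pattern exercised above: an Assert only fires
# when it is executed, so it is attached to the op it protects through a
# control dependency, roughly
#
#   guard = control_flow_ops.Assert(math_ops.less(epsilon, y), ["message"])
#   with ops.control_dependencies([guard]):
#     out = math_ops.div(z, y)  # evaluating `out` also runs the guard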
class PrintV2Test(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testPrintOneTensor(self):
with self.cached_session():
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor)
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9]"
self.assertTrue((expected + "\n") in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintOneStringTensor(self):
with self.cached_session():
tensor = ops.convert_to_tensor([char for char in string.ascii_lowercase])
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor)
self.evaluate(print_op)
expected = "[\"a\" \"b\" \"c\" ... \"x\" \"y\" \"z\"]"
self.assertIn((expected + "\n"), printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintOneTensorVarySummarize(self):
with self.cached_session():
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, summarize=1)
self.evaluate(print_op)
expected = "[0 ... 9]"
self.assertTrue((expected + "\n") in printed.contents())
with self.cached_session():
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, summarize=2)
self.evaluate(print_op)
expected = "[0 1 ... 8 9]"
self.assertTrue((expected + "\n") in printed.contents())
with self.cached_session():
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, summarize=3)
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9]"
self.assertTrue((expected + "\n") in printed.contents())
with self.cached_session():
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, summarize=-1)
self.evaluate(print_op)
expected = "[0 1 2 3 4 5 6 7 8 9]"
self.assertTrue((expected + "\n") in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintOneVariable(self):
with self.cached_session():
var = variables.Variable(math_ops.range(10))
if not context.executing_eagerly():
variables.global_variables_initializer().run()
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(var)
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9]"
self.assertTrue((expected + "\n") in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintTwoVariablesInStructWithAssignAdd(self):
with self.cached_session():
var_one = variables.Variable(2.14)
plus_one = var_one.assign_add(1.0)
var_two = variables.Variable(math_ops.range(10))
if not context.executing_eagerly():
variables.global_variables_initializer().run()
with self.captureWritesToStream(sys.stderr) as printed:
self.evaluate(plus_one)
print_op = logging_ops.print_v2(var_one, {"second": var_two})
self.evaluate(print_op)
expected = "3.14 {'second': [0 1 2 ... 7 8 9]}"
self.assertTrue((expected + "\n") in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintTwoTensors(self):
with self.cached_session():
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, tensor * 10)
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9] [0 10 20 ... 70 80 90]"
self.assertTrue((expected + "\n") in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintTwoTensorsDifferentSep(self):
with self.cached_session():
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, tensor * 10, sep="<separator>")
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9]<separator>[0 10 20 ... 70 80 90]"
self.assertIn(expected + "\n", printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintPlaceholderGeneration(self):
with self.cached_session():
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2("{}6", {"{}": tensor * 10})
self.evaluate(print_op)
expected = "{}6 {'{}': [0 10 20 ... 70 80 90]}"
self.assertTrue((expected + "\n") in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintNoTensors(self):
with self.cached_session():
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(23, [23, 5], {"6": 12})
self.evaluate(print_op)
expected = "23 [23, 5] {'6': 12}"
self.assertTrue((expected + "\n") in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintFloatScalar(self):
with self.cached_session():
tensor = ops.convert_to_tensor(434.43)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor)
self.evaluate(print_op)
expected = "434.43"
self.assertTrue((expected + "\n") in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintStringScalar(self):
with self.cached_session():
tensor = ops.convert_to_tensor("scalar")
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor)
self.evaluate(print_op)
expected = "scalar"
self.assertTrue((expected + "\n") in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintStringScalarDifferentEnd(self):
with self.cached_session():
tensor = ops.convert_to_tensor("scalar")
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, end="<customend>")
self.evaluate(print_op)
expected = "scalar<customend>"
self.assertIn(expected, printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintComplexTensorStruct(self):
with self.cached_session():
tensor = math_ops.range(10)
small_tensor = constant_op.constant([0.3, 12.4, -16.1])
big_tensor = math_ops.mul(tensor, 10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(
"first:", tensor, "middle:",
{"small": small_tensor, "Big": big_tensor}, 10,
[tensor * 2, tensor])
self.evaluate(print_op)
# Note that the keys in the dict will always be sorted,
# so 'Big' comes before 'small'
expected = ("first: [0 1 2 ... 7 8 9] "
"middle: {'Big': [0 10 20 ... 70 80 90], "
"'small': [0.3 12.4 -16.1]} "
"10 [[0 2 4 ... 14 16 18], [0 1 2 ... 7 8 9]]")
self.assertTrue((expected + "\n") in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintSparseTensor(self):
with self.cached_session():
ind = [[0, 0], [1, 0], [1, 3], [4, 1], [1, 4], [3, 2], [3, 3]]
val = [0, 10, 13, 4, 14, 32, 33]
shape = [5, 6]
sparse = sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(sparse)
self.evaluate(print_op)
expected = ("'SparseTensor(indices=[[0 0]\n"
" [1 0]\n"
" [1 3]\n"
" ...\n"
" [1 4]\n"
" [3 2]\n"
" [3 3]], values=[0 10 13 ... 14 32 33], shape=[5 6])'")
self.assertTrue((expected + "\n") in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintSparseTensorInDataStruct(self):
with self.cached_session():
ind = [[0, 0], [1, 0], [1, 3], [4, 1], [1, 4], [3, 2], [3, 3]]
val = [0, 10, 13, 4, 14, 32, 33]
shape = [5, 6]
sparse = sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2([sparse])
self.evaluate(print_op)
expected = ("['SparseTensor(indices=[[0 0]\n"
" [1 0]\n"
" [1 3]\n"
" ...\n"
" [1 4]\n"
" [3 2]\n"
" [3 3]], values=[0 10 13 ... 14 32 33], shape=[5 6])']")
self.assertTrue((expected + "\n") in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintOneTensorStdout(self):
with self.cached_session():
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stdout) as printed:
print_op = logging_ops.print_v2(
tensor, output_stream=sys.stdout)
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9]"
self.assertTrue((expected + "\n") in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintTensorsToFile(self):
tmpfile_name = tempfile.mktemp(".printv2_test")
tensor_0 = math_ops.range(0, 10)
print_op_0 = logging_ops.print_v2(tensor_0,
output_stream="file://"+tmpfile_name)
self.evaluate(print_op_0)
tensor_1 = math_ops.range(11, 20)
print_op_1 = logging_ops.print_v2(tensor_1,
output_stream="file://"+tmpfile_name)
self.evaluate(print_op_1)
try:
f = open(tmpfile_name, "r")
line_0 = f.readline()
expected_0 = "[0 1 2 ... 7 8 9]"
self.assertTrue(expected_0 in line_0)
line_1 = f.readline()
expected_1 = "[11 12 13 ... 17 18 19]"
self.assertTrue(expected_1 in line_1)
f.close()
os.remove(tmpfile_name)
except IOError as e:
self.fail(e)
@test_util.run_in_graph_and_eager_modes()
def testInvalidOutputStreamRaisesError(self):
with self.cached_session():
tensor = math_ops.range(10)
with self.assertRaises(ValueError):
print_op = logging_ops.print_v2(
tensor, output_stream="unknown")
self.evaluate(print_op)
@test_util.run_deprecated_v1
def testPrintOpName(self):
with self.cached_session():
tensor = math_ops.range(10)
print_op = logging_ops.print_v2(tensor, name="print_name")
self.assertEqual(print_op.name, "print_name")
@test_util.run_deprecated_v1
def testNoDuplicateFormatOpGraphModeAfterExplicitFormat(self):
with self.cached_session():
tensor = math_ops.range(10)
formatted_string = string_ops.string_format("{}", tensor)
print_op = logging_ops.print_v2(formatted_string)
self.evaluate(print_op)
graph_ops = ops.get_default_graph().get_operations()
format_ops = [op for op in graph_ops if op.type == "StringFormat"]
# Should be only 1 format_op for graph mode.
self.assertEqual(len(format_ops), 1)
def testPrintOneTensorEagerOnOpCreate(self):
with self.cached_session():
with context.eager_mode():
tensor = math_ops.range(10)
expected = "[0 1 2 ... 7 8 9]"
with self.captureWritesToStream(sys.stderr) as printed:
logging_ops.print_v2(tensor)
self.assertTrue((expected + "\n") in printed.contents())
def testPrintsOrderedInDefun(self):
with context.eager_mode():
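      # Ops inside a defun receive automatic control dependencies, so the
      # three prints below are expected to execute in program order.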
@function.defun
def prints():
logging_ops.print_v2("A")
logging_ops.print_v2("B")
logging_ops.print_v2("C")
with self.captureWritesToStream(sys.stderr) as printed:
prints()
self.assertTrue(("A\nB\nC\n") in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintInDefunWithoutExplicitEvalOfPrint(self):
@function.defun
def f():
tensor = math_ops.range(10)
logging_ops.print_v2(tensor)
return tensor
expected = "[0 1 2 ... 7 8 9]"
with self.captureWritesToStream(sys.stderr) as printed_one:
x = f()
self.evaluate(x)
self.assertTrue((expected + "\n") in printed_one.contents())
# We execute the function again to make sure it doesn't only print on the
# first call.
with self.captureWritesToStream(sys.stderr) as printed_two:
y = f()
self.evaluate(y)
self.assertTrue((expected + "\n") in printed_two.contents())
class PrintGradientTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testPrintShape(self):
inp = constant_op.constant(2.0, shape=[100, 32])
inp_printed = logging_ops.Print(inp, [inp])
self.assertEqual(inp.get_shape(), inp_printed.get_shape())
def testPrintString(self):
inp = constant_op.constant(2.0, shape=[100, 32])
inp_printed = logging_ops.Print(inp, ["hello"])
self.assertEqual(inp.get_shape(), inp_printed.get_shape())
@test_util.run_deprecated_v1
def testPrintGradient(self):
with self.cached_session():
inp = constant_op.constant(2.0, shape=[100, 32], name="in")
w = constant_op.constant(4.0, shape=[10, 100], name="w")
wx = math_ops.matmul(w, inp, name="wx")
wx_print = logging_ops.Print(wx, [w, w, w])
wx_grad = gradients_impl.gradients(wx, w)[0]
wx_print_grad = gradients_impl.gradients(wx_print, w)[0]
wxg = self.evaluate(wx_grad)
wxpg = self.evaluate(wx_print_grad)
self.assertAllEqual(wxg, wxpg)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/logging_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Conv2DTransposeTest(test.TestCase):
def testConv2DTransposeSingleStride(self):
with self.cached_session():
strides = [1, 1, 1, 1]
# Input, output: [batch, height, width, depth]
x_shape = [2, 6, 4, 3]
y_shape = [2, 6, 4, 2]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
# We count the number of cells being added at the locations in the output.
# At the center, #cells=kernel_height * kernel_width
# At the corners, #cells=ceil(kernel_height/2) * ceil(kernel_width/2)
# At the borders, #cells=ceil(kernel_height/2)*kernel_width or
# kernel_height * ceil(kernel_width/2)
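      # For example, with a 3x3 filter of ones and 3 input channels of ones,
      # corner cells sum to 4 * 3.0 = 12, border cells to 6 * 3.0 = 18, and
      # interior cells to 9 * 3.0 = 27, matching the targets computed below.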
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[2]):
for h in xrange(y_shape[1]):
target = 4 * 3.0
h_in = h > 0 and h < y_shape[1] - 1
w_in = w > 0 and w < y_shape[2] - 1
if h_in and w_in:
target += 5 * 3.0
elif h_in or w_in:
target += 2 * 3.0
self.assertAllClose(target, value[n, h, w, k])
def testConv2DTransposeSame(self):
with self.cached_session():
strides = [1, 2, 2, 1]
# Input, output: [batch, height, width, depth]
x_shape = [2, 6, 4, 3]
y_shape = [2, 12, 8, 2]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[2]):
for h in xrange(y_shape[1]):
target = 3.0
# We add a case for locations divisible by the stride.
h_in = h % strides[1] == 0 and h > 0 and h < y_shape[1] - 1
w_in = w % strides[2] == 0 and w > 0 and w < y_shape[2] - 1
if h_in and w_in:
target += 9.0
elif h_in or w_in:
target += 3.0
self.assertAllClose(target, value[n, h, w, k])
def testConv2DTransposeValid(self):
with self.cached_session():
strides = [1, 2, 2, 1]
# Input, output: [batch, height, width, depth]
x_shape = [2, 6, 4, 3]
y_shape = [2, 13, 9, 2]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
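      # For VALID padding, the transposed convolution's output spatial size is
      # (input - 1) * stride + kernel: (6 - 1) * 2 + 3 = 13 and
      # (4 - 1) * 2 + 3 = 9, matching y_shape above.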
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="VALID")
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(pad, y_shape[2] - pad):
for h in xrange(pad, y_shape[1] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
              h_in = h % strides[1] == 0 and pad < h < y_shape[1] - 1 - pad
              w_in = w % strides[2] == 0 and pad < w < y_shape[2] - 1 - pad
if h_in and w_in:
target += 9.0
elif h_in or w_in:
target += 3.0
cache_values[n, h, w, k] = target
# copy values in the border
cache_values[n, :, 0, k] = cache_values[n, :, 1, k]
cache_values[n, :, -1, k] = cache_values[n, :, -2, k]
cache_values[n, 0, :, k] = cache_values[n, 1, :, k]
cache_values[n, -1, :, k] = cache_values[n, -2, :, k]
self.assertAllClose(cache_values, value)
@test_util.run_deprecated_v1
def testGradient(self):
x_shape = [2, 6, 4, 3]
f_shape = [3, 3, 2, 3]
y_shape = [2, 12, 8, 2]
strides = [1, 2, 2, 1]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
f_val = np.random.random_sample(f_shape).astype(np.float64)
with self.cached_session():
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
print("conv2d_transpose gradient err = %g " % err)
err_tolerance = 0.0005
self.assertLess(err, err_tolerance)
def testConv2DTransposeSingleStrideNCHW(self):
# `NCHW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
strides = [1, 1, 1, 1]
        # Input, output: [batch, depth, height, width]
x_shape = [2, 3, 6, 4]
y_shape = [2, 2, 6, 4]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")
value = self.evaluate(output)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
target = 4 * 3.0
h_in = h > 0 and h < y_shape[2] - 1
w_in = w > 0 and w < y_shape[3] - 1
if h_in and w_in:
target += 5 * 3.0
elif h_in or w_in:
target += 2 * 3.0
self.assertAllClose(target, value[n, k, h, w])
def testConv2DTransposeSameNCHW(self):
# `NCHW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
strides = [1, 1, 2, 2]
# Input, output: [batch, depth, height, width]
x_shape = [2, 3, 6, 4]
y_shape = [2, 2, 12, 8]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")
value = self.evaluate(output)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
target = 3.0
# We add a case for locations divisible by the stride.
h_in = h % strides[2] == 0 and h > 0 and h < y_shape[2] - 1
w_in = w % strides[3] == 0 and w > 0 and w < y_shape[3] - 1
if h_in and w_in:
target += 9.0
elif h_in or w_in:
target += 3.0
self.assertAllClose(target, value[n, k, h, w])
def testConv2DTransposeValidNCHW(self):
# `NCHW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
strides = [1, 1, 2, 2]
# Input, output: [batch, depth, height, width]
x_shape = [2, 3, 6, 4]
y_shape = [2, 2, 13, 9]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="VALID", data_format="NCHW")
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(pad, y_shape[3] - pad):
for h in xrange(pad, y_shape[2] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
                h_in = h % strides[2] == 0 and pad < h < y_shape[2] - 1 - pad
                w_in = w % strides[3] == 0 and pad < w < y_shape[3] - 1 - pad
if h_in and w_in:
target += 9.0
elif h_in or w_in:
target += 3.0
cache_values[n, k, h, w] = target
# copy values in the border
cache_values[n, k, :, 0] = cache_values[n, k, :, 1]
cache_values[n, k, :, -1] = cache_values[n, k, :, -2]
cache_values[n, k, 0, :] = cache_values[n, k, 1, :]
cache_values[n, k, -1, :] = cache_values[n, k, -2, :]
self.assertAllClose(cache_values, value)
def testConv2DTransposeShapeInference(self):
    # Test case for GitHub issue 8972.
initializer = random_ops.truncated_normal(
[3, 3, 5, 1], mean=0.0, stddev=0.01, dtype=dtypes.float32)
x = variables.Variable(random_ops.random_normal([3, 10, 5, 1]))
f = variable_scope.get_variable("f", initializer=initializer)
f_shape = array_ops.stack([array_ops.shape(x)[0], 10, 5, 5])
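    # Note: the third argument to conv2d_transpose here is the dynamic output
    # shape; shape inference should still yield a fully static [3, 10, 5, 5].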
output = nn_ops.conv2d_transpose(
x, f, f_shape, strides=[1, 1, 1, 1], padding="SAME")
self.assertEqual(output.get_shape().as_list(), [3, 10, 5, 5])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/conv2d_transpose_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF_CUDNN_DETERMINISTIC=true."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
# The TF_CUDNN_DETERMINISTIC flag disables autotuning of cuDNN algorithms and
# causes deterministic cuDNN algorithms to be selected when both deterministic
# and non-deterministic algorithms are available. These tests are intended to
# confirm that deterministic algorithms are chosen when
# TF_CUDNN_DETERMINISTIC=true. The configurations tested were confirmed to
# produce non-deterministic results when TF_CUDNN_DETERMINISTIC is not set.
_PADDING = 'SAME'
_STRIDES = [1, 1, 1, 1]
LayerShape = collections.namedtuple('LayerShape',
'batch, height, width, channels')
FilterShape = collections.namedtuple(
'FilterShape', 'height, width, in_channels, out_channels')
class ConvolutionTest(test.TestCase):
def _random_data_op(self, shape):
# np.random.random_sample can properly interpret either tf.TensorShape or
# namedtuple as a list.
return constant_op.constant(
2 * np.random.random_sample(shape) - 1, dtype=dtypes.float32)
def _random_out_op(self, in_shape, filter_shape):
    # Choosing not to use array_ops.zeros() to prevent possible removal by
    # graph optimization.
in_op = self._random_data_op(in_shape)
filter_op = self._random_data_op(filter_shape)
# Use the forward op's shape-inference
conv_op = nn_ops.conv2d(
in_op, filter_op, strides=_STRIDES, padding=_PADDING)
out_shape = conv_op.get_shape()
out_op = self._random_data_op(out_shape)
return out_op
def _assert_reproducible(self, operation):
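    # Evaluate the same op twice in one session and require bitwise-identical
    # results, which is expected only when a deterministic algorithm is used.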
with self.cached_session(force_gpu=True):
result_1 = self.evaluate(operation)
result_2 = self.evaluate(operation)
self.assertAllEqual(result_1, result_2)
@test_util.run_cuda_only
def testBackwardFilterGradient(self):
np.random.seed(1)
in_shape = LayerShape(batch=8, height=128, width=128, channels=8)
filter_shape = FilterShape(height=3, width=3, in_channels=8, out_channels=8)
in_op = self._random_data_op(in_shape)
out_op = self._random_out_op(in_shape, filter_shape)
filter_gradient_op = nn_ops.conv2d_backprop_filter(
in_op, filter_shape, out_op, strides=_STRIDES, padding=_PADDING)
self._assert_reproducible(filter_gradient_op)
@test_util.run_cuda_only
def testBackwardInputGradient(self):
np.random.seed(2)
in_shape = LayerShape(batch=8, height=32, width=32, channels=8)
filter_shape = FilterShape(
height=7, width=7, in_channels=8, out_channels=128)
filter_op = self._random_data_op(filter_shape)
out_op = self._random_out_op(in_shape, filter_shape)
input_gradient_op = nn_ops.conv2d_backprop_input(
in_shape, filter_op, out_op, strides=_STRIDES, padding=_PADDING)
self._assert_reproducible(input_gradient_op)
# TODO(duncanriach): (1) add test to confirm that forward autotuning is
# disabled for cuDNN convolution; (2) add test for deterministic cuDNN
# max-pooling
if __name__ == '__main__':
os.environ['TF_CUDNN_DETERMINISTIC'] = 'true'
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/cudnn_determinism_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeJpegOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
prefix_path = 'third_party/tensorflow/core/lib/jpeg/testdata'
class DecodeJpegBenchmark(test.Benchmark):
"""Evaluate tensorflow DecodeJpegOp performance."""
def _evalDecodeJpeg(self,
image_name,
parallelism,
num_iters,
crop_during_decode=None,
crop_window=None,
tile=None):
"""Evaluate DecodeJpegOp for the given image.
TODO(tanmingxing): add decoding+cropping as well.
Args:
image_name: a string of image file name (without suffix).
parallelism: the number of concurrent decode_jpeg ops to be run.
num_iters: number of iterations for evaluation.
crop_during_decode: If true, use fused DecodeAndCropJpeg instead of
separate decode and crop ops. It is ignored if crop_window is None.
crop_window: if not None, crop the decoded image. Depending on
crop_during_decode, cropping could happen during or after decoding.
tile: if not None, tile the image to composite a larger fake image.
Returns:
The duration of the run in seconds.
"""
ops.reset_default_graph()
image_file_path = os.path.join(prefix_path, image_name)
if tile is None:
image_content = variable_scope.get_variable(
'image_%s' % image_name,
initializer=io_ops.read_file(image_file_path))
else:
single_image = image_ops.decode_jpeg(
io_ops.read_file(image_file_path), channels=3, name='single_image')
# Tile the image to composite a new larger image.
tiled_image = array_ops.tile(single_image, tile)
image_content = variable_scope.get_variable(
'tiled_image_%s' % image_name,
initializer=image_ops.encode_jpeg(tiled_image))
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
images = []
for _ in xrange(parallelism):
if crop_window is None:
# No crop.
image = image_ops.decode_jpeg(image_content, channels=3)
elif crop_during_decode:
# combined decode and crop.
image = image_ops.decode_and_crop_jpeg(
image_content, crop_window, channels=3)
else:
# separate decode and crop.
image = image_ops.decode_jpeg(image_content, channels=3)
image = image_ops.crop_to_bounding_box(
image,
offset_height=crop_window[0],
offset_width=crop_window[1],
target_height=crop_window[2],
target_width=crop_window[3])
images.append(image)
r = control_flow_ops.group(*images)
for _ in xrange(3):
# Skip warm up time.
self.evaluate(r)
start_time = time.time()
for _ in xrange(num_iters):
self.evaluate(r)
end_time = time.time()
return end_time - start_time
def benchmarkDecodeJpegSmall(self):
"""Evaluate single DecodeImageOp for small size image."""
num_iters = 10
crop_window = [10, 10, 50, 50]
for parallelism in [1, 100]:
duration_decode = self._evalDecodeJpeg('small.jpg', parallelism,
num_iters)
duration_decode_crop = self._evalDecodeJpeg('small.jpg', parallelism,
num_iters, False, crop_window)
duration_decode_after_crop = self._evalDecodeJpeg(
'small.jpg', parallelism, num_iters, True, crop_window)
self.report_benchmark(
name='decode_jpeg_small_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode)
self.report_benchmark(
name='decode_crop_jpeg_small_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_crop)
self.report_benchmark(
name='decode_after_crop_jpeg_small_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_after_crop)
def benchmarkDecodeJpegMedium(self):
"""Evaluate single DecodeImageOp for medium size image."""
num_iters = 10
crop_window = [10, 10, 50, 50]
for parallelism in [1, 100]:
duration_decode = self._evalDecodeJpeg('medium.jpg', parallelism,
num_iters)
duration_decode_crop = self._evalDecodeJpeg('medium.jpg', parallelism,
num_iters, False, crop_window)
duration_decode_after_crop = self._evalDecodeJpeg(
'medium.jpg', parallelism, num_iters, True, crop_window)
self.report_benchmark(
name='decode_jpeg_medium_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode)
self.report_benchmark(
name='decode_crop_jpeg_medium_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_crop)
self.report_benchmark(
name='decode_after_crop_jpeg_medium_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_after_crop)
def benchmarkDecodeJpegLarge(self):
"""Evaluate single DecodeImageOp for large size image."""
num_iters = 10
crop_window = [10, 10, 50, 50]
tile = [4, 4, 1]
for parallelism in [1, 100]:
# Tile the medium size image to composite a larger fake image.
      # Pass tile by keyword; the positional parameter after num_iters is
      # crop_during_decode, not tile.
      duration_decode = self._evalDecodeJpeg('medium.jpg', parallelism,
                                              num_iters, tile=tile)
duration_decode_crop = self._evalDecodeJpeg(
'medium.jpg', parallelism, num_iters, False, crop_window, tile)
duration_decode_after_crop = self._evalDecodeJpeg(
'medium.jpg', parallelism, num_iters, True, crop_window, tile)
self.report_benchmark(
name='decode_jpeg_large_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode)
self.report_benchmark(
name='decode_crop_jpeg_large_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_crop)
self.report_benchmark(
name='decode_after_crop_jpeg_large_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_after_crop)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/decode_jpeg_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.cast."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sys
import platform
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class CastOpTest(test.TestCase):
def _toDataType(self, dtype):
"""Returns TensorFlow data type for numpy type."""
if dtype == np.float32:
return dtypes.float32
elif dtype == np.float64:
return dtypes.float64
elif dtype == np.int32:
return dtypes.int32
elif dtype == np.int64:
return dtypes.int64
elif dtype == np.bool:
return dtypes.bool
elif dtype == np.complex64:
return dtypes.complex64
elif dtype == np.complex128:
return dtypes.complex128
else:
return None
def _cast(self, x, dtype, use_gpu=False):
with self.cached_session(use_gpu=use_gpu):
val = constant_op.constant(x, self._toDataType(np.array([x]).dtype))
return math_ops.cast(val, self._toDataType(dtype), name="cast").eval()
def _test(self, x, dtype, use_gpu=False):
"""Tests cast(x) to dtype behaves the same as numpy.astype."""
np_ans = x.astype(dtype)
tf_ans = self._cast(x, dtype, use_gpu)
self.assertAllEqual(np_ans, tf_ans)
def _testTypes(self, x, use_gpu=False):
"""Tests cast(x) to different tf."""
if use_gpu:
type_list = [
np.float32, np.float64, np.int64, np.complex64, np.complex128
]
else:
type_list = [
np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]
for from_type in type_list:
for to_type in type_list:
self._test(x.astype(from_type), to_type, use_gpu)
self._test(x.astype(np.bool), np.float32, use_gpu)
self._test(x.astype(np.uint8), np.float32, use_gpu)
if not use_gpu:
self._test(x.astype(np.bool), np.int32, use_gpu)
self._test(x.astype(np.int32), np.int32, use_gpu)
def _testAll(self, x):
self._testTypes(x, use_gpu=False)
if x.dtype == np.float32 or x.dtype == np.float64:
self._testTypes(x, use_gpu=True)
@test_util.run_deprecated_v1
def testBasic(self):
self._testAll(np.arange(-10, 10).reshape(2, 10))
self._testAll(np.linspace(-10, 10, 17))
@test_util.run_deprecated_v1
def testSmallValues(self):
f4 = np.finfo(np.float32)
f8 = np.finfo(np.float64)
self._testAll(
np.array([
0, -1, 1, -f4.resolution, f4.resolution, f8.resolution,
-f8.resolution
]))
def testBfloat16(self):
a = np.random.uniform(-100, 100, 100).astype(np.float32)
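    # bfloat16 keeps only 8 significand bits, so rtol=1/128 comfortably covers
    # the float32 -> bfloat16 -> float32 round-trip rounding error.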
with self.cached_session(use_gpu=False):
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, self.evaluate(b), rtol=1 / 128.)
with self.cached_session(use_gpu=True):
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, self.evaluate(b), rtol=1 / 128.)
@test_util.run_deprecated_v1
def testRandom(self):
self._testAll(np.random.normal(0, 10, 210).reshape([2, 3, 5, 7]))
self._testAll(np.random.normal(0, 1e6, 210).reshape([2, 3, 5, 7]))
  # Special values like int32max, int64min, inf, -inf, and nan are cast to
  # integer values in somewhat unexpected ways, and they behave differently
  # on CPU and GPU.
def _compare(self, x, dst_dtype, expected, use_gpu=False):
np.testing.assert_equal(
self._cast(
x, dst_dtype, use_gpu=use_gpu), dst_dtype(expected))
@test_util.run_deprecated_v1
def testIntToFloatBoundary(self):
i4 = np.iinfo(np.int32)
i8 = np.iinfo(np.int64)
self._compare(i4.min, np.float32, i4.min, False)
self._compare(i4.max, np.float32, i4.max, False)
self._compare(i8.min, np.float32, i8.min, False)
self._compare(i8.max, np.float32, i8.max, False)
self._compare(i4.min, np.float64, i4.min, False)
self._compare(i4.max, np.float64, i4.max, False)
self._compare(i8.min, np.float64, i8.min, False)
self._compare(i8.max, np.float64, i8.max, False)
# NOTE: GPU does not support int32/int64 for casting.
@test_util.run_deprecated_v1
def testInfNan(self):
i4 = np.iinfo(np.int32)
i8 = np.iinfo(np.int64)
self._compare(np.inf, np.float32, np.inf, False)
self._compare(np.inf, np.float64, np.inf, False)
if sys.byteorder == "big":
self._compare(np.inf, np.int32, i4.max, False)
self._compare(np.inf, np.int64, i8.max, False)
else:
# np.float64("np.inf").astype(np.int32) is negative on x86 but positive on ppc64le
# Numpy link to relevant discussion - https://github.com/numpy/numpy/issues/9040
# Tensorflow link to relevant discussion - https://github.com/tensorflow/tensorflow/issues/9360
if platform.machine() == "ppc64le" or platform.machine() == "aarch64":
self._compare(-np.inf, np.int32, i4.min, False)
self._compare(-np.inf, np.int64, i8.min, False)
else:
self._compare(np.inf, np.int32, i4.min, False)
self._compare(np.inf, np.int64, i8.min, False)
self._compare(-np.inf, np.float32, -np.inf, False)
self._compare(-np.inf, np.float64, -np.inf, False)
self._compare(-np.inf, np.int32, i4.min, False)
self._compare(-np.inf, np.int64, i8.min, False)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, False)), True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, False)), True)
# np.float64(np.nan).astype(np.int32) is 0 on ARM
if platform.machine() == "aarch64":
self._compare(np.nan, np.int32, 0, False)
self._compare(np.nan, np.int64, 0, False)
else:
self._compare(np.nan, np.int32, i4.min, False)
self._compare(np.nan, np.int64, i8.min, False)
self._compare(np.inf, np.float32, np.inf, True)
self._compare(np.inf, np.float64, np.inf, True)
self._compare(-np.inf, np.float32, -np.inf, True)
self._compare(-np.inf, np.float64, -np.inf, True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, True)), True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, True)), True)
def _OpError(self, x, dtype, err):
with self.cached_session():
with self.assertRaisesOpError(err):
math_ops.cast(x, dtype).eval()
def testNotImplemented(self):
self._OpError(np.arange(0, 10), dtypes.string, "Cast.*int64.*string.*")
@test_util.run_deprecated_v1
def testCastToTypeOfVariable(self):
with self.cached_session() as sess:
x = variables.Variable(5, dtype=dtypes.float32)
y = variables.Variable(True, dtype=dtypes.bool)
cast = math_ops.cast(y, x.dtype)
variables.global_variables_initializer().run()
self.assertEqual(1.0, self.evaluate(cast))
@test_util.run_deprecated_v1
def testGradients(self):
t = [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
for src_t in t:
for dst_t in t:
with self.cached_session():
x = constant_op.constant(1.0, src_t)
z = array_ops.identity(x)
y = math_ops.cast(z, dst_t)
err = gradient_checker.compute_gradient_error(x, [], y, [])
self.assertLess(err, 1e-3)
class SparseTensorCastTest(test.TestCase):
@test_util.run_deprecated_v1
def testCast(self):
indices = constant_op.constant([[0], [1], [2]], dtypes.int64)
values = constant_op.constant(np.array([1, 2, 3], np.int64))
shape = constant_op.constant([3], dtypes.int64)
st = sparse_tensor.SparseTensor(indices, values, shape)
st_cast = math_ops.cast(st, dtypes.float32)
with self.cached_session():
self.assertAllEqual(st_cast.indices.eval(), [[0], [1], [2]])
self.assertAllEqual(st_cast.values.eval(),
np.array([1, 2, 3], np.float32))
self.assertAllEqual(st_cast.dense_shape.eval(), [3])
class SaturateCastTest(test.TestCase):
def testSaturate(self):
in_types = dtypes.float32,
out_types = dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.float32
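    # saturate_cast clamps values to the representable range of the output
    # dtype before casting, which is what the reference computation checks.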
with self.cached_session() as sess:
for in_type in in_types:
for out_type in out_types:
lo, hi = in_type.min, in_type.max
x = constant_op.constant(
[lo, lo + 1, lo // 2, hi // 2, hi - 1, hi], dtype=in_type)
y = math_ops.saturate_cast(x, dtype=out_type)
self.assertEqual(y.dtype, out_type)
x, y = self.evaluate([x, y])
correct = np.maximum(out_type.min, np.minimum(out_type.max, x))
self.assertAllEqual(correct, y)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/cast_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tensor_array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _make_converter(tf_dtype):
def _converter(x):
if tf_dtype == dtypes.string:
# In Python3, np.str is unicode, while we always want bytes
return np.asarray(x).astype("|S")
x = np.asarray(x).astype(tf_dtype.as_numpy_dtype)
if tf_dtype.is_complex:
# Add a non-zero imaginary component to x.
x -= 1j * x
return x
return _converter
def _make_ta(size, name, dtype=dtypes.float32, infer_shape=False):
return tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name=name, size=size, infer_shape=infer_shape)
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class TensorArrayTest(test.TestCase):
@classmethod
def setUpClass(cls):
super(TensorArrayTest, cls).setUpClass()
cls._workers, _ = test.create_local_cluster(num_workers=3, num_ps=0)
@classmethod
def tearDownClass(cls):
super(TensorArrayTest, cls).tearDownClass()
session_lib.Session.reset(cls._workers[0].target)
@test_util.run_in_graph_and_eager_modes
def testTensorArrayWriteRead(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
def _testTensorArrayWritePack(self, tf_dtype):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
convert = _make_converter(tf_dtype)
w0 = ta.write(0, convert([[4.0, 5.0]]))
w1 = w0.write(1, convert([[6.0, 7.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.stack()
c0 = self.evaluate(c0)
self.assertAllEqual(
convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0)
def _testTensorArrayWritePackMaybeLegacy(self):
self._testTensorArrayWritePack(dtypes.float32)
self._testTensorArrayWritePack(dtypes.float64)
self._testTensorArrayWritePack(dtypes.int32)
self._testTensorArrayWritePack(dtypes.int64)
self._testTensorArrayWritePack(dtypes.complex64)
self._testTensorArrayWritePack(dtypes.complex128)
self._testTensorArrayWritePack(dtypes.string)
def testTensorArrayWritePack(self):
self._testTensorArrayWritePackMaybeLegacy()
def testEmptyTensorArrayPack(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
empty_element = np.zeros((0, 1), dtype=np.float32)
w0 = ta.write(0, empty_element)
w1 = w0.write(1, empty_element)
w2 = w1.write(2, empty_element)
c0 = w2.stack()
c0 = self.evaluate(c0)
self.assertAllEqual([3, 0, 1], c0.shape)
def _testTensorArrayWriteConcat(self, tf_dtype):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)
convert = _make_converter(tf_dtype)
w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0]]))
w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.concat()
c0 = self.evaluate(c0)
self.assertAllEqual(
convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0], [6.0, 7.0],
[106.0, 107.0], [8.0, 9.0]]), c0)
@test_util.deprecated_graph_mode_only
def testTensorArrayWriteConcat(self):
self._testTensorArrayWriteConcat(dtypes.float32)
self._testTensorArrayWriteConcat(dtypes.float64)
self._testTensorArrayWriteConcat(dtypes.int32)
self._testTensorArrayWriteConcat(dtypes.int64)
self._testTensorArrayWriteConcat(dtypes.complex64)
self._testTensorArrayWriteConcat(dtypes.complex128)
self._testTensorArrayWriteConcat(dtypes.string)
def _testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
element_shape=tensor_shape.TensorShape([1, 2]))
self.assertAllEqual([[0.0, 0.0]], self.evaluate(ta.read(0)))
self.assertAllEqual([[[0.0, 0.0]], [[4.0, 5.0]], [[0.0, 0.0]]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).stack()))
self.assertAllEqual([[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).concat()))
@test_util.run_v1_only("b/122324791")
def testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self):
self._testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros()
def _testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros(self):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3)
self.assertAllEqual(
[[0.0, 0.0]], self.evaluate(ta.write(1, [[4.0, 5.0]]).read(0)))
self.assertAllEqual([[[0.0, 0.0]], [[4.0, 5.0]], [[0.0, 0.0]]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).stack()))
self.assertAllEqual([[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).concat()))
@test_util.run_v1_only("b/122324791")
def testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros(self):
self._testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros()
@test_util.run_v1_only("Uses placeholders")
def testSkipEagerTensorArrayReadUninitializedInferShapeFillsZeros(self):
with self.cached_session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3)
val = array_ops.placeholder(dtypes.float32)
self.assertAllEqual(
[[0.0, 0.0]], sess.run(ta.write(1, val).read(0), {val: [[4.0, 5.0]]}))
def _testTensorArrayUnpackRead(self, tf_dtype):
with self.cached_session(use_gpu=True):
convert = _make_converter(tf_dtype)
ta = _make_ta(3, "foo", dtype=tf_dtype)
# Unpack a vector into scalars
w0 = ta.unstack(convert([1.0, 2.0, 3.0]))
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert(1.0), d0)
self.assertAllEqual(convert(2.0), d1)
self.assertAllEqual(convert(3.0), d2)
# Unpack a matrix into vectors
w1 = ta.unstack(convert([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]))
r0 = w1.read(0)
r1 = w1.read(1)
r2 = w1.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([1.0, 1.1]), d0)
self.assertAllEqual(convert([2.0, 2.1]), d1)
self.assertAllEqual(convert([3.0, 3.1]), d2)
# Try unpacking an empty matrix, which should not cause an error.
w2 = ta.unstack(convert([[], [], []]))
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
def _testTensorArrayUnpackReadMaybeLegacy(self):
self._testTensorArrayUnpackRead(dtypes.float32)
self._testTensorArrayUnpackRead(dtypes.float64)
self._testTensorArrayUnpackRead(dtypes.int32)
self._testTensorArrayUnpackRead(dtypes.int64)
self._testTensorArrayUnpackRead(dtypes.complex64)
self._testTensorArrayUnpackRead(dtypes.complex128)
self._testTensorArrayUnpackRead(dtypes.string)
def testTensorArrayUnpackRead(self):
self._testTensorArrayUnpackReadMaybeLegacy()
def _testTensorArraySplitRead(self, tf_dtype):
with self.cached_session(use_gpu=True):
convert = _make_converter(tf_dtype)
# Split an empty vector
ta = _make_ta(3, "foo", dtype=tf_dtype)
lengths = constant_op.constant([0, 0, 0])
w0 = ta.split(convert([]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
# Split a vector
lengths = constant_op.constant([2, 0, 1])
w0 = ta.split(convert([1.0, 2.0, 3.0]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([1.0, 2.0]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([3.0]), d2)
# Split a matrix
lengths = constant_op.constant([2, 0, 1])
w0 = ta.split(
convert([[1.0, 101.0], [2.0, 201.0], [3.0, 301.0]]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([[1.0, 101.0], [2.0, 201.0]]), d0)
self.assertAllEqual(convert([]).reshape(0, 2), d1)
self.assertAllEqual(convert([[3.0, 301.0]]), d2)
@test_util.deprecated_graph_mode_only
def testTensorArraySplitRead(self):
self._testTensorArraySplitRead(dtypes.float32)
self._testTensorArraySplitRead(dtypes.float64)
self._testTensorArraySplitRead(dtypes.int32)
self._testTensorArraySplitRead(dtypes.int64)
self._testTensorArraySplitRead(dtypes.complex64)
self._testTensorArraySplitRead(dtypes.complex128)
self._testTensorArraySplitRead(dtypes.string)
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorGradArrayWriteRead(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
g_ta = ta.grad("grad")
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2 = session.run([r0, r1, r2, g_r0, g_r1, g_r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayGradGrad(self):
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.skipTest("Legacy TensorArray does not support double derivatives.")
with self.test_session(use_gpu=True) as session:
x = constant_op.constant(4.0)
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=1,
infer_shape=False)
w0 = ta.write(0, x)
r0 = w0.read(0)
y = r0 * r0
g1 = gradients_impl.gradients(ys=[y], xs=[x])
g2 = gradients_impl.gradients(ys=[g1], xs=[x])
self.assertAllEqual([2.0], session.run(g2))
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorGradArrayDynamicWriteRead(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_ta = w2.grad("grad") # Get gradient array here so we know the shape
s = w2.size()
g_s = g_ta.size()
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = session.run(
[r0, r1, r2, g_r0, g_r1, g_r2, s, g_s])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
self.assertAllEqual(3, vs)
self.assertAllEqual(3, g_vs)
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorGradAccessTwiceReceiveSameObject(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
g_ta_0 = ta.grad("grad")
g_ta_1 = ta.grad("grad")
with ops.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
# Write with one gradient handle, read with another copy of it
r1_0 = g_ta_1.read(0)
t_g_ta_0, t_g_ta_1, d_r1_0 = session.run(
[g_ta_0.handle.op, g_ta_1.handle.op, r1_0])
self.assertAllEqual(t_g_ta_0, t_g_ta_1)
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.session(use_gpu=True):
ta = _make_ta(3, "foo", dtype=dtypes.float32)
      # TODO(b/129870929): Remove the last 2 checks (runtime checks) after
      # switching back from preferred_dtype= to dtype= in convert_to_tensor.
      # Also restrict the error check to only TypeError.
error_msg_regex = (
"("
"Expected float32, got 'wrong_type_scalar' of type 'str' instead."
"|"
"Cannot convert provided value to EagerTensor. Provided value: "
"wrong_type_scalar Requested dtype: float"
"|"
"TensorArray dtype is float.* but Op is trying to write dtype string"
"|"
"Invalid data types; op elements string but list elements float"
")")
with self.assertRaisesRegexp(
(TypeError, errors.InvalidArgumentError), error_msg_regex):
self.evaluate(ta.write(0, "wrong_type_scalar").flow)
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to modify element -1 in a list with 3 elements."
else:
error_msg = "index -1"
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.write(-1, 3.0).flow)
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to modify element 3 in a list with 3 elements"
else:
error_msg = ("Tried to write to index 3 but array is not "
"resizeable and size is: 3")
# Test reading from too large an index
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.write(3, 3.0).flow)
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
with self.session(use_gpu=True):
ta = _make_ta(3, "foo", dtype=dtypes.float32)
w0 = ta.write(0, [[4.0, 5.0]])
# Test reading wrong datatype (only possible when constructing graphs).
if (not context.executing_eagerly() and
not control_flow_util.ENABLE_CONTROL_FLOW_V2):
r0_bad = gen_data_flow_ops.tensor_array_read_v3(
handle=w0.handle, index=0, dtype=dtypes.float64, flow_in=w0.flow)
with self.assertRaisesOpError(
"TensorArray dtype is float but Op requested dtype double."):
self.evaluate(r0_bad)
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to access element -1 in a list with 3 elements."
else:
error_msg = "index -1"
# Test reading from a negative index, which is not allowed
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.read(-1))
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to access element 3 in a list with 3 elements."
else:
error_msg = "Tried to read from index 3 but array size is: 3"
# Test reading from too large an index
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.read(3))
@test_util.disable_control_flow_v2("v2 allows multiple writes.")
@test_util.run_v1_only("v2 allows multiple writes.")
def testSkipEagerTensorArrayWriteMultipleFails(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
"Could not write to TensorArray index 2 because "
"it has already been written to."):
self.evaluate(ta.write(2, 3.0).write(2, 3.0).flow)
def testTensorArrayConcatIncompatibleShapesFails(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w1 = ta.write(0, 3.0)
w2 = w1.write(1, 4.0)
w3 = w2.write(2, [3.0])
with self.assertRaisesOpError(
"Concat saw a scalar shape at index 0 but requires at least vectors"):
self.evaluate(w3.concat())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w1 = ta.write(0, [3.0])
w2 = w1.write(1, [4.0])
w3 = w2.write(2, [[3.0]])
# The exact error messages differ between eager execution and graph
      # construction as the former bubbles up the error from array_ops.concat.
error_msg = ("Incompatible ranks"
if control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly() else "shape")
with self.assertRaisesRegexp(errors.InvalidArgumentError, error_msg):
self.evaluate(w3.concat())
def testTensorArraySplitIncompatibleShapesFails(self):
with self.session(use_gpu=True):
in_eager_mode = context.executing_eagerly()
ta = _make_ta(3, "foo")
with self.assertRaisesOpError(
r"Expected lengths to be a vector, received shape: \[\]"):
if in_eager_mode:
self.evaluate(ta.split([1.0, 2.0, 3.0], 1))
else:
lengths = array_ops.placeholder(dtypes.int64)
ta.split([1.0, 2.0, 3.0], lengths).flow.eval(feed_dict={lengths: 1})
error_msg = ("Unused values in tensor. Length of tensor: 3 Values used: 1"
if control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not in_eager_mode else
r"Expected sum of lengths to be equal to values.shape\[0\], "
r"but sum of lengths is 1 and value's shape is: \[3\]")
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.split([1.0, 2.0, 3.0], [1]).flow)
ta = _make_ta(1, "baz")
if control_flow_util.ENABLE_CONTROL_FLOW_V2 and not in_eager_mode:
with self.assertRaisesRegexp(
ValueError, "Shape must be at least rank 1 but is rank 0"):
self.evaluate(ta.split(1.0, [1]).flow)
else:
with self.assertRaisesOpError(
r"Expected value to be at least a vector, but received shape: \[\]"
):
self.evaluate(ta.split(1.0, [1]).flow)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2 or in_eager_mode:
ta = _make_ta(2, "buz")
with self.assertRaisesOpError(
r"TensorArray's size is not equal to the size of lengths "
r"\(2 vs. 1\), and the TensorArray is not marked as "
r"dynamically resizeable"):
self.evaluate(ta.split([1.0], [1]).flow)
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
ta_grad = ta.grad("grad")
c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)
w0 = ta.write(2, c(3.0))
w1 = w0.write(2, c(4.0))
w0_grad = ta_grad.write(2, c(3.0))
w1_grad = w0_grad.write(2, c(4.0))
w2_grad = w1_grad.write(2, c(5.0))
# Assert that aggregation works correctly
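      # Writes to the same gradient index accumulate: 3.0 + 4.0 + 5.0 = 12.0.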
self.assertAllEqual(c(12.00), w2_grad.read(2).eval())
# Assert that if multiple_writes_aggregate is not enabled,
# multiple writes raise an exception.
with self.assertRaisesOpError(
r"TensorArray foo_.*: Could not write to TensorArray index 2 because "
r"it has already been written to."):
w1.flow.eval()
# Using differing shapes causes an exception
wb0_grad = ta_grad.write(1, c(1.0))
wb1_grad = wb0_grad.write(1, c([1.0]))
with self.assertRaisesOpError(
r"Could not aggregate to TensorArray index 1 because the "
r"existing shape is \[\] but the new input shape is \[1\]"):
wb1_grad.flow.eval()
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorArrayWriteGradientAddMultipleAdds(self):
for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128):
self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
@test_util.disable_control_flow_v2("Low level legacy TA op test.")
@test_util.run_v1_only("Low level legacy TA op test.")
def testSkipEagerTensorArrayGradWithShapeKnownElementShape(self):
with self.session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
size=3,
dtype=dtypes.float32,
element_shape=tensor_shape.TensorShape([2, 3]))
handle, flow = data_flow_ops.tensor_array_grad_with_shape(
handle=ta.handle,
flow_in=ta.flow,
shape_to_prepend=tensor_shape.TensorShape([4, 5]),
source="source")
ta_grad = tensor_array_ops.TensorArray(
dtypes.float32, handle=handle, flow=flow)
value = array_ops.placeholder(dtypes.float32)
ta_grad = ta_grad.write(0, value)
read_value = ta_grad.read(0)
# Make sure shape inference worked.
self.assertAllEqual([None, None, 2, 3], read_value.shape.as_list())
# Writing with wrong shape should not work.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Could not write to TensorArray"):
fed_value = np.random.random([2, 3])
sess.run(read_value, feed_dict={value: fed_value})
# Writing with correct shape should work.
fed_value = np.random.random([4, 5, 2, 3])
self.assertAllClose(fed_value,
sess.run(read_value, feed_dict={value: fed_value}))
@test_util.disable_control_flow_v2("Low level legacy TA op test.")
@test_util.run_v1_only("Low level legacy TA op test.")
def testSkipEagerTensorArrayGradWithShapeUnknownElementShape(self):
with self.session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
size=3, dtype=dtypes.float32,
element_shape=None) # Note that element_shape is unknown
handle, flow = data_flow_ops.tensor_array_grad_with_shape(
handle=ta.handle,
flow_in=ta.flow,
shape_to_prepend=tensor_shape.TensorShape([4, 5]),
source="source")
ta_grad = tensor_array_ops.TensorArray(
dtypes.float32, handle=handle, flow=flow)
value = array_ops.placeholder(dtypes.float32)
ta_grad = ta_grad.write(0, value)
read_value = ta_grad.read(0)
# Make sure shape inference worked.
self.assertIsNone(read_value.shape.ndims)
# Write with some shape and check read value.
fed_value = np.random.random([4, 5, 7])
self.assertAllClose(fed_value,
sess.run(read_value, feed_dict={value: fed_value}))
def testMultiTensorArray(self):
with self.session(use_gpu=True):
h1 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="foo")
w1 = h1.write(0, 4.0)
r1 = w1.read(0)
h2 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="bar")
w2 = h2.write(0, 5.0)
r2 = w2.read(0)
r = r1 + r2
val = self.evaluate(r)
self.assertAllClose(9.0, val)
def _testTensorArrayGradientWriteReadType(self, dtype):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.as_dtype(dtype),
tensor_array_name="foo",
size=3,
infer_shape=False)
c = lambda x: np.array(x, dtype=dtype)
value_0 = constant_op.constant(c([[4.0, 5.0]]))
value_1 = constant_op.constant(c(3.0))
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
r0 = w1.read(0)
r1 = w1.read(1)
r0_2 = w1.read(0)
# Test individual components' gradients
grad_just_r0 = gradients_impl.gradients(
ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
grad_just_r0_vals = session.run(grad_just_r0)
self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
grad_r0_r0_2 = gradients_impl.gradients(
ys=[r0, r0_2],
xs=[value_0],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
grad_r0_r0_2_vals = session.run(grad_r0_r0_2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
grad_just_r1 = gradients_impl.gradients(
ys=[r1], xs=[value_1], grad_ys=[c(-2.0)])
grad_just_r1_vals = session.run(grad_just_r1)
self.assertAllEqual(c(-2.0), grad_just_r1_vals[0])
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r0, r0_2, r1],
xs=[value_0, value_1],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]]), c(-2.0)])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_vals[0])
self.assertAllEqual(c(-2.0), grad_vals[1])
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayGradientWriteRead(self):
for dtype in (np.float32, np.float64, np.complex64, np.complex128):
self._testTensorArrayGradientWriteReadType(dtype)
def _testTensorArrayGradientWritePackConcatAndRead(self):
with self.cached_session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value_0 = constant_op.constant([-1.0, 1.0])
value_1 = constant_op.constant([-10.0, 10.0])
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
p0 = w1.stack()
r0 = w1.read(0)
s0 = w1.concat()
# Test gradient accumulation between read(0), pack(), and concat()
with ops.control_dependencies([p0, r0, s0]):
grad_r = gradients_impl.gradients(
ys=[p0, r0, s0],
xs=[value_0, value_1],
grad_ys=[
[[2.0, 3.0], [4.0, 5.0]], # pack gradient
[-0.5, 1.5], # read(0) gradient
[20.0, 30.0, 40.0, 50.0]
]) # concat gradient
grad_vals = self.evaluate(grad_r) # 2 + 2 entries
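      # value_0's gradient sums the first pack-gradient row, the read(0)
      # gradient, and the first two concat-gradient entries; value_1's gradient
      # sums the second pack-gradient row and the last two concat-gradient
      # entries.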
self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayGradientWritePackConcatAndRead(self):
self._testTensorArrayGradientWritePackConcatAndRead()
@test_util.disable_control_flow_v2("v2 does not support clear_after_read.")
@test_util.run_v1_only("v2 does not support clear_after_read.")
def testTensorArrayReadTwice(self):
with self.session(use_gpu=True):
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
ta_readonce = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=2)
w_readonce = ta_readonce.unstack(value)
r0_readonce = w_readonce.read(0)
with self.assertRaisesOpError(
r"Could not read index 0 twice because it was cleared after a "
r"previous read \(perhaps try setting clear_after_read = false\?\)"):
with ops.control_dependencies([r0_readonce]):
self.evaluate(w_readonce.read(0))
ta_readtwice = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
w_readtwice = ta_readtwice.unstack(value)
r0_readtwice = w_readtwice.read(0)
with ops.control_dependencies([r0_readtwice]):
r1_readtwice = w_readtwice.read(0)
self.assertAllEqual([1.0, -1.0], self.evaluate(r1_readtwice))
def _testTensorArrayGradientUnpackRead(self):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
r0_1 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0)
grad = gradients_impl.gradients(
ys=[r0, r0_1, r1],
xs=[value],
grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayGradientUnpackRead(self):
self._testTensorArrayGradientUnpackRead()
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayGradientSplitConcat(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=2,
infer_shape=False)
value = constant_op.constant(
[[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w = ta.split(value, [2, 1])
r = w.concat()
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r],
xs=[value],
grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]],
grad_vals[0])
def _testTensorArrayGradientDynamicUnpackRead(self):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
r1 = w.read(1)
      # Test combined gradients
grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayGradientDynamicUnpackRead(self):
self._testTensorArrayGradientDynamicUnpackRead()
def testCloseTensorArray(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
self.evaluate(ta.close())
def testSizeTensorArray(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
s = ta.size()
self.assertAllEqual(3, self.evaluate(s))
def testWriteCloseTensorArray(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [3.0])
self.evaluate(w1.close()) # Expected to run without problems
def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
np_dtype = dtype.as_numpy_dtype
with self.cached_session(use_gpu=True):
def func(v0, state0, var):
ta = tensor_array_ops.TensorArray(
dtype=dtype,
tensor_array_name="foo",
size=0 if dynamic_size else 3,
dynamic_size=dynamic_size)
time_0 = array_ops.identity(0)
def body(time, ta_t, state):
sliced = array_ops.slice(
v0, begin=array_ops.stack([time, 0]), size=[1, -1])
sliced = array_ops.squeeze(sliced)
out = sliced + var + state
state += sliced
ta_t = ta_t.write(time, out)
return (time + 1, ta_t, state)
(unused_0, h_final, unused_2) = control_flow_ops.while_loop(
cond=lambda time, unused_1, unused_2: time < 3,
body=body,
loop_vars=(time_0, ta, state0),
shape_invariants=(time_0.get_shape(), tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()),
parallel_iterations=3)
vout = h_final.stack()
return vout
v0 = array_ops.identity(np.arange(3 * 5, dtype=np_dtype).reshape(3, 5))
state0 = array_ops.identity(np.array([1] * 5, dtype=np_dtype))
init_val = np.arange(100, 105, dtype=np_dtype)
var = variable_scope.get_variable(
"var",
shape=init_val.shape,
dtype=np_dtype,
initializer=init_ops.constant_initializer(init_val))
vout = func(v0, state0, var)
grad_val = -np.arange(3 * 5, dtype=np_dtype).reshape(3, 5)
if context.executing_eagerly():
grad_fn = backprop.gradients_function(func)
v0_grad, state0_grad, var_grad = grad_fn(v0, state0, var, dy=grad_val)
else:
v0_grad = gradients_impl.gradients([vout], [v0], [grad_val])[0]
state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]
var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]
self.evaluate(variables.global_variables_initializer())
state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
self.evaluate(
([state0, var, v0, vout, v0_grad, var_grad, state0_grad])))
just_v0_grad_t = self.evaluate(v0_grad)
# state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]
# vout = [ v0[0] + var + state[0] |
# v0[1] + var + state[1] |
# v0[2] + var + state[2] ]
# = [ v0[0] + var + state0 |
# v0[1] + var + state0 + v0[0] |
# v0[2] + var + state0 + v0[0] + v0[1] ]
#
# d(vout[0])/d(v0) = [1 | 0 | 0 ]
# d(vout[1])/d(v0) = [1 | 1 | 0 ]
# d(vout[2])/d(v0) = [1 | 1 | 1 ]
# d(vout)/d(var) = [1 | 1 | 1]
# d(vout)/d(state0) = [ 1 | 1 | 1 ]
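      # Hence row i of the gradient w.r.t. v0 accumulates the incoming
      # gradients from time step i and all later steps, while var and state0
      # each receive the sum of the incoming gradients over all time steps.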
state_per_time = np.array(
[state0_t, state0_t + v0_t[0, :], state0_t + v0_t[0, :] + v0_t[1, :]])
# Compare forward prop
self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
# Compare backward prop
expected_v0_grad_t = np.array([
grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
grad_val[1, :] + grad_val[2, :], grad_val[2, :]
])
self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
def testWhileLoopWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=False, dtype=dtypes.float32)
# TODO(ebrevdo): re-enable when While supports non-float32 gradients.
# self._testWhileLoopWritePackGradients(
# dynamic_size=False, dtype=tf.int64)
@test_util.run_v1_only("b/117943489")
def testSkipEagerWhileLoopDynamicWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=True, dtype=dtypes.float32)
def testGradSerialTwoLoops(self):
with self.session(use_gpu=True):
def loop(x):
num_steps = 100
acc = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
size=num_steps,
clear_after_read=False,
element_shape=tensor_shape.scalar())
i = constant_op.constant(0, name="i")
c = lambda i, acc: i < 5
def b(i, acc):
x1 = control_flow_ops.cond(
math_ops.equal(i, 0), lambda: x,
lambda: math_ops.multiply(acc.read(i - 1), 2.0))
return i + 1, acc.write(i, x1)
i1, acc1 = control_flow_ops.while_loop(c, b, [i, acc])
z = constant_op.constant(0.0)
def fn(i, acc):
return i + 1, acc.write(i, z)
_, acc2 = control_flow_ops.while_loop(lambda i, acc: i < num_steps, fn,
[i1, acc1])
r = acc2.stack()
return r
x = constant_op.constant(2.0, name="x")
if context.executing_eagerly():
grad = backprop.gradients_function(loop)(x)[0]
else:
grad = gradients_impl.gradients(loop(x), [x])[0]
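      # The first loop doubles the previous element, so acc holds
      # [x, 2x, 4x, 8x, 16x] after five steps; the second loop only writes a
      # constant. With an all-ones upstream gradient, d(sum)/dx is
      # 1 + 2 + 4 + 8 + 16 = 31.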
self.assertAllClose(31.0, self.evaluate(grad))
@test_util.deprecated_graph_mode_only
def testSkipEagerSumOfTwoReadVariablesWithoutRepeatGrad(self):
with self.session(use_gpu=True) as session:
a = array_ops.identity(
np.arange(
3 * 5, dtype=np.float32).reshape(3, 5) + 1)
b = array_ops.identity(
np.arange(
3 * 5, dtype=np.float32).reshape(3, 5) + 1 + 3 * 5)
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
ta = ta.write(0, a, name="write_a")
ta = ta.write(1, b, name="write_b")
c = (
ta.read(
0, name="read_a_0") + # a + b
ta.read(
1, name="read_b_0"))
g0 = -(np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
grad_a = gradients_impl.gradients([c], [a], [g0])[0] # d(a+b)/da = 1
grad_b = gradients_impl.gradients([c], [b], [g0])[0] # d(a+b)/db = 1
# Test gradients calculated individually
grad_a_t, = session.run([grad_a])
self.assertAllEqual(grad_a_t, g0)
grad_b_t, = session.run([grad_b])
self.assertAllEqual(grad_b_t, g0)
# Test gradients calculated jointly
joint_grad_a_t, joint_grad_b_t = session.run([grad_a, grad_b])
self.assertAllEqual(joint_grad_a_t, g0)
self.assertAllEqual(joint_grad_b_t, g0)
def _grad_source_for_name(self, name):
return tensor_array_grad._GetGradSource(constant_op.constant(0, name=name))
@test_util.deprecated_graph_mode_only
def testSkipEagerGetGradSource_Invalid(self):
with self.assertRaises(ValueError):
self._grad_source_for_name("")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo/bar")
@test_util.deprecated_graph_mode_only
def testSkipEagerGetGradSource_NoEnclosingScope(self):
self.assertEqual("gradients:0", self._grad_source_for_name("gradients"))
self.assertEqual("gradients_0:0", self._grad_source_for_name("gradients_0"))
self.assertEqual("gradients", self._grad_source_for_name("gradients/foo"))
self.assertEqual("gradients_0",
self._grad_source_for_name("gradients_0/foo"))
self.assertEqual("gradients",
self._grad_source_for_name("gradients/foo/bar"))
self.assertEqual("gradients_0",
self._grad_source_for_name("gradients_0/foo/bar"))
@test_util.deprecated_graph_mode_only
def testSkipEagerGetGradSource_EnclosingScope(self):
self.assertEqual("foo/gradients:0",
self._grad_source_for_name("foo/gradients"))
self.assertEqual("foo/gradients_0:0",
self._grad_source_for_name("foo/gradients_0"))
self.assertEqual("foo/gradients",
self._grad_source_for_name("foo/gradients/bar"))
self.assertEqual("foo/gradients_0",
self._grad_source_for_name("foo/gradients_0/bar"))
self.assertEqual("foo/bar/gradients",
self._grad_source_for_name("foo/bar/gradients/baz"))
self.assertEqual("foo/bar/gradients_0",
self._grad_source_for_name("foo/bar/gradients_0/baz"))
@test_util.deprecated_graph_mode_only
def testSkipEagerGetGradSource_NestedUsesInnermost(self):
self.assertEqual(
"foo/gradients/bar/gradients_0",
self._grad_source_for_name("foo/gradients/bar/gradients_0/baz"))
@test_util.deprecated_graph_mode_only
def testSkipEagerWriteShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c0 = constant_op.constant([4.0, 5.0])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c1 = constant_op.constant([6.0, 7.0])
w1 = w0.write(1, c1)
r0 = w1.read(0)
r1 = w1.read(1)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
self.assertAllEqual(c1.get_shape(), r1.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w0.write(0, c2)
@test_util.deprecated_graph_mode_only
def testSkipEagerPartlyUnknownShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=6)
c0 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual([None, None, None, 3], r0.get_shape().as_list())
c1 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
w1 = w0.write(1, c1)
r1 = w1.read(0)
self.assertAllEqual([None, None, None, 3], r1.get_shape().as_list())
      # Writing less specific shape (doesn't change type).
c2 = array_ops.placeholder(dtypes.float32, [None, None, None, None])
w2 = w1.write(2, c2)
r2 = w2.read(0)
self.assertAllEqual([None, None, None, 3], r2.get_shape().as_list())
# Writing more specific shape in one dimension and less specific in
# another.
c3 = array_ops.placeholder(dtypes.float32, [None, None, 2, None])
w3 = w2.write(3, c3)
r3 = w3.read(0)
self.assertAllEqual([None, None, 2, 3], r3.get_shape().as_list())
# Writing partly defined shape using TensorArray.scatter.
c4 = array_ops.placeholder(dtypes.float32, [2, None, 4, 2, 3])
w4 = w3.scatter([4, 5], c4)
r4 = w4.read(0)
self.assertAllEqual([None, 4, 2, 3], r4.get_shape().as_list())
# Writing fully defined shape using TensorArray.split.
c5 = array_ops.placeholder(dtypes.float32, [10, 4, 2, 3])
w5 = w4.split(c5, constant_op.constant([5, 5]))
r5 = w5.read(0)
self.assertAllEqual([5, 4, 2, 3], r5.get_shape().as_list())
def _testUnpackShape(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True,
infer_shape=True)
value = constant_op.constant(
[[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w0 = ta.unstack(value)
r0 = w0.read(0)
self.assertAllEqual((2,), r0.get_shape())
c1 = constant_op.constant([4.0, 5.0])
w1 = w0.write(3, c1)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
# TensorArray v2 does not support clear_after_read.
with self.assertRaisesOpError(
r"Could not read index 0 twice because it was cleared after a "
r"previous read \(perhaps try setting clear_after_read = false\?\)"
):
with ops.control_dependencies([r0]):
self.evaluate(w1.read(0))
r1 = w1.read(1)
self.assertAllEqual(c1.get_shape(), r1.shape)
c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w1.write(4, c2)
@test_util.run_v1_only("b/117943489")
def testUnpackShape(self):
self._testUnpackShape()
@test_util.deprecated_graph_mode_only
def testSplitShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True,
infer_shape=True)
value = constant_op.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])
w0 = ta.split(value, [1, 1, 1])
r0 = w0.read(0)
self.assertAllEqual((1, 2), r0.get_shape())
ta1 = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo1",
size=0,
dynamic_size=True,
infer_shape=True)
w0 = ta1.split(value, [1, 2])
r0 = w0.read(0)
if context.executing_eagerly():
self.assertEqual((1, 2), r0.get_shape())
self.assertEqual((2, 2), w0.read(1).get_shape())
else:
self.assertEqual(r0.get_shape().ndims, None)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertEqual(
tensor_shape.TensorShape(
ta1.handle.op.get_attr("element_shape")).ndims, None)
@test_util.deprecated_graph_mode_only
def testSkipEagerWriteUnknownShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=True)
c0 = array_ops.placeholder(dtypes.float32)
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def _testGradientWhenNotAllComponentsRead(self):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
x = constant_op.constant([2.0, 3.0])
w = ta.unstack(x)
r0 = w.read(0)
      # Calculate (dr0/dx0, dr0/dx1). Since r0 = x0, the gradients are (1, 0).
grad_r0 = gradients_impl.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
grad_r0_vals = session.run(grad_r0)[0]
self.assertAllEqual(grad_r0_vals, [1.0, 0.0])
@test_util.deprecated_graph_mode_only
def testSkipEagerGradientWhenNotAllComponentsRead(self):
self._testGradientWhenNotAllComponentsRead()
@test_util.deprecated_graph_mode_only
def testSkipEagerWriteButNotAllComponentsReadGrad(self):
with self.cached_session(use_gpu=True) as session:
x0 = constant_op.constant(5.0)
x1 = constant_op.constant(10.0)
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2).write(0, x0).write(1, x1)
r0 = ta.read(0)
      # Calculate (dr0/dx0, dr0/dx1). Since r0 = x0, the gradients are (1, 0).
grad_r0_x1 = gradients_impl.gradients(ys=[r0], xs=[x0, x1], grad_ys=[1.0])
grad_r0_x1_vals = session.run(grad_r0_x1)
self.assertAllEqual(grad_r0_x1_vals, [1.0, 0.0])
def _testTensorArrayUnpackDynamic(self):
with self.cached_session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=3, dynamic_size=True)
x = constant_op.constant([1.0, 2.0, 3.0])
w0 = ta.unstack(x)
w1 = w0.write(3, 4.0)
r = w1.stack()
self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), self.evaluate(r))
grad = gradients_impl.gradients(ys=[r], xs=[x])
self.assertAllEqual(np.array([1.0, 1.0, 1.0]), self.evaluate(grad)[0])
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArrayUnpackDynamic(self):
self._testTensorArrayUnpackDynamic()
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArraySplitDynamic(self):
with self.session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=3, dynamic_size=True)
x = constant_op.constant([1.0, 2.0, 3.0])
w0 = ta.split(x, [1, 1, 1])
w1 = w0.write(3, [4.0])
r = w1.concat()
self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), self.evaluate(r))
grad = gradients_impl.gradients(ys=[r], xs=[x])
self.assertAllEqual(np.array([1.0, 1.0, 1.0]), self.evaluate(grad)[0])
def testStackShape(self):
@def_function.function
def ta_stack():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=3)
x = constant_op.constant([1.0, 2.0, 3.0])
ta = ta.write(0, x)
t = ta.stack()
self.assertEqual(t.shape.as_list(), [None, 3])
return t
ta_stack()
def testReadShape(self):
@def_function.function
def ta_read():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=3)
x = constant_op.constant([1.0, 2.0, 3.0])
ta = ta.write(0, x)
t = ta.read(0)
self.assertEqual(t.shape.as_list(), [3])
return t
ta_read()
def testGatherShape(self):
def ta_gather(indices):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=3)
x = constant_op.constant([1.0, 2.0, 3.0])
ta = ta.write(0, x)
t = ta.gather(indices)
self.assertEqual(t.shape.as_list(), [first_dim, 3])
return t
    # This propagates the shape of `indices` when compiling ta_gather.
ta_gather_with_known_indices_shape = def_function.function(ta_gather)
first_dim = 1
ta_gather_with_known_indices_shape([0])
    # Here we force the shape of `indices` to be [None] during ta_gather's
# compilation.
ta_gather_with_unknown_indices_shape = def_function.function(
ta_gather,
input_signature=[
tensor_spec.TensorSpec(dtype=dtypes.int32, shape=[None])
])
first_dim = None
ta_gather_with_unknown_indices_shape([0])
def _testTensorArrayEvalEmpty(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=False)
v2_msg = ("Tried to stack elements of an empty list with "
"non-fully-defined element_shape")
v1_msg = (
"TensorArray has size zero, but element shape <unknown> is not "
"fully defined. Currently only static shapes are supported when "
"packing zero-size TensorArrays.")
with self.assertRaisesOpError(
v2_msg if control_flow_util.ENABLE_CONTROL_FLOW_V2 else v1_msg):
ta.stack().eval()
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayEvalEmpty(self):
self._testTensorArrayEvalEmpty()
  # This test is ill-defined for Eager mode: unpacking an empty tensor gives
  # an empty list, and there is no equivalent of "mark_used" in Eager.
def _testTensorArrayEvalEmptyWithDefault(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=True)
self.assertEqual(0, ta.size().eval())
# Don't actually perform the pack. This stores the static shape.
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
ta = ta.unstack(array_ops.zeros([0, 3, 5]))
else:
ta.unstack(array_ops.zeros([0, 3, 5])).mark_used()
packed = ta.stack()
concatenated = ta.concat()
self.assertAllEqual([0, 3, 5], self.evaluate(packed).shape)
# Concatenating zero tensors along their first dimension gives a
# first dimension of zero
self.assertAllEqual([0, 5], self.evaluate(concatenated).shape)
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArrayEvalEmptyWithDefault(self):
self._testTensorArrayEvalEmptyWithDefault()
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArrayScatterReadAndGradients(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
indices = constant_op.constant([1, 8])
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.scatter(indices, value)
r0 = w.read(1)
r1 = w.read(8)
      # Test combined gradients
grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
read_vals, grad_vals = session.run([[r0, r1], grad])
self.assertEqual(len(read_vals), 2)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([1.0, -1.0], read_vals[0])
self.assertAllEqual([10.0, -10.0], read_vals[1])
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArrayScatterPartialReadAndGradients(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
indices = constant_op.constant([1, 8])
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.scatter(indices, value)
r0 = w.read(1)
      # Test gradient of a single read
grad = gradients_impl.gradients(
ys=[r0], xs=[value], grad_ys=[[2.0, 3.0]])[0]
read_val, grad_val = session.run([r0, grad])
self.assertAllEqual([1.0, -1.0], read_val)
self.assertAllEqual([[2.0, 3.0], [0.0, 0.0]], grad_val)
def testScatterIntoExistingList(self):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=5)
ta = ta.scatter(indices=[3, 4], value=array_ops.ones([2]))
self.assertAllEqual(ta.stack(), [0., 0., 0., 1., 1.])
ta = ta.scatter(indices=[1], value=array_ops.ones([1]))
self.assertAllEqual(ta.stack(), [0., 1., 0., 1., 1.])
ta = ta.scatter(indices=[0, 2], value=[5., 6.])
self.assertAllEqual(ta.stack(), [5., 1., 6., 1., 1.])
@test_util.run_v1_only("b/118890905")
def testTensorArrayWriteGatherAndGradients(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
def func(values):
indices = constant_op.constant([1, 8])
w = ta.unstack(values)
g = w.gather(indices)
return g
values = constant_op.constant([[1.0 * x, -1.0 * x] for x in range(10)])
g = func(values)
grad_ys = [[[2.0, 3.0], [4.0, 5.0]]]
      # Test gradients through gather
if context.executing_eagerly():
g_vals = [g]
grad_vals = backprop.gradients_function(func)(
values, dy=constant_op.constant(grad_ys[0], dtype=dtypes.float32))
else:
grad = gradients_impl.gradients(ys=[g], xs=[values], grad_ys=grad_ys)
g_vals, grad_vals = session.run([[g], grad])
      # Gradients for the 8 unread components (out of 10) are zero.
expected_grad = np.zeros((10, 2))
expected_grad[1] = [2.0, 3.0]
expected_grad[8] = [4.0, 5.0]
self.assertEqual(len(g_vals), 1)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[1.0, -1.0], [8.0, -8.0]], g_vals[0])
self.assertAllEqual(expected_grad, grad_vals[0])
@test_util.disable_control_flow_v2("colocate_with not supported in v2.")
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayGetsDeviceFromFirstWrite(self):
with ops.device("/job:worker/task:0/cpu:0"):
# this initial device will be ignored.
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
with ops.device("/job:worker/task:1/cpu:0"):
# the first write sets the op's device.
ta = ta.write(0, 1.0)
with ops.device("/job:worker/task:2/cpu:0"):
# subsequent writes do not modify the op's device.
ta = ta.write(1, 1.0)
# The gradient TA will sit on the same device as the forward TA.
ta_grad = ta.grad("grad")
flows = [ta.flow, ta_grad.flow]
# Similar tests for unpack and split
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=3)
with ops.device("/job:worker/task:1/cpu:0"):
ta = ta.unstack([1.0, 2.0])
with ops.device("/job:worker/task:2/cpu:0"):
ta = ta.write(2, 3.0)
flows.append(ta.flow)
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
with ops.device("/job:worker/task:1/cpu:0"):
ta = ta.split([1.0, 2.0], [1, 1])
flows.append(ta.flow)
session = session_lib.Session(self._workers[0].target)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
session.run(flows, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField("step_stats"))
dev_stats = {d.device: d.node_stats
for d in run_metadata.step_stats.dev_stats}
for d in dev_stats:
if "/task:1/" in d:
self.assertTrue(
[s for s in dev_stats[d] if "/TensorArray" in s.node_name])
elif "/host:CPU" not in d:
self.assertFalse(
[s for s in dev_stats[d] if "/TensorArray" in s.node_name])
@test_util.disable_control_flow_v2("colocate_with not supported in v2.")
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayGetsDeviceFromFirstWriteInWhileLoop(self):
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
def _body(i, ta_i):
with ops.device("/job:worker/task:1/cpu:0"):
return i + 1, ta_i.write(i, constant_op.constant(0.0))
_, ta_out = control_flow_ops.while_loop(
lambda i, ta: i < 2, _body, loop_vars=[0, ta])
session = session_lib.Session(self._workers[0].target)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
session.run(ta_out.flow, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField("step_stats"))
dev_stats = {d.device: d.node_stats
for d in run_metadata.step_stats.dev_stats}
for d in dev_stats:
if "/task:1/" in d:
self.assertTrue(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
else:
self.assertFalse(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
@test_util.disable_control_flow_v2("colocate_with not supported in v2.")
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayDisabledColocateWithFirstWriteCall(self):
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, colocate_with_first_write_call=False)
def _body(i, ta_i):
with ops.device("/job:worker/task:1/cpu:0"):
return i + 1, ta_i.write(i, constant_op.constant(0.0))
_, ta_out = control_flow_ops.while_loop(
lambda i, ta: i < 2, _body, loop_vars=[0, ta])
session = session_lib.Session(self._workers[0].target)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
session.run(ta_out.flow, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField("step_stats"))
dev_stats = {d.device: list(d.node_stats)
for d in run_metadata.step_stats.dev_stats}
for d in dev_stats:
if "/task:0/" in d and "CPU" in d: # Skip any GPU node stats
self.assertTrue(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
else:
self.assertFalse(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
def testTensorArrayIdentity(self):
with self.session(use_gpu=True):
ta0 = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2,
infer_shape=False)
ta1 = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=4,
infer_shape=True)
ta0 = ta0.write(0, 0.)
ta1 = ta1.write(0, 1)
v0 = variable_scope.get_variable(
"v0", shape=(), initializer=init_ops.zeros_initializer())
v1 = variable_scope.get_variable(
"v1", shape=(), initializer=init_ops.zeros_initializer())
with ops.control_dependencies([v0.assign_add(1)]):
ta0 = ta0.identity()
with ops.control_dependencies([v1.assign_add(1)]):
ta1 = ta1.identity()
read0 = ta0.read(0)
read1 = ta1.read(0)
size0 = ta0.size()
size1 = ta1.size()
# Tests correct properties on new TensorArrays.
self.assertEqual(dtypes.float32, ta0.dtype)
self.assertEqual(dtypes.int32, ta1.dtype)
if context.executing_eagerly():
self.assertEqual(tensor_shape.scalar(), read0.get_shape())
else:
self.assertEqual(tensor_shape.unknown_shape(), read0.get_shape())
self.assertEqual(tensor_shape.scalar(), read1.get_shape())
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
read0_v, read1_v, size0_v, size1_v = self.evaluate((read0, read1, size0,
size1))
      # Tests that the control dependencies were added and executed.
self.assertEqual(1, self.evaluate(v0))
self.assertEqual(1, self.evaluate(v1))
# Tests correct TensorArray.
self.assertEqual(read0_v, 0)
self.assertEqual(read1_v, 1)
self.assertEqual(size0_v, 2)
self.assertEqual(size1_v, 4)
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayGradYsInCorrectScope(self):
n_time = 1
n_dim = 1
x = constant_op.constant([[1.42]])
dy = constant_op.constant([[2.42]])
ta = tensor_array_ops.TensorArray(
dtypes.float32, size=n_time, element_shape=[n_dim])
for t in range(n_time):
ta = ta.write(index=t, value=x[t])
y = ta.stack()
# dy is outside of the gradients name scope; tf.gradients must
# wrap it in the correct name scope.
dx, = gradients_impl.gradients(ys=[y], xs=[x], grad_ys=[dy])
with self.cached_session(use_gpu=True) as sess:
vdx, vdy = self.evaluate([dx, dy])
self.assertAllClose(vdx, vdy)
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayInt64GPU(self):
if not test.is_gpu_available():
return
with self.session(use_gpu=True, force_gpu=True) as sess:
value = array_ops.placeholder(dtypes.int64)
ta = tensor_array_ops.TensorArray(dtype=dtypes.int64, size=2)
ta = ta.scatter([0, 1], value)
r0 = ta.read(0)
r1 = ta.read(1)
v0, v1 = sess.run([r0, r1], feed_dict={value: [-3, 100]})
self.assertAllEqual(v0, -3)
self.assertAllEqual(v1, 100)
class TensorArrayBenchmark(test.Benchmark):
def _tensorArrayWriteInWhile(self):
size = 10000
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=size)
(_, ta) = control_flow_ops.while_loop(
lambda i, _: i < size,
lambda i, ta: (i + 1, ta.write(i, 0.)), [0, ta],
parallel_iterations=1)
return ta.stack()
def _benchmarkWriteInWhile(self):
ops.reset_default_graph()
op = self._tensorArrayWriteInWhile()
self.run_op_benchmark(session_lib.Session(), op)
def benchmarkWriteInWhile(self):
self._benchmarkWriteInWhile()
@test_util.enable_control_flow_v2
def benchmarkWriteInWhileWithControlFlowV2(self):
self._benchmarkWriteInWhile()
def benchmarkWriteInDatasetMapFn(self):
ds = dataset_ops.Dataset.from_tensors(array_ops.zeros([10])).repeat()
ds = ds.map(lambda _: self._tensorArrayWriteInWhile())
op = ds.make_one_shot_iterator().get_next()
self.run_op_benchmark(session_lib.Session(), op)
def benchmarkWriteInDatasetParallelMapFn(self):
ds = dataset_ops.Dataset.from_tensors(array_ops.zeros([10])).repeat()
ds = ds.map(lambda _: self._tensorArrayWriteInWhile(), num_parallel_calls=2)
op = ds.make_one_shot_iterator().get_next()
self.run_op_benchmark(session_lib.Session(), op)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/tensor_array_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.ops.losses import util
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
@test_util.run_deprecated_v1
class AbsoluteDifferenceLossTest(test.TestCase):
def setUp(self):
super(AbsoluteDifferenceLossTest, self).setUp()
self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.cached_session():
with self.assertRaises(ValueError):
losses.absolute_difference(
self._predictions, self._predictions, weights=None)
def testAllCorrectNoLossWeight(self):
loss = losses.absolute_difference(self._predictions, self._predictions)
with self.cached_session():
self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
def testNonZeroLoss(self):
loss = losses.absolute_difference(self._labels, self._predictions)
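    # |predictions - labels| = [3, 1, 10, 13, 3, 3], so the mean loss is
    # 33 / 6 = 5.5.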
with self.cached_session():
self.assertAlmostEqual(5.5, self.evaluate(loss), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(5.5 * weights, self.evaluate(loss), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = losses.absolute_difference(self._labels, self._predictions,
constant_op.constant(weights))
with self.cached_session():
self.assertAlmostEqual(5.5 * weights, self.evaluate(loss), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant((1.2, 0.0), shape=(2, 1))
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(5.6, self.evaluate(loss), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 0.0], shape=[2, 1])
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(5.6, self.evaluate(loss), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
loss = losses.absolute_difference(self._labels, self._predictions, weights)
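    # The weighted absolute differences sum to 83.0; with the default
    # reduction this is averaged over the 5 nonzero weights, giving 16.6.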
with self.cached_session():
self.assertAlmostEqual(16.6, self.evaluate(loss), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(6.0, self.evaluate(loss), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
weights = array_ops.zeros((2, 3))
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testEagerNoMemoryLeaked(self):
# This is a somewhat convoluted way of testing that nothing gets added to
# a global collection.
predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
losses.absolute_difference(labels, predictions)
class SoftmaxCrossEntropyLossTest(test.TestCase):
def testNoneWeightRaisesValueError(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
with self.cached_session():
with self.assertRaises(ValueError):
losses.softmax_cross_entropy(labels, logits, weights=None)
@test_util.run_deprecated_v1
def testAllCorrect(self):
with self.cached_session():
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
loss = losses.softmax_cross_entropy(labels, logits)
self.assertEquals('softmax_cross_entropy_loss/value', loss.op.name)
self.assertAlmostEqual(loss.eval(), 0.0, 3)
@test_util.run_deprecated_v1
def testAllWrong(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
with self.cached_session():
loss = losses.softmax_cross_entropy(labels, logits)
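      # Each example places logit 10 on a wrong class, so the per-example loss
      # is log(exp(10) + 2) - 0, which is approximately 10.0.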
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
@test_util.run_deprecated_v1
def testNonZeroLossWithPythonScalarWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
weights = 2.3
with self.cached_session():
loss = losses.softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(weights * 10.0, self.evaluate(loss), 3)
@test_util.run_deprecated_v1
def testNonZeroLossWithScalarTensorWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
weights = 2.3
with self.cached_session():
loss = losses.softmax_cross_entropy(labels, logits,
constant_op.constant(weights))
self.assertAlmostEqual(weights * 10.0, self.evaluate(loss), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
weights = constant_op.constant((1.2, 3.4, 5.6))
with self.cached_session():
loss = losses.softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0,
self.evaluate(loss), 3)
def testAllWrongAllWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
weights = constant_op.constant([0, 0, 0], shape=[3])
with self.cached_session():
loss = losses.softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
def testSomeWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
weights = constant_op.constant([1.2, 0, 0], shape=[3])
with self.cached_session():
loss = losses.softmax_cross_entropy(labels, logits, weights)
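      # Only the first example has a nonzero weight, so with the default
      # reduction the loss is 1.2 * 10.0 / 1 = 12.0.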
self.assertAlmostEqual(12.0, self.evaluate(loss), 3)
def testSoftmaxWithMeasurementSpecificWeightsRaisesException(self):
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
with self.assertRaises(ValueError):
losses.softmax_cross_entropy(labels, logits, weights=weights).eval()
@test_util.run_deprecated_v1
def testSoftmaxLabelSmoothing(self):
with self.cached_session():
# Softmax Cross Entropy Loss is:
# -\sum_i p_i \log q_i
# where for a softmax activation
# \log q_i = x_i - \log \sum_j \exp x_j
# = x_i - x_max - \log \sum_j \exp (x_j - x_max)
# For our activations, [100, -100, -100] the log partition function
# becomes \log ( exp(0) + exp(-200) + exp(-200) ) = 0
# so our log softmaxes become: [0, -200, -200]
# so our cross entropy loss is:
# -(1 - L + L/n) * 0 + 400 * L/n = 400 L/n
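      # With label_smoothing L = 0.1 and n = 3 classes this evaluates to
      # 400 * 0.1 / 3, approximately 13.33, which is the expected_value below.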
logits = constant_op.constant([[100.0, -100.0, -100.0]])
labels = constant_op.constant([[1, 0, 0]])
label_smoothing = 0.1
loss = losses.softmax_cross_entropy(
labels, logits, label_smoothing=label_smoothing)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
expected_value = 400.0 * label_smoothing / 3.0
self.assertAlmostEqual(loss.eval(), expected_value, 3)
class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
def testNoneWeightRaisesValueError(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]])
with self.cached_session():
with self.assertRaises(ValueError):
losses.sparse_softmax_cross_entropy(labels, logits, weights=None)
@test_util.run_deprecated_v1
def testAllCorrectInt32Labels(self):
with self.cached_session():
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int32)
loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testEagerNoMemoryLeaked(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int32)
losses.sparse_softmax_cross_entropy(labels, logits)
@test_util.run_deprecated_v1
def testAllCorrectInt64Labels(self):
with self.cached_session():
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64)
loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
@test_util.run_deprecated_v1
def testAllCorrectNonColumnLabels(self):
with self.cached_session():
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([0, 1, 2])
loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
@test_util.run_deprecated_v1
def testAllWrongInt32Labels(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int32)
with self.cached_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
@test_util.run_deprecated_v1
def testAllWrongInt64Labels(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int64)
with self.cached_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
@test_util.run_deprecated_v1
def testAllWrongNonColumnLabels(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([2, 0, 1])
with self.cached_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
@test_util.run_deprecated_v1
def testNonZeroLossWithPythonScalarWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = 2.3
with self.cached_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(weights * 10.0, self.evaluate(loss), 3)
@test_util.run_deprecated_v1
def testNonZeroLossWithScalarTensorWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = 2.3
with self.cached_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits,
constant_op.constant(weights))
self.assertAlmostEqual(weights * 10.0, self.evaluate(loss), 3)
def testNonZeroLossWith1DTensorWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = 2.3
with self.cached_session():
loss = losses.sparse_softmax_cross_entropy(
labels, logits, constant_op.constant((weights,)))
self.assertAlmostEqual(weights * 10.0, self.evaluate(loss), 3)
@test_util.run_deprecated_v1
def testNonZeroLossWithPlaceholderForWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = array_ops.placeholder(dtypes.float32)
with self.cached_session() as sess:
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
loss_val = sess.run(loss,
feed_dict={weights: ((1.2,), (3.4,), (5.6,))})
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss_val, 3)
@test_util.run_deprecated_v1
def testUnknownShapePlaceholderForLogitsLabelsButScalarWeights(self):
logits = array_ops.placeholder(dtypes.float32)
labels = array_ops.placeholder(dtypes.int32)
weights = 1.0
with self.cached_session() as sess:
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
loss_val = sess.run(loss,
feed_dict={
logits: [[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]],
labels: [[2], [0], [1]],
})
self.assertAlmostEqual((1.0 + 1.0 + 1.0) * 10.0 / 3.0, loss_val, 3)
@test_util.run_deprecated_v1
def testNonZeroLossWithPlaceholderForLogitsLabelsAndWeights(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 3))
labels = array_ops.placeholder(dtypes.int32, shape=(None, 1))
weights = array_ops.placeholder(dtypes.float32)
with self.cached_session() as sess:
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
loss_val = sess.run(loss,
feed_dict={
logits: [[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]],
labels: [[2], [0], [1]],
weights: ((1.2,), (3.4,), (5.6,)),
})
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss_val, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([1.2, 3.4, 5.6], shape=(3, 1))
with self.cached_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0,
self.evaluate(loss), 3)
def testNonZeroLossWithColumnWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([[1.2], [3.4], [5.6]])
with self.cached_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0,
self.evaluate(loss), 3)
def testAllWrongAllWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([0, 0, 0], shape=(3, 1))
with self.cached_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
def testSomeWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([1.2, 0, 0], shape=(3, 1))
with self.cached_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(12.0, self.evaluate(loss), 3)
@test_util.run_deprecated_v1
def testMeasurementSpecificWeightsRaisesException(self):
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2]])
weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
with self.assertRaises(ValueError):
losses.sparse_softmax_cross_entropy(
labels, logits, weights=weights).eval()
def testInconsistentWeightSizeRaisesException(self):
"""The weight tensor has incorrect number of elements."""
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2]])
weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
with self.assertRaises(ValueError):
losses.sparse_softmax_cross_entropy(
labels, logits, weights=weights).eval()
def testInconsistentLabelSizeRaisesException(self):
"""The label tensor has incorrect number of elements."""
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2], [3]])
weights = constant_op.constant([1.2, 3.4, 5.6])
with self.assertRaises(ValueError):
losses.sparse_softmax_cross_entropy(
labels, logits, weights=weights).eval()
@test_util.run_deprecated_v1
def testInconsistentWeightShapeRaisesException(self):
"""The weight tensor has incorrect shape."""
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0, -100.0],
[-100.0, -100.0, 100.0, -100.0],
[-100.0, -100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2], [3]])
weights = constant_op.constant([[1.2, 3.4], [5.6, 7.8]])
with self.assertRaises(ValueError):
losses.sparse_softmax_cross_entropy(
labels, logits, weights=weights).eval()
@test_util.run_deprecated_v1
def testInconsistentLabelShapeRaisesException(self):
"""The label tensor has incorrect shape."""
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0, -100.0],
[-100.0, -100.0, 100.0, -100.0],
[-100.0, -100.0, -100.0, 100.0]])
labels = constant_op.constant([[0, 1], [2, 3]])
weights = constant_op.constant(1.2)
with self.assertRaisesRegexp(ValueError, 'dimension'):
losses.sparse_softmax_cross_entropy(
labels, logits, weights=weights).eval()
class SigmoidCrossEntropyLossTest(test.TestCase):
@test_util.run_deprecated_v1
def testAllCorrectSigmoid(self):
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
loss = losses.sigmoid_cross_entropy(labels, logits)
self.assertEquals(logits.dtype, loss.dtype)
self.assertEquals('sigmoid_cross_entropy_loss/value', loss.op.name)
self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
@test_util.run_deprecated_v1
def testLossWithSingleDimPlaceholderForLogitsAndWeights1(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 1))
labels = array_ops.placeholder(dtypes.float32, shape=(None, 1))
weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = losses.sigmoid_cross_entropy(labels, logits, weights)
self.assertEquals(logits.dtype, loss.dtype)
with self.cached_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: np.ones((32, 1)),
labels: np.ones((32, 1)),
})
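      # For x = 1 and z = 1 the per-element loss is
      # max(x, 0) - x * z + log(1 + exp(-abs(x))) = log(1 + exp(-1)),
      # approximately 0.313.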
self.assertAlmostEqual(0.313, loss, 3)
@test_util.run_deprecated_v1
def testLossWithSingleDimPlaceholderForLogitsAndWeights2(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 2))
labels = array_ops.placeholder(dtypes.float32, shape=(None, 2))
weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = losses.sigmoid_cross_entropy(labels, logits, weights)
self.assertEquals(logits.dtype, loss.dtype)
with self.cached_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: np.ones((32, 2)),
labels: np.ones((32, 2)),
})
self.assertAlmostEqual(0.313, loss, 3)
@test_util.run_deprecated_v1
def testAllWrongSigmoid(self):
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
loss = losses.sigmoid_cross_entropy(labels, logits)
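      # Each row has two confidently wrong entries contributing roughly 100
      # each, so the mean over all nine entries is 600.0 / 9.0.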
self.assertEquals(logits.dtype, loss.dtype)
self.assertEquals('sigmoid_cross_entropy_loss/value', loss.op.name)
self.assertAlmostEqual(loss.eval(), 600.0 / 9.0, 3)
@test_util.run_deprecated_v1
def testAllWrongSigmoidWithMeasurementSpecificWeights(self):
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
loss = losses.sigmoid_cross_entropy(labels, logits, weights)
self.assertEquals(logits.dtype, loss.dtype)
self.assertEquals('sigmoid_cross_entropy_loss/value', loss.op.name)
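      # Per-element losses are ~100 where the logit sign and the label disagree
      # and ~0 elsewhere; the weighted sum is 1700 and there are 7 nonzero
      # weights, hence the expected value 1700 / 7.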
self.assertAlmostEqual(1700.0 / 7.0, self.evaluate(loss), 3)
@test_util.run_deprecated_v1
def testMultiCorrectSigmoid(self):
logits = constant_op.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0],
[-100.0, 100.0, 100.0]])
labels = constant_op.constant([[1, 0, 1], [1, 1, 0], [0, 1, 1]])
loss = losses.sigmoid_cross_entropy(labels, logits)
self.assertEquals(logits.dtype, loss.dtype)
self.assertEquals('sigmoid_cross_entropy_loss/value', loss.op.name)
with self.cached_session():
self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
def testSigmoidFloat64(self):
logits = constant_op.constant((
(100.0, -100.0, 100.0),
(100.0, -100.0, 100.0),
(100.0, 100.0, -100.0)
), dtype=dtypes.float64)
labels = constant_op.constant((
(1, 0, 1), (1, 1, 0), (0, 1, 1)
), dtype=dtypes.int64)
loss = losses.sigmoid_cross_entropy(labels, logits)
self.assertEquals(logits.dtype, loss.dtype)
with self.cached_session():
self.assertAlmostEqual(44.444, self.evaluate(loss), 3)
def testSigmoidNoReduction(self):
logits = constant_op.constant((
(100.0, -100.0, 100.0),
(100.0, -100.0, 100.0),
(100.0, 100.0, -100.0)))
labels = constant_op.constant(((1, 0, 1), (1, 1, 0), (0, 1, 1)))
loss = losses.sigmoid_cross_entropy(
labels, logits, reduction=losses.Reduction.NONE)
self.assertEquals(logits.dtype, loss.dtype)
with self.cached_session():
self.assertAllClose(((0., 0., 0.), (0., 100., 100.), (100., 0., 100.)),
self.evaluate(loss), 3)
@test_util.run_deprecated_v1
def testSigmoidLabelSmoothingCorrect(self):
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0]])
labels = constant_op.constant([[1, 0, 1]])
# Sigmoid cross entropy loss is:
# max(x,0) - x*z + log(1 + exp(-abs(x)))
# The new labels are:
# z' = z * (1 - L) + 0.5 L
# 1 -> 1 - 0.5 L
# 0 -> 0.5 L
# here we expect:
# 1/3 * (100 - 100 * (1 - 0.5 L) + 0
# + 0 + 100 * (0.5 L) + 0
# + 0 + 100 * (1 - 0.5 L) + 0)
# = 1/3 * (100 + 50 L)
label_smoothing = 0.1
loss = losses.sigmoid_cross_entropy(
labels, logits, label_smoothing=label_smoothing)
self.assertEquals(logits.dtype, loss.dtype)
self.assertEquals('sigmoid_cross_entropy_loss/value', loss.op.name)
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
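      # With label_smoothing = 0.1 this is (100 + 5) / 3 = 35.0.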
self.assertAlmostEqual(loss.eval(), expected_value, 3)
@test_util.run_deprecated_v1
def testSigmoidLabelSmoothingEqualsSoftmaxTwoLabel(self):
with self.cached_session():
label_smoothing = 0.1
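      # A sigmoid over logit x is equivalent to a two-class softmax over logits
      # [0, x], so each sigmoid element below is mirrored by a softmax row
      # built from the corresponding logit and label.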
sigmoid_logits = constant_op.constant([[100.0, -100.0, -100.0]])
sigmoid_labels = constant_op.constant([[1, 0, 1]])
sigmoid_loss = losses.sigmoid_cross_entropy(
sigmoid_labels, sigmoid_logits, label_smoothing=label_smoothing)
self.assertEquals(sigmoid_logits.dtype, sigmoid_loss.dtype)
softmax_logits = constant_op.constant(
[[0.0, 100.0], [100.0, 0.0], [100.0, 0.0]])
softmax_labels = constant_op.constant([[0, 1], [1, 0], [0, 1]])
softmax_loss = losses.softmax_cross_entropy(
softmax_labels, softmax_logits, label_smoothing=label_smoothing)
self.assertAlmostEqual(sigmoid_loss.eval(), self.evaluate(softmax_loss),
3)
@test_util.run_deprecated_v1
class LogLossTest(test.TestCase):
def setUp(self):
super(LogLossTest, self).setUp()
predictions = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
labels = np.asarray([1.0, 0.0, 1.0, 1.0, 0.0, 0.0]).reshape((2, 3))
self._np_predictions = predictions
self._np_labels = labels
epsilon = 1e-7
self._expected_losses = np.multiply(
labels, np.log(predictions + epsilon)) + np.multiply(
1 - labels, np.log(1 - predictions + epsilon))
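    # These are per-element log-likelihoods (non-positive); log loss is their
    # negation, hence the -np.sum(...) in the expected values below.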
self._predictions = constant_op.constant(predictions)
self._labels = constant_op.constant(labels)
def testValueErrorThrownWhenWeightIsNone(self):
with self.cached_session():
with self.assertRaises(ValueError):
losses.log_loss(self._labels, self._labels, weights=None)
def testAllCorrectNoLossWeight(self):
loss = losses.log_loss(self._labels, self._labels)
with self.cached_session():
self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
def testAllCorrectNoLossWeightWithPlaceholder(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._np_labels.shape)
loss = losses.log_loss(self._labels, tf_predictions)
with self.cached_session():
self.assertAlmostEqual(
0.0, loss.eval(feed_dict={tf_predictions: self._np_labels}), 3)
def testNonZeroLoss(self):
loss = losses.log_loss(self._labels, self._predictions)
with self.cached_session():
self.assertAlmostEqual(-np.sum(self._expected_losses) / 6.0,
self.evaluate(loss), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = losses.log_loss(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
self.evaluate(loss), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = losses.log_loss(self._labels, self._predictions,
constant_op.constant(weights))
with self.cached_session():
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
self.evaluate(loss), 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholder(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._np_predictions.shape)
weights = 2.3
loss = losses.log_loss(self._labels, tf_predictions,
constant_op.constant(weights))
with self.cached_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly(self):
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[None, None])
weights = 2.3
loss = losses.log_loss(self._labels, tf_predictions,
constant_op.constant(weights))
with self.cached_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant((1.2, 3.4), shape=(2, 1))
expected_losses = np.multiply(
self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
loss = losses.log_loss(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 6.0,
self.evaluate(loss), 3)
def testNonZeroLossWithOneDimBatchSpecificWeightsSomeZero(self):
weights = constant_op.constant((1.2, 0), shape=(2, 1))
expected_losses = np.multiply(self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
(2, 3)))
loss = losses.log_loss(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 3.0,
self.evaluate(loss), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeightsSomeZero(self):
weights = constant_op.constant([1.2, 0], shape=[2, 1])
expected_losses = np.multiply(self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
(2, 3)))
loss = losses.log_loss(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 3.0,
self.evaluate(loss), 3)
def testWeightsWithSameNumDimsButWrongShapeThrowsException(self):
weights = constant_op.constant(np.random.normal(size=(2, 4)), shape=[2, 4])
with self.cached_session():
with self.assertRaises(ValueError):
losses.log_loss(self._labels, self._predictions, weights)
def testNonZeroLossWithMeasurementSpecificWeights(self):
weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
loss = losses.log_loss(
self._labels,
self._predictions,
constant_op.constant(
weights, shape=(2, 3)))
with self.cached_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0,
self.evaluate(loss), 3)
def testNonZeroLossWithMeasurementSpecificWeightsWithPlaceholder(self):
weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
loss = losses.log_loss(
self._labels,
tf_predictions,
constant_op.constant(
weights, shape=(2, 3)))
with self.cached_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss, 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
loss = losses.log_loss(
self._labels,
self._predictions,
constant_op.constant(
weights, shape=(2, 3)))
with self.cached_session():
self.assertAlmostEqual(-np.sum(expected_losses), self.evaluate(loss), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZeroWithPlaceholder(self):
weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
tf_weights = constant_op.constant(weights, shape=(2, 3))
loss = losses.log_loss(self._labels, tf_predictions, tf_weights)
with self.cached_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(-np.sum(expected_losses), loss, 3)
def testLossWithSampleSpecificWeightsAllZero(self):
tf_weights = array_ops.zeros(shape=(2, 3))
loss = losses.log_loss(self._labels, self._predictions, tf_weights)
with self.cached_session():
self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
class HingeLossTest(test.TestCase):
def testIncompatibleShapes(self):
with self.cached_session():
logits = constant_op.constant([[-1.0], [2.1]])
labels = constant_op.constant([0.0, 1.0])
with self.assertRaises(ValueError):
_ = losses.hinge_loss(labels, logits).eval()
@test_util.run_deprecated_v1
def testAllOutsideMargin(self):
with self.cached_session():
logits = constant_op.constant([1.2, -1.4, -1.0, 2.1])
labels = constant_op.constant([1.0, 0.0, 0.0, 1.0])
loss = losses.hinge_loss(labels, logits)
self.assertAllClose(loss.eval(), 0.0, atol=1e-3)
@test_util.run_deprecated_v1
def testSomeInsideMargin(self):
with self.cached_session():
logits = constant_op.constant([[-0.7], [-1.4], [1.4], [0.6]])
labels = constant_op.constant([[0.0], [0.0], [1.0], [1.0]])
loss = losses.hinge_loss(labels, logits)
# Examples 1 and 4 are on the correct side of the hyperplane but within
# the margin so they incur some (small) loss.
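      # With labels mapped to {-1, +1}, hinge loss is max(0, 1 - y * logit):
      # (0.3 + 0.0 + 0.0 + 0.4) / 4 = 0.175.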
self.assertAllClose(loss.eval(), 0.175, atol=1e-3)
@test_util.run_deprecated_v1
def testSomeMisclassified(self):
with self.cached_session():
logits = constant_op.constant([[[1.2], [0.4], [-1.0], [-1.1]]])
labels = constant_op.constant([[[1.0], [0.0], [0.0], [1.0]]])
loss = losses.hinge_loss(labels, logits)
# Examples 2 and 4 are on the wrong side of the hyperplane so they incur
# some (fairly large) loss.
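      # max(0, 1 - y * logit) per element: (0.0 + 1.4 + 0.0 + 2.1) / 4 = 0.875.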
self.assertAllClose(loss.eval(), 0.875, atol=1e-3)
class HuberLossTest(test.TestCase):
def testIncompatibleShapes(self):
with self.cached_session():
predictions = constant_op.constant([[-1.0], [2.1]])
labels = constant_op.constant([0.0, 1.0])
with self.assertRaises(ValueError):
_ = losses.huber_loss(labels, predictions).eval()
@test_util.run_deprecated_v1
def testAllQuadratic(self):
with self.cached_session():
predictions = constant_op.constant([1.5, -1.4, -1.0, 0.0])
labels = constant_op.constant([1.0, -1.0, 0.0, 0.5])
loss = losses.huber_loss(labels, predictions)
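      # Every |prediction - label| is <= the default delta of 1.0, so each term
      # is quadratic: 0.5 * error**2, averaged over the four elements.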
self.assertAllClose(loss.eval(),
0.5 * (0.25 + 0.16 + 1.0 + 0.25) / 4., atol=1e-5)
@test_util.run_deprecated_v1
def testAllLinear(self):
with self.cached_session():
predictions = constant_op.constant([1.5, -1.4, -1.0, 0.0])
labels = constant_op.constant([0.0, 1.0, 0.0, 1.5])
loss = losses.huber_loss(labels, predictions)
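      # Every |prediction - label| is >= the default delta of 1.0, so each term
      # is linear: delta * |error| - 0.5 * delta**2, i.e. |error| - 0.5 here.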
self.assertAllClose(loss.eval(),
(1.5 + 2.4 + 1.0 + 1.5) / 4. - 0.5, atol=1e-5)
@test_util.run_deprecated_v1
def testMixedQuadraticLinear(self):
with self.cached_session():
predictions = constant_op.constant([[1.5, -1.4, -1.0, 0.0],
[1.5, -1.4, -1.0, 0.0]])
labels = constant_op.constant([[1.0, -1.0, 0.0, 0.5],
[0.0, 1.0, 0.0, 1.5]])
loss = losses.huber_loss(labels, predictions)
quadratic = 0.5 * (0.25 + 0.16 + 1.0 + 0.25) / 4.
linear = (1.5 + 2.4 + 1.0 + 1.5) / 4. - 0.5
expected_loss = (quadratic + linear) / 2.
self.assertAllClose(loss.eval(), expected_loss, atol=1e-5)
def testAllQuadraticDelta(self):
with self.cached_session():
delta = 0.5
predictions = constant_op.constant([1.5, -1.4, -0.5, 0.0])
labels = constant_op.constant([1.0, -1.0, 0.0, 0.5])
expected = 0.5 * np.array([0.5**2, 0.4**2, 0.5**2, 0.5**2]).mean()
loss = losses.huber_loss(labels, predictions, delta=delta)
self.assertAllClose(expected, self.evaluate(loss), atol=1e-5)
def testAllLinearDelta(self):
delta = 0.5
predictions = constant_op.constant([1.5, -1.4, -1.0, 0.0])
labels = constant_op.constant([0.0, 1.0, 0.0, 1.5])
expected = delta * np.array([1.5, 2.4, 1.0, 1.5]).mean()
expected -= 0.5 * delta**2
loss = losses.huber_loss(labels, predictions, delta=delta)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(loss), atol=1e-5)
@test_util.run_deprecated_v1
class MeanSquaredErrorTest(test.TestCase):
def setUp(self):
super(MeanSquaredErrorTest, self).setUp()
self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.cached_session():
with self.assertRaises(ValueError):
losses.mean_squared_error(
self._predictions, self._predictions, weights=None)
@test_util.run_deprecated_v1
def testScalar(self):
with self.cached_session():
self.assertEqual(
0.0,
losses.mean_squared_error(predictions=constant_op.constant(0),
labels=constant_op.constant(0)).eval())
@test_util.run_deprecated_v1
def testAllCorrectNoLossWeight(self):
loss = losses.mean_squared_error(self._predictions, self._predictions)
with self.cached_session():
self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
@test_util.run_deprecated_v1
def testNonZeroLoss(self):
loss = losses.mean_squared_error(self._labels, self._predictions)
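    # Squared errors are 9, 1, 100, 169, 9 and 9; their mean is 297 / 6 = 49.5.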
with self.cached_session():
self.assertAlmostEqual(49.5, self.evaluate(loss), 3)
@test_util.run_deprecated_v1
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(49.5 * weights, self.evaluate(loss), 3)
@test_util.run_deprecated_v1
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = losses.mean_squared_error(self._labels, self._predictions,
constant_op.constant(weights))
with self.cached_session():
self.assertAlmostEqual(49.5 * weights, self.evaluate(loss), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(767.8 / 6.0, self.evaluate(loss), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 3.4], shape=[2, 1])
loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(767.8 / 6.0, self.evaluate(loss), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(587 / 5.0, self.evaluate(loss), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(18.0, self.evaluate(loss), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
weights = array_ops.zeros((2, 3))
loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
@test_util.run_deprecated_v1
class MeanPairwiseSquaredErrorTest(test.TestCase):
def setUp(self):
super(MeanPairwiseSquaredErrorTest, self).setUp()
self._predictions = np.array([[4, 8, 12], [8, 1, 3]])
self._labels = np.array([[1, 9, 2], [-5, -5, 7]])
batch_size, dims = self._labels.shape
# Compute the expected loss 'manually'.
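    # For each batch element, accumulate the squared difference between the
    # prediction delta and the label delta over all dims * (dims - 1) / 2 = 3
    # index pairs, then divide by the number of pairs.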
total = np.zeros((batch_size,))
for b in range(batch_size):
for i in range(dims - 1):
for j in range(i + 1, dims):
x = self._predictions[b, i].item() - self._predictions[b, j].item()
y = self._labels[b, i].item() - self._labels[b, j].item()
diff = (x - y)
total[b] += (diff * diff)
self._expected_losses = np.divide(total, 3.0)
def testValueErrorThrownWhenWeightIsNone(self):
with self.cached_session():
with self.assertRaises(ValueError):
losses.mean_pairwise_squared_error(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
weights=None)
def _test_valid_weights(
self, labels, predictions, expected_loss, weights=1.0):
with self.cached_session():
static_inputs_op = losses.mean_pairwise_squared_error(
predictions=predictions, labels=labels, weights=weights)
self.assertAlmostEqual(
expected_loss, self.evaluate(static_inputs_op), places=3)
predictions_placeholder = array_ops.placeholder(
dtypes.float32, shape=np.asarray(predictions.shape))
labels_placeholder = array_ops.placeholder(
dtypes.int32, shape=np.asarray(labels.shape))
weights_placeholder = array_ops.placeholder(
dtypes.float32, shape=np.asarray(weights).shape)
dynamic_inputs_op = losses.mean_pairwise_squared_error(
predictions=predictions_placeholder,
labels=labels_placeholder,
weights=weights_placeholder)
feed_dict = {
predictions_placeholder: predictions,
labels_placeholder: labels,
weights_placeholder: weights,
}
self.assertAlmostEqual(
expected_loss, dynamic_inputs_op.eval(feed_dict=feed_dict), places=3)
def testAllCorrectNoLossWeight(self):
self._test_valid_weights(
self._labels, self._labels, expected_loss=0.0)
def testNonZeroLoss(self):
self._test_valid_weights(
self._labels, self._predictions,
expected_loss=np.sum(self._expected_losses))
def testGradientWithZeroWeight(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
inputs = array_ops.ones((2, 3))
weights = variable_scope.get_variable(
'weights',
shape=[3, 4],
initializer=init_ops.truncated_normal_initializer())
predictions = math_ops.matmul(inputs, weights)
optimizer = momentum_lib.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
loss = losses.mean_pairwise_squared_error(predictions, predictions, 0)
gradients_to_variables = optimizer.compute_gradients(loss)
init_op = variables.global_variables_initializer()
with self.cached_session() as sess:
self.evaluate(init_op)
for grad, _ in gradients_to_variables:
np_grad = self.evaluate(grad)
self.assertFalse(np.isnan(np_grad).any())
def testNonZeroLossWithPythonScalarWeight(self):
weight = 2.3
self._test_valid_weights(
self._labels, self._predictions,
expected_loss=weight * np.sum(self._expected_losses),
weights=weight)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = losses.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(weights))
with self.cached_session():
self.assertAlmostEqual(weights * np.sum(self._expected_losses),
self.evaluate(loss), 3)
def testNonZeroLossWithScalarZeroWeight(self):
self._test_valid_weights(
self._labels, self._predictions, expected_loss=0.0, weights=0.0)
def test3d(self):
labels = np.array([
[[1, 9, 2], [12, 11, 10], [9, 8, 7]],
[[-5, -5, 7], [6, 5, 4], [3, 2, 1]],
])
predictions = np.array([
[[4, 8, 12], [1, 2, 3], [4, 5, 6]],
[[8, 1, 3], [7, 8, 9], [10, 11, 12]],
])
self._test_valid_weights(labels, predictions, expected_loss=137.5)
def test3dWeightedScalar(self):
labels = np.array([
[[1, 9, 2], [12, 11, 10], [9, 8, 7]],
[[-5, -5, 7], [6, 5, 4], [3, 2, 1]],
])
predictions = np.array([
[[4, 8, 12], [1, 2, 3], [4, 5, 6]],
[[8, 1, 3], [7, 8, 9], [10, 11, 12]],
])
weight = 3.0
self._test_valid_weights(
labels, predictions, expected_loss=weight * 137.5, weights=weight)
def _test_invalid_weights(
self, labels, predictions, weights=1.0):
expected_error_msg = 'weights can not be broadcast to values'
# Static check.
with self.assertRaisesRegexp(ValueError, expected_error_msg):
losses.mean_pairwise_squared_error(
predictions=predictions, labels=labels, weights=weights)
# Dynamic check.
predictions_placeholder = array_ops.placeholder(dtypes.float32)
labels_placeholder = array_ops.placeholder(dtypes.int32)
weights_placeholder = array_ops.placeholder(dtypes.float32)
dynamic_inputs_op = losses.mean_pairwise_squared_error(
predictions=predictions_placeholder,
labels=labels_placeholder,
weights=weights_placeholder)
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.OpError, expected_error_msg):
dynamic_inputs_op.eval(feed_dict={
predictions_placeholder: predictions,
labels_placeholder: labels,
weights_placeholder: weights,
})
def testInvalid3dWeighted2x0(self):
labels = np.array([
[[1, 9, 2], [12, 11, 10], [9, 8, 7]],
[[-5, -5, 7], [6, 5, 4], [3, 2, 1]],
])
predictions = np.array([
[[4, 8, 12], [1, 2, 3], [4, 5, 6]],
[[8, 1, 3], [7, 8, 9], [10, 11, 12]],
])
self._test_invalid_weights(
labels, predictions, weights=np.asarray((1.2, 3.4)))
def test3dWeighted2x3x3(self):
labels = np.array([
[[1, 9, 2], [12, 11, 10], [9, 8, 7]],
[[-5, -5, 7], [6, 5, 4], [3, 2, 1]],
])
predictions = np.array([
[[4, 8, 12], [1, 2, 3], [4, 5, 6]],
[[8, 1, 3], [7, 8, 9], [10, 11, 12]],
])
self._test_valid_weights(
# TODO(ptucker): This doesn't look right.
labels,
predictions,
expected_loss=9 * 137.5,
weights=np.ones((2, 3, 3)))
def testLossWithAllZeroBatchSpecificWeights(self):
self._test_valid_weights(
self._labels, self._predictions, expected_loss=0.0,
weights=np.zeros((2, 1)))
def testLossIsAssociativeAcrossBatchElements(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
height = 3
width = 4
shape = (1, height, width, 1)
labels0 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
predictions0 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
labels1 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
predictions1 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
loss0 = losses.mean_pairwise_squared_error(
labels=labels0,
predictions=predictions0)
loss1 = losses.mean_pairwise_squared_error(
labels=labels1,
predictions=predictions1)
loss0_1 = losses.mean_pairwise_squared_error(
labels=array_ops.concat([labels0, labels1], 0),
predictions=array_ops.concat([predictions0, predictions1], 0))
with self.cached_session() as session:
loss0, loss1, loss0_1 = session.run([loss0, loss1, loss0_1])
self.assertTrue(loss0 > 0)
self.assertTrue(loss1 > 0)
self.assertAlmostEqual(loss0 + loss1, loss0_1, 5)
@test_util.run_deprecated_v1
class CosineDistanceLossTest(test.TestCase):
def setUp(self):
super(CosineDistanceLossTest, self).setUp()
self._predictions = np.asarray([
[1, 0, 0], # Batch 1
[0, 0, -1],
[1, 0, 0], # Batch 2
[1, 0, 0],
[0, 0, -1], # Batch 3
[1, 0, 0]
]).reshape((3, 2, 3))
self._labels = np.asarray([[1, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0],
[0, 0, 1], [0, 1, 0]]).reshape((3, 2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.cached_session():
with self.assertRaises(ValueError):
losses.cosine_distance(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
dim=2,
weights=None)
def testAllCorrectNoWeights(self):
loss = losses.cosine_distance(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
dim=2)
with self.cached_session():
self.assertAlmostEqual(0, self.evaluate(loss), 5)
def testPartiallyCorrectWithIntegerValues(self):
loss = losses.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2)
with self.cached_session():
self.assertAlmostEqual(1, self.evaluate(loss), 5)
def testPartiallyCorrectFloatingPointValues(self):
predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
labels = np.matrix(('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
tf_preds = constant_op.constant(
predictions, shape=(3, 1, 3), dtype=dtypes.float32)
tf_labels = constant_op.constant(
labels, shape=(3, 1, 3), dtype=dtypes.float32)
loss = losses.cosine_distance(tf_labels, tf_preds, dim=2)
with self.cached_session():
self.assertAlmostEqual(1.0, self.evaluate(loss), 5)
def testSampleSpecificWeights(self):
loss = losses.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=np.asarray((1, 0, 0)).reshape((3, 1, 1)))
with self.cached_session():
self.assertEqual(1.0, self.evaluate(loss))
def testMeasurementSpecificWeights(self):
loss = losses.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2, 1)))
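    # The four weighted measurements have cosine distances 0, 0, 2 and 1;
    # dividing their sum by the four nonzero weights gives 3 / 4.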
with self.cached_session():
self.assertEqual(3.0 / 4.0, self.evaluate(loss))
def testMeasurementSpecificWeightsWithPlaceholderWithShape(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._labels.shape)
loss = losses.cosine_distance(
predictions=tf_predictions,
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2, 1)))
with self.cached_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._predictions})
self.assertEqual(3.0 / 4.0, loss)
def testZeroLossWhenAllSampleSpecificWeightsAreZero(self):
loss = losses.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=array_ops.zeros((3, 1, 1)))
with self.cached_session():
self.assertEqual(0, self.evaluate(loss))
def testZeroLossWhenAllMeasurementSpecificWeightsAreZero(self):
loss = losses.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=array_ops.zeros((3, 2, 1)))
with self.cached_session():
self.assertEqual(0, self.evaluate(loss))
class AddLossTest(test.TestCase):
def testNoCollectLossesBatch2(self):
logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]] * 2)
labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]] * 2)
self.assertFalse(util.get_losses())
losses.absolute_difference(logits, labels, loss_collection=None)
losses.log_loss(logits, labels, loss_collection=None)
losses.mean_squared_error(logits, labels, loss_collection=None)
losses.sigmoid_cross_entropy(logits, labels, loss_collection=None)
losses.softmax_cross_entropy(logits, labels, loss_collection=None)
self.assertFalse(util.get_losses())
class ComputeWeightedLossTest(test.TestCase):
def setUp(self):
super(ComputeWeightedLossTest, self).setUp()
self._shape = (3, 2, 4)
raw_losses = np.zeros(self._shape)
next_loss = 0.0
for i in range(self._shape[0]):
for j in range(self._shape[1]):
for k in range(self._shape[2]):
raw_losses[i][j][k] = next_loss
next_loss += 1.0
raw_losses.setflags(write=False)
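    # raw_losses enumerates 0, 1, ..., 23 over shape (3, 2, 4); it is made
    # read-only so the tests below cannot modify it accidentally.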
self._raw_losses = raw_losses
def testUnweighted(self):
for reduction in losses.Reduction.all():
with ops.Graph().as_default() as g:
self.assertEqual(0, len(util.get_losses()))
raw_losses = self._raw_losses
unweighted_losses = (
losses.compute_weighted_loss(raw_losses, reduction=reduction),
losses.compute_weighted_loss(
raw_losses, weights=np.ones((1, 1, 1)), reduction=reduction),
losses.compute_weighted_loss(
raw_losses, weights=np.ones((1, 1, 4)), reduction=reduction),
losses.compute_weighted_loss(
raw_losses, weights=np.ones((1, 2, 1)), reduction=reduction),
losses.compute_weighted_loss(
raw_losses, weights=np.ones((1, 2, 4)), reduction=reduction),
losses.compute_weighted_loss(
raw_losses, weights=np.ones((3, 1, 1)), reduction=reduction),
losses.compute_weighted_loss(
raw_losses, weights=np.ones((3, 1, 4)), reduction=reduction),
losses.compute_weighted_loss(
raw_losses, weights=np.ones((3, 2, 1)), reduction=reduction),
losses.compute_weighted_loss(
raw_losses, weights=np.ones(self._shape), reduction=reduction)
)
self.assertEqual(9, len(util.get_losses()))
with self.session(g):
for unweighted_loss in unweighted_losses:
if reduction == losses.Reduction.NONE:
self.assertAllClose(self._raw_losses,
self.evaluate(unweighted_loss))
elif reduction == losses.Reduction.SUM:
self.assertAllClose(
np.sum(self._raw_losses), self.evaluate(unweighted_loss))
else:
# reduction one of MEAN, SUM_OVER_NONZERO_WEIGHTS,
# SUM_BY_NONZERO_WEIGHTS or SUM_OVER_BATCH_SIZE.
self.assertAllClose(
np.mean(self._raw_losses), self.evaluate(unweighted_loss))
def testUnweightedFromPlaceholder(self):
for reduction in losses.Reduction.all():
with ops.Graph().as_default() as g:
self.assertEqual(0, len(util.get_losses()))
raw_losses = array_ops.placeholder(dtype=dtypes.float32)
feed_dict = {raw_losses: self._raw_losses}
unweighted_losses = (
losses.compute_weighted_loss(raw_losses, reduction=reduction),
losses.compute_weighted_loss(
raw_losses, weights=np.ones((1, 1, 1)), reduction=reduction),
losses.compute_weighted_loss(
raw_losses, weights=np.ones((1, 1, 4)), reduction=reduction),
)
self.assertEqual(3, len(util.get_losses()))
with self.session(g):
for unweighted_loss in unweighted_losses:
if reduction == losses.Reduction.NONE:
self.assertAllClose(
self._raw_losses, unweighted_loss.eval(feed_dict))
elif reduction == losses.Reduction.SUM:
self.assertAllClose(
np.sum(self._raw_losses), unweighted_loss.eval(feed_dict))
else:
# reduction one of MEAN, SUM_OVER_NONZERO_WEIGHTS,
# SUM_BY_NONZERO_WEIGHTS or SUM_OVER_BATCH_SIZE.
self.assertAllClose(
np.mean(self._raw_losses), unweighted_loss.eval(feed_dict))
def testScalarWeight(self):
with ops.Graph().as_default():
self.assertEqual(0, len(util.get_losses()))
weight = 17.0
weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weight)
self.assertEqual(1, len(util.get_losses()))
with self.cached_session():
self.assertAllClose(
np.mean(weight * self._raw_losses), self.evaluate(weighted_loss))
def _test_invalid_weights(self, weights):
with ops.Graph().as_default():
self.assertEqual(0, len(util.get_losses()))
expected_error_msg = 'weights can not be broadcast to values'
# Static check.
with self.assertRaisesRegexp(ValueError, expected_error_msg):
losses.compute_weighted_loss(self._raw_losses, weights=weights)
# Dynamic check.
weights_placeholder = array_ops.placeholder(dtypes.float32)
weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weights_placeholder)
self.assertEqual(1, len(util.get_losses()))
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.OpError, expected_error_msg):
weighted_loss.eval(feed_dict={weights_placeholder: weights})
def testInvalidWeightTooManyDims(self):
self._test_invalid_weights(np.zeros(shape=(2, 2, 2, 2)))
def testInvalidWeightMismatchedDim(self):
with ops.Graph().as_default():
raw_losses = array_ops.reshape(self._raw_losses, shape=(3, 2, 4, 1))
weights = np.ones(shape=(3, 2, 4, 2))
expected_error_msg = 'weights can not be broadcast to values'
self.assertEqual(0, len(util.get_losses()))
# Static check.
with self.assertRaisesRegexp(ValueError, expected_error_msg):
losses.compute_weighted_loss(raw_losses, weights=weights)
# Dynamic check.
weights_placeholder = array_ops.placeholder(dtypes.float32)
weighted_loss = losses.compute_weighted_loss(
raw_losses, weights=weights_placeholder)
self.assertEqual(1, len(util.get_losses()))
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.OpError, expected_error_msg):
weighted_loss.eval(feed_dict={weights_placeholder: weights})
def testInvalid3Weight(self):
self._test_invalid_weights((17.0, 5.0, 2.0))
def testInvalid3x1Weight(self):
self._test_invalid_weights(((17.0,), (5.0,), (2.0,),))
def testInvalid3x2Weight(self):
self._test_invalid_weights((
(17.0, 3.0),
(5.0, 31.0),
(2.0, 7.0),))
def testInvalid1x2Weight(self):
self._test_invalid_weights((17.0, 3.0,),)
def testInvalidScalar1DWeight(self):
self._test_invalid_weights((17.0,),)
def _test_valid_weights(self, weights):
for reduction in losses.Reduction.all():
with ops.Graph().as_default() as g:
self.assertEqual(0, len(util.get_losses()))
weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weights, reduction=reduction)
self.assertEqual(1, len(util.get_losses()))
with self.session(g):
weighted_losses = weights * self._raw_losses
weighted_sum = np.sum(weighted_losses)
if reduction == losses.Reduction.NONE:
self.assertAllClose(weighted_losses, self.evaluate(weighted_loss))
elif reduction == losses.Reduction.SUM:
self.assertAllClose(weighted_sum, self.evaluate(weighted_loss))
else:
broadcast_weights = weights * np.ones_like(self._raw_losses)
if reduction == losses.Reduction.MEAN:
self.assertAllClose(weighted_sum / np.sum(broadcast_weights),
self.evaluate(weighted_loss))
elif (reduction == losses.Reduction.SUM_OVER_NONZERO_WEIGHTS or
reduction == losses.Reduction.SUM_BY_NONZERO_WEIGHTS):
self.assertAllClose(
weighted_sum / np.count_nonzero(broadcast_weights),
self.evaluate(weighted_loss))
elif reduction == losses.Reduction.SUM_OVER_BATCH_SIZE:
self.assertAllClose(weighted_sum / self._raw_losses.size,
self.evaluate(weighted_loss))
def test1x1x1Weight(self):
self._test_valid_weights((((17.0,),),))
def test1x2x1Weight(self):
self._test_valid_weights((((17.0,), (3.0,),),))
def test1x1x4Weight(self):
self._test_valid_weights((((17.0, 0.0, 2.0, 5.0),),))
def test3x1x1Weight(self):
self._test_valid_weights((((17.0,),), ((5.0,),), ((2.0,),),))
def test3x2x1Weight(self):
self._test_valid_weights((
((17.0,), (3.0,)),
((5.0,), (31.0,)),
((2.0,), (7.0,)),
))
def test3x1x4Weight(self):
self._test_valid_weights((
((17.0, 0.0, 2.0, 5.0),),
((5.0, 31.0, 17.0, 5.0),),
((7.0, 3.0, 11.0, 5.0),),
))
def test1x2x4Weight(self):
self._test_valid_weights(((
(17.0, 0.0, 2.0, 5.0),
(3.0, 13.0, 11.0, 2.0),
),))
def test3x2x4Weight(self):
self._test_valid_weights((
((17.0, 0.0, 2.0, 5.0), (3.0, 13.0, 11.0, 2.0),),
((5.0, 31.0, 17.0, 5.0), (13.0, 3.0, 0.0, 11.0),),
((0.0, 3.0, 11.0, 5.0), (13.0, 11.0, 1.0, 7.0),),
))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/losses_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests which set DEBUG_SAVEALL and assert no garbage was created.
This flag seems to be sticky, so these tests have been isolated for now.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class NoReferenceCycleTests(test_util.TensorFlowTestCase):
@test_util.assert_no_garbage_created
def testEagerResourceVariables(self):
with context.eager_mode():
resource_variable_ops.ResourceVariable(1.0, name="a")
@test_util.assert_no_garbage_created
def testTensorArrays(self):
with context.eager_mode():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/garbage_collection_test.py
|
# -*- coding: utf-8 -*-
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.strings.to_bytes op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import test
class StringsToBytesOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters(
# Scalar input -> vector output
(b'hello', [b'h', b'e', b'l', b'l', b'o']),
# Vector input -> 2D ragged output
([b'hello', b'123'],
[[b'h', b'e', b'l', b'l', b'o'], [b'1', b'2', b'3']]),
# 2D tensor input -> 3D ragged output
([[b'abc', b'de'], [b'fgh', b'']],
[[[b'a', b'b', b'c'], [b'd', b'e']], [[b'f', b'g', b'h'], []]]),
# 2D ragged input -> 3D ragged output
(ragged_factory_ops.constant_value([[b'abc', b'de'], [b'f']]),
[[[b'a', b'b', b'c'], [b'd', b'e']], [[b'f']]]),
# 3D input -> 4D ragged output
(ragged_factory_ops.constant_value(
[[[b'big', b'small'], [b'red']], [[b'cat', b'dog'], [b'ox']]]),
[[[[b'b', b'i', b'g'], [b's', b'm', b'a', b'l', b'l']],
[[b'r', b'e', b'd']]],
[[[b'c', b'a', b't'], [b'd', b'o', b'g']],
[[b'o', b'x']]]]),
# Empty string
(b'', []),
# Null byte
(b'\x00', [b'\x00']),
# Unicode
(u'仅今年前'.encode('utf-8'),
[b'\xe4', b'\xbb', b'\x85', b'\xe4', b'\xbb', b'\x8a', b'\xe5',
b'\xb9', b'\xb4', b'\xe5', b'\x89', b'\x8d']),
)
def testStringToBytes(self, source, expected):
expected = ragged_factory_ops.constant_value(expected, dtype=object)
result = ragged_string_ops.string_bytes_split(source)
self.assertRaggedEqual(expected, result)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/string_bytes_split_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def RandMatrix(rows, cols, tr, round_bfloat=False):
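  # Returns a float32 matrix with entries in [-0.5, 0.5]. With round_bfloat the
  # entries are multiples of 1/128, presumably so they stay exactly
  # representable in bfloat16. If tr is set, rows and cols are swapped so the
  # matrix has the intended shape after transposition.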
if tr:
rows, cols = cols, rows
rand_func = np.random.randint if round_bfloat else np.random.uniform
return (np.clip(
rand_func(
low=-256.0, high=256.0, size=rows * cols), -64,
64) / 128.0).reshape([rows, cols]).astype(np.float32)
class SparseMatMulTest(test.TestCase):
def _testCpuMatmul(self,
x,
y,
tr_a=False,
tr_b=False,
sp_a=True,
sp_b=False,
x_dtype=dtypes.float32,
y_dtype=dtypes.float32):
with self.cached_session(use_gpu=False):
tf_x = math_ops.cast(x, x_dtype)
tf_y = math_ops.cast(y, y_dtype)
tf_ans = math_ops.matmul(
tf_x,
tf_y,
transpose_a=tr_a,
transpose_b=tr_b,
a_is_sparse=sp_a,
b_is_sparse=sp_b)
out = self.evaluate(tf_ans)
np_x = math_ops.cast(tf_x, dtypes.float32).eval()
np_y = math_ops.cast(tf_y, dtypes.float32).eval()
if tr_a:
np_x = np.transpose(np_x)
if tr_b:
np_y = np.transpose(np_y)
np_ans = np.matrix(np_x) * np.matrix(np_y)
self.assertShapeEqual(np_ans, tf_ans)
self.assertAllCloseAccordingToType(np_ans, out, rtol=1e-4, atol=1e-4)
@test_util.run_deprecated_v1
def testBasic(self):
x = np.arange(0., 4.).reshape([4, 1]).astype(np.float32)
y = np.arange(-1., 1.).reshape([1, 2]).astype(np.float32)
for x_dtype in (dtypes.float32, dtypes.bfloat16):
for y_dtype in (dtypes.float32, dtypes.bfloat16):
self._testCpuMatmul(x, y, x_dtype=x_dtype, y_dtype=y_dtype)
@test_util.run_deprecated_v1
def testZeroDim(self):
x = np.ones((4, 0)).astype(np.float32)
y = np.ones((0, 3)).astype(np.float32)
for x_dtype in (dtypes.float32, dtypes.bfloat16):
for y_dtype in (dtypes.float32, dtypes.bfloat16):
self._testCpuMatmul(x, y, x_dtype=x_dtype, y_dtype=y_dtype)
@test_util.run_deprecated_v1
def testEmpty(self):
x = np.ones((0, 0)).astype(np.float32)
y = np.ones((0, 0)).astype(np.float32)
for x_dtype in (dtypes.float32, dtypes.bfloat16):
for y_dtype in (dtypes.float32, dtypes.bfloat16):
self._testCpuMatmul(x, y, x_dtype=x_dtype, y_dtype=y_dtype)
# Tests setting one dimension to be a high value.
@test_util.run_deprecated_v1
def testLarge(self):
r1 = np.random.randint(6000, 20000)
r2 = np.random.randint(1, 10)
r3 = np.random.randint(1, 10)
for m, k, n in [(r1, r2, r3), (r2, r1, r3), (r2, r3, r1)]:
for x_dtype in (dtypes.float32, dtypes.bfloat16):
for y_dtype in (dtypes.float32, dtypes.bfloat16):
x = RandMatrix(m, k, False)
y = RandMatrix(k, n, False)
self._testCpuMatmul(x, y, x_dtype=x_dtype, y_dtype=y_dtype)
# Tests random sized matrices.
@test_util.run_deprecated_v1
def testRandom(self):
for tr_a in [True, False]:
for tr_b in [True, False]:
for sp_a in [True, False]:
for sp_b in [True, False]:
for x_dtype in (dtypes.float32, dtypes.bfloat16):
for y_dtype in (dtypes.float32, dtypes.bfloat16):
n, k, m = np.random.randint(1, 100, size=3)
x = RandMatrix(n, k, tr_a)
y = RandMatrix(k, m, tr_b)
self._testCpuMatmul(
x,
y,
tr_a,
tr_b,
sp_a,
sp_b,
x_dtype=x_dtype,
y_dtype=y_dtype)
class MatMulGradientTest(test.TestCase):
def _testGradients(self, tr_a, tr_b, sp_a, sp_b, a_dtype, b_dtype, delta,
name):
with self.cached_session():
a = constant_op.constant(
RandMatrix(
3, 2, tr_a, round_bfloat=True), dtype=dtypes.float32)
b = constant_op.constant(
RandMatrix(
2, 4, tr_b, round_bfloat=True), dtype=dtypes.float32)
tf_a = math_ops.cast(a, a_dtype) if a_dtype != dtypes.float32 else a
tf_b = math_ops.cast(b, b_dtype) if b_dtype != dtypes.float32 else b
m = math_ops.matmul(
tf_a,
tf_b,
name=name,
transpose_a=tr_a,
transpose_b=tr_b,
a_is_sparse=sp_a,
b_is_sparse=sp_b)
err = (gradient_checker.compute_gradient_error(
a, [2, 3] if tr_a else [3, 2],
m, [3, 4],
x_init_value=a.eval(),
delta=delta) + gradient_checker.compute_gradient_error(
b, [4, 2] if tr_b else [2, 4],
m, [3, 4],
x_init_value=b.eval(),
delta=delta))
self.assertLessEqual(err, delta / 2.)
@test_util.run_deprecated_v1
def testGradientInput(self):
for tr_a in [True, False]:
for tr_b in [True, False]:
for sp_a in [True, False]:
for sp_b in [True, False]:
for a_dtype in (dtypes.float32, dtypes.bfloat16):
for b_dtype in (dtypes.float32, dtypes.bfloat16):
                # Note: bfloat16 only has 7 mantissa bits (float32 has 23), so
                # when both operands are bfloat16 we loosen the gradient-check
                # delta by 2 bits to pass the test.
if a_dtype == dtypes.bfloat16 and b_dtype == dtypes.bfloat16:
delta = 1 / 16.
else:
delta = 1 / 64.
name = "sparse_matmul_%s_%s_%s_%s" % (tr_a, tr_b, sp_a, sp_b)
self._testGradients(tr_a, tr_b, sp_a, sp_b, a_dtype, b_dtype,
delta, name)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_matmul_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConstantOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.util import compat
# TODO(josh11b): add tests with lists/tuples, Shape.
# TODO(ashankar): Collapse with tests in constant_op_test.py and use something
# like the test_util.run_in_graph_and_eager_modes decorator to confirm
# equivalence between graph and eager execution.
class ConstantTest(test.TestCase):
def _testCpu(self, x):
np_ans = np.array(x)
with context.device("/device:CPU:0"):
tf_ans = ops.convert_to_tensor(x).numpy()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testGpu(self, x):
device = test_util.gpu_device_name()
if device:
np_ans = np.array(x)
with context.device(device):
tf_ans = ops.convert_to_tensor(x).numpy()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testFloat(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
self._testAll(np.empty((2, 0, 5)).astype(np.float32))
orig = [-1.0, 2.0, 0.0]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# Mix floats and ints
orig = [-1.5, 2, 0]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
orig = [-5, 2.5, 0]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# Mix floats and ints that don't fit in int32
orig = [1, 2**42, 0.5]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
def testDouble(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
self._testAll(np.empty((2, 0, 5)).astype(np.float64))
orig = [-5, 2.5, 0]
tf_ans = constant_op.constant(orig, dtypes_lib.float64)
self.assertEqual(dtypes_lib.float64, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# This integer is not exactly representable as a double, gets rounded.
tf_ans = constant_op.constant(2**54 + 1, dtypes_lib.float64)
self.assertEqual(2**54, tf_ans.numpy())
# This integer is larger than all non-infinite numbers representable
# by a double, raises an exception.
with self.assertRaisesRegexp(ValueError, "out-of-range integer"):
constant_op.constant(10**310, dtypes_lib.float64)
def testInt32(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int32))
self._testAll(np.empty((2, 0, 5)).astype(np.int32))
self._testAll([-1, 2])
def testInt64(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int64))
self._testAll(np.empty((2, 0, 5)).astype(np.int64))
# Should detect out of range for int32 and use int64 instead.
orig = [2, 2**48, -2**48]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.int64, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# Out of range for an int64
with self.assertRaisesRegexp(ValueError, "out-of-range integer"):
constant_op.constant([2**72])
def testComplex64(self):
self._testAll(
np.complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(
np.complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
def testComplex128(self):
self._testAll(
np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5
]).astype(np.complex128))
self._testAll(
np.complex(1, 2) * np.random.normal(size=30).reshape(
[2, 3, 5]).astype(np.complex128))
self._testAll(np.empty((2, 0, 5)).astype(np.complex128))
def testString(self):
val = [compat.as_bytes(str(x)) for x in np.arange(-15, 15)]
self._testCpu(np.array(val).reshape([2, 3, 5]))
self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
def testStringWithNulls(self):
val = ops.convert_to_tensor(b"\0\0\0\0").numpy()
self.assertEqual(len(val), 4)
self.assertEqual(val, b"\0\0\0\0")
val = ops.convert_to_tensor(b"xx\0xx").numpy()
self.assertEqual(len(val), 5)
self.assertAllEqual(val, b"xx\0xx")
nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
val = ops.convert_to_tensor(nested).numpy()
# NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
# numpy array, which loses the null terminators.
self.assertEqual(val.tolist(), nested)
def testExplicitShapeNumPy(self):
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[2, 3, 5])
self.assertEqual(c.get_shape(), [2, 3, 5])
def testImplicitShapeNumPy(self):
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self.assertEqual(c.get_shape(), [2, 3, 5])
def testExplicitShapeList(self):
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeFill(self):
c = constant_op.constant(12, shape=[7])
self.assertEqual(c.get_shape(), [7])
self.assertAllEqual([12, 12, 12, 12, 12, 12, 12], c.numpy())
def testExplicitShapeReshape(self):
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[5, 2, 3])
self.assertEqual(c.get_shape(), [5, 2, 3])
def testImplicitShapeList(self):
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeNumber(self):
c = constant_op.constant(1, shape=[1])
self.assertEqual(c.get_shape(), [1])
def testImplicitShapeNumber(self):
c = constant_op.constant(1)
self.assertEqual(c.get_shape(), [])
def testShapeTooBig(self):
with self.assertRaises(TypeError):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
def testShapeTooSmall(self):
with self.assertRaises(TypeError):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
def testShapeWrong(self):
with self.assertRaisesRegexp(TypeError, None):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
def testShape(self):
self._testAll(constant_op.constant([1]).get_shape())
def testDimension(self):
x = constant_op.constant([1]).shape[0]
self._testAll(x)
def testDimensionList(self):
x = [constant_op.constant([1]).shape[0]]
self._testAll(x)
# Mixing with regular integers is fine too
self._testAll([1] + x)
self._testAll(x + [1])
def testDimensionTuple(self):
x = constant_op.constant([1]).shape[0]
self._testAll((x,))
self._testAll((1, x))
self._testAll((x, 1))
def testInvalidLength(self):
class BadList(list):
def __init__(self):
super(BadList, self).__init__([1, 2, 3]) # pylint: disable=invalid-length-returned
def __len__(self):
return -1
with self.assertRaisesRegexp(ValueError, "should return >= 0"):
constant_op.constant([BadList()])
with self.assertRaisesRegexp(ValueError, "mixed types"):
constant_op.constant([1, 2, BadList()])
with self.assertRaisesRegexp(ValueError, "should return >= 0"):
constant_op.constant(BadList())
with self.assertRaisesRegexp(ValueError, "should return >= 0"):
constant_op.constant([[BadList(), 2], 3])
with self.assertRaisesRegexp(ValueError, "should return >= 0"):
constant_op.constant([BadList(), [1, 2, 3]])
with self.assertRaisesRegexp(ValueError, "should return >= 0"):
constant_op.constant([BadList(), []])
# TODO(allenl, josh11b): These cases should return exceptions rather than
# working (currently shape checking only checks the first element of each
# sequence recursively). Maybe the first one is fine, but the second one
# silently truncating is rather bad.
# with self.assertRaisesRegexp(ValueError, "should return >= 0"):
# constant_op.constant([[3, 2, 1], BadList()])
# with self.assertRaisesRegexp(ValueError, "should return >= 0"):
# constant_op.constant([[], BadList()])
def testSparseValuesRaiseErrors(self):
with self.assertRaisesRegexp(ValueError, "non-rectangular Python sequence"):
constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)
with self.assertRaisesRegexp(ValueError, None):
constant_op.constant([[1, 2], [3]])
with self.assertRaisesRegexp(ValueError, None):
constant_op.constant([[1, 2], [3], [4, 5]])
# TODO(ashankar): This test fails with graph construction since
# tensor_util.make_tensor_proto (invoked from constant_op.constant)
# does not handle iterables (it relies on numpy conversion).
# For consistency, should graph construction handle Python objects
# that implement the sequence protocol (but not numpy conversion),
# or should eager execution fail on such sequences?
def testCustomSequence(self):
# This is inspired by how many objects in pandas are implemented:
# - They implement the Python sequence protocol
# - But may raise a KeyError on __getitem__(self, 0)
# See https://github.com/tensorflow/tensorflow/issues/20347
class MySeq(object):
def __getitem__(self, key):
if key != 1 and key != 3:
raise KeyError(key)
return key
def __len__(self):
return 2
def __iter__(self):
l = list([1, 3])
return l.__iter__()
self.assertAllEqual([1, 3], self.evaluate(constant_op.constant(MySeq())))
class AsTensorTest(test.TestCase):
def testAsTensorForTensorInput(self):
t = constant_op.constant(10.0)
x = ops.convert_to_tensor(t)
self.assertIs(t, x)
def testAsTensorForNonTensorInput(self):
x = ops.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, ops.EagerTensor))
class ZerosTest(test.TestCase):
def _Zeros(self, shape):
ret = array_ops.zeros(shape)
self.assertEqual(shape, ret.get_shape())
return ret.numpy()
def testConst(self):
self.assertTrue(
np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] * 2)))
def testScalar(self):
self.assertEqual(0, self._Zeros([]))
self.assertEqual(0, self._Zeros(()))
scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(0, scalar.numpy())
def testDynamicSizes(self):
np_ans = np.array([[0] * 3] * 2)
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of zeros of the same dimensions as "d".
z = array_ops.zeros(array_ops.shape(d))
out = z.numpy()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testDtype(self):
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.zeros([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.zeros([2, 3]))
z = array_ops.zeros(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.zeros([2, 3]))
# Test explicit type control
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool,
# TODO(josh11b): Support string type here.
# dtypes_lib.string
]:
z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = z.numpy()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = z.numpy()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
class ZerosLikeTest(test.TestCase):
def _compareZeros(self, dtype, use_gpu):
# Creates a tensor of non-zero values with shape 2 x 3.
# NOTE(kearnes): The default numpy dtype associated with tf.string is
    # np.object (and can't be changed without breaking a lot of things),
    # which causes a TypeError in constant_op.constant below. Here we catch
    # the special case of tf.string and set the numpy dtype appropriately.
if dtype == dtypes_lib.string:
numpy_dtype = np.string_
else:
numpy_dtype = dtype.as_numpy_dtype
d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.zeros_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
# Test that the shape is correct
self.assertEqual([2, 3], z_var.get_shape())
# Test that the value is correct
z_value = z_var.numpy()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
def testZerosLikeCPU(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
# TODO(josh11b): Support string type here.
# dtypes_lib.string
]:
self._compareZeros(dtype, use_gpu=False)
def testZerosLikeGPU(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.bool, dtypes_lib.int64,
# TODO(josh11b): Support string type here.
# dtypes_lib.string
]:
self._compareZeros(dtype, use_gpu=True)
def testZerosLikeDtype(self):
    # Make sure zeros_like works even for dtypes that cannot be cast to
    # each other.
shape = (3, 5)
dtypes = np.float32, np.complex64
for in_type in dtypes:
x = np.arange(15).astype(in_type).reshape(*shape)
for out_type in dtypes:
y = array_ops.zeros_like(x, dtype=out_type).numpy()
self.assertEqual(y.dtype, out_type)
self.assertEqual(y.shape, shape)
self.assertAllEqual(y, np.zeros(shape, dtype=out_type))
class OnesTest(test.TestCase):
def _Ones(self, shape):
ret = array_ops.ones(shape)
self.assertEqual(shape, ret.get_shape())
return ret.numpy()
def testConst(self):
self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))
def testScalar(self):
self.assertEqual(1, self._Ones([]))
self.assertEqual(1, self._Ones(()))
scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(1, scalar.numpy())
def testDynamicSizes(self):
np_ans = np.array([[1] * 3] * 2)
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of ones of the same dimensions as "d".
z = array_ops.ones(array_ops.shape(d))
out = z.numpy()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testDtype(self):
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.ones([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
# Test explicit type control
for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool):
z = array_ops.ones([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
class OnesLikeTest(test.TestCase):
def testOnesLike(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64
]:
numpy_dtype = dtype.as_numpy_dtype
# Creates a tensor of non-zero values with shape 2 x 3.
d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
      # Constructs a tensor of ones of the same dimensions and type as "d".
z_var = array_ops.ones_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.numpy()
# Test that the value is correct
self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
self.assertEqual([2, 3], z_var.get_shape())
class FillTest(test.TestCase):
def _compare(self, dims, val, np_ans, use_gpu):
ctx = context.context()
device = "GPU:0" if (use_gpu and ctx.num_gpus()) else "CPU:0"
with ops.device(device):
tf_ans = array_ops.fill(dims, val, name="fill")
out = tf_ans.numpy()
self.assertAllClose(np_ans, out)
def _compareAll(self, dims, val, np_ans):
self._compare(dims, val, np_ans, False)
self._compare(dims, val, np_ans, True)
def testFillFloat(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillDouble(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt32(self):
np_ans = np.array([[42] * 3] * 2).astype(np.int32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt64(self):
np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex64(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex64)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillComplex128(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex128)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillString(self):
np_ans = np.array([[b"yolo"] * 3] * 2)
tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").numpy()
self.assertAllEqual(np_ans, tf_ans)
def testFillNegative(self):
    for shape in (-1,), (2, -1), (-1, 2), (-2,), (-3,):
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill(shape, 7)
def testShapeFunctionEdgeCases(self):
# Non-vector dimensions.
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill([[0, 1], [2, 3]], 1.0)
# Non-scalar value.
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill([3, 2], [1.0, 2.0])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/constant_op_eager_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Unstack Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
def np_split_squeeze(array, axis):
axis_len = array.shape[axis]
return [
np.squeeze(
arr, axis=(axis,)) for arr in np.split(
array, axis_len, axis=axis)
]
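# Illustrative sketch (not part of the original test file): np_split_squeeze
# behaves like unstacking along the given axis. For a = np.array([[1, 2],
# [3, 4]]):
#   np_split_squeeze(a, 0) -> [np.array([1, 2]), np.array([3, 4])]
#   np_split_squeeze(a, 1) -> [np.array([1, 3]), np.array([2, 4])]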
class UnstackOpTest(test.TestCase):
def testSimple(self):
np.random.seed(7)
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
for dtype in [
np.bool, np.float16, np.float32, np.float64, np.uint8, np.int32,
np.int64
]:
data = np.random.randn(*shape).astype(dtype)
# Convert data to a single tensorflow tensor
x = constant_op.constant(data)
# Unstack into a list of tensors
cs = array_ops.unstack(x, num=shape[0])
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
cs = [self.evaluate(c) for c in cs]
self.assertAllEqual(cs, data)
def testSimpleGpu(self):
if not test_util.is_gpu_available():
self.skipTest('No GPU available')
np.random.seed(7)
with test_util.force_gpu():
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
for dtype in [
np.bool, np.float16, np.float32, np.float64, np.uint8, np.int32,
np.int64
]:
data = np.random.randn(*shape).astype(dtype)
# Convert data to a single tensorflow tensor
x = constant_op.constant(data)
# Unstack into a list of tensors
cs = array_ops.unstack(x, num=shape[0])
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
cs = [self.evaluate(c) for c in cs]
self.assertAllEqual(cs, data)
@test_util.run_deprecated_v1
def testGradientsAxis0(self):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
shapes = [shape[1:]] * shape[0]
for i in xrange(shape[0]):
with self.cached_session():
x = constant_op.constant(data)
cs = array_ops.unstack(x, num=shape[0])
err = gradient_checker.compute_gradient_error(x, shape, cs[i],
shapes[i])
self.assertLess(err, 1e-6)
@test_util.run_deprecated_v1
def testGradientsAxis1(self):
for shape in (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
out_shape = list(shape)
del out_shape[1]
for i in xrange(shape[1]):
with self.cached_session():
x = constant_op.constant(data)
cs = array_ops.unstack(x, num=shape[1], axis=1)
err = gradient_checker.compute_gradient_error(x, shape, cs[i],
out_shape)
self.assertLess(err, 1e-6)
@test_util.run_deprecated_v1
def testInferNum(self):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
x = array_ops.placeholder(np.float32, shape=shape)
cs = array_ops.unstack(x)
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
@test_util.run_deprecated_v1
def testCannotInferNumFromUnknownShape(self):
x = array_ops.placeholder(np.float32)
with self.assertRaisesRegexp(ValueError,
r'Cannot infer num from shape <unknown>'):
array_ops.unstack(x)
@test_util.run_deprecated_v1
def testUnknownShapeOkWithNum(self):
x = array_ops.placeholder(np.float32)
array_ops.unstack(x, num=2)
@test_util.run_deprecated_v1
def testCannotInferNumFromNoneShape(self):
x = array_ops.placeholder(np.float32, shape=(None,))
with self.assertRaisesRegexp(ValueError,
r'Cannot infer num from shape \((\?|None),\)'):
array_ops.unstack(x)
def testAgainstNumpy(self):
# For 1 to 5 dimensions.
for i in range(1, 6):
a = np.random.random(np.random.permutation(i) + 1)
      # For all the possible axes to split on, including negative indices.
for j in range(-i, i):
expected = np_split_squeeze(a, j)
actual_unstack = self.evaluate(array_ops.unstack(a, axis=j))
self.assertAllEqual(expected, actual_unstack)
def testAxis0Default(self):
a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
unstacked = self.evaluate(array_ops.unstack(a))
self.assertEqual(len(unstacked), 2)
self.assertAllEqual(unstacked[0], [1, 2, 3])
self.assertAllEqual(unstacked[1], [4, 5, 6])
def testAxisOutOfRange(self):
a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
with self.assertRaisesRegexp(ValueError, r'axis = 2 not in \[-2, 2\)'):
array_ops.unstack(a, axis=2)
def testAxisOutOfNegativeRange(self):
a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
with self.assertRaisesRegexp(ValueError, r'axis = -3 not in \[-2, 2\)'):
array_ops.unstack(a, axis=-3)
def testZeroLengthDim(self):
x = array_ops.zeros(shape=(0, 1, 2))
y = self.evaluate(array_ops.unstack(x, axis=1)[0])
self.assertEqual(y.shape, (0, 2))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/unstack_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.norm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test as test_lib
def _AddTest(test, test_name, fn):
test_name = "_".join(["test", test_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
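# Illustrative sketch (hypothetical arguments, not part of the original file):
# _AddTest registers a generated test method on a TestCase class and refuses
# to overwrite an existing one. For example,
#   _AddTest(NormOpTest, "Norm_float32_2_2",
#            _GetNormOpTest(np.float32, [2, 2], 2, None, False, True))
# attaches a method named "test_Norm_float32_2_2" to NormOpTest; calling
# _AddTest again with the same name raises RuntimeError.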
class NormOpTest(test_lib.TestCase):
@test_util.run_v1_only("b/120545219")
def testBadOrder(self):
matrix = [[0., 1.], [2., 3.]]
for ord_ in "fro", -7, -1.1, 0:
with self.assertRaisesRegexp(ValueError,
"'ord' must be a supported vector norm"):
linalg_ops.norm(matrix, ord=ord_)
for ord_ in "fro", -7, -1.1, 0:
with self.assertRaisesRegexp(ValueError,
"'ord' must be a supported vector norm"):
linalg_ops.norm(matrix, ord=ord_, axis=-1)
for ord_ in "foo", -7, -1.1, 1.1:
with self.assertRaisesRegexp(ValueError,
"'ord' must be a supported matrix norm"):
linalg_ops.norm(matrix, ord=ord_, axis=[-2, -1])
@test_util.run_v1_only("b/120545219")
def testInvalidAxis(self):
matrix = [[0., 1.], [2., 3.]]
for axis_ in [], [1, 2, 3], [[1]], [[1], [2]], [3.1415], [1, 1]:
error_prefix = ("'axis' must be None, an integer, or a tuple of 2 unique "
"integers")
with self.assertRaisesRegexp(ValueError, error_prefix):
linalg_ops.norm(matrix, axis=axis_)
def _GetNormOpTest(dtype_, shape_, ord_, axis_, keep_dims_, use_static_shape_):
def _CompareNorm(self, matrix):
np_norm = np.linalg.norm(matrix, ord=ord_, axis=axis_, keepdims=keep_dims_)
with self.cached_session(use_gpu=True) as sess:
if use_static_shape_:
tf_matrix = constant_op.constant(matrix)
tf_norm = linalg_ops.norm(
tf_matrix, ord=ord_, axis=axis_, keepdims=keep_dims_)
tf_norm_val = self.evaluate(tf_norm)
else:
tf_matrix = array_ops.placeholder(dtype_)
tf_norm = linalg_ops.norm(
tf_matrix, ord=ord_, axis=axis_, keepdims=keep_dims_)
tf_norm_val = sess.run(tf_norm, feed_dict={tf_matrix: matrix})
self.assertAllClose(np_norm, tf_norm_val, rtol=1e-5, atol=1e-5)
@test_util.run_v1_only("b/120545219")
def Test(self):
is_matrix_norm = (isinstance(axis_, tuple) or
isinstance(axis_, list)) and len(axis_) == 2
is_fancy_p_norm = np.isreal(ord_) and np.floor(ord_) != ord_
if ((not is_matrix_norm and ord_ == "fro") or
(is_matrix_norm and is_fancy_p_norm)):
self.skipTest("Not supported by neither numpy.linalg.norm nor tf.norm")
if ord_ == "euclidean" or (axis_ is None and len(shape) > 2):
self.skipTest("Not supported by numpy.linalg.norm")
matrix = np.random.randn(*shape_).astype(dtype_)
if dtype_ in (np.complex64, np.complex128):
matrix += 1j * np.random.randn(*shape_).astype(dtype_)
_CompareNorm(self, matrix)
return Test
# pylint: disable=redefined-builtin
if __name__ == "__main__":
for use_static_shape in False, True:
for dtype in np.float32, np.float64, np.complex64, np.complex128:
for rows in 2, 5:
for cols in 2, 5:
for batch in [], [2], [2, 3]:
shape = batch + [rows, cols]
for ord in "euclidean", "fro", 0.5, 1, 2, np.inf:
for axis in [
None, (-2, -1), (-1, -2), -len(shape), 0, len(shape) - 1
]:
for keep_dims in False, True:
name = "%s_%s_ord_%s_axis_%s_%s_%s" % (
dtype.__name__, "_".join(map(str, shape)), ord, axis,
keep_dims, use_static_shape)
_AddTest(NormOpTest, "Norm_" + name,
_GetNormOpTest(dtype, shape, ord, axis, keep_dims,
use_static_shape))
test_lib.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/norm_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReorder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseSplitOpTest(test.TestCase):
def _SparseTensor_4x6(self):
# [0 | |2 | |4 |5 ]
# [ |11| |13|14| ]
# [20| | |23| |25]
# [30| |32|33| |35]
ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
[2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
[3, 5]]).astype(np.int64)
val = np.array(
[0, 2, 4, 5, 11, 13, 14, 20, 23, 25, 30, 32, 33, 35]).astype(np.int64)
shape = np.array([4, 6]).astype(np.int64)
return sparse_tensor.SparseTensor(ind, val, shape)
def _SparseTensor_5x7(self):
# [0 | |2 | |4 |5 | ]
# [ |11| |13|14| |16]
# [20| | |23| |25| ]
# [30| |32|33| |35| ]
# [ |41| | |44| |46]
ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
[1, 6], [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
[3, 5], [4, 1], [4, 4], [4, 6]]).astype(np.int64)
val = np.array(
[0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25, 30, 32, 33, 35, 41, 44,
46]).astype(np.int64)
shape = np.array([5, 7]).astype(np.int64)
return sparse_tensor.SparseTensor(ind, val, shape)
def _SparseTensorValue_3x4x2(self):
# slice(:,:, 0)
# ['a0'| |'b0'| ]
# [ |'c0'| |'d0']
# [ | |'e0'| ]
# slice(:,:, 1)
# ['a1'| |'b1'| ]
# [ |'c1'| |'d1']
# [ | |'e1'| ]
ind = np.array([[0, 0, 0], [0, 0, 1], [0, 2, 0], [0, 2, 1], [1, 1, 0],
[1, 1, 1], [1, 3, 0], [1, 3, 1], [2, 2, 0],
[2, 2, 1]]).astype(np.int64)
val = np.array(['a0', 'a1', 'b0', 'b1', 'c0', 'c1', 'd0', 'd1', 'e0', 'e1'])
shape = np.array([3, 4, 2]).astype(np.int64)
return sparse_tensor.SparseTensorValue(ind, val, shape)
def _SparseTensor_3x4x2(self):
return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_3x4x2(
))
@test_util.run_deprecated_v1
def testSplitMatrixRows(self):
with self.session(use_gpu=False):
sp_tensors = sparse_ops.sparse_split(
sp_input=self._SparseTensor_4x6(), num_split=2, axis=0)
self.assertAllEqual(len(sp_tensors), 2)
self.assertAllEqual(sp_tensors[0].indices.eval(), [[0, 0], [0, 2], [0, 4],
[0, 5], [1, 1], [1, 3],
[1, 4]])
self.assertAllEqual(sp_tensors[0].values.eval(), [0, 2, 4, 5, 11, 13, 14])
self.assertAllEqual(sp_tensors[0].dense_shape.eval(), [2, 6])
self.assertAllEqual(sp_tensors[1].indices.eval(), [[0, 0], [0, 3], [0, 5],
[1, 0], [1, 2], [1, 3],
[1, 5]])
self.assertAllEqual(sp_tensors[1].values.eval(),
[20, 23, 25, 30, 32, 33, 35])
self.assertAllEqual(sp_tensors[1].dense_shape.eval(), [2, 6])
@test_util.run_deprecated_v1
def testSplitMatrixUnevenCols(self):
with self.session(use_gpu=False):
sp_tensors_3 = sparse_ops.sparse_split(
sp_input=self._SparseTensor_5x7(), num_split=3, axis=1)
self.assertAllEqual(len(sp_tensors_3), 3)
self.assertAllEqual(sp_tensors_3[0].indices.eval(),
[[0, 0], [0, 2], [1, 1], [2, 0], [3, 0], [3, 2],
[4, 1]])
self.assertAllEqual(sp_tensors_3[0].values.eval(),
[0, 2, 11, 20, 30, 32, 41])
self.assertAllEqual(sp_tensors_3[0].dense_shape.eval(), [5, 3])
self.assertAllEqual(sp_tensors_3[1].indices.eval(),
[[0, 1], [1, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensors_3[1].values.eval(),
[4, 13, 14, 23, 33, 44])
self.assertAllEqual(sp_tensors_3[1].dense_shape.eval(), [5, 2])
self.assertAllEqual(sp_tensors_3[2].indices.eval(),
[[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensors_3[2].values.eval(), [5, 16, 25, 35, 46])
self.assertAllEqual(sp_tensors_3[2].dense_shape.eval(), [5, 2])
sp_tensors_4 = sparse_ops.sparse_split(
sp_input=self._SparseTensor_5x7(), num_split=4, axis=1)
self.assertAllEqual(len(sp_tensors_4), 4)
self.assertAllEqual(sp_tensors_4[0].indices.eval(),
[[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensors_4[0].values.eval(), [0, 11, 20, 30, 41])
self.assertAllEqual(sp_tensors_4[0].dense_shape.eval(), [5, 2])
self.assertAllEqual(sp_tensors_4[1].indices.eval(),
[[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
self.assertAllEqual(sp_tensors_4[1].values.eval(), [2, 13, 23, 32, 33])
self.assertAllEqual(sp_tensors_4[1].dense_shape.eval(), [5, 2])
self.assertAllEqual(sp_tensors_4[2].indices.eval(),
[[0, 0], [0, 1], [1, 0], [2, 1], [3, 1], [4, 0]])
self.assertAllEqual(sp_tensors_4[2].values.eval(), [4, 5, 14, 25, 35, 44])
self.assertAllEqual(sp_tensors_4[2].dense_shape.eval(), [5, 2])
self.assertAllEqual(sp_tensors_4[3].indices.eval(), [[1, 0], [4, 0]])
self.assertAllEqual(sp_tensors_4[3].values.eval(), [16, 46])
self.assertAllEqual(sp_tensors_4[3].dense_shape.eval(), [5, 1])
@test_util.run_deprecated_v1
def testSplitMatrixUnevenRows(self):
with self.session(use_gpu=False):
sp_tensors_2 = sparse_ops.sparse_split(
sp_input=self._SparseTensor_5x7(), num_split=2, axis=0)
self.assertAllEqual(sp_tensors_2[0].indices.eval(),
[[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
[1, 4], [1, 6], [2, 0], [2, 3], [2, 5]])
self.assertAllEqual(sp_tensors_2[0].values.eval(),
[0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25])
self.assertAllEqual(sp_tensors_2[0].dense_shape.eval(), [3, 7])
self.assertAllEqual(sp_tensors_2[1].indices.eval(),
[[0, 0], [0, 2], [0, 3], [0, 5], [1, 1], [1, 4],
[1, 6]])
self.assertAllEqual(sp_tensors_2[1].values.eval(),
[30, 32, 33, 35, 41, 44, 46])
self.assertAllEqual(sp_tensors_2[1].dense_shape.eval(), [2, 7])
self.assertAllEqual(len(sp_tensors_2), 2)
sp_tensors_3 = sparse_ops.sparse_split(
sp_input=self._SparseTensor_5x7(), num_split=3, axis=0)
self.assertAllEqual(len(sp_tensors_3), 3)
self.assertAllEqual(sp_tensors_3[0].indices.eval(),
[[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
[1, 4], [1, 6]])
self.assertAllEqual(sp_tensors_3[0].values.eval(),
[0, 2, 4, 5, 11, 13, 14, 16])
self.assertAllEqual(sp_tensors_3[0].dense_shape.eval(), [2, 7])
self.assertAllEqual(sp_tensors_3[1].values.eval(),
[20, 23, 25, 30, 32, 33, 35])
self.assertAllEqual(sp_tensors_3[1].dense_shape.eval(), [2, 7])
self.assertAllEqual(sp_tensors_3[2].indices.eval(), [[0, 1], [0, 4],
[0, 6]])
self.assertAllEqual(sp_tensors_3[2].values.eval(), [41, 44, 46])
self.assertAllEqual(sp_tensors_3[2].dense_shape.eval(), [1, 7])
@test_util.run_deprecated_v1
def testSplitAllRows(self):
with self.session(use_gpu=False):
sp_tensors = sparse_ops.sparse_split(
sp_input=self._SparseTensor_4x6(), num_split=4, axis=0)
self.assertAllEqual(len(sp_tensors), 4)
self.assertAllEqual(sp_tensors[0].indices.eval(), [[0, 0], [0, 2], [0, 4],
[0, 5]])
self.assertAllEqual(sp_tensors[0].values.eval(), [0, 2, 4, 5])
self.assertAllEqual(sp_tensors[0].dense_shape.eval(), [1, 6])
self.assertAllEqual(sp_tensors[1].indices.eval(), [[0, 1], [0, 3], [0,
4]])
self.assertAllEqual(sp_tensors[1].values.eval(), [11, 13, 14])
self.assertAllEqual(sp_tensors[1].dense_shape.eval(), [1, 6])
self.assertAllEqual(sp_tensors[2].indices.eval(), [[0, 0], [0, 3], [0,
5]])
self.assertAllEqual(sp_tensors[2].values.eval(), [20, 23, 25])
self.assertAllEqual(sp_tensors[2].dense_shape.eval(), [1, 6])
self.assertAllEqual(sp_tensors[3].indices.eval(), [[0, 0], [0, 2], [0, 3],
[0, 5]])
self.assertAllEqual(sp_tensors[3].values.eval(), [30, 32, 33, 35])
self.assertAllEqual(sp_tensors[3].dense_shape.eval(), [1, 6])
@test_util.run_deprecated_v1
def testSplitColumns(self):
with self.session(use_gpu=False):
sparse_tensors = sparse_ops.sparse_split(
sp_input=self._SparseTensor_4x6(), num_split=3, axis=1)
self.assertAllEqual(len(sparse_tensors), 3)
self.assertAllEqual(sparse_tensors[0].indices.eval(), [[0, 0], [1, 1],
[2, 0], [3, 0]])
self.assertAllEqual(sparse_tensors[0].values.eval(), [0, 11, 20, 30])
self.assertAllEqual(sparse_tensors[0].dense_shape.eval(), [4, 2])
self.assertAllEqual(sparse_tensors[1].indices.eval(),
[[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
self.assertAllEqual(sparse_tensors[1].values.eval(), [2, 13, 23, 32, 33])
self.assertAllEqual(sparse_tensors[1].dense_shape.eval(), [4, 2])
self.assertAllEqual(sparse_tensors[2].indices.eval(),
[[0, 0], [0, 1], [1, 0], [2, 1], [3, 1]])
self.assertAllEqual(sparse_tensors[2].values.eval(), [4, 5, 14, 25, 35])
self.assertAllEqual(sparse_tensors[2].dense_shape.eval(), [4, 2])
@test_util.run_deprecated_v1
def testSplitAllColumns(self):
with self.session(use_gpu=False):
sparse_tensors = sparse_ops.sparse_split(
sp_input=self._SparseTensor_4x6(), num_split=6, axis=1)
self.assertAllEqual(len(sparse_tensors), 6)
self.assertAllEqual(sparse_tensors[0].indices.eval(), [[0, 0], [2, 0],
[3, 0]])
self.assertAllEqual(sparse_tensors[0].values.eval(), [0, 20, 30])
self.assertAllEqual(sparse_tensors[0].dense_shape.eval(), [4, 1])
self.assertAllEqual(sparse_tensors[1].indices.eval(), [[1, 0]])
self.assertAllEqual(sparse_tensors[1].values.eval(), [11])
self.assertAllEqual(sparse_tensors[1].dense_shape.eval(), [4, 1])
self.assertAllEqual(sparse_tensors[2].indices.eval(), [[0, 0], [3, 0]])
self.assertAllEqual(sparse_tensors[2].values.eval(), [2, 32])
self.assertAllEqual(sparse_tensors[2].dense_shape.eval(), [4, 1])
self.assertAllEqual(sparse_tensors[3].indices.eval(), [[1, 0], [2, 0],
[3, 0]])
self.assertAllEqual(sparse_tensors[3].dense_shape.eval(), [4, 1])
self.assertAllEqual(sparse_tensors[3].values.eval(), [13, 23, 33])
self.assertAllEqual(sparse_tensors[4].indices.eval(), [[0, 0], [1, 0]])
self.assertAllEqual(sparse_tensors[4].values.eval(), [4, 14])
self.assertAllEqual(sparse_tensors[4].dense_shape.eval(), [4, 1])
self.assertAllEqual(sparse_tensors[5].indices.eval(), [[0, 0], [2, 0],
[3, 0]])
self.assertAllEqual(sparse_tensors[5].values.eval(), [5, 25, 35])
self.assertAllEqual(sparse_tensors[5].dense_shape.eval(), [4, 1])
@test_util.run_deprecated_v1
def testSliceConcat(self):
for sp_input in (self._SparseTensorValue_3x4x2(),
self._SparseTensor_3x4x2()):
with self.cached_session(use_gpu=False):
sparse_tensors = sparse_ops.sparse_split(
sp_input=sp_input, num_split=2, axis=1)
concat_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
expected_output = self._SparseTensor_3x4x2()
self.assertAllEqual(concat_tensor.indices.eval(),
expected_output.indices.eval())
def testArgumentErrors(self):
with self.assertRaisesRegexp(ValueError, 'Keyword arguments are required'):
sparse_ops.sparse_split(3, 2, 1)
with self.assertRaisesRegexp(ValueError, 'sp_input is required'):
sparse_ops.sparse_split()
with self.assertRaisesRegexp(ValueError, 'num_split is required'):
sparse_ops.sparse_split(sp_input=1)
with self.assertRaisesRegexp(ValueError, 'axis is required'):
sparse_ops.sparse_split(num_split=2, sp_input=1)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_split_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Reader ops from io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gzip
import os
import shutil
import threading
import zlib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.util import compat
prefix_path = "tensorflow/core/lib"
# pylint: disable=invalid-name
TFRecordCompressionType = tf_record.TFRecordCompressionType
# pylint: enable=invalid-name
# Edgar Allan Poe's 'Eldorado'
_TEXT = b"""Gaily bedight,
A gallant knight,
In sunshine and in shadow,
Had journeyed long,
Singing a song,
In search of Eldorado.
But he grew old
This knight so bold
And o'er his heart a shadow
Fell as he found
No spot of ground
That looked like Eldorado.
And, as his strength
Failed him at length,
He met a pilgrim shadow
'Shadow,' said he,
'Where can it be
This land of Eldorado?'
'Over the Mountains
Of the Moon'
Down the Valley of the Shadow,
Ride, boldly ride,'
The shade replied,
'If you seek for Eldorado!'
"""
class TFCompressionTestCase(test.TestCase):
def setUp(self):
super(TFCompressionTestCase, self).setUp()
self._num_files = 2
self._num_records = 7
def _Record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _CreateFiles(self, options=None, prefix=""):
filenames = []
for i in range(self._num_files):
name = prefix + "tfrecord.%d.txt" % i
records = [self._Record(i, j) for j in range(self._num_records)]
fn = self._WriteRecordsToFile(records, name, options)
filenames.append(fn)
return filenames
def _WriteRecordsToFile(self, records, name="tfrecord", options=None):
fn = os.path.join(self.get_temp_dir(), name)
with tf_record.TFRecordWriter(fn, options=options) as writer:
for r in records:
writer.write(r)
return fn
def _ZlibCompressFile(self, infile, name="tfrecord.z"):
# zlib compress the file and write compressed contents to file.
with open(infile, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), name)
with open(zfn, "wb") as f:
f.write(cdata)
return zfn
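  # Illustrative note (not part of the original file): _ZlibCompressFile(fn)
  # writes zlib.compress(<contents of fn>) to <temp_dir>/tfrecord.z and
  # returns that path; _ZlibDecompressFile below is its inverse, and the Gzip
  # helpers do the same using gzip framing.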
def _GzipCompressFile(self, infile, name="tfrecord.gz"):
# gzip compress the file and write compressed contents to file.
with open(infile, "rb") as f:
cdata = f.read()
gzfn = os.path.join(self.get_temp_dir(), name)
with gzip.GzipFile(gzfn, "wb") as f:
f.write(cdata)
return gzfn
def _ZlibDecompressFile(self, infile, name="tfrecord"):
with open(infile, "rb") as f:
cdata = zlib.decompress(f.read())
fn = os.path.join(self.get_temp_dir(), name)
with open(fn, "wb") as f:
f.write(cdata)
return fn
def _GzipDecompressFile(self, infile, name="tfrecord"):
with gzip.GzipFile(infile, "rb") as f:
cdata = f.read()
fn = os.path.join(self.get_temp_dir(), name)
with open(fn, "wb") as f:
f.write(cdata)
return fn
class IdentityReaderTest(test.TestCase):
def _ExpectRead(self, key, value, expected):
k, v = self.evaluate([key, value])
self.assertAllEqual(expected, k)
self.assertAllEqual(expected, v)
@test_util.run_deprecated_v1
def testOneEpoch(self):
reader = io_ops.IdentityReader("test_reader")
work_completed = reader.num_work_units_completed()
produced = reader.num_records_produced()
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queued_length = queue.size()
key, value = reader.read(queue)
self.assertAllEqual(0, self.evaluate(work_completed))
self.assertAllEqual(0, self.evaluate(produced))
self.assertAllEqual(0, self.evaluate(queued_length))
self.evaluate(queue.enqueue_many([["A", "B", "C"]]))
self.evaluate(queue.close())
self.assertAllEqual(3, self.evaluate(queued_length))
self._ExpectRead(key, value, b"A")
self.assertAllEqual(1, self.evaluate(produced))
self._ExpectRead(key, value, b"B")
self._ExpectRead(key, value, b"C")
self.assertAllEqual(3, self.evaluate(produced))
self.assertAllEqual(0, self.evaluate(queued_length))
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
self.evaluate([key, value])
self.assertAllEqual(3, self.evaluate(work_completed))
self.assertAllEqual(3, self.evaluate(produced))
self.assertAllEqual(0, self.evaluate(queued_length))
@test_util.run_deprecated_v1
def testMultipleEpochs(self):
reader = io_ops.IdentityReader("test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
enqueue = queue.enqueue_many([["DD", "EE"]])
key, value = reader.read(queue)
self.evaluate(enqueue)
self._ExpectRead(key, value, b"DD")
self._ExpectRead(key, value, b"EE")
self.evaluate(enqueue)
self._ExpectRead(key, value, b"DD")
self._ExpectRead(key, value, b"EE")
self.evaluate(enqueue)
self._ExpectRead(key, value, b"DD")
self._ExpectRead(key, value, b"EE")
self.evaluate(queue.close())
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
self.evaluate([key, value])
@test_util.run_deprecated_v1
def testSerializeRestore(self):
reader = io_ops.IdentityReader("test_reader")
produced = reader.num_records_produced()
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
self.evaluate(queue.enqueue_many([["X", "Y", "Z"]]))
key, value = reader.read(queue)
self._ExpectRead(key, value, b"X")
self.assertAllEqual(1, self.evaluate(produced))
state = self.evaluate(reader.serialize_state())
self._ExpectRead(key, value, b"Y")
self._ExpectRead(key, value, b"Z")
self.assertAllEqual(3, self.evaluate(produced))
self.evaluate(queue.enqueue_many([["Y", "Z"]]))
self.evaluate(queue.close())
self.evaluate(reader.restore_state(state))
self.assertAllEqual(1, self.evaluate(produced))
self._ExpectRead(key, value, b"Y")
self._ExpectRead(key, value, b"Z")
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
self.evaluate([key, value])
self.assertAllEqual(3, self.evaluate(produced))
self.assertEqual(bytes, type(state))
with self.assertRaises(ValueError):
reader.restore_state([])
with self.assertRaises(ValueError):
reader.restore_state([state, state])
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
self.evaluate(reader.restore_state(state[1:]))
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
self.evaluate(reader.restore_state(state[:-1]))
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
self.evaluate(reader.restore_state(state + b"ExtraJunk"))
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
self.evaluate(reader.restore_state(b"PREFIX" + state))
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
self.evaluate(reader.restore_state(b"BOGUS" + state[5:]))
@test_util.run_deprecated_v1
def testReset(self):
reader = io_ops.IdentityReader("test_reader")
work_completed = reader.num_work_units_completed()
produced = reader.num_records_produced()
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queued_length = queue.size()
key, value = reader.read(queue)
self.evaluate(queue.enqueue_many([["X", "Y", "Z"]]))
self._ExpectRead(key, value, b"X")
self.assertLess(0, self.evaluate(queued_length))
self.assertAllEqual(1, self.evaluate(produced))
self._ExpectRead(key, value, b"Y")
self.assertLess(0, self.evaluate(work_completed))
self.assertAllEqual(2, self.evaluate(produced))
self.evaluate(reader.reset())
self.assertAllEqual(0, self.evaluate(work_completed))
self.assertAllEqual(0, self.evaluate(produced))
self.assertAllEqual(1, self.evaluate(queued_length))
self._ExpectRead(key, value, b"Z")
self.evaluate(queue.enqueue_many([["K", "L"]]))
self._ExpectRead(key, value, b"K")
class WholeFileReaderTest(test.TestCase):
def setUp(self):
super(WholeFileReaderTest, self).setUp()
self._filenames = [
os.path.join(self.get_temp_dir(), "whole_file.%d.txt" % i)
for i in range(3)
]
self._content = [b"One\na\nb\n", b"Two\nC\nD", b"Three x, y, z"]
for fn, c in zip(self._filenames, self._content):
with open(fn, "wb") as h:
h.write(c)
def tearDown(self):
for fn in self._filenames:
os.remove(fn)
super(WholeFileReaderTest, self).tearDown()
def _ExpectRead(self, key, value, index):
k, v = self.evaluate([key, value])
self.assertAllEqual(compat.as_bytes(self._filenames[index]), k)
self.assertAllEqual(self._content[index], v)
@test_util.run_deprecated_v1
def testOneEpoch(self):
reader = io_ops.WholeFileReader("test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
self.evaluate(queue.enqueue_many([self._filenames]))
self.evaluate(queue.close())
key, value = reader.read(queue)
self._ExpectRead(key, value, 0)
self._ExpectRead(key, value, 1)
self._ExpectRead(key, value, 2)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
self.evaluate([key, value])
@test_util.run_deprecated_v1
def testInfiniteEpochs(self):
reader = io_ops.WholeFileReader("test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
enqueue = queue.enqueue_many([self._filenames])
key, value = reader.read(queue)
self.evaluate(enqueue)
self._ExpectRead(key, value, 0)
self._ExpectRead(key, value, 1)
self.evaluate(enqueue)
self._ExpectRead(key, value, 2)
self._ExpectRead(key, value, 0)
self._ExpectRead(key, value, 1)
self.evaluate(enqueue)
self._ExpectRead(key, value, 2)
self._ExpectRead(key, value, 0)
class TextLineReaderTest(test.TestCase):
def setUp(self):
super(TextLineReaderTest, self).setUp()
self._num_files = 2
self._num_lines = 5
def _LineText(self, f, l):
return compat.as_bytes("%d: %d" % (f, l))
def _CreateFiles(self, crlf=False):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
for j in range(self._num_lines):
f.write(self._LineText(i, j))
          # Always include a newline after the record unless it is at the end
          # of the file, in which case we only include it for the first file.
if j + 1 != self._num_lines or i == 0:
f.write(b"\r\n" if crlf else b"\n")
return filenames
def _testOneEpoch(self, files):
reader = io_ops.TextLineReader(name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
self.evaluate(queue.enqueue_many([files]))
self.evaluate(queue.close())
for i in range(self._num_files):
for j in range(self._num_lines):
k, v = self.evaluate([key, value])
self.assertAllEqual("%s:%d" % (files[i], j + 1), compat.as_text(k))
self.assertAllEqual(self._LineText(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = self.evaluate([key, value])
@test_util.run_deprecated_v1
def testOneEpochLF(self):
self._testOneEpoch(self._CreateFiles(crlf=False))
@test_util.run_deprecated_v1
def testOneEpochCRLF(self):
self._testOneEpoch(self._CreateFiles(crlf=True))
@test_util.run_deprecated_v1
def testSkipHeaderLines(self):
files = self._CreateFiles()
reader = io_ops.TextLineReader(skip_header_lines=1, name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
self.evaluate(queue.enqueue_many([files]))
self.evaluate(queue.close())
for i in range(self._num_files):
for j in range(self._num_lines - 1):
k, v = self.evaluate([key, value])
self.assertAllEqual("%s:%d" % (files[i], j + 2), compat.as_text(k))
self.assertAllEqual(self._LineText(i, j + 1), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = self.evaluate([key, value])
class FixedLengthRecordReaderTest(TFCompressionTestCase):
def setUp(self):
super(FixedLengthRecordReaderTest, self).setUp()
self._num_files = 2
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
self._hop_bytes = 2
def _Record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _OverlappedRecord(self, f, r):
record_str = "".join([
str(i)[0]
for i in range(r * self._hop_bytes,
r * self._hop_bytes + self._record_bytes)
])
return compat.as_bytes(record_str)
# gap_bytes=hop_bytes-record_bytes
def _CreateFiles(self, num_records, gap_bytes):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
if num_records > 0:
f.write(self._Record(i, 0))
for j in range(1, num_records):
if gap_bytes > 0:
f.write(b"G" * gap_bytes)
f.write(self._Record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
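  # Illustrative worked example (not part of the original file): with
  # header_bytes=5, record_bytes=3 and footer_bytes=2 from setUp,
  # _CreateFiles(num_records=2, gap_bytes=1) writes file 0 as
  #   b"HHHHH" + b"000" + b"G" + b"111" + b"FF"
  # i.e. header, record 0, a one-byte gap, record 1, then the footer.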
def _CreateOverlappedRecordFiles(self, num_overlapped_records):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(),
"fixed_length_overlapped_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
if num_overlapped_records > 0:
all_records_str = "".join([
str(i)[0]
for i in range(self._record_bytes + self._hop_bytes *
(num_overlapped_records - 1))
])
f.write(compat.as_bytes(all_records_str))
f.write(b"F" * self._footer_bytes)
return filenames
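  # Illustrative worked example (not part of the original file): with
  # record_bytes=3 and hop_bytes=2, _CreateOverlappedRecordFiles(2) writes
  # file 0 as b"HHHHH" + b"01234" + b"FF"; the two overlapping records the
  # reader should recover are _OverlappedRecord(0, 0) == b"012" and
  # _OverlappedRecord(0, 1) == b"234".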
# gap_bytes=hop_bytes-record_bytes
def _CreateGzipFiles(self, num_records, gap_bytes):
filenames = self._CreateFiles(num_records, gap_bytes)
for fn in filenames:
      # Compress in place.
self._GzipCompressFile(fn, fn)
return filenames
# gap_bytes=hop_bytes-record_bytes
def _CreateZlibFiles(self, num_records, gap_bytes):
filenames = self._CreateFiles(num_records, gap_bytes)
for fn in filenames:
      # Compress in place.
self._ZlibCompressFile(fn, fn)
return filenames
def _CreateGzipOverlappedRecordFiles(self, num_overlapped_records):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(),
"fixed_length_overlapped_record.%d.txt" % i)
filenames.append(fn)
with gzip.GzipFile(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
if num_overlapped_records > 0:
all_records_str = "".join([
str(i)[0]
for i in range(self._record_bytes + self._hop_bytes *
(num_overlapped_records - 1))
])
f.write(compat.as_bytes(all_records_str))
f.write(b"F" * self._footer_bytes)
return filenames
def _CreateZlibOverlappedRecordFiles(self, num_overlapped_records):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(),
"fixed_length_overlapped_record.%d.txt" % i)
filenames.append(fn)
with open(fn + ".tmp", "wb") as f:
f.write(b"H" * self._header_bytes)
if num_overlapped_records > 0:
all_records_str = "".join([
str(i)[0]
for i in range(self._record_bytes + self._hop_bytes *
(num_overlapped_records - 1))
])
f.write(compat.as_bytes(all_records_str))
f.write(b"F" * self._footer_bytes)
self._ZlibCompressFile(fn + ".tmp", fn)
return filenames
# gap_bytes=hop_bytes-record_bytes
def _TestOneEpoch(self, files, num_records, gap_bytes, encoding=None):
hop_bytes = 0 if gap_bytes == 0 else self._record_bytes + gap_bytes
reader = io_ops.FixedLengthRecordReader(
header_bytes=self._header_bytes,
record_bytes=self._record_bytes,
footer_bytes=self._footer_bytes,
hop_bytes=hop_bytes,
encoding=encoding,
name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
self.evaluate(queue.enqueue_many([files]))
self.evaluate(queue.close())
for i in range(self._num_files):
for j in range(num_records):
k, v = self.evaluate([key, value])
self.assertAllEqual("%s:%d" % (files[i], j), compat.as_text(k))
self.assertAllEqual(self._Record(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = self.evaluate([key, value])
def _TestOneEpochWithHopBytes(self,
files,
num_overlapped_records,
encoding=None):
reader = io_ops.FixedLengthRecordReader(
header_bytes=self._header_bytes,
record_bytes=self._record_bytes,
footer_bytes=self._footer_bytes,
hop_bytes=self._hop_bytes,
encoding=encoding,
name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
self.evaluate(queue.enqueue_many([files]))
self.evaluate(queue.close())
for i in range(self._num_files):
for j in range(num_overlapped_records):
k, v = self.evaluate([key, value])
self.assertAllEqual("%s:%d" % (files[i], j), compat.as_text(k))
self.assertAllEqual(self._OverlappedRecord(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = self.evaluate([key, value])
@test_util.run_deprecated_v1
def testOneEpoch(self):
for num_records in [0, 7]:
# gap_bytes=0: hop_bytes=0
# gap_bytes=1: hop_bytes=record_bytes+1
for gap_bytes in [0, 1]:
files = self._CreateFiles(num_records, gap_bytes)
self._TestOneEpoch(files, num_records, gap_bytes)
@test_util.run_deprecated_v1
def testGzipOneEpoch(self):
for num_records in [0, 7]:
# gap_bytes=0: hop_bytes=0
# gap_bytes=1: hop_bytes=record_bytes+1
for gap_bytes in [0, 1]:
files = self._CreateGzipFiles(num_records, gap_bytes)
self._TestOneEpoch(files, num_records, gap_bytes, encoding="GZIP")
@test_util.run_deprecated_v1
def testZlibOneEpoch(self):
for num_records in [0, 7]:
# gap_bytes=0: hop_bytes=0
# gap_bytes=1: hop_bytes=record_bytes+1
for gap_bytes in [0, 1]:
files = self._CreateZlibFiles(num_records, gap_bytes)
self._TestOneEpoch(files, num_records, gap_bytes, encoding="ZLIB")
@test_util.run_deprecated_v1
def testOneEpochWithHopBytes(self):
for num_overlapped_records in [0, 2]:
files = self._CreateOverlappedRecordFiles(num_overlapped_records)
self._TestOneEpochWithHopBytes(files, num_overlapped_records)
@test_util.run_deprecated_v1
def testGzipOneEpochWithHopBytes(self):
for num_overlapped_records in [0, 2]:
      files = self._CreateGzipOverlappedRecordFiles(num_overlapped_records)
self._TestOneEpochWithHopBytes(
files, num_overlapped_records, encoding="GZIP")
@test_util.run_deprecated_v1
def testZlibOneEpochWithHopBytes(self):
for num_overlapped_records in [0, 2]:
files = self._CreateZlibOverlappedRecordFiles(num_overlapped_records)
self._TestOneEpochWithHopBytes(
files, num_overlapped_records, encoding="ZLIB")
class TFRecordReaderTest(TFCompressionTestCase):
def setUp(self):
super(TFRecordReaderTest, self).setUp()
@test_util.run_deprecated_v1
def testOneEpoch(self):
files = self._CreateFiles()
reader = io_ops.TFRecordReader(name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
self.evaluate(queue.enqueue_many([files]))
self.evaluate(queue.close())
for i in range(self._num_files):
for j in range(self._num_records):
k, v = self.evaluate([key, value])
self.assertTrue(compat.as_text(k).startswith("%s:" % files[i]))
self.assertAllEqual(self._Record(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = self.evaluate([key, value])
@test_util.run_deprecated_v1
def testReadUpTo(self):
files = self._CreateFiles()
reader = io_ops.TFRecordReader(name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
batch_size = 3
key, value = reader.read_up_to(queue, batch_size)
self.evaluate(queue.enqueue_many([files]))
self.evaluate(queue.close())
num_k = 0
num_v = 0
while True:
try:
k, v = self.evaluate([key, value])
# Test reading *up to* batch_size records
self.assertLessEqual(len(k), batch_size)
self.assertLessEqual(len(v), batch_size)
num_k += len(k)
num_v += len(v)
except errors_impl.OutOfRangeError:
break
# Test that we have read everything
self.assertEqual(self._num_files * self._num_records, num_k)
self.assertEqual(self._num_files * self._num_records, num_v)
@test_util.run_deprecated_v1
def testReadZlibFiles(self):
options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
files = self._CreateFiles(options)
reader = io_ops.TFRecordReader(name="test_reader", options=options)
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
self.evaluate(queue.enqueue_many([files]))
self.evaluate(queue.close())
for i in range(self._num_files):
for j in range(self._num_records):
k, v = self.evaluate([key, value])
self.assertTrue(compat.as_text(k).startswith("%s:" % files[i]))
self.assertAllEqual(self._Record(i, j), v)
@test_util.run_deprecated_v1
def testReadGzipFiles(self):
options = tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)
files = self._CreateFiles(options)
reader = io_ops.TFRecordReader(name="test_reader", options=options)
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
self.evaluate(queue.enqueue_many([files]))
self.evaluate(queue.close())
for i in range(self._num_files):
for j in range(self._num_records):
k, v = self.evaluate([key, value])
self.assertTrue(compat.as_text(k).startswith("%s:" % files[i]))
self.assertAllEqual(self._Record(i, j), v)
class AsyncReaderTest(test.TestCase):
@test_util.run_deprecated_v1
def testNoDeadlockFromQueue(self):
"""Tests that reading does not block main execution threads."""
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
with self.session(config=config) as sess:
thread_data_t = collections.namedtuple("thread_data_t",
["thread", "queue", "output"])
thread_data = []
# Create different readers, each with its own queue.
for i in range(3):
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
reader = io_ops.TextLineReader()
_, line = reader.read(queue)
output = []
t = threading.Thread(
target=AsyncReaderTest._RunSessionAndSave,
args=(sess, [line], output))
thread_data.append(thread_data_t(t, queue, output))
# Start all readers. They are all blocked waiting for queue entries.
self.evaluate(variables.global_variables_initializer())
for d in thread_data:
d.thread.start()
# Unblock the readers.
for i, d in enumerate(reversed(thread_data)):
fname = os.path.join(self.get_temp_dir(), "deadlock.%s.txt" % i)
with open(fname, "wb") as f:
f.write(("file-%s" % i).encode())
self.evaluate(d.queue.enqueue_many([[fname]]))
d.thread.join()
self.assertEqual([[("file-%s" % i).encode()]], d.output)
@staticmethod
def _RunSessionAndSave(sess, args, output):
output.append(sess.run(args))
class LMDBReaderTest(test.TestCase):
def setUp(self):
super(LMDBReaderTest, self).setUp()
# Copy database out because we need the path to be writable to use locks.
path = os.path.join(prefix_path, "lmdb", "testdata", "data.mdb")
self.db_path = os.path.join(self.get_temp_dir(), "data.mdb")
shutil.copy(path, self.db_path)
@test_util.run_deprecated_v1
def testReadFromFile(self):
reader = io_ops.LMDBReader(name="test_read_from_file")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
self.evaluate(queue.enqueue([self.db_path]))
self.evaluate(queue.close())
for i in range(10):
k, v = self.evaluate([key, value])
self.assertAllEqual(compat.as_bytes(k), compat.as_bytes(str(i)))
self.assertAllEqual(
compat.as_bytes(v), compat.as_bytes(str(chr(ord("a") + i))))
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = self.evaluate([key, value])
@test_util.run_deprecated_v1
def testReadFromSameFile(self):
with self.cached_session() as sess:
reader1 = io_ops.LMDBReader(name="test_read_from_same_file1")
reader2 = io_ops.LMDBReader(name="test_read_from_same_file2")
filename_queue = input_lib.string_input_producer(
[self.db_path], num_epochs=None)
key1, value1 = reader1.read(filename_queue)
key2, value2 = reader2.read(filename_queue)
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
for _ in range(3):
for _ in range(10):
k1, v1, k2, v2 = self.evaluate([key1, value1, key2, value2])
self.assertAllEqual(compat.as_bytes(k1), compat.as_bytes(k2))
self.assertAllEqual(compat.as_bytes(v1), compat.as_bytes(v2))
coord.request_stop()
coord.join(threads)
@test_util.run_deprecated_v1
def testReadFromFolder(self):
reader = io_ops.LMDBReader(name="test_read_from_folder")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
self.evaluate(queue.enqueue([self.db_path]))
self.evaluate(queue.close())
for i in range(10):
k, v = self.evaluate([key, value])
self.assertAllEqual(compat.as_bytes(k), compat.as_bytes(str(i)))
self.assertAllEqual(
compat.as_bytes(v), compat.as_bytes(str(chr(ord("a") + i))))
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = self.evaluate([key, value])
@test_util.run_deprecated_v1
def testReadFromFileRepeatedly(self):
with self.cached_session() as sess:
reader = io_ops.LMDBReader(name="test_read_from_file_repeated")
filename_queue = input_lib.string_input_producer(
[self.db_path], num_epochs=None)
key, value = reader.read(filename_queue)
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
# Iterate over the lmdb 3 times.
for _ in range(3):
# Go over all 10 records each time.
for j in range(10):
k, v = self.evaluate([key, value])
self.assertAllEqual(compat.as_bytes(k), compat.as_bytes(str(j)))
self.assertAllEqual(
compat.as_bytes(v), compat.as_bytes(str(chr(ord("a") + j))))
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/reader_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Conv2DBackpropFilterGradTest(test.TestCase):
@test_util.run_deprecated_v1
def testGradient(self):
with self.cached_session():
for padding in [
"SAME",
"VALID",
[(0, 0), (1, 2), (3, 4), (0, 0)],
[(0, 0), (0, 3), (4, 2), (0, 0)]
]:
for stride in [1, 2]:
np.random.seed(1)
in_shape = [5, 8, 6, 4]
in_val = constant_op.constant(
2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
filter_shape = [3, 3, 4, 6]
# Make a convolution op with the current settings, just to easily get
# the shape of the output.
conv_out = nn_ops.conv2d(
in_val,
array_ops.zeros(filter_shape),
strides=[1, stride, stride, 1],
padding=padding)
out_backprop_shape = conv_out.get_shape().as_list()
out_backprop_val = constant_op.constant(
2 * np.random.random_sample(out_backprop_shape) - 1,
dtype=dtypes.float32)
output = nn_ops.conv2d_backprop_filter(
in_val,
filter_shape,
out_backprop_val,
strides=[1, stride, stride, 1],
padding=padding)
err = gradient_checker.compute_gradient_error(
[in_val, out_backprop_val], [in_shape, out_backprop_shape],
output, filter_shape)
print("conv2d_backprop_filter gradient err = %g " % err)
err_tolerance = 2e-3
self.assertLess(err, err_tolerance)
@test_util.run_deprecated_v1
def testGradientDilatedConv(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
for padding in [
"SAME",
"VALID",
[(0, 0), (3, 5), (2, 1), (0, 0)],
[(0, 0), (5, 2), (5, 1), (0, 0)]
]:
for stride in [1, 2]:
np.random.seed(1)
in_shape = [5, 8, 6, 4]
in_val = constant_op.constant(
2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
filter_shape = [3, 3, 4, 6]
# Make a convolution op with the current settings,
# just to easily get the shape of the output.
conv_out = nn_ops.conv2d(
in_val,
array_ops.zeros(filter_shape),
dilations=[1, 2, 2, 1],
strides=[1, stride, stride, 1],
padding=padding)
out_backprop_shape = conv_out.get_shape().as_list()
out_backprop_val = constant_op.constant(
2 * np.random.random_sample(out_backprop_shape) - 1,
dtype=dtypes.float32)
output = nn_ops.conv2d_backprop_filter(
in_val,
filter_shape,
out_backprop_val,
dilations=[1, 2, 2, 1],
strides=[1, stride, stride, 1],
padding=padding)
err = gradient_checker.compute_gradient_error(
[in_val, out_backprop_val], [in_shape, out_backprop_shape],
output, filter_shape)
print("conv2d_backprop_filter gradient err = %g " % err)
err_tolerance = 1e-2
self.assertLess(err, err_tolerance)
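# The tests above rely on gradient_checker.compute_gradient_error, which
# compares the analytic gradient of an output with a numerically estimated
# one and returns the maximum absolute difference. A minimal sketch of that
# call on a simple op follows; `_square_gradient_error` is an assumed helper
# name, and it has to be called inside a graph-mode session, just like the
# calls in the test methods above.
def _square_gradient_error():
  x_shape = [2, 3]
  x = constant_op.constant(
      np.random.random_sample(x_shape), dtype=dtypes.float32)
  y = x * x  # Elementwise square, so dy/dx = 2x.
  # Max |analytic - numeric| over all (input, output) index pairs.
  return gradient_checker.compute_gradient_error(x, x_shape, y, x_shape)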
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RegexFullMatch op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
@parameterized.parameters(
(gen_string_ops.regex_full_match),
(gen_string_ops.static_regex_full_match))
class RegexFullMatchOpVariantsTest(test.TestCase, parameterized.TestCase):
@test_util.run_deprecated_v1
def testRegexFullMatch(self, op):
values = ["abaaba", "abcdabcde"]
with self.cached_session():
input_tensor = constant_op.constant(values, dtypes.string)
matched = op(input_tensor, "a.*a").eval()
self.assertAllEqual([True, False], matched)
@test_util.run_deprecated_v1
def testRegexFullMatchTwoDims(self, op):
values = [["abaaba", "abcdabcde"], ["acdcba", "ebcda"]]
with self.cached_session():
input_tensor = constant_op.constant(values, dtypes.string)
matched = op(input_tensor, "a.*a").eval()
self.assertAllEqual([[True, False], [True, False]], matched)
@test_util.run_deprecated_v1
def testEmptyMatch(self, op):
values = ["abc", "1"]
with self.cached_session():
input_tensor = constant_op.constant(values, dtypes.string)
matched = op(input_tensor, "").eval()
self.assertAllEqual([False, False], matched)
@test_util.run_deprecated_v1
def testInvalidPattern(self, op):
values = ["abc", "1"]
with self.cached_session():
input_tensor = constant_op.constant(values, dtypes.string)
invalid_pattern = "A["
matched = op(input_tensor, invalid_pattern)
with self.assertRaisesOpError("Invalid pattern"):
self.evaluate(matched)
class RegexFullMatchOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testRegexFullMatchDelegation(self):
with compat.forward_compatibility_horizon(2018, 11, 1):
with self.cached_session():
input_tensor = constant_op.constant("foo", dtypes.string)
pattern = "[a-z]"
op = string_ops.regex_full_match(input_tensor, pattern)
self.assertTrue(op.name.startswith("RegexFullMatch"), op.name)
pattern_tensor = constant_op.constant("[a-z]*", dtypes.string)
op_tensor = string_ops.regex_full_match(input_tensor, pattern_tensor)
self.assertTrue(op_tensor.name.startswith("RegexFullMatch"), op.name)
@test_util.run_deprecated_v1
def testStaticRegexFullMatchDelegation(self):
with compat.forward_compatibility_horizon(2018, 11, 20):
with self.cached_session():
input_tensor = constant_op.constant("foo", dtypes.string)
pattern = "[a-z]*"
op = string_ops.regex_full_match(input_tensor, pattern)
self.assertTrue(op.name.startswith("StaticRegexFullMatch"), op.name)
pattern_tensor = constant_op.constant("[a-z]*", dtypes.string)
op_vec = string_ops.regex_full_match(input_tensor, pattern_tensor)
self.assertTrue(op_vec.name.startswith("RegexFullMatch"), op.name)
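# string_ops.regex_full_match returns a bool tensor of the input's shape,
# True wherever the whole string matches the RE2 pattern; the delegation
# tests above only check which kernel is picked (StaticRegexFullMatch for a
# constant pattern, RegexFullMatch for a tensor pattern). The sketch below
# restates the plain matching behaviour; `_match_lowercase` is an assumed
# helper name used only for illustration.
def _match_lowercase(strings):
  # E.g. _match_lowercase(["foo", "Foo"]) evaluates to [True, False].
  input_tensor = constant_op.constant(strings, dtypes.string)
  return string_ops.regex_full_match(input_tensor, "[a-z]+")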
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/regex_full_match_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.functional_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
# pylint: disable=invalid-name
def simple_scoped_fn(a, x):
"""Simple function: (a, x) -> 2(x+a), but with "2" as a variable in scope."""
with variable_scope.variable_scope("body"):
# Dummy variable, just to check that scoping works as intended.
two = variable_scope.get_variable(
"two", [],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(2))
return math_ops.multiply(math_ops.add(a, x), two)
@test_util.with_control_flow_v2
class MapFnTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testMap_Simple(self):
nums = [1, 2, 3, 4, 5, 6]
elems = constant_op.constant(nums, name="data")
r = map_fn.map_fn(
lambda x: math_ops.multiply(math_ops.add(x, 3), 2), elems)
self.assertAllEqual(
np.array([(x + 3) * 2 for x in nums]), self.evaluate(r))
def testMapDtypeEager(self):
with context.eager_mode():
dtype = map_fn.map_fn(lambda x: constant_op.constant(""),
constant_op.constant([]),
dtype=dtypes.string).dtype
self.assertEqual(dtype, dtypes.string)
def testMapSparseTensor(self):
with self.cached_session():
with self.assertRaises(TypeError):
map_fn.map_fn(
lambda x: x,
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=constant_op.constant([0, 1, 2]),
dense_shape=[2, 2]))
@test_util.run_in_graph_and_eager_modes
def testMapOverScalarErrors(self):
with self.assertRaisesRegexp(ValueError, "not scalars"):
map_fn.map_fn(lambda x: x, [1, 2])
with self.assertRaisesRegexp(ValueError, "not a scalar"):
map_fn.map_fn(lambda x: x, 1)
@test_util.run_deprecated_v1
def testMap_Scoped(self):
with self.cached_session() as sess:
def double_scoped(x):
"""2x with a dummy 2 that is scoped."""
with variable_scope.variable_scope("body"):
# Dummy variable, just to check that scoping works as intended.
two = variable_scope.get_variable(
"two", [],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(2))
return math_ops.multiply(x, two)
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
doubles = np.array([2 * x for x in [1, 2, 3, 4, 5, 6]])
r = map_fn.map_fn(double_scoped, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
self.assertAllEqual(doubles, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = map_fn.map_fn(double_scoped, elems)
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(doubles, self.evaluate(r))
@test_util.run_deprecated_v1
def testMap_Grad(self):
with self.cached_session():
param = constant_op.constant(2.0)
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
y = map_fn.map_fn(
lambda x: math_ops.multiply(math_ops.square(x), param), elems)
r = gradients_impl.gradients(y, param)[0]
self.assertAllEqual(91.0, self.evaluate(r))
r = gradients_impl.gradients(y, elems)[0]
self.assertAllEqual([4.0, 8.0, 12.0, 16.0, 20.0, 24.0], self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testMap_SimpleNotTensor(self):
nums = np.array([1, 2, 3, 4, 5, 6])
r = map_fn.map_fn(
lambda x: math_ops.multiply(math_ops.add(x, 3), 2), nums)
self.assertAllEqual(
np.array([(x + 3) * 2 for x in nums]), self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testMap_SingleInputMultiOutput(self):
nums = np.array([1, 2, 3, 4, 5, 6])
r = map_fn.map_fn(
lambda x: ((x + 3) * 2, -(x + 3) * 2),
nums,
dtype=(dtypes.int64, dtypes.int64))
self.assertEqual(2, len(r))
self.assertEqual((6,), r[0].get_shape())
self.assertEqual((6,), r[1].get_shape())
received = self.evaluate(r)
self.assertAllEqual((nums + 3) * 2, received[0])
self.assertAllEqual(-(nums + 3) * 2, received[1])
@test_util.run_in_graph_and_eager_modes
def testMap_MultiOutputMismatchedDtype(self):
nums = np.array([1, 2, 3, 4, 5, 6])
with self.assertRaisesRegexp(
TypeError, r"two structures don't have the same nested structure"):
# lambda emits tuple, but dtype is a list
map_fn.map_fn(
lambda x: ((x + 3) * 2, -(x + 3) * 2),
nums,
dtype=[dtypes.int64, dtypes.int64])
@test_util.run_in_graph_and_eager_modes
def testMap_MultiInputSingleOutput(self):
nums = np.array([1, 2, 3, 4, 5, 6])
r = map_fn.map_fn(
lambda x: x[0] * x[1][0] + x[1][1], (nums, (nums, -nums)),
dtype=dtypes.int64)
self.assertEqual((6,), r.get_shape())
received = self.evaluate(r)
self.assertAllEqual(nums * nums + (-nums), received)
@test_util.run_in_graph_and_eager_modes
def testMap_MultiInputSameStructureOutput(self):
nums = np.array([1, 2, 3, 4, 5, 6])
r = map_fn.map_fn(lambda x: (x[1][0], (x[1][1], x[0])),
(nums, (2 * nums, -nums)))
r = [r[0], r[1][0], r[1][1]]
self.assertEqual((6,), r[0].get_shape())
self.assertEqual((6,), r[1].get_shape())
self.assertEqual((6,), r[2].get_shape())
received = self.evaluate(r)
self.assertAllEqual(2 * nums, received[0])
self.assertAllEqual(-nums, received[1])
self.assertAllEqual(nums, received[2])
@test_util.run_in_graph_and_eager_modes
def testMapShape(self):
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
y = map_fn.map_fn(lambda e: e, x)
self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
@test_util.run_deprecated_v1
def testMapUnknownShape(self):
x = array_ops.placeholder(dtypes.float32)
y = map_fn.map_fn(lambda e: e, x)
self.assertIs(None, y.get_shape().dims)
  # TODO(b/124383826): this test fails in eager: the iterable is of length 0,
  # so the body of the while loop never executes
@test_util.run_v1_only("b/120545219")
def testMapEmptyScalar(self):
map_return = map_fn.map_fn(lambda x: 1,
constant_op.constant([], dtype=dtypes.int32))
self.assertAllEqual([0], map_return.get_shape().dims)
self.assertAllEqual([0], self.evaluate(map_return).shape)
  # TODO(b/124383826): this test fails in eager: the iterable is of length 0,
  # so the body of the while loop never executes
@test_util.run_v1_only("b/120545219")
def testMapEmptyTensor(self):
with self.cached_session():
map_return = map_fn.map_fn(lambda x: array_ops.zeros([3, 2]),
constant_op.constant([]))
self.assertAllEqual([0, 3, 2], map_return.get_shape().dims)
self.assertAllEqual([0, 3, 2], self.evaluate(map_return).shape)
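# map_fn applies a callable to each element unstacked along axis 0. When the
# callable's output structure or dtype differs from the input, the `dtype`
# argument must describe the output, as exercised in
# testMap_SingleInputMultiOutput above. The sketch below restates that
# calling pattern; `_double_and_negate` is an assumed helper name used only
# for illustration.
def _double_and_negate(nums):
  elems = constant_op.constant(nums, dtype=dtypes.int64)
  # Each element x maps to the pair (2x, -2x); dtype mirrors that structure.
  return map_fn.map_fn(
      lambda x: (x * 2, -x * 2), elems, dtype=(dtypes.int64, dtypes.int64))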
if __name__ == "__main__":
test.main()
# pylint: enable=invalid-name
|
tensorflow-master
|
tensorflow/python/kernel_tests/map_fn_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for barrier ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
class BarrierTest(test.TestCase):
def testConstructorWithShapes(self):
with ops.Graph().as_default():
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32),
shapes=((1, 2, 3), (8,)),
shared_name="B",
name="B")
self.assertTrue(isinstance(b.barrier_ref, ops.Tensor))
self.assertProtoEquals("""
name:'B' op:'Barrier'
attr {
key: "capacity"
value {
i: -1
}
}
attr { key: 'component_types'
value { list { type: DT_FLOAT type: DT_FLOAT } } }
attr {
key: 'shapes'
value {
list {
shape {
dim { size: 1 } dim { size: 2 } dim { size: 3 }
}
shape {
dim { size: 8 }
}
}
}
}
attr { key: 'container' value { s: "" } }
attr { key: 'shared_name' value: { s: 'B' } }
""", b.barrier_ref.op.node_def)
@test_util.run_deprecated_v1
def testInsertMany(self):
with self.cached_session():
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
self.assertEqual([], size_t.get_shape())
keys = [b"a", b"b", b"c"]
insert_0_op = b.insert_many(0, keys, [10.0, 20.0, 30.0])
insert_1_op = b.insert_many(1, keys, [100.0, 200.0, 300.0])
self.assertEquals(size_t.eval(), [0])
insert_0_op.run()
self.assertEquals(size_t.eval(), [0])
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])
def testInsertManyEmptyTensor(self):
with self.cached_session():
error_message = ("Empty tensors are not supported, but received shape "
r"\'\(0,\)\' at index 1")
with self.assertRaisesRegexp(ValueError, error_message):
data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((1,), (0,)), name="B")
@test_util.run_deprecated_v1
def testInsertManyEmptyTensorUnknown(self):
with self.cached_session():
b = data_flow_ops.Barrier((dtypes.float32, dtypes.float32), name="B")
size_t = b.ready_size()
self.assertEqual([], size_t.get_shape())
keys = [b"a", b"b", b"c"]
insert_0_op = b.insert_many(0, keys, np.array([[], [], []], np.float32))
self.assertEquals(size_t.eval(), [0])
with self.assertRaisesOpError(
".*Tensors with no elements are not supported.*"):
insert_0_op.run()
@test_util.run_deprecated_v1
def testTakeMany(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
keys = [b"a", b"b", b"c"]
values_0 = [10.0, 20.0, 30.0]
values_1 = [100.0, 200.0, 300.0]
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys, values_1)
take_t = b.take_many(3)
insert_0_op.run()
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 3)
for k, v0, v1 in zip(keys, values_0, values_1):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
@test_util.run_deprecated_v1
def testTakeManySmallBatch(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
size_i = b.incomplete_size()
keys = [b"a", b"b", b"c", b"d"]
values_0 = [10.0, 20.0, 30.0, 40.0]
values_1 = [100.0, 200.0, 300.0, 400.0]
insert_0_op = b.insert_many(0, keys, values_0)
      # Split adding of the second component into three independent operations.
      # After insert_1_1_op runs, we'll have two ready elements in the barrier;
      # the other two will still be incomplete.
insert_1_1_op = b.insert_many(1, keys[0:2], values_1[0:2]) # add "a", "b"
insert_1_2_op = b.insert_many(1, keys[2:3], values_1[2:3]) # add "c"
insert_1_3_op = b.insert_many(1, keys[3:], values_1[3:]) # add "d"
insert_empty_op = b.insert_many(0, [], [])
close_op = b.close()
close_op_final = b.close(cancel_pending_enqueues=True)
index_t, key_t, value_list_t = b.take_many(3, allow_small_batch=True)
insert_0_op.run()
insert_1_1_op.run()
close_op.run()
# Now we have a closed barrier with 2 ready elements. Running take_t
# should return a reduced batch with 2 elements only.
self.assertEquals(size_i.eval(), [2]) # assert that incomplete size = 2
self.assertEquals(size_t.eval(), [2]) # assert that ready size = 2
_, keys_val, values_0_val, values_1_val = sess.run(
[index_t, key_t, value_list_t[0], value_list_t[1]])
# Check that correct values have been returned.
for k, v0, v1 in zip(keys[0:2], values_0[0:2], values_1[0:2]):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# The next insert completes the element with key "c". The next take_t
# should return a batch with just 1 element.
insert_1_2_op.run()
self.assertEquals(size_i.eval(), [1]) # assert that incomplete size = 1
self.assertEquals(size_t.eval(), [1]) # assert that ready size = 1
_, keys_val, values_0_val, values_1_val = sess.run(
[index_t, key_t, value_list_t[0], value_list_t[1]])
# Check that correct values have been returned.
for k, v0, v1 in zip(keys[2:3], values_0[2:3], values_1[2:3]):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# Adding nothing ought to work, even if the barrier is closed.
insert_empty_op.run()
      # Currently keys "a" and "b" are not in the barrier; adding them
      # again after it has been closed ought to cause failure.
with self.assertRaisesOpError("is closed"):
insert_1_1_op.run()
close_op_final.run()
# These ops should fail because the barrier has now been closed with
# cancel_pending_enqueues = True.
with self.assertRaisesOpError("is closed"):
insert_empty_op.run()
with self.assertRaisesOpError("is closed"):
insert_1_3_op.run()
@test_util.run_deprecated_v1
def testUseBarrierWithShape(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((2, 2), (8,)), name="B")
size_t = b.ready_size()
keys = [b"a", b"b", b"c"]
values_0 = np.array(
[[[10.0] * 2] * 2, [[20.0] * 2] * 2, [[30.0] * 2] * 2], np.float32)
values_1 = np.array([[100.0] * 8, [200.0] * 8, [300.0] * 8], np.float32)
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys, values_1)
take_t = b.take_many(3)
insert_0_op.run()
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 3)
self.assertShapeEqual(keys_val, take_t[1])
self.assertShapeEqual(values_0_val, take_t[2][0])
self.assertShapeEqual(values_1_val, take_t[2][1])
for k, v0, v1 in zip(keys, values_0, values_1):
idx = keys_val.tolist().index(k)
self.assertAllEqual(values_0_val[idx], v0)
self.assertAllEqual(values_1_val[idx], v1)
@test_util.run_deprecated_v1
def testParallelInsertMany(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(dtypes.float32, shapes=())
size_t = b.ready_size()
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
insert_ops = [b.insert_many(0, [k], [v]) for k, v in zip(keys, values)]
take_t = b.take_many(10)
self.evaluate(insert_ops)
self.assertEquals(size_t.eval(), [10])
indices_val, keys_val, values_val = sess.run(
[take_t[0], take_t[1], take_t[2][0]])
self.assertAllEqual(indices_val, [-2**63 + x for x in range(10)])
for k, v in zip(keys, values):
idx = keys_val.tolist().index(k)
self.assertEqual(values_val[idx], v)
@test_util.run_deprecated_v1
def testParallelTakeMany(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(dtypes.float32, shapes=())
size_t = b.ready_size()
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
insert_op = b.insert_many(0, keys, values)
take_t = [b.take_many(1) for _ in keys]
insert_op.run()
self.assertEquals(size_t.eval(), [10])
index_fetches = []
key_fetches = []
value_fetches = []
for ix_t, k_t, v_t in take_t:
index_fetches.append(ix_t)
key_fetches.append(k_t)
value_fetches.append(v_t[0])
vals = sess.run(index_fetches + key_fetches + value_fetches)
index_vals = vals[:len(keys)]
key_vals = vals[len(keys):2 * len(keys)]
value_vals = vals[2 * len(keys):]
taken_elems = []
for k, v in zip(key_vals, value_vals):
taken_elems.append((k[0], v[0]))
self.assertAllEqual(np.hstack(index_vals), [-2**63] * 10)
self.assertItemsEqual(
zip(keys, values), [(k[0], v[0]) for k, v in zip(key_vals, value_vals)])
@test_util.run_deprecated_v1
def testBlockingTakeMany(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(dtypes.float32, shapes=())
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
insert_ops = [b.insert_many(0, [k], [v]) for k, v in zip(keys, values)]
take_t = b.take_many(10)
def take():
indices_val, keys_val, values_val = sess.run(
[take_t[0], take_t[1], take_t[2][0]])
self.assertAllEqual(indices_val,
[int(x.decode("ascii")) - 2**63 for x in keys_val])
self.assertItemsEqual(zip(keys, values), zip(keys_val, values_val))
t = self.checkedThread(target=take)
t.start()
time.sleep(0.1)
for insert_op in insert_ops:
insert_op.run()
t.join()
@test_util.run_deprecated_v1
def testParallelInsertManyTakeMany(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.int64), shapes=((), (2,)))
num_iterations = 100
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(0, keys_i(i), values_0 + i)
for i in range(num_iterations)
]
insert_1_ops = [
b.insert_many(1, keys_i(i), values_1 + i)
for i in range(num_iterations)
]
take_ops = [b.take_many(10) for _ in range(num_iterations)]
def take(sess, i, taken):
indices_val, keys_val, values_0_val, values_1_val = sess.run([
take_ops[i][0], take_ops[i][1], take_ops[i][2][0], take_ops[i][2][1]
])
taken.append({
"indices": indices_val,
"keys": keys_val,
"values_0": values_0_val,
"values_1": values_1_val
})
def insert(sess, i):
sess.run([insert_0_ops[i], insert_1_ops[i]])
taken = []
take_threads = [
self.checkedThread(
target=take, args=(sess, i, taken)) for i in range(num_iterations)
]
insert_threads = [
self.checkedThread(
target=insert, args=(sess, i)) for i in range(num_iterations)
]
for t in take_threads:
t.start()
time.sleep(0.1)
for t in insert_threads:
t.start()
for t in take_threads:
t.join()
for t in insert_threads:
t.join()
self.assertEquals(len(taken), num_iterations)
flatten = lambda l: [item for sublist in l for item in sublist]
all_indices = sorted(flatten([t_i["indices"] for t_i in taken]))
all_keys = sorted(flatten([t_i["keys"] for t_i in taken]))
expected_keys = sorted(
flatten([keys_i(i) for i in range(num_iterations)]))
expected_indices = sorted(
flatten([-2**63 + j] * 10 for j in range(num_iterations)))
self.assertAllEqual(all_indices, expected_indices)
self.assertAllEqual(all_keys, expected_keys)
for taken_i in taken:
outer_indices_from_keys = np.array(
[int(k.decode("ascii").split(":")[0]) for k in taken_i["keys"]])
inner_indices_from_keys = np.array(
[int(k.decode("ascii").split(":")[1]) for k in taken_i["keys"]])
self.assertAllEqual(taken_i["values_0"],
outer_indices_from_keys + inner_indices_from_keys)
expected_values_1 = np.vstack(
(1 + outer_indices_from_keys + inner_indices_from_keys,
2 + outer_indices_from_keys + inner_indices_from_keys)).T
self.assertAllEqual(taken_i["values_1"], expected_values_1)
@test_util.run_deprecated_v1
def testClose(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
incomplete_t = b.incomplete_size()
keys = [b"a", b"b", b"c"]
values_0 = [10.0, 20.0, 30.0]
values_1 = [100.0, 200.0, 300.0]
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys, values_1)
close_op = b.close()
fail_insert_op = b.insert_many(0, ["f"], [60.0])
take_t = b.take_many(3)
take_too_many_t = b.take_many(4)
self.assertEquals(size_t.eval(), [0])
self.assertEquals(incomplete_t.eval(), [0])
insert_0_op.run()
self.assertEquals(size_t.eval(), [0])
self.assertEquals(incomplete_t.eval(), [3])
close_op.run()
# This op should fail because the barrier is closed.
with self.assertRaisesOpError("is closed"):
fail_insert_op.run()
# This op should succeed because the barrier has not canceled
# pending enqueues
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])
self.assertEquals(incomplete_t.eval(), [0])
# This op should fail because the barrier is closed.
with self.assertRaisesOpError("is closed"):
fail_insert_op.run()
# This op should fail because we requested more elements than are
# available in incomplete + ready queue.
with self.assertRaisesOpError(r"is closed and has insufficient elements "
r"\(requested 4, total size 3\)"):
sess.run(take_too_many_t[0]) # Sufficient to request just the indices
# This op should succeed because there are still completed elements
# to process.
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 3)
for k, v0, v1 in zip(keys, values_0, values_1):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# This op should fail because there are no more completed elements and
# the queue is closed.
with self.assertRaisesOpError("is closed and has insufficient elements"):
sess.run(take_t[0])
@test_util.run_deprecated_v1
def testCancel(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
incomplete_t = b.incomplete_size()
keys = [b"a", b"b", b"c"]
values_0 = [10.0, 20.0, 30.0]
values_1 = [100.0, 200.0, 300.0]
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys[0:2], values_1[0:2])
insert_2_op = b.insert_many(1, keys[2:], values_1[2:])
cancel_op = b.close(cancel_pending_enqueues=True)
fail_insert_op = b.insert_many(0, ["f"], [60.0])
take_t = b.take_many(2)
take_too_many_t = b.take_many(3)
self.assertEquals(size_t.eval(), [0])
insert_0_op.run()
insert_1_op.run()
self.assertEquals(size_t.eval(), [2])
self.assertEquals(incomplete_t.eval(), [1])
cancel_op.run()
# This op should fail because the queue is closed.
with self.assertRaisesOpError("is closed"):
fail_insert_op.run()
# This op should fail because the queue is canceled.
with self.assertRaisesOpError("is closed"):
insert_2_op.run()
# This op should fail because we requested more elements than are
# available in incomplete + ready queue.
with self.assertRaisesOpError(r"is closed and has insufficient elements "
r"\(requested 3, total size 2\)"):
sess.run(take_too_many_t[0]) # Sufficient to request just the indices
# This op should succeed because there are still completed elements
# to process.
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 2)
for k, v0, v1 in zip(keys[0:2], values_0[0:2], values_1[0:2]):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# This op should fail because there are no more completed elements and
# the queue is closed.
with self.assertRaisesOpError("is closed and has insufficient elements"):
sess.run(take_t[0])
def _testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(self, cancel):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
take_t = b.take_many(1, allow_small_batch=True)
self.evaluate(b.close(cancel))
with self.assertRaisesOpError("is closed and has insufficient elements"):
self.evaluate(take_t)
@test_util.run_deprecated_v1
def testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(self):
self._testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(cancel=False)
self._testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(cancel=True)
def _testParallelInsertManyTakeManyCloseHalfwayThrough(self, cancel):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.int64), shapes=((), (2,)))
num_iterations = 50
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(0, keys_i(i), values_0 + i)
for i in range(num_iterations)
]
insert_1_ops = [
b.insert_many(1, keys_i(i), values_1 + i)
for i in range(num_iterations)
]
take_ops = [b.take_many(10) for _ in range(num_iterations)]
close_op = b.close(cancel_pending_enqueues=cancel)
def take(sess, i, taken):
try:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run([
take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
take_ops[i][2][1]
])
taken.append(len(indices_val))
except errors_impl.OutOfRangeError:
taken.append(0)
def insert(sess, i):
try:
sess.run([insert_0_ops[i], insert_1_ops[i]])
except errors_impl.CancelledError:
pass
taken = []
take_threads = [
self.checkedThread(
target=take, args=(sess, i, taken)) for i in range(num_iterations)
]
insert_threads = [
self.checkedThread(
target=insert, args=(sess, i)) for i in range(num_iterations)
]
first_half_insert_threads = insert_threads[:num_iterations // 2]
second_half_insert_threads = insert_threads[num_iterations // 2:]
for t in take_threads:
t.start()
for t in first_half_insert_threads:
t.start()
for t in first_half_insert_threads:
t.join()
close_op.run()
for t in second_half_insert_threads:
t.start()
for t in take_threads:
t.join()
for t in second_half_insert_threads:
t.join()
self.assertEqual(
sorted(taken),
[0] * (num_iterations // 2) + [10] * (num_iterations // 2))
@test_util.run_deprecated_v1
def testParallelInsertManyTakeManyCloseHalfwayThrough(self):
self._testParallelInsertManyTakeManyCloseHalfwayThrough(cancel=False)
@test_util.run_deprecated_v1
def testParallelInsertManyTakeManyCancelHalfwayThrough(self):
self._testParallelInsertManyTakeManyCloseHalfwayThrough(cancel=True)
def _testParallelPartialInsertManyTakeManyCloseHalfwayThrough(self, cancel):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.int64), shapes=((), (2,)))
num_iterations = 100
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(
0, keys_i(i), values_0 + i, name="insert_0_%d" % i)
for i in range(num_iterations)
]
close_op = b.close(cancel_pending_enqueues=cancel)
take_ops = [
b.take_many(
10, name="take_%d" % i) for i in range(num_iterations)
]
# insert_1_ops will only run after closure
insert_1_ops = [
b.insert_many(
1, keys_i(i), values_1 + i, name="insert_1_%d" % i)
for i in range(num_iterations)
]
def take(sess, i, taken):
if cancel:
try:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run(
[
take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
take_ops[i][2][1]
])
taken.append(len(indices_val))
except errors_impl.OutOfRangeError:
taken.append(0)
else:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run([
take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
take_ops[i][2][1]
])
taken.append(len(indices_val))
def insert_0(sess, i):
insert_0_ops[i].run(session=sess)
def insert_1(sess, i):
if cancel:
try:
insert_1_ops[i].run(session=sess)
except errors_impl.CancelledError:
pass
else:
insert_1_ops[i].run(session=sess)
taken = []
take_threads = [
self.checkedThread(
target=take, args=(sess, i, taken)) for i in range(num_iterations)
]
insert_0_threads = [
self.checkedThread(
target=insert_0, args=(sess, i)) for i in range(num_iterations)
]
insert_1_threads = [
self.checkedThread(
target=insert_1, args=(sess, i)) for i in range(num_iterations)
]
for t in insert_0_threads:
t.start()
for t in insert_0_threads:
t.join()
for t in take_threads:
t.start()
close_op.run()
for t in insert_1_threads:
t.start()
for t in take_threads:
t.join()
for t in insert_1_threads:
t.join()
if cancel:
self.assertEqual(taken, [0] * num_iterations)
else:
self.assertEqual(taken, [10] * num_iterations)
@test_util.run_deprecated_v1
def testParallelPartialInsertManyTakeManyCloseHalfwayThrough(self):
self._testParallelPartialInsertManyTakeManyCloseHalfwayThrough(cancel=False)
@test_util.run_deprecated_v1
def testParallelPartialInsertManyTakeManyCancelHalfwayThrough(self):
self._testParallelPartialInsertManyTakeManyCloseHalfwayThrough(cancel=True)
@test_util.run_deprecated_v1
def testIncompatibleSharedBarrierErrors(self):
with self.cached_session():
# Do component types and shapes.
b_a_1 = data_flow_ops.Barrier(
(dtypes.float32,), shapes=(()), shared_name="b_a")
b_a_2 = data_flow_ops.Barrier(
(dtypes.int32,), shapes=(()), shared_name="b_a")
b_a_1.barrier_ref.eval()
with self.assertRaisesOpError("component types"):
b_a_2.barrier_ref.eval()
b_b_1 = data_flow_ops.Barrier(
(dtypes.float32,), shapes=(()), shared_name="b_b")
b_b_2 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.int32), shapes=((), ()), shared_name="b_b")
b_b_1.barrier_ref.eval()
with self.assertRaisesOpError("component types"):
b_b_2.barrier_ref.eval()
b_c_1 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32),
shapes=((2, 2), (8,)),
shared_name="b_c")
b_c_2 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shared_name="b_c")
b_c_1.barrier_ref.eval()
with self.assertRaisesOpError("component shapes"):
b_c_2.barrier_ref.eval()
b_d_1 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), shared_name="b_d")
b_d_2 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32),
shapes=((2, 2), (8,)),
shared_name="b_d")
b_d_1.barrier_ref.eval()
with self.assertRaisesOpError("component shapes"):
b_d_2.barrier_ref.eval()
b_e_1 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32),
shapes=((2, 2), (8,)),
shared_name="b_e")
b_e_2 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32),
shapes=((2, 5), (8,)),
shared_name="b_e")
b_e_1.barrier_ref.eval()
with self.assertRaisesOpError("component shapes"):
b_e_2.barrier_ref.eval()
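# A Barrier collects values per key across components; an element becomes
# ready only once every component has been inserted for its key, after which
# take_many dequeues a batch of (indices, keys, value_lists). The sketch
# below mirrors the setup used throughout the tests above;
# `_build_simple_barrier` is an assumed helper name, and the returned ops
# still have to be run inside a session.
def _build_simple_barrier():
  b = data_flow_ops.Barrier((dtypes.float32, dtypes.float32), shapes=((), ()))
  keys = [b"k0", b"k1"]
  insert_0 = b.insert_many(0, keys, [1.0, 2.0])    # First component.
  insert_1 = b.insert_many(1, keys, [10.0, 20.0])  # Second component.
  take = b.take_many(2)  # Blocks until both elements are complete.
  return insert_0, insert_1, take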
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/barrier_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-long-lambda
"""Tests for tensorflow.ops.control_flow_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import sys
import time
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import while_v2 # pylint: disable=unused-import
# pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
import tensorflow.python.ops.tensor_array_grad
# pylint: enable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
def check_consumers(graph):
"""Sanity check on the consumer list of the tensors."""
consumer_count = {}
for op in graph.get_operations():
for v in op.inputs:
cnt = consumer_count.get(v, 0)
consumer_count[v] = cnt + 1
for k, v in consumer_count.items():
if len(k.consumers()) != v:
return False
return True
def all_fetchables():
tensor_names = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.outputs:
if graph.is_fetchable(t):
tensor_names.append(t.name)
return tensor_names
def all_feedables():
feedable_tensors = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_feedable(t):
feedable_tensors.append(t)
return feedable_tensors
def opt_cfg():
return config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L1,
do_function_inlining=True,
do_constant_folding=True)))
def isum(s, maximum_iterations=None):
i = constant_op.constant(0, name="i")
c = lambda i, s: math_ops.less(i, 10)
b = lambda i, s: [math_ops.add(i, 1), math_ops.add(i, s)]
_, r_s = control_flow_ops.while_loop(
c, b, [i, s], maximum_iterations=maximum_iterations)
return r_s
def enqueue_print_op(s):
"""Enqueues an op that prints a message to be captured in the test."""
return logging_ops.print_v2("ControlFlowOpsTest: " + s)
def filter_test_messages(s):
"""Returns a list of messages printed by enqueue_print_op."""
prefix = "ControlFlowOpsTest: "
return [l[len(prefix):] for l in s.split("\n") if l.startswith(prefix)]
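# isum above builds a while loop in a single call: `c` decides whether to
# keep iterating and `b` returns the next value of every loop variable. The
# sketch below shows the same pattern computing 0 + 1 + ... + (n - 1);
# `_sum_first_n` is an assumed helper name, and evaluating the returned
# tensor is left to the caller (session.run or eager execution).
def _sum_first_n(n):
  i0 = constant_op.constant(0)
  total0 = constant_op.constant(0)
  cond = lambda i, total: math_ops.less(i, n)
  body = lambda i, total: [math_ops.add(i, 1), math_ops.add(total, i)]
  _, total = control_flow_ops.while_loop(cond, body, [i0, total0])
  return total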
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase, parameterized.TestCase):
@test_util.run_v1_only("b/120545219")
def testRefIdentity(self):
with self.cached_session():
v = variables.VariableV1(7)
v = control_flow_ops._Identity(v)
op = state_ops.assign(v, 9)
v2 = control_flow_ops.with_dependencies([op], v)
self.assertTrue(isinstance(v2, ops.Tensor))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(9, self.evaluate(v2))
@test_util.run_v1_only("b/120545219")
def testRefEnter(self):
with self.cached_session():
v = variables.VariableV1(7)
enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True)
nine = constant_op.constant(9)
enter_nine = gen_control_flow_ops.enter(nine, "foo_1")
op = state_ops.assign(enter_v, enter_nine)
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(9, self.evaluate(v3))
@test_util.run_v1_only("b/120545219")
def testRefSwitch(self):
with self.cached_session():
v = variables.VariableV1(7)
p = constant_op.constant(True)
v1 = control_flow_ops._SwitchRefOrTensor(v._ref(), p) # pylint: disable=protected-access
v2 = state_ops.assign(v1[1], 9)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(9, self.evaluate(v2))
def testEnterMulExit(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
enter_data = gen_control_flow_ops.enter(data, "foo_1", False)
five = constant_op.constant(5)
enter_five = gen_control_flow_ops.enter(five, "foo_1", False)
mul_op = math_ops.multiply(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = self.evaluate(exit_op)
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_deprecated_v1
def testEnterShapePropagation(self):
with self.cached_session():
v = variables.Variable([0.0, 0.0], dtype=dtypes.float32)
# If is_constant=True, the shape information should be propagated.
enter_v_constant = gen_control_flow_ops.enter(
v, "frame1", is_constant=True)
self.assertEqual(enter_v_constant.shape, [2])
# Otherwise, the shape should be unknown.
enter_v_non_constant = gen_control_flow_ops.enter(
v, "frame2", is_constant=False)
self.assertEqual(enter_v_non_constant.shape, None)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeIndexedSlices(self):
with self.cached_session():
values = constant_op.constant([1, 2, 3, 4, 5, 6])
indices = constant_op.constant([0, 2, 4, 6, 8, 10])
data = ops.IndexedSlices(values, indices)
pred = ops.convert_to_tensor(True)
switch_op = control_flow_ops.switch(data, pred)
merge_op = control_flow_ops.merge(switch_op)[0]
val = merge_op.values
ind = merge_op.indices
self.assertAllEqual(np.arange(1, 7), val)
self.assertAllEqual(np.arange(0, 12, 2), ind)
@test_util.run_v1_only("b/120545219")
def testSwitchDeadBranch(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
dead_branch = array_ops.identity(switch_op[0])
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Retval[0] does not have value" in str(e)):
self.evaluate(dead_branch)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeLess(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
zero = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
less_op = math_ops.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = self.evaluate(merge_op)
self.assertAllEqual(np.arange(1, 7), result)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeAddIdentity(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
id_op = array_ops.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = self.evaluate(merge_op)
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeAddMul(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
five = constant_op.constant(5)
mul_op = math_ops.multiply(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = self.evaluate(merge_op)
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_v1_only("b/120545219")
def testLoop_false(self):
with self.cached_session():
false = ops.convert_to_tensor(False)
n = constant_op.constant(10)
enter_false = gen_control_flow_ops.enter(false, "foo_1", False)
enter_n = gen_control_flow_ops.enter(n, "foo_1", False)
merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
switch_n = control_flow_ops.switch(merge_n, enter_false)
exit_n = control_flow_ops.exit(switch_n[0])
next_n = control_flow_ops.next_iteration(switch_n[0])
merge_n.op._update_input(1, next_n)
result = self.evaluate(exit_n)
self.assertAllEqual(10, result)
@test_util.run_deprecated_v1
def testLoop_1(self):
with self.cached_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
with ops.device(test.gpu_device_name()):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = self.evaluate(exit_i)
self.assertAllEqual(10, result)
@test_util.run_v1_only("b/120545219")
def testLoop_2(self):
with self.cached_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
with ops.device(test.gpu_device_name()):
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = self.evaluate(exit_i)
self.assertAllEqual(10, result)
@test_util.run_v1_only("b/120545219")
def testDifferentFrame(self):
with self.cached_session():
data = array_ops.placeholder(dtypes.float32, shape=[])
enter_1 = gen_control_flow_ops.enter(data, "foo_1", False)
enter_2 = gen_control_flow_ops.enter(data, "foo_2", False)
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError("has inputs from different frames"):
res.eval(feed_dict={data: 1.0})
@test_util.run_deprecated_v1
def testCondBool(self):
values = constant_op.constant(10)
fn1 = lambda: math_ops.add(values, 1)
fn2 = lambda: math_ops.subtract(values, 1)
with self.assertRaisesRegexp(TypeError, "must not be a Python bool"):
_ = control_flow_ops.cond(False, fn1, fn2)
@test_util.run_deprecated_v1
def testCondInt(self):
p = array_ops.placeholder(dtypes.bool, shape=[])
v = constant_op.constant(10)
fn1 = lambda: math_ops.add(v, 1)
fn2 = lambda: math_ops.subtract(v, 1)
y = control_flow_ops.cond(p, fn1, fn2)
grad = gradients_impl.gradients(y, [v])
self.assertAllEqual([None], grad)
def testCondOutputShape(self):
x = constant_op.constant(1.0)
b = control_flow_ops.cond(
constant_op.constant(True), lambda: math_ops.square(x),
lambda: math_ops.subtract(x, 1.))
self.assertEqual(b.shape, tensor_shape.scalar())
@test_util.run_v1_only("b/120545219")
def testFetchable(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
control_flow_ops.cond(
constant_op.constant(True), lambda: x + 2, lambda: x + 0)
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_fetchable(t.op):
sess.run(t, feed_dict={x: 3})
else:
with self.assertRaisesRegexp(ValueError,
"has been marked as not fetchable"):
sess.run(t, feed_dict={x: 3})
@test_util.disable_control_flow_v2("Not relevant")
@test_util.run_v1_only("b/120545219")
def testFeedable(self):
with self.cached_session() as sess:
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
self.assertEqual(1000, r.eval(feed_dict={i0: 0}))
feedable_tensors = all_feedables()
for t in feedable_tensors:
sess.run(r, feed_dict={t: 3})
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if t not in feedable_tensors and t.dtype is dtypes.int32:
with self.assertRaisesRegexp(ValueError, "may not be fed"):
sess.run(r, feed_dict={t: 3})
@test_util.run_v1_only("b/120545219")
def testCondIndexedSlices(self):
with self.cached_session():
values = constant_op.constant(10)
indices = constant_op.constant(0)
x = ops.IndexedSlices(values, indices)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), indices)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values
ind = r.indices
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
def testCondMismatchedIndexedSlices(self):
@def_function.function
def foo():
values = constant_op.constant(10)
indices = constant_op.constant(0)
x = ops.IndexedSlices(values, indices)
with self.assertRaisesRegexp(
TypeError, "Cannot reconcile tf.cond 0-th outputs"):
control_flow_ops.cond(
constant_op.constant(True),
lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices),
lambda: math_ops.add(x.values, 1), indices)
foo()
def testCondSparseTensor(self):
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([[0], [3]],
dtype=dtypes.int64,
name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
pred = math_ops.less(1, 2)
fn1 = lambda: sparse_tensor.SparseTensor(
indices + 1, x.values + 1, dense_shape=shape)
fn2 = lambda: sparse_tensor.SparseTensor(
indices, x.values - 1, dense_shape=shape)
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([3.0, 5.0], r.values)
self.assertAllEqual([[1], [4]], r.indices)
self.assertAllEqual(r.values.get_shape(), (2,))
def testCondRaggedTensor(self):
rt = ragged_factory_ops.constant([[1, 2], [3], [4, 5, 6]])
pred = math_ops.less(1, 2)
fn1 = lambda: array_ops.concat([rt + 2, [[100]]], axis=0)
fn2 = lambda: rt[:2] - 2
result = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([3, 4, 5, 6, 7, 8, 100], result.values)
self.assertAllEqual([0, 2, 3, 6, 7], result.row_splits)
@test_util.run_v1_only("b/120545219")
def testCondResource(self):
with self.cached_session():
rv = resource_variable_ops.ResourceVariable(True)
self.evaluate(variables.global_variables_initializer())
t = ops.convert_to_tensor(1.0)
def case():
assign = resource_variable_ops.assign_variable_op(rv.handle, False)
with ops.control_dependencies([assign]):
return array_ops.identity(t)
self.assertEqual(
1.0, self.evaluate(control_flow_ops.cond(rv, case, lambda: t)))
@test_util.run_v1_only("b/120545219")
def testCondWithTensorArrayGrad(self):
with self.cached_session() as sess:
with ops.device(test.gpu_device_name()):
pred = array_ops.placeholder(dtypes.bool, [])
x = constant_op.constant([1.0, 2.0, 3.0])
y = control_flow_ops.cond(
pred, lambda: map_fn.map_fn(lambda z: z * 2.0, x),
lambda: constant_op.constant([1.0, 1.0, 1.0]))
g = gradients_impl.gradients(y, x)[0]
self.assertAllEqual(sess.run(g, {pred: True}), [2.0, 2.0, 2.0])
self.assertAllEqual(sess.run(g, {pred: False}), [0.0, 0.0, 0.0])
@test_util.run_v1_only("b/120545219")
def testCondIndexedSlicesDifferentTypes(self):
with self.cached_session():
values = constant_op.constant(10)
i_32 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int32)
i_64 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int64)
x = ops.IndexedSlices(values, i_32)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), i_32)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), i_64)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values
ind = r.indices
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
self.assertTrue(ind.dtype == np.int64)
@test_util.run_v1_only("b/120545219")
def testCondColocation(self):
with self.session(use_gpu=True):
with ops.device("/cpu:0"):
v = variables.Variable(7.0)
x = constant_op.constant(10.0)
pred = math_ops.less(1.0, 2.0)
fn1 = lambda: math_ops.add(v, 1.0)
fn2 = lambda: math_ops.subtract(x, 1.0)
r = control_flow_ops.cond(pred, fn1, fn2)
for op in x.graph.get_operations():
if op.name == "cond/Add/Switch":
self.assertDeviceEqual(op.device, "/cpu:0")
def _testCond_1(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
r = control_flow_ops.cond(pred, fn1, fn2)
result = self.evaluate(r)
self.assertAllEqual(11, result)
def testCond_1(self):
self._testCond_1(use_gpu=False)
# TODO(b/116526896): Enable GPU tests.
# self._testCond_1(use_gpu=True)
def testCond_2(self):
with self.cached_session():
x = constant_op.constant(10)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
result = self.evaluate(r)
self.assertAllEqual(9, result)
def testCond_3(self):
with self.cached_session():
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
fn3 = lambda: math_ops.add(control_flow_ops.cond(pred, fn1, fn2), 1)
r = control_flow_ops.cond(pred, fn3, fn2)
result = self.evaluate(r)
self.assertAllEqual(12, result)
@test_util.disable_xla("b/128638446")
@test_util.run_in_graph_and_eager_modes
def testCondPruning(self):
v1 = variables.Variable(7)
v2 = variables.Variable(7)
v3 = variables.Variable(7)
def f():
age = constant_op.constant(3)
max_age = constant_op.constant(2)
pred = math_ops.greater(age, max_age)
fn1 = lambda: [state_ops.assign(v1, 1).op, state_ops.assign(v2, 2).op]
fn2 = lambda: [state_ops.assign(v3, 3).op, constant_op.constant(10).op]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertEqual(len(r), 2)
return r[1]
f_defun = eager_function.defun(f)
if not context.executing_eagerly():
with self.cached_session():
self.evaluate(variables.global_variables_initializer())
result = self.evaluate(f())
self.assertEqual(True, result)
# Only second cond result was fetched, so v1 assign shouldn't run.
self.assertEqual(7, self.evaluate(v1))
self.assertEqual(2, self.evaluate(v2))
self.assertEqual(7, self.evaluate(v3))
result = f_defun()
self.assertEqual(True, self.evaluate(result))
# Both v1 and v2 branch assignments should be run in defun.
self.assertEqual(1, self.evaluate(v1))
self.assertEqual(2, self.evaluate(v2))
self.assertEqual(7, self.evaluate(v3))
def testCond_5(self):
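    # The Python loop unrolls ten chained conds; `alive` turns false once
    # i >= 3, after which `count` stops incrementing, giving a final value
    # of 4.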
with self.cached_session():
alive = constant_op.constant(True, name="alive")
count = constant_op.constant(0, name="count")
def body(i):
return control_flow_ops.cond(
alive, lambda: [math_ops.less(i, 3), math_ops.add(count, 1)],
lambda: [alive, count])
for i in range(10):
alive, count = body(i)
self.assertAllEqual(4, self.evaluate(count))
@test_util.run_v1_only("b/120545219")
def testCond_6(self):
with self.cached_session():
v1 = variables.Variable([7])
age = constant_op.constant(3)
pred = math_ops.greater(age, 4)
fn1 = lambda: age
fn2 = lambda: v1
r = control_flow_ops.cond(pred, fn1, fn2)
self.evaluate(variables.global_variables_initializer())
result = self.evaluate(r)
self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [math_ops.add(x, 1), math_ops.add(x, 2)]
fn2 = lambda: [y, y]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([11, 12], self.evaluate(r))
@parameterized.parameters(dtypes.float32, dtypes.float64)
@test_util.run_v1_only("Uses tf.gradients")
def testCondResourceGrad(self, dtype):
init = constant_op.constant([7.], dtype=dtype)
v1 = variables.Variable(init)
age = constant_op.constant(3., dtype=dtype)
pred = math_ops.greater(age, 4.)
fn1 = lambda: age
fn2 = lambda: v1
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, v1)[0]
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(grad, [1.])
@test_util.run_gpu_only
@test_util.run_deprecated_v1
def testCond_Device(self):
x = constant_op.constant(-10.)
# True branch function defined outside of device scope
def true_fn():
return math_ops.exp(x)
with ops.device("CPU:0"):
r = control_flow_ops.cond(
constant_op.constant(True), true_fn, lambda: 0.)
self.assertIn("cpu", r.device.lower())
with session.Session() as sess:
options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(r, options=options, run_metadata=run_metadata)
# We expect that everything runs on CPU, even if GPU is available.
self.assertEqual(len(run_metadata.partition_graphs), 1)
def _count_matching_switch_nodes_on_device(self, run_metadata, device_str):
# Returns the number of Switch nodes with type float32 placed on
# `device_str`.
device_graphs = [
g for g in run_metadata.partition_graphs
if device_str in g.node[0].device
]
self.assertLen(device_graphs, 1)
switch_nodes = [
n for n in device_graphs[0].node if n.op == "Switch" and
n.attr["T"].type == dtypes.float32.as_datatype_enum
]
return len(switch_nodes)
@test_util.run_gpu_only
@test_util.run_deprecated_v1
def testCondSwitchColocatedWithInputWhenInputOnCPU(self):
x = array_ops.placeholder(dtypes.float32)
# `arg` is used in the cond then branch so a Switch node is created for it.
# We test that the Switch node gets placed on the same device as `arg`.
# We force `arg` to be on CPU here.
with ops.device("CPU:0"):
arg = x + 10.
def true_fn():
with ops.device("CPU:0"):
return arg + 1
r = control_flow_ops.cond(constant_op.constant(True), true_fn, lambda: 0.)
with session.Session() as sess:
run_metadata = config_pb2.RunMetadata()
options = config_pb2.RunOptions(output_partition_graphs=True)
sess.run(
r, feed_dict={x: -10.}, options=options, run_metadata=run_metadata)
self.assertEqual(len(run_metadata.partition_graphs), 2)
# Check that the Switch for `arg` gets placed on CPU.
self.assertEqual(
self._count_matching_switch_nodes_on_device(run_metadata, "CPU"), 1)
self.assertEqual(
self._count_matching_switch_nodes_on_device(run_metadata, "GPU"), 0)
@test_util.run_gpu_only
@test_util.run_deprecated_v1
def testCondSwitchColocatedWithInputWhenInputOnGPU(self):
x = array_ops.placeholder(dtypes.float32)
# `arg` is used in the cond then branch so a Switch node is created for it.
# We test that the Switch node gets placed on the same device as `arg`.
# Note: `arg` gets placed on GPU by default by the placer.
arg = x + 10.
def true_fn():
with ops.device("CPU:0"):
return arg + 1
r = control_flow_ops.cond(constant_op.constant(True), true_fn, lambda: 0.)
with session.Session() as sess:
run_metadata = config_pb2.RunMetadata()
options = config_pb2.RunOptions(output_partition_graphs=True)
sess.run(
r, feed_dict={x: -10.}, options=options, run_metadata=run_metadata)
self.assertEqual(len(run_metadata.partition_graphs), 2)
# Check that the Switch for `arg` gets placed on GPU.
self.assertEqual(
self._count_matching_switch_nodes_on_device(run_metadata, "CPU"), 0)
self.assertEqual(
self._count_matching_switch_nodes_on_device(run_metadata, "GPU"), 1)
def testCondAccessTrueBranchTensorInFalseBranchRaises(self):
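    # true_fn mutates the shared `inputs` dict, so false_fn ends up capturing
    # a tensor created inside the true branch; cond must reject this
    # cross-branch access.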
@def_function.function
def f():
c = constant_op.constant(1.)
inputs = {"c": c}
def true_fn(inputs):
inputs["c"] = array_ops.identity(inputs["c"], name="true_branch")
return inputs["c"]
def false_fn(inputs):
return array_ops.identity(inputs["c"])
pred = constant_op.constant(True)
return control_flow_ops.cond(
pred, lambda: true_fn(inputs), lambda: false_fn(inputs))
with self.assertRaisesRegexp(
ValueError,
"Tensor true_branch:0 in true_fn is accessed from false_fn."):
f()
def testSwitchCaseAccessBranch1TensorInBranch4Raises(self):
@def_function.function
def f():
c = constant_op.constant(1.)
inputs = {"c": c}
def br1_fn(inputs):
inputs["c"] = array_ops.identity(inputs["c"], name="br1_identity")
return inputs["c"]
def br4_fn(inputs):
return array_ops.identity(inputs["c"])
def other_fn():
return array_ops.identity(c)
return control_flow_ops.switch_case(
constant_op.constant(2),
[other_fn, lambda: br1_fn(inputs), other_fn, other_fn,
lambda: br4_fn(inputs)])
with self.assertRaisesRegexp(
ValueError,
"Tensor br1_identity:0 in branch 1 is accessed from branch 4."):
f()
def testCondListOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [math_ops.add(x, y), math_ops.add(x, y)]
fn2 = lambda: [y, y]
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertListEqual([210, 210], test_result)
def testTupleOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: (math_ops.add(x, y), math_ops.add(x, y))
fn2 = lambda: (y, y)
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertTupleEqual((210, 210), test_result)
def testDictOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: {"a": math_ops.add(x, y), "b": math_ops.add(x, y)}
fn2 = lambda: {"a": y, "b": y}
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertDictEqual({"a": 210, "b": 210}, test_result)
def testEmbeddedListOutput(self):
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [[math_ops.add(x, y), math_ops.add(x, y)]]
fn2 = lambda: [[y, y]]
    # Pass the strict=True flag, since cond_v2 allows tensors to be singletons
    # in nested output structures.
r = control_flow_ops.cond(pred, fn1, fn2, strict=True)
test_result = self.evaluate(r)
self.assertListEqual([[210, 210]], test_result)
def testEmbeddedTupleOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: ((math_ops.add(x, y), math_ops.add(x, y)))
fn2 = lambda: ((y, y))
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertTupleEqual(((210, 210)), test_result)
def testEmbeddedDictOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: {"a": {"c": math_ops.add(x, y)},
"b": {"d": math_ops.add(x, y)}}
fn2 = lambda: {"a": {"c": y},
"b": {"d": y}}
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertDictEqual({"a": {"c": 210}, "b": {"d": 210}}, test_result)
@test_util.run_v1_only("b/120545219")
def testCheckNestedOutputStruct(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: {"a": math_ops.add(x, y), "b": math_ops.add(x, y)}
fn2 = lambda: {"c": y, "d": y}
v1_msg = "The two structures don't have the same nested structure"
v2_msg = ("true_fn and false_fn arguments to tf.cond must have the same "
"number, type, and overall structure of return values.")
with self.assertRaisesRegexp(
TypeError if control_flow_util.ENABLE_CONTROL_FLOW_V2 else ValueError,
v2_msg if control_flow_util.ENABLE_CONTROL_FLOW_V2 else v1_msg):
control_flow_ops.cond(pred, fn1, fn2)
@test_util.run_deprecated_v1
def testCondRef(self):
with self.cached_session():
x = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="x",
container="",
shared_name="")
true_fn = lambda: x
false_fn = lambda: constant_op.constant([2.0])
r = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
self.assertAllEqual([2.0], self.evaluate(r))
@test_util.disable_control_flow_v2("b/79881896 (placeholder)")
@test_util.run_v1_only("b/120545219")
def testCondWithControl(self):
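    # The control dependency on the unfed placeholder only guards the unused
    # `a + 1`; the returned `a + 2` (== 5) is not gated on it, so the cond can
    # be evaluated without feeding `control_holder`.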
with self.cached_session():
control_holder = array_ops.placeholder(dtypes.float32, shape=())
a = constant_op.constant(3)
def true_branch():
with ops.control_dependencies([control_holder]):
_ = a + 1
return a + 2
r = control_flow_ops.cond(
constant_op.constant(True), true_branch,
lambda: constant_op.constant(1))
self.assertEqual(5, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testUninitializedRefIdentity(self):
with self.cached_session() as sess:
v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="v",
container="",
shared_name="")
inited = state_ops.is_variable_initialized(v)
v_f, v_t = control_flow_ops.ref_switch(v, inited)
# Both v_f and v_t are uninitialized references. However, an actual use
# of the reference in the 'true' branch in the 'tf.identity' op will
# not 'fire' when v is uninitialized, so this is a valid construction.
      # This test verifies that ref_identity accepts an uninitialized ref as
      # input, so that this construction is allowed.
v_f_op = gen_array_ops.ref_identity(v_f)
v_t_op = gen_array_ops.ref_identity(v_t)
with ops.control_dependencies([v_f_op]):
assign_v = state_ops.assign(v, [1.0])
with ops.control_dependencies([v_t_op]):
orig_v = array_ops.identity(v)
merged_op = control_flow_ops.merge([assign_v, orig_v])
self.assertAllEqual([1.0], self.evaluate(merged_op.output))
def testCondSwitchIdentity(self):
    # Make sure the switch identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
self.evaluate(r)
def testCondRecvIdentity(self):
    # Make sure the recv identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
with ops.device(test.gpu_device_name()):
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
with ops.device("/cpu:0"):
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
self.evaluate(r)
@test_util.run_v1_only("b/120545219")
def testCondGrad_1(self):
with self.cached_session():
x = constant_op.constant(10.0, name="x")
pred = math_ops.less(1, 2)
fn1 = lambda: array_ops.identity(x)
fn2 = lambda: array_ops.identity(x)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(1.0, self.evaluate(grad))
@test_util.run_deprecated_v1
def testCondGrad_2(self):
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
x = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
fn1 = lambda: math_ops.multiply(x, 42.0)
fn2 = lambda: math_ops.multiply(x, 3.0)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
@test_util.disable_control_flow_v2(
"b/110550782 (gradient w.r.t external variable)")
@test_util.run_deprecated_v1
def testCondGrad_3(self):
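    # With c == 1 the true branch returns d(y*y)/d(ox) where y = 7 * ox,
    # i.e. 98 * ox == 980; with c == 3 the false branch returns 3 * ox == 30.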
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
ox = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
def fn1(x):
m = x * x
return gradients_impl.gradients(m, [ox])[0]
fn2 = lambda: math_ops.multiply(ox, 3.0)
y = math_ops.multiply(7.0, ox)
r = control_flow_ops.cond(pred, lambda: fn1(y), fn2)
self.assertAllEqual(980.0, r.eval(feed_dict={c: 1}))
self.assertAllEqual(30.0, r.eval(feed_dict={c: 3}))
@test_util.run_deprecated_v1
def testCondGradMultiDevice(self):
config = config_pb2.ConfigProto(device_count={"CPU": 2},
allow_soft_placement=True)
with self.cached_session(use_gpu=True, config=config) as sess:
pred = array_ops.placeholder(dtypes.bool, [])
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
with ops.device("/cpu:0"):
z = control_flow_ops.cond(pred, lambda: x * y * 2.0, lambda: 2.0)
with ops.device("/cpu:1"):
grad = gradients_impl.gradients(z, x)[0]
with ops.device("/cpu:0"):
grad_grad = gradients_impl.gradients(grad, x)[0]
self.assertEqual(sess.run(grad, {pred: True, x: 1.0, y: 2.0}), 4.0)
self.assertEqual(sess.run(grad, {pred: False, x: 1.0, y: 2.0}), 0.0)
# v1 control flow gets None second derivative for some reason.
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertIsNone(grad_grad)
return
self.assertEqual(sess.run(grad_grad, {pred: True, x: 1.0, y: 2.0}), 0.0)
self.assertEqual(sess.run(grad_grad, {pred: False, x: 1.0, y: 2.0}), 0.0)
@test_util.run_v1_only("b/120545219")
def testNestedCond_Simple(self):
with self.cached_session():
x = constant_op.constant(0., name="X")
y = control_flow_ops.cond(
constant_op.constant(True), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(y, x)[0]
self.assertEqual(1.0, self.evaluate(result))
z = control_flow_ops.cond(
constant_op.constant(False), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(z, x)[0]
self.assertEqual(1.0, self.evaluate(result))
@test_util.run_v1_only("b/120545219")
def testCondGrad_Gather(self):
with self.cached_session() as sess:
v1 = variables.Variable([1.0, 42.0])
c = array_ops.placeholder(dtypes.int32, shape=[])
pred = math_ops.less(c, 2)
fn1 = lambda: array_ops.identity(v1)
fn2 = lambda: array_ops.gather(v1, [1, 1])
r = control_flow_ops.cond(pred, fn1, fn2)
      # The following `grad` is a Tensor since it is the aggregation of an
      # IndexedSlices and a Tensor. It is an `IndexedSlices` with control flow
      # v2.
grad = gradients_impl.gradients(r, [v1])[0]
self.evaluate(variables.global_variables_initializer())
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertIsInstance(grad, ops.IndexedSlices)
grad_value = sess.run(grad, feed_dict={c: 1})
self.assertAllEqual(gradient_checker_v2._to_numpy(grad_value), [1.0, 1.0])
grad_value = sess.run(grad, feed_dict={c: 3})
self.assertAllEqual(gradient_checker_v2._to_numpy(grad_value), [0.0, 2.0])
@test_util.run_deprecated_v1
def testCondGrad_ResourceVarSparseRead(self):
# NOTE(skyewm): this test is interesting because the
# ResourceVariable.sparse_read gradient function returns IndexedSlices.
var = resource_variable_ops.ResourceVariable(
np.ones((4, 2), dtype=np.float32))
x = constant_op.constant(1.0)
r = control_flow_ops.cond(
constant_op.constant(True),
lambda: x * math_ops.reduce_sum(var.sparse_read([1, 2])),
lambda: constant_op.constant(np.zeros((2, 3)),
dtype=dtypes.float32))
grad = gradients_impl.gradients(r, var)[0]
self.evaluate(variables.global_variables_initializer())
grad_val = self.evaluate(grad)
self.assertIsInstance(grad_val, ops.IndexedSlicesValue)
self.assertAllEqual(gradient_checker_v2._to_numpy(grad_val), [[0., 0.],
[1., 1.],
[1., 1.],
[0., 0.]])
@test_util.disable_xla("b/128643464")
def testCondGrad_MultiGather(self):
    # NOTE(skyewm): this test is interesting because the array_ops.gather and
    # ResourceVariable.sparse_read gradient functions return IndexedSlices.
var = resource_variable_ops.ResourceVariable(
np.ones((4, 2), dtype=np.float32))
x1 = constant_op.constant(np.ones((3, 3), dtype=np.float32))
x2 = constant_op.constant(2.0)
def true_fn():
y1 = var.sparse_read([1, 2])
y2 = array_ops.gather(x1, [2]) * x2
y3 = x2 * [1., 1., 1.]
return y1, y2, y3
def false_fn():
y1 = np.zeros((2, 2), dtype=np.float32)
y2 = array_ops.gather(x1, [2]) * x2
y3 = array_ops.gather(x1, [2])
return y1, y2, y3
@def_function.function
def foo():
r = control_flow_ops.cond(constant_op.constant(True), true_fn, false_fn)
return gradients_impl.gradients(r, [var, x1, x2])
grad = foo()
self.evaluate(variables.global_variables_initializer())
var_grad, x1_grad, x2_grad = self.evaluate(grad)
self.assertIsInstance(var_grad, ops.IndexedSlicesValue)
self.assertAllEqual(gradient_checker_v2._to_numpy(var_grad), [[0., 0.],
[1., 1.],
[1., 1.],
[0., 0]])
self.assertIsInstance(x1_grad, ops.IndexedSlicesValue)
self.assertAllEqual(gradient_checker_v2._to_numpy(x1_grad), [[0., 0., 0.],
[0., 0., 0.],
[2., 2., 2.]])
self.assertEqual(gradient_checker_v2._to_numpy(x2_grad), 6.)
@test_util.run_v1_only("b/120545219")
def testCondPredicateTensor(self):
"""Regression test for lowering predicate from non-first output of an op."""
@eager_function.defun
def foo():
return constant_op.constant("foo"), constant_op.constant(True)
r = control_flow_ops.cond(foo()[1], lambda: 1.0, lambda: 2.0)
self.assertEqual(self.evaluate(r), 1.0)
@test_util.run_v1_only("Tests Session.run() pruning logic.")
def testCondFeedConstantPredicate(self):
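    # Feeding the constant predicate overrides its value at run time, which
    # exercises Session.run() pruning of the untaken branch.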
with self.cached_session() as sess:
value = constant_op.constant(37.0)
predicate = constant_op.constant(True)
cond_output = control_flow_ops.cond(
predicate, lambda: constant_op.constant(0.0), lambda: value)
result = array_ops.identity(cond_output)
self.assertEqual(37.0, sess.run(result, feed_dict={predicate: False}))
self.assertEqual(0.0, sess.run(result, feed_dict={predicate: True}))
self.assertEqual(0.0, sess.run(result))
@test_util.run_v1_only("Tests Session.run() pruning logic.")
def testCondFeedPlaceholderWithDefaultPredicate(self):
with self.cached_session() as sess:
value = constant_op.constant(37.0)
predicate = array_ops.placeholder_with_default(
constant_op.constant(True), [])
cond_output = control_flow_ops.cond(
predicate, lambda: constant_op.constant(0.0), lambda: value)
result = array_ops.identity(cond_output)
self.assertAllEqual(37.0, sess.run(result, feed_dict={predicate: False}))
self.assertAllEqual(0.0, sess.run(result, feed_dict={predicate: True}))
self.assertAllEqual(0.0, sess.run(result))
@test_util.disable_xla("b/128644469 PrintV2")
@test_util.run_in_graph_and_eager_modes
def testCondAutoControlDeps(self):
if test_util.is_gpu_available():
self.skipTest("b/128676188 causes OOM on opensource gpu tests")
print_prefix = "testCondAutoControlDeps: "
def branch_fn():
enqueue_print_op("A")
enqueue_print_op("B")
with ops.control_dependencies([enqueue_print_op("C")]):
return constant_op.constant(10)
def build_cond():
return control_flow_ops.cond(
constant_op.constant(True), branch_fn, lambda: 0)
def build_nested_cond():
return control_flow_ops.cond(
constant_op.constant(True), build_cond, lambda: 0)
# In v1 graph mode, pruning should make only "C" print.
if not context.executing_eagerly():
with self.cached_session():
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(build_cond()), 10)
self.assertEqual(["C"], filter_test_messages(printed.contents()))
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(build_nested_cond()), 10)
self.assertEqual(["C"], filter_test_messages(printed.contents()))
# In defuns, all prints should execute in program order.
# This doesn't work with legacy control flow.
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
@eager_function.defun
def cond():
return build_cond()
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(cond()), 10)
self.assertEqual(["A", "B", "C"],
filter_test_messages(printed.contents()))
@eager_function.defun
def nested_cond():
return build_nested_cond()
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(nested_cond()), 10)
self.assertEqual(["A", "B", "C"],
filter_test_messages(printed.contents()))
# wrap_function should prune.
def pruned_cond():
return build_cond()
pruned_cond = wrap_function.wrap_function(pruned_cond, [])
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(pruned_cond()), 10)
self.assertEqual(["C"], filter_test_messages(printed.contents()))
def pruned_nested_cond():
return build_nested_cond()
pruned_nested_cond = wrap_function.wrap_function(pruned_nested_cond, [])
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(pruned_nested_cond()), 10)
self.assertEqual(["C"], filter_test_messages(printed.contents()))
@test_util.disable_xla("b/128643646 PrintV2")
@test_util.run_in_graph_and_eager_modes
def testWhileAutoControlDeps(self):
# Legacy while_loop fails this test because it produces deprecation notices
# in stderr.
if not control_flow_util.ENABLE_CONTROL_FLOW_V2: return
def cond(i, unused_x):
enqueue_print_op("A")
return i < 2
def body(i, x):
enqueue_print_op("B")
with ops.control_dependencies([enqueue_print_op("C")]):
x = array_ops.identity(x)
with ops.control_dependencies([enqueue_print_op("D")]):
return i + 1, x
def build_while():
return control_flow_ops.while_loop(
cond, body, [constant_op.constant(0), constant_op.constant(0)])
def build_nested_while():
return control_flow_ops.cond(
constant_op.constant(True), build_while, lambda: [0, 0])
# In v1 graph mode, pruning should make only "D" print.
if not context.executing_eagerly():
with self.cached_session():
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(build_while()[0]), 2)
self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(build_nested_while()[0]), 2)
self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))
# In defuns, all prints should execute in program order.
@eager_function.defun
def while_loop():
return build_while()[0]
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(while_loop()), 2)
self.assertEqual(["A", "B", "C", "D", "A", "B", "C", "D", "A"],
filter_test_messages(printed.contents()))
@eager_function.defun
def nested_while_loop():
return build_nested_while()[0]
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(nested_while_loop()), 2)
self.assertEqual(["A", "B", "C", "D", "A", "B", "C", "D", "A"],
filter_test_messages(printed.contents()))
# wrap_function should prune.
def pruned_while():
return build_while()[0]
pruned_while = wrap_function.wrap_function(pruned_while, [])
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(pruned_while()), 2)
self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))
def pruned_nested_while():
return build_nested_while()[0]
pruned_nested_while = wrap_function.wrap_function(pruned_nested_while, [])
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(pruned_nested_while()), 2)
self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))
# Microbenchmark: 256,000 iterations/s.
def testWhile_1(self):
with self.cached_session():
n = constant_op.constant(0)
c = lambda x: math_ops.less(x, 10000)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileExternalControlDependencies(self):
with self.cached_session():
v = variables.Variable(0.0)
v.initializer.run()
increment = v.assign_add(1.0).read_value()
def body_fn(i):
with ops.control_dependencies([increment]):
return i + 1
result = control_flow_ops.while_loop(cond=lambda i: i < 2,
body=body_fn, loop_vars=[1])
self.assertAllEqual(result, 2)
self.assertAllEqual(v.read_value(), 1.0)
@test_util.run_v1_only("b/120545219")
def testWhileExternalControlDependenciesNoInput(self):
with self.cached_session():
v = variables.Variable(0.0)
v.initializer.run()
# TODO(apassos): figure out why the reading is necessary here.
increment = v.assign_add(1.0).read_value()
def body_fn(unused_i):
with ops.control_dependencies([increment]):
return constant_op.constant(5, name="five")
result = control_flow_ops.while_loop(cond=lambda i: i < 5,
body=body_fn, loop_vars=[0])
self.evaluate(result)
self.assertAllEqual(self.evaluate(v), 1.0)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileWithRefs_1(self):
with self.cached_session() as sess:
x = variables.VariableV1(0)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 100)
self.assertEqual(x.dtype, dtypes.int32_ref)
def b(i, x):
self.assertEqual(x.dtype, dtypes.int32_ref)
return (i + 1, gen_array_ops.ref_identity(x))
r = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=5)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.int32_ref)
value_i, value_x = self.evaluate(r)
self.assertEqual(100, value_i)
self.assertEqual(0, value_x)
def testWhile_2(self):
with self.cached_session():
s = constant_op.constant(0)
r = isum(s)
self.assertAllEqual(45, self.evaluate(r))
def testWhileWithMaximumIterations(self):
with self.cached_session():
s = constant_op.constant([1, 2, 3, 4, 5])
r = isum(s, maximum_iterations=3)
self.assertAllEqual([1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3], self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileWithMaximumIterationsAndSingleArgument(self):
with self.cached_session():
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [0], maximum_iterations=1)
self.assertEqual(1, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testXLAGradInLoop(self):
    # We have an optimization that moves certain reduction ops; this test makes
    # sure we don't do that for XLA ops.
    # Use dynamic inputs, which trigger the creation of "BroadcastGradientArgs"
    # and "Shape" ops.
input1 = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
input2 = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
def cond(i1, i2):
return False
def body(i1, i2):
return math_ops.add(i1, i2), math_ops.add(i1, i2)
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
out1, _ = control_flow_ops.while_loop(
cond, body, (input1, input2), maximum_iterations=2)
g = gradients_impl.gradients(out1, [input1])
for op in out1.graph.get_operations():
# Test that the "Shape" is directly passed to BroadcastGradientArgs
# instead of being pushed to the stack.
if op.type == "BroadcastGradientArgs":
self.assertEqual(op.inputs[0].op.type, "Shape")
self.assertEqual(op.inputs[1].op.type, "Shape")
xla_context.Exit()
@test_util.disable_control_flow_v2("b/115776323 (max_iters)")
@test_util.run_v1_only("b/120545219")
def testSingleNestedMaximumIterationsWhileLoopGradientInXLAContext(self):
v = constant_op.constant(1.0)
def training_loop_with_gradient(i):
out = control_flow_ops.while_loop(
lambda i_, _: i_ < 3,
lambda i_, j: [i_ + 1, j * v], [0, 1.0],
maximum_iterations=i)
g = gradients_impl.gradients(out, v)
with ops.control_dependencies(g):
return i + 1
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
# Create training loop, ensure we can call gradient() of
# while_loop inside the training loop.
loop = control_flow_ops.while_loop(lambda i: i < 3,
training_loop_with_gradient, [0])
xla_context.Exit()
loop_execute = array_ops.identity(loop) # Because loop is not fetchable.
# Should execute without issue.
self.assertEqual(3, self.evaluate(loop_execute))
@test_util.run_v1_only("b/120545219")
def testInvalidMaximumIterationsWhileLoopGradientInXLAContext(self):
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.skipTest("WhileV2 does lazy evaluation of maximum_iterations")
v = constant_op.constant(1.0)
def inner_body(i, x):
out = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, j: [i + 1, j * v], [0, x],
maximum_iterations=i)
return out
def create_while_loop(maximum_iterations=None):
return control_flow_ops.while_loop(
lambda i, _: i < 3,
inner_body, [0, 1.0],
maximum_iterations=maximum_iterations)
loop_no_xla = create_while_loop(maximum_iterations=5)
# maximum_iterations is fine outside of an XLA scope
gs = gradients_impl.gradients(loop_no_xla, v)
self.evaluate(gs) # This should execute without error.
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
loop_no_maxiter = create_while_loop()
loop_with_maxiter = create_while_loop(maximum_iterations=2)
xla_context.Exit()
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside "
r"XLA while_loop because maximum_iterations was not passed to "
r"the tf.while_loop call \('.+'\)."):
_ = gradients_impl.gradients(loop_no_maxiter, v)
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
r"while_loop. maximum_iterations tensor '.+' for while_loop context "
r"'.+' must be statically known \(e.g. a constant value or known "
r"shape dimension\), or be defined at or outside the while loop "
r"context '.*' \(currently defined in '.*'\)"):
_ = gradients_impl.gradients(loop_with_maxiter, v)
@test_util.run_v1_only("b/120545219")
def testInvalidMaximumIterationsFromSiblingContextWhileLoopInXLAContext(self):
v = constant_op.constant(1.0)
def create_while_loop():
max_iter_holder = []
def create_mi():
max_iter_holder.append(array_ops.placeholder(dtypes.int32, shape=()))
return 1.0
_ = control_flow_ops.cond(
constant_op.constant(True), create_mi, create_mi)
return control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, v * x), (0, 1.0),
maximum_iterations=max_iter_holder[0])
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
with self.assertRaisesRegexp(
ValueError, r"Tensor.*Placeholder:0.* must be from the same graph.*"):
loop = create_while_loop()
xla_context.Exit()
else:
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
loop = create_while_loop()
xla_context.Exit()
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
r"while_loop. maximum_iterations tensor '.*Placeholder:0' for "
r"while_loop context '.+' must be statically known \(e.g. a constant "
r"value or known shape dimension\), or be defined at or outside the "
r"while loop context '' \(currently defined in 'cond/.+'\)"):
_ = gradients_impl.gradients(loop, v)
@test_util.run_v1_only("b/120545219")
def testNestedWhileLoopWithMaxItersFromOuterContextInXLAContext(self):
if test_util.is_gpu_available():
self.skipTest("b/128646372, b/128645947 fails in opensource build")
v = constant_op.constant(1.0)
p = array_ops.placeholder(dtype=dtypes.int32)
def mid_body_builder(iterations):
def mid_body(i, x):
r = control_flow_ops.while_loop(
lambda *_: True,
lambda i, x: (i + 1, v * x), (0, x),
maximum_iterations=iterations,
name="inner")
return (i + 1, gradients_impl.gradients(x + r[1], v)[0])
return mid_body
def outer_body(i, x):
iterations = array_ops.size(p, name="iterations")
return (i + 1, x + control_flow_ops.while_loop(
lambda *_: True,
mid_body_builder(iterations), (0, x),
maximum_iterations=iterations,
name="mid")[1])
def create_while_loop():
with ops.device("/cpu:0"):
r = control_flow_ops.while_loop(
lambda *_: True,
outer_body, (0, 1.0),
maximum_iterations=5,
name="outer")
return array_ops.identity(r[1])
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
final_with_xla_context = create_while_loop()
xla_context.Exit()
final_without_xla_context = create_while_loop()
with self.session(use_gpu=False) as sess:
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata_without_xla_context = config_pb2.RunMetadata()
run_metadata = config_pb2.RunMetadata()
final_value_without_xla_context = sess.run(
final_without_xla_context,
feed_dict={p: [0, 0, 0]},
options=opts,
run_metadata=run_metadata_without_xla_context)
final_value_with_xla_context = sess.run(
final_with_xla_context,
feed_dict={p: [0, 0, 0]},
options=opts,
run_metadata=run_metadata)
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
        # With while_v2 on XLA, run_metadata only contains the unlowered While
        # op, so node_stats has no statistics for the pushes. As a loose check
        # we count the pushes in the lowered version instead.
for dev in run_metadata_without_xla_context.step_stats.dev_stats:
if "/device:CPU" in dev.device:
node_stats = dev.node_stats
stack_push_op = "TensorListPushBack"
else:
for dev in run_metadata.step_stats.dev_stats:
if "/device:CPU" in dev.device:
node_stats = dev.node_stats
stack_push_op = "StackPushV2"
stack_push_count = len(
[x for x in node_stats if x.node_name.endswith(stack_push_op)])
# Pushes to the stack = product of maximum_iterations values;
      # the last two "3"s come from size(p), when p == [0, 0, 0].
self.assertEqual(stack_push_count, 5 * 3 * 3, str(node_stats))
self.assertAllClose(final_value_with_xla_context,
final_value_without_xla_context)
# Have more than 10 parallel iterations and hence exercise k-bound
# most of the time.
@test_util.run_deprecated_v1
def testWhile_3(self):
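    # Each iteration adds m + c == 2 * (i + 1) to o, so after 100 iterations
    # o == 2 * (1 + 2 + ... + 100) == 10100.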
with self.cached_session():
def compute(i, m, c, o):
m, c = [math_ops.add(m, 1), math_ops.add(c, 1)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
d = ops.convert_to_tensor(100)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, d),
compute, [i, m, c, o])
result = r[3]
self.assertAllEqual(10100, result)
@test_util.run_deprecated_v1
def testWhile_4(self):
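    # Each iteration adds 2 * x[i] to o, so the result is
    # 2 * (1 + 2 + ... + 6) == 42.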
with self.cached_session():
def compute(i, m, c, o):
m, c = [array_ops.gather(x, i), array_ops.gather(x, i)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, s),
compute, [i, m, c, o])
result = r[3]
self.assertAllEqual(42, result)
@test_util.run_v1_only("b/120545219")
def testWhile_5(self):
with self.cached_session():
def compute(i, c, o):
c = array_ops.strided_slice(x, array_ops.expand_dims(i, 0),
[1] + array_ops.expand_dims(i, 0))
o = array_ops.concat([o, c], 0)
i = math_ops.add(i, 1)
return [i, c, o]
i = ops.convert_to_tensor(0)
c = ops.convert_to_tensor([0])
o = ops.convert_to_tensor([0])
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, c, o: math_ops.less(i, s),
compute, [i, c, o], [
i.get_shape(),
tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()
])
result = r[2]
self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
@test_util.run_gpu_only
@test_util.run_deprecated_v1
def testWhile_Device(self):
# Body function defined outside of device scope
def body(x):
return math_ops.exp(x)
with ops.device("CPU:0"):
r = control_flow_ops.while_loop(
lambda x: x < 10, body, [constant_op.constant(-10.)])
self.assertIn("cpu", r.device.lower())
with session.Session() as sess:
options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(r, options=options, run_metadata=run_metadata)
# We expect that everything runs on CPU, even if GPU is available.
self.assertEqual(len(run_metadata.partition_graphs), 1)
@test_util.disable_control_flow_v2("b/116338794 (buffer_reuse)")
@test_util.run_v1_only("b/120545219")
def testBufferForwarding(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with self.cached_session() as sess:
with ops.device("/cpu:0"):
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
r_val = sess.run(r, options=run_options, run_metadata=run_metadata)
self.assertEqual(1000, r_val)
self.assertTrue(run_metadata.HasField("step_stats"))
unique_allocs = set()
for node_stat in run_metadata.step_stats.dev_stats[0].node_stats:
for output in node_stat.output:
unique_allocs.add(
output.tensor_description.allocation_description.ptr)
# Prior to cl/147536680, the number of unique allocations was about 1005.
self.assertLess(len(unique_allocs), 756)
def _testWhile_Gpu_1(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
b = lambda x: math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, self.evaluate(r))
def testWhile_Gpu_1(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def _testWhile_Gpu_2(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
return math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, self.evaluate(r))
def testWhile_Gpu_2(self):
self._testWhile_Gpu_2(use_gpu=False)
self._testWhile_Gpu_2(use_gpu=True)
def testWhileShape(self):
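    # Tiling by [2, 2] doubles each dimension per iteration, so two iterations
    # turn the 2x2 ones matrix into an 8x8 one.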
with self.cached_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def _b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.tile(j, [2, 2])
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, _b, [i, m],
[i.get_shape(), tensor_shape.unknown_shape()])
r = r[1] * array_ops.ones([8, 8])
self.assertAllEqual(np.ones((8, 8)), self.evaluate(r))
@test_util.disable_control_flow_v2("b/131265085")
@test_util.run_v1_only("b/131265085")
def testWhileBadShape(self):
x = constant_op.constant([2.0, 4.0], name="values")
i = constant_op.constant(0)
c = lambda i, _: math_ops.less(i, 10)
b = lambda i, x: [i + 1, x + 1]
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
# Shape of x is [2], but we specify a shape of [5].
control_flow_ops.while_loop(
c, b, [i, x], [i.shape, tensor_shape.TensorShape([5])])
@test_util.run_in_graph_and_eager_modes
def testWhileBadBodyReturn(self):
x = constant_op.constant([2.0, 4.0], name="values")
i = constant_op.constant(0)
c = lambda i, *x: math_ops.less(i, 10)
# body accepts N values and returns N+1 values.
b = lambda i, *x: (i, i) + x
with self.assertRaisesRegexp(
ValueError,
"The two structures don't have the same nested structure."):
control_flow_ops.while_loop(c, b, [i, x])
@test_util.run_deprecated_v1
def testWhileWithNonTensorInput_Scalar(self):
with self.cached_session():
n = 0
c = lambda x: x < 10000
b = lambda x: x + 1
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, self.evaluate(r))
def testWhileWithNonTensorInput_Vector(self):
with self.cached_session():
n = np.array([0]) # Note, [0] would not work here; that is a list
c = lambda x: x[0] < 10000
b = lambda x: array_ops.stack([x[0] + 1])
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual([10000], self.evaluate(r))
def testWhileShapeInference(self):
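    # Concatenating j with itself along axis 0 doubles the row count each
    # iteration (2 -> 4 -> 8 rows), which stays compatible with the declared
    # [None, 2] shape invariant.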
with self.cached_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.concat([j, j], 0)
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, b, [i, m],
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertTrue(r[1].shape.is_compatible_with([8, 2]))
@test_util.run_v1_only("b/120545219")
def testWhileShapeInferenceBadShape(self):
with self.cached_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
b = lambda i, j: [i + 1, array_ops.concat([j, j], 0)]
with self.assertRaisesRegexp(
ValueError,
r"Input tensor 'ones:0' enters the loop with shape \(2, 2\), but has "
r"shape \(4, 2\) after one iteration. To allow the shape to vary "
r"across iterations, use the `shape_invariants` argument of "
r"tf.while_loop to specify a less-specific shape."):
control_flow_ops.while_loop(c, b, [i, m])
def testWhileShapeInferenceSparseTensor(self):
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([[0], [3]],
dtype=dtypes.int64,
name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b1(i, x): # modifies values. (shape of components is not changed.)
return [
i + 1,
sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
]
def b2(i, x): # adds new values. (shape of components is changed.)
return [
i + 1,
sparse_ops.sparse_add(
x,
sparse_tensor.SparseTensor(
indices=math_ops.cast(
array_ops.fill([1, 1], i), dtypes.int64),
values=array_ops.fill([1], 1.0),
dense_shape=x.dense_shape))
]
def b3(i, x): # modifies rank. (shape of all components is changed.)
return [
i + 1,
sparse_tensor.SparseTensor(
array_ops.concat([x.indices, [[i], [i]]], axis=1), x.values * 2.0,
array_ops.concat([x.dense_shape, [10]], axis=0))
]
def check_shapes(r, indices, values, dense_shape):
self.assertTrue(r.indices.shape.is_compatible_with(indices))
self.assertTrue(r.values.shape.is_compatible_with(values))
self.assertTrue(r.dense_shape.shape.is_compatible_with(dense_shape))
# Default shape invariant; b1 only modifies values.
_, r = control_flow_ops.while_loop(c, b1, [i, x])
check_shapes(r, indices=[None, 1], values=[None], dense_shape=[1])
# Default shape invariant; b2 adds new values
_, r = control_flow_ops.while_loop(c, b2, [i, x])
check_shapes(r, indices=[None, 1], values=[None], dense_shape=[1])
# Explicit shape invariant, allowing any rank; b1 only modifies values.
_, r = control_flow_ops.while_loop(
c, b1, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None])])
check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])
# Explicit shape invariant, allowing any rank; b3 modifies rank.
_, r = control_flow_ops.while_loop(
c, b3, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None])])
check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])
# Shape invariant with ndims=None. Technically, this isn't supported
# according to the docs, but we support it for backwards compatibility.
_, r = control_flow_ops.while_loop(
c, b1, [i, x],
[i.get_shape(), tensor_shape.TensorShape(None)])
check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])
_, r = control_flow_ops.while_loop(
c, b3, [i, x],
[i.get_shape(), tensor_shape.TensorShape(None)])
check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])
@test_util.disable_control_flow_v2("b/131265085")
@test_util.run_v1_only("b/131265085")
def testWhileBadShapeSparseTensor(self):
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([[0], [3]],
dtype=dtypes.int64,
name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
c = lambda i, _: i < 10
b1 = lambda i, x: [i+1, x]
def b2(i, x): # modifies rank. (shape of all components is changed.)
return [
i + 1,
sparse_tensor.SparseTensor(
array_ops.concat([x.indices, [[i], [i]]], axis=1), x.values * 2.0,
array_ops.concat([x.dense_shape, [10]], axis=0))
]
# Explicit shape invariant, with a specific (incompatible) rank.
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
control_flow_ops.while_loop(
c, b1, [i, x],
[i.get_shape(), tensor_shape.TensorShape([5])])
# Default shape invariant, but b2 modifies rank (which is not allowed).
with self.assertRaises(ValueError):
control_flow_ops.while_loop(c, b2, [i, x])
def testWhileShapeInferenceIndexedSlices(self):
with self.cached_session():
values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10, 2], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0], 2)
self.assertEqual(r.values.get_shape(), tensor_shape.TensorShape([2, 2]))
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertEqual(r.dense_shape.get_shape()[0], 2)
self.assertTrue(r.values.get_shape().is_compatible_with([None, 2]))
@test_util.disable_control_flow_v2("b/131265085")
@test_util.run_v1_only("b/131265085")
def testWhileBadShapeIndexedSlices(self):
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([[0], [3]],
dtype=dtypes.int64,
name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
    c = lambda i, _: i < 10
b = lambda i, x: [i+1, x]
# Explicit shape invariant, with a specific (incompatible) rank.
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([5])])
def testWhileShapeInferenceRaggedTensor(self):
i = constant_op.constant(0)
x = ragged_factory_ops.constant([[1, 2], [3], [4, 5, 6]])
c = lambda i, _: i < 10
def b1(i, x): # Adds new values to rows (but doesn't create new rows)
return [
i + 1,
array_ops.concat([x, x], axis=1)
]
def b2(i, x): # Adds new rows.
return [
i + 1,
array_ops.concat([x, x], axis=0)
]
def check_shapes(r, values, splits):
self.assertTrue(r.values.shape.is_compatible_with(values))
self.assertTrue(r.row_splits.shape.is_compatible_with(splits))
# Default shape invariant; b1 adds new values to rows.
_, r = control_flow_ops.while_loop(c, b1, [i, x])
check_shapes(r, values=[None], splits=[4])
# Default shape invariant; b2 adds new rows (not allowed).
if not context.executing_eagerly():
with self.assertRaises(ValueError):
_, r = control_flow_ops.while_loop(c, b2, [i, x])
# Explicit shape invariant; b1 adds new values to rows.
# (deprecated: use TensorShape instead of RaggedTensorSpec)
_, r = control_flow_ops.while_loop(
c, b1, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None, None])])
check_shapes(r, values=[None], splits=[None])
# Explicit shape invariant; b1 adds new values to rows.
_, r = control_flow_ops.while_loop(
c, b1, [i, x],
[i.get_shape(), ragged_tensor.RaggedTensorSpec([None, None],
dtypes.int32)])
check_shapes(r, values=[None], splits=[None])
# Explicit shape invariant; b2 adds new rows.
_, r = control_flow_ops.while_loop(
c, b2, [i, x],
[i.get_shape(), ragged_tensor.RaggedTensorSpec([None, None],
dtypes.int32)])
check_shapes(r, values=[None], splits=[None])
def testWhileShapeInferenceRaggedTensorRaggedRank2(self):
i = constant_op.constant(0)
x = ragged_factory_ops.constant([[[1, 2], [3], [4, 5, 6]],
[[], [8, 9, 10]]])
c = lambda i, _: i < 10
def b(i, x):
return [
i + 1,
array_ops.concat([x, x[..., i:i+1]], axis=-1)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.row_splits.shape.as_list(), [3])
self.assertTrue(r.values.row_splits.shape.as_list() in ([6], [None]))
self.assertTrue(r.values.values.shape.as_list() in ([49], [None]))
def _testNestedWhile_1(self, use_gpu):
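    # cpu_sum(0) computes 0 + 1 + ... + 9 == 45; the outer loop adds 45 per
    # iteration until the value reaches 200, i.e. five iterations, giving 225.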
with self.cached_session(use_gpu=use_gpu):
n = constant_op.constant(0)
def cpu_sum(s):
c = lambda i, s: math_ops.less(i, 10)
def b(i, s):
i1 = math_ops.add(i, 1)
with ops.device("/cpu:0"):
s1 = math_ops.add(i, s)
return i1, s1
_, r_s = control_flow_ops.while_loop(c, b, [n, s])
return r_s
c = lambda x: math_ops.less(x, 200)
b = lambda x: math_ops.add(x, cpu_sum(n))
r = control_flow_ops.while_loop(c, b, [n])
self.assertEqual(225, self.evaluate(r))
def testNestedWhile_1(self):
self._testNestedWhile_1(use_gpu=False)
self._testNestedWhile_1(use_gpu=True)
def _testNestedWhile_2(self, use_gpu):
# Test the cases that A -> Enter and Exit -> A are partitioned.
with self.cached_session(use_gpu=use_gpu):
s0 = constant_op.constant(2.0)
def inner_loop(s):
c = lambda s: math_ops.less(s, 20.0)
def b(s):
s1 = math_ops.add(s, s)
return s1
r_s = control_flow_ops.while_loop(c, b, [s], parallel_iterations=1)
return r_s
outer_c = lambda x: math_ops.less(x, 3000.0)
def outer_b(x):
x = logging_ops.Print(x, [x]) # Edge "Print -> Enter" is partitioned
x = inner_loop(x)
with ops.device("/cpu:0"):
x = math_ops.square(x) # Edge "Exit -> Square" is partitioned
return x
r = control_flow_ops.while_loop(
outer_c, outer_b, [s0], parallel_iterations=1)
self.assertEqual(1048576.0, self.evaluate(r))
def testNestedWhile_2(self):
self._testNestedWhile_2(use_gpu=False)
self._testNestedWhile_2(use_gpu=True)
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_1(self):
with self.cached_session():
n = constant_op.constant(0)
r = constant_op.constant(0)
condition = lambda n_, r_: math_ops.less(n_, 10)
def body(n_, r_):
n_ = math_ops.add(n_, 1)
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [n_, r_]
res = control_flow_ops.while_loop(
condition, body, [n, r], parallel_iterations=1)
self.assertAllEqual(12, res[1])
@test_util.run_deprecated_v1
def testWhileWithControl_2(self):
with self.cached_session():
r = constant_op.constant(0)
condition = lambda r_: math_ops.less(r_, 10)
def body(r_):
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [r_]
res = control_flow_ops.while_loop(
condition, body, [r], parallel_iterations=1)
self.assertAllEqual(12, self.evaluate(res))
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_3(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(lambda x: x < 10, lambda x: x + c, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_4(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(
lambda x: x < 10, lambda x: x + array_ops.identity(c), [x0])
self.assertEqual(10, sess.run(r, {b: True}))
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_5(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
def body(x):
with ops.control_dependencies([b]):
return x + c
r = control_flow_ops.while_loop(lambda x: x < 10, body, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileCondWithControl(self):
    # Ensure that no control edges from an outer control dependency context are
    # added to nodes inside cond/while contexts.
with self.cached_session() as sess:
const_true = lambda: constant_op.constant(True)
const_false = lambda: constant_op.constant(False)
cond = lambda i: control_flow_ops.cond(i > 0, const_true, const_false)
body = lambda i: control_flow_ops.cond(i > 0, lambda: i - 1, lambda: i)
with ops.control_dependencies([control_flow_ops.no_op()]):
loop = control_flow_ops.while_loop(cond, body,
(constant_op.constant(5),))
self.assertEqual(0, self.evaluate(loop))
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
@test_util.run_v1_only("b/120545219")
def testWhileCondWithControl_1(self):
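    # The cond inside the loop body squares v on every iteration, so after the
    # four iterations v == 2 ** (2 ** 4) == 65536.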
with self.cached_session():
v = variable_scope.get_variable(
"v", [], initializer=init_ops.constant_initializer(2))
i0 = constant_op.constant(0)
with ops.control_dependencies([i0]):
def loop_condition(i):
return i < 4
def loop_body(i):
some_cond = control_flow_ops.cond(
constant_op.constant(True),
lambda: state_ops.assign(v, math_ops.square(v)), lambda: v)
with ops.control_dependencies([some_cond]):
return i + 1
r = control_flow_ops.while_loop(loop_condition, loop_body, (i0,))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(4, self.evaluate(r))
self.assertAllClose(65536.0, self.evaluate(v))
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
@test_util.run_v1_only("b/120545219")
def testWhileCondExitControl(self):
with self.cached_session():
v = variables.Variable(1)
def false_branch():
cond = lambda i: i < 100
def body(i):
x = state_ops.assign(v, i)
return x + 1
loop = control_flow_ops.while_loop(cond, body, [0])
        # Make sure the control edge from Exit to a node is handled correctly.
with ops.control_dependencies([loop]):
return constant_op.constant(6.0)
r = control_flow_ops.cond(
constant_op.constant(False), lambda: constant_op.constant(1.0),
false_branch)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(6.0, self.evaluate(r))
self.assertEqual(99, self.evaluate(v))
def testCondWhile_1(self):
with self.cached_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(0, 1), lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: n)
self.assertAllEqual(10, self.evaluate(r))
def testCondWhile_2(self):
with self.cached_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(n, 1),
lambda: control_flow_ops.while_loop(c, b, [n]))
self.assertAllEqual(10, self.evaluate(r))
def _testCondWhile_3(self, use_gpu):
with self.cached_session(use_gpu=use_gpu) as sess:
p = array_ops.placeholder(dtypes.bool)
n = constant_op.constant(0.0)
def c(x):
return math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
x1 = math_ops.add(x, 1.0)
return x1
r = control_flow_ops.cond(p,
lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: math_ops.multiply(n, 2.0))
r1 = gradients_impl.gradients(r, [n])
self.assertEqual(10., sess.run(r, {p: True}))
self.assertEqual([1.0], sess.run(r1, {p: True}))
self.assertEqual(0.0, sess.run(r, {p: False}))
self.assertEqual([2.0], sess.run(r1, {p: False}))
@test_util.run_deprecated_v1
def testCondWhile_3(self):
self._testCondWhile_3(use_gpu=False)
self._testCondWhile_3(use_gpu=True)
def testWhileCond_1(self):
with self.cached_session():
i = ops.convert_to_tensor(0, name="i")
n = ops.convert_to_tensor(10, name="n")
one = ops.convert_to_tensor(1, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.add(x, one), lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [i])
self.assertAllEqual(10, self.evaluate(r))
def testWhileCond_2(self):
with self.cached_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: control_flow_ops.cond(constant_op.constant(True), lambda: math_ops.add(x, 1), lambda: n)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, self.evaluate(r))
def testWhileCond_3(self):
with self.cached_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(math_ops.less(0, 1),
lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, self.evaluate(r))
@test_util.run_deprecated_v1
def testWhileCondGradMultiDevice(self):
config = config_pb2.ConfigProto(device_count={"CPU": 2},
allow_soft_placement=True)
with self.cached_session(use_gpu=True, config=config) as sess:
pred = array_ops.placeholder(dtypes.bool, [])
x_init = constant_op.constant(1.0)
with ops.device("/cpu:0"):
z = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, control_flow_ops.cond(
pred, lambda: x * 2.0, lambda: 10.0)),
[0, x_init])
with ops.device("/cpu:1"):
grad = gradients_impl.gradients(z, x_init)[0]
with ops.device("/cpu:0"):
grad_grad = gradients_impl.gradients(grad, x_init)[0]
self.assertEqual(sess.run(grad, {pred: True}), 8.0)
self.assertEqual(sess.run(grad, {pred: False}), 0.0)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
return
self.assertEqual(sess.run(grad_grad, {pred: True}), 0.0)
self.assertEqual(sess.run(grad_grad, {pred: False}), 0.0)
# NOTE: It is ok to have parallel_iterations > 1
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_deprecated_v1
def testWhileUpdateVariable_1(self):
with self.cached_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3, self.evaluate(r))
result = self.evaluate(select)
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_2(self):
with self.cached_session():
select1 = variables.Variable([3.0, 4.0, 5.0])
select2 = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns1 = state_ops.scatter_update(select1, j, 10.0)
ns2 = state_ops.scatter_update(select2, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns1, ns2)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3, self.evaluate(r))
result1 = self.evaluate(select1)
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
result2 = self.evaluate(select2)
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_3(self):
with self.cached_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j, _):
return math_ops.less(j, 3)
def loop_body(j, _):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
return [nj, ns]
r = control_flow_ops.while_loop(
loop_iterator,
loop_body, [n, array_ops.identity(select)],
parallel_iterations=1)
self.evaluate(variables.global_variables_initializer())
result = r[1]
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_4(self):
with self.cached_session():
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
self.evaluate(variables.global_variables_initializer())
c = constant_op.constant(0, name="c")
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn2 = state_ops.assign_add(var_b, asn1, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1)
self.assertEqual(0, self.evaluate(var_b))
self.evaluate(lpa) # Run the loop
self.assertEqual(10, self.evaluate(var_b))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_5(self):
with self.cached_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
self.evaluate(variables.global_variables_initializer())
# Change condition to check var_b
def pred(_):
return math_ops.less(var_b, 10)
# Change body to increment var_b
def loop_body(i):
asn1 = state_ops.assign_add(
var_a, constant_op.constant(1), name="a_add")
asn2 = state_ops.assign_add(
var_b, constant_op.constant(1), name="b_add")
with ops.control_dependencies([asn1, asn2]):
inc_b = array_ops.identity(var_b)
return inc_b
lpa = control_flow_ops.while_loop(
pred, loop_body, [var_b], parallel_iterations=1, name="loop")
self.assertEqual(0, self.evaluate(var_b))
self.evaluate(lpa) # Run the loop
self.assertEqual(10, self.evaluate(var_a))
self.assertEqual(10, self.evaluate(var_b))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_6(self):
with self.cached_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
c = constant_op.constant(0)
self.evaluate(variables.global_variables_initializer())
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
with ops.control_dependencies([asn1]):
asn2 = state_ops.assign_add(var_b, var_a, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1, name="loop")
self.assertEqual(0, self.evaluate(var_b))
self.evaluate(lpa) # Run the loop
self.assertEqual(55, self.evaluate(var_b))
self.assertEqual(10, self.evaluate(var_a))
@test_util.run_v1_only("b/120545219")
def testWhileQueue_1(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(-1, dtypes.int32)
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
self.assertEqual([10], self.evaluate(r))
for i in xrange(10):
self.assertEqual([i], self.evaluate(q.dequeue()))
@test_util.run_v1_only("b/120545219")
def testWhileTimeOut(self):
run_options = config_pb2.RunOptions(timeout_in_ms=1)
with self.cached_session() as sess:
n = constant_op.constant(0)
c = lambda x: True
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n])
with self.assertRaises(errors_impl.DeadlineExceededError):
sess.run(r, options=run_options)
@test_util.disable_control_flow_v2("b/117119329 (stack)")
@test_util.run_v1_only("b/120545219")
def testWhileStack_1(self):
with self.cached_session():
s = gen_data_flow_ops.stack_v2(-1, dtypes.int32, stack_name="foo")
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies(
[gen_data_flow_ops.stack_push_v2(s, i)], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
x = constant_op.constant(0)
def c1(i, _):
return math_ops.greater(i, 0)
def b1(i, x):
ni = math_ops.subtract(i, 1)
nx = x + gen_data_flow_ops.stack_pop_v2(s, dtypes.int32)
return [ni, nx]
_, rx = control_flow_ops.while_loop(
c1,
b1, [r, x],
[r.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
self.assertEqual(45, self.evaluate(rx))
def _testWhileGrad_ColocateGradients(self, colocate):
gpu_dev_name = test.gpu_device_name() if test.is_gpu_available(
) else "/device:CPU:0"
graph = ops.Graph()
with graph.as_default():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
def b(x):
with ops.device(gpu_dev_name):
return math_ops.square(x)
loop = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(
loop, v, colocate_gradients_with_ops=colocate)[0]
r_ops = graph.get_operations()
r_devices = [(op.name, op.device) for op in r_ops]
self.assertTrue(any("Square" in op.name for op in r_ops))
for (name, dev) in r_devices:
if not colocate and name.endswith("Square"):
          # Without colocation, only the forward graph places the Square op
          # on the GPU.
self.assertTrue(gpu_dev_name in dev)
elif colocate and "Square" in name:
          # With colocation, both the forward Square and backward Square_grad
          # ops are placed on the GPU.
self.assertTrue(gpu_dev_name in dev)
else:
self.assertFalse(gpu_dev_name in dev)
with self.session(graph=graph) as sess:
self.assertAllClose(1024.0, self.evaluate(r))
@test_util.disable_control_flow_v2("b/116351701 (colocation)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ColocateGradients(self):
self._testWhileGrad_ColocateGradients(colocate=False)
self._testWhileGrad_ColocateGradients(colocate=True)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_Square(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = control_flow_ops.cond(math_ops.less(1, 2), lambda: r, lambda: v)
r = gradients_impl.gradients(r, v)[0]
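      # The loop squares v three times (2 -> 4 -> 16 -> 256), so r = v**8 and
      # dr/dv = 8 * v**7 = 1024.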
self.assertAllClose(1024.0, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_Shape(self):
with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape=[None])
v = constant_op.constant([2.0], name="v")
n = constant_op.constant(0, name="n")
c = lambda i, v: math_ops.less(i, 5)
b = lambda i, v: [i + 1, math_ops.multiply(x, v)]
r = control_flow_ops.while_loop(
c,
b, [n, v],
[n.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
r = gradients_impl.gradients(r[1], x)[0]
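      # After 5 iterations r[1] = 2 * x**5 (elementwise), so the gradient is
      # 10 * x**4, i.e. [810.0, 2560.0] for x = [3.0, 4.0].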
self.assertEqual([None], r.get_shape().as_list())
self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))
@test_util.run_deprecated_v1
def testWhileGrad_BaseShape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32, [None])
v0 = constant_op.constant([2.0, 2.0], name="v")
c = lambda v: constant_op.constant(False)
b = lambda v: math_ops.multiply(v, x)
r = control_flow_ops.while_loop(c, b, [v0])
y = math_ops.square(x)
r = gradients_impl.gradients([r, y], x)[0]
self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_MultipleUses(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.multiply(r, r)
r = gradients_impl.gradients(r, v)[0]
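      # r = v**8 from the loop, so r * r = v**16 and its gradient is
      # 16 * v**15 = 524288.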
self.assertEqual(524288.0, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_LoopAdd(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.add(r, r)
r = gradients_impl.gradients(r, v)[0]
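      # r = v**8 from the loop, so r + r = 2 * v**8 and its gradient is
      # 16 * v**7 = 2048.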
self.assertAllClose(2048.0, self.evaluate(r))
def _testWhileGrad_Mul(self, use_gpu, p_iters):
with self.cached_session(use_gpu=use_gpu) as sess:
a = constant_op.constant(3.0, name="a")
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=p_iters)
grad_a, grad_v = gradients_impl.gradients(r, [a, v])
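      # The loop runs 4 times (2 -> 6 -> 18 -> 54 -> 162), so r = v * a**4:
      # dr/da = 4 * v * a**3 = 216 and dr/dv = a**4 = 81.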
grad_a_val, grad_v_val = self.evaluate([grad_a, grad_v])
self.assertAllClose(216.0, grad_a_val)
self.assertAllClose(81.0, grad_v_val)
@test_util.run_deprecated_v1
def testWhileGrad_Mul(self):
self._testWhileGrad_Mul(use_gpu=False, p_iters=1)
self._testWhileGrad_Mul(use_gpu=False, p_iters=10)
self._testWhileGrad_Mul(use_gpu=True, p_iters=1)
self._testWhileGrad_Mul(use_gpu=True, p_iters=10)
def testWhileGradInControlDeps(self):
@def_function.function
def f():
x_init = constant_op.constant(2.)
loop_cond = lambda i, x: math_ops.less(i, 2)
loop_body = lambda i, x: [i + 1, x**2]
_, x = control_flow_ops.while_loop(loop_cond, loop_body, [0, x_init])
with ops.control_dependencies([x]):
(grad,) = gradients_impl.gradients(x, x_init)
return grad
self.assertAllEqual(f(), 4. * 2.**3) # 4 * x_init ^ 3
def _testNestedWhileCondWhileGrad(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
def b(x):
return control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.square(inner_loop(x)[1]),
lambda: math_ops.multiply(x, 2.0))
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
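      # inner_loop multiplies by 2 four times, so one outer iteration gives
      # r = (16 * v)**2 = 256 * v**2 and dr/dv = 512 * v = 512.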
self.assertAllClose(512.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileCondWhileGrad(self):
self._testNestedWhileCondWhileGrad(use_gpu=False)
@test_util.run_deprecated_v1
def testNestedWhileCondWhileGradGpu(self):
self._testNestedWhileCondWhileGrad(use_gpu=True)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_Variable(self):
with self.cached_session():
a = variables.Variable(3.0)
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(r, a)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(216.0, r[0])
@test_util.run_deprecated_v1
def testWhileGrad_ResourceVariable(self):
with self.cached_session():
a = resource_variable_ops.ResourceVariable(3.0)
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
g = gradients_impl.gradients(r, a)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(216.0, g[0])
def testWhileGrad_EagerResourceVariable(self):
with context.eager_mode():
a = resource_variable_ops.ResourceVariable(
np.ones([2, 2], dtype=np.float32))
v = constant_op.constant(1.0)
@eager_function.defun
def fn():
r = control_flow_ops.while_loop(
lambda i, _: i < 2,
lambda i, x: (i + 1, x * math_ops.reduce_sum(a) * v),
[0, 1.0])[1]
return gradients_impl.gradients(r, [v])[0]
self.assertEqual(self.evaluate(fn()), 32.)
def testWhileGrad_ResourceVarInFunctionCall(self):
@def_function.function
def foo(x, var):
return x + math_ops.reduce_sum(var.sparse_read([1, 3]))
@def_function.function
def bar(var):
r = control_flow_ops.while_loop(
lambda i, _: i < 2,
lambda i, x: (i + 1, foo(x, var)),
[0, 0.0])[1]
return gradients_impl.gradients(r, var)[0]
var = resource_variable_ops.ResourceVariable([1., 2., 3., 4.])
self.evaluate(variables.global_variables_initializer())
grad = self.evaluate(bar(var))
self.assertAllEqual(gradient_checker_v2._to_numpy(grad), [0., 2., 0., 2.])
def testWhileGrad_ResourceVarInNestedFunctionCall(self):
@def_function.function
def foo(x, var):
return x + math_ops.reduce_sum(var.sparse_read([1, 3]))
@def_function.function
def foo2(x, var):
return foo(x, var)
@def_function.function
def bar(var):
r = control_flow_ops.while_loop(
lambda i, _: i < 2,
lambda i, x: (i + 1, foo2(x, var)),
[0, 0.0])[1]
return gradients_impl.gradients(r, var)[0]
var = resource_variable_ops.ResourceVariable([1., 1., 1., 1.])
self.evaluate(variables.global_variables_initializer())
grad = self.evaluate(bar(var))
self.assertAllEqual(gradient_checker_v2._to_numpy(grad), [0., 2., 0., 2.])
def testWhileGrad_ResourceVarInLoopInFunctionCall(self):
if test.is_gpu_available():
self.skipTest("b/128635252")
@def_function.function
def foo(x, var):
return control_flow_ops.while_loop(
lambda j, _: j < 3,
lambda j, y: (j + 1,
y + math_ops.reduce_sum(var.sparse_read([1, 2]))),
[0, x])[1]
@def_function.function
def bar(var):
r = control_flow_ops.while_loop(
lambda i, _: i < 2,
lambda i, x: (i + 1, foo(x, var)),
[0, 0.0])[1]
return gradients_impl.gradients(r, var)[0]
var = resource_variable_ops.ResourceVariable([1., 1., 1., 1.])
self.evaluate(variables.global_variables_initializer())
grad = self.evaluate(bar(var))
self.assertAllEqual(gradient_checker_v2._to_numpy(grad), [0., 6., 6., 0.])
def testWhileCondGrad_ResourceVarInFunctionCall(self):
@def_function.function
def foo(x, var):
return x + var.sparse_read([1])[0]
def body(i, x):
return (i + 1, control_flow_ops.cond(
math_ops.equal(i % 2, 0),
lambda: foo(x, var1),
lambda: foo(x, var2)))
@def_function.function
def bar(var1, var2):
r = control_flow_ops.while_loop(
lambda i, _: i < 4, body, [0, 0.0])
return gradients_impl.gradients(r, [var1, var2])
var1 = resource_variable_ops.ResourceVariable([1., 2., 3.])
var2 = resource_variable_ops.ResourceVariable([4., 5.])
self.evaluate(variables.global_variables_initializer())
grads = self.evaluate(bar(var1, var2))
self.assertAllEqual(gradient_checker_v2._to_numpy(grads[0]), [0., 2., 0.])
self.assertAllEqual(gradient_checker_v2._to_numpy(grads[1]), [0., 2.])
@test_util.run_deprecated_v1
def testWhileGrad_ResourceVarSparseRead(self):
# NOTE(skyewm): this test is interesting because the gradient is the
# aggregation result of IndexedSlices and Tensors.
var = resource_variable_ops.ResourceVariable(np.ones(5),
dtype=dtypes.float32)
r = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, x * math_ops.reduce_sum(var.sparse_read([1, 3]))),
[0, constant_op.constant(1.0)])[1]
grad = gradients_impl.gradients(r, var)[0]
self.evaluate(variables.global_variables_initializer())
grad_val = self.evaluate(grad)
arr = gradient_checker_v2._to_numpy(grad_val)
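    # Each of the 3 iterations multiplies by s = var[1] + var[3] = 2, so
    # r = s**3 and dr/dvar[k] = 3 * s**2 = 12 for k in {1, 3}.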
self.assertAllEqual(arr, [0., 12., 0., 12., 0.])
@test_util.run_deprecated_v1
def testWhileGrad_MultiResourceVarSparseRead(self):
# NOTE(skyewm): this test is interesting because the gradient is the
# aggregation result of IndexedSlices and Tensors.
var1 = resource_variable_ops.ResourceVariable(np.ones(5),
dtype=dtypes.float32)
var2 = resource_variable_ops.ResourceVariable(np.ones(3),
dtype=dtypes.float32)
x1_init = constant_op.constant([0., 0.])
x2_init = constant_op.constant(1.)
x3_init = constant_op.constant(1.)
def body(i, unused_x1, x2, x3):
y1 = var1.sparse_read([1, 3])
y2 = x2 * 2
y3 = x3 * math_ops.reduce_sum(var2.sparse_read([0]))
return i + 1, y1, y2, y3
r = control_flow_ops.while_loop(
lambda i, x1, x2, x3: i < 3, body,
[0, x1_init, x2_init, x3_init])[1:]
var1_grad, var2_grad = gradients_impl.gradients(r, [var1, var2])
self.evaluate(variables.global_variables_initializer())
var1_grad_val = self.evaluate(var1_grad)
var2_grad_val = self.evaluate(var2_grad)
self.assertAllEqual(gradient_checker_v2._to_numpy(var1_grad_val),
[0., 1., 0., 1., 0.])
self.assertAllEqual(gradient_checker_v2._to_numpy(var2_grad_val),
[3., 0., 0.])
@test_util.run_deprecated_v1
def testWhileGrad_Gather(self):
# NOTE(skyewm): this test is interesting because the gather gradient
# function returns an IndexedSlices.
x = constant_op.constant([1., 1., 1., 1., 1.])
y = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, x + array_ops.gather(x, [0])),
[0, x[:1]])[1]
z = y * 3.0
grad = gradients_impl.gradients(z, x)[0]
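    # y doubles on each of the 3 iterations, so y = 8 * x[0:1], z = 24 * x[0],
    # and dz/dx = [24, 0, 0, 0, 0].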
self.assertEqual(self.evaluate(y), 8.)
self.assertAllEqual(self.evaluate(grad), [24., 0., 0., 0., 0.])
@test_util.run_deprecated_v1
def testWhileGrad_GatherNoFanOut(self):
# NOTE(skyewm): this test is interesting because the gather gradient
# function returns an IndexedSlices.
x = constant_op.constant([1., 1., 1., 1., 1.])
y = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, array_ops.gather(x, [0])),
[0, x[:1]])[1]
z = y * 3.0
grad = gradients_impl.gradients(z, x)[0]
self.assertEqual(self.evaluate(y), 1.)
self.assertAllEqual(self.evaluate(grad), [3., 0., 0., 0., 0.])
@test_util.run_v1_only("b/120545219")
def testWhileGradInCond(self):
with self.cached_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def fn1():
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)[0]
r = control_flow_ops.cond(math_ops.less(1, 2), fn1, lambda: x)
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
@test_util.disable_control_flow_v2("b/116340060")
@test_util.run_v1_only("b/120545219")
def testGradInWhileWrtInitialLoopVal(self):
with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape=(), name="x")
y = x + 1
def body(i, v):
z = v * 2
return i + 1, gradients_impl.gradients(z, x)[0]
with self.assertRaisesRegexp(
ValueError,
"Cannot compute gradient inside while loop with respect to op 'x'. "
"We do not support taking the gradient wrt or through the initial "
"value of a loop variable. Gradients can be computed through "
"loop invariants or wrt the input parameters to the loop body."):
control_flow_ops.while_loop(lambda i, x: i < 3, body, [0, y])
@test_util.run_v1_only("b/120545219")
def testWhileGradInWhile(self):
with self.cached_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def b1(n):
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)
r = control_flow_ops.while_loop(lambda n: n < 6.0, b1, [n],
[tensor_shape.unknown_shape()])
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
@test_util.run_v1_only("b/120545219")
def testCondGradInNestedWhiles(self):
def outer_body(i, x):
_, x = control_flow_ops.while_loop(
lambda j, x: j < 3, inner_body, [0, 0.0])
return i + 1, x
def inner_body(j, x):
y = control_flow_ops.cond(math_ops.less(x, 1), lambda: 2 * x, lambda: x)
return j + 1, gradients_impl.gradients(y, x)[0]
i, x = control_flow_ops.while_loop(lambda i, x: i < 3, outer_body, [0, 0.0])
with self.cached_session() as sess:
i_val, x_val = self.evaluate([i, x])
self.assertEqual(i_val, 3)
self.assertAllClose(x_val, 1.0)
@test_util.run_gpu_only
def testGpuResourceAccess(self):
with ops.device(test.gpu_device_name()):
var = resource_variable_ops.ResourceVariable(constant_op.constant(3.0))
@def_function.function
def foo():
return control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, control_flow_ops.cond(
constant_op.constant(True),
lambda: x + var,
lambda: x)),
[0, 0.0])[1]
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(foo()), 9.0)
def testNestedResourceAccess(self):
var = resource_variable_ops.ResourceVariable(constant_op.constant(3.0))
@eager_function.defun
def test_fn():
x = constant_op.constant(0.0)
r = control_flow_ops.while_loop(
# Outer loop condition
lambda i, y: i < 2,
# Outer loop body
lambda i, y: (i + 1, y + control_flow_ops.cond(
constant_op.constant(True),
# True branch
lambda: control_flow_ops.while_loop(
# Inner loop condition
lambda j, z: j < 3,
# Inner loop body
lambda j, z: (j + 1, z + math_ops.square(var)),
# Inner initial loop value
[0, y])[1],
# False branch
lambda: (0.0))),
# Outer initial loop value
[0, x])[1]
grad = gradients_impl.gradients(r, x)[0]
return r, grad
self.evaluate(variables.global_variables_initializer())
r, grad = self.evaluate(test_fn())
# 2 * 3 * 3^2
self.assertEqual(r, 81.0)
# v1 control flow gets the wrong answer!!!
# Gradient computation:
# f(x) = x + 3^2
# inner_loop(x) = f(f(f(x))) = x + 3*3^2 = x + 27
# g(x) = x + inner_loop(x) = 2x + 27
# outer_loop(x) = g(g(x)) = 4x + 81
# outer_loop'(x) = 4
# Note that v1 control flow gets 4.0 as well if the cond is removed.
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertEqual(grad, 4.0)
def testWhile_NestedInput(self):
with self.cached_session() as sess:
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0), constant_op.constant(3.0)),
constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, lv2):
lv0 = named(a=lv0.a + 1, b=lv0.b)
lv1 = (lv1[0] + 1, lv1[1])
lv2 += 2
return [lv0, lv1, lv2]
r = control_flow_ops.while_loop(c, b, loop_vars)
self.assertTrue(isinstance(r, list))
self.assertTrue(isinstance(r[0], named))
self.assertTrue(isinstance(r[1], tuple))
self.assertTrue(isinstance(r[2], ops.Tensor))
r_flattened = nest.flatten(r)
self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0],
self.evaluate(r_flattened))
@test_util.run_v1_only("b/120545219")
def testWhile_NestedBadArityFails(self):
with self.cached_session():
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0), constant_op.constant(3.0)),
constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, _):
return [lv0, lv1]
with self.assertRaisesRegexp(ValueError, "the same number of elements"):
control_flow_ops.while_loop(c, b, loop_vars)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ys_xs(self):
with self.cached_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.add(x, y)
x1 = math_ops.multiply(x, y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y], parallel_iterations=1)
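      # Two iterations give rx = x * (x + 1) * (x + y)**2 = 300 and
      # ry = (x + 1) * (x + y) = 20, so d(rx+ry)/dx = 295 + 9 = 304,
      # d(rx+ry)/dy = 120 + 4 = 124, d(rx)/dx = 295 and d(rx)/dy = 120.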
r = gradients_impl.gradients([rx, ry], x)
self.assertAllClose(304.0, r[0])
r = gradients_impl.gradients([rx, ry], y)
self.assertAllClose(124.0, r[0])
r = gradients_impl.gradients([rx], x)
self.assertAllClose(295.0, r[0])
r = gradients_impl.gradients([rx], y)
self.assertAllClose(120.0, r[0])
@test_util.run_deprecated_v1
def testWhileGrad_Dependency(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 10)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
ri, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
r = gradients_impl.gradients([ri, rx], x)
self.assertAllClose(1024.0, r[0])
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0])
@test_util.disable_control_flow_v2("b/116355153 (back_prop flag)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_NoGradient(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], back_prop=False)
r = math_ops.add(r, v)
r = gradients_impl.gradients(r, v)
self.assertAllClose(1.0, r[0])
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_NoDependency(self):
with self.cached_session() as sess:
variable = variables.Variable(array_ops.ones([2, 3]))
duration = array_ops.zeros([], dtype=dtypes.int32)
def cond(duration, tensor, _):
del tensor
return duration < 10
def body(duration, tensor, _):
return (duration + 1, tensor, tensor)
loop_vars = [duration, variable, variable]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[2])
grad = gradients_impl.gradients(cost, [variable])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))
@test_util.run_deprecated_v1
def testWhileGrad_Const(self):
with self.cached_session() as sess:
c0 = constant_op.constant(0.0, name="c0")
c1 = constant_op.constant(1.0, name="c1")
duration = constant_op.constant(0, name="t")
def cond(duration, _):
return duration < 1
def body(duration, _):
return duration + 1, c1
loop_vars = [duration, c0]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[1])
grad = gradients_impl.gradients(cost, [c0])
self.assertAllClose(0.0, sess.run(grad[0]))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_SerialTwoLoops(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, rx = control_flow_ops.while_loop(c, b, [i, rx], parallel_iterations=1)
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0])
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ParallelTwoLoops(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, r1 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, r2 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
rx = math_ops.add(r1, r2)
r = gradients_impl.gradients([rx], x)
self.assertAllClose(64.0, r[0])
@test_util.run_v1_only("b/120545219")
def testWhileGrad_OneOutputWithControlDependencyOnSecond(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(1.0, name="y")
c = lambda i, *_: math_ops.less(i, 1, name="cond_less")
def b(i, xi, yi):
# return (i + 1, xi, xi + yi)
return (math_ops.add(i, 1, name="inc"), array_ops.identity(
xi, name="xi"), math_ops.add(xi, yi, name="xi_plus_yi"))
_, x_f, y_f = control_flow_ops.while_loop(c, b, [i, x, y])
with ops.control_dependencies([x_f]):
y_f_d = array_ops.identity(y_f, name="y_f_d")
self.assertAllClose(2.0, self.evaluate(y_f_d)) # y_f_d = 1.0 + 1.0
g = gradients_impl.gradients([y_f_d], [x])[0]
      self.assertIsNotNone(g)
self.assertAllClose(1.0,
self.evaluate(g)) # y_f_d = x + 1.0, dy_f_d/dx = 1.0
def _testNestedWhileGrad_Simple(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
c = lambda x: math_ops.less(x, 4.0)
b = lambda x: math_ops.multiply(x, 2.0)
return control_flow_ops.while_loop(c, b, [s])
c = lambda x: math_ops.less(x, 2.0)
b = lambda x: math_ops.multiply(inner_loop(x), 2.0)
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
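      # inner_loop doubles v twice (1 -> 2 -> 4) and the outer body doubles
      # once more, so r = 8 * v and dr/dv = 8.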
self.assertAllClose(8.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileGrad_Simple(self):
self._testNestedWhileGrad_Simple(use_gpu=False)
self._testNestedWhileGrad_Simple(use_gpu=True)
@test_util.run_v1_only("b/120545219")
def testNestedWhileGrad_SerialInner(self):
with self.cached_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(256.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileGrad_ParallelInner(self):
with self.cached_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: math_ops.multiply(inner_loop1(x)[1], inner_loop2(x)[1])
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(512.0, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testNestedWhileGrad_ParallelIterations(self):
# Make sure the stack pushes and pops of an inner loop are executed in
# the sequential order of the iterations of its outer loop.
with self.cached_session() as sess:
def inner_loop(t):
fn = lambda n: n + math_ops.square(var)
return map_fn.map_fn(fn=fn, elems=t, parallel_iterations=10)
def outer_loop(inp):
return map_fn.map_fn(
fn=inner_loop, elems=inp, parallel_iterations=10)
var = variables.Variable(constant_op.constant(3.0))
inp = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
res = outer_loop(inp)
optimizer = adam.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(math_ops.reduce_mean(math_ops.square(res)))
self.evaluate(variables.global_variables_initializer())
self.evaluate(train_op)
self.assertAllClose(2.999, var.read_value())
def _testWhileCondGrad_Simple(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
v = ops.convert_to_tensor(2.0, name="v")
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(1024.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testWhileCondGrad_Simple(self):
self._testWhileCondGrad_Simple(use_gpu=False)
self._testWhileCondGrad_Simple(use_gpu=True)
@test_util.run_deprecated_v1
def testWhileCondGrad_UnknownShape(self):
with self.cached_session() as sess:
v = array_ops.placeholder(dtypes.float32)
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
r = sess.run(r, feed_dict={v: 2.0})
self.assertAllClose(1024.0, r)
@test_util.run_deprecated_v1
def testWhileGrad_Concat(self):
with self.cached_session() as sess:
x = variable_scope.get_variable("x", initializer=[[1., 2.]])
i0 = constant_op.constant(0)
h0 = array_ops.zeros([0, 2])
def condition(i, _):
return i < 2
def body(i, h):
return i + 1, array_ops.concat([h, x], 0)
_, h = control_flow_ops.while_loop(
condition, body, [i0, h0],
[i0.get_shape(), tensor_shape.TensorShape([None, 2])])
s = math_ops.reduce_sum(h)
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
op = optimizer.minimize(s)
self.evaluate(variables.global_variables_initializer())
self.evaluate(op)
self.assertAllClose([[0.98000002, 1.98000002]], self.evaluate(x))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileWithRefsWithGradients_1(self):
with self.cached_session() as sess:
x = variables.VariableV1(0.)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 10)
self.assertEqual(x.dtype, dtypes.float32_ref)
def body(i, x):
self.assertEqual(x.dtype, dtypes.float32_ref)
return [i + 1, gen_array_ops.ref_identity(x)]
r = control_flow_ops.while_loop(c, body, [i, x], parallel_iterations=5)
grad_ys = [variables.VariableV1(73)._ref()] # pylint: disable=protected-access
grad = gradients_impl.gradients([r[1]], [x], grad_ys=grad_ys)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.float32_ref)
value_i, value_x, value_x_grad = sess.run(r + grad)
self.assertEqual(10, value_i)
self.assertEqual(0, value_x)
self.assertEqual(73, value_x_grad)
@test_util.deprecated_graph_mode_only
def testWhileGrad_IndexedSlices(self):
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))
@test_util.deprecated_graph_mode_only
def testWhileGrad_SparseTensor(self):
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))
@test_util.deprecated_graph_mode_only
def testCallGradInLoop(self):
with self.cached_session() as sess:
i0 = constant_op.constant(0)
params = constant_op.constant(5.0)
params_1 = math_ops.square(params)
def c(i, _):
return i < 10
def b(i, x):
data = constant_op.constant([1.0, 2.0, 3.0])
data = math_ops.multiply(data, params_1)
x1 = x + gradients_impl.gradients(data, params)[0]
return i + 1, x1
output_grad = control_flow_ops.while_loop(
c, b, [i0, constant_op.constant(0.0)])
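      # Each iteration adds d(sum(data * params**2))/d(params)
      # = (1 + 2 + 3) * 2 * params = 60, so 10 iterations accumulate 600.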
self.assertAllClose(600.0, self.evaluate(output_grad)[1])
@test_util.run_deprecated_v1
def testWhileAndTensorArray(self):
with self.cached_session() as sess:
param = constant_op.constant(2.0)
n0 = constant_op.constant(0)
y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
def c(i, _):
return i < 10
def b(i, y):
return [
i + 1,
map_fn.map_fn(lambda x: math_ops.multiply(x, param), y)
]
r = control_flow_ops.while_loop(c, b, [n0, y0], parallel_iterations=1)
r = gradients_impl.gradients(r, param)[0]
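      # After 10 iterations y = y0 * param**10, so the gradient w.r.t. param is
      # sum(y0) * 10 * param**9 = 21 * 10 * 512 = 107520.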
self.assertAllClose(107520.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileAndTensorArray(self):
n = constant_op.constant(3.0)
def Body(row, ta):
def InnerBody(row, col, ta):
# Note: row and col are 1-based.
ta = ta.write(
math_ops.cast(n * (row - 1.) + col - 1., dtypes.int32), row * col)
return row, col + 1., ta
ta = control_flow_ops.while_loop(
lambda _, col, _1: col <= n,
InnerBody, [row, constant_op.constant(1.), ta],
return_same_structure=False)[2]
return row + 1., ta
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=9)
ta = control_flow_ops.while_loop(
lambda row, _: row <= n,
Body, [constant_op.constant(1.), ta],
return_same_structure=False)[1]
output = array_ops.reshape(ta.stack(), [3, 3])
self.assertAllEqual(
self.evaluate(output), [[1., 2., 3.], [2., 4., 6.], [3., 6., 9.]])
# TODO(b/117675481): This does not work with current TA. Enable with new TA.
# grad = gradients_impl.gradients(output, [n])
# self.assertEqual(self.evaluate(grad), 3.5)
@test_util.run_deprecated_v1
def testWhileGrad_StopGrad(self):
with self.cached_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.square(y)
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y])
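      # Two iterations: rx = (x**2 + y**2)**2 + y**4 = 185 and ry = y**4 = 16,
      # so d(rx)/dy = 4*y*(x**2 + y**2) + 4*y**3 = 136 and d(ry)/dy = 32.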
r = gradients_impl.gradients(rx, y)[0]
self.assertEqual(136.0, self.evaluate(r))
r = gradients_impl.gradients(ry, y)[0]
self.assertEqual(32.0, self.evaluate(r))
r = gradients_impl.gradients(array_ops.stop_gradient(rx), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(array_ops.stop_gradient(ry), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.square(rx)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), x)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(math_ops.add(rx, ry), y)[0]
self.assertEqual(168.0, self.evaluate(r))
r = gradients_impl.gradients(
math_ops.add(rx, array_ops.stop_gradient(ry)), y)[0]
self.assertEqual(136.0, self.evaluate(r))
r = gradients_impl.gradients(
math_ops.add(array_ops.stop_gradient(rx), ry), y)[0]
self.assertEqual(32.0, self.evaluate(r))
@test_util.run_deprecated_v1
@test_util.disable_control_flow_v2("b/118712257")
def testWhileGrad_StopGradInside(self):
with self.cached_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
self.assertAllClose(0.0, self.evaluate(r))
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose(156.0, self.evaluate(r))
@test_util.run_deprecated_v1
@test_util.disable_control_flow_v2("b/118712257")
def testWhileGrad_StopGradInsideNoShape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
c = lambda x, y: math_ops.less(math_ops.reduce_sum(x), 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y, name="stopped"))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
feed_dict = {x: [3.0, 4.0], y: [2.0, 3.0]}
self.assertAllClose([0.0, 0.0], sess.run(r, feed_dict=feed_dict))
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose([156.0, 400.0], sess.run(r, feed_dict=feed_dict))
name = "gradients/while/stopped_grad"
all_ops = x.graph.get_operations()
self.assertFalse(any(name in op.name for op in all_ops))
@test_util.run_deprecated_v1
def testWhileGradGradFail(self):
theta = variables.Variable(initial_value=1.)
def fn(prev, x):
return prev + x * theta
result = functional_ops.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
grad_theta = gradients_impl.gradients(result, theta)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
with self.assertRaisesRegexp(TypeError, "Second-order gradient"):
gradients_impl.gradients(grad_theta, theta)
grad_theta_stopped = array_ops.stop_gradient(grad_theta)
gradients_impl.gradients(grad_theta_stopped, theta)
@test_util.run_deprecated_v1
def testStopGradOnWhileGrad(self):
with self.cached_session():
x = constant_op.constant(2.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x: math_ops.less(x, 100.0)
b = lambda x: math_ops.multiply(x, y)
rx = control_flow_ops.while_loop(c, b, [x])
rg = gradients_impl.gradients(rx, y)[0]
rg = array_ops.stop_gradient(rg)
r = math_ops.add(math_ops.square(y), rx)
r = math_ops.add(r, rg)
r = gradients_impl.gradients(r, y)[0]
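      # The loop doubles x six times, so rx = x * y**6 = 128 and
      # rg = d(rx)/dy = 6 * x * y**5 = 384 (stopped). Hence
      # dr/dy = 2*y + d(rx)/dy + 0 = 4 + 384 = 388.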
self.assertEqual(388.0, self.evaluate(r))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_deprecated_v1
def testWhileGradientWithNontrainablePath1(self):
q = variables.Variable([7., 8.])
def cond(_, y):
del y
return False
def body(x, _):
return x, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)
_, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
dy_dq, = gradients_impl.gradients(y, q)
self.assertIsNotNone(dy_dq)
with self.cached_session() as sess:
self.evaluate(q.initializer)
self.assertAllClose([0., 0.], self.evaluate(dy_dq))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileGradientWithNontrainablePath2(self):
q = variables.Variable([7., 8.])
def cond(_, y):
return math_ops.equal(y, 0.)
def body(x, _):
zero = constant_op.constant(0, dtype=dtypes.int64)
return zero, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)
_, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
dy_dq, = gradients_impl.gradients(y, q)
self.assertIsNotNone(dy_dq)
with self.cached_session() as sess:
self.evaluate(q.initializer)
self.assertAllClose([1., 1.], self.evaluate(dy_dq))
@test_util.run_v1_only("b/120545219")
def testIssue16504(self):
c = constant_op.constant(np.arange(100), dtype=dtypes.float32)
w = variables.Variable(
initial_value=np.ones(100), dtype=dtypes.float32) / 100
k = variables.Variable(0, dtype=dtypes.int32)
chg_w = constant_op.constant(np.inf, dtype=dtypes.float32)
def cond(k, _, chg_w):
return math_ops.logical_and(k < 10, chg_w > 1e-3)
def body(k, w, chg_w):
grad, = gradients_impl.gradients(-math_ops.reduce_sum(w * c), w)
w_n = w * math_ops.exp(-0.1 * grad)
w_n /= math_ops.reduce_sum(w_n)
chg_w = (
math_ops.reduce_sum(math_ops.abs(w_n - w)) / math_ops.reduce_sum(
math_ops.abs(w)))
return k + 1, w_n, chg_w
_, w, _ = control_flow_ops.while_loop(cond, body, [k, w, chg_w])
grad, = gradients_impl.gradients(w, c)
self.assertIsNotNone(grad)
@test_util.run_v1_only("b/120545219")
def testStopGradMultiFlows(self):
with self.cached_session():
def body(i, y, r):
x = variable_scope.get_variable(
"x",
shape=(),
dtype=dtypes.float32,
initializer=init_ops.ones_initializer())
y *= x
return [i + 1, y, r + math_ops.reduce_sum(y)]
i0 = constant_op.constant(0)
y0 = array_ops.ones(5)
r0 = constant_op.constant(0.0)
cond = lambda i, y, r: i < 1
_, _, r = control_flow_ops.while_loop(
cond, body, [i0, y0, r0], back_prop=True)
vars_ = variables.global_variables()
grads = linalg_ops.norm(gradients_impl.gradients(r, vars_)[0])
z = math_ops.add(r, array_ops.stop_gradient(math_ops.reduce_sum(grads)))
result = gradients_impl.gradients(z, vars_)[0]
self.evaluate(variables.global_variables_initializer())
self.assertEqual(5.0, self.evaluate(result))
@test_util.run_v1_only("b/120545219")
def testOneValueCond(self):
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
one = ops.convert_to_tensor(1, name="one")
two = ops.convert_to_tensor(2, name="two")
p = math_ops.greater_equal(c, 1)
i = control_flow_ops.cond(p, lambda: one, lambda: two)
self.assertTrue(isinstance(i, ops.Tensor))
# True case: c = 2 is >= 1
self.assertEqual([1], i.eval(feed_dict={c: 2}))
# False case: c = 0 is not >= 1
self.assertEqual([2], i.eval(feed_dict={c: 0}))
@test_util.run_deprecated_v1
def testExampleCond(self):
with self.cached_session():
x = ops.convert_to_tensor([-2.0, 2.0], name="x")
d = array_ops.placeholder(dtypes.int32, shape=[])
def l2():
return math_ops.sqrt(math_ops.reduce_sum(math_ops.square(x)))
def l1():
return math_ops.reduce_sum(math_ops.abs(x))
i = control_flow_ops.cond(math_ops.equal(d, 2), l2, l1)
self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
@test_util.run_v1_only("b/120545219")
def testCase(self):
with self.cached_session():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
{
x < y: f1,
x > z: f2
}, default=f3, exclusive=True)
self.assertAllEqual(r1, 17)
r2 = control_flow_ops.case([(y > z, f1), (y > x, f2)], default=f3)
self.assertAllEqual(r2, 23)
      # Duplicate predicates can both be true; the first one is selected.
r3 = control_flow_ops.case([(x < y, f1), (x < y, f2)], default=f3)
self.assertAllEqual(r3, 17)
# Duplicate events cause an error if exclusive = True
r4 = control_flow_ops.case(
[(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
with self.assertRaisesOpError("Input error:"):
self.evaluate(r4)
# Check that the default is called if none of the others are
r5 = control_flow_ops.case({x > y: f1}, default=f3)
self.assertAllEqual(r5, -1)
ran_once = [False, False, False]
def break_run_twice(ix):
def _break():
ran_once[ix] = True
return constant_op.constant(ix)
return _break
# Should not fail - each conditional gets called exactly once
# except default. Default gets called twice: once to create an
# empty output and once for the actual cond switch.
r6 = control_flow_ops.case(
[(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
default=lambda: constant_op.constant(2))
self.assertAllEqual(r6, 0)
@test_util.run_v1_only("b/120545219")
def testCaseSideEffects(self):
with self.cached_session() as sess:
v0 = variables.Variable(-1)
v1 = variables.Variable(-1)
v2 = variables.Variable(-1)
      a = lambda: control_flow_ops.with_dependencies(
          [state_ops.assign(v0, 0)], 0)
      b = lambda: control_flow_ops.with_dependencies(
          [state_ops.assign(v1, 1)], 1)
      c = lambda: control_flow_ops.with_dependencies(
          [state_ops.assign(v2, 2)], 2)
x = constant_op.constant(1)
y = constant_op.constant(2)
r0 = control_flow_ops.case(
((x < y, a), (x > y, b)), default=c, exclusive=True)
r1 = control_flow_ops.case(
((x > y, a), (x < y, b)), default=c, exclusive=True)
r2 = control_flow_ops.case(
((x > y, a), (x > y, b)), default=c, exclusive=True)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3)
self.assertEqual(2, self.evaluate(r2))
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1, -1, 2])
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3)
self.assertEqual(1, self.evaluate(r1))
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1, 1, -1])
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3)
self.assertEqual(0, self.evaluate(r0))
self.assertAllEqual(self.evaluate([v0, v1, v2]), [0, -1, -1])
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
@test_util.run_v1_only("b/120545219")
def testOneOpCond(self):
with self.cached_session():
v = variables.Variable(0)
c = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
two = ops.convert_to_tensor(2)
p = math_ops.greater_equal(c, 1)
def a():
return state_ops.assign(v, one)
def b():
return state_ops.assign(v, two)
i = control_flow_ops.cond(p, a, b)
self.assertTrue(isinstance(i, ops.Tensor))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(v))
# True case: c = 2 is >= 1, v is set to 1.
self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
self.assertEqual(1, self.evaluate(v))
# False case: c = 0 is not >= 1, v is set to 2.
self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, self.evaluate(v))
@test_util.run_v1_only("b/120545219")
def testWithOpsDependencies(self):
with self.cached_session() as sess:
v = variables.VariableV1(0.0)
c = constant_op.constant(10)
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate([c, v])
      # Use a control dependency to ensure v's initializer is run
      # while asking for c
real_v = control_flow_ops.with_dependencies(
name="real_tensor",
output_tensor=v._ref(), # pylint: disable=protected-access
dependencies=[v.initializer])
c_val, real_v_val = self.evaluate([c, real_v])
      # Ensure the fetched value of 'c' is unchanged
self.assertAllEqual(10, c_val)
# Ensure that 'v' is initialized
self.assertAllClose(0.0, real_v_val)
@test_util.run_v1_only("b/120545219")
def testWithTensorDependencies(self):
with self.cached_session():
v = variables.VariableV1(0.0)
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
# c1_with_init_v depends on the init op for v
c1_with_init_v = control_flow_ops.with_dependencies(
name="c1_with_init_v", output_tensor=c1, dependencies=[v.initializer])
# c2_with_c1 depends on the value of c1_with_init_v
c2_with_c1_dep = control_flow_ops.with_dependencies(
name="c2_with_c1_dep",
output_tensor=c2,
dependencies=[c1_with_init_v])
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v)
# Get the value of 'c2_with_c1_dep', which should cause 'v'
# to be initialized.
self.assertAllEqual(20, self.evaluate(c2_with_c1_dep))
# Ensure that 'v' is initialized
self.assertAllClose(0.0, self.evaluate(v))
@test_util.run_v1_only("b/120545219")
def testWithIndexedSlicesDependencies(self):
with self.cached_session():
v = variables.VariableV1(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
v_at_1 = ops.IndexedSlices(v, constant_op.constant([1]))
gather_v_at_1 = array_ops.gather(v_at_1.values, v_at_1.indices)
v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
v_at_1)
gather_v_at_1_after_init = array_ops.gather(v_at_1_after_init.values,
v_at_1_after_init.indices)
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(gather_v_at_1)
# Getting gather_v_at_1_after_init will work, and initialize v.
self.assertAllEqual([[10.0, 11.0]],
self.evaluate(gather_v_at_1_after_init))
# Double check that 'v' is initialized
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
self.evaluate(v))
def testDependenciesDevice(self):
with ops.Graph().as_default():
# device set on tensor => same device on dep.
with ops.device("/job:ps"):
vd = variables.VariableV1([0.0])
with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
self.assertTrue("/job:ps" in with_vd_dep.device)
# No device set on tensor => no device on dep.
vnod = variables.VariableV1([0.0])
with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
vnod)
self.assertDeviceEqual(None, with_vnod_dep.device)
# device set on tensor, default device on graph => default device on dep.
vdef = variables.VariableV1([0.0], name="vdef")
with ops.device("/job:worker/device:GPU:1"):
with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
vdef)
# The device is empty, but the colocation constraint is set.
self.assertDeviceEqual("", with_vdef_dep.device)
self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testGroup(self):
with self.cached_session() as sess:
v1 = variables.VariableV1([0.0])
v2 = variables.VariableV1([1.0])
# Group init1 and init2 and run.
init = control_flow_ops.group(v1.initializer, v2.initializer)
# Fetching v1 directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v1)
# Runs "init" before fetching v1 and v2.
init.run()
v1_val, v2_val = self.evaluate([v1, v2])
# Ensure that v1 and v2 are initialized
self.assertAllClose([0.0], v1_val)
self.assertAllClose([1.0], v2_val)
@test_util.run_v1_only("b/120545219")
def testGroupEmpty(self):
op = control_flow_ops.group()
self.assertEqual(op.type, "NoOp")
self.assertEqual(op.control_inputs, [])
@test_util.run_deprecated_v1
def testMergeShapes(self):
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
m, index = control_flow_ops.merge([p1, p2, p3])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with different ranks.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2, 3])
m, index = control_flow_ops.merge([p1, p2])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with some dimensions different.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 1])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
# All inputs known with same dimensions.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([1, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, None])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
@test_util.run_v1_only("b/120545219")
def testRefSelect(self):
index = array_ops.placeholder(dtypes.int32)
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
v1 = variables.VariableV1(p1, validate_shape=False)
v2 = variables.VariableV1(p2, validate_shape=False)
v3 = variables.VariableV1(p3, validate_shape=False)
self.assertIs(None, v1.get_shape().ndims)
s = control_flow_ops.ref_select(index, [v1, v2, v3])
self.assertIs(None, s.get_shape().ndims)
# All inputs known but different.
v1 = variables.VariableV1([[1, 2]])
v2 = variables.VariableV1([[2], [1]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertIs(None, s.get_shape().ndims)
# All inputs known and same.
v1 = variables.VariableV1([[1, 2]])
v2 = variables.VariableV1([[1, 2]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual([1, 2], s.get_shape())
# Possibly the same but not guaranteed.
v1 = variables.VariableV1([[1., 2.]])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
v2 = variables.VariableV1(p2, validate_shape=False)
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual(None, s.get_shape())
@test_util.run_deprecated_v1
def testRunLoopTensor(self):
with self.cached_session() as sess:
tensor_list = []
def condition(t):
return t < constant_op.constant(5)
def body(_):
tensor_list.append(constant_op.constant(5))
return constant_op.constant(10)
result = control_flow_ops.while_loop(condition, body,
[constant_op.constant(4)])
self.assertEqual(10, self.evaluate(result))
# Ensure that we cannot run a tensor that escapes the loop body
# accidentally.
with self.assertRaises(ValueError):
sess.run(tensor_list[0])
@test_util.run_v1_only("b/120545219")
def testWhilePyFuncBasic(self):
def func(x):
return np.square(x)
with self.cached_session():
r = control_flow_ops.while_loop(
lambda i, v: i < 4,
lambda i, v: [i + 1, script_ops.py_func(func, [v], [dtypes.float32])[0]],
[constant_op.constant(0), constant_op.constant(2.0, dtypes.float32)],
[tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
self.assertEqual(self.evaluate(r[1]), 65536.0)
@test_util.run_v1_only("b/120545219")
def testWhileFuncBasic(self):
@function.Defun(dtypes.float32)
def func(x):
return math_ops.square(math_ops.square(x))
with self.cached_session():
x = constant_op.constant(2.0, dtypes.float32)
r = control_flow_ops.while_loop(
lambda i, v: i < 2, lambda i, v: [i + 1, func(v)],
[constant_op.constant(0), x],
[tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()])
grad = gradients_impl.gradients(r, x)[0]
self.assertEqual(self.evaluate(r[1]), 65536.0)
self.assertEqual(self.evaluate(grad), 524288.0)
# while_v2 does not have stacks.
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertEqual(
len([op for op in x.graph.get_operations() if op.type == "StackV2"
]), 1)
@test_util.run_v1_only("b/120545219")
def testQIntSwitchMerge(self):
with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
constant_qint = constant_op.constant(np.array([42]), dtypes.qint8)
cond = constant_op.constant(True, dtypes.bool)
v_f, v_t = control_flow_ops.switch(constant_qint, cond)
result = control_flow_ops.merge([v_f, v_t])
self.evaluate(result)
@test_util.run_v1_only("b/120545219")
def testQIntRefSwitchMerge(self):
with self.cached_session(use_gpu=test.is_gpu_available()) as sess:
var_qint = gen_state_ops.variable(
shape=[1], dtype=dtypes.qint8, name="v", container="", shared_name="")
assign_op = state_ops.assign(
var_qint, constant_op.constant(np.array([42]), dtypes.qint8))
self.evaluate(assign_op)
cond = constant_op.constant(True, dtypes.bool)
v_f, v_t = control_flow_ops.ref_switch(var_qint, cond)
result = control_flow_ops.ref_merge([v_f, v_t])
self.evaluate(result)
@test_util.run_v1_only("b/120545219")
def testUInt64SwitchMerge(self):
with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
constant_uint64 = constant_op.constant(np.array([42]), dtypes.uint64)
cond = constant_op.constant(True, dtypes.bool)
v_f, v_t = control_flow_ops.switch(constant_uint64, cond)
result = control_flow_ops.merge([v_f, v_t])
self.evaluate(result)
@test_util.run_deprecated_v1
def testQIntArgAndRet(self):
@function.Defun(dtypes.qint8)
def func(x):
return x
with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
qint = constant_op.constant(np.array([42]), dtypes.qint8)
result = func(qint)
self.evaluate(result)
def testSparseIdentity(self):
st1 = sparse_tensor.SparseTensor([[0, 5]], ['x'], [10, 10])
st2 = control_flow_ops._Identity(st1)
self.assertAllEqual(st1.indices, st2.indices)
self.assertAllEqual(st1.values, st2.values)
self.assertAllEqual(st1.dense_shape, st2.dense_shape)
def testSparseEnterExit(self):
st1 = sparse_tensor.SparseTensor([[0, 5]], ['x'], [10, 10])
st2 = control_flow_ops._Enter(st1, "foo_1")
st3 = control_flow_ops.exit(st2)
self.assertAllEqual(st1.indices, st3.indices)
self.assertAllEqual(st1.values, st3.values)
self.assertAllEqual(st1.dense_shape, st3.dense_shape)
def _buildWhileWithShapeInvariants(self, shape_invariants):
r = constant_op.constant([1, 2])
def cond(_):
return False
def body(_):
return constant_op.constant([1])
return control_flow_ops.while_loop(
cond, body, [r], shape_invariants=shape_invariants)
def testWhileOutputShapeWithShapeInvariantsUnknownRank(self):
@def_function.function
def runTest():
while_output = self._buildWhileWithShapeInvariants(
[tensor_shape.TensorShape(None)])
self.assertIsNone(while_output.shape.rank)
runTest()
def testWhileOutputShapeWithShapeInvariantsPartialShape(self):
@def_function.function
def runTest():
while_output = self._buildWhileWithShapeInvariants(
[tensor_shape.TensorShape([None])])
self.assertAllEqual(while_output.shape.as_list(), [None])
runTest()
class ControlFlowContextCheckTest(test.TestCase):
def _getWhileTensor(self):
"""Creates and returns a tensor from a while context."""
tensor = []
def body(i):
if not tensor:
tensor.append(constant_op.constant(1))
return i + tensor[0]
control_flow_ops.while_loop(lambda i: i < 10, body, [0])
return tensor[0]
def _getCondTensor(self):
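    """Creates and returns a tensor from inside a cond branch."""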
cond_tensor = []
def true_fn():
if not cond_tensor:
cond_tensor.append(constant_op.constant(1))
return cond_tensor[0]
control_flow_ops.cond(
math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
return cond_tensor[0]
@test_util.run_v1_only("b/120545219")
def testInvalidContext(self):
# Accessing a while loop tensor outside of control flow is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while/Const_1' as input to 'Add' because 'while/Const_1' "
"is in a while loop. See info log for more details."):
math_ops.add(1, while_tensor)
@test_util.run_v1_only("b/120545219")
def testInvalidContextInCond(self):
# Accessing a while loop tensor in cond is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError, "Cannot use 'while/Const_1' as input to 'cond/Add' because "
"'while/Const_1' is in a while loop. See info log for more details."):
# TODO(skyewm): this passes if we return while_tensor directly instead
# of using it as input to another op.
control_flow_ops.cond(
math_ops.less(1, 2), lambda: math_ops.add(1, while_tensor),
lambda: constant_op.constant(0))
@test_util.run_v1_only("b/120545219")
def testInvalidContextInWhile(self):
# Accessing a while loop tensor in a different while loop is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while/Const_1' as input to 'while_1/Add' because they are "
"in different while loops. See info log for more details."):
control_flow_ops.while_loop(lambda i: i < 10,
lambda x: math_ops.add(1, while_tensor), [0])
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while/Const_1' as input to 'while_2/NextIteration' "
"because they are in different while loops. See info log for more "
"details."):
control_flow_ops.while_loop(lambda i: i < 10, lambda i: while_tensor, [0])
def testValidCondContext(self):
# Accessing a tensor from a cond context is OK (although dangerous).
cond_tensor = self._getCondTensor()
math_ops.add(1, cond_tensor)
def testValidCondContextBranches(self):
# Accessing a tensor from a cond context from the other branch's cond
# context is OK (although dangerous).
cond_tensor = []
def branch_fn():
if not cond_tensor:
cond_tensor.append(constant_op.constant(1))
return cond_tensor[0]
control_flow_ops.cond(math_ops.less(1, 2), branch_fn, branch_fn)
@test_util.run_v1_only("b/120545219")
def testValidWhileContext(self):
# Accessing a tensor in a nested while is OK.
def body(_):
c = constant_op.constant(1)
return control_flow_ops.while_loop(lambda i: i < 3, lambda i: i + c, [0])
control_flow_ops.while_loop(lambda i: i < 5, body, [0])
@test_util.run_v1_only("b/120545219")
def testValidNestedContexts(self):
# Accessing a tensor from a cond context in a while context, all inside an
# outer while context, is OK.
def body(_):
cond_tensor = self._getCondTensor()
# Create another cond containing the while loop for good measure
return control_flow_ops.cond(
math_ops.less(1, 2),
lambda: control_flow_ops.while_loop(lambda i: i < 3,
lambda i: i + cond_tensor, [0]),
lambda: constant_op.constant(0))
control_flow_ops.while_loop(lambda i: i < 5, body, [0])
@test_util.run_v1_only("b/120545219")
def testInvalidNestedContexts(self):
# Accessing a tensor from a while context in a different while context, all
# inside a cond context, is illegal.
def true_fn():
while_tensor = self._getWhileTensor()
return control_flow_ops.while_loop(lambda i: i < 3,
lambda i: i + while_tensor, [0])
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'cond/while/Const_1' as input to 'cond/while_1/add' because"
" they are in different while loops. See info log for more details."):
control_flow_ops.cond(
math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
class TupleTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testTensors(self):
for v1_first in [True, False]:
with self.cached_session():
v1 = variables.VariableV1([1.0])
add1 = math_ops.add(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
2.0)
v2 = variables.VariableV1([10.0])
add2 = math_ops.add(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
20.0)
t1, _, t2 = control_flow_ops.tuple([add1, None, add2])
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v1)
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v2)
if v1_first:
# Getting t1 initializes v2.
self.assertAllClose([3.0], self.evaluate(t1))
self.assertAllClose([10.0], self.evaluate(v2))
else:
# Getting t2 initializes v1.
self.assertAllClose([30.0], self.evaluate(t2))
self.assertAllClose([1.0], self.evaluate(v1))
@test_util.run_v1_only("b/120545219")
def testIndexedSlices(self):
for v1_first in [True, False]:
with self.cached_session():
v1 = variables.VariableV1(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
np.float32))
v1_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
v2 = variables.VariableV1(
np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
np.float32))
v2_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
g1 = array_ops.gather(st1.values, st1.indices)
g2 = array_ops.gather(st2.values, st2.indices)
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v1)
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v2)
if v1_first:
# Getting g1 initializes v2.
self.assertAllClose([[10.0, 11.0]], self.evaluate(g1))
self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
self.evaluate(v2))
else:
# Getting g2 initializes v1.
self.assertAllClose([[10.1, 11.1]], self.evaluate(g2))
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
self.evaluate(v1))
def testAcceptTensorsAsControlInputs(self):
with self.cached_session():
var = variables.VariableV1(0)
assign = state_ops.assign(var, 1)
t, = control_flow_ops.tuple(
[constant_op.constant(0)], control_inputs=[assign])
# Should trigger the assign.
self.evaluate(t)
      self.assertEqual(1, self.evaluate(var))
class AssertTest(test.TestCase):
@test_util.run_deprecated_v1
def testGuardedAssertDoesNotCopyWhenTrue(self):
if test_util.is_gpu_available():
self.skipTest("b/128646478 fails in opensource")
with self.session(use_gpu=True) as sess:
with ops.device(test.gpu_device_name()):
value = constant_op.constant(1.0)
with ops.device("/cpu:0"):
true = constant_op.constant(True)
guarded_assert = control_flow_ops.Assert(true, [value], name="guarded")
unguarded_assert = gen_logging_ops._assert(
true, [value], name="unguarded")
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
guarded_metadata = config_pb2.RunMetadata()
sess.run(guarded_assert, options=opts, run_metadata=guarded_metadata)
unguarded_metadata = config_pb2.RunMetadata()
sess.run(unguarded_assert, options=opts, run_metadata=unguarded_metadata)
guarded_nodestat_names = [
n.node_name
for d in guarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
unguarded_nodestat_names = [
n.node_name
for d in unguarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
guarded_memcpy_nodestat_names = [
n for n in guarded_nodestat_names if "MEMCPYDtoH" in n
]
unguarded_memcpy_nodestat_names = [
n for n in unguarded_nodestat_names if "MEMCPYDtoH" in n
]
if "GPU" in [d.device_type for d in device_lib.list_local_devices()]:
# A copy was performed for the unguarded assert
self.assertLess(0, len(unguarded_memcpy_nodestat_names),
str(unguarded_nodestat_names))
# No copy was performed for the guarded assert
self.assertEqual([], guarded_memcpy_nodestat_names)
class WhileOpBenchmark(test.Benchmark):
"""Evaluate the performance of while_loop op."""
def _getInitVariables(self):
batch_size = 10
image_size = 256
kernel_size = 3
depth = 16
init_step = constant_op.constant(-1)
image = variable_scope.get_variable(
"image",
initializer=random_ops.random_normal(
[batch_size, image_size, image_size, depth],
dtype=dtypes.float32,
stddev=1e-1))
kernel = variable_scope.get_variable(
"weights",
initializer=random_ops.truncated_normal(
[kernel_size, kernel_size, depth, depth],
dtype=dtypes.float32,
stddev=1e-1))
return init_step, image, kernel
def _runOneBenchmark(self,
default_device,
num_iters=10,
static_unroll=False,
steps=10):
"""Evaluate the while loop performance.
Args:
default_device: The default device to run all ops except the loop_body.
loop_body is always run on GPU.
num_iters: Number of iterations to run.
static_unroll: If true, run unrolled version; otherwise, run while_loop.
steps: Total number of repeated steps to run the loop.
Returns:
The duration of the run in seconds.
"""
def loop_body(i, x):
with ops.device("/gpu:0"):
# Always put loop body on GPU.
nx = nn_ops.conv2d(
input=x,
filter=kernel,
strides=[1, 1, 1, 1],
padding="SAME",
data_format="NHWC",
name="conv2d")
ni = math_ops.add(i, 1)
return ni, nx
ops.reset_default_graph()
with session.Session() as sess, ops.device(default_device):
# Get the initial id i, input x, and kernel.
i, x, kernel = self._getInitVariables()
variables.global_variables_initializer().run()
if static_unroll:
for _ in xrange(steps):
i, x = loop_body(i, x)
else:
i, x = control_flow_ops.while_loop(
lambda i, _: i < steps,
loop_body, [i, x],
parallel_iterations=steps,
swap_memory=True)
r = math_ops.reduce_sum(x)
dx, dk = gradients_impl.gradients(r, [x, kernel])
# Use group to avoid fetching back results.
r = control_flow_ops.group(dx, dk)
for _ in xrange(3):
# exclude warm up time
self.evaluate(r)
start_time = time.time()
for _ in xrange(num_iters):
self.evaluate(r)
return (time.time() - start_time) / num_iters
def benchmarkWhileOpCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_cross_device", iters=iters, wall_time=duration)
def benchmarkWhileOpSameDevicePlacement(self):
iters = 10
# Run all ops on the same GPU device.
duration = self._runOneBenchmark("gpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_same_device", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_cross_device_cpu", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollSameDevicePlacement(self):
iters = 10
# Run all ops on GPU.
duration = self._runOneBenchmark("gpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_same_device", iters=iters, wall_time=duration)
@test_util.with_control_flow_v2
class EagerTest(test.TestCase):
def testCond(self):
with context.eager_mode():
pred = math_ops.less(1, 2)
fn1 = lambda: [constant_op.constant(10)]
fn2 = lambda: [constant_op.constant(20)]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual(r.numpy(), 10)
      self.assertNotIsInstance(r, list)
# TODO(b/117279927): Re-enable once msan failure is fixed.
def DISABLED_testCondInDefun(self):
with context.eager_mode():
@eager_function.defun
def foo(pred):
# TODO(b/111124878): this only needs to output one element.
fn1 = lambda: (constant_op.constant(10), constant_op.constant(100))
fn2 = lambda: (constant_op.constant(20), constant_op.constant(200))
return control_flow_ops.cond(constant_op.constant(pred), fn1, fn2)
r = foo(True)
self.assertAllEqual(r[0].numpy(), 10)
self.assertNotIsInstance(r, list)
r = foo(False)
self.assertAllEqual(r[0].numpy(), 20)
      self.assertNotIsInstance(r, list)
def testWhileLoop(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(isum(tensor).numpy(), [46, 47, 48, 49, 50])
def testWhileLoopWithMaxIterations(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(
isum(tensor, maximum_iterations=3).numpy(),
[1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3])
@test_util.run_v1_only("b/120545219")
def testWhileWithMaximumIterationsAndSingleArgument(self):
with context.eager_mode():
tensor = constant_op.constant(0)
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [tensor], maximum_iterations=1)
self.assertEqual(1, r.numpy())
def testWithDependencies(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
t3 = control_flow_ops.with_dependencies(t1, t2)
self.assertAllEqual(t2.numpy(), t3.numpy())
def testTuple(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
tup1, tup2 = control_flow_ops.tuple([t1, t2])
self.assertAllEqual(t1.numpy(), tup1.numpy())
self.assertAllEqual(t2.numpy(), tup2.numpy())
@test_util.run_v1_only("b/120545219")
def testCase(self):
with context.eager_mode():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
[(x < y, f1), (x > z, f2)], default=f3, exclusive=True)
self.assertAllEqual(r1.numpy(), 17)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/control_flow_ops_py_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ExtractVolumePatches gradient."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class ExtractVolumePatchesGradTest(test.TestCase):
"""Gradient-checking for ExtractVolumePatches op."""
_TEST_CASES = [
{
'in_shape': [2, 5, 5, 5, 3],
'ksizes': [1, 1, 1, 1, 1],
'strides': [1, 2, 3, 4, 1],
},
{
'in_shape': [2, 7, 7, 7, 3],
'ksizes': [1, 3, 3, 3, 1],
'strides': [1, 1, 1, 1, 1],
},
{
'in_shape': [2, 5, 7, 6, 3],
'ksizes': [1, 3, 2, 2, 1],
'strides': [1, 1, 1, 1, 1],
},
{
'in_shape': [2, 7, 8, 6, 3],
'ksizes': [1, 2, 3, 2, 1],
'strides': [1, 2, 4, 3, 1],
},
]
@test_util.run_deprecated_v1
def testGradient(self):
# Set graph seed for determinism.
random_seed = 42
random_seed_lib.set_random_seed(random_seed)
with self.cached_session():
for test_case in self._TEST_CASES:
np.random.seed(random_seed)
in_shape = test_case['in_shape']
in_val = constant_op.constant(
np.random.random(in_shape), dtype=dtypes.float32)
for padding in ['VALID', 'SAME']:
out_val = array_ops.extract_volume_patches(
in_val, test_case['ksizes'], test_case['strides'], padding)
out_shape = out_val.get_shape().as_list()
err = gradient_checker.compute_gradient_error(in_val, in_shape,
out_val, out_shape)
print('extract_volume_patches gradient err: %.4e' % err)
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
  def testConstructGradientWithLargeVolumes(self):
batch_size = 4
planes = 8
height = 32
width = 32
ksize = 5
volumes = variable_scope.get_variable(
'inputs', (batch_size, planes, height, width, 1))
patches = array_ops.extract_volume_patches(
volumes,
ksizes=[1, ksize, ksize, ksize, 1],
strides=[1, 1, 1, 1, 1],
padding='SAME')
    # GitHub issue #20146: tf.extract_volume_patches() gradient is very slow
    # at graph construction time.
gradients = gradients_impl.gradients(patches, volumes)
# Won't time out.
self.assertIsNotNone(gradients)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/extract_volume_patches_grad_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
class NthElementTest(test.TestCase):
def _validateNthElement(self, inputs, dtype, n, reverse, expected_values):
np_expected_values = np.array(expected_values)
with self.cached_session(use_gpu=False) as sess:
inputs_op = ops.convert_to_tensor(inputs, dtype=dtype)
values_op = nn_ops.nth_element(inputs_op, n, reverse=reverse)
values = self.evaluate(values_op)
self.assertShapeEqual(np_expected_values, values_op)
self.assertAllClose(np_expected_values, values)
def testExample1(self):
inputs = [2.2, 4.4, 1.1, 5.5, 3.3]
self._validateNthElement(inputs, dtypes.float32, 1, False, 2.2)
self._validateNthElement(inputs, dtypes.float32, 1, True, 4.4)
def testExample2(self):
inputs = [[2.2, 4.4, 1.1], [5.5, 3.3, 6.6]]
self._validateNthElement(inputs, dtypes.float64, 2, False, [4.4, 6.6])
self._validateNthElement(inputs, dtypes.float64, 2, True, [1.1, 3.3])
def testExample3(self):
inputs = [[[2, 4, 1], [5, -3, 6]],
[[7, 9, -8], [9, 0, 4]]]
self._validateNthElement(inputs, dtypes.int32, 0, False,
[[1, -3], [-8, 0]])
self._validateNthElement(inputs, dtypes.int64, 0, True,
[[4, 6], [9, 9]])
def _testFloatLargeInput(self, input_shape):
inputs = np.random.random_sample(input_shape)
n = np.random.randint(input_shape[-1])
sort_inputs = np.sort(inputs)
expected_values = sort_inputs[..., n]
self._validateNthElement(
inputs, dtypes.float32, n, False, expected_values)
expected_values = sort_inputs[..., ::-1][..., n]
self._validateNthElement(
inputs, dtypes.float64, n, True, expected_values)
def _testIntLargeInput(self, input_shape):
inputs = np.random.randint(-1e3, 1e3, input_shape)
n = np.random.randint(input_shape[-1])
sort_inputs = np.sort(inputs)
expected_values = sort_inputs[..., n]
self._validateNthElement(
inputs, dtypes.int32, n, False, expected_values)
expected_values = sort_inputs[..., ::-1][..., n]
self._validateNthElement(
inputs, dtypes.int64, n, True, expected_values)
def _testLargeInput(self, input_shape):
self._testFloatLargeInput(input_shape)
self._testIntLargeInput(input_shape)
def testLargeInput(self):
self._testLargeInput([1])
self._testLargeInput([10])
self._testLargeInput([5, 10])
self._testLargeInput([50, 100])
self._testLargeInput([50, 10000])
self._testLargeInput([50, 10, 100])
self._testLargeInput([50, 10, 10, 100])
def _testEnumerateN(self, input_shape):
inputs = np.random.random_sample(input_shape)
sort_inputs = np.sort(inputs)
for n in range(input_shape[-1]):
expected_values = sort_inputs[..., n]
self._validateNthElement(
inputs, dtypes.float32, n, False, expected_values)
expected_values = sort_inputs[..., ::-1][..., n]
self._validateNthElement(
inputs, dtypes.float64, n, True, expected_values)
def testEnumerateN(self):
self._testEnumerateN([1])
self._testEnumerateN([10])
self._testEnumerateN([10, 10])
self._testEnumerateN([10, 10, 10])
self._testEnumerateN([10, 10, 10, 10])
@test_util.run_deprecated_v1
def testInvalidInput(self):
with self.assertRaisesRegexp(ValueError,
"at least rank 1 but is rank 0"):
nn_ops.nth_element(5, 0)
@test_util.run_deprecated_v1
def testInvalidInputAtEval(self):
with self.session(use_gpu=False):
v = array_ops.placeholder(dtype=dtypes.float32)
with self.assertRaisesOpError("Input must be >= 1-D"):
nn_ops.nth_element(v, 0).eval(feed_dict={v: 5.0})
@test_util.run_deprecated_v1
def testInvalidN(self):
with self.assertRaisesRegexp(ValueError,
"non-negative but is -1"):
nn_ops.nth_element([5], -1)
with self.assertRaisesRegexp(ValueError,
"scalar but has rank 1"):
nn_ops.nth_element([5, 6, 3], [1])
@test_util.run_deprecated_v1
def testInvalidNAtEval(self):
inputs = [[0.1, 0.2], [0.3, 0.4]]
with self.session(use_gpu=False):
n = array_ops.placeholder(dtypes.int32)
values = nn_ops.nth_element(inputs, n)
with self.assertRaisesOpError("Need n >= 0, got -7"):
values.eval(feed_dict={n: -7})
@test_util.run_deprecated_v1
def testNTooLarge(self):
inputs = [[0.1, 0.2], [0.3, 0.4]]
with self.assertRaisesRegexp(ValueError,
"must have last dimension > n = 2"):
nn_ops.nth_element(inputs, 2)
@test_util.run_deprecated_v1
def testNTooLargeAtEval(self):
inputs = [[0.1, 0.2], [0.3, 0.4]]
with self.session(use_gpu=False):
n = array_ops.placeholder(dtypes.int32)
values = nn_ops.nth_element(inputs, n)
with self.assertRaisesOpError(r"Input must have at least n\+1 columns"):
values.eval(feed_dict={n: 2})
@test_util.run_deprecated_v1
def testGradients(self):
with self.session(use_gpu=False) as sess:
inputs = array_ops.placeholder(dtypes.float32, shape=[3, 5])
values = nn_ops.nth_element(inputs, 3)
grad = sess.run(
gradients_impl.gradients(
values, inputs, grad_ys=[[-1., 2., 5.]]),
feed_dict={inputs: [[2., -1., 1000., 3., 1000.],
[1., 5., 2., 4., 3.],
[2., 2., 2., 2., 2.],
]})
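      # The incoming gradient for each row is routed to the entries equal to
      # the n-th value; when several entries tie (the two 1000s in row 0 and
      # the five 2s in row 2) it is split evenly among them.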
self.assertAllClose(grad[0], [[0, 0, -0.5, 0, -0.5],
[0, 0, 0, 2, 0],
[1, 1, 1, 1, 1],
])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/nth_element_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Conv1DTransposeTest(test.TestCase):
def testConv1DTransposeSingleStride(self):
with self.cached_session():
strides = [1, 1, 1]
# Input, output: [batch, width, depth]
x_shape = [2, 6, 3]
y_shape = [2, 6, 2]
# Filter: [kernel_width, output_depth, input_depth]
f_shape = [3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv1d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
for n in xrange(y_shape[0]):
for w in xrange(y_shape[1]):
for c in xrange(y_shape[2]):
target = 2 * 3.0
w_in = w > 0 and w < y_shape[1] - 1
if w_in:
target += 3.0
self.assertAllClose(target, value[n, w, c])
def testConv1DTransposeSame(self):
with self.cached_session():
strides = [1, 2, 1]
# Input, output: [batch, width, depth]
x_shape = [2, 4, 3]
y_shape = [2, 8, 2]
# Filter: [kernel_width, output_depth, input_depth]
f_shape = [3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv1d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[1]):
for w in xrange(y_shape[1]):
target = 3.0
# We add a case for locations divisible by the stride.
w_in = w % strides[1] == 0 and w > 0 and w < y_shape[1] - 1
if w_in:
target += 3.0
self.assertAllClose(target, value[n, w, k])
def testConv1DTransposeValid(self):
with self.cached_session():
strides = [1, 2, 1]
# Input, output: [batch, width, depth]
x_shape = [2, 4, 3]
y_shape = [2, 9, 2]
# Filter: [kernel_width, output_depth, input_depth]
f_shape = [3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv1d_transpose(
x, f, y_shape, strides=strides, padding="VALID")
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[1]):
for w in xrange(pad, y_shape[1] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
w_in = w % strides[1] == 0 and w > pad and w < y_shape[1] - 1 - pad
if w_in:
target += 3.0
cache_values[n, w, k] = target
# copy values in the border
cache_values[n, 0, k] = cache_values[n, 1, k]
cache_values[n, -1, k] = cache_values[n, -2, k]
cache_values[n, :, k] = cache_values[n, :, k]
self.assertAllClose(cache_values, value)
@test_util.run_deprecated_v1
def testGradient(self):
x_shape = [2, 4, 3]
f_shape = [3, 2, 3]
y_shape = [2, 8, 2]
strides = [1, 2, 1]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
f_val = np.random.random_sample(f_shape).astype(np.float64)
with self.cached_session():
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.conv1d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
print("conv1d_transpose gradient err = %g " % err)
err_tolerance = 0.0005
self.assertLess(err, err_tolerance)
def testConv1DTransposeSingleStrideNCW(self):
# `NCW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
strides = [1, 1, 1]
# Input, output: [batch, depth, width]
x_shape = [2, 3, 4]
y_shape = [2, 2, 4]
# Filter: [kernel_width, output_depth, input_depth]
f_shape = [3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv1d_transpose(
x, f, y_shape, strides=strides, padding="SAME", data_format="NCW")
value = self.evaluate(output)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[1]):
for w in xrange(y_shape[2]):
target = 2 * 3.0
w_in = w > 0 and w < y_shape[2] - 1
if w_in:
target += 3.0
self.assertAllClose(target, value[n, k, w])
def testConv1DTransposeSameNCW(self):
# `NCW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
strides = [1, 1, 2]
# Input, output: [batch, depth, width]
x_shape = [2, 3, 4]
y_shape = [2, 2, 8]
# Filter: [kernel_width, output_depth, input_depth]
f_shape = [3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv1d_transpose(
x, f, y_shape, strides=strides, padding="SAME", data_format="NCW")
value = self.evaluate(output)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[1]):
for w in xrange(y_shape[2]):
target = 3.0
# We add a case for locations divisible by the stride.
w_in = w % strides[2] == 0 and w > 0 and w < y_shape[2] - 1
if w_in:
target += 3.0
self.assertAllClose(target, value[n, k, w])
def testConv1DTransposeValidNCW(self):
# `NCW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
strides = [1, 1, 2]
# Input, output: [batch, depth, width]
x_shape = [2, 3, 4]
y_shape = [2, 2, 9]
# Filter: [kernel_width, output_depth, input_depth]
f_shape = [3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv1d_transpose(
x, f, y_shape, strides=strides, padding="VALID", data_format="NCW")
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[1]):
for w in xrange(pad, y_shape[2] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
              w_in = (w % strides[2] == 0 and w > pad and
                      w < y_shape[2] - 1 - pad)
if w_in:
target += 3.0
cache_values[n, k, w] = target
# copy values in the border
cache_values[n, k, 0] = cache_values[n, k, 1]
cache_values[n, k, -1] = cache_values[n, k, -2]
cache_values[n, k, :] = cache_values[n, k, :]
self.assertAllClose(cache_values, value)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/conv1d_transpose_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def _AddTest(test_class, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test_class, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test_class, test_name, fn)
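# For example, _AddTest(SelfAdjointEigTest, "SelfAdjointEig",
# "float32_5_5_True", fn) attaches `fn` as
# SelfAdjointEigTest.test_SelfAdjointEig_float32_5_5_True; the duplicate check
# guards against two dtype/shape combinations mapping to the same name.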
class SelfAdjointEigTest(test.TestCase):
@test_util.run_deprecated_v1
def testWrongDimensions(self):
# The input to self_adjoint_eig should be a tensor of
# at least rank 2.
scalar = constant_op.constant(1.)
with self.assertRaises(ValueError):
linalg_ops.self_adjoint_eig(scalar)
vector = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_ops.self_adjoint_eig(vector)
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
all_ops = []
with self.session(use_gpu=True) as sess:
for compute_v_ in True, False:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
if compute_v_:
e1, v1 = linalg_ops.self_adjoint_eig(matrix1)
e2, v2 = linalg_ops.self_adjoint_eig(matrix2)
all_ops += [e1, v1, e2, v2]
else:
e1 = linalg_ops.self_adjoint_eigvals(matrix1)
e2 = linalg_ops.self_adjoint_eigvals(matrix2)
all_ops += [e1, e2]
val = self.evaluate(all_ops)
self.assertAllEqual(val[0], val[2])
# The algorithm is slightly different for compute_v being True and False,
# so require approximate equality only here.
self.assertAllClose(val[2], val[4])
self.assertAllEqual(val[4], val[5])
self.assertAllEqual(val[1], val[3])
def testMatrixThatFailsWhenFlushingDenormsToZero(self):
# Test a 32x32 matrix which is known to fail if denorm floats are flushed to
# zero.
matrix = np.genfromtxt(
test.test_src_dir_path(
"python/kernel_tests/testdata/"
"self_adjoint_eig_fail_if_denorms_flushed.txt")).astype(np.float32)
self.assertEqual(matrix.shape, (32, 32))
matrix_tensor = constant_op.constant(matrix)
with self.session(use_gpu=True) as sess:
(e, v) = self.evaluate(linalg_ops.self_adjoint_eig(matrix_tensor))
self.assertEqual(e.size, 32)
self.assertAllClose(
np.matmul(v, v.transpose()), np.eye(32, dtype=np.float32), atol=2e-3)
self.assertAllClose(matrix,
np.matmul(np.matmul(v, np.diag(e)), v.transpose()))
def SortEigenDecomposition(e, v):
if v.ndim < 2:
return e, v
else:
perm = np.argsort(e, -1)
return np.take(e, perm, -1), np.take(v, perm, -1)
def EquilibrateEigenVectorPhases(x, y):
"""Equilibrate the phase of the Eigenvectors in the columns of `x` and `y`.
  Eigenvectors are only unique up to an arbitrary phase. This function rotates x
  such that it matches y. Precondition: The columns of x and y differ by a
  multiplicative complex phase factor only.
Args:
x: `np.ndarray` with Eigenvectors
y: `np.ndarray` with Eigenvectors
Returns:
`np.ndarray` containing an equilibrated version of x.
"""
phases = np.sum(np.conj(x) * y, -2, keepdims=True)
phases /= np.abs(phases)
return phases * x
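# Illustrative sketch (not part of the original test): for real eigenvectors a
# sign flip is the only possible phase, so with
#   x = np.array([[1.0], [2.0]]); y = -x
#   EquilibrateEigenVectorPhases(x, y)  # -> array([[-1.], [-2.]])
# x is flipped to match y before the columns are compared with assertAllClose.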
def _GetSelfAdjointEigTest(dtype_, shape_, compute_v_):
def CompareEigenVectors(self, x, y, tol):
x = EquilibrateEigenVectorPhases(x, y)
self.assertAllClose(x, y, atol=tol)
def CompareEigenDecompositions(self, x_e, x_v, y_e, y_v, tol):
num_batches = int(np.prod(x_e.shape[:-1]))
n = x_e.shape[-1]
x_e = np.reshape(x_e, [num_batches] + [n])
x_v = np.reshape(x_v, [num_batches] + [n, n])
y_e = np.reshape(y_e, [num_batches] + [n])
y_v = np.reshape(y_v, [num_batches] + [n, n])
for i in range(num_batches):
x_ei, x_vi = SortEigenDecomposition(x_e[i, :], x_v[i, :, :])
y_ei, y_vi = SortEigenDecomposition(y_e[i, :], y_v[i, :, :])
self.assertAllClose(x_ei, y_ei, atol=tol, rtol=tol)
CompareEigenVectors(self, x_vi, y_vi, tol)
def Test(self):
np.random.seed(1)
n = shape_[-1]
batch_shape = shape_[:-2]
np_dtype = dtype_.as_numpy_dtype
a = np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
if dtype_.is_complex:
a += 1j * np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
a += np.conj(a.T)
a = np.tile(a, batch_shape + (1, 1))
if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
atol = 1e-4
else:
atol = 1e-12
np_e, np_v = np.linalg.eigh(a)
with self.session(use_gpu=True):
if compute_v_:
tf_e, tf_v = linalg_ops.self_adjoint_eig(constant_op.constant(a))
# Check that V*diag(E)*V^T is close to A.
a_ev = math_ops.matmul(
math_ops.matmul(tf_v, array_ops.matrix_diag(tf_e)),
tf_v,
adjoint_b=True)
self.assertAllClose(self.evaluate(a_ev), a, atol=atol)
# Compare to numpy.linalg.eigh.
CompareEigenDecompositions(self, np_e, np_v, self.evaluate(tf_e),
self.evaluate(tf_v), atol)
else:
tf_e = linalg_ops.self_adjoint_eigvals(constant_op.constant(a))
self.assertAllClose(
np.sort(np_e, -1), np.sort(self.evaluate(tf_e), -1), atol=atol)
return Test
class SelfAdjointEigGradTest(test.TestCase):
pass # Filled in below
def _GetSelfAdjointEigGradTest(dtype_, shape_, compute_v_):
def Test(self):
np.random.seed(1)
n = shape_[-1]
batch_shape = shape_[:-2]
np_dtype = dtype_.as_numpy_dtype
def RandomInput():
a = np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
if dtype_.is_complex:
a += 1j * np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
a += np.conj(a.T)
a = np.tile(a, batch_shape + (1, 1))
return a
# Optimal stepsize for central difference is O(epsilon^{1/3}).
epsilon = np.finfo(np_dtype).eps
delta = 0.1 * epsilon**(1.0 / 3.0)
# tolerance obtained by looking at actual differences using
# np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
# after discarding one random input sample
_ = RandomInput()
if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
tol = 1e-2
else:
tol = 1e-7
with self.session(use_gpu=True):
def Compute(x):
e, v = linalg_ops.self_adjoint_eig(x)
# (complex) Eigenvectors are only unique up to an arbitrary phase
# We normalize the vectors such that the first component has phase 0.
top_rows = v[..., 0:1, :]
if dtype_.is_complex:
angle = -math_ops.angle(top_rows)
phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
else:
phase = math_ops.sign(top_rows)
v *= phase
return e, v
if compute_v_:
funcs = [lambda x: Compute(x)[0], lambda x: Compute(x)[1]]
else:
funcs = [linalg_ops.self_adjoint_eigvals]
for f in funcs:
theoretical, numerical = gradient_checker_v2.compute_gradient(
f,
[RandomInput()],
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
if __name__ == "__main__":
for compute_v in True, False:
for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.complex64,
dtypes_lib.complex128):
for size in 1, 2, 5, 10:
        for batch_dims in [(), (3,)] + [(3, 2)] * (size < 10):
shape = batch_dims + (size, size)
name = "%s_%s_%s" % (dtype.name, "_".join(map(str, shape)), compute_v)
_AddTest(SelfAdjointEigTest, "SelfAdjointEig", name,
_GetSelfAdjointEigTest(dtype, shape, compute_v))
_AddTest(SelfAdjointEigGradTest, "SelfAdjointEigGrad", name,
_GetSelfAdjointEigGradTest(dtype, shape, compute_v))
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Split Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
_TEST_DTYPES = (dtypes.float32, dtypes.float64, dtypes.complex64,
dtypes.complex128)
class SplitOpTest(test.TestCase):
def _makeData(self, shape, dtype):
data = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data -= 1j * data
return data
@test_util.run_deprecated_v1
def testShapeInference(self):
model_input = array_ops.placeholder(dtypes.float32, shape=(1, 10))
# check that we fail during static shape inference if sizes are known
with self.assertRaises(ValueError):
# pylint: disable=expression-not-assigned
array_ops.split(model_input, [4], axis=1)[0]
# pylint: enable=expression-not-assigned
model_input = array_ops.placeholder(dtypes.float32)
inp = np.zeros((1, 10))
# check that we still fail at runtime if the shapes were unknown
with self.cached_session(use_gpu=True) as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(array_ops.split(model_input, [4]), {model_input: inp})
# scalar Tensors are not permitted as num_splits
for axis in [0, -2]:
with self.cached_session(use_gpu=True) as sess:
with self.assertRaises(ValueError):
# pylint: disable=expression-not-assigned
sess.run(
array_ops.split(
array_ops.ones([4, 4]),
num_or_size_splits=constant_op.constant(2),
axis=axis))
# pylint: enable=expression-not-assigned
    # Test that the non-split dimensions keep their static shape, even if we
    # don't know how the split_dim will be split, because we do know the axis.
result = array_ops.split(
array_ops.ones([5, 2]), array_ops.constant([2, 1, 2]) * 1, axis=0)
self.assertEqual(result[0].shape[1], 2)
self.assertEqual(result[1].shape[1], 2)
self.assertEqual(result[2].shape[1], 2)
model_input2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
result = array_ops.split(model_input2, [2, 2], axis=0)[0]
with self.cached_session(use_gpu=True) as sess:
sess.run(result, feed_dict={model_input2: np.ones([4, 2])})
@test_util.run_deprecated_v1
def testFailWithoutExplicitNum(self):
size_splits = array_ops.placeholder(dtype=dtypes.int32, shape=[None])
value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
with self.session(use_gpu=True) as sess:
with self.assertRaises(ValueError) as context:
sess.run(array_ops.split(value, size_splits), {size_splits: [2, 2, 6]})
self.assertTrue("Cannot infer num from shape" in str(context.exception))
@test_util.run_in_graph_and_eager_modes
def testExplicitNum(self):
size_splits = array_ops.constant([2, 2, 6], dtype=dtypes.int32)
value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Eager and Graph modes raise different exceptions
with self.assertRaises((errors_impl.InvalidArgumentError, ValueError)):
array_ops.split(value, size_splits, num=4)
r = self.evaluate(array_ops.split(value, size_splits, num=3))
self.assertAllEqual(r[0], value[0:2])
self.assertAllEqual(r[1], value[2:4])
self.assertAllEqual(r[2], value[4:])
@test_util.run_in_graph_and_eager_modes
def testListOfScalarTensors(self):
a = math_ops.cast(5, dtypes.int32)
b = math_ops.cast(6, dtypes.int32)
value = np.random.rand(11, 11)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(value, [a, b]))
self.assertAllEqual(result[0], value[0:5, :])
self.assertAllEqual(result[1], value[5:, :])
def _RunAndVerifyVariable(self, dtype, large_num_splits=False):
# Random dims of rank 5
shape = np.random.randint(1, 5, size=5)
split_dim = np.random.randint(-5, 5)
if large_num_splits:
num_split = np.random.randint(16, 25)
else:
num_split = np.random.randint(2, 8)
size_splits = np.random.randint(2, 8, num_split, dtype=np.int32)
shape[split_dim] = np.sum(size_splits)
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
for i in range(num_split):
slices[split_dim] = slice(offset, offset + size_splits[i])
offset += size_splits[i]
self.assertAllEqual(result[i], inp[slices])
def _testSpecialCasesVariable(self):
inp = np.random.rand(4, 4).astype("f")
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, [4], 0))
self.assertAllEqual(result[0], inp)
result = self.evaluate(array_ops.split(inp, [-1, 3], 0))
self.assertAllEqual(result[0], inp[0:1, :])
self.assertAllEqual(result[1], inp[1:4, :])
def _testHugeNumberOfTensorsVariable(self, dtype):
num_split = 1000
size_splits = np.random.randint(1, 3, num_split, dtype=np.int32)
shape = [3, np.sum(size_splits)]
split_dim = 1
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
for i in range(num_split):
slices[split_dim] = slice(offset, offset + size_splits[i])
offset += size_splits[i]
self.assertAllEqual(result[i], inp[slices])
@test_util.run_in_graph_and_eager_modes
def testSpecialCasesVariable(self):
self._testSpecialCasesVariable()
for dtype in _TEST_DTYPES:
self._testHugeNumberOfTensorsVariable(dtype)
@test_util.run_in_graph_and_eager_modes
def testDegenerateVariable(self):
inp = np.random.rand(4, 4).astype("f")
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, [-1, 4], 0))
self.assertAllEqual(result[0], inp[0:0, :])
self.assertAllEqual(result[1], inp[0:4, :])
result = self.evaluate(array_ops.split(inp, [4, -1], 0))
self.assertAllEqual(result[0], inp[0:4, :])
self.assertAllEqual(result[1], inp[4:4, :])
result = self.evaluate(array_ops.split(inp, [-1, 4], 1))
self.assertAllEqual(result[0], inp[:, 0:0])
self.assertAllEqual(result[1], inp[:, 0:4])
result = self.evaluate(array_ops.split(inp, [4, -1], 1))
self.assertAllEqual(result[0], inp[:, 0:4])
self.assertAllEqual(result[1], inp[:, 4:4])
def _testGradientsSimpleVariable(self, dtype):
inp = self._makeData((4, 4), dtype)
with test_util.device(use_gpu=True):
inp_tensor = ops.convert_to_tensor(inp)
s = array_ops.split(inp_tensor, [1, 3], 1)
inp_grads = [
self._makeData((4, 1), dtype), self._makeData((4, 3), dtype)
]
grad_tensors = [constant_op.constant(x) for x in inp_grads]
grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[-1]
result = self.evaluate(grad)
self.assertAllEqual(result[:, 0:1], inp_grads[0])
self.assertAllEqual(result[:, 1:4], inp_grads[1])
@test_util.run_deprecated_v1
def testOutputShape(self):
for axis in [1, -1]:
with self.cached_session(use_gpu=True):
tensor = array_ops.placeholder(dtypes.float32, shape=[None, 12])
size_splits = [3, 7, 2]
outputs = array_ops.split(tensor, size_splits, axis)
for i, output in enumerate(outputs):
self.assertEqual(output.get_shape().as_list(), [None, size_splits[i]])
def _compare(self, x, dim, num):
np_ans = np.split(x, num, dim)
with test_util.device(use_gpu=True):
tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
out = self.evaluate(tf_ans)
    self.assertEqual(num, len(np_ans))
self.assertEqual(num, len(out))
for i in range(num):
self.assertAllEqual(np_ans[i], out[i])
self.assertShapeEqual(np_ans[i], tf_ans[i])
@test_util.run_in_graph_and_eager_modes
def testSplitRows(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((4, 4), dtype)
self._compare(inp, 0, 4)
@test_util.run_in_graph_and_eager_modes
def testSplitCols(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((4, 4), dtype)
self._compare(inp, 1, 4)
def _testEmpty(self, x, dim, num, expected_shape):
with test_util.device(use_gpu=True):
tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
out = self.evaluate(tf_ans)
self.assertEqual(x.size, 0)
self.assertEqual(len(out), num)
for i in range(num):
self.assertEqual(out[i].shape, expected_shape)
self.assertEqual(expected_shape, tf_ans[i].get_shape())
@test_util.run_in_graph_and_eager_modes
def testEmpty(self):
# Note: np.split returns a rank-0 empty ndarray
# if the input ndarray is empty.
for dtype in _TEST_DTYPES:
inp = self._makeData((8, 0, 21), dtype)
self._testEmpty(inp, 0, 2, (4, 0, 21))
self._testEmpty(inp, 0, 4, (2, 0, 21))
self._testEmpty(inp, 1, 4, (8, 0, 21))
self._testEmpty(inp, 2, 3, (8, 0, 7))
self._testEmpty(inp, 2, 7, (8, 0, 3))
@test_util.run_in_graph_and_eager_modes
def testIdentity(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((2, 2, 2), dtype)
self._compare(inp, 0, 1)
self._compare(inp, 1, 1)
self._compare(inp, 2, 1)
@test_util.run_in_graph_and_eager_modes
def testSplitDim0(self):
for dtype in _TEST_DTYPES:
self._compare(self._makeData((6, 10, 18), dtype), 0, 3)
self._compare(self._makeData((6, 7, 18), dtype), 0, 3)
self._compare(self._makeData((6, 7, 9), dtype), 0, 3)
def _RunAndVerify(self, dtype, large_num_splits=False):
# Random dims of rank 5
shape = np.random.randint(0, 5, size=5)
split_dim = np.random.randint(-5, 5)
if large_num_splits:
num_split = np.random.randint(9, 15)
else:
num_split = np.random.randint(2, 8)
shape[split_dim] = np.random.randint(2, 5) * num_split
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(
array_ops.split(
value=inp, num_or_size_splits=num_split, axis=split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
length = shape[split_dim] // num_split
for i in range(num_split):
slices[split_dim] = slice(offset, offset + length)
offset += length
self.assertAllEqual(result[i], inp[slices])
@test_util.run_in_graph_and_eager_modes
def testRandom(self):
for dtype in _TEST_DTYPES:
for _ in range(5):
self._RunAndVerify(dtype)
self._RunAndVerify(dtype, large_num_splits=True)
self._RunAndVerifyVariable(dtype)
self._RunAndVerifyVariable(dtype, large_num_splits=True)
def _testGradientsSimple(self, dtype):
inp = self._makeData((4, 4), dtype)
with self.cached_session(use_gpu=True):
inp_tensor = ops.convert_to_tensor(inp)
s = array_ops.split(value=inp_tensor, num_or_size_splits=4, axis=1)
      inp_grads = [self._makeData((4, 1), dtype) for _ in range(4)]
grad_tensors = [constant_op.constant(x) for x in inp_grads]
grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[0]
result = self.evaluate(grad)
for i in range(4):
self.assertAllEqual(result[:, i:i + 1], inp_grads[i])
@test_util.run_deprecated_v1
def testGradientsAll(self):
for dtype in _TEST_DTYPES:
self._testGradientsSimple(dtype)
self._testGradientsSimpleVariable(dtype)
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# split_dim greater than rank of input.
with self.assertRaises(ValueError):
array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=2)
# split dim less than -(rank of input)
with self.assertRaises(ValueError):
array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=-3)
# num_split does not evenly divide the size in split_dim.
with self.assertRaisesRegexp(ValueError, "should evenly divide"):
array_ops.split(value=[0, 1, 2, 3], num_or_size_splits=3, axis=0)
# Unknown split_dim.
splits = array_ops.split(
value=[[0, 1, 2, 3]],
num_or_size_splits=4,
axis=array_ops.placeholder(dtypes.int32))
for s in splits:
self.assertEqual([None, None], s.get_shape().as_list())
# Unknown split_dim and input shape.
splits = array_ops.split(
value=array_ops.placeholder(dtypes.float32),
num_or_size_splits=4,
axis=array_ops.placeholder(dtypes.int32))
for s in splits:
self.assertEqual(None, s.get_shape().ndims)
@test_util.run_deprecated_v1
def testVariableShapeFunction(self):
# size_splits too big
with self.assertRaises(ValueError):
array_ops.split([0, 1], [3, -1], axis=0)
# Correct inference of variable dimension
s0, s1 = array_ops.split([0, 1, 2], [2, -1], axis=0)
assert s0.shape.as_list() == [2]
assert s1.shape.as_list() == [1]
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123337890") # Error messages differ
def testNonexistentDimTensor(self):
x = array_ops.placeholder(dtypes.int32)
values = np.zeros([5, 30])
splits = array_ops.placeholder(dtypes.int32)
with self.assertRaisesRegexp(ValueError, "Cannot infer"):
y = array_ops.split(values, splits, axis=x)
splits = array_ops.placeholder(dtypes.int32, [3])
y = array_ops.split(values, splits, axis=x)
with self.session(use_gpu=True) as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"must have exactly one element"):
sess.run(y, {x: np.array([], dtype=np.int32), splits: [4, 11, 15]})
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/split_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.bcast_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops.gen_array_ops import broadcast_args
from tensorflow.python.ops.gen_array_ops import broadcast_gradient_args
from tensorflow.python.platform import test
class BcastOpsTest(test.TestCase):
def _GetBroadcastShape(self, xs, ys):
with self.cached_session() as sess:
return sess.run(broadcast_args(xs, ys))
def _GetGradientArgs(self, xs, ys):
with self.cached_session() as sess:
return sess.run(broadcast_gradient_args(xs, ys))
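  # broadcast_gradient_args returns, for each input shape, the axes that were
  # broadcast and therefore must be summed over when reducing a gradient back
  # to that input's shape.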
@test_util.run_deprecated_v1
def testBasic(self):
r = self._GetBroadcastShape([2, 3, 5], [1])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([1], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 3, 5], [5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([5], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 3, 5], [3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([3, 5], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 3, 5], [3, 1])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([3, 1], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 1, 5], [3, 1])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([3, 1], [2, 1, 5])
self.assertAllEqual(r, [2, 3, 5])
@test_util.run_deprecated_v1
def testBasicGradient(self):
r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
r0, r1 = self._GetGradientArgs([1], [2, 3, 5])
self.assertAllEqual(r0, [0, 1, 2])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 3, 5], [5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1])
r0, r1 = self._GetGradientArgs([5], [2, 3, 5])
self.assertAllEqual(r0, [0, 1])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 3, 5], [3, 5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0])
r0, r1 = self._GetGradientArgs([3, 5], [2, 3, 5])
self.assertAllEqual(r0, [0])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 3, 5], [3, 1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 2])
r0, r1 = self._GetGradientArgs([3, 1], [2, 3, 5])
self.assertAllEqual(r0, [0, 2])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 1, 5], [3, 1])
self.assertAllEqual(r0, [1])
self.assertAllEqual(r1, [0, 2])
r0, r1 = self._GetGradientArgs([3, 1], [2, 1, 5])
self.assertAllEqual(r0, [0, 2])
self.assertAllEqual(r1, [1])
@test_util.run_deprecated_v1
def testZeroDims(self):
r = self._GetBroadcastShape([2, 0, 3, 0, 5], [3, 0, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
r = self._GetBroadcastShape([3, 0, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
r = self._GetBroadcastShape([2, 0, 3, 0, 5], [3, 1, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
r = self._GetBroadcastShape([3, 1, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
@test_util.run_deprecated_v1
def testZeroDimsGradient(self):
r0, r1 = self._GetGradientArgs([2, 0, 3, 0, 5], [3, 0, 5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1])
r0, r1 = self._GetGradientArgs([3, 0, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r0, [0, 1])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 0, 3, 0, 5], [3, 1, 5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 3])
r0, r1 = self._GetGradientArgs([3, 1, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r0, [0, 1, 3])
self.assertAllEqual(r1, [])
@test_util.run_deprecated_v1
def testDataTypes(self):
for dtype in [dtypes.int32, dtypes.int64]:
r = self._GetBroadcastShape(
constant_op.constant([2, 3, 5], dtype=dtype),
constant_op.constant([1], dtype=dtype))
self.assertAllEqual(r, [2, 3, 5])
r0, r1 = self._GetGradientArgs(
constant_op.constant([2, 3, 5], dtype=dtype),
constant_op.constant([1], dtype=dtype))
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/bcast_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for confusion_matrix_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class ConfusionMatrixTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testExample(self):
"""This is a test of the example provided in pydoc."""
with self.cached_session():
self.assertAllEqual([
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1]
], self.evaluate(confusion_matrix.confusion_matrix(
labels=[1, 2, 4], predictions=[2, 2, 4])))
def _testConfMatrix(self, labels, predictions, truth, weights=None,
num_classes=None):
with self.cached_session():
dtype = predictions.dtype
ans = confusion_matrix.confusion_matrix(
labels, predictions, dtype=dtype, weights=weights,
num_classes=num_classes).eval()
self.assertAllClose(truth, ans, atol=1e-10)
self.assertEqual(ans.dtype, dtype)
def _testBasic(self, dtype):
labels = np.arange(5, dtype=dtype)
predictions = np.arange(5, dtype=dtype)
truth = np.asarray(
[[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]],
dtype=dtype)
self._testConfMatrix(labels=labels, predictions=predictions, truth=truth)
@test_util.run_deprecated_v1
def testInt32Basic(self):
self._testBasic(dtype=np.int32)
@test_util.run_deprecated_v1
def testInt64Basic(self):
self._testBasic(dtype=np.int64)
def _testConfMatrixOnTensors(self, tf_dtype, np_dtype):
with self.cached_session() as sess:
m_neg = array_ops.placeholder(dtype=dtypes.float32)
m_pos = array_ops.placeholder(dtype=dtypes.float32)
s = array_ops.placeholder(dtype=dtypes.float32)
neg = random_ops.random_normal(
[20], mean=m_neg, stddev=s, dtype=dtypes.float32)
pos = random_ops.random_normal(
[20], mean=m_pos, stddev=s, dtype=dtypes.float32)
data = array_ops.concat([neg, pos], 0)
data = math_ops.cast(math_ops.round(data), tf_dtype)
data = math_ops.minimum(math_ops.maximum(data, 0), 1)
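      # `data` now holds binary predictions in {0, 1}; the feed below uses
      # m_neg=0.0 and m_pos=1.0, so the first 20 samples cluster around 0 and
      # the last 20 around 1.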
lab = array_ops.concat(
[
array_ops.zeros(
[20], dtype=tf_dtype), array_ops.ones(
[20], dtype=tf_dtype)
],
0)
cm = confusion_matrix.confusion_matrix(
lab, data, dtype=tf_dtype, num_classes=2)
d, l, cm_out = sess.run([data, lab, cm], {m_neg: 0.0, m_pos: 1.0, s: 1.0})
truth = np.zeros([2, 2], dtype=np_dtype)
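      # Build the reference confusion matrix by counting (label, prediction)
      # pairs.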
for i in xrange(len(d)):
truth[l[i], d[i]] += 1
self.assertEqual(cm_out.dtype, np_dtype)
self.assertAllClose(cm_out, truth, atol=1e-10)
@test_util.run_deprecated_v1
def testOnTensors_int32(self):
self._testConfMatrixOnTensors(dtypes.int32, np.int32)
@test_util.run_deprecated_v1
def testOnTensors_int64(self):
self._testConfMatrixOnTensors(dtypes.int64, np.int64)
def _testDifferentLabelsInPredictionAndTarget(self, dtype):
labels = np.asarray([4, 5, 6], dtype=dtype)
predictions = np.asarray([1, 2, 3], dtype=dtype)
truth = np.asarray(
[[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0]],
dtype=dtype)
self._testConfMatrix(labels=labels, predictions=predictions, truth=truth)
@test_util.run_deprecated_v1
def testInt32DifferentLabels(self, dtype=np.int32):
self._testDifferentLabelsInPredictionAndTarget(dtype)
@test_util.run_deprecated_v1
def testInt64DifferentLabels(self, dtype=np.int64):
self._testDifferentLabelsInPredictionAndTarget(dtype)
def _testMultipleLabels(self, dtype):
labels = np.asarray([1, 1, 2, 3, 5, 1, 3, 6, 3, 1], dtype=dtype)
predictions = np.asarray([1, 1, 2, 3, 5, 6, 1, 2, 3, 4], dtype=dtype)
truth = np.asarray(
[[0, 0, 0, 0, 0, 0, 0],
[0, 2, 0, 0, 1, 0, 1],
[0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 2, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 0]],
dtype=dtype)
self._testConfMatrix(labels=labels, predictions=predictions, truth=truth)
@test_util.run_deprecated_v1
def testInt32MultipleLabels(self, dtype=np.int32):
self._testMultipleLabels(dtype)
@test_util.run_deprecated_v1
def testInt64MultipleLabels(self, dtype=np.int64):
self._testMultipleLabels(dtype)
@test_util.run_deprecated_v1
def testWeighted(self):
labels = np.arange(5, dtype=np.int32)
predictions = np.arange(5, dtype=np.int32)
weights = np.arange(5, dtype=np.int32)
truth = np.asarray(
[[0, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 2, 0, 0],
[0, 0, 0, 3, 0],
[0, 0, 0, 0, 4]],
dtype=np.int32)
self._testConfMatrix(
labels=labels, predictions=predictions, weights=weights, truth=truth)
@test_util.run_deprecated_v1
def testLabelsTooLarge(self):
labels = np.asarray([1, 1, 0, 3, 5], dtype=np.int32)
predictions = np.asarray([2, 1, 0, 2, 2], dtype=np.int32)
with self.assertRaisesOpError("`labels`.*x < y"):
self._testConfMatrix(
labels=labels, predictions=predictions, num_classes=3, truth=None)
def testLabelsNegative(self):
labels = np.asarray([1, 1, 0, -1, -1], dtype=np.int32)
predictions = np.asarray([2, 1, 0, 2, 2], dtype=np.int32)
with self.assertRaisesOpError("`labels`.*negative values"):
self._testConfMatrix(
labels=labels, predictions=predictions, num_classes=3, truth=None)
@test_util.run_deprecated_v1
def testPredictionsTooLarge(self):
labels = np.asarray([1, 1, 0, 2, 2], dtype=np.int32)
predictions = np.asarray([2, 1, 0, 3, 5], dtype=np.int32)
with self.assertRaisesOpError("`predictions`.*x < y"):
self._testConfMatrix(
labels=labels, predictions=predictions, num_classes=3, truth=None)
def testPredictionsNegative(self):
labels = np.asarray([1, 1, 0, 2, 2], dtype=np.int32)
predictions = np.asarray([2, 1, 0, -1, -1], dtype=np.int32)
with self.assertRaisesOpError("`predictions`.*negative values"):
self._testConfMatrix(
labels=labels, predictions=predictions, num_classes=3, truth=None)
@test_util.run_deprecated_v1
def testInvalidRank_predictionsTooBig(self):
labels = np.asarray([1, 2, 3])
predictions = np.asarray([[1, 2, 3]])
self.assertRaisesRegexp(ValueError, "an not squeeze dim",
confusion_matrix.confusion_matrix, predictions,
labels)
@test_util.run_deprecated_v1
def testInvalidRank_predictionsTooSmall(self):
labels = np.asarray([[1, 2, 3]])
predictions = np.asarray([1, 2, 3])
self.assertRaisesRegexp(ValueError, "an not squeeze dim",
confusion_matrix.confusion_matrix, predictions,
labels)
@test_util.run_deprecated_v1
def testInputDifferentSize(self):
labels = np.asarray([1, 2])
predictions = np.asarray([1, 2, 3])
self.assertRaisesRegexp(ValueError, "must be equal",
confusion_matrix.confusion_matrix, predictions,
labels)
def testOutputIsInt32(self):
labels = np.arange(2)
predictions = np.arange(2)
with self.cached_session():
cm = confusion_matrix.confusion_matrix(
labels, predictions, dtype=dtypes.int32)
tf_cm = self.evaluate(cm)
self.assertEqual(tf_cm.dtype, np.int32)
def testOutputIsInt64(self):
labels = np.arange(2)
predictions = np.arange(2)
with self.cached_session():
cm = confusion_matrix.confusion_matrix(
labels, predictions, dtype=dtypes.int64)
tf_cm = self.evaluate(cm)
self.assertEqual(tf_cm.dtype, np.int64)
class RemoveSqueezableDimensionsTest(test.TestCase):
@test_util.run_deprecated_v1
def testBothScalarShape(self):
label_values = 1.0
prediction_values = 0.0
static_labels, static_predictions = (
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values))
labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.float32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder))
with self.cached_session():
self.assertAllEqual(label_values, self.evaluate(static_labels))
self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
label_values, dynamic_labels.eval(feed_dict=feed_dict))
self.assertAllEqual(
prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testSameShape(self):
label_values = np.ones(shape=(2, 3, 1))
prediction_values = np.zeros_like(label_values)
static_labels, static_predictions = (
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder))
with self.cached_session():
self.assertAllEqual(label_values, self.evaluate(static_labels))
self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
label_values, dynamic_labels.eval(feed_dict=feed_dict))
self.assertAllEqual(
prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testSameShapeExpectedRankDiff0(self):
label_values = np.ones(shape=(2, 3, 1))
prediction_values = np.zeros_like(label_values)
static_labels, static_predictions = (
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values, expected_rank_diff=0))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder, expected_rank_diff=0))
with self.cached_session():
self.assertAllEqual(label_values, self.evaluate(static_labels))
self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
label_values, dynamic_labels.eval(feed_dict=feed_dict))
self.assertAllEqual(
prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testSqueezableLabels(self):
label_values = np.ones(shape=(2, 3, 1))
prediction_values = np.zeros(shape=(2, 3))
static_labels, static_predictions = (
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder))
expected_label_values = np.reshape(label_values, newshape=(2, 3))
with self.cached_session():
self.assertAllEqual(expected_label_values, self.evaluate(static_labels))
self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
expected_label_values, dynamic_labels.eval(feed_dict=feed_dict))
self.assertAllEqual(
prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testSqueezableLabelsExpectedRankDiffPlus1(self):
label_values = np.ones(shape=(2, 3, 1))
prediction_values = np.zeros(shape=(2, 3, 5))
static_labels, static_predictions = (
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values, expected_rank_diff=1))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder, expected_rank_diff=1))
expected_label_values = np.reshape(label_values, newshape=(2, 3))
with self.cached_session():
self.assertAllEqual(expected_label_values, self.evaluate(static_labels))
self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
expected_label_values, dynamic_labels.eval(feed_dict=feed_dict))
self.assertAllEqual(
prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testSqueezablePredictions(self):
label_values = np.ones(shape=(2, 3))
prediction_values = np.zeros(shape=(2, 3, 1))
static_labels, static_predictions = (
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder))
expected_prediction_values = np.reshape(prediction_values, newshape=(2, 3))
with self.cached_session():
self.assertAllEqual(label_values, self.evaluate(static_labels))
self.assertAllEqual(expected_prediction_values,
self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
label_values, dynamic_labels.eval(feed_dict=feed_dict))
self.assertAllEqual(
expected_prediction_values,
dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testSqueezablePredictionsExpectedRankDiffMinus1(self):
label_values = np.ones(shape=(2, 3, 5))
prediction_values = np.zeros(shape=(2, 3, 1))
static_labels, static_predictions = (
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values, expected_rank_diff=-1))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder, expected_rank_diff=-1))
expected_prediction_values = np.reshape(prediction_values, newshape=(2, 3))
with self.cached_session():
self.assertAllEqual(label_values, self.evaluate(static_labels))
self.assertAllEqual(expected_prediction_values,
self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
label_values, dynamic_labels.eval(feed_dict=feed_dict))
self.assertAllEqual(
expected_prediction_values,
dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testUnsqueezableLabels(self):
label_values = np.ones(shape=(2, 3, 2))
prediction_values = np.zeros(shape=(2, 3))
with self.assertRaisesRegexp(ValueError, r"Can not squeeze dim\[2\]"):
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values)
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder))
with self.cached_session():
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Can not squeeze dim\[2\]"):
dynamic_labels.eval(feed_dict=feed_dict)
self.assertAllEqual(
prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testUnsqueezablePredictions(self):
label_values = np.ones(shape=(2, 3))
prediction_values = np.zeros(shape=(2, 3, 2))
with self.assertRaisesRegexp(ValueError, r"Can not squeeze dim\[2\]"):
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values)
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder))
with self.cached_session():
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
label_values, dynamic_labels.eval(feed_dict=feed_dict))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Can not squeeze dim\[2\]"):
dynamic_predictions.eval(feed_dict=feed_dict)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/confusion_matrix_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import time
import unittest
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib
@test_util.run_all_in_graph_and_eager_modes
class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
def testNonBatchMatrix(self):
matrix = [[1, 2, 3], [4, 5, 6]] # Shape (2, 3)
expected_transposed = [[1, 4], [2, 5], [3, 6]] # Shape (3, 2)
transposed = array_ops.matrix_transpose(matrix)
self.assertEqual((3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed)
def testConjugate(self):
m = [[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j, 6 + 6j]]
expected_transposed = [[1 - 1j, 4 - 4j], [2 - 2j, 5 - 5j], [3 - 3j, 6 - 6j]]
matrix = ops.convert_to_tensor(m)
transposed = array_ops.matrix_transpose(matrix, conjugate=True)
self.assertEqual((3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed)
def testBatchMatrix(self):
matrix_0 = [[1, 2, 3], [4, 5, 6]]
matrix_0_t = [[1, 4], [2, 5], [3, 6]]
matrix_1 = [[11, 22, 33], [44, 55, 66]]
matrix_1_t = [[11, 44], [22, 55], [33, 66]]
batch_matrix = [matrix_0, matrix_1] # Shape (2, 2, 3)
expected_transposed = [matrix_0_t, matrix_1_t] # Shape (2, 3, 2)
transposed = array_ops.matrix_transpose(batch_matrix)
self.assertEqual((2, 3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed)
def testNonBatchMatrixDynamicallyDefined(self):
    # needs explicit `constant` because lists are not automatically
    # converted to tensors when applying `transpose` below
matrix = constant_op.constant([[1, 2, 3], [4, 5, 6]]) # Shape (2, 3)
expected_transposed = [[1, 4], [2, 5], [3, 6]] # Shape (3, 2)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
])
def transpose(matrix):
self.assertIs(matrix.shape.ndims, None)
return array_ops.matrix_transpose(matrix)
self.assertAllEqual(expected_transposed, transpose(matrix))
def testBatchMatrixDynamicallyDefined(self):
matrix_0 = [[1, 2, 3], [4, 5, 6]]
matrix_0_t = [[1, 4], [2, 5], [3, 6]]
matrix_1 = [[11, 22, 33], [44, 55, 66]]
matrix_1_t = [[11, 44], [22, 55], [33, 66]]
    # needs explicit `constant` because lists are not automatically
    # converted to tensors when applying `transpose` below
batch_matrix = constant_op.constant([matrix_0, matrix_1]) # Shape (2, 2, 3)
expected_transposed = [matrix_0_t, matrix_1_t] # Shape (2, 3, 2)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
])
def transpose(matrix):
self.assertIs(matrix.shape.ndims, None)
return array_ops.matrix_transpose(matrix)
self.assertAllEqual(expected_transposed, transpose(batch_matrix))
def testTensorWithStaticRankLessThanTwoRaisesBecauseNotAMatrix(self):
vector = [1, 2, 3]
with self.assertRaisesRegexp(ValueError, "should be a "):
array_ops.matrix_transpose(vector)
class BooleanMaskTest(test_util.TensorFlowTestCase):
def setUp(self):
self.rng = np.random.RandomState(42)
def CheckVersusNumpy(self, ndims_mask, arr_shape, make_mask=None, axis=None):
"""Check equivalence between boolean_mask and numpy masking."""
if make_mask is None:
make_mask = lambda shape: self.rng.randint(0, 2, size=shape).astype(bool)
arr = np.random.rand(*arr_shape)
mask = make_mask(arr_shape[:ndims_mask])
if axis is not None:
mask = make_mask(arr_shape[axis:ndims_mask + axis])
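    # Reference result: apply the boolean mask along the requested axis using
    # numpy advanced indexing.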
if axis is None or axis == 0:
masked_arr = arr[mask]
elif axis == 1:
masked_arr = arr[:, mask]
elif axis == 2:
masked_arr = arr[:, :, mask]
with self.cached_session():
masked_tensor = array_ops.boolean_mask(arr, mask, axis=axis)
# Leading dimension size of masked_tensor is always unknown until runtime
      # since we don't know how many elements will be kept.
leading = 1 if axis is None else axis + 1
self.assertAllEqual(masked_tensor.get_shape()[leading:],
masked_arr.shape[leading:])
self.assertAllClose(masked_arr, masked_tensor.eval())
@test_util.run_deprecated_v1
def testMaskDim1ArrDim2Axis1(self):
ndims_mask = 1
for arr_shape in [(1, 1), (2, 2), (2, 5)]:
self.CheckVersusNumpy(ndims_mask, arr_shape, axis=1)
@test_util.run_deprecated_v1
def testMaskDim2ArrDim2Axis1(self):
ndims_mask = 2
for arr_shape in [(1, 1), (2, 2), (2, 5)]:
self.CheckVersusNumpy(ndims_mask, arr_shape, axis=1)
@test_util.run_deprecated_v1
def testMaskDim1ArrDim1(self):
ndims_mask = 1
for arr_shape in [(1,), (2,), (3,), (10,)]:
self.CheckVersusNumpy(ndims_mask, arr_shape)
@test_util.run_deprecated_v1
def testMaskDim1ArrDim2(self):
ndims_mask = 1
for arr_shape in [(1, 1), (2, 2), (2, 5)]:
self.CheckVersusNumpy(ndims_mask, arr_shape)
@test_util.run_deprecated_v1
def testMaskDim2ArrDim2(self):
ndims_mask = 2
for arr_shape in [(1, 1), (2, 2), (2, 5)]:
self.CheckVersusNumpy(ndims_mask, arr_shape)
@test_util.run_deprecated_v1
def testMaskDim2ArrDim3(self):
ndims_mask = 2
for arr_shape in [(1, 1, 1), (1, 2, 2), (2, 2, 1)]:
self.CheckVersusNumpy(ndims_mask, arr_shape)
@test_util.run_deprecated_v1
def testEmptyInput2D(self):
mask = np.array([True, False])
arr = np.array([[], []]).astype(np.float32)
numpy_result = arr[mask]
tf_result = array_ops.boolean_mask(arr, mask)
self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
with self.cached_session():
self.assertAllClose(numpy_result, tf_result.eval())
@test_util.run_deprecated_v1
def testEmptyInput1D(self):
mask = np.array([]).astype(bool)
arr = np.array([]).astype(np.float32)
numpy_result = arr[mask]
tf_result = array_ops.boolean_mask(arr, mask)
self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
with self.cached_session():
self.assertAllClose(numpy_result, tf_result.eval())
@test_util.run_deprecated_v1
def testEmptyOutput(self):
make_mask = lambda shape: np.zeros(shape, dtype=bool)
for ndims_mask in range(1, 4):
for ndims_arr in range(ndims_mask, ndims_mask + 3):
for _ in range(3):
arr_shape = np.random.randint(1, 5, size=ndims_arr)
self.CheckVersusNumpy(ndims_mask, arr_shape, make_mask=make_mask)
@test_util.run_deprecated_v1
def testWorksWithDimensionsEqualToNoneDuringGraphBuild(self):
# The rank of the mask tensor must be specified. This is explained
# in the docstring as well.
with self.cached_session() as sess:
ph_tensor = array_ops.placeholder(dtypes.int32, shape=None)
ph_mask = array_ops.placeholder(dtypes.bool, shape=[None])
arr = np.array([[1, 2], [3, 4]])
mask = np.array([False, True])
masked_tensor = sess.run(
array_ops.boolean_mask(ph_tensor, ph_mask),
feed_dict={
ph_tensor: arr,
ph_mask: mask
})
np.testing.assert_allclose(masked_tensor, arr[mask])
@test_util.run_deprecated_v1
def testMaskDimensionsSetToNoneRaises(self):
# The rank of the mask tensor must be specified. This is explained
# in the docstring as well.
with self.cached_session():
tensor = array_ops.placeholder(dtypes.int32, shape=[None, 2])
mask = array_ops.placeholder(dtypes.bool, shape=None)
with self.assertRaisesRegexp(ValueError, "dimensions must be specified"):
array_ops.boolean_mask(tensor, mask)
def testMaskHasMoreDimsThanTensorRaises(self):
mask = [[True, True], [False, False]]
tensor = [1, 2, 3, 4]
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "incompatible"):
array_ops.boolean_mask(tensor, mask).eval()
def testMaskIsScalarRaises(self):
mask = True
tensor = 1
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "mask.*scalar"):
array_ops.boolean_mask(tensor, mask).eval()
def testMaskShapeDifferentThanFirstPartOfTensorShapeRaises(self):
mask = [True, True, True]
tensor = [[1, 2], [3, 4]]
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "incompatible"):
array_ops.boolean_mask(tensor, mask).eval()
@test_util.run_deprecated_v1
def testStringMask(self):
# Reproduces b/111171330, where the optimized boolean_mask graph would
# be incorrectly placed on GPU.
with ops.Graph().as_default():
tile_placeholder = array_ops.placeholder(dtypes.int32, [2])
string_tensor = array_ops.tile([["hello"]], tile_placeholder)
bool_tensor = array_ops.tile([[True]], tile_placeholder)
masked_tensor = array_ops.boolean_mask(string_tensor, bool_tensor)
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.shape_optimization = 1
config.gpu_options.per_process_gpu_memory_fraction = 0.3
with session.Session(config=config) as sess:
result = sess.run(masked_tensor, feed_dict={tile_placeholder: [2, 2]})
self.assertAllEqual([b"hello", b"hello", b"hello", b"hello"], result)
@test_util.run_all_in_graph_and_eager_modes
class OperatorShapeTest(test_util.TensorFlowTestCase):
def testExpandScalar(self):
scalar = "hello"
scalar_expanded = array_ops.expand_dims(scalar, [0])
self.assertEqual(scalar_expanded.get_shape(), (1,))
def testSqueezeScalar(self):
scalar = "hello"
scalar_squeezed = array_ops.squeeze(scalar, ())
self.assertEqual(scalar_squeezed.get_shape(), ())
def testSqueezeMatrix(self):
matrix = [[1, 2, 3]]
matrix_squeezed = array_ops.squeeze(matrix, [0])
self.assertEqual(matrix_squeezed.get_shape(), (3))
with self.assertRaisesRegexp(
Exception, "Can not squeeze dim.1., expected a dimension of 1, got 3"):
matrix_squeezed = array_ops.squeeze(matrix, [1])
def testSqueezeScalarDim(self):
matrix = [[1, 2, 3]]
matrix_squeezed = array_ops.squeeze(matrix, 0)
self.assertEqual(matrix_squeezed.get_shape(), (3))
def testExpandDimsWithNonScalarDim(self):
with self.assertRaisesRegexp(Exception,
"must be a tensor with a single value"):
array_ops.expand_dims(1, axis=[0, 1])
class ReverseV2Test(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testReverse0DimAuto(self):
x_np = 4
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
x_tf = array_ops.reverse_v2(x_np, []).eval()
self.assertAllEqual(x_tf, x_np)
def _reverse1DimAuto(self, np_dtype):
x_np = np.array([1, 200, 3, 40, 5], dtype=np_dtype)
for use_gpu in [False, True]:
for axis_dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(use_gpu=use_gpu):
x_tf = array_ops.reverse_v2(
x_np, constant_op.constant([0], dtype=axis_dtype)).eval()
self.assertAllEqual(x_tf, np.asarray(x_np)[::-1])
def _reverse2DimAuto(self, np_dtype):
x_np = np.array([[1, 200, 3], [4, 5, 60]], dtype=np_dtype)
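    # Axes 0 and -2 both refer to the row dimension and axes 1 and -1 to the
    # column dimension, so the reversed results below should match pairwise.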
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
for use_gpu in [False, True]:
for axis_dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(use_gpu=use_gpu):
x_tf_1 = reverse_f(x_np,
constant_op.constant([0],
dtype=axis_dtype)).eval()
x_tf_2 = reverse_f(x_np,
constant_op.constant([-2],
dtype=axis_dtype)).eval()
x_tf_3 = reverse_f(x_np,
constant_op.constant([1],
dtype=axis_dtype)).eval()
x_tf_4 = reverse_f(x_np,
constant_op.constant([-1],
dtype=axis_dtype)).eval()
x_tf_5 = reverse_f(x_np,
constant_op.constant([1, 0],
dtype=axis_dtype)).eval()
self.assertAllEqual(x_tf_1, np.asarray(x_np)[::-1, :])
self.assertAllEqual(x_tf_2, np.asarray(x_np)[::-1, :])
self.assertAllEqual(x_tf_3, np.asarray(x_np)[:, ::-1])
self.assertAllEqual(x_tf_4, np.asarray(x_np)[:, ::-1])
self.assertAllEqual(x_tf_5, np.asarray(x_np)[::-1, ::-1])
# This test covers the axis validation in the shape function
# (no eval())
@test_util.run_deprecated_v1
def testInvalidAxis(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
with self.assertRaisesRegexp(ValueError, "is out of valid range"):
array_ops.reverse_v2(x_np, [-30])
with self.assertRaisesRegexp(ValueError, "is out of valid range"):
array_ops.reverse_v2(x_np, [2])
with self.assertRaisesRegexp(ValueError, "axis 0 specified more than once"):
array_ops.reverse_v2(x_np, [0, -2])
# This is the version of reverse that uses axis indices rather than
# bool tensors
# TODO(b/32254538): Change this test to use array_ops.reverse
#
  # Note: this test feeds the axis through a placeholder because a constant
  # axis would already be rejected by the shape function (see testInvalidAxis)
@test_util.run_deprecated_v1
def testInvalid(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
axis = array_ops.placeholder(dtypes.int32)
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of valid range"):
array_ops.reverse_v2(x_np, axis).eval(feed_dict={axis: [-30]})
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of valid range"):
array_ops.reverse_v2(x_np, axis).eval(feed_dict={axis: [2]})
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"axis 0 specified more than once"):
array_ops.reverse_v2(x_np, axis).eval(feed_dict={axis: [0, -2]})
@test_util.run_deprecated_v1
def testReverse1DimAuto(self):
for dtype in [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.bool,
np.float16, np.float32, np.float64, np.complex64, np.complex128,
np.array(b"").dtype.type
]:
self._reverse1DimAuto(dtype)
@test_util.run_deprecated_v1
def testReverse2DimAuto(self):
for dtype in [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.bool,
np.float16, np.float32, np.float64, np.complex64, np.complex128,
np.array(b"").dtype.type
]:
self._reverse2DimAuto(dtype)
@test_util.run_deprecated_v1
def testUnknownDims(self):
reverse_v2 = array_ops.reverse_v2
data_t = array_ops.placeholder(dtypes.float32)
axis_known_t = array_ops.placeholder(dtypes.int32, shape=[3])
reverse_known_t = reverse_v2(data_t, axis_known_t)
# Unlike V1 we cannot know this anymore
self.assertEqual(None, reverse_known_t.get_shape().ndims)
axis_unknown_t = array_ops.placeholder(dtypes.int32)
reverse_unknown_t = reverse_v2(data_t, axis_unknown_t)
self.assertIs(None, reverse_unknown_t.get_shape().ndims)
data_2d_t = array_ops.placeholder(dtypes.float32, shape=[None, None])
axis_2d_t = array_ops.placeholder(dtypes.int32, shape=[3])
reverse_2d_t = reverse_v2(data_2d_t, axis_2d_t)
self.assertEqual(2, reverse_2d_t.get_shape().ndims)
@test_util.run_deprecated_v1
def testReverseRowsOf3Channels(self):
"""Tests optimized code for reversing rows with last dim size = 3."""
with self.session(use_gpu=True):
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
for outer_size in (1, 2):
for middle_size in list(range(50)) + [100000]:
x_np = np.reshape(
np.arange(outer_size * middle_size * 3, dtype=np.float32),
newshape=(outer_size, middle_size, 3))
x_tf = reverse_f(x_np, [1]).eval()
np_answer = x_np[:, ::-1, :]
self.assertAllEqual(x_tf, np_answer)
@test_util.run_deprecated_v1
def testReverseRowsOf4Channels(self):
with self.session(use_gpu=True):
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
for outer_size in (1, 2):
for middle_size in list(range(50)) + [100000]:
x_np = np.reshape(
np.arange(outer_size * middle_size * 4, dtype=np.float32),
newshape=(outer_size, middle_size, 4))
x_tf = reverse_f(x_np, [1]).eval()
np_answer = x_np[:, ::-1, :]
self.assertAllEqual(x_tf, np_answer)
@test_util.run_deprecated_v1
def testReverseColumnsOf3Channels(self):
with self.session(use_gpu=True):
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
for outer_size in list(range(50)) + [100000]:
for middle_size in (1, 2):
x_np = np.reshape(
np.arange(outer_size * middle_size * 3, dtype=np.float32),
newshape=(outer_size, middle_size, 3))
x_tf = reverse_f(x_np, [0]).eval()
np_answer = x_np[::-1, :, :]
self.assertAllEqual(x_tf, np_answer)
class MeshgridTest(test_util.TensorFlowTestCase):
def _compareDiff(self, x, y, use_gpu):
for index in ("ij", "xy"):
numpy_out = np.meshgrid(x, y, indexing=index)
tf_out = array_ops.meshgrid(x, y, indexing=index)
with self.cached_session(use_gpu=use_gpu):
for xx, yy in zip(numpy_out, tf_out):
self.assertAllEqual(xx, yy.eval())
def _compareDiffType(self, n, np_dtype, use_gpu):
inputs = []
for index in ("ij", "xy"):
for _ in range(n):
x = np.linspace(-10, 10, 5).astype(np_dtype)
if np_dtype in (np.complex64, np.complex128):
x += 1j
inputs.append(x)
numpy_out = np.meshgrid(*inputs, indexing=index)
with self.cached_session(use_gpu=use_gpu):
tf_out = array_ops.meshgrid(*inputs, indexing=index)
for x_np, x_tf in zip(numpy_out, tf_out):
self.assertAllEqual(x_np, x_tf.eval())
@test_util.run_deprecated_v1
def testCompare(self):
for t in (np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128):
self._compareDiffType(2, t, False)
self._compareDiffType(3, t, False)
x = [1, 2, 3]
y = [4, 5]
a = [[1, 1], [1, 1]]
self._compareDiff(x, y, False)
self._compareDiff(x, a, False)
class StridedSliceChecker(object):
"""Check a given tensor against the numpy result."""
REF_TENSOR = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
REF_TENSOR_ALIGNED = np.arange(1, 97, dtype=np.float32).reshape(3, 4, 8)
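  # REF_TENSOR holds 18 elements shaped (3, 2, 3); REF_TENSOR_ALIGNED holds 96
  # elements shaped (3, 4, 8), which (judging by the name) keeps the innermost
  # dimension sized for the aligned/optimized slice code paths.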
def __init__(self, test, x, tensor_type=dtypes.int32, check_type_infer=True):
self.x_np = np.array(x).astype(tensor_type.as_numpy_dtype)
if tensor_type.is_bool:
self.x_np = np.array(x % 3).astype(np.bool)
# Give the value a non-zero imaginary component for complex types.
if tensor_type.is_complex:
self.x_np -= 1j * self.x_np
self.test = test
self.x = constant_op.constant(self.x_np, dtype=tensor_type)
self.check_type_infer = check_type_infer
def __getitem__(self, spec):
op = self.x.__getitem__(spec)
def eval_if_tensor(x):
try:
return x.eval()
except AttributeError:
return x
if isinstance(spec, bool) or \
(isinstance(spec, ops.Tensor) and spec.dtype == dtypes.bool) or \
(isinstance(spec, np.ndarray) and spec.dtype == bool) or \
(isinstance(spec, (list, tuple)) and np.asarray(spec).dtype == bool):
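      # Boolean specs are applied as a whole mask, mirroring numpy's boolean
      # array indexing rather than per-dimension slicing.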
tensor = op.eval()
np_spec = eval_if_tensor(spec)
self.test.assertAllEqual(self.x_np[np_spec], tensor)
return tensor
if not isinstance(spec, (list, tuple)):
spec = [spec]
tensor = op.eval()
# Make a numpy spec that pre-evals the tensors
np_specs = []
for s in spec:
if isinstance(s, slice):
start = eval_if_tensor(s.start)
stop = eval_if_tensor(s.stop)
step = eval_if_tensor(s.step)
np_specs.append(slice(start, stop, step))
else:
np_specs.append(eval_if_tensor(s))
self.test.assertAllEqual(self.x_np[tuple(np_specs)], tensor)
if self.check_type_infer:
self.test.assertAllEqual(tensor.shape, op.get_shape())
return tensor
STRIDED_SLICE_TYPES = [
dtypes.int32, dtypes.int64, dtypes.int16, dtypes.int8, dtypes.float32,
dtypes.float64, dtypes.complex64, dtypes.complex128, dtypes.bool
]
class StridedSliceTest(test_util.TensorFlowTestCase):
"""Test the strided slice operation with variants of slices."""
@test_util.run_deprecated_v1
def test_basic_slice(self):
for tensor_type in STRIDED_SLICE_TYPES:
with self.cached_session(use_gpu=True):
checker = StridedSliceChecker(
self, StridedSliceChecker.REF_TENSOR, tensor_type=tensor_type)
_ = checker[:, :, :]
# Various ways of representing identity slice
_ = checker[:, :, :]
_ = checker[::, ::, ::]
_ = checker[::1, ::1, ::1]
# Not zero slice
_ = checker[::1, ::5, ::2]
# Reverse in each dimension independently
_ = checker[::-1, :, :]
_ = checker[:, ::-1, :]
_ = checker[:, :, ::-1]
## negative index tests i.e. n-2 in first component
_ = checker[-2::-1, :, ::1]
# negative index tests i.e. n-2 in first component, non-unit stride
_ = checker[-2::-1, :, ::2]
# Check rank-0 examples
checker2 = StridedSliceChecker(self, 5, tensor_type=tensor_type)
_ = checker2[None]
_ = checker2[...]
_ = checker2[tuple()]
def testInt64GPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with test_util.force_gpu():
x = constant_op.constant([1., 2., 3.])
begin = constant_op.constant([2], dtype=dtypes.int64)
end = constant_op.constant([3], dtype=dtypes.int64)
strides = constant_op.constant([1], dtype=dtypes.int64)
s = array_ops.strided_slice(x, begin, end, strides)
self.assertAllEqual([3.], self.evaluate(s))
@test_util.assert_no_new_pyobjects_executing_eagerly
@test_util.assert_no_garbage_created
def testTensorSliceEagerMemory(self):
with context.eager_mode():
inputs = constant_op.constant([[[1], [2], [3], [4]]],
dtype=dtypes.float32)
# Tests that slicing an EagerTensor doesn't leak memory
inputs[0] # pylint: disable=pointless-statement
@test_util.assert_no_new_pyobjects_executing_eagerly
@test_util.assert_no_garbage_created
def testVariableSliceEagerMemory(self):
with context.eager_mode():
v = variables.Variable([1., 2.])
v[0] # pylint: disable=pointless-statement
@test_util.run_deprecated_v1
def testDegenerateSlices(self):
with self.session(use_gpu=True):
checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
# degenerate by offering a forward interval with a negative stride
_ = checker[0:-1:-1, :, :]
# degenerate with a reverse interval with a positive stride
_ = checker[-1:0, :, :]
# empty interval in every dimension
_ = checker[-1:0, 2:2, 2:3:-1]
# empty first dimension only (used to break for aligned tensors).
checker = StridedSliceChecker(self,
StridedSliceChecker.REF_TENSOR_ALIGNED)
_ = checker[1:0]
@test_util.run_deprecated_v1
def testEllipsis(self):
with self.session(use_gpu=True):
raw = [[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]]
checker = StridedSliceChecker(self, raw)
_ = checker[0:]
# implicit ellipsis
_ = checker[0:, ...]
# ellipsis alone
_ = checker[...]
# ellipsis at end
_ = checker[0:1, ...]
# ellipsis at begin
_ = checker[..., 0:1]
# ellipsis at middle
_ = checker[0:1, ..., 0:1]
# multiple ellipses not allowed
with self.assertRaisesRegexp(ValueError, "Multiple ellipses"):
_ = checker[..., :, ...].eval()
@test_util.run_deprecated_v1
def testShrink(self):
with self.session(use_gpu=True):
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw)
_ = checker[:, :, :, :, 3]
_ = checker[..., 3]
_ = checker[:, 0]
_ = checker[:, :, 0]
@test_util.run_deprecated_v1
def testBothNewAxisAndShrink(self):
with self.session(use_gpu=True):
ones = array_ops.placeholder(shape=[2, 2], dtype=dtypes.int16)
self.assertAllEqual(
ones[array_ops.newaxis, :, 0].eval(
feed_dict={ones: [[1, 1], [1, 1]]}), [[1, 1]])
@test_util.run_deprecated_v1
def testTensorIndexing(self):
with self.session(use_gpu=True):
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw, check_type_infer=False)
bar = constant_op.constant(2)
bar2 = constant_op.constant(3)
_ = checker[..., bar:bar2]
_ = checker[..., bar]
_ = checker[..., 3]
_ = checker[..., 2**64 // 2**63] # Test longs in Python 2
def testTensorIndexingTypeError(self):
with self.session(use_gpu=True):
checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
expected = re.escape(array_ops._SLICE_TYPE_ERROR)
with self.assertRaisesRegexp(TypeError, expected):
_ = checker["foo"]
with self.assertRaisesRegexp(TypeError, expected):
_ = checker[constant_op.constant("foo")]
with self.assertRaisesRegexp(TypeError, expected):
_ = checker[0.0]
with self.assertRaisesRegexp(TypeError, expected):
_ = checker[constant_op.constant(0.0)]
with self.assertRaisesRegexp(TypeError, expected):
_ = checker[constant_op.constant([1, 2, 3])]
with self.assertRaisesRegexp(TypeError, expected):
_ = checker[[2.1, -0.7, 1.5]]
@test_util.run_deprecated_v1
def testExpand(self):
with self.session(use_gpu=True):
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw)
# new axis (followed by implicit ellipsis)
_ = checker[np.newaxis]
# newaxis after ellipsis
_ = checker[..., np.newaxis]
# newaxis in between ellipsis and explicit range
_ = checker[..., np.newaxis, :]
_ = checker[:, ..., np.newaxis, :, :]
# Reverse final dimension with new axis
_ = checker[:, :, np.newaxis, :, 2::-1]
# Ellipsis in middle of two newaxis
_ = checker[np.newaxis, ..., np.newaxis]
@test_util.run_deprecated_v1
def testExpandVariable(self):
with self.session(use_gpu=True):
x = variables.Variable(7, dtype=dtypes.int32)
x.initializer.run()
y = x[None].eval()
self.assertEqual(y.shape, (1,))
self.assertAllEqual(y, (7,))
@test_util.run_deprecated_v1
def testOptimizedCases(self):
with self.session(use_gpu=True):
checker = StridedSliceChecker(self,
StridedSliceChecker.REF_TENSOR_ALIGNED)
# Identity
_ = checker[:]
# Identity
_ = checker[...]
# Identity
_ = checker[np.newaxis, ..., np.newaxis]
# First axis slice
_ = checker[1:]
# First axis slice
_ = checker[np.newaxis, 1:]
@test_util.run_v1_only("currently failing on v2")
def testMasks(self):
with self.session(use_gpu=True):
scalar = np.array(0)
# Test tensor type mask
checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
_ = checker[checker.x > 2]
_ = checker[checker.x <= 5]
_ = checker[ops.convert_to_tensor(scalar)]
# Test numpy array type mask
raw = np.array([[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23,
24]]]]])
checker1 = StridedSliceChecker(self, raw)
_ = checker1[raw >= 4]
_ = checker1[raw < 19]
_ = checker1[scalar]
# Test boolean and non boolean cases
mask = np.array([True, False, True])
raw1 = np.array([[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]])
checker2 = StridedSliceChecker(self, raw1)
_ = checker2[mask]
_ = checker2[ops.convert_to_tensor(mask)]
class StridedSliceShapeChecker(object):
def __init__(self, x):
self.x = x
def __getitem__(self, spec):
op = self.x.__getitem__(spec)
return op.get_shape()
class StridedSliceShapeTest(test_util.TensorFlowTestCase):
"""Test the shape inference of StridedSliceShapes."""
@test_util.run_deprecated_v1
def testUnknown(self):
with self.session(use_gpu=True):
uncertain_tensor = array_ops.placeholder(dtypes.float32)
a = StridedSliceShapeChecker(uncertain_tensor)
a_slice_shape = a[...]
self.assertAllEqual(a_slice_shape.ndims, None)
def tensorShapeEqual(self, x, y):
self.assertTrue(x is not None and y is not None or x is None and y is None)
self.assertEqual(x.as_list(), y.as_list())
@test_util.run_deprecated_v1
def testTensorShapeUncertain(self):
with self.session(use_gpu=True):
uncertain_tensor = array_ops.placeholder(
dtypes.float32, shape=(5, None, 7))
a = StridedSliceShapeChecker(uncertain_tensor)
self.tensorShapeEqual(a[3:5], tensor_shape.TensorShape([2, None, 7]))
self.tensorShapeEqual(a[3:5, :, 4], tensor_shape.TensorShape([2, None]))
self.tensorShapeEqual(a[3:5, 3:4, 4], tensor_shape.TensorShape([2, None]))
self.tensorShapeEqual(a[3:5, :, 5:10],
tensor_shape.TensorShape([2, None, 2]))
self.tensorShapeEqual(a[3:5, :, 50:3],
tensor_shape.TensorShape([2, None, 0]))
self.tensorShapeEqual(a[3:5, :, array_ops.newaxis, 50:3,],
tensor_shape.TensorShape([2, None, 1, 0]))
self.tensorShapeEqual(a[1:5:2, :, array_ops.newaxis, 50:3,],
tensor_shape.TensorShape([2, None, 1, 0]))
self.tensorShapeEqual(a[:5:3, :, array_ops.newaxis, 50:3,],
tensor_shape.TensorShape([2, None, 1, 0]))
self.tensorShapeEqual(a[:2:3, :, array_ops.newaxis, 50:3,],
tensor_shape.TensorShape([1, None, 1, 0]))
self.tensorShapeEqual(a[::-1, :, array_ops.newaxis, ::-2],
tensor_shape.TensorShape([5, None, 1, 4]))
@test_util.run_deprecated_v1
def testTensorValuedIndexShape(self):
with self.session(use_gpu=True):
defined_shape_tensor = array_ops.placeholder(
dtypes.float32, shape=(5, 3, 7))
index_value = array_ops.placeholder(dtypes.int32, shape=())
a = StridedSliceShapeChecker(defined_shape_tensor)
self.tensorShapeEqual(a[index_value], tensor_shape.TensorShape([3, 7]))
self.tensorShapeEqual(a[index_value, ::-1],
tensor_shape.TensorShape([3, 7]))
self.tensorShapeEqual(a[index_value, ::-2],
tensor_shape.TensorShape([2, 7]))
other_scalar = array_ops.placeholder(dtypes.int32, shape=())
self.tensorShapeEqual(a[index_value, other_scalar:2],
tensor_shape.TensorShape([None, 7]))
class GradSliceChecker(object):
"""Tests that we can compute a gradient for var^2."""
def __init__(self, test, sess, var, varnp):
self.test = test
self.sess = sess
self.val = var * var
self.var = var
self.varnp = varnp
def __getitem__(self, spec):
slice_var = self.var[spec]
slice_val = self.val[spec]
# compute analytic 2nd derivative
analytic_grad2 = 2 * slice_val
dy = variables.Variable(
array_ops.ones_like(slice_var, dtype=dtypes.float32))
assign = dy.assign(slice_var)
slice_val_grad, = gradients_impl.gradients(slice_val, self.var, grad_ys=dy)
slice_val_grad2, = gradients_impl.gradients(
slice_val_grad, dy, grad_ys=self.var)
self.sess.run(assign)
slice_val_grad_evaled, slice_val_grad2_evaled = (
self.sess.run([slice_val_grad, slice_val_grad2]))
analytic_grad2_evaled = analytic_grad2.eval()
self.test.assertAllEqual(slice_val_grad2_evaled, analytic_grad2_evaled)
# compute analytic gradient for slice
np_val_grad = (2 * self.varnp * self.varnp)
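    # With grad_ys set to var[spec] (via the assign above), the expected
    # gradient w.r.t. var is 2 * var * var at the sliced positions and zero
    # elsewhere.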
np_sliceval_grad = np.zeros(self.var.get_shape())
if isinstance(spec, ops.Tensor):
spec = self.sess.run([spec])
np_sliceval_grad[spec] = np_val_grad[spec]
# verify gradient
self.test.assertAllEqual(slice_val_grad_evaled, np_sliceval_grad)
class StridedSliceGradTest(test_util.TensorFlowTestCase):
"""Test that strided slice's custom gradient produces correct gradients."""
@test_util.run_v1_only("b/120545219")
def testGradient(self):
with self.session(use_gpu=True) as sess:
var = variables.Variable(
array_ops.reshape(
math_ops.range(1, 97, 1, dtype=dtypes.float32), shape=(6, 4, 4)))
init = variables.global_variables_initializer()
sess.run(init)
raw = np.array(range(1, 97, 1)).reshape((6, 4, 4))
grad = GradSliceChecker(self, sess, var, raw)
_ = grad[2:6:2, 1:3, 1:3]
_ = grad[3:0:-2, 1:3, 1:3]
_ = grad[3:0:-2, array_ops.newaxis, 1:3, 2, array_ops.newaxis]
_ = grad[3:0:-2, 1:3, 2]
_ = grad[:, -1, :]
_ = grad[:, -2, :]
with self.assertRaisesRegexp(ValueError, "out of bounds"):
_ = grad[:, -200, :]
with self.assertRaisesRegexp(ValueError, "out of bounds"):
_ = grad[:, 200, :]
# Test numpy array type mask
_ = grad[raw > 51]
# Test tensor type mask
_ = grad[ops.convert_to_tensor(raw) <= 76]
@test_util.run_v1_only("b/120545219")
def testGradientZero(self):
with self.session(use_gpu=True) as sess:
var = variables.Variable(8.)
init = variables.global_variables_initializer()
sess.run(init)
grad = GradSliceChecker(self, sess, var, np.array(8))
_ = grad[tuple()]
@test_util.run_deprecated_v1
def testInt64Indices(self):
with self.session(use_gpu=True) as sess:
a = math_ops.range(3, dtype=dtypes.float32)
index = constant_op.constant(1, dtype=dtypes.int64)
b = 2. * a[index]
grad, = gradients_impl.gradients(b, a)
self.assertAllEqual(self.evaluate(grad), [0., 2., 0.])
class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):
"""Test varied index types and host located memory."""
@test_util.run_deprecated_v1
def testHostVsDevice(self):
with self.session(use_gpu=True) as sess:
var2 = variables.Variable(
array_ops.reshape(
math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
shape=(4, 1, 1)))
varshape = variables.Variable([6, 4, 4], dtype=dtypes.int32)
self.evaluate(variables.global_variables_initializer())
begin = constant_op.constant([0, 0, 0])
end = constant_op.constant([4, 1, 1])
strides = constant_op.constant([1, 1, 1])
foo = array_ops.strided_slice_grad(varshape, begin, end, strides, var2)
sess.run(foo)
@test_util.run_deprecated_v1
def testInt64Shape(self):
with self.session(use_gpu=True) as sess:
original_dy = array_ops.reshape(
math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
shape=(4, 1, 1))
original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64)
self.evaluate(variables.global_variables_initializer())
begin = constant_op.constant([0, 0, 0], dtype=dtypes.int64)
end = constant_op.constant([4, 1, 1], dtype=dtypes.int64)
strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64)
dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
original_dy)
sess.run(dx)
@test_util.run_deprecated_v1
def testMixedIndexTypes(self):
with self.session(use_gpu=True) as sess:
original_dy = array_ops.reshape(
math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
shape=(4, 1, 1))
original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64)
self.evaluate(variables.global_variables_initializer())
begin = constant_op.constant([0, 0, 0], dtype=dtypes.int32)
end = constant_op.constant([4, 1, 1], dtype=dtypes.int64)
strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64)
with self.assertRaisesRegexp(
TypeError, "Input 'begin' of 'StridedSliceGrad' Op has type int32"
" that does not match type int64 of argument 'shape'"):
dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
original_dy)
sess.run(dx)
class BenchmarkSlice(object):
def __init__(self, tensor):
self.tensor = tensor
def __getitem__(self, x):
return self.tensor[x]
class StridedSliceBenchmark(test_lib.Benchmark):
"""Benchmark new strided slice operation on non-trivial case."""
def run_and_time(self, slice_op):
variables.global_variables_initializer().run()
for _ in range(10):
_ = slice_op.eval()
iters = 1000
t0 = time.time()
for _ in range(iters):
slice_op.eval()
t1 = time.time()
self.report_benchmark(iters=iters, wall_time=(t1 - t0) / 1000.0)
def make_variable(self):
n = 256
shape = (n, n, n)
items = n**3
var = variables.Variable(
array_ops.reshape(math_ops.linspace(1., float(items), items), shape),
dtype=dtypes.float32)
return var
def benchmark_strided_slice_skip(self):
with session.Session():
var = self.make_variable()
helper = BenchmarkSlice(var)
slice_op = helper[::2, ::1, ::2]
self.run_and_time(slice_op)
def benchmark_strided_slice_easy(self):
with session.Session():
var = self.make_variable()
helper = BenchmarkSlice(var)
slice_op = helper[3::1, 3::1, 3::1]
self.run_and_time(slice_op)
def benchmark_slice_easy(self):
with session.Session():
var = self.make_variable()
slice_op = var[3::1, 3::1, 3::1]
self.run_and_time(slice_op)
class StridedSliceAssignChecker(object):
def __init__(self, test, x, tensor_type=dtypes.float32, use_resource=False):
self.tensor_type = tensor_type
self.test = test
self._use_resource = use_resource
self.x_np = np.array(x).astype(tensor_type.as_numpy_dtype)
# Give the value a non-zero imaginary component for complex types.
if tensor_type.is_complex:
self.x_np -= 1j * self.x_np
self.x = constant_op.constant(self.x_np, dtype=tensor_type)
def __setitem__(self, index, value):
value = np.array(value).astype(self.tensor_type.as_numpy_dtype)
# Give the value a non-zero imaginary component for complex types.
if self.tensor_type.is_complex:
value -= 1j * value
with self.test.test_session(use_gpu=True) as sess:
if self._use_resource:
var = resource_variable_ops.ResourceVariable(self.x)
else:
var = variables.Variable(self.x)
sess.run(variables.variables_initializer([var]))
val = sess.run(var[index].assign(value))
# val_copy is used to check that tf.compat.v1.assign works equivalently
# to the assign method above.
val_copy = sess.run(state_ops.assign(var[index], value))
valnp = np.copy(self.x_np)
valnp[index] = np.array(value)
self.test.assertAllEqual(val, valnp)
self.test.assertAllEqual(val_copy, valnp)
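# Note on StridedSliceAssignChecker above: each __setitem__ call builds a
# (resource) variable from self.x, runs var[index].assign(value) as well as
# the equivalent state_ops.assign(var[index], value), and compares both
# results against plain numpy item assignment on a copy of the original array.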
class SliceAssignTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testInvalidSlice(self):
with self.cached_session() as sess:
foo = constant_op.constant([1, 2, 3])
with self.assertRaisesRegexp(ValueError, "Sliced assignment"
" is only supported for variables"):
bar = foo[:2].assign(constant_op.constant([1, 2]))
sess.run(bar)
def doTestSliceAssign(self, use_resource):
for dtype in STRIDED_SLICE_TYPES:
checker = StridedSliceAssignChecker(
self, [[1, 2, 3], [4, 5, 6]],
use_resource=use_resource,
tensor_type=dtype)
# Check if equal
checker[:] = [[10, 20, 30], [40, 50, 60]]
# Check trivial (1,1) shape tensor
checker[1:2, 1:2] = [[66]]
      # shrink shape changes
checker[1:2, 1] = [66]
checker[1, 1:2] = [66]
checker[1, 1] = 66
# newaxis shape changes
checker[:, None, :] = [[[10, 20, 30]], [[40, 50, 50]]]
# shrink and newaxis
checker[None, None, 0, 0:1] = [[[99]]]
      # Non-unit strides
checker[::1, ::-2] = [[3, 33], [4, 44]]
# degenerate interval
checker[8:10, 0] = []
checker[8:10, 8:10] = [[]]
# Assign vector to scalar (rank-0) using newaxis
checker2 = StridedSliceAssignChecker(self, 222)
checker2[()] = 6 # no indices
checker2[...] = 6 # ellipsis
checker2[None] = [6] # new axis
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123559667")
def testSliceAssign(self):
self.doTestSliceAssign(use_resource=False)
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123559667")
def testSliceAssignResource(self):
self.doTestSliceAssign(use_resource=True)
@test_util.run_v1_only("b/120545219")
def testUninitialized(self):
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
"Attempting to use uninitialized value Variable"):
with self.cached_session() as sess:
v = variables.VariableV1([1, 2])
sess.run(v[:].assign([1, 2]))
@test_util.run_v1_only("b/120545219")
def testTypeError(self):
init_val = constant_op.constant([1, 2], dtype=dtypes.int32)
too_small_val = constant_op.constant([3, 4], dtype=dtypes.int8)
too_large_val = constant_op.constant([3, 4], dtype=dtypes.int64)
v = variables.VariableV1(init_val)
with self.assertRaises(TypeError):
v[:].assign(too_small_val)
with self.assertRaises(TypeError):
v[:].assign(too_large_val)
@test_util.run_deprecated_v1
def testTypeErrorResource(self):
init_val = constant_op.constant([1, 2], dtype=dtypes.int32)
too_small_val = constant_op.constant([3, 4], dtype=dtypes.int8)
too_large_val = constant_op.constant([3, 4], dtype=dtypes.int64)
v = resource_variable_ops.ResourceVariable(init_val)
with self.cached_session() as sess:
self.evaluate(v.initializer)
with self.assertRaises(ValueError):
sess.run(v[:].assign(too_large_val))
with self.assertRaises(ValueError):
sess.run(v[:].assign(too_small_val))
class ShapeSizeRankTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testDenseShape(self):
t_value = [[0, 42], [24, 0]]
self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(t_value)))
self.assertEqual(4, self.evaluate(array_ops.size(t_value)))
self.assertEqual(2, self.evaluate(array_ops.rank(t_value)))
t = constant_op.constant(t_value)
self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(t)))
self.assertEqual(4, self.evaluate(array_ops.size(t)))
self.assertEqual(2, self.evaluate(array_ops.rank(t)))
@test_util.run_in_graph_and_eager_modes
def testSparseShape(self):
sp_value = sparse_tensor.SparseTensorValue(
indices=((0, 1), (1, 0)), values=(42, 24), dense_shape=(2, 2))
self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(sp_value)))
self.assertEqual(4, self.evaluate(array_ops.size(sp_value)))
self.assertEqual(2, self.evaluate(array_ops.rank(sp_value)))
sp = sparse_tensor.SparseTensor.from_value(sp_value)
self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(sp)))
self.assertEqual(4, self.evaluate(array_ops.size(sp)))
self.assertEqual(2, self.evaluate(array_ops.rank(sp)))
@test_util.run_in_graph_and_eager_modes
def testSizeDtype(self):
tensor = [1]
self.assertEqual(dtypes.int32, self.evaluate(array_ops.size(tensor)).dtype)
self.assertEqual(
dtypes.int64,
self.evaluate(array_ops.size(tensor, out_type=dtypes.int64)).dtype)
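# sequence_mask(lengths, maxlen) returns a mask (boolean by default, or the
# requested dtype) where mask[..., i, j] is True iff j < lengths[..., i]; when
# maxlen is omitted it defaults to the maximum value in lengths.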
class SequenceMaskTest(test_util.TensorFlowTestCase):
def testExceptions(self):
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "maxlen must be scalar"):
array_ops.sequence_mask([10, 20], [10, 20])
@test_util.run_deprecated_v1
def testOneDimensionalWithMaxlen(self):
with self.cached_session():
res = array_ops.sequence_mask(constant_op.constant([1, 3, 2]), 5)
self.assertAllEqual(res.get_shape(), [3, 5])
self.assertAllEqual(
res.eval(),
[[True, False, False, False, False], [True, True, True, False, False],
[True, True, False, False, False]])
@test_util.run_deprecated_v1
def testOneDimensionalDtypeWithoutMaxlen(self):
with self.cached_session():
# test dtype and default maxlen:
res = array_ops.sequence_mask(
constant_op.constant([0, 1, 4]), dtype=dtypes.float32)
self.assertAllEqual(res.get_shape().as_list(), [3, 4])
self.assertAllEqual(
res.eval(),
[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]])
@test_util.run_deprecated_v1
def testOneDimensionalWithoutMaxlen(self):
with self.cached_session():
res = array_ops.sequence_mask(constant_op.constant([0, 1, 4]))
self.assertAllEqual(res.get_shape().as_list(), [3, 4])
self.assertAllEqual(
res.eval(), [[False, False, False, False],
[True, False, False, False], [True, True, True, True]])
@test_util.run_deprecated_v1
def testTwoDimensional(self):
with self.cached_session():
res = array_ops.sequence_mask(constant_op.constant([[1, 3, 2]]), 5)
self.assertAllEqual(res.get_shape(), [1, 3, 5])
self.assertAllEqual(res.eval(), [[[True, False, False, False, False],
[True, True, True, False, False],
[True, True, False, False, False]]])
# test dtype and default maxlen:
res = array_ops.sequence_mask(
constant_op.constant([[0, 1, 4], [1, 2, 3]]), dtype=dtypes.float32)
self.assertAllEqual(res.get_shape().as_list(), [2, 3, 4])
self.assertAllEqual(
res.eval(),
[[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]],
[[1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0]]])
@test_util.run_deprecated_v1
def testUnknownShape(self):
lengths = array_ops.placeholder(dtype=dtypes.int32)
res = array_ops.sequence_mask(lengths)
self.assertEqual(res.shape, None)
@test_util.run_deprecated_v1
def testDtypes(self):
def check_dtypes(lengths_dtype, maxlen_dtype):
res = array_ops.sequence_mask(
constant_op.constant([1, 3, 2], dtype=lengths_dtype),
constant_op.constant(5, dtype=maxlen_dtype))
self.assertAllEqual(res.get_shape(), [3, 5])
self.assertAllEqual(
res.eval(),
[[True, False, False, False, False], [True, True, True, False, False],
[True, True, False, False, False]])
with self.cached_session():
check_dtypes(dtypes.int32, dtypes.int32)
check_dtypes(dtypes.int32, dtypes.int64)
check_dtypes(dtypes.int64, dtypes.int32)
check_dtypes(dtypes.int64, dtypes.int64)
class ConcatSliceResourceTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def testConcatSlice(self):
r1 = test_ops.stub_resource_handle_op(container="a", shared_name="b")
r2 = test_ops.stub_resource_handle_op(container="a", shared_name="c")
c = array_ops.stack([r1, r2])
s = array_ops.strided_slice(c, [1], [2])
self.evaluate(test_ops.resource_create_op(s))
with self.assertRaises(errors.AlreadyExistsError):
self.evaluate(test_ops.resource_create_op(r2))
class IdentityTest(test_util.TensorFlowTestCase):
@test_util.run_gpu_only
def testEagerIdentity(self):
with context.eager_mode():
def _test(x, y, device):
self.assertAllEqual(x.numpy(), y.numpy())
self.assertTrue(device in y.device.lower())
with test_util.force_gpu():
a = constant_op.constant([[2], [3]], dtype=dtypes.float32)
with test_util.force_gpu():
b = array_ops.identity(a)
_test(a, b, "gpu")
with test_util.force_cpu():
c = array_ops.identity(b)
_test(b, c, "cpu")
with test_util.force_cpu():
d = array_ops.identity(c)
_test(c, d, "cpu")
with test_util.force_gpu():
e = array_ops.identity(d)
_test(d, e, "gpu")
class PadTest(test_util.TensorFlowTestCase):
def testEager(self):
with context.eager_mode():
t = constant_op.constant([[1, 2, 3], [4, 5, 6]])
      paddings = constant_op.constant([[1, 1], [2, 2]])
padded = array_ops.pad(t, paddings, "CONSTANT")
self.assertAllEqual(padded.numpy(),
[[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 2, 3, 0, 0],
[0, 0, 4, 5, 6, 0, 0], [0, 0, 0, 0, 0, 0, 0]])
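# invert_permutation(x) returns the permutation y with y[x[i]] = i; e.g. for
# x = [3, 4, 0, 2, 1] the inverse is [2, 4, 3, 0, 1].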
class InvertPermutationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testInvertPermutation(self):
for dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(use_gpu=True):
x = constant_op.constant([3, 4, 0, 2, 1], dtype=dtype)
y = array_ops.invert_permutation(x)
self.assertAllEqual(y.get_shape(), [5])
self.assertAllEqual(y.eval(), [2, 4, 3, 0, 1])
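# unravel_index converts flat indices into coordinates for a given shape
# (row-major), like np.unravel_index. For dims [6, 7, 8, 9] the strides are
# [504, 72, 9, 1], so index 1621 = 3*504 + 1*72 + 4*9 + 1 maps to [3, 1, 4, 1].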
class UnravelIndexTest(test_util.TensorFlowTestCase):
# TODO(b/73086570): Reenable test.
@unittest.skip("Test does not pass internally.")
def testUnravelIndex(self):
with self.cached_session():
for dtype in [dtypes.int32, dtypes.int64]:
indices_1 = constant_op.constant(1621, dtype=dtype)
dims_1 = constant_op.constant([6, 7, 8, 9], dtype=dtype)
out_1 = array_ops.unravel_index(indices_1, dims_1)
self.assertAllEqual(out_1.eval(), [3, 1, 4, 1])
indices_2 = constant_op.constant([1621], dtype=dtype)
dims_2 = constant_op.constant([6, 7, 8, 9], dtype=dtype)
out_2 = array_ops.unravel_index(indices_2, dims_2)
self.assertAllEqual(out_2.eval(), [[3], [1], [4], [1]])
indices_3 = constant_op.constant([22, 41, 37], dtype=dtype)
dims_3 = constant_op.constant([7, 6], dtype=dtype)
out_3 = array_ops.unravel_index(indices_3, dims_3)
self.assertAllEqual(out_3.eval(), [[3, 6, 6], [4, 5, 1]])
class GuaranteeConstOpTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testSimple(self):
with self.cached_session():
a = array_ops.constant(10)
guarantee_a = array_ops.guarantee_const(a)
self.assertEqual(10, guarantee_a.eval())
@test_util.run_deprecated_v1
def testVariables(self):
with self.cached_session() as sess:
for use_resource in [False, True]:
a = variable_scope.get_variable(
"var_{}".format(use_resource), [],
initializer=init_ops.constant_initializer(10.0),
use_resource=use_resource)
guarantee_a = array_ops.guarantee_const(a)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(10.0, guarantee_a.eval())
@test_util.run_deprecated_v1
def testResourceRejection(self):
with self.cached_session() as sess:
a = variable_scope.get_variable(
"resource_var", [],
initializer=init_ops.constant_initializer(10.0),
use_resource=True)
guarantee_a = array_ops.guarantee_const(a.handle)
self.evaluate(variables.global_variables_initializer())
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"cannot be a resource variable"):
guarantee_a.eval()
class SnapshotOpTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testInvertPermutation(self):
for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
with self.cached_session(use_gpu=True):
x = constant_op.constant([0, 1, 2, 3], dtype=dtype)
y = gen_array_ops.snapshot(x)
self.assertAllEqual(y.eval(), [0, 1, 2, 3])
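# searchsorted performs a per-row binary search over the innermost dimension:
# side="left" returns the lower bound (first index where the value could be
# inserted while keeping the row sorted) and side="right" the upper bound,
# matching np.searchsorted applied independently to each row.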
@test_util.run_all_in_graph_and_eager_modes
class SortedSearchTest(test_util.TensorFlowTestCase):
def testUpperBoundFloatHandCoded(self):
cdf = np.array([0, .2, .5, .6, .8, 1.], dtype=np.float32)
arr = np.array([.04, .99, .53, .58, .31, .01, .79, .8, .21],
dtype=np.float32)
result = np.searchsorted(cdf, arr, side="right")
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
self.assertAllEqual(result, tf_result)
def testUpperBoundFloatRandomNd(self):
dim_size = 7
for d in range(1, 5):
shape = [dim_size] * d
cdf = np.cumsum(
np.random.uniform(size=shape).astype(np.float32), axis=(d - 1))
arr = np.random.uniform(size=shape).astype(np.float32) * dim_size
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
cdf = cdf.reshape([-1, dim_size])
arr = arr.reshape([-1, dim_size])
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(dim_size**(d - 1)):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")
result = result.reshape(shape)
self.assertAllEqual(result, tf_result)
def testUpperBoundFloatUneven(self):
batch_size = 7
size_search_array = 1000
size_values = 47
cdf = np.cumsum(
np.random.uniform(size=[batch_size, size_search_array]).astype(
np.float32),
axis=1)
arr = np.random.uniform(size=[batch_size, size_values]).astype(
np.float32) * size_search_array
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(batch_size):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")
self.assertAllEqual(result, tf_result)
def testLowerBoundFloatHandCoded(self):
cdf = np.array([0, .2, .5, .6, .8, 1.], dtype=np.float32)
arr = np.array([.04, .99, .53, .58, .31, .01, .79, .8, .21],
dtype=np.float32)
result = np.searchsorted(cdf, arr, side="left")
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
self.assertAllEqual(result, tf_result)
def testLowerBoundFloatRandomNd(self):
dim_size = 7
for d in range(1, 5):
shape = [dim_size] * d
cdf = np.cumsum(
np.random.uniform(size=shape).astype(np.float32), axis=(d - 1))
arr = np.random.uniform(size=shape).astype(np.float32) * dim_size
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
cdf = cdf.reshape([-1, dim_size])
arr = arr.reshape([-1, dim_size])
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(dim_size**(d - 1)):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")
result = result.reshape(shape)
self.assertAllEqual(result, tf_result)
def testLowerBoundFloatUneven(self):
batch_size = 7
size_search_array = 1000
size_values = 47
cdf = np.cumsum(
np.random.uniform(size=[batch_size, size_search_array]).astype(
np.float32),
axis=1)
arr = np.random.uniform(size=[batch_size, size_values]).astype(
np.float32) * size_search_array
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(batch_size):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")
self.assertAllEqual(result, tf_result)
def testUpperBoundIntHandCoded(self):
cdf = np.array([0, 20, 50, 60, 80, 100], dtype=np.int64)
arr = np.array([4, 99, 53, 58, 31, 1, 79, 8, 21], dtype=np.int64)
result = np.searchsorted(cdf, arr, side="right")
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
self.assertAllEqual(result, tf_result)
def testUpperBoundIntRandomNd(self):
dim_size = 7
for d in range(1, 5):
shape = [dim_size] * d
cdf = np.cumsum(
np.random.randint(low=0, high=10, size=shape).astype(np.int64),
axis=(d - 1))
arr = np.random.randint(
low=0, high=10 * dim_size, size=shape).astype(np.int64)
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
cdf = cdf.reshape([-1, dim_size])
arr = arr.reshape([-1, dim_size])
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(dim_size**(d - 1)):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")
result = result.reshape(shape)
self.assertAllEqual(result, tf_result)
def testUpperBoundIntUneven(self):
batch_size = 7
size_search_array = 1000
size_values = 47
cdf = np.cumsum(
np.random.randint(low=0, high=10,
size=[batch_size,
size_search_array]).astype(np.int64),
axis=1)
arr = np.random.randint(
low=0, high=10 * size_search_array, size=[batch_size,
size_values]).astype(np.int64)
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(batch_size):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")
self.assertAllEqual(result, tf_result)
def testLowerBoundIntHandCoded(self):
cdf = np.array([0, 20, 50, 60, 80, 100], dtype=np.int64)
arr = np.array([4, 99, 53, 58, 31, 1, 79, 8, 21], dtype=np.int64)
result = np.searchsorted(cdf, arr, side="left")
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
self.assertAllEqual(result, tf_result)
def testLowerBoundIntRandomNd(self):
dim_size = 7
for d in range(1, 5):
shape = [dim_size] * d
cdf = np.cumsum(
np.random.randint(low=0, high=10, size=shape).astype(np.int64),
axis=(d - 1))
arr = np.random.randint(
low=0, high=10 * dim_size, size=shape).astype(np.int64)
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
cdf = cdf.reshape([-1, dim_size])
arr = arr.reshape([-1, dim_size])
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(dim_size**(d - 1)):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")
result = result.reshape(shape)
self.assertAllEqual(result, tf_result)
def testLowerBoundIntUneven(self):
batch_size = 7
size_search_array = 1000
size_values = 47
cdf = np.cumsum(
np.random.randint(low=0, high=10,
size=[batch_size,
size_search_array]).astype(np.int64),
axis=1)
arr = np.random.randint(
low=0, high=10 * size_search_array, size=[batch_size,
size_values]).astype(np.int64)
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(batch_size):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")
self.assertAllEqual(result, tf_result)
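# For batch_gather_nd, the leading batch_dims axes of params and indices are
# treated as batch dimensions, so the result shape is indices.shape[:-1]
# followed by params.shape[batch_dims + indices.shape[-1]:], which is exactly
# what testShapesMatch below recomputes by hand.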
class BatchGatherNdTest(test_util.TensorFlowTestCase):
def testShapesMatch(self):
"""Tests for various different shape combinations."""
shapes = []
# params_shape, indices_shape, batch_dims
shapes.append(((2, 2, 2), (2, 1), 1),)
shapes.append(((2, 2, 2), (2, 2), 1),)
shapes.append(((2, 2, 2), (2, 3), 0),)
shapes.append(((2, 2, 2), (3,), 0),)
shapes.append(((2, 2, 2), (1,), 0),)
shapes.append(((2, 2, 3, 2), (2, 3), 1),)
shapes.append(((2, 2, 3, 2), (2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 1), 1),)
shapes.append(((2, 2, 3, 2), (2, 1, 3), 1),)
shapes.append(((2, 2, 3, 2), (2, 2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 3, 1), 1),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 2), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 1), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 1, 3), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 2, 2), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)
for params_shape, indices_shape, batch_dims in shapes:
params = constant_op.constant(1.0, shape=(params_shape))
indices = constant_op.constant(
1, shape=(indices_shape), dtype=dtypes.int32)
out = array_ops.batch_gather_nd(
params=params, indices=indices, batch_dims=batch_dims)
ndims_params = len(params_shape) - batch_dims
ndims_rows = ndims_params - indices_shape[-1]
expected_out_shape = indices_shape[:-1]
if ndims_rows > 0:
expected_out_shape += params_shape[-ndims_rows:]
self.assertSequenceEqual(out.shape, expected_out_shape)
def testReducesToGatherNDWhenBatchDimIsZero(self):
"""Confirms setting batch_dims to zero reduces to tf.gather_nd."""
params = constant_op.constant(np.random.uniform(0.0, 1.0, size=(7, 8, 9)))
indices_shapes = []
indices_shapes.append((1,))
indices_shapes.append((3, 1))
indices_shapes.append((3, 3, 1))
indices_shapes.append((2,))
indices_shapes.append((3, 2))
indices_shapes.append((3, 3, 2))
indices_shapes.append((3,))
indices_shapes.append((3, 3))
indices_shapes.append((3, 3, 3))
for indices_shape in indices_shapes:
indices = np.random.randint(0, 7, size=indices_shape)
gather_nd_result = gen_array_ops.gather_nd(params, indices)
batch_gather_nd_result = array_ops.batch_gather_nd(
params=params, indices=indices, batch_dims=0)
self.assertAllEqual(gather_nd_result, batch_gather_nd_result)
def testSameResultAsMapFn(self):
"""Compares results with gather_nd called on every element with map_fn."""
shapes = []
# params_shape, indices_shape, batch_dims
shapes.append(((2, 2, 2), (2, 1), 1),)
shapes.append(((2, 2, 2), (2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 3), 1),)
shapes.append(((2, 2, 3, 2), (2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 1), 1),)
shapes.append(((2, 2, 3, 2), (2, 1, 3), 1),)
shapes.append(((2, 2, 3, 2), (2, 2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 3, 1), 1),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 2), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 1), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 1, 3), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 2, 2), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)
for params_shape, indices_shape, batch_dims in shapes:
params = constant_op.constant(
np.random.uniform(0.0, 1.0, size=(params_shape)))
indices = np.random.randint(0, 2, size=indices_shape)
batch_gather_nd_result = array_ops.batch_gather_nd(
params=params, indices=indices, batch_dims=batch_dims)
if batch_dims > 1:
params = array_ops.reshape(
params, shape=[-1] + list(params_shape[batch_dims:]))
indices = array_ops.reshape(
indices, shape=[-1] + list(indices_shape[batch_dims:]))
map_fn_gather_nd_result = map_fn.map_fn(
fn=self._map_fn_body, elems=(params, indices), dtype=dtypes.float64)
if batch_dims > 1:
out_shape = map_fn_gather_nd_result.shape.as_list()
out_shape = list(params_shape[:batch_dims]) + out_shape[1:]
map_fn_gather_nd_result = array_ops.reshape(
map_fn_gather_nd_result, shape=out_shape)
self.assertAllEqual(map_fn_gather_nd_result, batch_gather_nd_result)
def _map_fn_body(self, elems):
return gen_array_ops.gather_nd(elems[0], elems[1])
def testBatchDimsAsTensor(self):
"""Tests Tensor batch_dims as input works as intended."""
shapes = []
# params_shape, indices_shape, batch_dims
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 0),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 1),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)
for params_shape, indices_shape, batch_dims in shapes:
params = constant_op.constant(
np.random.uniform(0.0, 1.0, size=(params_shape)))
indices = np.random.randint(0, 2, size=indices_shape)
batch_gather_nd_result = array_ops.gather_nd(
params=params, indices=indices, batch_dims=batch_dims)
batch_dims_tensor = constant_op.constant([batch_dims])
batch_gather_nd_tensor_batch_dims_result = array_ops.gather_nd(
params=params, indices=indices, batch_dims=batch_dims_tensor)
self.assertAllEqual(batch_gather_nd_tensor_batch_dims_result,
batch_gather_nd_result)
def testInvalidBatchDimsRaisesException(self):
"""Tests whether invalid batch_dims raise expected exceptions."""
params = constant_op.constant(
np.random.uniform(0.0, 1.0, size=(3, 2, 2, 3, 4)))
indices = np.random.randint(0, 2, size=(3, 2, 3))
with self.assertRaises(TypeError):
array_ops.batch_gather_nd(
params=params,
indices=indices,
batch_dims=constant_op.constant((0, 1)))
with self.assertRaises(ValueError):
array_ops.batch_gather_nd(params=params, indices=indices, batch_dims=-1)
with self.assertRaises(ValueError):
array_ops.batch_gather_nd(params=params, indices=indices, batch_dims=4)
@test_util.run_deprecated_v1
def testNoneBatchDimensions(self):
"""Tests gather_nd works with None dimensions."""
shapes = []
# params_shape, indices_shape, batch_dims
shapes.append(((2, 2, 2), (2, 1), 1),)
shapes.append(((2, 2, 2), (2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 3), 1),)
shapes.append(((2, 2, 3, 2), (2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 1), 1),)
shapes.append(((2, 2, 3, 2), (2, 1, 3), 1),)
shapes.append(((2, 2, 3, 2), (2, 2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 3, 1), 1),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 2), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 1), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 1, 3), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 2, 2), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)
for params_shape, indices_shape, batch_dims in shapes:
params_ph_shape = list(params_shape)
indices_ph_shape = list(indices_shape)
for i in range(batch_dims):
params_ph_shape[i] = None
indices_ph_shape[i] = None
params = array_ops.placeholder(dtypes.float32, shape=params_ph_shape)
indices = array_ops.placeholder(dtypes.int32, shape=indices_ph_shape)
out = array_ops.batch_gather_nd(
params=params, indices=indices, batch_dims=batch_dims)
with self.cached_session() as sess:
params_val = np.ones(dtype=np.float32, shape=params_shape)
indices_val = np.ones(dtype=np.int32, shape=indices_shape)
res = sess.run(
out, feed_dict={
params: params_val,
indices: indices_val
})
row_ndims = len(params_shape) - batch_dims - indices_shape[-1]
expected_out_shape = indices_shape[:-1]
if row_ndims > 0:
expected_out_shape += params_shape[-row_ndims:]
self.assertSequenceEqual(res.shape, expected_out_shape)
@test_util.run_deprecated_v1
def testUnknownIndices(self):
"""Tests whether indices with unknown rank works correctly."""
params = constant_op.constant(((0, 1, 2),))
indices = array_ops.placeholder(dtypes.int32)
gather_nd_t = array_ops.gather_nd(params, indices, batch_dims=1)
shape = gather_nd_t.get_shape()
self.assertEqual(None, shape.ndims)
self.assertEqual(None, tensor_shape.dimension_value(shape[0]))
if __name__ == "__main__":
test_lib.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/array_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.control_flow_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ControlFlowUtilTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testIsSwitch(self):
switch_false, _ = control_flow_ops.switch(1, True)
switch = switch_false.op
self.assertTrue(control_flow_util.IsSwitch(switch))
ref_switch_false, _ = control_flow_ops.ref_switch(test_ops.ref_output(),
True)
ref_switch = ref_switch_false.op
self.assertTrue(control_flow_util.IsSwitch(ref_switch))
self.assertFalse(control_flow_util.IsSwitch(test_ops.int_output().op))
@test_util.run_v1_only("b/120545219")
def testIsLoopEnter(self):
enter = gen_control_flow_ops.enter(1, frame_name="name").op
self.assertTrue(control_flow_util.IsLoopEnter(enter))
self.assertFalse(control_flow_util.IsLoopConstantEnter(enter))
ref_enter = gen_control_flow_ops.ref_enter(test_ops.ref_output(),
frame_name="name").op
self.assertTrue(control_flow_util.IsLoopEnter(ref_enter))
self.assertFalse(control_flow_util.IsLoopConstantEnter(ref_enter))
const_enter = gen_control_flow_ops.enter(1, frame_name="name",
is_constant=True).op
self.assertTrue(control_flow_util.IsLoopEnter(const_enter))
self.assertTrue(control_flow_util.IsLoopConstantEnter(const_enter))
self.assertFalse(control_flow_util.IsLoopEnter(test_ops.int_output().op))
@test_util.run_v1_only("b/120545219")
def testIsLoopExit(self):
exit_op = control_flow_ops.exit(1).op
self.assertTrue(control_flow_util.IsLoopExit(exit_op))
ref_exit = control_flow_ops.exit(test_ops.ref_output()).op
self.assertTrue(control_flow_util.IsLoopExit(ref_exit))
self.assertFalse(control_flow_util.IsLoopExit(test_ops.int_output().op))
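  # build_test_graph constructs a graph with an outer cond containing a while
  # loop that itself contains a nested cond, so the Switch and Merge
  # classification helpers exercised below can be checked against known node
  # names.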
def build_test_graph(self):
g = ops.Graph()
with g.as_default():
def while_loop(x):
def b(x):
with ops.name_scope("NestedCond"):
return control_flow_ops.cond(
math_ops.less(x, 100), lambda: math_ops.add(x, 1),
lambda: math_ops.add(x, 2))
c = lambda x: math_ops.less(x, 10000)
with ops.name_scope("OuterWhile"):
return control_flow_ops.while_loop(c, b, [x])
x = array_ops.placeholder(dtypes.int32)
with ops.name_scope("OuterCond"):
control_flow_ops.cond(
math_ops.less(x, 1000), lambda: while_loop(x),
lambda: math_ops.add(x, 2))
return g
def testIsCondSwitch(self):
g = self.build_test_graph()
cond_switch = [
"OuterCond/cond/Switch",
"OuterCond/cond/OuterWhile/while/Switch",
"OuterCond/cond/OuterWhile/while/NestedCond/cond/Switch",
"OuterCond/cond/OuterWhile/while/NestedCond/cond/Add/Switch",
"OuterCond/cond/OuterWhile/while/NestedCond/cond/Add_1/Switch",
"OuterCond/cond/Add/Switch",
]
for n in g.get_operations():
if control_flow_util.IsSwitch(n):
self.assertTrue(
control_flow_util.IsCondSwitch(n) != control_flow_util.IsLoopSwitch(
n))
if n.name in cond_switch:
self.assertTrue(control_flow_util.IsSwitch(n))
self.assertTrue(
control_flow_util.IsCondSwitch(n),
msg="Mismatch for {}".format(n.name))
self.assertFalse(
control_flow_util.IsLoopSwitch(n),
msg="Mismatch for {}".format(n.name))
else:
self.assertFalse(
control_flow_util.IsCondSwitch(n),
msg="Mismatch for {}".format(n.name))
def testIsLoopSwitch(self):
g = self.build_test_graph()
loop_switch = ["OuterCond/cond/OuterWhile/while/Switch_1"]
for n in g.get_operations():
if control_flow_util.IsSwitch(n):
self.assertTrue(
control_flow_util.IsCondSwitch(n) != control_flow_util.IsLoopSwitch(
n))
if n.name in loop_switch:
self.assertTrue(control_flow_util.IsSwitch(n))
self.assertFalse(
control_flow_util.IsCondSwitch(n),
msg="Mismatch for {}".format(n.name))
self.assertTrue(
control_flow_util.IsLoopSwitch(n),
msg="Mismatch for {}".format(n.name))
else:
self.assertFalse(
control_flow_util.IsLoopSwitch(n),
msg="Mismatch for {}".format(n.name))
def testIsCondMerge(self):
g = self.build_test_graph()
cond_merges = [
"OuterCond/cond/OuterWhile/while/NestedCond/cond/Merge",
"OuterCond/cond/Merge"
]
for n in g.get_operations():
if n.name in cond_merges:
self.assertTrue(control_flow_util.IsMerge(n))
self.assertTrue(control_flow_util.IsCondMerge(n))
self.assertFalse(control_flow_util.IsLoopMerge(n))
else:
self.assertFalse(control_flow_util.IsCondMerge(n))
self.assertTrue(not control_flow_util.IsMerge(n) or
control_flow_util.IsLoopMerge(n))
def testIsLoopMerge(self):
g = self.build_test_graph()
loop_merges = [
"OuterCond/cond/OuterWhile/while/Merge",
]
for n in g.get_operations():
if n.name in loop_merges:
self.assertTrue(control_flow_util.IsMerge(n))
self.assertFalse(control_flow_util.IsCondMerge(n))
self.assertTrue(control_flow_util.IsLoopMerge(n))
else:
self.assertFalse(control_flow_util.IsLoopMerge(n))
self.assertTrue(not control_flow_util.IsMerge(n) or
control_flow_util.IsCondMerge(n))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/control_flow_util_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.compare_and_bitpack_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
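# compare_and_bitpack(x, threshold) computes the boolean x > threshold and
# packs each group of 8 booleans along the last axis into one uint8, so the
# innermost dimension is expected to be a multiple of 8; np.packbits on the
# same comparison gives the reference result used in these tests.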
class CompareAndBitpackTest(test.TestCase):
def _testCompareAndBitpack(self,
x, threshold,
truth,
expected_err_re=None):
ans = math_ops.compare_and_bitpack(x, threshold)
if expected_err_re is None:
tf_ans = self.evaluate(ans)
self.assertShapeEqual(truth, ans)
self.assertAllEqual(tf_ans, truth)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def _testBasic(self, dtype):
rows = 371
cols = 294
    x = np.random.randn(rows, cols * 8)
    if dtype == np.bool:
x = x > 0
else:
x = x.astype(dtype)
threshold = dtype(0)
# np.packbits flattens the tensor, so we reshape it back to the
# expected dimensions.
truth = np.packbits(x > threshold).reshape(rows, cols)
self._testCompareAndBitpack(x, threshold, truth)
def testBasicFloat32(self):
self._testBasic(np.float32)
def testBasicFloat64(self):
self._testBasic(np.float64)
def testBasicFloat16(self):
self._testBasic(np.float16)
def testBasicBool(self):
self._testBasic(np.bool)
def testBasicInt8(self):
self._testBasic(np.int8)
def testBasicInt16(self):
self._testBasic(np.int16)
def testBasicInt32(self):
self._testBasic(np.int32)
def testBasicInt64(self):
self._testBasic(np.int64)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/compare_and_bitpack_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Test cases shared by MatrixDiagV2, MatrixDiagPartV2, and MatrixSetDiagV2.
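# Each helper below returns (mat, tests), where tests maps a diagonal band
# (d_lower, d_upper) to a pair (packed diagonals, banded matrix): the packed
# diagonals are what matrix_diag_part should extract from mat for that band,
# and the banded matrix is what matrix_diag should build from them.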
def square_cases():
# pyformat: disable
mat = np.array([[[1, 2, 3, 4, 5],
[6, 7, 8, 9, 1],
[3, 4, 5, 6, 7],
[8, 9, 1, 2, 3],
[4, 5, 6, 7, 8]],
[[9, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 1],
[2, 3, 4, 5, 6]]])
tests = dict()
tests[(-1, -1)] = (np.array([[6, 4, 1, 7],
[5, 2, 8, 5]]),
np.array([[[0, 0, 0, 0, 0],
[6, 0, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 7, 0]],
[[0, 0, 0, 0, 0],
[5, 0, 0, 0, 0],
[0, 2, 0, 0, 0],
[0, 0, 8, 0, 0],
[0, 0, 0, 5, 0]]]))
tests[(-4, -3)] = (np.array([[[8, 5],
[4, 0]],
[[6, 3],
[2, 0]]]),
np.array([[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[8, 0, 0, 0, 0],
[4, 5, 0, 0, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[6, 0, 0, 0, 0],
[2, 3, 0, 0, 0]]]))
tests[(-2, 1)] = (np.array([[[2, 8, 6, 3, 0],
[1, 7, 5, 2, 8],
[6, 4, 1, 7, 0],
[3, 9, 6, 0, 0]],
[[1, 7, 4, 1, 0],
[9, 6, 3, 9, 6],
[5, 2, 8, 5, 0],
[1, 7, 4, 0, 0]]]),
np.array([[[1, 2, 0, 0, 0],
[6, 7, 8, 0, 0],
[3, 4, 5, 6, 0],
[0, 9, 1, 2, 3],
[0, 0, 6, 7, 8]],
[[9, 1, 0, 0, 0],
[5, 6, 7, 0, 0],
[1, 2, 3, 4, 0],
[0, 7, 8, 9, 1],
[0, 0, 4, 5, 6]]]))
tests[(2, 4)] = (np.array([[[5, 0, 0],
[4, 1, 0],
[3, 9, 7]],
[[4, 0, 0],
[3, 9, 0],
[2, 8, 5]]]),
np.array([[[0, 0, 3, 4, 5],
[0, 0, 0, 9, 1],
[0, 0, 0, 0, 7],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 0, 2, 3, 4],
[0, 0, 0, 8, 9],
[0, 0, 0, 0, 5],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]]))
# pyformat: enable
return (mat, tests)
def tall_cases():
# pyformat: disable
mat = np.array([[[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[9, 8, 7],
[6, 5, 4]],
[[3, 2, 1],
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[9, 8, 7]]])
tests = dict()
tests[(0, 0)] = (np.array([[1, 5, 9],
[3, 2, 6]]),
np.array([[[1, 0, 0],
[0, 5, 0],
[0, 0, 9],
[0, 0, 0]],
[[3, 0, 0],
[0, 2, 0],
[0, 0, 6],
[0, 0, 0]]]))
tests[(-4, -3)] = (np.array([[[9, 5],
[6, 0]],
[[7, 8],
[9, 0]]]),
np.array([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[9, 0, 0],
[6, 5, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[7, 0, 0],
[9, 8, 0]]]))
tests[(-2, -1)] = (np.array([[[4, 8, 7],
[7, 8, 4]],
[[1, 5, 9],
[4, 8, 7]]]),
np.array([[[0, 0, 0],
[4, 0, 0],
[7, 8, 0],
[0, 8, 7],
[0, 0, 4]],
[[0, 0, 0],
[1, 0, 0],
[4, 5, 0],
[0, 8, 9],
[0, 0, 7]]]))
tests[(-2, 1)] = (np.array([[[2, 6, 0],
[1, 5, 9],
[4, 8, 7],
[7, 8, 4]],
[[2, 3, 0],
[3, 2, 6],
[1, 5, 9],
[4, 8, 7]]]),
np.array([[[1, 2, 0],
[4, 5, 6],
[7, 8, 9],
[0, 8, 7],
[0, 0, 4]],
[[3, 2, 0],
[1, 2, 3],
[4, 5, 6],
[0, 8, 9],
[0, 0, 7]]]))
tests[(1, 2)] = (np.array([[[3, 0],
[2, 6]],
[[1, 0],
[2, 3]]]),
np.array([[[0, 2, 3],
[0, 0, 6],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 2, 1],
[0, 0, 3],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]]))
# pyformat: enable
return (mat, tests)
def fat_cases():
# pyformat: disable
mat = np.array([[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 1, 2, 3]],
[[4, 5, 6, 7],
[8, 9, 1, 2],
[3, 4, 5, 6]]])
tests = dict()
tests[(2, 2)] = (np.array([[3, 8],
[6, 2]]),
np.array([[[0, 0, 3, 0],
[0, 0, 0, 8],
[0, 0, 0, 0]],
[[0, 0, 6, 0],
[0, 0, 0, 2],
[0, 0, 0, 0]]]))
tests[(-2, 0)] = (np.array([[[1, 6, 2],
[5, 1, 0],
[9, 0, 0]],
[[4, 9, 5],
[8, 4, 0],
[3, 0, 0]]]),
np.array([[[1, 0, 0, 0],
[5, 6, 0, 0],
[9, 1, 2, 0]],
[[4, 0, 0, 0],
[8, 9, 0, 0],
[3, 4, 5, 0]]]))
tests[(-1, 1)] = (np.array([[[2, 7, 3],
[1, 6, 2],
[5, 1, 0]],
[[5, 1, 6],
[4, 9, 5],
[8, 4, 0]]]),
np.array([[[1, 2, 0, 0],
[5, 6, 7, 0],
[0, 1, 2, 3]],
[[4, 5, 0, 0],
[8, 9, 1, 0],
[0, 4, 5, 6]]]))
tests[(0, 3)] = (np.array([[[4, 0, 0],
[3, 8, 0],
[2, 7, 3],
[1, 6, 2]],
[[7, 0, 0],
[6, 2, 0],
[5, 1, 6],
[4, 9, 5]]]),
np.array([[[1, 2, 3, 4],
[0, 6, 7, 8],
[0, 0, 2, 3]],
[[4, 5, 6, 7],
[0, 9, 1, 2],
[0, 0, 5, 6]]]))
# pyformat: enable
return (mat, tests)
class MatrixDiagTest(test.TestCase):
def _moreCases(self):
# Diagonal bands.
# pyformat: disable
vecs = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)
[5, 6, 7, 8],
[9, 8, 7, 6]],
[[5, 4, 3, 2],
[1, 2, 3, 4],
[5, 6, 7, 8]]])
tests = dict()
tests[(-3, -1)] = (vecs,
np.array([[[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[5, 2, 0, 0, 0],
[9, 6, 3, 0, 0],
[0, 8, 7, 4, 0]],
[[0, 0, 0, 0, 0],
[5, 0, 0, 0, 0],
[1, 4, 0, 0, 0],
[5, 2, 3, 0, 0],
[0, 6, 3, 2, 0]]]))
tests[(-1, 1)] = (vecs,
np.array([[[5, 1, 0, 0],
[9, 6, 2, 0],
[0, 8, 7, 3],
[0, 0, 7, 8]],
[[1, 5, 0, 0],
[5, 2, 4, 0],
[0, 6, 3, 3],
[0, 0, 7, 4]]]))
tests[(2, 4)] = (vecs,
np.array([[[0, 0, 9, 5, 1, 0],
[0, 0, 0, 8, 6, 2],
[0, 0, 0, 0, 7, 7],
[0, 0, 0, 0, 0, 6],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
[[0, 0, 5, 1, 5, 0],
[0, 0, 0, 6, 2, 4],
[0, 0, 0, 0, 7, 3],
[0, 0, 0, 0, 0, 8],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]]))
# pyformat: enable
return (None, tests)
@test_util.run_deprecated_v1
def testVector(self):
with self.session(use_gpu=True):
v = np.array([1.0, 2.0, 3.0])
mat = np.diag(v)
v_diag = array_ops.matrix_diag(v)
self.assertEqual((3, 3), v_diag.get_shape())
self.assertAllEqual(v_diag.eval(), mat)
if compat.forward_compatible(2019, 7, 4):
# {Sub,Super}diagonals.
for offset in [1, -2, 5]:
mat = np.diag(v, offset)
v_diag = array_ops.matrix_diag(v, k=offset)
self.assertEqual(mat.shape, v_diag.get_shape())
self.assertAllEqual(v_diag.eval(), mat)
# Diagonal bands.
for _, tests in [self._moreCases(), square_cases()]:
for diags, (vecs, solution) in tests.items():
v_diags = array_ops.matrix_diag(vecs[0], k=diags)
self.assertEqual(v_diags.get_shape(), solution[0].shape)
self.assertAllEqual(v_diags.eval(), solution[0])
def _testBatchVector(self, dtype):
with self.cached_session(use_gpu=True):
v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)
mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
[[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],
[0.0, 0.0, 6.0]]]).astype(dtype)
v_batch_diag = array_ops.matrix_diag(v_batch)
self.assertEqual((2, 3, 3), v_batch_diag.get_shape())
self.assertAllEqual(v_batch_diag.eval(), mat_batch)
if compat.forward_compatible(2019, 7, 4):
# {Sub,Super}diagonals.
for offset in [1, -2, 5]:
v_batch_diag = array_ops.matrix_diag(v_batch, k=offset)
mats = [
np.diag(v_batch[i], offset) for i in range(0, v_batch.shape[0])
]
mat_batch = np.stack(mats, axis=0)
self.assertEqual(mat_batch.shape, v_batch_diag.get_shape())
self.assertAllEqual(v_batch_diag.eval(), mat_batch)
# Diagonal bands with padding.
for padding in [0, 555, -11]:
for _, tests in [self._moreCases(), square_cases()]:
for diags, (vecs, solution) in tests.items():
v_diags = array_ops.matrix_diag(
vecs.astype(dtype), k=diags, padding=padding)
mask = solution == 0
solution = (solution + padding * mask).astype(dtype)
self.assertEqual(v_diags.get_shape(), solution.shape)
self.assertAllEqual(v_diags.eval(), solution)
@test_util.run_deprecated_v1
def testBatchVector(self):
self._testBatchVector(np.float32)
self._testBatchVector(np.float64)
self._testBatchVector(np.int32)
self._testBatchVector(np.int64)
self._testBatchVector(np.bool)
@test_util.run_deprecated_v1
def testRectangularBatch(self):
if compat.forward_compatible(2019, 7, 4):
with self.cached_session(use_gpu=True):
# Stores expected num_rows and num_cols (when the other is given).
# expected[(d_lower, d_upper)] = (expected_num_rows, expected_num_cols)
test_list = list()
# Square cases:
expected = {
(-1, -1): (5, 4),
(-4, -3): (5, 2),
(-2, 1): (5, 5),
(2, 4): (3, 5),
}
test_list.append((expected, square_cases()))
# More cases:
expected = {(-3, -1): (5, 4), (-1, 1): (4, 4), (2, 4): (4, 6)}
test_list.append((expected, self._moreCases()))
# Tall cases
expected = {
(0, 0): (3, 3),
(-4, -3): (5, 2),
(-2, -1): (4, 3),
(-2, 1): (3, 3),
(1, 2): (2, 3)
}
test_list.append((expected, tall_cases()))
# Fat cases
expected = {
(2, 2): (2, 4),
(-2, 0): (3, 3),
(-1, 1): (3, 3),
(0, 3): (3, 3)
}
test_list.append((expected, fat_cases()))
for padding in [0, 555, -11]:
# Giving both num_rows and num_cols
for _, tests in [tall_cases(), fat_cases()]:
for diags, (vecs, solution) in tests.items():
v_diags = array_ops.matrix_diag(
vecs,
k=diags,
num_rows=solution.shape[-2],
num_cols=solution.shape[-1],
padding=padding)
mask = solution == 0
solution = solution + padding * mask
self.assertEqual(v_diags.get_shape(), solution.shape)
self.assertAllEqual(v_diags.eval(), solution)
# Giving just num_rows.
for expected, (_, tests) in test_list:
for diags, (_, new_num_cols) in expected.items():
vecs, solution = tests[diags]
solution = solution.take(indices=range(new_num_cols), axis=-1)
v_diags = array_ops.matrix_diag(
vecs, k=diags, num_rows=solution.shape[-2], padding=padding)
mask = solution == 0
solution = solution + padding * mask
self.assertEqual(v_diags.get_shape(), solution.shape)
self.assertAllEqual(v_diags.eval(), solution)
# Giving just num_cols.
for expected, (_, tests) in test_list:
for diags, (new_num_rows, _) in expected.items():
vecs, solution = tests[diags]
solution = solution.take(indices=range(new_num_rows), axis=-2)
v_diags = array_ops.matrix_diag(
vecs, k=diags, num_cols=solution.shape[-1], padding=padding)
mask = solution == 0
solution = solution + padding * mask
self.assertEqual(v_diags.get_shape(), solution.shape)
self.assertAllEqual(v_diags.eval(), solution)
@test_util.run_deprecated_v1
def testInvalidShape(self):
with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
array_ops.matrix_diag(0)
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123337890") # Error messages differ
def testInvalidShapeAtEval(self):
with self.session(use_gpu=True):
v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("diagonal must be at least 1-dim"):
array_ops.matrix_diag(v).eval(feed_dict={v: 0.0})
@test_util.run_deprecated_v1
def testGrad(self):
shapes = ((3,), (7, 4))
with self.session(use_gpu=True):
for shape in shapes:
x = constant_op.constant(np.random.rand(*shape), np.float32)
y = array_ops.matrix_diag(x)
error = gradient_checker.compute_gradient_error(x,
x.get_shape().as_list(),
y,
y.get_shape().as_list())
self.assertLess(error, 1e-4)
if compat.forward_compatible(2019, 7, 4):
# {Sub,super}diagonals/band.
tests = dict() # tests[shape] = (d_lower, d_upper)
tests[(3,)] = (-1, -1)
tests[(7, 3, 4)] = (-1, 1)
with self.session(use_gpu=True):
for shape, diags in tests.items():
x = constant_op.constant(np.random.rand(*shape), np.float32)
y = array_ops.matrix_diag(x, k=diags)
error = gradient_checker.compute_gradient_error(
x,
x.get_shape().as_list(), y,
y.get_shape().as_list())
self.assertLess(error, 1e-4)
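# matrix_set_diag(input, diagonals, k=...) overwrites the selected diagonal
# band(s) of input with the given diagonals and leaves every other entry of
# input unchanged; the band tests below verify this by masking out the band
# of a random integer matrix and adding the expected banded values back in.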
class MatrixSetDiagTest(test.TestCase):
@test_util.run_deprecated_v1
def testSquare(self):
with self.session(use_gpu=True):
v = np.array([1.0, 2.0, 3.0])
mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]])
mat_set_diag = np.array([[1.0, 1.0, 0.0], [1.0, 2.0, 1.0],
[1.0, 1.0, 3.0]])
output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((3, 3), output.get_shape())
self.assertAllEqual(mat_set_diag, self.evaluate(output))
if compat.forward_compatible(2019, 7, 4):
# Diagonal bands.
_, tests = square_cases()
for diags, pair in tests.items():
vecs, banded_mat = pair
mask = banded_mat[0] == 0
input_mat = np.random.randint(10, size=mask.shape)
solution = input_mat * mask + banded_mat[0]
output = array_ops.matrix_set_diag(input_mat, vecs[0], k=diags)
self.assertEqual(output.get_shape(), solution.shape)
self.assertAllEqual(output.eval(), solution)
@test_util.run_deprecated_v1
def testRectangular(self):
with self.session(use_gpu=True):
v = np.array([3.0, 4.0])
mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]])
output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((2, 3), output.get_shape())
self.assertAllEqual(expected, self.evaluate(output))
v = np.array([3.0, 4.0])
mat = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
expected = np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]])
output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((3, 2), output.get_shape())
self.assertAllEqual(expected, self.evaluate(output))
if compat.forward_compatible(2019, 7, 4):
# Diagonal bands.
for _, tests in [tall_cases(), fat_cases()]:
for diags, pair in tests.items():
vecs, banded_mat = pair
mask = banded_mat[0] == 0
input_mat = np.random.randint(10, size=mask.shape)
solution = input_mat * mask + banded_mat[0]
output = array_ops.matrix_set_diag(input_mat, vecs[0], k=diags)
self.assertEqual(output.get_shape(), solution.shape)
self.assertAllEqual(output.eval(), solution)
def _testSquareBatch(self, dtype):
with self.cached_session(use_gpu=True):
v_batch = np.array([[-1.0, 0.0, -3.0], [-4.0, -5.0, -6.0]]).astype(dtype)
mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0], [1.0, 0.0, 3.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0],
[2.0, 0.0, 6.0]]]).astype(dtype)
mat_set_diag_batch = np.array([[[-1.0, 0.0, 3.0], [0.0, 0.0, 0.0],
[1.0, 0.0, -3.0]],
[[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0],
[2.0, 0.0, -6.0]]]).astype(dtype)
output = array_ops.matrix_set_diag(mat_batch, v_batch)
self.assertEqual((2, 3, 3), output.get_shape())
self.assertAllEqual(mat_set_diag_batch, self.evaluate(output))
if compat.forward_compatible(2019, 7, 4):
# Diagonal bands.
_, tests = square_cases()
for diags, pair in tests.items():
vecs, banded_mat = pair
mask = banded_mat == 0
input_mat = np.random.randint(10, size=mask.shape).astype(dtype)
solution = (input_mat * mask + banded_mat).astype(dtype)
output = array_ops.matrix_set_diag(
input_mat, vecs.astype(dtype), k=diags)
self.assertEqual(output.get_shape(), solution.shape)
self.assertAllEqual(output.eval(), solution)
@test_util.run_deprecated_v1
def testSquareBatch(self):
self._testSquareBatch(np.float32)
self._testSquareBatch(np.float64)
self._testSquareBatch(np.int32)
self._testSquareBatch(np.int64)
self._testSquareBatch(np.bool)
@test_util.run_deprecated_v1
def testRectangularBatch(self):
with self.session(use_gpu=True):
v_batch = np.array([[-1.0, -2.0], [-4.0, -5.0]])
mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0]]])
mat_set_diag_batch = np.array([[[-1.0, 0.0, 3.0], [0.0, -2.0, 0.0]],
[[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0]]])
output = array_ops.matrix_set_diag(mat_batch, v_batch)
self.assertEqual((2, 2, 3), output.get_shape())
self.assertAllEqual(mat_set_diag_batch, self.evaluate(output))
if compat.forward_compatible(2019, 7, 4):
# Diagonal bands.
for _, tests in [tall_cases(), fat_cases()]:
for diags, pair in tests.items():
vecs, banded_mat = pair
mask = banded_mat == 0
input_mat = np.random.randint(10, size=mask.shape)
solution = input_mat * mask + banded_mat
output = array_ops.matrix_set_diag(input_mat, vecs, k=diags)
self.assertEqual(output.get_shape(), solution.shape)
self.assertAllEqual(output.eval(), solution)
@test_util.run_deprecated_v1
def testInvalidShape(self):
with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
array_ops.matrix_set_diag(0, [0])
with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
array_ops.matrix_set_diag([[0]], 0)
# TODO(penporn): Un-skip the XLA test when XLA has MatrixSetDiagV2.
@test_util.run_deprecated_v1
@test_util.disable_xla("XLA op hasn't supported new features in V2, which"
"change the shape requirements.")
def testInvalidShapeAtEval(self):
with self.session(use_gpu=True):
v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("input must be at least 2-dim"):
array_ops.matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0})
with self.assertRaisesOpError("diagonal must be at least 1-dim"):
array_ops.matrix_set_diag([[v]], v).eval(feed_dict={v: 0.0})
if compat.forward_compatible(2019, 7, 4):
d = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError(
"first dimensions of diagonal don't match"):
array_ops.matrix_set_diag(v, d).eval(feed_dict={
v: np.zeros((2, 3, 3)),
d: np.ones((2, 4))
})
@test_util.run_deprecated_v1
def testGrad(self):
shapes = ((3, 4, 4), (3, 3, 4), (3, 4, 3), (7, 4, 8, 8))
with self.session(use_gpu=True):
for shape in shapes:
x = constant_op.constant(
np.random.rand(*shape), dtype=dtypes_lib.float32)
diag_shape = shape[:-2] + (min(shape[-2:]),)
x_diag = constant_op.constant(
np.random.rand(*diag_shape), dtype=dtypes_lib.float32)
y = array_ops.matrix_set_diag(x, x_diag)
error_x = gradient_checker.compute_gradient_error(
x,
x.get_shape().as_list(), y,
y.get_shape().as_list())
self.assertLess(error_x, 1e-4)
error_x_diag = gradient_checker.compute_gradient_error(
x_diag,
x_diag.get_shape().as_list(), y,
y.get_shape().as_list())
self.assertLess(error_x_diag, 1e-4)
@test_util.run_deprecated_v1
def testGradWithNoShapeInformation(self):
with self.session(use_gpu=True) as sess:
v = array_ops.placeholder(dtype=dtypes_lib.float32)
mat = array_ops.placeholder(dtype=dtypes_lib.float32)
grad_input = array_ops.placeholder(dtype=dtypes_lib.float32)
output = array_ops.matrix_set_diag(mat, v)
grads = gradients_impl.gradients(output, [mat, v], grad_ys=grad_input)
grad_input_val = np.random.rand(3, 3).astype(np.float32)
grad_vals = sess.run(
grads,
feed_dict={
v: 2 * np.ones(3),
mat: np.ones((3, 3)),
grad_input: grad_input_val
})
self.assertAllEqual(np.diag(grad_input_val), grad_vals[1])
self.assertAllEqual(grad_input_val - np.diag(np.diag(grad_input_val)),
grad_vals[0])
class MatrixDiagPartTest(test.TestCase):
@test_util.run_deprecated_v1
def testSquare(self):
with self.session(use_gpu=True):
v = np.array([1.0, 2.0, 3.0])
mat = np.diag(v)
mat_diag = array_ops.matrix_diag_part(mat)
self.assertEqual((3,), mat_diag.get_shape())
self.assertAllEqual(mat_diag.eval(), v)
if compat.forward_compatible(2019, 7, 4):
for offset in [-2, 3]:
mat = np.diag(v, offset)
mat_diag = array_ops.matrix_diag_part(mat, k=offset)
self.assertEqual((3,), mat_diag.get_shape())
self.assertAllEqual(mat_diag.eval(), v)
# Diagonal bands.
mat, tests = square_cases()
for diags, pair in tests.items():
solution, _ = pair
mat_diag = array_ops.matrix_diag_part(mat[0], k=diags)
self.assertEqual(mat_diag.get_shape(), solution[0].shape)
self.assertAllEqual(mat_diag.eval(), solution[0])
@test_util.run_deprecated_v1
def testRectangular(self):
with self.session(use_gpu=True):
mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
mat_diag = array_ops.matrix_diag_part(mat)
self.assertAllEqual(mat_diag.eval(), np.array([1.0, 5.0]))
mat = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
mat_diag = array_ops.matrix_diag_part(mat)
self.assertAllEqual(mat_diag.eval(), np.array([1.0, 4.0]))
if compat.forward_compatible(2019, 7, 4):
# Diagonal bands.
for mat, tests in [tall_cases(), fat_cases()]:
for diags, pair in tests.items():
solution, _ = pair
mat_diag = array_ops.matrix_diag_part(mat[0], k=diags)
self.assertEqual(mat_diag.get_shape(), solution[0].shape)
self.assertAllEqual(mat_diag.eval(), solution[0])
def _testSquareBatch(self, dtype):
with self.cached_session(use_gpu=True):
v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)
mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
[[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],
[0.0, 0.0, 6.0]]]).astype(dtype)
self.assertEqual(mat_batch.shape, (2, 3, 3))
mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
self.assertEqual((2, 3), mat_batch_diag.get_shape())
self.assertAllEqual(mat_batch_diag.eval(), v_batch)
if compat.forward_compatible(2019, 7, 4):
# Diagonal bands with padding.
mat, tests = square_cases()
for padding in [0, 555, -11]:
for diags, pair in tests.items():
solution, _ = pair
mat_batch_diag = array_ops.matrix_diag_part(
mat.astype(dtype), k=diags, padding=padding)
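          # Zeros in the reference solution mark positions outside the band;
          # substitute the padding value there to build the expected output.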
mask = solution == 0
solution = (solution + padding * mask).astype(dtype)
self.assertEqual(mat_batch_diag.get_shape(), solution.shape)
self.assertAllEqual(mat_batch_diag.eval(), solution)
@test_util.run_deprecated_v1
def testSquareBatch(self):
self._testSquareBatch(np.float32)
self._testSquareBatch(np.float64)
self._testSquareBatch(np.int32)
self._testSquareBatch(np.int64)
self._testSquareBatch(np.bool)
@test_util.run_deprecated_v1
def testRectangularBatch(self):
with self.session(use_gpu=True):
v_batch = np.array([[1.0, 2.0], [4.0, 5.0]])
mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]],
[[4.0, 0.0, 0.0], [0.0, 5.0, 0.0]]])
self.assertEqual(mat_batch.shape, (2, 2, 3))
mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
self.assertEqual((2, 2), mat_batch_diag.get_shape())
self.assertAllEqual(mat_batch_diag.eval(), v_batch)
if compat.forward_compatible(2019, 7, 4):
# Diagonal bands with padding.
for padding in [0, 555, -11]:
for mat, tests in [tall_cases(), fat_cases()]:
for diags, pair in tests.items():
solution, _ = pair
mat_batch_diag = array_ops.matrix_diag_part(
mat, k=diags, padding=padding)
mask = solution == 0
solution = solution + padding * mask
self.assertEqual(mat_batch_diag.get_shape(), solution.shape)
self.assertAllEqual(mat_batch_diag.eval(), solution)
@test_util.run_deprecated_v1
def testInvalidShape(self):
with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
array_ops.matrix_diag_part(0)
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123337890") # Error messages differ
def testInvalidShapeAtEval(self):
with self.session(use_gpu=True):
v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("input must be at least 2-dim"):
array_ops.matrix_diag_part(v).eval(feed_dict={v: 0.0})
@test_util.run_deprecated_v1
def testGrad(self):
shapes = ((3, 3), (2, 3), (3, 2), (5, 3, 3))
with self.session(use_gpu=True):
for shape in shapes:
x = constant_op.constant(np.random.rand(*shape), dtype=np.float32)
y = array_ops.matrix_diag_part(x)
error = gradient_checker.compute_gradient_error(x,
x.get_shape().as_list(),
y,
y.get_shape().as_list())
self.assertLess(error, 1e-4)
class DiagTest(test.TestCase):
def _diagOp(self, diag, dtype, expected_ans, use_gpu):
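    # Checks diag() against the expected dense result and verifies that
    # diag_part() recovers the original diagonal (round trip).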
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.diag(ops.convert_to_tensor(diag.astype(dtype)))
out = self.evaluate(tf_ans)
tf_ans_inv = array_ops.diag_part(expected_ans)
inv_out = self.evaluate(tf_ans_inv)
self.assertAllClose(out, expected_ans)
self.assertAllClose(inv_out, diag)
self.assertShapeEqual(expected_ans, tf_ans)
self.assertShapeEqual(diag, tf_ans_inv)
def diagOp(self, diag, dtype, expected_ans):
self._diagOp(diag, dtype, expected_ans, False)
self._diagOp(diag, dtype, expected_ans, True)
def testEmptyTensor(self):
x = np.array([])
expected_ans = np.empty([0, 0])
self.diagOp(x, np.int32, expected_ans)
def testRankOneIntTensor(self):
x = np.array([1, 2, 3])
expected_ans = np.array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
self.diagOp(x, np.int32, expected_ans)
self.diagOp(x, np.int64, expected_ans)
def testRankOneFloatTensor(self):
x = np.array([1.1, 2.2, 3.3])
expected_ans = np.array([[1.1, 0, 0], [0, 2.2, 0], [0, 0, 3.3]])
self.diagOp(x, np.float32, expected_ans)
self.diagOp(x, np.float64, expected_ans)
def testRankOneComplexTensor(self):
for dtype in [np.complex64, np.complex128]:
x = np.array([1.1 + 1.1j, 2.2 + 2.2j, 3.3 + 3.3j], dtype=dtype)
expected_ans = np.array(
[[1.1 + 1.1j, 0 + 0j, 0 + 0j], [0 + 0j, 2.2 + 2.2j, 0 + 0j],
[0 + 0j, 0 + 0j, 3.3 + 3.3j]],
dtype=dtype)
self.diagOp(x, dtype, expected_ans)
def testRankTwoIntTensor(self):
x = np.array([[1, 2, 3], [4, 5, 6]])
expected_ans = np.array([[[[1, 0, 0], [0, 0, 0]], [[0, 2, 0], [0, 0, 0]],
[[0, 0, 3], [0, 0, 0]]],
[[[0, 0, 0], [4, 0, 0]], [[0, 0, 0], [0, 5, 0]],
[[0, 0, 0], [0, 0, 6]]]])
self.diagOp(x, np.int32, expected_ans)
self.diagOp(x, np.int64, expected_ans)
def testRankTwoFloatTensor(self):
x = np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]])
expected_ans = np.array(
[[[[1.1, 0, 0], [0, 0, 0]], [[0, 2.2, 0], [0, 0, 0]],
[[0, 0, 3.3], [0, 0, 0]]], [[[0, 0, 0], [4.4, 0, 0]],
[[0, 0, 0], [0, 5.5, 0]], [[0, 0, 0],
[0, 0, 6.6]]]])
self.diagOp(x, np.float32, expected_ans)
self.diagOp(x, np.float64, expected_ans)
def testRankTwoComplexTensor(self):
for dtype in [np.complex64, np.complex128]:
x = np.array(
[[1.1 + 1.1j, 2.2 + 2.2j, 3.3 + 3.3j],
[4.4 + 4.4j, 5.5 + 5.5j, 6.6 + 6.6j]],
dtype=dtype)
expected_ans = np.array(
[[[[1.1 + 1.1j, 0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j, 0 + 0j]], [
[0 + 0j, 2.2 + 2.2j, 0 + 0j], [0 + 0j, 0 + 0j, 0 + 0j]
], [[0 + 0j, 0 + 0j, 3.3 + 3.3j], [0 + 0j, 0 + 0j, 0 + 0j]]], [[
[0 + 0j, 0 + 0j, 0 + 0j], [4.4 + 4.4j, 0 + 0j, 0 + 0j]
], [[0 + 0j, 0 + 0j, 0 + 0j], [0 + 0j, 5.5 + 5.5j, 0 + 0j]
], [[0 + 0j, 0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j, 6.6 + 6.6j]]]],
dtype=dtype)
self.diagOp(x, dtype, expected_ans)
def testRankThreeFloatTensor(self):
x = np.array([[[1.1, 2.2], [3.3, 4.4]], [[5.5, 6.6], [7.7, 8.8]]])
expected_ans = np.array([[[[[[1.1, 0], [0, 0]], [[0, 0], [0, 0]]],
[[[0, 2.2], [0, 0]], [[0, 0], [0, 0]]]],
[[[[0, 0], [3.3, 0]], [[0, 0], [0, 0]]],
[[[0, 0], [0, 4.4]], [[0, 0], [0, 0]]]]],
[[[[[0, 0], [0, 0]], [[5.5, 0], [0, 0]]],
[[[0, 0], [0, 0]], [[0, 6.6], [0, 0]]]],
[[[[0, 0], [0, 0]], [[0, 0], [7.7, 0]]],
[[[0, 0], [0, 0]], [[0, 0], [0, 8.8]]]]]])
self.diagOp(x, np.float32, expected_ans)
self.diagOp(x, np.float64, expected_ans)
def testRankThreeComplexTensor(self):
for dtype in [np.complex64, np.complex128]:
x = np.array(
[[[1.1 + 1.1j, 2.2 + 2.2j], [3.3 + 3.3j, 4.4 + 4.4j]],
[[5.5 + 5.5j, 6.6 + 6.6j], [7.7 + 7.7j, 8.8 + 8.8j]]],
dtype=dtype)
expected_ans = np.array(
[[[[[[1.1 + 1.1j, 0 + 0j], [0 + 0j, 0 + 0j]], [[0 + 0j, 0 + 0j], [
0 + 0j, 0 + 0j
]]], [[[0 + 0j, 2.2 + 2.2j], [0 + 0j, 0 + 0j]], [[0 + 0j, 0 + 0j], [
0 + 0j, 0 + 0j
]]]], [[[[0 + 0j, 0 + 0j], [3.3 + 3.3j, 0 + 0j]], [[0 + 0j, 0 + 0j], [
0 + 0j, 0 + 0j
]]], [[[0 + 0j, 0 + 0j], [0 + 0j, 4.4 + 4.4j]], [[0 + 0j, 0 + 0j], [
0 + 0j, 0 + 0j
]]]]], [[[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], [
[5.5 + 5.5j, 0 + 0j], [0 + 0j, 0 + 0j]
]], [[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], [[0 + 0j, 6.6 + 6.6j], [
0 + 0j, 0 + 0j
]]]], [[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], [[0 + 0j, 0 + 0j], [
7.7 + 7.7j, 0 + 0j
]]], [[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]],
[[0 + 0j, 0 + 0j], [0 + 0j, 8.8 + 8.8j]]]]]],
dtype=dtype)
self.diagOp(x, dtype, expected_ans)
def testRankFourNumberTensor(self):
for dtype in [np.float32, np.float64, np.int64, np.int32]:
# Input with shape [2, 1, 2, 3]
x = np.array(
[[[[1, 2, 3], [4, 5, 6]]], [[[7, 8, 9], [10, 11, 12]]]], dtype=dtype)
# Output with shape [2, 1, 2, 3, 2, 1, 2, 3]
expected_ans = np.array(
[[[[[[[[1, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]], [
[[[0, 2, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]
], [[[[0, 0, 3], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]]], [[
[[[0, 0, 0], [4, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]
], [[[[0, 0, 0], [0, 5, 0]]], [[[0, 0, 0], [0, 0, 0]]]], [
[[[0, 0, 0], [0, 0, 6]]], [[[0, 0, 0], [0, 0, 0]]]
]]]], [[[[[[[0, 0, 0], [0, 0, 0]]], [[[7, 0, 0], [0, 0, 0]]]], [
[[[0, 0, 0], [0, 0, 0]]], [[[0, 8, 0], [0, 0, 0]]]
], [[[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 9], [0, 0, 0]]]]], [[
[[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [10, 0, 0]]]
], [[[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 11, 0]]]
], [[[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 12]]]]]]]],
dtype=dtype)
self.diagOp(x, dtype, expected_ans)
@test_util.run_deprecated_v1
def testInvalidRank(self):
with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
array_ops.diag(0.0)
class DiagPartOpTest(test.TestCase):
def setUp(self):
np.random.seed(0)
def _diagPartOp(self, tensor, dtype, expected_ans, use_gpu):
with self.cached_session(use_gpu=use_gpu):
tensor = ops.convert_to_tensor(tensor.astype(dtype))
tf_ans_inv = array_ops.diag_part(tensor)
inv_out = self.evaluate(tf_ans_inv)
self.assertAllClose(inv_out, expected_ans)
self.assertShapeEqual(expected_ans, tf_ans_inv)
def diagPartOp(self, tensor, dtype, expected_ans):
self._diagPartOp(tensor, dtype, expected_ans, False)
self._diagPartOp(tensor, dtype, expected_ans, True)
def testRankTwoFloatTensor(self):
x = np.random.rand(3, 3)
i = np.arange(3)
expected_ans = x[i, i]
self.diagPartOp(x, np.float32, expected_ans)
self.diagPartOp(x, np.float64, expected_ans)
def testRankFourFloatTensorUnknownShape(self):
x = np.random.rand(3, 3)
i = np.arange(3)
expected_ans = x[i, i]
for shape in None, (None, 3), (3, None):
with self.cached_session(use_gpu=False):
t = ops.convert_to_tensor(x.astype(np.float32))
t.set_shape(shape)
tf_ans = array_ops.diag_part(t)
out = self.evaluate(tf_ans)
self.assertAllClose(out, expected_ans)
self.assertShapeEqual(expected_ans, tf_ans)
def testRankFourFloatTensor(self):
x = np.random.rand(2, 3, 2, 3)
i = np.arange(2)[:, None]
j = np.arange(3)
expected_ans = x[i, j, i, j]
self.diagPartOp(x, np.float32, expected_ans)
self.diagPartOp(x, np.float64, expected_ans)
def testRankSixFloatTensor(self):
x = np.random.rand(2, 2, 2, 2, 2, 2)
i = np.arange(2)[:, None, None]
j = np.arange(2)[:, None]
k = np.arange(2)
expected_ans = x[i, j, k, i, j, k]
self.diagPartOp(x, np.float32, expected_ans)
self.diagPartOp(x, np.float64, expected_ans)
def testRankEightComplexTensor(self):
x = np.random.rand(2, 2, 2, 3, 2, 2, 2, 3)
i = np.arange(2)[:, None, None, None]
j = np.arange(2)[:, None, None]
k = np.arange(2)[:, None]
l = np.arange(3)
expected_ans = x[i, j, k, l, i, j, k, l]
self.diagPartOp(x, np.complex64, expected_ans)
self.diagPartOp(x, np.complex128, expected_ans)
@test_util.run_deprecated_v1
def testOddRank(self):
w = np.random.rand(2)
x = np.random.rand(2, 2, 2)
self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)
self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)
with self.assertRaises(ValueError):
array_ops.diag_part(0.0)
@test_util.run_deprecated_v1
def testUnevenDimensions(self):
w = np.random.rand(2, 5)
x = np.random.rand(2, 1, 2, 3)
self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)
self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)
class DiagGradOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testDiagGrad(self):
np.random.seed(0)
shapes = ((3,), (3, 3), (3, 3, 3))
dtypes = (dtypes_lib.float32, dtypes_lib.float64)
with self.session(use_gpu=False):
errors = []
for shape in shapes:
for dtype in dtypes:
x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype)
y = array_ops.diag(x1)
error = gradient_checker.compute_gradient_error(
x1,
x1.get_shape().as_list(), y,
y.get_shape().as_list())
tf_logging.info("error = %f", error)
self.assertLess(error, 1e-4)
class DiagGradPartOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testDiagPartGrad(self):
np.random.seed(0)
shapes = ((3, 3), (3, 3, 3, 3))
dtypes = (dtypes_lib.float32, dtypes_lib.float64)
with self.session(use_gpu=False):
errors = []
for shape in shapes:
for dtype in dtypes:
x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype)
y = array_ops.diag_part(x1)
error = gradient_checker.compute_gradient_error(
x1,
x1.get_shape().as_list(), y,
y.get_shape().as_list())
tf_logging.info("error = %f", error)
self.assertLess(error, 1e-4)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/diag_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Helpers for creating Example objects
example = example_pb2.Example
feature = feature_pb2.Feature
features = lambda d: feature_pb2.Features(feature=d)
bytes_feature = lambda v: feature(bytes_list=feature_pb2.BytesList(value=v))
int64_feature = lambda v: feature(int64_list=feature_pb2.Int64List(value=v))
float_feature = lambda v: feature(float_list=feature_pb2.FloatList(value=v))
# Helpers for creating SequenceExample objects
feature_list = lambda l: feature_pb2.FeatureList(feature=l)
feature_lists = lambda d: feature_pb2.FeatureLists(feature_list=d)
sequence_example = example_pb2.SequenceExample
def empty_sparse(dtype, shape=None):
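  """Returns (indices, values, dense_shape) for an empty SparseTensor of the given dtype."""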
if shape is None:
shape = [0]
return (np.empty(shape=(0, len(shape)), dtype=np.int64),
np.array([], dtype=dtype), np.array(shape, dtype=np.int64))
def flatten(list_of_lists):
"""Flatten one level of nesting."""
return itertools.chain.from_iterable(list_of_lists)
def flatten_values_tensors_or_sparse(tensors_list):
"""Flatten each SparseTensor object into 3 Tensors for session.run()."""
return list(
flatten([[v.indices, v.values, v.dense_shape] if isinstance(
v, sparse_tensor.SparseTensor) else [v] for v in tensors_list]))
def _compare_output_to_expected(tester, dict_tensors, expected_tensors,
flat_output):
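  """Compares the flattened session.run() outputs against expected_tensors."""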
tester.assertEqual(set(dict_tensors.keys()), set(expected_tensors.keys()))
i = 0 # Index into the flattened output of session.run()
for k, v in dict_tensors.items():
expected_v = expected_tensors[k]
tf_logging.info("Comparing key: %s", k)
if isinstance(v, sparse_tensor.SparseTensor):
# Three outputs for SparseTensor : indices, values, shape.
tester.assertEqual([k, len(expected_v)], [k, 3])
tester.assertAllEqual(expected_v[0], flat_output[i])
tester.assertAllEqual(expected_v[1], flat_output[i + 1])
tester.assertAllEqual(expected_v[2], flat_output[i + 2])
i += 3
else:
# One output for standard Tensor.
tester.assertAllEqual(expected_v, flat_output[i])
i += 1
class ParseExampleTest(test.TestCase):
def _test(self, kwargs, expected_values=None, expected_err=None):
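    # Runs parse_single_example with `kwargs` and checks either the expected
    # values or the expected error.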
with self.cached_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
out = parsing_ops.parse_single_example(**kwargs)
sess.run(flatten_values_tensors_or_sparse(out.values()))
return
else:
# Returns dict w/ Tensors and SparseTensors.
out = parsing_ops.parse_single_example(**kwargs)
# Also include a test with the example names specified to retain
# code coverage of the unfused version, and ensure that the two
# versions produce the same results.
out_with_example_name = parsing_ops.parse_single_example(
example_names="name", **kwargs)
for result_dict in [out, out_with_example_name]:
result = flatten_values_tensors_or_sparse(result_dict.values())
# Check values.
tf_result = self.evaluate(result)
_compare_output_to_expected(self, result_dict, expected_values,
tf_result)
for k, f in kwargs["features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(tuple(out[k].get_shape().as_list()), f.shape)
elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
tuple(out[k].indices.get_shape().as_list()), (None, 1))
self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(out[k].dense_shape.get_shape().as_list()), (1,))
@test_util.run_deprecated_v1
def testEmptySerializedWithAllDefaults(self):
sparse_name = "st_a"
a_name = "a"
b_name = "b"
c_name = "c:has_a_tricky_name"
a_default = [0, 42, 0]
b_default = np.random.rand(3, 3).astype(bytes)
c_default = np.random.rand(2).astype(np.float32)
expected_st_a = ( # indices, values, shape
np.empty((0, 1), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # sp_a is DT_INT64
np.array([0], dtype=np.int64)) # max_elems = 0
expected_output = {
sparse_name: expected_st_a,
a_name: np.array([a_default]),
b_name: np.array(b_default),
c_name: np.array(c_default),
}
self._test({
"serialized": ops.convert_to_tensor(""),
"features": {
sparse_name:
parsing_ops.VarLenFeature(dtypes.int64),
a_name:
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
b_name:
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
c_name:
parsing_ops.FixedLenFeature(
(2,), dtypes.float32, default_value=c_default),
}
}, expected_output)
def testEmptySerializedWithoutDefaultsShouldFail(self):
input_features = {
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=[0, 42, 0]),
"b":
parsing_ops.FixedLenFeature(
(3, 3),
dtypes.string,
default_value=np.random.rand(3, 3).astype(bytes)),
# Feature "c" is missing a default, this gap will cause failure.
"c":
parsing_ops.FixedLenFeature(
(2,), dtype=dtypes.float32),
}
    # Edge case: the key is present but the feature value is empty.
original = example(features=features({"c": feature()}))
self._test(
{
"serialized": original.SerializeToString(),
"features": input_features,
},
expected_err=(errors_impl.OpError,
"Feature: c \\(data type: float\\) is required"))
# Standard case of missing key and value.
self._test(
{
"serialized": "",
"features": input_features,
},
expected_err=(errors_impl.OpError,
"Feature: c \\(data type: float\\) is required"))
def testDenseNotMatchingShapeShouldFail(self):
original = example(features=features({
"a": float_feature([-1, -1]),
}))
serialized = original.SerializeToString()
self._test(
{
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a": parsing_ops.FixedLenFeature((1, 3), dtypes.float32)
}
},
# TODO(mrry): Consider matching the `io.parse_example()` error message.
expected_err=(errors_impl.OpError, "Key: a."))
def testDenseDefaultNoShapeShouldFail(self):
original = example(features=features({
"a": float_feature([1, 1, 3]),
}))
serialized = original.SerializeToString()
self._test(
{
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a": parsing_ops.FixedLenFeature(None, dtypes.float32)
}
},
expected_err=(ValueError, "Missing shape for feature a"))
@test_util.run_deprecated_v1
def testSerializedContainingSparse(self):
original = [
example(features=features({
"st_c": float_feature([3, 4])
})),
example(features=features({
"st_c": float_feature([]), # empty float list
})),
example(features=features({
"st_d": feature(), # feature with nothing in it
})),
example(features=features({
"st_c": float_feature([1, 2, -1]),
"st_d": bytes_feature([b"hi"])
}))
]
expected_outputs = [{
"st_c": (np.array([[0], [1]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32),
np.array([2], dtype=np.int64)),
"st_d":
empty_sparse(bytes)
}, {
"st_c": empty_sparse(np.float32),
"st_d": empty_sparse(bytes)
}, {
"st_c": empty_sparse(np.float32),
"st_d": empty_sparse(bytes)
}, {
"st_c": (np.array([[0], [1], [2]], dtype=np.int64),
np.array([1.0, 2.0, -1.0], dtype=np.float32),
np.array([3], dtype=np.int64)),
"st_d": (np.array([[0]], dtype=np.int64), np.array(["hi"], dtype=bytes),
np.array([1], dtype=np.int64))
}]
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
"st_c": parsing_ops.VarLenFeature(dtypes.float32),
"st_d": parsing_ops.VarLenFeature(dtypes.string)
},
}, expected_output)
def testSerializedContainingSparseFeature(self):
original = [
example(features=features({
"val": float_feature([3, 4]),
"idx": int64_feature([5, 10])
})),
example(features=features({
"val": float_feature([]), # empty float list
"idx": int64_feature([])
})),
example(features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(features=features({
"val": float_feature([1, 2, -1]),
"idx":
int64_feature([0, 9, 3]) # unsorted
}))
]
expected_outputs = [{
"sp": (np.array([[5], [10]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32),
np.array([13], dtype=np.int64))
}, {
"sp": empty_sparse(np.float32, shape=[13])
}, {
"sp": empty_sparse(np.float32, shape=[13])
}, {
"sp": (np.array([[0], [3], [9]], dtype=np.int64),
np.array([1.0, -1.0, 2.0], dtype=np.float32),
np.array([13], dtype=np.int64))
}]
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.float32,
[13])
}
}, expected_output)
def testSerializedContainingSparseFeatureReuse(self):
original = [
example(features=features({
"val1": float_feature([3, 4]),
"val2": float_feature([5, 6]),
"idx": int64_feature([5, 10])
})),
example(features=features({
"val1": float_feature([]), # empty float list
"idx": int64_feature([])
})),
]
expected_outputs = [{
"sp1": (np.array([[5], [10]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32),
np.array([13], dtype=np.int64)),
"sp2": (np.array([[5], [10]], dtype=np.int64),
np.array([5.0, 6.0], dtype=np.float32),
np.array([7], dtype=np.int64))
}, {
"sp1": empty_sparse(np.float32, shape=[13]),
"sp2": empty_sparse(np.float32, shape=[7])
}]
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
"sp1":
parsing_ops.SparseFeature("idx", "val1", dtypes.float32, 13),
"sp2":
parsing_ops.SparseFeature(
"idx",
"val2",
dtypes.float32,
size=7,
already_sorted=True)
}
}, expected_output)
def testSerializedContaining3DSparseFeature(self):
original = [
example(features=features({
"val": float_feature([3, 4]),
"idx0": int64_feature([5, 10]),
"idx1": int64_feature([0, 2]),
})),
example(features=features({
"val": float_feature([]), # empty float list
"idx0": int64_feature([]),
"idx1": int64_feature([]),
})),
example(features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(features=features({
"val": float_feature([1, 2, -1]),
"idx0": int64_feature([0, 9, 3]), # unsorted
"idx1": int64_feature([1, 0, 2]),
}))
]
expected_outputs = [{
"sp": (np.array([[5, 0], [10, 2]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32),
np.array([13, 3], dtype=np.int64))
}, {
"sp": empty_sparse(np.float32, shape=[13, 3])
}, {
"sp": empty_sparse(np.float32, shape=[13, 3])
}, {
"sp": (np.array([[0, 1], [3, 2], [9, 0]], dtype=np.int64),
np.array([1.0, -1.0, 2.0], dtype=np.float32),
np.array([13, 3], dtype=np.int64))
}]
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
"sp":
parsing_ops.SparseFeature(["idx0", "idx1"], "val",
dtypes.float32, [13, 3])
}
}, expected_output)
def testSerializedContainingDense(self):
aname = "a"
bname = "b*has+a:tricky_name"
original = [
example(features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
})), example(features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b""]),
}))
]
expected_outputs = [{
aname: np.array([1, 1], dtype=np.float32).reshape(1, 2, 1),
bname: np.array(["b0_str"], dtype=bytes).reshape(1, 1, 1, 1)
}, {
aname: np.array([-1, -1], dtype=np.float32).reshape(1, 2, 1),
bname: np.array([""], dtype=bytes).reshape(1, 1, 1, 1)
}]
for proto, expected_output in zip(original, expected_outputs):
# No defaults, values required
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
aname:
parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenFeature(
(1, 1, 1, 1), dtype=dtypes.string),
}
}, expected_output)
  # This test is identical to the previous one except
  # for the way 'serialized' is created.
def testSerializedContainingDenseWithConcat(self):
aname = "a"
bname = "b*has+a:tricky_name"
    # TODO(lew): A feature appearing twice should be an error in the future.
original = [
(example(features=features({
aname: float_feature([10, 10]),
})), example(features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
}))),
(
example(features=features({
bname: bytes_feature([b"b100"]),
})),
example(features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b"b1"]),
})),),
]
expected_outputs = [{
aname: np.array([1, 1], dtype=np.float32).reshape(1, 2, 1),
bname: np.array(["b0_str"], dtype=bytes).reshape(1, 1, 1, 1)
}, {
aname: np.array([-1, -1], dtype=np.float32).reshape(1, 2, 1),
bname: np.array(["b1"], dtype=bytes).reshape(1, 1, 1, 1)
}]
for (m, n), expected_output in zip(original, expected_outputs):
# No defaults, values required
self._test({
"serialized":
ops.convert_to_tensor(
m.SerializeToString() + n.SerializeToString()),
"features": {
aname:
parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenFeature(
(1, 1, 1, 1), dtype=dtypes.string),
}
}, expected_output)
def testSerializedContainingDenseScalar(self):
original = [
example(features=features({
"a": float_feature([1]),
})), example(features=features({}))
]
expected_outputs = [{
"a": np.array([1], dtype=np.float32)
}, {
"a": np.array([-1], dtype=np.float32)
}]
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
"a":
parsing_ops.FixedLenFeature(
(1,), dtype=dtypes.float32, default_value=-1),
}
}, expected_output)
def testSerializedContainingDenseWithDefaults(self):
original = [
example(features=features({
"a": float_feature([1, 1]),
})),
example(features=features({
"b": bytes_feature([b"b1"]),
})),
example(features=features({
"b": feature()
})),
]
expected_outputs = [{
"a": np.array([1, 1], dtype=np.float32).reshape(1, 2, 1),
"b": np.array("tmp_str", dtype=bytes).reshape(1, 1, 1, 1)
}, {
"a": np.array([3, -3], dtype=np.float32).reshape(1, 2, 1),
"b": np.array("b1", dtype=bytes).reshape(1, 1, 1, 1)
}, {
"a": np.array([3, -3], dtype=np.float32).reshape(1, 2, 1),
"b": np.array("tmp_str", dtype=bytes).reshape(1, 1, 1, 1)
}]
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
"a":
parsing_ops.FixedLenFeature(
(1, 2, 1),
dtype=dtypes.float32,
default_value=[3.0, -3.0]),
"b":
parsing_ops.FixedLenFeature(
(1, 1, 1, 1),
dtype=dtypes.string,
default_value="tmp_str"),
}
}, expected_output)
@test_util.run_deprecated_v1
def testSerializedContainingSparseAndSparseFeatureAndDenseWithNoDefault(self):
original = [
example(features=features({
"c": float_feature([3, 4]),
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3])
})), example(features=features({
"c": float_feature([1, 2]),
"val": bytes_feature([b"c"]),
"idx": int64_feature([7])
}))
]
a_default = np.array([[1, 2, 3]], dtype=np.int64)
b_default = np.random.rand(3, 3).astype(bytes)
expected_st_a = empty_sparse(np.int64)
expected_outputs = [{
"st_a":
expected_st_a,
"sp": (np.array([[0], [3]], dtype=np.int64),
np.array(["a", "b"], dtype=bytes), np.array(
[13], dtype=np.int64)),
"a":
a_default,
"b":
b_default,
"c":
np.array([3, 4], dtype=np.float32)
}, {
"st_a":
expected_st_a,
"sp": (np.array([[7]], dtype=np.int64), np.array(["c"], dtype=bytes),
np.array([13], dtype=np.int64)),
"a":
a_default,
"b":
b_default,
"c":
np.array([1, 2], dtype=np.float32)
}]
for proto, expected_output in zip(original, expected_outputs):
self._test(
{
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"sp":
parsing_ops.SparseFeature("idx", "val", dtypes.string, 13
),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature((2,), dtypes.float32),
}
},
expected_output)
@test_util.run_deprecated_v1
def testSerializedContainingSparseAndSparseFeatureWithReuse(self):
original = [
example(features=features({
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3])
})), example(features=features({
"val": bytes_feature([b"c", b"d"]),
"idx": int64_feature([7, 1])
}))
]
expected_outputs = [{
"idx": (np.array([[0], [1]], dtype=np.int64),
np.array([0, 3], dtype=np.int64), np.array([2],
dtype=np.int64)),
"sp": (np.array([[0], [3]], dtype=np.int64),
np.array(["a", "b"], dtype=bytes), np.array(
[13], dtype=np.int64))
},
{
"idx": (np.array([[0], [1]], dtype=np.int64),
np.array([7, 1], dtype=np.int64),
np.array([2], dtype=np.int64)),
"sp": (np.array([[1], [7]], dtype=np.int64),
np.array(["d", "c"], dtype=bytes),
np.array([13], dtype=np.int64))
}]
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
"idx":
parsing_ops.VarLenFeature(dtypes.int64),
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.string, [13]
),
}
}, expected_output)
@test_util.run_deprecated_v1
def testSerializedContainingVarLenDense(self):
aname = "a"
bname = "b"
cname = "c"
dname = "d"
original = [
example(features=features({
cname: int64_feature([2]),
})),
example(features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str", b"b1_str"]),
})),
example(features=features({
aname: float_feature([-1, -1, 2, 2]),
bname: bytes_feature([b"b1"]),
})),
example(features=features({
aname: float_feature([]),
cname: int64_feature([3]),
})),
]
expected_outputs = [
{
aname: np.empty(shape=(0, 2, 1), dtype=np.int64),
bname: np.empty(shape=(0, 1, 1, 1), dtype=bytes),
cname: np.array([2], dtype=np.int64),
dname: np.empty(shape=(0,), dtype=bytes)
},
{
aname:
np.array([[[1], [1]]], dtype=np.float32),
bname:
np.array(["b0_str", "b1_str"], dtype=bytes).reshape(2, 1, 1, 1),
cname:
np.empty(shape=(0,), dtype=np.int64),
dname:
np.empty(shape=(0,), dtype=bytes)
},
{
aname: np.array([[[-1], [-1]], [[2], [2]]], dtype=np.float32),
bname: np.array(["b1"], dtype=bytes).reshape(1, 1, 1, 1),
cname: np.empty(shape=(0,), dtype=np.int64),
dname: np.empty(shape=(0,), dtype=bytes)
},
{
aname: np.empty(shape=(0, 2, 1), dtype=np.int64),
bname: np.empty(shape=(0, 1, 1, 1), dtype=bytes),
cname: np.array([3], dtype=np.int64),
dname: np.empty(shape=(0,), dtype=bytes)
},
]
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=True),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}
}, expected_output)
# Test with padding values.
# NOTE(mrry): Since we parse a single example at a time, the fixed-length
# sequences will not be padded, and the padding value will be ignored.
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=True),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}
}, expected_output)
# Change number of required values so the inputs are not a
# multiple of this size.
self._test(
{
"serialized":
ops.convert_to_tensor(original[2].SerializeToString()),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
}
},
# TODO(mrry): Consider matching the `io.parse_example()` error message.
expected_err=(errors_impl.OpError, "Key: b."))
self._test(
{
"serialized": ops.convert_to_tensor(""),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1),
dtype=dtypes.float32,
allow_missing=True,
default_value=[]),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(ValueError,
"Cannot reshape a tensor with 0 elements to shape"))
self._test(
{
"serialized": ops.convert_to_tensor(""),
"features": {
aname:
parsing_ops.FixedLenFeature(
(None, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(ValueError,
"First dimension of shape for feature a unknown. "
"Consider using FixedLenSequenceFeature."))
self._test(
{
"serialized": ops.convert_to_tensor(""),
"features": {
cname:
parsing_ops.FixedLenFeature(
(1, None), dtype=dtypes.int64, default_value=[[1]]),
}
},
expected_err=(ValueError,
"All dimensions of shape for feature c need to be known "
r"but received \(1, None\)."))
self._test(
{
"serialized": ops.convert_to_tensor(""),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=False),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(ValueError,
"Unsupported: FixedLenSequenceFeature requires "
"allow_missing to be True."))
class ParseSingleExampleTest(test.TestCase):
def _test(self, kwargs, expected_values=None, expected_err=None):
with self.cached_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
out = parsing_ops.parse_single_example(**kwargs)
sess.run(flatten_values_tensors_or_sparse(out.values()))
else:
# Returns dict w/ Tensors and SparseTensors.
out = parsing_ops.parse_single_example(**kwargs)
# Check values.
tf_result = sess.run(flatten_values_tensors_or_sparse(out.values()))
_compare_output_to_expected(self, out, expected_values, tf_result)
# Check shapes.
for k, f in kwargs["features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(tuple(out[k].get_shape()),
tensor_shape.as_shape(f.shape))
elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
tuple(out[k].indices.get_shape().as_list()), (None, 1))
self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(out[k].dense_shape.get_shape().as_list()), (1,))
@test_util.run_deprecated_v1
def testSingleExampleWithSparseAndSparseFeatureAndDense(self):
original = example(features=features({
"c": float_feature([3, 4]),
"d": float_feature([0.0, 1.0]),
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3]),
"st_a": float_feature([3.0, 4.0])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array(
[[0], [1]], dtype=np.int64), # indices
np.array(
[3.0, 4.0], dtype=np.float32), # values
np.array(
[2], dtype=np.int64)) # shape: max_values = 2
expected_sp = ( # indices, values, shape
np.array(
[[0], [3]], dtype=np.int64), np.array(
["a", "b"], dtype="|S"), np.array(
[13], dtype=np.int64)) # max_values = 13
a_default = [1, 2, 3]
b_default = np.random.rand(3, 3).astype(bytes)
expected_output = {
"st_a": expected_st_a,
"sp": expected_sp,
"a": [a_default],
"b": b_default,
"c": np.array([3, 4], dtype=np.float32),
"d": np.array([0.0, 1.0], dtype=np.float32),
}
self._test(
{
"serialized":
ops.convert_to_tensor(serialized),
"features": {
"st_a":
parsing_ops.VarLenFeature(dtypes.float32),
"sp":
parsing_ops.SparseFeature(
["idx"], "val", dtypes.string, [13]),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature(2, dtypes.float32),
"d":
parsing_ops.FixedLenSequenceFeature([],
dtypes.float32,
allow_missing=True)
}
},
expected_output)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/parse_single_example_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.gen_linalg_ops.matrix_logarithm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class LogarithmOpTest(test.TestCase):
def _verifyLogarithm(self, x, np_type):
inp = x.astype(np_type)
with test_util.use_gpu():
# Verify that expm(logm(A)) == A.
tf_ans = linalg_impl.matrix_exponential(
gen_linalg_ops.matrix_logarithm(inp))
out = self.evaluate(tf_ans)
self.assertAllClose(inp, out, rtol=1e-4, atol=1e-3)
def _verifyLogarithmComplex(self, x):
for np_type in [np.complex64, np.complex128]:
self._verifyLogarithm(x, np_type)
def _makeBatch(self, matrix1, matrix2):
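    # Stacks the two matrices and tiles them into a larger batch so the
    # batched matrix_logarithm path is exercised.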
matrix_batch = np.concatenate(
[np.expand_dims(matrix1, 0),
np.expand_dims(matrix2, 0)])
matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
return matrix_batch
@test_util.run_v1_only("b/120545219")
def testNonsymmetric(self):
# 2x2 matrices
matrix1 = np.array([[1., 2.], [3., 4.]])
matrix2 = np.array([[1., 3.], [3., 5.]])
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyLogarithmComplex(matrix1)
self._verifyLogarithmComplex(matrix2)
# Complex batch
self._verifyLogarithmComplex(self._makeBatch(matrix1, matrix2))
@test_util.run_v1_only("b/120545219")
def testSymmetricPositiveDefinite(self):
# 2x2 matrices
matrix1 = np.array([[2., 1.], [1., 2.]])
matrix2 = np.array([[3., -1.], [-1., 3.]])
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyLogarithmComplex(matrix1)
self._verifyLogarithmComplex(matrix2)
# Complex batch
self._verifyLogarithmComplex(self._makeBatch(matrix1, matrix2))
@test_util.run_v1_only("b/120545219")
def testNonSquareMatrix(self):
    # Attempting the logarithm of a non-square matrix should raise an error.
with self.assertRaises(ValueError):
gen_linalg_ops.matrix_logarithm(
np.array([[1., 2., 3.], [3., 4., 5.]], dtype=np.complex64))
@test_util.run_v1_only("b/120545219")
def testWrongDimensions(self):
# The input to the logarithm should be at least a 2-dimensional tensor.
tensor3 = constant_op.constant([1., 2.], dtype=dtypes.complex64)
with self.assertRaises(ValueError):
gen_linalg_ops.matrix_logarithm(tensor3)
@test_util.run_v1_only("b/120545219")
def testEmpty(self):
self._verifyLogarithmComplex(np.empty([0, 2, 2], dtype=np.complex64))
self._verifyLogarithmComplex(np.empty([2, 0, 0], dtype=np.complex64))
@test_util.run_v1_only("b/120545219")
def testRandomSmallAndLargeComplex64(self):
np.random.seed(42)
for batch_dims in [(), (1,), (3,), (2, 2)]:
for size in 8, 31, 32:
shape = batch_dims + (size, size)
matrix = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape)).reshape(shape).astype(np.complex64)
self._verifyLogarithmComplex(matrix)
@test_util.run_v1_only("b/120545219")
def testRandomSmallAndLargeComplex128(self):
np.random.seed(42)
for batch_dims in [(), (1,), (3,), (2, 2)]:
for size in 8, 31, 32:
shape = batch_dims + (size, size)
matrix = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape)).reshape(shape).astype(np.complex128)
self._verifyLogarithmComplex(matrix)
@test_util.run_v1_only("b/120545219")
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
matrix1 = math_ops.cast(
random_ops.random_normal([5, 5], seed=42), dtypes.complex64)
matrix2 = math_ops.cast(
random_ops.random_normal([5, 5], seed=42), dtypes.complex64)
logm1 = gen_linalg_ops.matrix_logarithm(matrix1)
logm2 = gen_linalg_ops.matrix_logarithm(matrix2)
logm = self.evaluate([logm1, logm2])
self.assertAllEqual(logm[0], logm[1])
class MatrixLogarithmBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
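    # Identity plus a small constant matrix: well conditioned, so the matrix
    # logarithm in the benchmark is well defined.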
matrix = np.ones(shape).astype(np.complex64) / (
2.0 * n) + np.diag(np.ones(n).astype(np.complex64))
return variables.Variable(np.tile(matrix, batch_shape + (1, 1)))
def benchmarkMatrixLogarithmOp(self):
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = self._GenerateMatrix(shape)
logm = gen_linalg_ops.matrix_logarithm(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(logm),
min_iters=25,
name="matrix_logarithm_cpu_{shape}".format(
shape=shape))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/matrix_logarithm_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import compat
class VariablesTestCase(test.TestCase, parameterized.TestCase):
@test_util.run_deprecated_v1
def testDistributeStrategy(self):
v = variables.VariableV1(0.0)
self.assertIsNone(v._distribute_strategy)
@test_util.run_v1_only("b/120545219")
def testInitialization(self):
with self.cached_session():
var0 = variables.VariableV1(0.0)
self.assertEqual("Variable:0", var0.name)
self.assertEqual("Variable", var0._shared_name)
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.shape)
var1 = variables.VariableV1(1.1)
self.assertEqual("Variable_1:0", var1.name)
self.assertEqual("Variable_1", var1._shared_name)
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.shape)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(var0)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(var1)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var0))
self.assertAllClose(1.1, self.evaluate(var1))
@test_util.run_v1_only("b/120545219")
def testInitializationOrder(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([3, 6]), name="rnd")
self.assertEqual("rnd:0", rnd.name)
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.shape)
dep = variables.Variable(rnd.initialized_value(), name="dep")
self.assertEqual("dep:0", dep.name)
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.shape)
      # Currently we have to set the shape manually for the Add op.
added_val = rnd.initialized_value() + dep.initialized_value() + 2.0
added_val.set_shape(rnd.get_shape())
depdep = variables.Variable(added_val, name="depdep")
self.assertEqual("depdep:0", depdep.name)
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.shape)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(rnd), self.evaluate(dep))
self.assertAllClose(
self.evaluate(rnd) + self.evaluate(dep) + 2.0, self.evaluate(depdep))
@test_util.run_deprecated_v1
def testCyclicInitializer(self):
with self.cached_session():
cyclic = control_flow_ops.while_loop(
cond=lambda i: i < 10,
body=lambda i: i + 1,
loop_vars=(constant_op.constant(0),))
initial_value = variables._try_guard_against_uninitialized_dependencies(
"test", cyclic)
self.assertIs(initial_value, cyclic)
def testIterable(self):
with self.assertRaisesRegexp(TypeError, "not iterable"):
for _ in variables.Variable(0.0):
pass
with self.assertRaisesRegexp(TypeError, "not iterable"):
for _ in variables.Variable([0.0, 1.0]):
pass
@test_util.run_deprecated_v1
def testAssignments(self):
with self.cached_session():
var = variables.Variable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.assertAllClose(1.0, self.evaluate(plus_one))
self.assertAllClose(1.0, self.evaluate(var))
self.assertAllClose(-1.0, self.evaluate(minus_one))
self.assertAllClose(-1.0, self.evaluate(var))
self.assertAllClose(4.0, self.evaluate(four))
self.assertAllClose(4.0, self.evaluate(var))
@test_util.run_deprecated_v1
def testResourceAssignments(self):
with self.session(use_gpu=True):
var = resource_variable_ops.ResourceVariable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.evaluate(plus_one)
self.assertAllClose(1.0, self.evaluate(var))
self.evaluate(minus_one)
self.assertAllClose(-1.0, self.evaluate(var))
self.evaluate(four)
self.assertAllClose(4.0, self.evaluate(var))
def testAssignDifferentShapesEagerNotAllowed(self):
with context.eager_mode():
var = variables.Variable(np.zeros(shape=[1, 1]))
with self.assertRaisesRegexp(ValueError,
"Shapes.*and.*are incompatible"):
var.assign(np.zeros(shape=[2, 2]))
@test_util.run_in_graph_and_eager_modes
def testAssignDifferentShapesAllowed(self):
var = variables.Variable(np.zeros(shape=[1, 1]),
shape=tensor_shape.TensorShape(None))
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(np.zeros(shape=[1, 1]), var.read_value())
self.evaluate(var.assign(np.zeros(shape=[2, 2])))
self.assertAllEqual(np.zeros(shape=[2, 2]), var.read_value())
def testZeroSizeStringAssign(self):
with self.cached_session() as sess:
array = variables.VariableV1(
initial_value=array_ops.zeros((0,), dtype=dtypes.string),
name="foo",
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
self.evaluate(variables.local_variables_initializer())
old_value = array.value()
copy_op = array.assign(old_value)
self.assertEqual([], list(self.evaluate(copy_op)))
def _countUpToTest(self, dtype):
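    # count_up_to returns the variable's value before each increment and
    # raises once the limit (3 here) is reached.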
with self.cached_session():
zero = constant_op.constant(0, dtype=dtype)
var = variables.Variable(zero)
count_up_to = var.count_up_to(3)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(var))
self.assertEqual(0, self.evaluate(count_up_to))
self.assertEqual(1, self.evaluate(var))
self.assertEqual(1, self.evaluate(count_up_to))
self.assertEqual(2, self.evaluate(var))
self.assertEqual(2, self.evaluate(count_up_to))
self.assertEqual(3, self.evaluate(var))
with self.assertRaisesOpError("Reached limit of 3"):
self.evaluate(count_up_to)
self.assertEqual(3, self.evaluate(var))
with self.assertRaisesOpError("Reached limit of 3"):
self.evaluate(count_up_to)
self.assertEqual(3, self.evaluate(var))
@test_util.run_deprecated_v1
def testCountUpToInt32(self):
self._countUpToTest(dtypes.int32)
@test_util.run_deprecated_v1
def testCountUpToInt64(self):
self._countUpToTest(dtypes.int64)
@test_util.run_v1_only("b/120545219")
def testControlDepsNone(self):
with self.cached_session():
c = constant_op.constant(1.0)
with ops.control_dependencies([c]):
        # d gets the control dep.
        d = constant_op.constant(2.0)
        # Variables do not.
var_x = variables.VariableV1(2.0)
self.assertEqual([c.op], d.op.control_inputs)
self.assertEqual([], var_x.initializer.control_inputs)
self.assertEqual([], var_x.value().op.control_inputs)
self.assertEqual([], var_x._ref().op.control_inputs) # pylint: disable=protected-access
@test_util.run_v1_only("b/120545219")
def testControlFlow(self):
with self.cached_session() as sess:
v0 = variables.Variable(0, name="v0")
var_dict = {}
      # Create a variable in each of the cond clauses.
def var_in_then_clause():
v1 = variables.Variable(1, name="v1")
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variables.Variable(2, name="v2")
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
self.evaluate(v1.initializer)
self.assertEqual([1], self.evaluate(v1))
self.evaluate(v2.initializer)
self.assertEqual([2], self.evaluate(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
self.evaluate(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
self.evaluate(add)
# If we initialize v0 we should be able to run 'add'.
self.evaluate(v0.initializer)
self.evaluate(add)
@test_util.run_v1_only("b/120545219")
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = variables.Variable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegexp(ValueError, "inside a control-flow"):
control_flow_ops.while_loop(cond, body, [0, 0])
@test_util.run_deprecated_v1
def testUseVariableAsTensor(self):
with self.cached_session():
var_x = variables.Variable(2.0)
var_y = variables.Variable(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(2.0, self.evaluate(var_x))
self.assertAllClose(3.0, self.evaluate(var_y))
self.assertAllClose(5.0, self.evaluate(math_ops.add(var_x, var_y)))
@test_util.run_deprecated_v1
def testZeroSizeVarSameAsConst(self):
with self.cached_session():
zero_size_var = variables.Variable(array_ops.zeros([0, 2]))
zero_size_const = array_ops.ones([2, 0])
variable_mul = math_ops.matmul(zero_size_const, zero_size_var)
const_mul = math_ops.matmul(
zero_size_const, zero_size_const, transpose_b=True)
self.evaluate(variables.global_variables_initializer())
variable_output = self.evaluate(variable_mul)
self.assertAllClose(self.evaluate(const_mul), variable_output)
self.assertAllClose([[0., 0.], [0., 0.]], variable_output)
@test_util.run_deprecated_v1
def testCachingDevice(self):
with self.cached_session():
var = variables.Variable(2.0)
self.assertEqual(var.device, var.value().device)
self.assertEqual(var.device, var.initialized_value().device)
var_cached = variables.Variable(2.0, caching_device="/job:foo")
self.assertFalse(var_cached.device.startswith("/job:foo"))
self.assertTrue(var_cached.value().device.startswith("/job:foo"))
@test_util.run_deprecated_v1
def testCollections(self):
with self.cached_session():
var_x = variables.VariableV1(2.0)
var_y = variables.VariableV1(2.0, trainable=False)
var_z = variables.VariableV1(2.0, trainable=True)
var_t = variables.VariableV1(
2.0,
trainable=True,
collections=[
ops.GraphKeys.TRAINABLE_VARIABLES, ops.GraphKeys.GLOBAL_VARIABLES
])
self.assertEqual([var_x, var_y, var_z, var_t],
variables.global_variables())
self.assertEqual([var_x, var_z, var_t], variables.trainable_variables())
@test_util.run_deprecated_v1
def testCollectionsWithScope(self):
with self.cached_session():
with ops.name_scope("scope_1"):
var_x = variables.VariableV1(2.0)
with ops.name_scope("scope_2"):
var_y = variables.VariableV1(2.0)
self.assertEqual([var_x, var_y], variables.global_variables())
self.assertEqual([var_x], variables.global_variables("scope_1"))
self.assertEqual([var_y], variables.global_variables("scope_2"))
self.assertEqual([var_x, var_y], variables.trainable_variables())
self.assertEqual([var_x], variables.trainable_variables("scope_1"))
self.assertEqual([var_y], variables.trainable_variables("scope_2"))
def testOperatorWrapping(self):
for attr in functools.WRAPPER_ASSIGNMENTS:
self.assertEqual(
getattr(variables.Variable.__add__, attr),
getattr(ops.Tensor.__add__, attr))
@test_util.run_deprecated_v1
def testOperators(self):
with self.cached_session():
var_f = variables.Variable([2.0])
add = var_f + 0.0
radd = 1.0 + var_f
sub = var_f - 1.0
rsub = 1.0 - var_f
mul = var_f * 10.0
rmul = 10.0 * var_f
div = var_f / 10.0
rdiv = 10.0 / var_f
lt = var_f < 3.0
rlt = 3.0 < var_f
le = var_f <= 2.0
rle = 2.0 <= var_f
gt = var_f > 3.0
rgt = 3.0 > var_f
ge = var_f >= 2.0
rge = 2.0 >= var_f
neg = -var_f
abs_v = abs(var_f)
var_i = variables.Variable([20])
mod = var_i % 7
rmod = 103 % var_i
var_b = variables.Variable([True, False])
and_v = operator.and_(var_b, [True, True])
or_v = operator.or_(var_b, [False, True])
xor_v = operator.xor(var_b, [False, False])
invert_v = ~var_b
rnd = np.random.rand(4, 4).astype("f")
var_t = variables.Variable(rnd)
slice_v = var_t[2, 0:0]
var_m = variables.Variable([[2.0, 3.0]])
matmul = var_m.__matmul__([[10.0], [20.0]])
rmatmul = var_m.__rmatmul__([[10.0], [20.0]])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([2.0], self.evaluate(add))
self.assertAllClose([3.0], self.evaluate(radd))
self.assertAllClose([1.0], self.evaluate(sub))
self.assertAllClose([-1.0], self.evaluate(rsub))
self.assertAllClose([20.0], self.evaluate(mul))
self.assertAllClose([20.0], self.evaluate(rmul))
self.assertAllClose([0.2], self.evaluate(div))
self.assertAllClose([5.0], self.evaluate(rdiv))
self.assertAllClose([-2.0], self.evaluate(neg))
self.assertAllClose([2.0], self.evaluate(abs_v))
self.assertAllClose([True], self.evaluate(lt))
self.assertAllClose([False], self.evaluate(rlt))
self.assertAllClose([True], self.evaluate(le))
self.assertAllClose([True], self.evaluate(rle))
self.assertAllClose([False], self.evaluate(gt))
self.assertAllClose([True], self.evaluate(rgt))
self.assertAllClose([True], self.evaluate(ge))
self.assertAllClose([True], self.evaluate(rge))
self.assertAllClose([6], self.evaluate(mod))
self.assertAllClose([3], self.evaluate(rmod))
self.assertAllClose([True, False], self.evaluate(and_v))
self.assertAllClose([True, True], self.evaluate(or_v))
self.assertAllClose([True, False], self.evaluate(xor_v))
self.assertAllClose([False, True], self.evaluate(invert_v))
self.assertAllClose(rnd[2, 0:0], self.evaluate(slice_v))
self.assertAllClose([[80.0]], self.evaluate(matmul))
self.assertAllClose([[20.0, 30.0], [40.0, 60.0]], self.evaluate(rmatmul))
@test_util.run_deprecated_v1
def testSession(self):
with self.cached_session() as sess:
var = variables.Variable([1, 12])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([1, 12], self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testColocation(self):
with ops.device("/job:ps"):
var = variables.VariableV1(0, name="v")
with ops.device("/job:worker/task:7"):
assign_op = var.assign(1)
self.assertDeviceEqual("/job:ps", assign_op.device)
self.assertEqual([b"loc:@v"], assign_op.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testInitializerFunction(self):
value = [[-42], [133.7]]
shape = [2, 1]
with self.cached_session():
initializer = lambda: constant_op.constant(value)
v1 = variables.Variable(initializer, dtype=dtypes.float32)
self.assertEqual(shape, v1.get_shape())
self.assertEqual(shape, v1.shape)
self.assertAllClose(value, self.evaluate(v1.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v1)
v2 = variables.Variable(
math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
self.assertEqual(v1.get_shape(), v2.get_shape())
self.assertEqual(v1.shape, v2.shape)
self.assertAllClose(np.negative(value), self.evaluate(v2.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v2)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(np.negative(value), self.evaluate(v2))
def testConstraintArg(self):
constraint = lambda x: x
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
@test_util.run_v1_only("b/120545219")
def testNoRefDataRace(self):
with self.cached_session():
a = variables.Variable([1, 2, 3], dtype=dtypes.float32)
b = variables.Variable(a.initialized_value() + 2)
c = variables.Variable(b.initialized_value() + 2)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(a), [1, 2, 3])
self.assertAllEqual(self.evaluate(b), [3, 4, 5])
self.assertAllEqual(self.evaluate(c), [5, 6, 7])
@test_util.run_deprecated_v1
def testInitializerFunctionDevicePlacement(self):
with self.cached_session():
initializer = lambda: constant_op.constant(42.0)
with ops.device("/cpu:100"):
v1 = variables.Variable(initializer, dtype=dtypes.float32, name="v1")
expected_device = "/device:CPU:100"
expected_group_v1 = [b"loc:@v1"]
self.assertEqual(expected_device, v1.op.device)
self.assertEqual(expected_group_v1, v1.op.colocation_groups())
for i in v1.initializer.inputs:
self.assertEqual(expected_group_v1, i.op.colocation_groups())
v2 = variables.Variable(initializer, dtype=dtypes.float32, name="v2")
expected_group_v2 = [b"loc:@v2"]
self.assertEqual(expected_group_v2, v2.op.colocation_groups())
for i in v2.initializer.inputs:
self.assertEqual(expected_group_v2, i.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v_def = variables.Variable(
initial_value=constant_op.constant(3.0)).to_proto()
with ops.Graph().as_default(), self.cached_session() as sess:
# v describes a VariableDef-based variable without an initial value.
v = variables.Variable(variable_def=v_def)
self.assertEqual(3.0, self.evaluate(v.initialized_value()))
# initialized_value should not rerun the initializer_op if the variable
# has already been initialized elsewhere.
self.evaluate(v.assign(1.0))
self.assertEqual(1.0, self.evaluate(v.initialized_value()))
v_def.ClearField("initial_value_name")
with ops.Graph().as_default(), self.cached_session() as sess:
# Restoring a legacy VariableDef proto that does not have
# initial_value_name set should still work.
v = variables.Variable(variable_def=v_def)
# We should also be able to re-export the variable to a new meta graph.
self.assertProtoEquals(v_def, v.to_proto())
# But attempts to use initialized_value will result in errors.
with self.assertRaises(ValueError):
self.evaluate(v.initialized_value())
def testTrainableInProto(self):
with ops.Graph().as_default():
non_trainable_variable = variables.Variable(
trainable=False,
initial_value=constant_op.constant(10.0))
self.assertEqual(
False,
variables.Variable(variable_def=non_trainable_variable.to_proto())
.trainable)
trainable_variable = variables.Variable(
trainable=True,
initial_value=constant_op.constant(10.0))
self.assertEqual(
True,
variables.Variable(variable_def=trainable_variable.to_proto())
.trainable)
def testSynchronizationAndAggregationSaved(self):
with ops.Graph().as_default():
original_variable = variables.Variable(
initial_value=constant_op.constant(10.0),
synchronization=variables.VariableSynchronization.NONE,
aggregation=variables.VariableAggregationV2.ONLY_FIRST_REPLICA)
self.assertEqual(variables.VariableSynchronization.NONE,
original_variable.synchronization)
self.assertEqual(variables.VariableAggregation.ONLY_FIRST_REPLICA,
original_variable.aggregation)
laundered = variables.Variable(
variable_def=original_variable.to_proto())
self.assertEqual(
variables.VariableSynchronization.NONE,
laundered.synchronization)
self.assertEqual(variables.VariableAggregationV2.ONLY_FIRST_REPLICA,
laundered.aggregation)
@test_util.run_deprecated_v1
def testLoad(self):
with self.cached_session():
var = variables.Variable(np.zeros((5, 5), np.float32))
self.evaluate(variables.global_variables_initializer())
var.load(np.ones((5, 5), np.float32))
self.assertAllClose(np.ones((5, 5), np.float32), self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testRepr(self):
var = variables.VariableV1(np.zeros((5, 5), np.float32), name="noop")
self.assertEqual(
"<tf.Variable 'noop:0' shape=(5, 5) dtype=float32_ref>",
repr(var))
def testVariableNamesPreserveNameScopesWithDefun(self):
@function.defun
def create_variable():
with ops.name_scope("foo"):
v = variables.Variable(0.0, name="bar")
self.assertEqual(v.name, "foo/bar:0")
with ops.get_default_graph().as_default():
create_variable()
@parameterized.parameters(variables.VariableV1, variables.Variable)
def testTrainableVariable(self, cls):
v1 = cls(1.0)
self.assertEqual(True, v1.trainable)
v2 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ)
self.assertEqual(False, v2.trainable)
v3 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=True)
self.assertEqual(True, v3.trainable)
v4 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=False)
self.assertEqual(False, v4.trainable)
class IsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default(), self.cached_session() as sess:
uninited = variables.report_uninitialized_variables()
self.assertEqual(0, self.evaluate(uninited).size)
def testAssertVariablesInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable([1, 2], name="v")
w = variables.Variable([3, 4], name="w")
_ = v, w
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(uninited).size)
@test_util.run_v1_only("b/120545219")
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2], name="v")
w = variables.VariableV1([3, 4], name="w")
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(w.initializer)
self.assertAllEqual(np.array([b"v"]), self.evaluate(uninited))
v.initializer.run()
self.assertEqual(0, self.evaluate(uninited).size)
def testZeroSizeVarInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable(array_ops.zeros([0, 2]), name="v")
uninited = variables.report_uninitialized_variables()
v.initializer.run() # not strictly necessary
self.assertEqual(0, self.evaluate(uninited).size)
def testTrainingWithZeroSizeVar(self):
with ops.Graph().as_default(), self.cached_session() as sess:
a = variables.Variable(array_ops.zeros([0, 2]))
b = variables.Variable(array_ops.ones([2, 2]))
objective = math_ops.reduce_sum(b + math_ops.matmul(
a, a, transpose_a=True))
self.evaluate(variables.global_variables_initializer())
do_opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
objective)
self.evaluate([do_opt])
self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], self.evaluate(b))
@test_util.run_v1_only("b/120545219")
class ObsoleteIsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default():
self.assertEqual(None, variables.assert_variables_initialized())
def testVariables(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
w = variables.VariableV1([3, 4])
_ = v, w
inited = variables.assert_variables_initialized()
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(inited)
self.evaluate(variables.global_variables_initializer())
self.evaluate(inited)
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
w = variables.VariableV1([3, 4])
inited = variables.assert_variables_initialized([v])
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
self.evaluate(w.initializer)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
v.initializer.run()
inited.op.run()
class PartitionedVariableTest(test.TestCase):
def testPartitionedVariable(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
# Pass variable_list as [v1, v0] to ensure they are properly
# re-sorted to [v0, v1] based on their slice info offsets.
partitioned_variable = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
concatenated = ops.convert_to_tensor(partitioned_variable)
num_partitions = len(partitioned_variable)
iterated_partitions = list(partitioned_variable)
self.assertEqual(2, num_partitions)
self.assertEqual([v0, v1], iterated_partitions)
self.assertEqual([2], partitioned_variable.get_shape())
self.assertEqual([2], partitioned_variable.shape)
self.assertEqual([2], concatenated.get_shape())
self.assertEqual([2], concatenated.shape)
def testPartitionedVariableFailures(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "empty"):
variables.PartitionedVariable(
name="fail",
shape=2,
dtype=dtypes.int32,
variable_list=[],
partitions=[])
with self.assertRaisesRegexp(ValueError, "must have a save_slice_info"):
v0 = variables.Variable([0])
partitions = [1]
variables.PartitionedVariable(
name="two_vars",
shape=[1],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
with self.assertRaisesRegexp(ValueError, "full shapes must match"):
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
variables.PartitionedVariable(
name="two_vars",
shape=[3],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
with self.assertRaisesRegexp(ValueError, "must be positive"):
v0 = variables.Variable([0])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
partitions = [0]
variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
def testPartitionedVariableAssignments(self):
with ops.Graph().as_default(), self.cached_session():
v0 = variables.Variable(initial_value=[0.0])
v1 = variables.Variable(initial_value=[1.0])
v2 = variables.Variable(initial_value=[20.0])
v3 = variables.Variable(initial_value=[30.0])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v1.name, [2], [1], [1]))
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo(v2.name, [2], [0], [1]))
v3._set_save_slice_info(
variables.Variable.SaveSliceInfo(v3.name, [2], [1], [1]))
partitions = [2]
      # Build two partitioned variables, each composed of two single-element
      # slices (offsets 0 and 1) that together cover the full [2] shape.
pv_0 = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v0, v1],
partitions=partitions)
pv_1 = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v2, v3],
partitions=partitions)
deltas_a = constant_op.constant([1.0, 2.0])
deltas_b = constant_op.constant([3.0, 4.0])
ones = array_ops.ones([2])
plus_delta = pv_0.assign_add(deltas_a)
minus_delta = pv_0.assign_sub(deltas_b)
assign_ones = pv_0.assign(ones)
c_0 = constant_op.constant([2.0])
c_1 = constant_op.constant([3.0])
assign_list = pv_1.assign([c_0, c_1])
assign_part_value = pv_1.assign_add(assign_ones)
assign_part_var = pv_1.assign_sub(pv_0)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([1.0], self.evaluate(plus_delta[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([3.0], self.evaluate(plus_delta[1]))
self.assertEqual([3.0], self.evaluate(v1))
self.assertEqual([-2.0], self.evaluate(minus_delta[0]))
self.assertEqual([-2.0], self.evaluate(v0))
self.assertEqual([-1.0], self.evaluate(minus_delta[1]))
self.assertEqual([-1.0], self.evaluate(v1))
self.assertEqual([1.0], self.evaluate(assign_ones[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([1.0], self.evaluate(assign_ones[1]))
self.assertEqual([1.0], self.evaluate(v1))
self.assertEqual([2.0], self.evaluate(assign_list[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], self.evaluate(assign_list[1]))
self.assertEqual([3.0], self.evaluate(v3))
self.assertEqual([3.0], self.evaluate(assign_part_value[0]))
self.assertEqual([3.0], self.evaluate(v2))
self.assertEqual([4.0], self.evaluate(assign_part_value[1]))
self.assertEqual([4.0], self.evaluate(v3))
self.assertEqual([2.0], self.evaluate(assign_part_var[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], self.evaluate(assign_part_var[1]))
self.assertEqual([3.0], self.evaluate(v3))
class VariableContainerTest(test.TestCase):
def testContainer(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
with ops.container("l1"):
v1 = variables.Variable([1])
with ops.container("l2"):
v2 = variables.Variable([2])
special_v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="VariableInL3",
container="l3",
shared_name="")
v3 = variables.Variable([3])
v4 = variables.Variable([4])
self.assertEqual(compat.as_bytes(""), v0.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v1.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l2"), v2.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l3"), special_v.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v3.op.get_attr("container"))
self.assertEqual(compat.as_bytes(""), v4.op.get_attr("container"))
class AggregationModesTest(test.TestCase):
def testV1V2Equal(self):
v1 = variables.VariableAggregation
v2 = variables.VariableAggregationV2
self.assertEqual(v1.NONE, v2.NONE)
self.assertEqual(v1.SUM, v2.SUM)
self.assertEqual(v1.MEAN, v2.MEAN)
self.assertEqual(v1.ONLY_FIRST_REPLICA, v2.ONLY_FIRST_REPLICA)
self.assertEqual(v1.ONLY_FIRST_TOWER, v2.ONLY_FIRST_REPLICA)
self.assertEqual(v2.NONE, v1.NONE)
self.assertEqual(v2.SUM, v1.SUM)
self.assertEqual(v2.MEAN, v1.MEAN)
self.assertEqual(v2.ONLY_FIRST_REPLICA, v1.ONLY_FIRST_REPLICA)
self.assertEqual(v2.ONLY_FIRST_REPLICA, v1.ONLY_FIRST_TOWER)
self.assertEqual(hash(v1.NONE), hash(v2.NONE))
self.assertEqual(hash(v1.SUM), hash(v2.SUM))
self.assertEqual(hash(v1.MEAN), hash(v2.MEAN))
self.assertEqual(hash(v1.ONLY_FIRST_REPLICA), hash(v2.ONLY_FIRST_REPLICA))
self.assertEqual(hash(v1.ONLY_FIRST_TOWER), hash(v2.ONLY_FIRST_REPLICA))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/variables_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _upsample_filters(filters, rate):
"""Upsamples the filters by a factor of rate along the spatial dimensions.
Args:
filters: [h, w, in_depth, out_depth]. Original filters.
rate: An int, specifying the upsampling rate.
Returns:
filters_up: [h_up, w_up, in_depth, out_depth]. Upsampled filters with
h_up = h + (h - 1) * (rate - 1)
w_up = w + (w - 1) * (rate - 1)
containing (rate - 1) zeros between consecutive filter values along
the filters' spatial dimensions.
"""
if rate == 1:
return filters
# [h, w, in_depth, out_depth] -> [in_depth, out_depth, h, w]
filters_up = np.transpose(filters, [2, 3, 0, 1])
ker = np.zeros([rate, rate], dtype=np.float32)
ker[0, 0] = 1
filters_up = np.kron(filters_up, ker)[:, :, :-(rate - 1), :-(rate - 1)]
# [in_depth, out_depth, h_up, w_up] -> [h_up, w_up, in_depth, out_depth]
filters_up = np.transpose(filters_up, [2, 3, 0, 1])
return filters_up
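# Illustrative sketch (not part of the original test): for rate=2, a filter
# whose spatial slice is
#   [[1., 2.],
#    [3., 4.]]
# is upsampled by _upsample_filters to
#   [[1., 0., 2.],
#    [0., 0., 0.],
#    [3., 0., 4.]]
# i.e. h_up = 2 + (2 - 1) * (2 - 1) = 3, with (rate - 1) zeros inserted
# between consecutive filter values along each spatial dimension.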
class AtrousConv2DTest(test.TestCase):
@test_util.run_deprecated_v1
def testAtrousConv2DForward(self):
with self.session(use_gpu=True):
# Input: [batch, height, width, input_depth]
height = 9
for width in [9, 10]: # Test both odd and even width.
x_shape = [2, height, width, 2]
x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
for kernel_height in range(1, 4):
for kernel_width in range(1, 4):
f_shape = [kernel_height, kernel_width, 2, 2]
f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)
for rate in range(1, 4):
f_up = _upsample_filters(f, rate)
for padding in ["SAME", "VALID"]:
y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
y2 = nn_ops.conv2d(
x, f_up, strides=[1, 1, 1, 1], padding=padding)
self.assertAllClose(
y1.eval(), self.evaluate(y2), rtol=1e-3, atol=1e-3)
@test_util.run_deprecated_v1
def testAtrousSequence(self):
"""Tests optimization of sequence of atrous convolutions.
Verifies that a sequence of `atrous_conv2d` operations with identical `rate`
    parameters, 'SAME' `padding`, and `filters` with odd heights/widths:
net = atrous_conv2d(net, filters1, rate, padding="SAME")
net = atrous_conv2d(net, filters2, rate, padding="SAME")
...
net = atrous_conv2d(net, filtersK, rate, padding="SAME")
is equivalent to:
pad = ... # padding so that the input dims are multiples of rate
net = space_to_batch(net, paddings=pad, block_size=rate)
net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
...
net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
net = batch_to_space(net, crops=pad, block_size=rate)
"""
padding = "SAME" # The padding needs to be "SAME"
np.random.seed(1) # Make it reproducible.
with self.session(use_gpu=True):
# Input: [batch, height, width, input_depth]
for height in range(15, 17):
for width in range(15, 17):
x_shape = [3, height, width, 2]
x = np.random.random_sample(x_shape).astype(np.float32)
for kernel in [1, 3, 5]: # The kernel size needs to be odd.
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
f_shape = [kernel, kernel, 2, 2]
f = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)
for rate in range(2, 4):
# y1: three atrous_conv2d in a row.
y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
# y2: space_to_batch, three conv2d in a row, batch_to_space
pad_bottom = 0 if height % rate == 0 else rate - height % rate
pad_right = 0 if width % rate == 0 else rate - width % rate
pad = [[0, pad_bottom], [0, pad_right]]
y2 = array_ops.space_to_batch(x, paddings=pad, block_size=rate)
y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = array_ops.batch_to_space(y2, crops=pad, block_size=rate)
self.assertAllClose(
y1.eval(), self.evaluate(y2), rtol=1e-2, atol=1e-2)
@test_util.run_deprecated_v1
def testGradient(self):
with self.session(use_gpu=True):
# Input: [batch, height, width, input_depth]
x_shape = [2, 5, 6, 2]
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
f_shape = [3, 3, 2, 2]
# Output: [batch, height, width, output_depth]
y_shape = [2, 5, 6, 2]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float32)
f_val = np.random.random_sample(f_shape).astype(np.float32)
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
for rate in range(1, 4):
output = nn_ops.atrous_conv2d(x, f, rate=rate, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f],
[x_shape, f_shape],
output, y_shape)
print("atrous_conv2d gradient err = %g " % err)
err_tolerance = 4e-3 if test_util.is_xla_enabled() else 1e-3
self.assertLess(err, err_tolerance)
class AtrousConv2DTransposeTest(test.TestCase):
@test_util.run_deprecated_v1
def testAtrousConv2DTransposeForward(self):
with self.session(use_gpu=True):
# Input: [batch, height, width, input_depth]
height = 9
for width in [9, 10]: # Test both odd and even width.
x_shape = [2, height, width, 2]
x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
for kernel_height in range(1, 4):
for kernel_width in range(1, 4):
f_shape = [kernel_height, kernel_width, 2, 2]
f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)
for rate in range(1, 4):
f_up = _upsample_filters(f, rate)
kernel_height_up = (kernel_height + (kernel_height - 1) *
(rate - 1))
kernel_width_up = kernel_width + (kernel_width - 1) * (rate - 1)
for padding in ["SAME", "VALID"]:
if padding == "SAME":
y_shape = [2, height, width, 2]
else:
y_shape = [
2, height + kernel_height_up - 1,
width + kernel_width_up - 1, 2
]
y1 = nn_ops.atrous_conv2d_transpose(x, f, y_shape, rate,
padding)
y2 = nn_ops.conv2d_transpose(
x, f_up, y_shape, strides=[1, 1, 1, 1], padding=padding)
self.assertAllClose(
y1.eval(), self.evaluate(y2), rtol=1e-3, atol=1e-3)
class AtrousDepthwiseConv2DTest(test.TestCase):
@test_util.run_deprecated_v1
def testAtrousDepthwiseConv2DForward(self):
strides = [1, 1, 1, 1]
with self.session(use_gpu=True):
# Input: [batch, height, width, input_depth]
height = 9
for width in [9, 10]: # Test both odd and even width.
x_shape = [2, height, width, 2]
x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
for kernel_height in range(1, 4):
for kernel_width in range(1, 4):
f_shape = [kernel_height, kernel_width, 2, 2]
f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)
for rate in range(1, 4):
f_up = _upsample_filters(f, rate)
for padding in ["SAME", "VALID"]:
y1 = nn_impl.depthwise_conv2d(
x, f, strides, padding, rate=[rate, rate])
y2 = nn_impl.depthwise_conv2d(x, f_up, strides, padding)
self.assertAllClose(
y1.eval(), self.evaluate(y2), rtol=1e-3, atol=1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/atrous_conv2d_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for custom user ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.framework import load_library
from tensorflow.python.framework import test_util
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class AckermannTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
library_filename = os.path.join(resource_loader.get_data_files_path(),
'ackermann_op.so')
ackermann = load_library.load_op_library(library_filename)
self.assertEqual(len(ackermann.OP_LIST.op), 1)
self.assertEqual(ackermann.OP_LIST.op[0].name, 'Ackermann')
with self.cached_session():
self.assertEqual(ackermann.ackermann().eval(), b'A(m, 0) == A(m-1, 1)')
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/ackermann_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import threading
import numpy
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.util import compat
from tensorflow.python.util import tf_inspect
def run_inside_wrap_function_in_eager_mode(graph_function):
"""Decorator to execute the same graph code in eager and graph modes.
In graph mode, we just execute the graph_function passed as argument. In eager
mode, we wrap the function using wrap_function and then execute the wrapped
result.
Args:
graph_function: python function containing graph code to be wrapped
Returns:
decorated function
"""
def wrap_and_execute(self):
if context.executing_eagerly():
wrapped = wrap_function.wrap_function(graph_function, [self])
# use the wrapped graph function
wrapped()
else:
# use the original function
graph_function(self)
return wrap_and_execute
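# Typical usage (a sketch matching the tests below): stack this decorator
# under test_util.run_in_graph_and_eager_modes so the same graph-building
# body runs directly in graph mode and via wrap_function in eager mode, e.g.:
#
#   @test_util.run_in_graph_and_eager_modes
#   @run_inside_wrap_function_in_eager_mode
#   def testSomething(self):  # hypothetical test name
#     ...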
class VariableScopeTest(test.TestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEqual(0, len(gc.garbage))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVar(self):
vs = variable_scope._get_default_variable_store()
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testResource(self):
vs = variable_scope._get_default_variable_store()
v1 = vs.get_variable("v", [1], use_resource=True)
self.assertTrue(isinstance(v1, resource_variable_ops.ResourceVariable))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNameExists(self):
vs = variable_scope._get_default_variable_store()
# No check by default, so we can both create and get existing names.
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
# When reuse is False, we fail when variables are already there.
vs.get_variable("w", [1], reuse=False) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("v", [1], reuse=False) # That fails.
# When reuse is True, we fail when variables are new.
vs.get_variable("v", [1], reuse=True) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("u", [1], reuse=True) # That fails.
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNamelessStore(self):
vs = variable_scope._get_default_variable_store()
vs.get_variable("v1", [2])
vs.get_variable("v2", [2])
expected_names = ["%s:0" % name for name in ["v1", "v2"]]
self.assertEqual(
set(expected_names), set([v.name for v in vs._vars.values()]))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Expected tf.group() expected Tensor arguments not 'None' with
# type '<type 'NoneType'>'
@test_util.run_in_graph_and_eager_modes
def testVarScopeInitializer(self):
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("tower0") as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
with variable_scope.variable_scope(tower, initializer=init):
w = variable_scope.get_variable("w", [])
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.3)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeConstraint(self):
constraint = lambda x: 0. * x
with variable_scope.variable_scope("tower1") as tower:
with variable_scope.variable_scope("foo", constraint=constraint):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.constraint, constraint)
with variable_scope.variable_scope(tower, constraint=constraint):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.constraint, constraint)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeNestingError(self):
with variable_scope.variable_scope("aa"):
scope = variable_scope.variable_scope("bb")
scope.__enter__()
with variable_scope.variable_scope("cc"):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
scope.__exit__(None, None, None)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument <tf.Variable 'string:0' shape=() dtype=string>
# has invalid type <class '...ResourceVariable'>, must be a string or Tensor.
# (Can not convert a ResourceVariable into a Tensor or Operation.)
@test_util.run_deprecated_v1
def testStringDefaultInitializer(self):
with self.cached_session():
v = variable_scope.get_variable("string", shape=[], dtype=dtypes.string)
variables_lib.global_variables_initializer().run()
self.assertAllEqual(compat.as_bytes(self.evaluate(v)), b"")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeDType(self):
with variable_scope.variable_scope("tower2") as tower:
with variable_scope.variable_scope("foo", dtype=dtypes.float16):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.dtype.base_dtype, dtypes.float16)
with variable_scope.variable_scope(tower, dtype=dtypes.float16):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.dtype.base_dtype, dtypes.float16)
def testGetVariableInGraphNestedUnderEagerContext(self):
with context.eager_mode():
@function.defun
def f():
v = variable_scope.get_variable("should_be_resource", [])
self.assertEqual(type(v), resource_variable_ops.ResourceVariable)
f()
def testEagerVariableStore(self):
with context.eager_mode():
store = variable_scope.EagerVariableStore()
with store.as_default():
v = variable_scope.get_variable("v", shape=(), trainable=True)
w = variable_scope.get_variable("w", shape=(), trainable=False)
self.assertTrue(v in store.variables())
self.assertTrue(w in store.variables())
self.assertTrue(v in store.trainable_variables())
self.assertFalse(w in store.trainable_variables())
self.assertFalse(v in store.non_trainable_variables())
self.assertTrue(w in store.non_trainable_variables())
# Test copying.
new_store = store.copy()
with new_store.as_default():
new_v = variable_scope.get_variable("v")
new_w = variable_scope.get_variable("w")
self.assertEqual(new_v.numpy(), v.numpy())
self.assertEqual(new_w.numpy(), w.numpy())
self.assertTrue(new_v in new_store.variables())
self.assertTrue(new_w in new_store.variables())
self.assertTrue(new_v in new_store.trainable_variables())
self.assertFalse(new_w in new_store.trainable_variables())
self.assertFalse(new_v in new_store.non_trainable_variables())
self.assertTrue(new_w in new_store.non_trainable_variables())
# Check that variables are separate instances.
for v in store.variables():
v.assign(-1)
for v in new_store.variables():
v.assign(1)
for v in store.variables():
self.assertEqual(v.numpy(), -1)
for v in new_store.variables():
self.assertEqual(v.numpy(), 1)
def testEagerVariableStoreWithEagerDefun(self):
with context.eager_mode():
@function.defun
def f():
x = constant_op.constant([[2.0]])
d1 = core_layers.Dense(
1, name="my_dense", kernel_initializer=init_ops.ones_initializer())
_ = d1(x) # create variables
self.assertEqual(len(d1.variables), 2)
v1, v2 = d1.variables
d2 = core_layers.Dense(
1,
name="my_dense",
kernel_initializer=init_ops.ones_initializer(),
_reuse=True)
_ = d2(x)
self.assertEqual(len(d2.variables), 2)
v3, v4 = d2.variables
self.assertEqual(v1, v3)
self.assertEqual(v2, v4)
f()
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_in_graph_and_eager_modes
def testEagerVariablesStoreAddsToCollections(self):
store = variable_scope.EagerVariableStore()
with store.as_default():
trainable = variable_scope.get_variable("v1", [], trainable=True)
not_trainable = variable_scope.get_variable("v2", [], trainable=False)
concat = variable_scope.get_variable(
"v3", [], collections=[ops.GraphKeys.CONCATENATED_VARIABLES])
self.assertEqual(
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES),
[trainable, not_trainable])
self.assertEqual(
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES),
[trainable, concat])
self.assertEqual(
ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES), [concat])
def testEagerVariablesOutsideStoreNotAddedToCollections(self):
with context.eager_mode():
variable_scope.get_variable("v1", [], trainable=True)
variable_scope.get_variable("v2", [], trainable=False)
self.assertFalse(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertFalse(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Expected tf.group() expected Tensor arguments not 'None' with
# type '<type 'NoneType'>'.
@test_util.run_in_graph_and_eager_modes
def testInitFromNonTensorValue(self):
v = variable_scope.get_variable("v4", initializer=4, dtype=dtypes.int32)
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 4)
w = variable_scope.get_variable(
"w4", initializer=numpy.array([1, 2, 3]), dtype=dtypes.int64)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), [1, 2, 3])
# A quirk to be revisited?
error = ValueError if context.executing_eagerly() else TypeError
with self.assertRaises(error):
variable_scope.get_variable("x4", initializer={})
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# InvalidArgumentError=: You must feed a value for placeholder tensor
# 'ReadVariableOp/resource' with dtype resource
@test_util.run_in_graph_and_eager_modes
def testInitFromNonInitializer(self):
# Test various dtypes with zeros initializer as following:
types = [
dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
dtypes.int64, dtypes.bool
]
# Use different variable_name to distinguish various dtypes
for (i, dtype) in enumerate(types):
x = variable_scope.get_variable(
name="xx%d" % i, shape=(3, 4), dtype=dtype)
y = variable_scope.get_variable(
name="yy%d" % i,
shape=(3, 4),
dtype=dtype,
initializer=init_ops.zeros_initializer(dtype=dtype))
self.evaluate(variables_lib.global_variables_initializer())
self.assertAllEqual(self.evaluate(x.value()), self.evaluate(y.value()))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# InvalidArgumentError: /job:moo/replica:0/task:0/device:CPU:0 unknown device.
@test_util.run_deprecated_v1
def testVarScopeCachingDevice(self):
with self.cached_session():
caching_device = "/job:moo"
with variable_scope.variable_scope("tower"):
with variable_scope.variable_scope(
"caching", caching_device=caching_device):
v = variable_scope.get_variable("v", [])
self.assertTrue(v.value().device.startswith(caching_device))
with variable_scope.variable_scope("child"):
v2 = variable_scope.get_variable("v", [])
self.assertTrue(v2.value().device.startswith(caching_device))
with variable_scope.variable_scope("not_cached", caching_device=""):
v2_not_cached = variable_scope.get_variable("v", [])
self.assertFalse(
v2_not_cached.value().device.startswith(caching_device))
with variable_scope.variable_scope(
"not_cached_identity_device",
caching_device=lambda op: op.device):
v2_identity_device = variable_scope.get_variable("v", [])
self.assertFalse(
v2_identity_device.value().device.startswith(caching_device))
with variable_scope.variable_scope("we_will_do_it_live") as vs_live:
vs_live.set_caching_device("/job:live")
v_live = variable_scope.get_variable("v", [])
self.assertTrue(v_live.value().device.startswith("/job:live"))
v_tower = variable_scope.get_variable("v", [])
self.assertFalse(v_tower.value().device.startswith(caching_device))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# AttributeError: Tensor.name is meaningless when eager execution is enabled.
@test_util.run_in_graph_and_eager_modes
def testVarScopeRegularizer(self):
init = init_ops.constant_initializer(0.3)
def regularizer1(v):
return math_ops.reduce_mean(v) + 0.1
def regularizer2(v):
return math_ops.reduce_mean(v) + 0.2
with variable_scope.variable_scope(
"tower3", regularizer=regularizer1) as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(1, len(losses))
self.assertAllClose(self.evaluate(losses[0]), 0.4)
with variable_scope.variable_scope(tower, initializer=init) as vs:
u = variable_scope.get_variable("u", [])
vs.set_regularizer(regularizer2)
w = variable_scope.get_variable("w", [])
      # The next 3 variables are not regularized, to test disabling regularization.
x = variable_scope.get_variable(
"x", [], regularizer=variable_scope.no_regularizer)
with variable_scope.variable_scope(
"baz", regularizer=variable_scope.no_regularizer):
y = variable_scope.get_variable("y", [])
vs.set_regularizer(variable_scope.no_regularizer)
z = variable_scope.get_variable("z", [])
# Check results.
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses))
self.evaluate(variables_lib.variables_initializer([u, w, x, y, z]))
self.assertAllClose(self.evaluate(losses[0]), 0.4)
self.assertAllClose(self.evaluate(losses[1]), 0.4)
self.assertAllClose(self.evaluate(losses[2]), 0.5)
with variable_scope.variable_scope("foo", reuse=True):
# reuse=True is for now only supported when eager execution is disabled.
if not context.executing_eagerly():
v = variable_scope.get_variable("v",
[]) # "v" is already there, reused
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses)) # No new loss added.
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Tensor-typed variable initializers must either be wrapped in an
# init_scope or callable...
@test_util.run_in_graph_and_eager_modes
def testInitializeFromValue(self):
init = constant_op.constant(0.1)
w = variable_scope.get_variable("v", initializer=init)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.1)
with self.assertRaisesRegexp(ValueError, "shape"):
# We disallow explicit shape specification when initializer is constant.
variable_scope.get_variable("u", [1], initializer=init)
with variable_scope.variable_scope("foo", initializer=init):
# Constant initializer can be passed through scopes if needed.
v = variable_scope.get_variable("v")
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.1)
# Check that non-float32 initializer creates a non-float32 variable.
init = constant_op.constant(1, dtype=dtypes.int32)
t = variable_scope.get_variable("t", initializer=init)
self.assertEqual(t.dtype.base_dtype, dtypes.int32)
# Raise error if `initializer` dtype and `dtype` are not identical.
with self.assertRaisesRegexp(ValueError, "don't match"):
variable_scope.get_variable("s", initializer=init, dtype=dtypes.float64)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument <tf.Variable 'v0:0' shape=(1,) dtype=float32> has
# invalid type <class '...ops.resource_variable_ops.ResourceVariable'>, must
# be a string or Tensor. (Can not convert a ResourceVariable into a Tensor or
# Operation.)
@test_util.run_deprecated_v1
def testControlDeps(self):
with self.cached_session() as sess:
v0 = variable_scope.get_variable(
"v0", [1], initializer=init_ops.constant_initializer(0))
with ops.control_dependencies([v0.value()]):
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
add = v1 + v0
# v0 should be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
self.evaluate(v0)
# We should be able to initialize and run v1 without initializing
# v0, even if the variable was created with a control dep on v0.
self.evaluate(v1.initializer)
self.assertEqual(1, self.evaluate(v1))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
self.evaluate(v0)
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
self.evaluate(add)
# If we initialize v0 we should be able to run 'add'.
self.evaluate(v0.initializer)
self.evaluate(add)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# AssertionError: True is not false (last assertFalse)
@test_util.run_deprecated_v1
def testEnableResourceVariables(self):
old = variable_scope._DEFAULT_USE_RESOURCE
try:
variable_scope.enable_resource_variables()
self.assertTrue(isinstance(variables_lib.VariableV1(1.0),
resource_variable_ops.ResourceVariable))
variable_scope.disable_resource_variables()
self.assertFalse(isinstance(variables_lib.VariableV1(1.0),
resource_variable_ops.ResourceVariable))
finally:
variable_scope._DEFAULT_USE_RESOURCE = old
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument None has invalid type <type 'NoneType'>
@test_util.run_deprecated_v1
def testControlFlow(self):
with self.cached_session() as sess:
v0 = variable_scope.get_variable(
"v0", [], initializer=init_ops.constant_initializer(0))
var_dict = {}
# Call get_variable in each of the cond clauses.
def var_in_then_clause():
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variable_scope.get_variable(
"v2", [1], initializer=init_ops.constant_initializer(2))
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
      # We should be able to initialize and run v1 and v2 without initializing
      # v0, even though they were created inside a cond branch that reads v0.
self.evaluate(v1.initializer)
self.assertEqual([1], self.evaluate(v1))
self.evaluate(v2.initializer)
self.assertEqual([2], self.evaluate(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
self.evaluate(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
self.evaluate(add)
# If we initialize v0 we should be able to run 'add'.
self.evaluate(v0.initializer)
self.evaluate(add)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Expected tf.group() expected Tensor arguments not 'None' with
# type '<type 'NoneType'>'.
@test_util.run_in_graph_and_eager_modes
def testGetVariableScope(self):
# Test the get_variable_scope() function and setting properties of result.
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("bar"):
new_init1 = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init1, None)
# Check that we can set initializer like this.
variable_scope.get_variable_scope().set_initializer(init)
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
if not context.executing_eagerly():
# Check that we can set reuse.
variable_scope.get_variable_scope().reuse_variables()
with self.assertRaises(ValueError): # Fail, w does not exist yet.
variable_scope.get_variable("w", [1])
# Check that the set initializer goes away.
new_init = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init, None)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScope(self):
with variable_scope.variable_scope("tower4") as tower:
self.assertEqual(tower.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower4/scope/")
with variable_scope.variable_scope("tower5"):
with variable_scope.variable_scope("bar") as bar:
self.assertEqual(bar.name, "tower5/bar")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower5/bar/scope/")
with variable_scope.variable_scope("tower6"):
with variable_scope.variable_scope(tower, reuse=True) as tower_shared:
self.assertEqual(tower_shared.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower6/tower4/scope/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeNameScope(self):
with ops.name_scope("testVarScopeNameScope1"):
with variable_scope.variable_scope("tower") as tower:
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower/scope2/")
if not context.executing_eagerly():
with variable_scope.variable_scope(
tower): # Re-entering acts like another "tower".
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_1/scope2/")
with variable_scope.variable_scope(
"tower"): # Re-entering by string acts the same.
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_2/scope2/")
with ops.name_scope("testVarScopeNameScope2"):
with variable_scope.variable_scope("tower"):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower/scope2/")
if not context.executing_eagerly():
with variable_scope.variable_scope(tower):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower_1/scope2/")
root_var_scope = variable_scope.get_variable_scope()
with ops.name_scope("testVarScopeNameScope3"):
with variable_scope.variable_scope(root_var_scope):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope3/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeOriginalNameScope(self):
with self.cached_session():
with ops.name_scope("scope1"):
with variable_scope.variable_scope("tower") as tower:
self.assertEqual(tower.original_name_scope, "scope1/tower/")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower) as tower1:
# Re-entering preserves original name scope.
self.assertEqual(tower1.original_name_scope, "scope1/tower/")
with ops.name_scope("foo") as sc2:
self.assertEqual(sc2, "scope2/tower/foo/")
# Test re-entering original name scope.
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower):
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar_1/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeObjectReuse(self):
with self.cached_session():
vs = None
with variable_scope.variable_scope("jump", reuse=True) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertTrue(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertTrue(jump_no_reuse.reuse) # Inherited, cannot be undone.
with variable_scope.variable_scope("jump", reuse=False) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertFalse(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertFalse(jump_no_reuse.reuse)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetOrCreateReuse(self):
with self.cached_session():
def test_value(value):
x = constant_op.constant(value)
with variable_scope.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = state_ops.assign(variable_scope.get_variable("var", []), x)
with variable_scope.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = variable_scope.get_variable("var", [])
self.assertEqual(value, self.evaluate(x))
test_value(42.) # Variable is created.
test_value(13.) # Variable is reused hereafter.
test_value(17.)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScope(self):
with self.cached_session():
with ops.name_scope("testVarOpScope1"):
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "tower/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope1/tower/testVarOpScope2/")
with variable_scope.variable_scope("tower", "default", []):
with self.assertRaises(ValueError):
variable_scope.get_variable("w", [])
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope1/tower_1/testVarOpScope2/")
with ops.name_scope("testVarOpScope2"):
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope2/default/testVarOpScope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default_1/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope2/default_1/testVarOpScope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
with self.cached_session():
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1_1/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1_2/layer/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeUniqueNamesWithJump(self):
with self.cached_session():
with variable_scope.variable_scope("default") as default:
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/layer/w:0")
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer_1/w:0")
with variable_scope.variable_scope(default):
pass
# No matter the jump in the middle, unique numbering continues.
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer_2/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuse(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True) as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetVar(self):
with self.cached_session():
with variable_scope.variable_scope("root"):
with variable_scope.variable_scope("towerA") as tower_a:
va = variable_scope.get_variable("v", [1])
self.assertEqual(va.name, "root/towerA/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("towerB"):
vb = variable_scope.get_variable("v", [1])
self.assertEqual(vb.name, "root/towerB/v:0")
with self.assertRaises(ValueError):
with variable_scope.variable_scope("towerA"):
va2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("towerA", reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("foo"):
with variable_scope.variable_scope("bar"):
v = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "root/foo/bar/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va3 = variable_scope.get_variable("v", [1])
self.assertEqual(va, va3)
with self.assertRaises(ValueError):
with variable_scope.variable_scope(tower_a, reuse=True):
with variable_scope.variable_scope("baz"):
variable_scope.get_variable("v", [1])
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [2]) # Different shape.
self.assertEqual("shape" in str(exc.exception), True)
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [1], dtype=dtypes.int32)
self.assertEqual("dtype" in str(exc.exception), True)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
pass
with variable_scope.variable_scope(outer):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope("default"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
with variable_scope.variable_scope("default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeNestedOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with variable_scope.variable_scope("default"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer_1/scope2/")
with variable_scope.variable_scope("default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default_1/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuseParam(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer) as outer:
with variable_scope.variable_scope("tower", "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
outer.reuse_variables()
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuseError(self):
with self.cached_session():
with self.assertRaises(ValueError):
with variable_scope.variable_scope(None, "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
pass
with variable_scope.variable_scope(outer, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
outer.reuse_variables()
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeNestedOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testBasicWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
with variable_scope.variable_scope(
"scope", auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w", []).name, "scope/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
with variable_scope.variable_scope(scope, auxiliary_name_scope=False):
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w1", []).name, "scope/w1:0")
self.assertEqual(constant_op.constant([], name="c1").name, "c1:0")
      # Recheck: no new name scope was created above, so this name is still unused.
with ops.name_scope("scope"):
self.assertEqual(constant_op.constant([], name="c").name, "scope/c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
"inner", auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/inner/w:0")
self.assertEqual(
constant_op.constant([], name="c").name, "outer/c:0")
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as inner1:
self.assertEqual(inner1.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w1", []).name, "outer/inner/w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/c1:0")
        # Recheck: no new name scope was created above, so this name is still unused.
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "outer/inner/c:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testCreatedByDefaultNameWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
with variable_scope.variable_scope(
None, default_name="default", auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
      # Recheck: no new name scope was created above, so this name is still unused.
with ops.name_scope("default"):
self.assertEqual(
constant_op.constant([], name="c").name, "default/c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
None, default_name="default",
auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
self.assertEqual(
constant_op.constant([], name="c").name, "outer/c:0")
        # Recheck: no new name scope was created above, so this name is still unused.
with ops.name_scope("default"):
self.assertEqual(
constant_op.constant([], name="c").name, "outer/default/c:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReenterRootScopeWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
root_scope = variable_scope.get_variable_scope()
with variable_scope.variable_scope(
root_scope, auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(variable_scope.get_variable("w", []).name, "w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
root_scope, auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "")
self.assertEqual(variable_scope.get_variable("w1", []).name, "w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/c1:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testAuxiliaryNameScopeIsInvalid(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
None, default_name="scope", auxiliary_name_scope="invalid"):
pass
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
"scope", auxiliary_name_scope="invalid"):
pass
with variable_scope.variable_scope("scope") as scope:
pass
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
scope, auxiliary_name_scope="invalid"):
pass
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReuseScopeWithoutNameScopeCollision(self):
# Github issue: #13429
with self.cached_session():
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope("inner") as inner:
pass
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as scope:
with ops.name_scope(scope.original_name_scope):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/inner/w:0")
self.assertEqual(
constant_op.constant([], name="c").name, "outer/inner/c:0")
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "inner/c:0")
with variable_scope.variable_scope("another"):
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as scope1:
with ops.name_scope(scope1.original_name_scope):
self.assertEqual(
variable_scope.get_variable("w1", []).name,
"outer/inner/w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/inner/c1:0")
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "another/inner/c:0")
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
# (different assertions failing after wrapping, in both execution modes)
@test_util.run_in_graph_and_eager_modes
def testGetLocalVar(self):
# Check that local variable respects naming.
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer, "default", []):
local_var = variable_scope.get_local_variable(
"w", [], collections=["foo"])
self.assertEqual(local_var.name, "outer/w:0")
if not context.executing_eagerly():
# Since variable is local, it should be in the local variable collection
# but not the trainable collection.
self.assertIn(local_var,
ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
self.assertIn(local_var, ops.get_collection("foo"))
self.assertNotIn(local_var,
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# Check that local variable respects `reuse`.
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(
variable_scope.get_local_variable("w", []).name, "outer/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSignatureGetVarVsGetLocalVar(self):
"""get_{local,}variable() must take the same list of args."""
arg_names = tf_inspect.getargspec(variable_scope.get_variable)[0]
local_arg_names = tf_inspect.getargspec(
variable_scope.get_local_variable)[0]
self.assertEqual(arg_names, local_arg_names)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVarWithDevice(self):
g = ops.Graph()
varname_type = []
def device_func(op):
if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
varname_type.append((op.name, op.get_attr("dtype")))
return "/device:GPU:0"
with g.as_default():
with ops.device(device_func):
_ = variable_scope.get_variable("x", (100, 200))
_ = variable_scope.get_variable(
"y", dtype=dtypes.int64, initializer=numpy.arange(73))
self.assertEqual(varname_type[0], ("x", dtypes.float32))
self.assertEqual(varname_type[1], ("y", dtypes.int64))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testGetCollection(self):
with self.cached_session():
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable(
"testGetCollection_b", [], trainable=False)
with variable_scope.variable_scope("testGetCollection_foo_") as scope1:
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable(
"testGetCollection_b", [], trainable=False)
self.assertEqual([
v.name
for v in scope1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], ["testGetCollection_foo_/testGetCollection_a:0"])
self.assertEqual([
v.name
for v in scope1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_b:0"
])
with variable_scope.variable_scope("testGetCollection_foo") as scope2:
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable(
"testGetCollection_b", [], trainable=False)
self.assertEqual([
v.name
for v in scope2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], ["testGetCollection_foo/testGetCollection_a:0"])
self.assertEqual([
v.name
for v in scope2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_foo/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_b:0"
])
scope = variable_scope.get_variable_scope()
self.assertEqual([
v.name for v in scope.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_a:0", "testGetCollection_b:0",
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_b:0",
"testGetCollection_foo/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_b:0"
])
self.assertEqual([
v.name
for v in scope.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], [
"testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_a:0"
])
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testGetTrainableVariablesWithGetVariable(self):
with self.cached_session():
_ = variable_scope.get_variable("testGetTrainableVariables_a", [])
with variable_scope.variable_scope(
"testGetTrainableVariables_foo") as scope:
_ = variable_scope.get_variable("testGetTrainableVariables_b", [])
_ = variable_scope.get_variable(
"testGetTrainableVariables_c", [], trainable=False)
# sync `ON_READ` sets trainable=False
_ = variable_scope.get_variable(
"testGetTrainableVariables_d", [],
synchronization=variable_scope.VariableSynchronization.ON_READ)
self.assertEqual(
[v.name for v in scope.trainable_variables()],
["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])
_ = variable_scope.get_variable(
"testGetTrainableVariables_e", [],
synchronization=variable_scope.VariableSynchronization.ON_READ,
trainable=True)
self.assertEqual([v.name for v in scope.trainable_variables()], [
"testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
"testGetTrainableVariables_foo/testGetTrainableVariables_e:0",
])
        # All other sync values set trainable=True
_ = variable_scope.get_variable(
"testGetTrainableVariables_f", [],
synchronization=variable_scope.VariableSynchronization.ON_WRITE)
self.assertEqual([v.name for v in scope.trainable_variables()], [
"testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
"testGetTrainableVariables_foo/testGetTrainableVariables_e:0",
"testGetTrainableVariables_foo/testGetTrainableVariables_f:0",
])
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testGetTrainableVariablesWithVariable(self):
with self.cached_session():
_ = variable_scope.variable(1.0, name="testGetTrainableVariables_a")
with variable_scope.variable_scope(
"testGetTrainableVariables_foo") as scope:
_ = variable_scope.variable(1.0, name="testGetTrainableVariables_b")
_ = variable_scope.variable(
1.0, name="testGetTrainableVariables_c", trainable=False)
# sync `ON_READ` sets trainable=False
_ = variable_scope.variable(
1.0,
name="testGetTrainableVariables_d",
synchronization=variable_scope.VariableSynchronization.ON_READ)
self.assertEqual(
[v.name for v in scope.trainable_variables()],
["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])
_ = variable_scope.variable(
1.0,
name="testGetTrainableVariables_e",
synchronization=variable_scope.VariableSynchronization.ON_READ,
trainable=True)
self.assertEqual([v.name for v in scope.trainable_variables()], [
"testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
"testGetTrainableVariables_foo/testGetTrainableVariables_e:0",
])
        # All other sync values set trainable=True
_ = variable_scope.variable(
1.0,
name="testGetTrainableVariables_f",
synchronization=variable_scope.VariableSynchronization.ON_WRITE)
self.assertEqual([v.name for v in scope.trainable_variables()], [
"testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
"testGetTrainableVariables_foo/testGetTrainableVariables_e:0",
"testGetTrainableVariables_foo/testGetTrainableVariables_f:0",
])
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testGetGlobalVariables(self):
with self.cached_session():
_ = variable_scope.get_variable("testGetGlobalVariables_a", [])
with variable_scope.variable_scope("testGetGlobalVariables_foo") as scope:
_ = variable_scope.get_variable("testGetGlobalVariables_b", [])
self.assertEqual(
[v.name for v in scope.global_variables()],
["testGetGlobalVariables_foo/"
"testGetGlobalVariables_b:0"])
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testGetLocalVariables(self):
with self.cached_session():
_ = variable_scope.get_variable(
"a", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
with variable_scope.variable_scope("foo") as scope:
_ = variable_scope.get_variable(
"b", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
_ = variable_scope.get_variable("c", [])
self.assertEqual([v.name for v in scope.local_variables()], ["foo/b:0"])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithRefDtype(self):
v = variable_scope.get_variable("v", shape=[3, 4], dtype=dtypes.float32)
# Ensure it is possible to do get_variable with a _ref dtype passed in.
_ = variable_scope.get_variable("w", shape=[5, 6], dtype=v.dtype)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithInitializerWhichTakesNoArgs(self):
v = variable_scope.get_variable("foo", initializer=lambda: [2])
self.assertEqual(v.name, "foo:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithInitializerWhichTakesOptionalArgs(self):
v = variable_scope.get_variable("foo", initializer=lambda x=True: [2])
self.assertEqual(v.name, "foo:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithInitializerWhichTakesUnprovidedArgsAndNoShape(self):
with self.assertRaisesRegexp(
ValueError,
"The initializer passed is not valid. It should be a callable with no "
"arguments and the shape should not be provided or an instance of "
"`tf.keras.initializers.*' and `shape` should be fully defined."):
variable_scope.get_variable("foo", initializer=lambda x: [2])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testTwoGraphs(self):
def f():
g1 = ops.Graph()
g2 = ops.Graph()
with g1.as_default():
with g2.as_default():
with variable_scope.variable_scope("_"):
pass
self.assertRaisesRegexp(ValueError, "'_' is not a valid scope name", f)
def axis0_into1_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
return part
def axis0_into2_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 2
return part
def axis0_into3_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 3
return part
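# For reference, the partitioner contract exercised by the helpers above: each
# partitioner receives the full variable shape and returns one partition count
# per axis, e.g.
#   axis0_into2_partitioner(shape=(3, 1, 1))  # -> [2, 1, 1]: axis 0 split in 2
#   axis0_into3_partitioner(shape=(3, 1, 1))  # -> [3, 1, 1]: axis 0 split in 3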
class VariableScopeWithPartitioningTest(test.TestCase):
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testResultNameMatchesRequested(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v.name, "scope0/name0")
v_concat = v.as_tensor()
self.assertEqual(v_concat.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0/part_0:0", [x.name for x in variables])
self.assertIn("scope0/name0/part_1:0", [x.name for x in variables])
self.assertNotIn("scope0/name0/part_2:0", [x.name for x in variables])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testBreaksIfPartitioningChanges(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into3_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions "
".* and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into1_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions "
".* and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReturnsExistingConcatenatedValueIfReuse(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v_concat = variable_scope.get_variable("name0", shape=(3, 1, 1))
variable_scope.get_variable_scope().reuse_variables()
v_concat_2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v_concat, v_concat_2)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testAllowsReuseWithoutPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope("scope0", reuse=True):
v_reused = variable_scope.get_variable("name0")
self.assertEqual(v, v_reused)
def testNoReuseInEagerByDefault(self):
with context.eager_mode():
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v1 = variable_scope.get_variable("name0", shape=(3, 1, 1))
v2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertIsNot(v1, v2)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPropagatePartitionerOnReopening(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner) as vs:
self.assertEqual(axis0_into2_partitioner, vs.partitioner)
with variable_scope.variable_scope(vs) as vs1:
self.assertEqual(axis0_into2_partitioner, vs1.partitioner)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testScalarIgnoresPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=())
self.assertEqual(v.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0:0", [x.name for x in variables])
def _testPartitionConcatenatesAlongCorrectAxis(self, use_resource):
def _part_axis_0(**unused_kwargs):
return (2, 1, 1)
def _part_axis_1(**unused_kwargs):
return (1, 2, 1)
with variable_scope.variable_scope("root", use_resource=use_resource):
v0 = variable_scope.get_variable(
"n0", shape=(2, 2, 2), partitioner=_part_axis_0)
v1 = variable_scope.get_variable(
"n1", shape=(2, 2, 2), partitioner=_part_axis_1)
self.assertEqual(v0.get_shape(), (2, 2, 2))
self.assertEqual(v1.get_shape(), (2, 2, 2))
n0_0 = list(v0)[0]
n0_1 = list(v0)[1]
self.assertEqual(n0_0.get_shape(), (1, 2, 2))
self.assertEqual(n0_1.get_shape(), (1, 2, 2))
n1_0 = list(v1)[0]
n1_1 = list(v1)[1]
self.assertEqual(n1_0.get_shape(), (2, 1, 2))
self.assertEqual(n1_1.get_shape(), (2, 1, 2))
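      # Iterating a partitioned variable yields its slices, so v0 (split on
      # axis 0 by _part_axis_0) produces two (1, 2, 2) pieces and v1 (split on
      # axis 1 by _part_axis_1) produces two (2, 1, 2) pieces.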
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPartitionConcatenatesAlongCorrectAxis(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=False)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPartitionConcatenatesAlongCorrectAxisResource(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)
def testPartitionConcatenatesAlongCorrectAxisResourceInEager(self):
with context.eager_mode():
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)
class VariableScopeWithCustomGetterTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNonCallableGetterFails(self):
with self.assertRaisesRegexp(ValueError,
r"custom_getter .* not callable:"):
with variable_scope.variable_scope("scope0", custom_getter=3):
variable_scope.get_variable("name0")
with self.assertRaisesRegexp(ValueError,
r"custom_getter .* not callable:"):
variable_scope.get_variable("name0", custom_getter=3)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNoSideEffectsWithIdentityCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with variable_scope.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope(scope, reuse=True):
v2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("new_scope") as new_scope:
v3 = variable_scope.get_variable("v3", [1])
with variable_scope.variable_scope(
new_scope, reuse=True, custom_getter=custom_getter):
v4 = variable_scope.get_variable("v3", [1])
self.assertEqual(v, v2)
self.assertEqual(v3, v4)
self.assertEqual(3, called[0]) # skipped one in the first new_scope
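    # called is 3, not 4: the getter wraps v, v2 and v4, while v3 is created
    # inside new_scope, which was opened without a custom getter.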
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSynchronizationAndAggregationWithCustomGetter(self):
called = [0]
synchronization = variable_scope.VariableSynchronization.AUTO
aggregation = variable_scope.VariableAggregation.NONE
def custom_getter(getter, *args, **kwargs):
called[0] += 1
# Verify synchronization and aggregation kwargs are as expected.
self.assertEqual(kwargs["synchronization"], synchronization)
self.assertEqual(kwargs["aggregation"], aggregation)
return getter(*args, **kwargs)
with variable_scope.variable_scope("scope", custom_getter=custom_getter):
variable_scope.get_variable("v", [1])
self.assertEqual(1, called[0])
with variable_scope.variable_scope("scope", custom_getter=custom_getter):
synchronization = variable_scope.VariableSynchronization.ON_READ
aggregation = variable_scope.VariableAggregation.MEAN
variable_scope.get_variable(
"v1", [1], synchronization=synchronization, aggregation=aggregation)
self.assertEqual(2, called[0])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testCustomGetterWithReuse(self):
# Custom getter can choose to behave differently on reused variables.
def custom_getter(getter, *args, **kwargs):
var = getter(*args, **kwargs)
if kwargs["reuse"]:
# This can be used, e.g., for changing the caching device if needed.
return array_ops.identity(var, name="reused")
else:
return array_ops.identity(var, name="not_reused")
with variable_scope.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope(scope, reuse=True):
v2 = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "not_reused:0")
self.assertEqual(v2.name, "reused:0")
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Fetch argument <tf.Tensor 'custom_getter/add:0' shape=(1, 2, 3)
# dtype=float32> cannot be interpreted as a Tensor. (Tensor
# Tensor("custom_getter/add:0", shape=(1, 2, 3), dtype=float32) is not an
# element of this graph.)
@test_util.run_deprecated_v1
def testGetterThatCreatesTwoVariablesAndSumsThem(self):
def custom_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/0" % name, *args, **kwargs)
g_1 = getter("%s/1" % name, *args, **kwargs)
with ops.name_scope("custom_getter"):
return g_0 + g_1
with variable_scope.variable_scope("scope", custom_getter=custom_getter):
v = variable_scope.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = variables_lib.trainable_variables()
self.assertEqual(2, len(true_vars))
self.assertEqual("scope/v/0:0", true_vars[0].name)
self.assertEqual("scope/v/1:0", true_vars[1].name)
self.assertEqual("custom_getter/add:0", v.name)
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
np_vars, np_v = self.evaluate([true_vars, v])
self.assertAllClose(np_v, sum(np_vars))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Fetch argument <tf.Tensor 'sum_getter_2/add:0' shape=(1, 2, 3)
# dtype=float32> cannot be interpreted as a Tensor. (Tensor
# Tensor("sum_getter_2/add:0", shape=(1, 2, 3), dtype=float32) is not an
# element of this graph.)
@test_util.run_deprecated_v1
def testNestedCustomGetters(self):
def sum_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/sum_0" % name, *args, **kwargs)
g_1 = getter("%s/sum_1" % name, *args, **kwargs)
with ops.name_scope("sum_getter"):
return g_0 + g_1
def prod_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/prod_0" % name, *args, **kwargs)
g_1 = getter("%s/prod_1" % name, *args, **kwargs)
with ops.name_scope("prod_getter"):
return g_0 * g_1
with variable_scope.variable_scope("prod_scope", custom_getter=prod_getter):
with variable_scope.variable_scope("sum_scope", custom_getter=sum_getter):
with variable_scope.variable_scope(
"inner_sum_scope", custom_getter=sum_getter):
# take sums of sums of products
v = variable_scope.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = variables_lib.trainable_variables()
self.assertEqual(8, len(true_vars))
template = (
"prod_scope/sum_scope/inner_sum_scope/v/sum_%d/sum_%d/prod_%d:0")
self.assertEqual(template % (0, 0, 0), true_vars[0].name)
self.assertEqual(template % (0, 0, 1), true_vars[1].name)
self.assertEqual(template % (0, 1, 0), true_vars[2].name)
self.assertEqual(template % (0, 1, 1), true_vars[3].name)
self.assertEqual(template % (1, 0, 0), true_vars[4].name)
self.assertEqual(template % (1, 0, 1), true_vars[5].name)
self.assertEqual(template % (1, 1, 0), true_vars[6].name)
self.assertEqual(template % (1, 1, 1), true_vars[7].name)
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
np_vars, np_v = self.evaluate([true_vars, v])
      # the result is a sum of sums of products
self.assertAllClose(
np_v, (((np_vars[0] * np_vars[1]) + (np_vars[2] * np_vars[3])) + (
(np_vars[4] * np_vars[5]) + (np_vars[6] * np_vars[7]))))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVariableCreator(self):
variable_names = []
def creator_a(next_creator, **kwargs):
variable_names.append(kwargs.get("name", ""))
return next_creator(**kwargs)
def creator_b(next_creator, **kwargs):
kwargs["name"] = "forced_name"
return next_creator(**kwargs)
with variable_scope.variable_creator_scope(creator_a):
with variable_scope.variable_creator_scope(creator_b):
variable_scope.variable(1.0, name="one_name")
self.assertEqual(variable_names[0], "forced_name")
called = [False]
def creater_c(next_creator, **kwargs):
called[0] = True
self.assertEqual(kwargs["synchronization"],
variable_scope.VariableSynchronization.ON_WRITE)
self.assertEqual(kwargs["aggregation"],
variable_scope.VariableAggregation.MEAN)
return next_creator(**kwargs)
with variable_scope.variable_creator_scope(creater_c):
variable_scope.get_variable(
"v", [],
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=variable_scope.VariableAggregation.MEAN)
self.assertTrue(called[0])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVariableCreatorNestingError(self):
def creator(next_creator, **kwargs):
return next_creator(**kwargs)
# Save the state so we can clean up at the end.
graph = ops.get_default_graph()
old_creator_stack = graph._variable_creator_stack
try:
scope = variable_scope.variable_creator_scope(creator)
scope.__enter__()
with variable_scope.variable_creator_scope(creator):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
finally:
graph._variable_creator_stack = old_creator_stack
class PartitionInfoTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testConstructorChecks(self):
# Invalid arg types.
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None)
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo")
# full_shape and var_offset must have same length.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0])
# Offset must always be less than shape.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSingleOffset(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(4, partition_info.single_offset([1, 3]))
# Tests when the variable isn't partitioned at all.
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(0, partition_info.single_offset([9, 3]))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSingleSliceDim(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
# Invalid shape.
with self.assertRaises(TypeError):
partition_info.single_slice_dim(None)
# Rank of shape differs from full_shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 2, 3])
# Shape is too large given var_offset (4+6 > 9).
with self.assertRaises(ValueError):
partition_info.single_slice_dim([6, 3])
# Multiple possible slice dim from shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 1])
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(1, partition_info.single_slice_dim([9, 2]))
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(0, partition_info.single_slice_dim([2, 3]))
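    # In short: single_slice_dim returns the one axis on which the slice shape
    # is smaller than full_shape=[9, 3] -- [9, 2] differs only on axis 1 and
    # [2, 3] only on axis 0 -- and raises ValueError when the slice is
    # oversized or the differing axis is ambiguous.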
class VariableScopeMultithreadedTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testTwoThreadsDisjointScopeEntry(self):
def thread_fn(i, graph):
with graph.as_default():
with variable_scope.variable_scope("foo"):
if i == 0:
v = variable_scope.get_variable("v", [])
self.assertEquals("foo/v:0", v.name)
else:
# Any thread after the first one should fail to create variable
# with the same name.
with self.assertRaises(ValueError):
variable_scope.get_variable("v", [])
graph = ops.get_default_graph()
threads = [
threading.Thread(target=thread_fn, args=(
i,
graph,
)) for i in range(2)
]
threads[0].start()
# Allow thread 0 to finish before starting thread 1.
threads[0].join()
threads[1].start()
threads[1].join()
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testTwoThreadsNestedScopeEntry(self):
def thread_fn(i, graph, run_event, pause_event):
with graph.as_default():
with variable_scope.variable_scope("foo"):
if i == 0:
v = variable_scope.get_variable("v", [])
self.assertEquals("foo/v:0", v.name)
else:
# Any thread after the first one should fail to create variable
# with the same name.
with self.assertRaises(ValueError):
variable_scope.get_variable("v", [])
pause_event.set()
run_event.wait()
graph = ops.get_default_graph()
run_events = [threading.Event() for _ in range(2)]
pause_events = [threading.Event() for _ in range(2)]
threads = [
threading.Thread(
target=thread_fn, args=(i, graph, run_events[i], pause_events[i]))
for i in range(2)
]
# Start first thread.
threads[0].start()
pause_events[0].wait()
# Start next thread once the first thread has paused.
threads[1].start()
pause_events[1].wait()
# Resume both threads.
run_events[0].set()
run_events[1].set()
threads[0].join()
threads[1].join()
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReenterMainScope(self):
def thread_fn(graph, main_thread_scope):
with graph.as_default():
        # Variable created within the main scope will have prefix "main".
with variable_scope.variable_scope(main_thread_scope):
with variable_scope.variable_scope("foo"):
v = variable_scope.get_variable("v", [])
self.assertEquals("main/foo/v:0", v.name)
# Variable created outside main scope will not have prefix "main".
with variable_scope.variable_scope("bar"):
v = variable_scope.get_variable("v", [])
self.assertEquals("bar/v:0", v.name)
graph = ops.get_default_graph()
with variable_scope.variable_scope("main") as main_thread_scope:
thread = threading.Thread(
target=thread_fn, args=(graph, main_thread_scope))
thread.start()
thread.join()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/variable_scope_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import random
import numpy as np
from tensorflow.core.util import test_log_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
# Used by SomeRandomBenchmark class below.
_ran_somebenchmark_1 = [False]
_ran_somebenchmark_2 = [False]
_ran_somebenchmark_but_shouldnt = [False]
class SomeRandomBenchmark(test.Benchmark):
"""This Benchmark should automatically be registered in the registry."""
def _dontRunThisBenchmark(self):
_ran_somebenchmark_but_shouldnt[0] = True
def notBenchmarkMethod(self):
_ran_somebenchmark_but_shouldnt[0] = True
def benchmark1(self):
_ran_somebenchmark_1[0] = True
def benchmark2(self):
_ran_somebenchmark_2[0] = True
class TestReportingBenchmark(test.Benchmark):
"""This benchmark (maybe) reports some stuff."""
def benchmarkReport1(self):
self.report_benchmark(iters=1)
def benchmarkReport2(self):
self.report_benchmark(
iters=2,
name="custom_benchmark_name",
extras={"number_key": 3,
"other_key": "string"})
def benchmark_times_an_op(self):
input_size = 5
with session.Session(config=benchmark.benchmark_config()) as sess:
a = array_ops.placeholder(dtype=dtypes.float32, shape=(input_size))
a_plus_a = a + a
return self.run_op_benchmark(
sess,
a_plus_a,
feed_dict={a: np.arange(input_size)},
min_iters=1000,
store_trace=True,
name="op_benchmark")
class BenchmarkTest(test.TestCase):
def testGlobalBenchmarkRegistry(self):
registry = list(benchmark.GLOBAL_BENCHMARK_REGISTRY)
self.assertEqual(len(registry), 2)
self.assertTrue(SomeRandomBenchmark in registry)
self.assertTrue(TestReportingBenchmark in registry)
def testRunSomeRandomBenchmark(self):
    # Validate that SomeRandomBenchmark has not run yet
self.assertFalse(_ran_somebenchmark_1[0])
self.assertFalse(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
    # Run other benchmarks, but this won't run the one we care about
benchmark._run_benchmarks("unrelated")
    # Validate that SomeRandomBenchmark has not run yet
self.assertFalse(_ran_somebenchmark_1[0])
self.assertFalse(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
# Run all the benchmarks, avoid generating any reports
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
benchmark._run_benchmarks("SomeRandom")
# Validate that SomeRandomBenchmark ran correctly
self.assertTrue(_ran_somebenchmark_1[0])
self.assertTrue(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
_ran_somebenchmark_1[0] = False
_ran_somebenchmark_2[0] = False
_ran_somebenchmark_but_shouldnt[0] = False
# Test running a specific method of SomeRandomBenchmark
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
benchmark._run_benchmarks("SomeRandom.*1$")
self.assertTrue(_ran_somebenchmark_1[0])
self.assertFalse(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
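    # _run_benchmarks treats its argument as a regular expression over the
    # registered benchmark names, which is why "unrelated" above runs nothing
    # and "SomeRandom.*1$" runs only benchmark1.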
@test_util.disable_xla("b/123744455") # GPU memory is incorrect
def testReportingBenchmark(self):
tempdir = test.get_temp_dir()
try:
gfile.MakeDirs(tempdir)
except OSError as e:
# It's OK if the directory already exists.
if " exists:" not in str(e):
raise e
prefix = os.path.join(tempdir,
"reporting_bench_%016x_" % random.getrandbits(64))
expected_output_file = "%s%s" % (prefix,
"TestReportingBenchmark.benchmarkReport1")
expected_output_file_2 = "%s%s" % (
prefix, "TestReportingBenchmark.custom_benchmark_name")
expected_output_file_3 = "%s%s" % (prefix,
"TestReportingBenchmark.op_benchmark")
try:
self.assertFalse(gfile.Exists(expected_output_file))
# Run benchmark but without env, shouldn't write anything
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
reporting = TestReportingBenchmark()
reporting.benchmarkReport1() # This should run without writing anything
self.assertFalse(gfile.Exists(expected_output_file))
      # Run benchmark with env, should write
os.environ[benchmark.TEST_REPORTER_TEST_ENV] = prefix
reporting = TestReportingBenchmark()
reporting.benchmarkReport1() # This should write
reporting.benchmarkReport2() # This should write
benchmark_values3 = reporting.benchmark_times_an_op() # This should write
# Check the files were written
self.assertTrue(gfile.Exists(expected_output_file))
self.assertTrue(gfile.Exists(expected_output_file_2))
self.assertTrue(gfile.Exists(expected_output_file_3))
# Check the contents are correct
expected_1 = test_log_pb2.BenchmarkEntry()
expected_1.name = "TestReportingBenchmark.benchmarkReport1"
expected_1.iters = 1
expected_2 = test_log_pb2.BenchmarkEntry()
expected_2.name = "TestReportingBenchmark.custom_benchmark_name"
expected_2.iters = 2
expected_2.extras["number_key"].double_value = 3
expected_2.extras["other_key"].string_value = "string"
expected_3 = test_log_pb2.BenchmarkEntry()
expected_3.name = "TestReportingBenchmark.op_benchmark"
expected_3.iters = 1000
def read_benchmark_entry(f):
s = gfile.GFile(f, "rb").read()
entries = test_log_pb2.BenchmarkEntries.FromString(s)
self.assertEquals(1, len(entries.entry))
return entries.entry[0]
read_benchmark_1 = read_benchmark_entry(expected_output_file)
self.assertProtoEquals(expected_1, read_benchmark_1)
read_benchmark_2 = read_benchmark_entry(expected_output_file_2)
self.assertProtoEquals(expected_2, read_benchmark_2)
read_benchmark_3 = read_benchmark_entry(expected_output_file_3)
self.assertEquals(expected_3.name, read_benchmark_3.name)
self.assertEquals(expected_3.iters, read_benchmark_3.iters)
self.assertGreater(read_benchmark_3.wall_time, 0)
# Trace is not stored in benchmark entry. Instead we get it from
# return value of `run_op_benchmark` call.
full_trace = benchmark_values3["extras"]["full_trace_chrome_format"]
json_trace = json.loads(full_trace)
self.assertTrue(isinstance(json_trace, dict))
self.assertTrue("traceEvents" in json_trace.keys())
allocator_keys = [k for k in read_benchmark_3.extras.keys()
if k.startswith("allocator_maximum_num_bytes_")]
self.assertGreater(len(allocator_keys), 0)
for k in allocator_keys:
self.assertGreater(read_benchmark_3.extras[k].double_value, 0)
finally:
gfile.DeleteRecursively(tempdir)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/benchmark_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RegexReplace op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
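# The parameterized class below runs every test case twice: once against the
# dynamic regex_replace kernel and once against static_regex_replace.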
@parameterized.parameters(
(gen_string_ops.regex_replace),
(gen_string_ops.static_regex_replace))
class RegexReplaceOpVariantsTest(test.TestCase, parameterized.TestCase):
@test_util.run_deprecated_v1
def testForwarding(self, op):
with self.cached_session():
# Generate an input that is uniquely consumed by the regex op.
# This exercises code paths which are optimized for this case
# (e.g., using forwarding).
inp = string_ops.substr(
constant_op.constant(["AbCdEfG",
"HiJkLmN"], dtypes.string),
pos=0,
len=5)
stripped = op(inp, "\\p{Ll}", ".").eval()
self.assertAllEqual([b"A.C.E", b"H.J.L"], stripped)
@test_util.run_deprecated_v1
def testRemovePrefix(self, op):
values = ["a:foo", "a:bar", "a:foo", "b:baz", "b:qux", "ca:b"]
with self.cached_session():
input_vector = constant_op.constant(values, dtypes.string)
stripped = op(input_vector, "^(a:|b:)", "", replace_global=False).eval()
self.assertAllEqual([b"foo", b"bar", b"foo", b"baz", b"qux", b"ca:b"],
stripped)
@test_util.run_deprecated_v1
def testRegexReplace(self, op):
values = ["aba\naba", "abcdabcde"]
with self.cached_session():
input_vector = constant_op.constant(values, dtypes.string)
stripped = op(input_vector, "a.*a", "(\\0)").eval()
self.assertAllEqual([b"(aba)\n(aba)", b"(abcda)bcde"], stripped)
@test_util.run_deprecated_v1
def testEmptyMatch(self, op):
values = ["abc", "1"]
with self.cached_session():
input_vector = constant_op.constant(values, dtypes.string)
stripped = op(input_vector, "", "x").eval()
self.assertAllEqual([b"xaxbxcx", b"x1x"], stripped)
@test_util.run_deprecated_v1
def testInvalidPattern(self, op):
values = ["abc", "1"]
with self.cached_session():
input_vector = constant_op.constant(values, dtypes.string)
invalid_pattern = "A["
replace = op(input_vector, invalid_pattern, "x")
with self.assertRaisesOpError("Invalid pattern"):
self.evaluate(replace)
@test_util.run_deprecated_v1
def testGlobal(self, op):
values = ["ababababab", "abcabcabc", ""]
with self.cached_session():
input_vector = constant_op.constant(values, dtypes.string)
stripped = op(input_vector, "ab", "abc", True).eval()
self.assertAllEqual([b"abcabcabcabcabc", b"abccabccabcc", b""], stripped)
def as_string(s):
return s
def as_tensor(s):
return constant_op.constant(s, dtypes.string)
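# The delegation tests below check that string_ops.regex_replace only lowers
# to the StaticRegexReplace kernel when both the pattern and the rewrite are
# plain Python strings; if either is a tensor, the dynamic RegexReplace kernel
# is used instead.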
class RegexReplaceTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(as_string, as_tensor),
(as_tensor, as_string),
(as_tensor, as_tensor))
@test_util.run_deprecated_v1
def testRegexReplaceDelegation(self, pattern_fn, rewrite_fn):
with self.cached_session():
input_vector = constant_op.constant("foo", dtypes.string)
pattern = pattern_fn("[a-z]")
replace = rewrite_fn(".")
op = string_ops.regex_replace(input_vector, pattern, replace)
self.assertTrue(op.name.startswith("RegexReplace"))
@test_util.run_deprecated_v1
def testStaticRegexReplaceDelegation(self):
with self.cached_session():
input_vector = constant_op.constant("foo", dtypes.string)
pattern = "[a-z]"
replace = "."
op = string_ops.regex_replace(input_vector, pattern, replace)
self.assertTrue(op.name.startswith("StaticRegexReplace"))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/regex_replace_op_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for unsorted_segment_join_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class UnicodeTestCase(test.TestCase):
"""Test case with Python3-compatible string comparator."""
def assertAllEqualUnicode(self, truth, actual):
self.assertAllEqual(
np.array(truth).astype('U'),
np.array(actual).astype('U'))
@test_util.run_all_in_graph_and_eager_modes
class UnsortedSegmentJoinOpTest(UnicodeTestCase, parameterized.TestCase):
def test_basic_np_array(self):
inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
segment_ids = [1, 0, 1]
num_segments = 2
separator = ':'
output_array = [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']]
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
self.assertAllEqual(res.shape, np.array(output_array).shape)
self.assertAllEqualUnicode(res, output_array)
def test_segment_id_and_input_empty(self):
inputs = np.array([], dtype=np.string_)
segment_ids = np.array([], dtype=np.int32)
num_segments = 3
separator = ':'
output_array = ['', '', '']
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
self.assertAllEqual(res.shape, np.array(output_array).shape)
self.assertAllEqualUnicode(res, output_array)
def test_type_check(self):
inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
segment_ids = np.array([1, 0, 1], dtype=np.int32)
num_segments = np.array(2, dtype=np.int32)
separator = ':'
output_array = [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']]
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
self.assertAllEqual(res.shape, np.array(output_array).shape)
self.assertAllEqualUnicode(res, output_array)
segment_ids = np.array([1, 0, 1], dtype=np.int64)
num_segments = np.array(2, dtype=np.int64)
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
self.assertAllEqual(res.shape, np.array(output_array).shape)
self.assertAllEqualUnicode(res, output_array)
def test_basic_tensor(self):
inputs = constant_op.constant([['Y', 'q', 'c'], ['Y', '6', '6'],
['p', 'G', 'a']])
segment_ids = constant_op.constant([1, 0, 1])
num_segments = 2
separator = ':'
output_array = constant_op.constant([['Y', '6', '6'], ['Y:p', 'q:G',
'c:a']])
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
self.assertAllEqual(res, output_array)
self.assertAllEqual(res.shape, output_array.get_shape())
def test_multiple_segment_join(self):
inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
segment_ids_1 = [1, 0, 1]
num_segments_1 = 2
separator_1 = ':'
output_array_1 = [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']]
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids_1,
num_segments=num_segments_1,
separator=separator_1))
self.assertAllEqualUnicode(res, output_array_1)
self.assertAllEqual(res.shape, np.array(output_array_1).shape)
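    # Join the first result again, this time with an empty separator.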
segment_ids_2 = [1, 1]
num_segments_2 = 2
separator_2 = ''
output_array_2 = [['', '', ''], ['YY:p', '6q:G', '6c:a']]
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=res,
segment_ids=segment_ids_2,
num_segments=num_segments_2,
separator=separator_2))
self.assertAllEqualUnicode(res, output_array_2)
self.assertAllEqual(res.shape, np.array(output_array_2).shape)
@parameterized.parameters([
{
'inputs': [[[['q'], ['s']], [['f'], ['F']], [['h'], ['0']]],
[[['E'], ['j']], [['2'], ['k']], [['N'], ['d']]],
[[['G'], ['M']], [['1'], ['S']], [['N'], ['7']]],
[[['8'], ['W']], [['W'], ['G']], [['j'], ['d']]]],
'segment_ids': [1, 1, 0, 2],
'num_segments':
3,
'separator':
':',
'output_array': [[[['G'], ['M']], [['1'], ['S']], [['N'], ['7']]],
[[['q:E'], ['s:j']], [['f:2'], ['F:k']],
[['h:N'], ['0:d']]],
[[['8'], ['W']], [['W'], ['G']], [['j'], ['d']]]],
},
{
'inputs': [[['Q', 'b'], ['c', 'p']], [['i', '9'], ['n', 'b']],
[['T', 'h'], ['g', 'z']]],
'segment_ids': [[0, 1], [1, 0], [1, 0]],
'num_segments': 2,
'separator': ':',
'output_array': [['Q:n:g', 'b:b:z'], ['c:i:T', 'p:9:h']]
},
{
'inputs': [[['Q', 'b'], ['b', 'p']], [['i', '9'], ['n', 'b']],
[['T', 'h'], ['g', 'z']]],
'segment_ids': [[[2, 1], [0, 0]], [[2, 0], [2, 2]], [[0, 2], [1, 0]]],
'num_segments': 3,
'separator': ':',
'output_array': ['b:p:9:T:z', 'b:g', 'Q:i:n:b:h']
},
{
'inputs': [[['z'], ['h']], [['c'], ['z']], [['V'], ['T']]],
'segment_ids': [0, 1, 1],
'num_segments': 3,
'separator': ':',
'output_array': [[['z'], ['h']], [['c:V'], ['z:T']], [[''], ['']]]
},
])
def test_multiple_cases_with_different_dims(self, inputs, segment_ids,
num_segments, separator,
output_array):
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
self.assertAllEqualUnicode(res, output_array)
self.assertAllEqual(res.shape, np.array(output_array).shape)
@parameterized.parameters([
{
'separator': '',
'output_array': ['thisisatest']
},
{
'separator': ':',
'output_array': ['this:is:a:test']
},
{
'separator': 'UNK',
'output_array': ['thisUNKisUNKaUNKtest']
},
])
def testSeparator(self, separator, output_array):
inputs = ['this', 'is', 'a', 'test']
segment_ids = [0, 0, 0, 0]
num_segments = 1
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
self.assertAllEqual(res.shape, np.array(output_array).shape)
self.assertAllEqualUnicode(res, output_array)
def test_fail_segment_id_exceeds_segment_nums(self):
inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
segment_ids = [1, 0, 1]
num_segments = 1
separator = ':'
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
def test_fail_segment_id_dim_does_not_match(self):
inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
segment_ids = [1, 0, 1, 1]
num_segments = 2
separator = ':'
if not context.executing_eagerly():
with self.assertRaises(ValueError):
self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
else:
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
def test_fail_segment_id_empty_input_non_empty(self):
inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
segment_ids = np.array([], dtype=np.int32)
num_segments = 2
separator = ':'
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
def test_empty_input(self):
inputs = np.array([], dtype=np.string_)
segment_ids = [1, 0, 1]
num_segments = 2
separator = ':'
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
def test_fail_negative_segment_id(self):
inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
segment_ids = [-1, 0, -1]
num_segments = 1
separator = ':'
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/unsorted_segment_join_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
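"""Tests for tensorflow.python.ops.data_flow_ops.SparseConditionalAccumulator."""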
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
def _indexedslice(x, noshape=False):
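  """Converts a dense array to IndexedSlices of its rows with nonzero sums."""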
x = np.array(x)
dense_shape = x.shape
ndim = len(dense_shape)
indices = np.where(np.sum(x, tuple(range(1, ndim))))[0]
values = x[indices]
if noshape:
dense_shape = None
return ops.IndexedSlices(
indices=indices.tolist(), values=values, dense_shape=dense_shape)
class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
def _assertEqual_indexedslices(self, expected_tensor, result):
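    """Asserts that two IndexedSlices have matching indices, values and shape."""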
self.assertAllEqual(expected_tensor.indices, result.indices)
self.assertAllEqual(expected_tensor.values, result.values)
if (result.dense_shape is not None and
expected_tensor.dense_shape is not None):
self.assertAllEqual(expected_tensor.dense_shape, result.dense_shape)
def _assertEqual_nparray(self, expected_array, result, sess):
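    """Asserts that result matches the IndexedSlices form of expected_array."""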
expected_tensor = _indexedslice(expected_array)
self._assertEqual_indexedslices(expected_tensor, result)
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q")
self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals(
"""
name:'Q' op:'SparseConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'shape' value { shape { unknown_rank: true} } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
attr { key: 'reduction_type' value {s: 'MEAN'} }
""", q.accumulator_ref.op.node_def)
def testConstructorWithInvalidArg(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError):
data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", reduction_type="Invalid")
def testConstructorWithShape(self):
with ops.Graph().as_default():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1, 5, 2, 8]))
self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals(
"""
name:'Q' op:'SparseConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'shape' value { shape { dim {size: 1 }
dim {size: 5 }
dim {size: 2 }
dim {size: 8 }
} } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
attr { key: 'reduction_type' value {s: 'MEAN'} }
""", q.accumulator_ref.op.node_def)
@test_util.run_deprecated_v1
def testAccumulatorSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q")
self.assertEqual(q.num_accumulated().eval(), 0)
@test_util.run_deprecated_v1
def testAccumulatorSetGlobalStep(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
set_global_step_op = q.set_global_step(1)
set_global_step_op.run()
@test_util.run_deprecated_v1
def testAccumulatorApplyGradFloat32(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
accum_op = q.apply_indexed_slices_grad(
ops.IndexedSlices(
indices=[0, 2],
values=np.array([[0, 0, 1], [3, 0, 4]]).astype(np.float32)))
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 1)
@test_util.run_deprecated_v1
def testDtypes(self):
with self.cached_session() as sess:
dtypes = [dtypes_lib.float16, dtypes_lib.float32, dtypes_lib.float64]
for i in range(len(dtypes)):
dtype = dtypes[i]
q = data_flow_ops.SparseConditionalAccumulator(
dtype, shape=tensor_shape.TensorShape([3, 3, 3]))
elems = np.arange(2)
sum_elems = np.zeros([3, 3, 3]).astype(dtype.as_numpy_dtype)
for e in elems:
mat_to_add = np.zeros([3, 3, 3]).astype(dtype.as_numpy_dtype)
mat_to_add[i, i, i] = e + 1
sum_elems += mat_to_add
t = _indexedslice(mat_to_add)
q.apply_indexed_slices_grad(t).run()
result = self.evaluate(q.take_indexed_slices_grad(1))
self._assertEqual_nparray(sum_elems / len(elems), result, sess)
@test_util.run_deprecated_v1
def testAccumulatorMultipleAccumulators(self):
with self.cached_session() as sess:
q_f32_0 = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
q_f32_1 = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
q_f16_0 = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([2, 2]))
q_f16_1 = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([2, 2]))
accums = [q_f16_0, q_f16_1, q_f32_0, q_f32_1]
elems = [[[1, 0], [0, 0]], [[0, 1], [0, 0]], [[0, 0], [1, 0]], [[0, 0],
[0, 1]]]
expected_tensors = []
for i in range(len(accums)):
tensor_to_add = np.array(elems[i]).astype(accums[i]
.dtype.as_numpy_dtype)
expected_tensor = _indexedslice(tensor_to_add)
expected_tensors.append(expected_tensor)
st = _indexedslice(tensor_to_add)
accums[i].apply_indexed_slices_grad(st).run()
for i in range(len(accums)):
result = sess.run(accums[i].take_indexed_slices_grad(1))
self._assertEqual_indexedslices(expected_tensors[i], result)
@test_util.run_deprecated_v1
def testAccumulatorTakeGradMean(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=())
grad_indexed_slices = ops.IndexedSlices(
indices=[0, 1], values=np.array([[1, 0], [0, 2]]).astype(np.float32))
accum_op = q.apply_indexed_slices_grad(grad_indexed_slices)
accum_op.run()
accum_op = q.apply_grad([0, 2],
np.array([[0, 1], [3, 0]]).astype(np.float32),
[3, 2])
accum_op.run()
takeg_t = q.take_indexed_slices_grad(1)
val = self.evaluate(takeg_t)
self.assertAllEqual([0, 1, 2], val.indices)
self.assertAllEqual([[0.5, 0.5], [0, 2], [3, 0]], val.values)
self.assertAllEqual([-1, 2], val.dense_shape)
@test_util.run_deprecated_v1
def testAccumulatorTakeGradSum(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=(), reduction_type="SUM")
grad_indexed_slices = ops.IndexedSlices(
indices=[0, 1], values=np.array([[1, 0], [0, 2]]).astype(np.float32))
accum_op = q.apply_indexed_slices_grad(grad_indexed_slices)
accum_op.run()
accum_op = q.apply_grad([0, 2],
np.array([[0, 1], [3, 0]]).astype(np.float32),
[3, 2])
accum_op.run()
takeg_t = q.take_indexed_slices_grad(1)
val = self.evaluate(takeg_t)
self.assertAllEqual([0, 1, 2], val.indices)
self.assertAllEqual([[1, 1], [0, 2], [3, 0]], val.values)
self.assertAllEqual([-1, 2], val.dense_shape)
@test_util.run_deprecated_v1
def testAccumulatorTakeGradInvalidReductionType(self):
with self.assertRaises(ValueError):
data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=(), reduction_type="Invalid")
@test_util.run_deprecated_v1
def testAccumulatorRepeatedTakeGrad(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=())
grad_indexed_slices = ops.IndexedSlices(
indices=[0, 1], values=np.array([[1, 0], [0, 2]]).astype(np.float32))
accum_op = q.apply_indexed_slices_grad(grad_indexed_slices, local_step=0)
accum_op.run()
accum_op = q.apply_grad(
[0, 2],
np.array([[0, 1], [3, 0]]).astype(np.float32), [3, 2],
local_step=0)
accum_op.run()
takeg_t = q.take_indexed_slices_grad(1)
val = self.evaluate(takeg_t)
self.assertAllEqual(val.indices, [0, 1, 2])
self.assertAllEqual(val.values, [[0.5, 0.5], [0, 2], [3, 0]])
self.assertAllEqual(val.dense_shape, [-1, 2])
grad_indexed_slices = ops.IndexedSlices(
indices=[0, 1],
values=np.array([[10, 0], [0, 20]]).astype(np.float32))
accum_op = q.apply_indexed_slices_grad(grad_indexed_slices, local_step=1)
accum_op.run()
accum_op = q.apply_grad(
[0, 2],
np.array([[0, 10], [30, 0]]).astype(np.float32), [3, 2],
local_step=1)
accum_op.run()
takeg_t = q.take_indexed_slices_grad(1)
val = self.evaluate(takeg_t)
self.assertAllEqual(val.indices, [0, 1, 2])
self.assertAllEqual(val.values, [[5, 5], [0, 20], [30, 0]])
self.assertAllEqual(val.dense_shape, [-1, 2])
@test_util.run_v1_only("b/120545219")
def testParallelApplyGradMean(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
accum_ops = []
for x in elems:
x = _indexedslice(np.array([[x, 0], [0, x]]).astype(np.float32))
accum_ops.append(q.apply_indexed_slices_grad(x, local_step=0))
takeg_t = q.take_indexed_slices_grad(1)
def apply_indexed_slices_grad(accum_op):
self.evaluate(accum_op)
threads = [
self.checkedThread(
target=apply_indexed_slices_grad, args=(o,)) for o in accum_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
val = self.evaluate(takeg_t)
expected_val = sum(elems) / len(elems)
self._assertEqual_nparray(
np.array([[expected_val, 0], [0, expected_val]]).astype(np.float32),
val, sess)
@test_util.run_v1_only("b/120545219")
def testParallelApplyGradSum(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([2, 2]),
reduction_type="SUM")
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
accum_ops = []
for x in elems:
x = _indexedslice(np.array([[x, 0], [0, x]]).astype(np.float32))
accum_ops.append(q.apply_indexed_slices_grad(x, local_step=0))
takeg_t = q.take_indexed_slices_grad(1)
def apply_indexed_slices_grad(accum_op):
self.evaluate(accum_op)
threads = [
self.checkedThread(target=apply_indexed_slices_grad, args=(o,))
for o in accum_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
val = self.evaluate(takeg_t)
expected_val = 550.0
self._assertEqual_nparray(
np.array([[expected_val, 0], [0, expected_val]]).astype(np.float32),
val, sess)
@test_util.run_v1_only("b/120545219")
def testParallelTakeGrad(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
elems = [e + 1 for e in range(10)]
accum_ops = []
for e in elems:
v = _indexedslice(np.array([[0, 0], [e, 0]]).astype(np.float32))
accum_ops.append(q.apply_indexed_slices_grad(v, local_step=e - 1))
takeg_t = q.take_indexed_slices_grad(1)
results = []
def apply_indexed_slices_grad():
for accum_op in accum_ops:
time.sleep(1.0)
self.evaluate(accum_op)
apply_indexed_slices_grad_thread = self.checkedThread(
target=apply_indexed_slices_grad)
def take_grad():
t = self.evaluate(takeg_t)
results.append(t)
threads = [self.checkedThread(target=take_grad) for _ in range(10)]
for thread in threads:
thread.start()
apply_indexed_slices_grad_thread.start()
for thread in threads:
thread.join()
apply_indexed_slices_grad_thread.join()
for i in range(len(accum_ops)):
self._assertEqual_nparray(
np.array([[0, 0], [elems[i], 0]]), results[i], sess)
@test_util.run_v1_only("b/120545219")
def testAccumulatorApplyAndBlockingTake(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
elems = [10.0, 20.0, 30.0]
elems_ave = sum(elems) / len(elems)
accum_ops = []
for x in elems:
x = _indexedslice(np.array([[0, x], [0, 0]]).astype(np.float32))
accum_ops.append(q.apply_indexed_slices_grad(x, local_step=0))
takeg_t = q.take_indexed_slices_grad(3)
results = []
def apply_indexed_slices_grad():
for accum_op in accum_ops:
self.evaluate(accum_op)
def take_grad():
results.append(self.evaluate(takeg_t))
accum_thread = self.checkedThread(target=apply_indexed_slices_grad)
takeg_thread = self.checkedThread(target=take_grad)
accum_thread.start()
takeg_thread.start()
accum_thread.join()
takeg_thread.join()
self._assertEqual_nparray([[0, elems_ave], [0, 0]], results[0], sess)
def _blocking_takeg(self, sess, takeg_op):
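    # Expects the blocking take_grad op to be cancelled when the session closes.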
with self.assertRaisesOpError("was cancelled"):
self.evaluate(takeg_op)
@test_util.run_v1_only("b/120545219")
def testAccumulatorCancel(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1, 2, 3]))
takeg_t = q.take_indexed_slices_grad(1)
takeg_thread = self.checkedThread(
self._blocking_takeg, args=(sess, takeg_t))
takeg_thread.start()
time.sleep(1.0)
sess.close() # Will cancel blocked operation
takeg_thread.join()
@test_util.run_v1_only("b/120545219")
def testNonVectorIndices(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Input indices should be vector but received shape:"):
q.apply_grad(
grad_indices=[[0, 1], [1, 0]],
grad_values=np.array([1, 2]).astype(np.float32)).run()
@test_util.run_v1_only("b/120545219")
def testZeroDimensionValues(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Values cannot be 0-dimensional."):
q.apply_grad(
grad_indices=[0], grad_values=np.array(1).astype(np.float32)).run()
@test_util.run_v1_only("b/120545219")
def testWrongNonEmptyInputValues(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
" non-empty input values, got "):
q.apply_grad(
grad_indices=[0, 1],
grad_values=np.array([[0, 1, 1]]).astype(np.float32)).run()
@test_util.run_v1_only("b/120545219")
def testDynamicNonVectorIndices(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
x_indices = array_ops.placeholder(dtypes_lib.int64)
x_values = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(grad_indices=x_indices, grad_values=x_values)
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Input indices should be vector but received shape:"):
sess.run(accum_op,
feed_dict={
x_indices: [[0, 1], [1, 0]],
x_values: np.array([1, 2]).astype(np.float32)
})
@test_util.run_v1_only("b/120545219")
def testDynamicWrongNonEmptyInputValues(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
x_indices = array_ops.placeholder(dtypes_lib.int64)
x_values = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(grad_indices=x_indices, grad_values=x_values)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
" non-empty input values, got "):
sess.run(accum_op,
feed_dict={
x_indices: [0, 1],
x_values: np.array([[0, 1, 1]]).astype(np.float32)
})
@test_util.run_v1_only("b/120545219")
def testEmptyShapeApply(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([]))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Input indices should be vector"):
q.apply_grad(grad_indices=0, grad_values=[1.0], grad_shape=[]).run()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Input indices should be vector"):
q.apply_grad(grad_indices=0, grad_values=[1.0]).run()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Values cannot be 0-dimensional."):
q.apply_grad(grad_indices=[0], grad_values=1.0, grad_shape=[]).run()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Values cannot be 0-dimensional."):
q.apply_grad(grad_indices=[0], grad_values=1.0).run()
      # The right way to apply a scalar is with one-element indices and values.
q.apply_grad(grad_indices=[0], grad_values=[1.0], grad_shape=[]).run()
q.apply_grad(grad_indices=[0], grad_values=[1.0]).run()
@test_util.run_v1_only("b/120545219")
def testValidateShape(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=[2, 2, None])
# Provided shape has wrong rank
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected shape rank at least 3, got 2"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array([[1, 2]]).astype(np.float32),
grad_shape=[2, 2]).run()
# Provided shape has wrong dim
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected shape dim 1 to be 2, got 3"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array([[[1, 2], [3, 4], [5, 6]]]).astype(np.float32),
grad_shape=[2, 3, 2]).run()
      # Indices exceed the accumulator shape's limits
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: index of slice 0 exceeded limits of shape;"
" index is 3 exceeded 2"):
q.apply_grad(
grad_indices=[3],
grad_values=np.array([[[1, 2], [3, 4]]]).astype(np.float32)).run()
# Values' rank does not match shape
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected values rank at least 3, got 2"):
q.apply_grad(
grad_indices=[0, 1],
grad_values=np.array([[1, 2], [3, 4]]).astype(np.float32)).run()
# Values' dim does not match shape
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected values dim 1 to be 2, got 3"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[1, 2], [3, 4], [5, 6]]]).astype(np.float32)).run()
# First successful gradient creates additional constraints
      # Shape will additionally be constrained to [None,2,2,2] hereafter.
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]).astype(np.float32)).run()
# Values' rank does not match accumulated gradient
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected values rank 4, got 3"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array([[[1, 2], [3, 4]]]).astype(np.float32)).run()
# Values' dim does not match accumulated gradient
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected values dim 3 to be 2, got 3"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]).astype(
np.float32)).run()
      # After take_grad, the constraints on the accumulated gradient are removed.
self.evaluate(q.take_grad(1))
# First successful gradient imposes new constraints.
      # Hereafter, the shape will additionally be constrained to [None,2,2,3].
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]).astype(
np.float32),
local_step=1).run()
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected values dim 3 to be 3, got 2"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]).astype(np.float32),
local_step=1).run()
@test_util.run_deprecated_v1
def testReturnShape(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=[2, None])
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]).astype(np.float32)).run()
val = self.evaluate(q.take_indexed_slices_grad(1))
self.assertAllEqual(val.dense_shape, [2, 2, 2, 2])
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=[None, 2])
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]).astype(
np.float32)).run()
val = self.evaluate(q.take_indexed_slices_grad(1))
self.assertAllEqual(val.dense_shape, [-1, 2, 2, 3])
@test_util.run_deprecated_v1
  def testApplyGradInt32IndicesAndShape(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
accum_op = q.apply_grad(
grad_indices=constant_op.constant(
[0, 2], dtype=dtypes_lib.int32),
grad_values=constant_op.constant(
[[0, 0, 1], [3, 0, 4]], dtype=dtypes_lib.float32),
grad_shape=constant_op.constant(
[3, 3], dtype=dtypes_lib.int32))
accum_op.run()
accum_op = q.apply_indexed_slices_grad(
ops.IndexedSlices(
indices=constant_op.constant(
[0, 2], dtype=dtypes_lib.int32),
values=constant_op.constant(
[[0, 0, 1], [3, 0, 4]], dtype=dtypes_lib.float32),
dense_shape=constant_op.constant(
[3, 3], dtype=dtypes_lib.int32)))
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 2)
val = self.evaluate(q.take_indexed_slices_grad(1))
self.assertAllEqual(val.indices, [0, 2])
self.assertAllEqual(val.values, [[0, 0, 1], [3, 0, 4]])
self.assertAllEqual(val.dense_shape, [3, 3])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_conditional_accumulator_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.logging_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.framework import test_util
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class PrintV2LoggingLevelTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testPrintOneTensorLogInfo(self):
with self.cached_session():
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(
tensor, output_stream=tf_logging.info)
self.evaluate(print_op)
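      # tf_logging prefixes each message with its severity letter ("I" for INFO).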
self.assertTrue("I" in printed.contents())
expected = "[0 1 2 ... 7 8 9]"
self.assertTrue(expected in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintOneTensorLogWarning(self):
with self.cached_session():
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(
tensor, output_stream=tf_logging.warning)
self.evaluate(print_op)
self.assertTrue("W" in printed.contents())
expected = "[0 1 2 ... 7 8 9]"
self.assertTrue(expected in printed.contents())
@test_util.run_in_graph_and_eager_modes()
def testPrintOneTensorLogError(self):
with self.cached_session():
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(
tensor, output_stream=tf_logging.error)
self.evaluate(print_op)
self.assertTrue("E" in printed.contents())
expected = "[0 1 2 ... 7 8 9]"
self.assertTrue(expected in printed.contents())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/logging_ops_logging_level_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NDHWC", False), ("NDHWC", True)]
if test.is_gpu_available(cuda_only=True):
# "NCDHW" format is only supported on CUDA.
test_configs += [("NCDHW", True)]
return test_configs
class Conv3DTest(test.TestCase):
def _DtypesToTest(self, use_gpu):
if use_gpu:
if not test_util.GpuSupportsHalfMatMulAndConv():
return [dtypes.float64, dtypes.float32]
else:
# It is important that float32 comes before float16 here,
# as we will be using its gradients as reference for fp16 gradients.
return [dtypes.float64, dtypes.float32, dtypes.float16]
else:
return [dtypes.float64, dtypes.float32, dtypes.float16]
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride,
padding, data_format, dtype, use_gpu):
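    """Builds a conv3d op for the given config and returns its output tensor."""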
total_size_tensor = 1
total_size_filter = 1
for s in tensor_in_sizes:
total_size_tensor *= s
for s in filter_in_sizes:
total_size_filter *= s
    # Initializes the input tensor with an array of values between 0 and 1.
# We keep the input tensor values fairly small to avoid overflowing float16
# during the conv3d.
x1 = [f * 1.0 / total_size_tensor for f in range(1, total_size_tensor + 1)]
x2 = [f * 1.0 / total_size_filter for f in range(1, total_size_filter + 1)]
with self.cached_session(use_gpu=use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
if isinstance(stride, collections.Iterable):
strides = [1] + list(stride) + [1]
else:
strides = [1, stride, stride, stride, 1]
if data_format == "NCDHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv3d(t1, t2, strides, padding=padding,
data_format=data_format)
if data_format == "NCDHW":
conv = test_util.NCHWToNHWC(conv)
return conv
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
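    """Runs conv3d over all test configs and compares against expected values."""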
results = []
for data_format, use_gpu in GetTestConfigs():
for dtype in self._DtypesToTest(use_gpu):
result = self._SetupValuesForDevice(
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
data_format,
dtype,
use_gpu=use_gpu)
results.append(result)
with self.cached_session() as sess:
values = self.evaluate(results)
for value in values:
print("expected = ", expected)
print("actual = ", value)
tol = 1e-6
if value.dtype == np.float16:
tol = 1e-3
self.assertAllClose(expected, value.flatten(), atol=tol, rtol=tol)
def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes,
stride, dilation, padding, data_format,
use_gpu):
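    """Builds reference and dilated conv3d ops; returns (expected, computed)."""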
total_size_tensor = 1
total_size_filter = 1
for s in tensor_in_sizes:
total_size_tensor *= s
for s in filter_in_sizes:
total_size_filter *= s
    # Initializes the input tensor with an array of incrementing
    # numbers starting from 1.
x1 = [f * 1.0 for f in range(1, total_size_tensor + 1)]
x2 = [f * 1.0 for f in range(1, total_size_filter + 1)]
with self.cached_session(use_gpu=use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
if isinstance(stride, collections.Iterable):
strides = list(stride)
else:
strides = [stride, stride, stride]
if data_format == "NCDHW":
t1 = test_util.NHWCToNCHW(t1)
full_strides = [1, 1] + strides
full_dilation = [1, 1] + dilation
else:
full_strides = [1] + strides + [1]
full_dilation = [1] + dilation + [1]
expected = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilation,
data_format=data_format)
computed = nn_ops.conv3d(
t1,
t2,
strides=full_strides,
dilations=full_dilation,
padding=padding,
data_format=data_format)
if data_format == "NCDHW":
expected = test_util.NCHWToNHWC(expected)
computed = test_util.NCHWToNHWC(computed)
return expected, computed
def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, stride,
padding, dilations):
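    """Compares dilated conv3d results against the reference for each config."""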
expected_results = []
computed_results = []
default_dilations = (
dilations[0] == 1 and dilations[1] == 1 and dilations[2] == 1)
for data_format, use_gpu in GetTestConfigs():
      # If any dilation rate is larger than 1, only run the test on the GPU,
      # because there is currently no CPU implementation for arbitrary
      # dilation rates.
if default_dilations or use_gpu:
expected, computed = self._ComputeReferenceDilatedConv(
tensor_in_sizes, filter_in_sizes, stride, dilations, padding,
data_format, use_gpu)
expected_results.append(expected)
computed_results.append(computed)
tolerance = 1e-2 if use_gpu else 1e-5
with self.cached_session() as sess:
expected_values = self.evaluate(expected_results)
computed_values = self.evaluate(computed_results)
for e_value, c_value in zip(expected_values, computed_values):
print("expected = ", e_value)
print("actual = ", c_value)
self.assertAllClose(
e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=1e-6)
def testConv3D1x1x1Filter(self):
expected_output = [
0.18518519, 0.22222222, 0.25925926, 0.40740741, 0.5, 0.59259259,
0.62962963, 0.77777778, 0.92592593, 0.85185185, 1.05555556, 1.25925926,
1.07407407, 1.33333333, 1.59259259, 1.2962963, 1.61111111, 1.92592593
]
# These are equivalent to the Conv2D1x1 case.
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 1, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 2, 1, 3, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 1, 2, 3, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
def testConv3D1x1x1Filter2x1x1Dilation(self):
if test.is_gpu_available(cuda_only=True):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 3, 6, 1, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=1,
padding="VALID",
dilations=[2, 1, 1])
# Expected values computed using scipy's correlate function.
def testConv3D2x2x2Filter(self):
expected_output = [
3.77199074, 3.85069444, 3.92939815, 4.2650463, 4.35763889, 4.45023148,
6.73032407, 6.89236111, 7.05439815, 7.22337963, 7.39930556, 7.57523148,
9.68865741, 9.93402778, 10.17939815, 10.18171296, 10.44097222,
10.70023148
]
# expected_shape = [1, 3, 1, 2, 5]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3], # b, z, y, x, fin
filter_in_sizes=[2, 2, 2, 3, 3], # z, y, x, fin, fout
stride=1,
padding="VALID",
expected=expected_output)
def testConv3D2x2x2Filter1x2x1Dilation(self):
if test.is_gpu_available(cuda_only=True):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 4, 6, 3, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=1,
padding="VALID",
dilations=[1, 2, 1])
def testConv3DStrides(self):
expected_output = [
0.06071429, 0.08988095, 0.10238095, 0.11488095, 0.12738095, 0.13988095,
0.08452381, 0.26071429, 0.35238095, 0.36488095, 0.37738095, 0.38988095,
0.40238095, 0.23452381, 0.46071429, 0.61488095, 0.62738095, 0.63988095,
0.65238095, 0.66488095, 0.38452381, 1.12738095, 1.48988095, 1.50238095,
1.51488095, 1.52738095, 1.53988095, 0.88452381, 1.32738095, 1.75238095,
1.76488095, 1.77738095, 1.78988095, 1.80238095, 1.03452381, 1.52738095,
2.01488095, 2.02738095, 2.03988095, 2.05238095, 2.06488095, 1.18452381,
2.19404762, 2.88988095, 2.90238095, 2.91488095, 2.92738095, 2.93988095,
1.68452381, 2.39404762, 3.15238095, 3.16488095, 3.17738095, 3.18988095,
3.20238095, 1.83452381, 2.59404762, 3.41488095, 3.42738095, 3.43988095,
3.45238095, 3.46488095, 1.98452381
]
self._VerifyValues(
tensor_in_sizes=[1, 5, 8, 7, 1],
filter_in_sizes=[1, 2, 3, 1, 1],
stride=[2, 3, 1], # different stride for each spatial dimension
padding="SAME",
expected=expected_output)
def testConv3D2x2x2FilterStride2(self):
expected_output = [
3.77199074, 3.85069444, 3.92939815, 9.68865741, 9.93402778, 10.17939815
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3],
filter_in_sizes=[2, 2, 2, 3, 3],
stride=2,
padding="VALID",
expected=expected_output)
def testConv3DStride3(self):
expected_output = [
1.51140873, 1.57167659, 1.63194444, 1.56349206, 1.62673611, 1.68998016,
1.6155754, 1.68179563, 1.74801587, 1.9280754, 2.01215278, 2.09623016,
1.98015873, 2.0672123, 2.15426587, 2.03224206, 2.12227183, 2.21230159,
4.4280754, 4.65500992, 4.88194444, 4.48015873, 4.71006944, 4.93998016,
4.53224206, 4.76512897, 4.99801587, 4.84474206, 5.09548611, 5.34623016,
4.8968254, 5.15054563, 5.40426587, 4.94890873, 5.20560516, 5.46230159
]
self._VerifyValues(
tensor_in_sizes=[1, 6, 7, 8, 2],
filter_in_sizes=[3, 2, 1, 2, 3],
stride=3,
padding="VALID",
expected=expected_output)
def testConv3D2x2x2FilterStride2Same(self):
expected_output = [
3.77199074, 3.85069444, 3.92939815, 2.0162037, 2.06597222, 2.11574074,
9.68865741, 9.93402778, 10.17939815, 4.59953704, 4.73263889, 4.86574074
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3],
filter_in_sizes=[2, 2, 2, 3, 3],
stride=2,
padding="SAME",
expected=expected_output)
def testKernelSmallerThanStride(self):
expected_output = [
0.03703704, 0.11111111, 0.25925926, 0.33333333, 0.7037037, 0.77777778,
0.92592593, 1.
]
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=2,
padding="SAME",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=2,
padding="VALID",
expected=expected_output)
expected_output = [
0.54081633, 0.58017493, 0.28061224, 0.81632653, 0.85568513, 0.40306122,
0.41873178, 0.4340379, 0.19642857, 2.46938776, 2.50874636, 1.1377551,
2.74489796, 2.78425656, 1.26020408, 1.16873178, 1.1840379, 0.51785714,
1.09511662, 1.10604956, 0.44642857, 1.17164723, 1.18258017, 0.47704082,
0.3691691, 0.37244898, 0.125
]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 7, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=3,
padding="SAME",
expected=expected_output)
expected_output = [
0.540816, 0.580175, 0.816327, 0.855685, 2.469388, 2.508746, 2.744898,
2.784257
]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 7, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=3,
padding="VALID",
expected=expected_output)
def testKernelSizeMatchesInputSize(self):
self._VerifyValues(
tensor_in_sizes=[1, 2, 1, 2, 1],
filter_in_sizes=[2, 1, 2, 1, 2],
stride=1,
padding="VALID",
expected=[1.5625, 1.875])
def _ConstructAndTestGradientForConfig(
self, batch, input_shape, filter_shape, in_depth, out_depth, stride,
padding, test_input, data_format, use_gpu):
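    """Numerically checks conv3d gradients for one data_format/device config."""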
input_planes, input_rows, input_cols = input_shape
filter_planes, filter_rows, filter_cols = filter_shape
input_shape = [batch, input_planes, input_rows, input_cols, in_depth]
filter_shape = [
filter_planes, filter_rows, filter_cols, in_depth, out_depth
]
if isinstance(stride, collections.Iterable):
strides = [1] + list(stride) + [1]
else:
strides = [1, stride, stride, stride, 1]
if padding == "VALID":
output_planes = int(
math.ceil((input_planes - filter_planes + 1.0) / strides[1]))
output_rows = int(
math.ceil((input_rows - filter_rows + 1.0) / strides[2]))
output_cols = int(
math.ceil((input_cols - filter_cols + 1.0) / strides[3]))
else:
output_planes = int(math.ceil(float(input_planes) / strides[1]))
output_rows = int(math.ceil(float(input_rows) / strides[2]))
output_cols = int(math.ceil(float(input_cols) / strides[3]))
output_shape = [batch, output_planes, output_rows, output_cols, out_depth]
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
for data_type in self._DtypesToTest(use_gpu=use_gpu):
# TODO(mjanusz): Modify gradient_checker to also provide max relative
# error and synchronize the tolerance levels between the tests for forward
# and backward computations.
if data_type == dtypes.float64:
tolerance = 1e-8
elif data_type == dtypes.float32:
tolerance = 5e-3
elif data_type == dtypes.float16:
tolerance = 1e-3
with self.cached_session(use_gpu=use_gpu):
orig_input_tensor = constant_op.constant(
input_data, shape=input_shape, dtype=data_type, name="input")
filter_tensor = constant_op.constant(
filter_data, shape=filter_shape, dtype=data_type, name="filter")
if data_format == "NCDHW":
input_tensor = test_util.NHWCToNCHW(orig_input_tensor)
new_strides = test_util.NHWCToNCHW(strides)
else:
input_tensor = orig_input_tensor
new_strides = strides
conv = nn_ops.conv3d(
input_tensor,
filter_tensor,
new_strides,
padding,
data_format=data_format,
name="conv")
if data_format == "NCDHW":
conv = test_util.NCHWToNHWC(conv)
self.assertEqual(conv.shape, tensor_shape.TensorShape(output_shape))
if test_input:
jacob_t, jacob_n = gradient_checker.compute_gradient(
orig_input_tensor, input_shape, conv, output_shape)
else:
jacob_t, jacob_n = gradient_checker.compute_gradient(
filter_tensor, filter_shape, conv, output_shape)
if data_type != dtypes.float16:
reference_jacob_t = jacob_t
err = np.fabs(jacob_t - jacob_n).max()
else:
# Compare fp16 theoretical gradients to fp32 theoretical gradients,
# since fp16 numerical gradients are too imprecise.
err = np.fabs(jacob_t - reference_jacob_t).max()
print("conv3d gradient error = ", err)
self.assertLess(err, tolerance)
def ConstructAndTestGradient(self, **kwargs):
for data_format, use_gpu in GetTestConfigs():
self._ConstructAndTestGradientForConfig(data_format=data_format,
use_gpu=use_gpu, **kwargs)
@test_util.run_deprecated_v1
def testInputGradientValidPaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 5, 4),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientValidPaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=4,
input_shape=(4, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=False)
@test_util.run_deprecated_v1
def testInputGradientValidPaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(6, 3, 5),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=2,
padding="VALID",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientValidPaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(7, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=2,
padding="VALID",
test_input=False)
@test_util.run_deprecated_v1
def testInputGradientValidPaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 7, 6),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=3,
padding="VALID",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientValidPaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(4, 4, 7),
filter_shape=(4, 4, 4),
in_depth=2,
out_depth=3,
stride=3,
padding="VALID",
test_input=False)
@test_util.run_deprecated_v1
def testInputGradientSamePaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 2, 2),
filter_shape=(3, 2, 1),
in_depth=2,
out_depth=1,
stride=1,
padding="SAME",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientSamePaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=1,
padding="SAME",
test_input=False)
@test_util.run_deprecated_v1
def testInputGradientSamePaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(6, 3, 4),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=2,
padding="SAME",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientSamePaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=4,
input_shape=(7, 3, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=2,
padding="SAME",
test_input=False)
@test_util.run_deprecated_v1
def testInputGradientSamePaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(9, 3, 6),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=3,
padding="SAME",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientSamePaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(9, 4, 7),
filter_shape=(4, 4, 4),
in_depth=2,
out_depth=3,
stride=3,
padding="SAME",
test_input=False)
@test_util.run_deprecated_v1
def testInputGradientSamePaddingDifferentStrides(self):
self.ConstructAndTestGradient(
batch=1,
input_shape=(5, 8, 7),
filter_shape=(1, 2, 3),
in_depth=2,
out_depth=3,
stride=[2, 3, 1],
padding="SAME",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientKernelSizeMatchesInputSize(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(5, 4, 3),
filter_shape=(5, 4, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=False)
@test_util.run_deprecated_v1
def testInputGradientKernelSizeMatchesInputSize(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(5, 4, 3),
filter_shape=(5, 4, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=True)
def disabledtestFilterGradientSamePaddingDifferentStrides(self):
self.ConstructAndTestGradient(
batch=1,
input_shape=(5, 8, 7),
filter_shape=(1, 2, 3),
in_depth=2,
out_depth=3,
stride=[2, 3, 1],
padding="SAME",
test_input=False)
  # Test the fast path in gemm_pack_rhs/gemm_pack_colmajor_block, where the
  # channel dimension is a multiple of the packet size.
@test_util.run_deprecated_v1
def testInputGradientValidPaddingStrideOneFastPath(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 5, 4),
filter_shape=(2, 2, 2),
in_depth=8,
out_depth=2,
stride=1,
padding="VALID",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientValidPaddingStrideOneFastPath(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(4, 6, 5),
filter_shape=(2, 2, 2),
in_depth=8,
out_depth=2,
stride=1,
padding="VALID",
test_input=False)
# Testing for backprops
def _RunAndVerifyBackprop(self, input_sizes, filter_sizes, output_sizes,
strides, dilations, padding, data_format, use_gpu,
err, mode):
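    """Compares conv3d gradients against nn_ops.convolution gradients."""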
total_input_size = 1
total_filter_size = 1
for s in input_sizes:
total_input_size *= s
for s in filter_sizes:
total_filter_size *= s
    # Initializes the input tensor with an array of incrementing
    # numbers starting from 1.
x1 = [f * 1.0 for f in range(1, total_input_size + 1)]
x2 = [f * 1.0 for f in range(1, total_filter_size + 1)]
default_dilations = (
dilations[0] == 1 and dilations[1] == 1 and dilations[2] == 1)
    # If any dilation rate is larger than 1, only run the test on the GPU,
    # because there is currently no CPU implementation for arbitrary
    # dilation rates.
if default_dilations or use_gpu:
with self.cached_session(use_gpu=use_gpu) as sess:
if data_format == "NCDHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t1 = constant_op.constant(x1, shape=input_sizes)
t2 = constant_op.constant(x2, shape=filter_sizes)
full_strides = [1] + strides + [1]
full_dilations = [1] + dilations + [1]
if data_format == "NCDHW":
full_strides = test_util.NHWCToNCHW(full_strides)
full_dilations = test_util.NHWCToNCHW(full_dilations)
actual = nn_ops.conv3d(
t1,
t2,
strides=full_strides,
dilations=full_dilations,
padding=padding,
data_format=data_format)
expected = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilations,
data_format=data_format)
if data_format == "NCDHW":
actual = test_util.NCHWToNHWC(actual)
expected = test_util.NCHWToNHWC(expected)
actual_grad = gradients_impl.gradients(actual, t1
if mode == "input" else t2)[0]
expected_grad = gradients_impl.gradients(expected, t1
if mode == "input" else t2)[0]
# "values" consists of two tensors for two backprops
actual_value = self.evaluate(actual_grad)
expected_value = self.evaluate(expected_grad)
self.assertShapeEqual(actual_value, actual_grad)
self.assertShapeEqual(expected_value, expected_grad)
print("expected = ", expected_value)
print("actual = ", actual_value)
self.assertArrayNear(expected_value.flatten(), actual_value.flatten(),
err)
@test_util.run_deprecated_v1
def testConv3D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackprop(
input_sizes=[1, 3, 6, 1, 1],
filter_sizes=[2, 2, 1, 1, 1],
output_sizes=[1, 1, 5, 1, 1],
strides=[1, 1, 1],
dilations=[2, 1, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5,
mode="filter")
@test_util.run_deprecated_v1
def testConv3D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackprop(
input_sizes=[1, 3, 6, 1, 1],
filter_sizes=[2, 2, 1, 1, 1],
output_sizes=[1, 1, 5, 1, 1],
strides=[1, 1, 1],
dilations=[2, 1, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5,
mode="input")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/conv_ops_3d_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeRaw op from parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
class DecodeRawOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testToUint8(self):
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[2])
decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.uint8)
self.assertEqual([2, None], decode.get_shape().as_list())
result = decode.eval(feed_dict={in_bytes: ["A", "a"]})
self.assertAllEqual([[ord("A")], [ord("a")]], result)
result = decode.eval(feed_dict={in_bytes: ["wer", "XYZ"]})
self.assertAllEqual([[ord("w"), ord("e"), ord("r")],
[ord("X"), ord("Y"), ord("Z")]], result)
with self.assertRaisesOpError(
"DecodeRaw requires input strings to all be the same size, but "
"element 1 has size 5 != 6"):
decode.eval(feed_dict={in_bytes: ["short", "longer"]})
@test_util.run_deprecated_v1
def testToInt16(self):
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.int16)
self.assertEqual([None, None], decode.get_shape().as_list())
result = decode.eval(feed_dict={in_bytes: ["AaBC"]})
self.assertAllEqual(
[[ord("A") + ord("a") * 256, ord("B") + ord("C") * 256]], result)
with self.assertRaisesOpError(
"Input to DecodeRaw has length 3 that is not a multiple of 2, the "
"size of int16"):
decode.eval(feed_dict={in_bytes: ["123", "456"]})
@test_util.run_deprecated_v1
def testEndianness(self):
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decode_le = parsing_ops.decode_raw(
in_bytes, out_type=dtypes.int32, little_endian=True)
decode_be = parsing_ops.decode_raw(
in_bytes, out_type=dtypes.int32, little_endian=False)
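      # The same four bytes decode to different int32 values under each byte order.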
result = decode_le.eval(feed_dict={in_bytes: ["\x01\x02\x03\x04"]})
self.assertAllEqual([[0x04030201]], result)
result = decode_be.eval(feed_dict={in_bytes: ["\x01\x02\x03\x04"]})
self.assertAllEqual([[0x01020304]], result)
@test_util.run_deprecated_v1
def testToFloat16(self):
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.float16)
self.assertEqual([None, None], decode.get_shape().as_list())
expected_result = np.matrix([[1, -2, -3, 4]], dtype="<f2")
result = decode.eval(feed_dict={in_bytes: [expected_result.tostring()]})
self.assertAllEqual(expected_result, result)
@test_util.run_deprecated_v1
def testToBool(self):
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.bool)
self.assertEqual([None, None], decode.get_shape().as_list())
expected_result = np.matrix([[True, False, False, True]], dtype="<b1")
result = decode.eval(feed_dict={in_bytes: [expected_result.tostring()]})
self.assertAllEqual(expected_result, result)
@test_util.run_deprecated_v1
def testToComplex64(self):
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.complex64)
self.assertEqual([None, None], decode.get_shape().as_list())
expected_result = np.matrix([[1 + 1j, 2 - 2j, -3 + 3j, -4 - 4j]],
dtype="<c8")
result = decode.eval(feed_dict={in_bytes: [expected_result.tostring()]})
self.assertAllEqual(expected_result, result)
@test_util.run_deprecated_v1
def testToComplex128(self):
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.complex128)
self.assertEqual([None, None], decode.get_shape().as_list())
expected_result = np.matrix([[1 + 1j, 2 - 2j, -3 + 3j, -4 - 4j]],
dtype="<c16")
result = decode.eval(feed_dict={in_bytes: [expected_result.tostring()]})
self.assertAllEqual(expected_result, result)
@test_util.run_deprecated_v1
def testEmptyStringInput(self):
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.float16)
for num_inputs in range(3):
result = decode.eval(feed_dict={in_bytes: [""] * num_inputs})
self.assertEqual((num_inputs, 0), result.shape)
@test_util.run_deprecated_v1
def testToUInt16(self):
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.uint16)
self.assertEqual([None, None], decode.get_shape().as_list())
# Use FF/EE/DD/CC so that decoded value is higher than 32768 for uint16
result = decode.eval(feed_dict={in_bytes: [b"\xFF\xEE\xDD\xCC"]})
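      # Little-endian decoding yields 0xEEFF == 61183 and 0xCCDD == 52445,
      # both above the int16 range.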
self.assertAllEqual(
[[0xFF + 0xEE * 256, 0xDD + 0xCC * 256]], result)
with self.assertRaisesOpError(
"Input to DecodeRaw has length 3 that is not a multiple of 2, the "
"size of uint16"):
decode.eval(feed_dict={in_bytes: ["123", "456"]})
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/decode_raw_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Conv3DBackpropFilterV2GradTest(test.TestCase):
@test_util.run_deprecated_v1
def testGradient(self):
with self.cached_session():
for padding in ["SAME", "VALID"]:
for stride in [1, 2]:
np.random.seed(1)
in_shape = [2, 4, 3, 3, 2]
in_val = constant_op.constant(
2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
filter_shape = [3, 3, 3, 2, 3]
strides = [1, stride, stride, stride, 1]
# Make a convolution op with the current settings, just to easily get
# the shape of the output.
conv_out = nn_ops.conv3d(in_val,
array_ops.zeros(filter_shape), strides,
padding)
out_backprop_shape = conv_out.get_shape().as_list()
out_backprop_val = constant_op.constant(
2 * np.random.random_sample(out_backprop_shape) - 1,
dtype=dtypes.float32)
output = nn_ops.conv3d_backprop_filter_v2(in_val, filter_shape,
out_backprop_val, strides,
padding)
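          # Numerically check the gradient of the filter backprop with respect
          # to both the input and the incoming output backprop.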
err = gradient_checker.compute_gradient_error(
[in_val, out_backprop_val], [in_shape, out_backprop_shape],
output, filter_shape)
print("conv3d_backprop_filter gradient err = %g " % err)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/conv3d_backprop_filter_v2_grad_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def GetDeviceScope(self, use_gpu=False):
if context.executing_eagerly():
if use_gpu and test.is_gpu_available():
return ops.device("GPU:0")
return ops.device("CPU:0")
else:
return self.session(use_gpu=use_gpu)
def GetTestConfigs(include_nchw_vect_c=False):
"""Get all the valid tests configs to run.
Args:
include_nchw_vect_c: Whether to include NCHW_VECT_C in the test configs.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NHWC", False), ("NHWC", True)]
if not test.is_gpu_available(cuda_only=True):
tf_logging.info("NCHW and NCHW_VECT_C tests skipped because not run with "
"--config=cuda or no GPUs available.")
return test_configs
# "NCHW" format is currently supported exclusively on CUDA GPUs.
test_configs += [("NCHW", True)]
if include_nchw_vect_c:
if test.is_gpu_available(
cuda_only=True, min_cuda_compute_capability=(6, 1)):
test_configs += [("NCHW_VECT_C", True)]
else:
tf_logging.info("NCHW_VECT_C test skipped because no GPUs with "
"compute capability >= 6.1 are available.")
return test_configs
def GetShrunkInceptionMaxPoolShapes(shrink=30):
"""Iterator for some of the max pool ops in the Inception 2015 model.
Args:
shrink: Factor to shrink depth relative to Inception.
Yields:
Tuple (name, input_size, filter_size, out_size, strides, padding)
"""
names = ["maxpool2", "maxpool3", "maxpool4", "maxpool5"]
input_sizes = [[32, 71, 71, 192], [32, 35, 35, 288], [32, 17, 17, 1248],
[32, 8, 8, 2048]]
filter_sizes = [[1, 3, 3, 1], [1, 3, 3, 1], [1, 3, 3, 1], [1, 3, 3, 1]]
output_sizes = [[32, 35, 35, 192], [32, 17, 17, 288], [32, 8, 8, 1248],
[32, 8, 8, 2048]]
strides = [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1], [1, 1, 1, 1]]
# Shrink each depth value
for i in input_sizes:
i[3] //= shrink
for o in output_sizes:
o[3] //= shrink
paddings = ["VALID", "VALID", "VALID", "SAME"]
for n, i, f, o, s, p in zip(names, input_sizes, filter_sizes, output_sizes,
strides, paddings):
yield n, i, f, o, s, p
class PoolingTest(test.TestCase):
def _VerifyOneType(self, pool_func, input_sizes, ksize, strides, padding,
data_format, data_type, expected, use_gpu, v2):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Function to be called, e.g. nn_ops.max_pool, nn_ops.avg_pool,
        or gen_nn_ops.max_pool_v2.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions.
      strides: The stride dimensions.
      padding: Padding type.
      data_format: The data format we use to run the pooling operation.
      data_type: The data type to use to run the pooling operation.
      expected: An array containing the expected operation outputs.
      use_gpu: Whether we are running on GPU.
      v2: Whether to use the V2 op, which takes ksize and strides as tensors.
    """
total_size = 1
for s in input_sizes:
total_size *= s
if v2 and data_format != "NHWC":
tf_logging.info("v2 not supported for %s", data_format)
return
if data_format == "NCHW_VECT_C":
if data_type != dtypes.float32:
tf_logging.info("quantization to qint8 not implemented for %r",
data_type)
return
if input_sizes[-1] % 4 != 0:
tf_logging.info("Skipping test for depth %d", input_sizes[-1])
return
tf_logging.info("Running %s test. %r %r %d %r %r %r %s", data_format, v2,
input_sizes, total_size, pool_func, ksize, strides,
data_type)
# Initializes the input tensor with array containing incrementing
# numbers from 1, wrapping round to -127 after 127 to support int8.
x = [((f + 128) % 255) - 127 for f in range(total_size)]
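    # For example, x begins 1, 2, ..., 127, -127, -126, ... for large inputs.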
with self.cached_session(use_gpu=use_gpu):
t = constant_op.constant(x, shape=input_sizes, dtype=data_type)
if data_format in ("NCHW", "NCHW_VECT_C"):
if data_format == "NCHW_VECT_C":
t = test_util.NHWCToNCHW_VECT_C(t)
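          # The NCHW_VECT_C path operates on qint8, so quantize the float
          # input into the [-128, 127] range first (dequantized again below).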
t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
else:
t = test_util.NHWCToNCHW(t)
ksize = test_util.NHWCToNCHW(ksize)
strides = test_util.NHWCToNCHW(strides)
ksize_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])
strides_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])
if v2:
t = pool_func(
t,
ksize=ksize_placeholder,
strides=strides_placeholder,
padding=padding,
data_format=data_format)
else:
t = pool_func(
t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW_VECT_C":
t = gen_array_ops.dequantize(t, -128, 127)
t = test_util.NCHW_VECT_CToNHWC(t)
elif data_format == "NCHW":
t = test_util.NCHWToNHWC(t)
if v2:
actual = t.eval(feed_dict={
ksize_placeholder: ksize,
strides_placeholder: strides
})
else:
actual = self.evaluate(t)
self.assertShapeEqual(actual, t)
self.assertAllCloseAccordingToType(expected, actual.flatten())
def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,
data_format, expected, use_gpu, v2):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Function to be called, e.g. nn_ops.max_pool, nn_ops.avg_pool,
        or gen_nn_ops.max_pool_v2.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions.
      strides: The stride dimensions.
      padding: Padding type.
      data_format: The data format we use to run the pooling operation.
      expected: An array containing the expected operation outputs.
      use_gpu: Whether we are running on GPU.
      v2: Whether to use the V2 op, which takes ksize and strides as tensors.
    """
if data_format == "NCHW_VECT_C":
avg_pool_func = nn_ops.avg_pool
tf_logging.info("pool_func=%s", pool_func)
if pool_func == avg_pool_func:
tf_logging.info("NCHW_VECT_C not yet implemented for avg_pool")
return
self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
data_format, dtypes.float32, expected, use_gpu, v2)
self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
data_format, dtypes.float64, expected, use_gpu, v2)
if not use_gpu or test_util.GpuSupportsHalfMatMulAndConv():
self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
data_format, dtypes.float16, expected, use_gpu, v2)
def _VerifyValues(self,
pool_func,
input_sizes,
ksize,
strides,
padding,
expected,
use_gpu,
v2=False):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Function to be called, e.g. nn_ops.max_pool, nn_ops.avg_pool,
        or gen_nn_ops.max_pool_v2.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions.
      strides: The stride dimensions.
      padding: Padding type.
      expected: An array containing the expected operation outputs.
      use_gpu: Whether we are running on GPU.
      v2: Whether to use the V2 op, which takes ksize and strides as tensors.
    """
for (data_format, use_gpu_2) in GetTestConfigs(True):
if use_gpu_2 == use_gpu:
self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding,
data_format, expected, use_gpu, v2)
def _testAvgPoolValidPadding(self, use_gpu):
expected_output = [7.0, 8.0, 9.0]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output,
use_gpu=use_gpu)
def _testAvgPoolSamePadding(self, use_gpu):
expected_output = [8.5, 9.5, 10.5, 14.5, 15.5, 16.5]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 2, 4, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu)
def _testAvgPoolSamePaddingNonSquareWindow(self, use_gpu):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
# [avg(1.0, 2.0), avg(2.0, padded0),
# avg(3.0, 4.0), avg(4.0, padded0)]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[1.5, 2.0, 3.5, 4.0],
use_gpu=use_gpu)
# Window of [x,
# x] should do:
# [avg(1.0, 3.0), avg(2.0, 4.0)
# avg(3.0, padded0), avg(4.0, padded0)]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 2, 2, 1],
ksize=[1, 2, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 3.0, 3.0, 4.0],
use_gpu=use_gpu)
def _testAvgPoolSamePaddingNonSquareWindowMultiBatch(self, use_gpu):
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[2, 2, 2, 2],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[
2.0, 3.0, 3.0, 4.0, 6.0, 7.0, 7.0, 8.0, 10.0, 11.0, 11.0, 12.0,
14.0, 15.0, 15.0, 16.0
],
use_gpu=use_gpu)
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[2, 2, 2, 2],
ksize=[1, 2, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[
3.0, 4.0, 5.0, 6.0, 5.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0,
13.0, 14.0, 15.0, 16.0
],
use_gpu=use_gpu)
def _testAvgPoolValidPaddingUnevenStride(self, use_gpu):
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0, 16.0, 17.0, 18.0],
use_gpu=use_gpu)
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
use_gpu=use_gpu)
def _testAvgPoolSamePadding4(self, use_gpu):
expected_output = [
11.0, 12.0, 13.0, 14.0, 19.0, 20.0, 21.0, 22.0, 43.0, 44.0, 45.0, 46.0,
51.0, 52.0, 53.0, 54.0
]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu)
def _testAvgPoolSamePaddingPacket4(self, use_gpu):
expected_output = [
21.0, 22.0, 23.0, 24.0, 27.0, 28.0, 29.0, 30.0, 45.0, 46.0, 47.0, 48.0,
51.0, 52.0, 53.0, 54.0
]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 4, 4, 4],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu)
def _testAvgPoolSamePaddingPacket8(self, use_gpu):
expected_output = [
-12.0, -11.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, 4.0, 5.0, 6.0, 7.0,
8.0, 9.0, 10.0, 11.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0,
32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, -3.5, -54.0, -53.0, -52.0,
-51.0, -50.0, -49.0, -48.0, -47.0, -38.0, -37.0, -36.0, -35.0, -34.0,
-33.0, -32.0, -31.0, -22.0, -21.0, -20.0, -19.0, -18.0, -17.0, -16.0,
-15.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, -3.0, -11.0, -10.0,
-9.0, -8.0, -7.0, -6.0, -5.0, -4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0,
12.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 33.0, 34.0, 35.0,
36.0, 37.0, 38.0, -3.5, -2.5, -85.0, -84.0, -83.0, -82.0, -81.0, -80.0,
-79.0, -78.0, -69.0, -68.0, -67.0, -66.0, -65.0, -64.0, -63.0, -62.0,
-53.0, -52.0, -51.0, -50.0, -49.0, -48.0, -47.0, -46.0, -41.0, -40.0,
-39.0, -38.0, -37.0, -36.0, -35.0, -34.0
]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu)
def _testAvgPoolEmptyInput(self, use_gpu):
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[0, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[],
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testAvgPooling(self):
for use_gpu in True, False:
self._testAvgPoolValidPadding(use_gpu)
self._testAvgPoolSamePadding(use_gpu)
self._testAvgPoolSamePaddingNonSquareWindow(use_gpu)
self._testAvgPoolSamePaddingNonSquareWindowMultiBatch(use_gpu)
self._testAvgPoolValidPaddingUnevenStride(use_gpu)
self._testAvgPoolSamePadding4(use_gpu)
self._testAvgPoolSamePaddingPacket4(use_gpu)
self._testAvgPoolSamePaddingPacket8(use_gpu)
self._testAvgPoolEmptyInput(use_gpu)
def _testMaxPoolValidPadding(self, use_gpu):
expected_output = [13.0, 14.0, 15.0]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output,
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output,
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolSamePadding(self, use_gpu):
expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolSamePaddingNonSquareWindow(self, use_gpu):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
#
# [max(1.0, 2.0), max(2.0, padded0),
# max(3.0, 4.0), max(4.0, padded0)]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 2.0, 4.0, 4.0],
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 2.0, 4.0, 4.0],
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolValidPaddingUnevenStride(self, use_gpu):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],
use_gpu=use_gpu)
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],
use_gpu=use_gpu,
v2=v2)
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolSamePaddingPacket4(self, use_gpu):
expected_output = [
21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,
61.0, 62.0, 63.0, 64.0
]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolSamePaddingPacket8(self, use_gpu):
expected_output = [
81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 97.0, 98.0, 99.0, 100.0,
101.0, 102.0, 103.0, 104.0, 113.0, 114.0, 115.0, 116.0, 117.0, 118.0,
119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 125.0, 126.0, 127.0, 120.0,
18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 34.0, 35.0, 36.0, 37.0,
38.0, 39.0, 40.0, 41.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0,
58.0, 59.0, 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 82.0, 83.0, 84.0, 85.0,
86.0, 87.0, 88.0, 89.0, 98.0, 99.0, 100.0, 101.0, 102.0, 103.0, 104.0,
105.0, 114.0, 115.0, 116.0, 117.0, 118.0, 119.0, 120.0, 121.0, 122.0,
123.0, 124.0, 125.0, 126.0, 127.0, 120.0, 121.0, -45.0, -44.0, -43.0,
-42.0, -41.0, -40.0, -39.0, -38.0, -29.0, -28.0, -27.0, -26.0, -25.0,
-24.0, -23.0, -22.0, -13.0, -12.0, -11.0, -10.0, -9.0, -8.0, -7.0, -6.0,
-5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0
]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolEmptyInput(self, use_gpu):
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[0, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[],
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testMaxPooling(self):
for use_gpu in True, False:
self._testMaxPoolValidPadding(use_gpu)
self._testMaxPoolSamePadding(use_gpu)
self._testMaxPoolSamePaddingNonSquareWindow(use_gpu)
self._testMaxPoolValidPaddingUnevenStride(use_gpu)
self._testMaxPoolSamePaddingPacket4(use_gpu)
self._testMaxPoolSamePaddingPacket8(use_gpu)
self._testMaxPoolEmptyInput(use_gpu)
# Tests for DepthwiseMaxPooling on CPU only.
@test_util.run_deprecated_v1
def testDepthwiseMaxPool1x1DepthWindow1(self):
# input is:
# [1.0, ..., 10.0] along depth,
#
# We maxpool by depth in patches of 2.
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 1, 1, 10],
ksize=[1, 1, 1, 2],
strides=[1, 1, 1, 2],
padding="SAME",
expected=[2.0, 4.0, 6.0, 8.0, 10.0],
use_gpu=False)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 1, 1, 10],
ksize=[1, 1, 1, 2],
strides=[1, 1, 1, 2],
padding="SAME",
expected=[2.0, 4.0, 6.0, 8.0, 10.0],
use_gpu=False,
v2=v2)
@test_util.run_deprecated_v1
def testDepthwiseMaxPool2x2DepthWindow3(self):
# input is:
#
# a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
# output. Each node has contiguous values, so the depthwise max
# should be multiples of 3.0.
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 2, 6],
ksize=[1, 1, 1, 3],
strides=[1, 1, 1, 3],
padding="SAME",
expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],
use_gpu=False)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 2, 2, 6],
ksize=[1, 1, 1, 3],
strides=[1, 1, 1, 3],
padding="SAME",
expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],
use_gpu=False,
v2=v2)
@test_util.run_deprecated_v1
def testKernelSmallerThanStrideValid(self):
for use_gpu in [True, False]:
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID",
expected=[9, 12, 30, 33],
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID",
expected=[9, 12, 30, 33],
use_gpu=use_gpu,
v2=v2)
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID",
expected=[5, 8, 26, 29],
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testKernelSmallerThanStrideSame(self):
for use_gpu in [True, False]:
for pool_func in [nn_ops.max_pool, nn_ops.avg_pool]:
self._VerifyValues(
pool_func,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 7, 9],
use_gpu=use_gpu)
self._VerifyValues(
pool_func,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 9, 11],
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 7, 9],
use_gpu=use_gpu,
v2=v2)
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 9, 11],
use_gpu=use_gpu,
v2=v2)
def _testDepthwiseMaxPoolInvalidConfig(self,
in_size,
ksize,
strides,
error_msg,
use_gpu=False):
with self.cached_session(use_gpu=use_gpu):
t = constant_op.constant(1.0, shape=in_size)
with self.assertRaisesRegexp(errors_impl.UnimplementedError, error_msg):
t = nn_ops.max_pool(
t, ksize=ksize, strides=strides, padding="SAME").eval()
@test_util.disable_xla("b/123338077") # Passes with XLA
def testDepthwiseMaxPoolInvalidConfigs(self):
self._testDepthwiseMaxPoolInvalidConfig(
[1, 2, 2, 4], [1, 2, 2, 2], [1, 1, 1, 2],
"exactly one of pooling across depth")
self._testDepthwiseMaxPoolInvalidConfig(
[1, 2, 2, 4], [1, 1, 1, 2], [1, 1, 1, 1],
"depth window to equal the depth stride")
self._testDepthwiseMaxPoolInvalidConfig([1, 2, 2, 4], [1, 1, 1, 3],
[1, 1, 1, 3], "evenly divide")
if test.is_gpu_available():
with self.session(use_gpu=True):
t = variables.Variable(np.ones([1, 2, 2, 4]))
self.evaluate(variables.global_variables_initializer())
with self.assertRaisesOpError("for CPU devices"):
nn_ops.max_pool(
t, ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
padding="SAME").eval()
# The following are tests that verify that the CPU and GPU implementations
# produce the same results.
def _CompareMaxPoolingFwd(self, input_shape, ksize, strides, padding):
for dtype in np.float64, np.float32, np.float16:
tensor_input = np.random.rand(*input_shape).astype(dtype)
with self.cached_session(use_gpu=True):
t = constant_op.constant(tensor_input, shape=input_shape)
out_op, _ = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
t = constant_op.constant(tensor_input, shape=input_shape)
out_op = nn_ops.max_pool(t, ksize, strides, padding)
cpu_val = self.evaluate(out_op)
self.assertAllCloseAccordingToType(cpu_val, gpu_val)
def _CompareMaxPoolingBk(self, input_shape, output_shape, ksize, strides,
padding):
for dtype in np.float64, np.float32, np.float16:
# Generate numbers in a narrow range, so that there are many duplicates
# in the input.
tensor_input = np.random.random_integers(0, 3, input_shape).astype(dtype)
tensor_output = np.random.rand(*output_shape).astype(dtype)
with self.cached_session(use_gpu=True):
t = constant_op.constant(tensor_input, shape=input_shape)
_, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
argmax = self.evaluate(argmax_op)
grad_in = constant_op.constant(tensor_output, shape=output_shape)
out_op = gen_nn_ops.max_pool_grad_with_argmax(t, grad_in, argmax, ksize,
strides, padding)
gpu_val = self.evaluate(out_op)
self.assertShapeEqual(gpu_val, out_op)
with self.cached_session(use_gpu=False):
t = constant_op.constant(tensor_input, shape=input_shape)
out_op = nn_ops.max_pool(t, ksize, strides, padding)
orig_out = self.evaluate(out_op)
grad_in = constant_op.constant(tensor_output, shape=output_shape)
out_op = gen_nn_ops.max_pool_grad(t, orig_out, grad_in, ksize, strides,
padding)
cpu_val = self.evaluate(out_op)
self.assertShapeEqual(cpu_val, out_op)
# The CPU version accumulates its gradient on fp16, so it's less
# accurate than the GPU version that does the accumulation on fp32
self.assertAllCloseAccordingToType(
cpu_val, gpu_val, half_rtol=0.01, half_atol=0.01)
def _CompareMaxPoolingGradBk(self, input_shape, output_shape, ksize, strides,
padding):
for dtype in np.float64, np.float32, np.float16:
# Generate numbers in a narrow range, so that there are many duplicates
# in the input.
tensor_input = np.random.random_integers(0, 3, input_shape).astype(dtype)
      with self.cached_session(use_gpu=True):
t = constant_op.constant(tensor_input, shape=input_shape)
_, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
argmax = self.evaluate(argmax_op)
grad_in = constant_op.constant(tensor_input, shape=input_shape)
out_op = gen_nn_ops.max_pool_grad_grad_with_argmax(
t, grad_in, argmax, ksize, strides, padding)
gpu_val = self.evaluate(out_op)
self.assertShapeEqual(gpu_val, out_op)
with self.cached_session(use_gpu=False):
t = constant_op.constant(tensor_input, shape=input_shape)
out_op = nn_ops.max_pool(t, ksize, strides, padding)
orig_out = self.evaluate(out_op)
grad_in = constant_op.constant(tensor_input, shape=input_shape)
out_op = gen_nn_ops.max_pool_grad_grad(t, orig_out, grad_in, ksize,
strides, padding)
cpu_val = self.evaluate(out_op)
self.assertShapeEqual(cpu_val, out_op)
# The CPU version accumulates its gradient on fp16, so it's less
# accurate than the GPU version that does the accumulation on fp32
self.assertAllCloseAccordingToType(
cpu_val, gpu_val, half_rtol=0.01, half_atol=0.01)
def testMaxPoolingWithArgmax(self):
tensor_input = [
1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,
0.0, 1.0, 0.0, 1.0
]
Config = collections.namedtuple(
"Config", ["use_gpu", "include_batch_in_index", "argmax"])
configs = [
Config(False, False, [0, 1, 3, 5, 0, 2, 6, 8]),
Config(False, True, [0, 1, 3, 5, 9, 11, 15, 17]),
Config(True, False, [0, 1, 3, 5, 0, 2, 6, 8]),
Config(True, True, [0, 1, 3, 5, 9, 11, 15, 17])
]
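    # With include_batch_in_index=True, the argmax indices for the second
    # image are offset by 9, the number of elements (3 * 3 * 1) per image.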
for config in configs:
with GetDeviceScope(self, use_gpu=config.use_gpu):
t = constant_op.constant(tensor_input, shape=[2, 3, 3, 1])
out_op, argmax_op = nn_ops.max_pool_with_argmax(
t,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
Targmax=dtypes.int64,
padding="VALID",
include_batch_in_index=config.include_batch_in_index)
out, argmax = self.evaluate([out_op, argmax_op])
self.assertShapeEqual(out, out_op)
self.assertShapeEqual(argmax, argmax_op)
self.assertAllClose(out.ravel(),
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
self.assertAllEqual(argmax.ravel(), config.argmax)
def testMaxPoolingGradWithArgmax(self):
orig_input = [
1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,
0.0, 1.0, 0.0, 1.0
]
tensor_input = [11.0, 12.0, 13.0, 14.0, 21.0, 22.0, 23.0, 24.0]
Config = collections.namedtuple(
"Config", ["use_gpu", "include_batch_in_index", "argmax"])
configs = [
Config(False, False, [0, 1, 3, 5, 0, 2, 6, 8]),
Config(False, True, [0, 1, 3, 5, 9, 11, 15, 17]),
Config(True, False, [0, 1, 3, 5, 0, 2, 6, 8]),
Config(True, True, [0, 1, 3, 5, 9, 11, 15, 17])
]
for config in configs:
with GetDeviceScope(self, config.use_gpu):
orig_in = constant_op.constant(orig_input, shape=[2, 3, 3, 1])
t = constant_op.constant(tensor_input, shape=[2, 2, 2, 1])
argmax_t = constant_op.constant(
config.argmax, shape=[2, 2, 2, 1], dtype=dtypes.int64)
out_op = gen_nn_ops.max_pool_grad_with_argmax(
orig_in,
t,
argmax_t,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
padding="VALID",
include_batch_in_index=config.include_batch_in_index)
out = self.evaluate(out_op).flatten()
self.assertAllClose(out, [
11.0, 12.0, 0.0, 13.0, 0.0, 14.0, 0.0, 0.0, 0.0, 21.0, 0.0, 22.0,
0.0, 0.0, 0.0, 23.0, 0.0, 24.0
])
def testMaxPoolingGradGradWithArgmax(self):
# MaxPoolWithArgMax is implemented only on CUDA.
if not test.is_gpu_available(cuda_only=True):
return
orig_input = [
1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,
0.0, 1.0, 0.0, 1.0
]
tensor_input = [
11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 21.0, 22.0, 23.0,
24.0, 25.0, 26.0, 27.0, 28.0, 29.0
]
Config = collections.namedtuple(
"Config", ["use_gpu", "include_batch_in_index", "argmax"])
configs = [
Config(True, False, [0, 1, 3, 5, 0, 2, 6, 8]),
Config(True, True, [0, 1, 3, 5, 9, 11, 15, 17])
]
for config in configs:
with GetDeviceScope(self, config.use_gpu):
orig_in = constant_op.constant(orig_input, shape=[2, 3, 3, 1])
t = constant_op.constant(tensor_input, shape=[2, 3, 3, 1])
argmax_t = constant_op.constant(
config.argmax, shape=[2, 2, 2, 1], dtype=dtypes.int64)
out_op = gen_nn_ops.max_pool_grad_grad_with_argmax(
orig_in,
t,
argmax_t,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
padding="VALID",
include_batch_in_index=config.include_batch_in_index)
out = self.evaluate(out_op).flatten()
self.assertAllClose(out,
[11.0, 12.0, 14.0, 16.0, 21.0, 23.0, 27.0, 29.0])
def _ConstructAndTestGradient(self,
pool_func,
input_sizes,
output_sizes,
window_rows,
window_cols,
row_stride,
col_stride,
padding,
data_format,
use_gpu,
x_init_value=None):
"""Verifies the gradients of the avg pooling function.
Args:
pool_func: Function to be called, co.MaxPool, co.AvgPool,
or the Lua version.
input_sizes: Input tensor dimensions.
output_sizes: Output tensor dimensions.
window_rows: kernel size in row dim
window_cols: kernel size in col dim
row_stride: Row Stride.
col_stride: Col Stride.
padding: Padding type.
data_format: Data format.
use_gpu: whether we are running on GPU
x_init_value: Values to be passed to the gradient checker.
"""
assert input_sizes[0] == output_sizes[0]
assert input_sizes[3] == output_sizes[3]
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.cached_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(x, shape=input_sizes, name="input")
if pool_func == nn_ops.avg_pool:
func_name = "avg_pool"
err_tolerance = 1e-4
else:
if x_init_value is None:
x_init_value = np.asfarray(
np.arange(1, total_size + 1),
dtype=np.float32).reshape(input_sizes)
func_name = "max_pool"
err_tolerance = 1e-3
if data_format == "NCHW":
        ksize = [1, 1, window_rows, window_cols]
strides = [1, 1, row_stride, col_stride]
t = test_util.NHWCToNCHW(input_tensor)
else:
        ksize = [1, window_rows, window_cols, 1]
strides = [1, row_stride, col_stride, 1]
t = input_tensor
t = pool_func(
t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=func_name)
if data_format == "NCHW":
t = test_util.NCHWToNHWC(t)
err = gradient_checker.compute_gradient_error(
input_tensor,
input_sizes,
t,
output_sizes,
x_init_value=x_init_value,
delta=1e-2)
tf_logging.info("%s gradient error = " % func_name, err)
self.assertLess(err, err_tolerance)
def _ConstructAndTestSecondGradient(self,
pool_func,
input_sizes,
output_sizes,
window_rows,
window_cols,
row_stride,
col_stride,
padding,
data_format,
use_gpu,
x_init_value=None):
"""Verifies the second-order gradients of the pooling function.
Args:
      pool_func: Function to be called, e.g. nn_ops.max_pool, nn_ops.avg_pool,
        or gen_nn_ops.max_pool_v2.
input_sizes: Input tensor dimensions.
output_sizes: Output tensor dimensions.
window_rows: kernel size in row dim
window_cols: kernel size in col dim
row_stride: Row Stride.
col_stride: Col Stride.
padding: Padding type.
data_format: Data format.
use_gpu: whether we are running on GPU
x_init_value: Values to be passed to the gradient checker.
"""
assert input_sizes[0] == output_sizes[0]
assert input_sizes[3] == output_sizes[3]
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.cached_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(x, shape=input_sizes, name="input")
if pool_func == nn_ops.avg_pool:
func_name = "avg_pool"
err_tolerance = 1e-3
else:
if x_init_value is None:
x_init_value = np.asfarray(
np.arange(1, total_size + 1),
dtype=np.float32).reshape(input_sizes)
func_name = "max_pool"
err_tolerance = 1e-2
if data_format == "NCHW":
        ksize = [1, 1, window_rows, window_cols]
strides = [1, 1, row_stride, col_stride]
t = test_util.NHWCToNCHW(input_tensor)
else:
        ksize = [1, window_rows, window_cols, 1]
strides = [1, row_stride, col_stride, 1]
t = input_tensor
t = pool_func(
t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=func_name)
if data_format == "NCHW":
        t = test_util.NCHWToNHWC(t)
t_g = gradients_impl.gradients(t**2, input_tensor)[0]
err = gradient_checker.compute_gradient_error(
input_tensor,
input_sizes,
t_g,
input_sizes,
x_init_value=x_init_value,
delta=1e-2)
tf_logging.info("%s second-order gradient error = " % func_name, err)
self.assertLess(err, err_tolerance)
def _testMaxPoolGradValidPadding1_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 3, 3, 1],
output_sizes=[1, 3, 3, 1],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_6(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 6, 6, 3],
output_sizes=[2, 5, 5, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_7(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 7, 7, 3],
output_sizes=[2, 6, 6, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding1_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 3, 3, 1],
output_sizes=[1, 2, 2, 1],
window_rows=1,
window_cols=1,
row_stride=2,
col_stride=2,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 2, 3],
output_sizes=[2, 1, 1, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding1_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding1_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3],
window_rows=1,
window_cols=1,
row_stride=2,
col_stride=2,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding3_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1],
window_rows=3,
window_cols=3,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testMaxPoolGrad(self):
for (data_format, use_gpu) in GetTestConfigs():
self._testMaxPoolGradValidPadding1_1(data_format, use_gpu)
self._testMaxPoolGradValidPadding1_2(data_format, use_gpu)
self._testMaxPoolGradValidPadding2_1_6(data_format, use_gpu)
self._testMaxPoolGradValidPadding2_1_7(data_format, use_gpu)
self._testMaxPoolGradValidPadding2_2(data_format, use_gpu)
self._testMaxPoolGradSamePadding1_1(data_format, use_gpu)
self._testMaxPoolGradSamePadding1_2(data_format, use_gpu)
self._testMaxPoolGradSamePadding2_1(data_format, use_gpu)
self._testMaxPoolGradSamePadding2_2(data_format, use_gpu)
self._testMaxPoolGradSamePadding3_1(data_format, use_gpu)
def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows,
window_cols, row_stride, col_stride, padding, v2):
"""Max Pooling Gradient.
Args:
orig_input: A float Tensor. The original input tensor.
orig_output: A float Tensor. The original output tensor.
grad: A float Tensor.
The 4D (batch x rows x cols x depth) output backprop.
window_rows: integer. Kernel size along rows dimension.
window_cols: integer. Kernel size along cols dimension.
row_stride: integer. Stride along rows dimension
col_stride: integer. Stride along cols dimension
      padding: PoolingOpDef.Padding. Padding type.
      v2: Whether to use the V2 version of the max-pooling gradient op.
Returns:
A Tensor.
"""
pool_func = gen_nn_ops.max_pool_grad_v2 if v2 else gen_nn_ops.max_pool_grad
return pool_func(orig_input, orig_output, grad,
[1, window_rows, window_cols, 1],
[1, row_stride, col_stride, 1], padding)
def _testMaxPoolGradDirect(self, input_data, output_backprop,
expected_input_backprop, input_sizes, output_sizes,
window_rows, window_cols, row_stride, col_stride,
padding, use_gpu, v2):
pool_func = gen_nn_ops.max_pool_v2 if v2 else nn_ops.max_pool
with self.cached_session(use_gpu=use_gpu):
input_tensor = variables.Variable(
np.array(input_data, dtype=np.float32).reshape(input_sizes))
self.evaluate(variables.global_variables_initializer())
output_tensor = pool_func(input_tensor, [1, window_rows, window_cols, 1],
[1, row_stride, col_stride, 1], padding)
output_backprop_tensor = constant_op.constant(
output_backprop, shape=output_sizes)
input_backprop_tensor = self._MaxPoolGrad(
input_tensor, output_tensor, output_backprop_tensor, window_rows,
window_cols, row_stride, col_stride, padding, v2)
actual_input_backprop = self.evaluate(input_backprop_tensor)
self.assertShapeEqual(actual_input_backprop, input_backprop_tensor)
actual_input_backprop = actual_input_backprop.flatten()
actual_input_backprop = self._GetNdArray(actual_input_backprop)
actual_output = self.evaluate(output_tensor).flatten()
actual_output = self._GetNdArray(actual_output)
self.assertAllClose(
expected_input_backprop, actual_input_backprop, rtol=1e-6, atol=1e-6)
def _testMaxPoolGradDirect1_1(self):
input_data = [
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0
]
output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
expected_input_backprop = [
11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 21.0, 0.0,
0.0, 0.0, 0.0, 0.0
]
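    # All inputs are equal, so each 2x2 window's gradient is expected to be
    # routed to its top-left element, leaving the last row and column zero.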
for use_gpu in True, False:
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolGradDirect1_2(self):
input_data = [
1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 1.0
]
output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
expected_input_backprop = [
11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 17.0, 19.0, 0.0, 41.0, 0.0, 0.0,
0.0, 0.0, 0.0
]
for use_gpu in True, False:
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolGradDirect1_3(self):
input_data = [
1.0,
0.0,
1.0,
0.0,
0.0,
1.0,
0.0,
1.0,
1.0,
0.0,
1.0,
0.0,
0.0,
1.0,
0.0,
1.0,
]
output_backprop = [
11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0,
23.0, 24.0, 25.0, 26.0
]
expected_input_backprop = [
54,
0.0,
62,
0.0,
0.0,
60,
0.0,
22.0,
47,
0.0,
51,
0.0,
0.0,
0.0,
0.0,
0.0,
]
for use_gpu in True, False:
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 4, 4, 1],
window_rows=3,
window_cols=3,
row_stride=1,
col_stride=1,
padding="SAME",
use_gpu=use_gpu,
v2=v2)
@test_util.no_xla_auto_jit("b/123923733") # NaNs handled differently
def _testMaxPoolGradDirectWithNans2_1(self):
input_data = [float("nan")] * 16
output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
# Test the CPU implementation, which propagates diffs in case of NaN
expected_input_backprop_tf_cpu = [
11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 21.0, 0.0,
0.0, 0.0, 0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_tf_cpu,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=False,
v2=v2)
if not test.is_gpu_available():
return
# Test the GPU implementation that uses cudnn for now.
saved_nanprop = os.environ.get("TF_ENABLE_MAXPOOL_NANPROP")
# Do not propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "0"
expected_input_backprop_cudnn = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
# Propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "1"
expected_input_backprop_cudnn = expected_input_backprop_tf_cpu
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
if saved_nanprop:
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = saved_nanprop
else:
del os.environ["TF_ENABLE_MAXPOOL_NANPROP"]
@test_util.no_xla_auto_jit("b/123923733") # NaNs handled differently
def _testMaxPoolGradDirectWithNans2_2(self):
input_data = [float("nan")] * 16
output_backprop = [
float("nan"), 12.0, 13.0, 15.0,
float("nan"), 17.0, 19.0, 20.0,
float("nan")
]
# Test the CPU implementation, which propagates diffs in case of NaN
expected_input_backprop_tf_cpu = [
float("nan"), 12.0, 13.0, 0.0, 15.0,
float("nan"), 17.0, 0.0, 19.0, 20.0,
float("nan"), 0.0, 0.0, 0.0, 0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_tf_cpu,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=False,
v2=v2)
if not test.is_gpu_available():
return
# Test the GPU implementation that uses cudnn for now.
saved_nanprop = os.environ.get("TF_ENABLE_MAXPOOL_NANPROP")
# Do not propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "0"
expected_input_backprop_cudnn = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
# Propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "1"
expected_input_backprop_cudnn = expected_input_backprop_tf_cpu
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
if saved_nanprop:
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = saved_nanprop
else:
del os.environ["TF_ENABLE_MAXPOOL_NANPROP"]
@test_util.run_deprecated_v1
def testMaxPoolGradDirect(self):
self._testMaxPoolGradDirect1_1()
self._testMaxPoolGradDirect1_2()
self._testMaxPoolGradDirect1_3()
self._testMaxPoolGradDirectWithNans2_1()
self._testMaxPoolGradDirectWithNans2_2()
def _testMaxPoolGradGradValidPadding1_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[1, 3, 3, 1],
output_sizes=[1, 3, 3, 1],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradValidPadding2_1_6(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 6, 6, 3],
output_sizes=[2, 5, 5, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradValidPadding2_1_7(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 7, 7, 3],
output_sizes=[2, 6, 6, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradValidPadding2_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 2, 3],
output_sizes=[2, 1, 1, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding1_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding2_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding2_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding3_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1],
window_rows=3,
window_cols=3,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testMaxPoolGradGrad(self):
for (data_format, use_gpu) in GetTestConfigs():
self._testMaxPoolGradGradValidPadding1_1(data_format, use_gpu)
self._testMaxPoolGradGradValidPadding2_1_6(data_format, use_gpu)
self._testMaxPoolGradGradValidPadding2_1_7(data_format, use_gpu)
self._testMaxPoolGradGradValidPadding2_2(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding1_1(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding2_1(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding2_2(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding3_1(data_format, use_gpu)
def _MaxPoolGradGrad(self, orig_input, orig_output, grad, window_rows,
window_cols, row_stride, col_stride, padding):
"""Max Pooling Second-Order Gradient.
Args:
orig_input: A float Tensor. The original input tensor.
orig_output: A float Tensor. The original output tensor.
grad: A float Tensor.
The 4D (batch x out_rows x out_cols x depth) output backprop.
window_rows: integer. Kernel size along rows dimension.
window_cols: integer. Kernel size along cols dimension.
row_stride: integer. Stride along rows dimension
col_stride: integer. Stride along cols dimension
padding: PoolingOpDef.Padding. Padding type.
Returns:
A Tensor.
"""
return gen_nn_ops.max_pool_grad_grad(
orig_input, orig_output, grad, [1, window_rows, window_cols, 1],
[1, row_stride, col_stride, 1], padding)
@test_util.run_deprecated_v1
def testAvgPoolGrad(self):
for (data_format, use_gpu) in GetTestConfigs():
self._testAvgPoolGradValidPadding1_1(data_format, use_gpu)
self._testAvgPoolGradValidPadding1_2(data_format, use_gpu)
self._testAvgPoolGradValidPadding2_1(data_format, use_gpu)
self._testAvgPoolGradValidPadding2_2(data_format, use_gpu)
self._testAvgPoolGradSamePadding1_1(data_format, use_gpu)
self._testAvgPoolGradSamePadding1_2(data_format, use_gpu)
self._testAvgPoolGradSamePadding2_1(data_format, use_gpu)
self._testAvgPoolGradSamePadding2_2(data_format, use_gpu)
self._testAvgPoolGradSamePadding3_1(data_format, use_gpu)
def _testAvgPoolGradValidPadding1_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 3, 3, 3],
output_sizes=[2, 3, 3, 3],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradValidPadding1_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 3, 3, 3],
output_sizes=[2, 2, 2, 3],
window_rows=1,
window_cols=1,
row_stride=2,
col_stride=2,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradValidPadding2_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 3, 3, 3],
output_sizes=[2, 2, 2, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradValidPadding2_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 2, 2, 3],
output_sizes=[2, 1, 1, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradSamePadding1_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradSamePadding1_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3],
window_rows=1,
window_cols=1,
row_stride=2,
col_stride=2,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradSamePadding2_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradSamePadding2_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradSamePadding3_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1],
window_rows=3,
window_cols=3,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
for pool_func in [nn_ops.max_pool, nn_ops.avg_pool]:
p = pool_func(
array_ops.placeholder(dtypes.float32),
ksize=[1, 1, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], p.get_shape().as_list())
p, am = nn_ops.max_pool_with_argmax(
array_ops.placeholder(dtypes.float32),
ksize=[1, 1, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], p.get_shape().as_list())
self.assertEqual([None, None, None, None], am.get_shape().as_list())
# Incorrect input shape.
for pool_func in [
nn_ops.max_pool, nn_ops.avg_pool, nn_ops.max_pool_with_argmax
]:
with self.assertRaises(ValueError):
pool_func(
array_ops.placeholder(dtypes.float32, shape=[1, 3]),
ksize=[1, 1, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME")
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123337890") # Error messages differ
def testOpEdgeCases(self):
with self.session(use_gpu=test.is_gpu_available()) as sess:
pool_funcs = [nn_ops.max_pool, nn_ops.avg_pool]
if test.is_gpu_available():
pool_funcs.append(nn_ops.max_pool_with_argmax)
for pool_func in pool_funcs:
if pool_func != nn_ops.max_pool:
# Illegal strides.
with self.assertRaisesRegexp(
errors_impl.UnimplementedError,
"Pooling is not yet supported on the batch"):
sess.run(
pool_func(
array_ops.placeholder(dtypes.float32),
ksize=[1, 1, 1, 1],
strides=[2, 1, 1, 1],
padding="SAME"))
# Filter too large.
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
pool_func(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
ksize=[1, 20, 21, 1],
strides=[1, 1, 1, 1],
padding="VALID"))
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
pool_func(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
ksize=[1, 21, 20, 1],
strides=[1, 1, 1, 1],
padding="VALID")
def GetMaxPoolFwdTest(input_size, filter_size, strides, padding):
def Test(self):
# MaxPoolWithArgMax is implemented only on CUDA.
if not test.is_gpu_available(cuda_only=True):
return
self._CompareMaxPoolingFwd(input_size, filter_size, strides, padding)
return Test
def GetMaxPoolGradTest(input_size, filter_size, output_size, strides, padding):
def Test(self):
# MaxPoolWithArgMax is implemented only on CUDA.
if not test.is_gpu_available(cuda_only=True):
return
self._CompareMaxPoolingBk(input_size, output_size, filter_size, strides,
padding)
return Test
def GetMaxPoolGradGradTest(input_size, filter_size, output_size, strides,
padding):
def Test(self):
# MaxPoolWithArgMax is implemented only on CUDA.
if not test.is_gpu_available(cuda_only=True):
return
self._CompareMaxPoolingGradBk(input_size, output_size, filter_size, strides,
padding)
return Test
if __name__ == "__main__":
for (name_, input_size_, filter_size_, output_size_, stride_,
padding_) in GetShrunkInceptionMaxPoolShapes():
setattr(PoolingTest, "testMaxPoolFwd_" + name_,
GetMaxPoolFwdTest(input_size_, filter_size_, stride_, padding_))
setattr(PoolingTest, "testMaxPoolGrad_" + name_,
GetMaxPoolGradTest(input_size_, filter_size_, output_size_, stride_,
padding_))
setattr(PoolingTest, "testMaxPoolGradGrad_" + name_,
GetMaxPoolGradGradTest(input_size_, filter_size_, output_size_,
stride_, padding_))
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/pooling_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the DynamicPartition op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class DynamicPartitionTest(test.TestCase):
@test_util.run_deprecated_v1
def testSimpleOneDimensional(self):
with self.session(use_gpu=True) as sess:
data = constant_op.constant([0, 13, 2, 39, 4, 17], dtype=dtypes.float32)
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = self.evaluate(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual([0, 13], partition_vals[0])
self.assertAllEqual([17], partition_vals[1])
self.assertAllEqual([2, 4], partition_vals[2])
self.assertAllEqual([39], partition_vals[3])
# Vector data input to DynamicPartition results in
# `num_partitions` vectors of unknown length.
self.assertEqual([None], partitions[0].get_shape().as_list())
self.assertEqual([None], partitions[1].get_shape().as_list())
self.assertEqual([None], partitions[2].get_shape().as_list())
self.assertEqual([None], partitions[3].get_shape().as_list())
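  def _npDynamicPartitionSketch(self, data, indices, num_partitions):
    """Illustrative numpy reference, not part of the original test: for vector
    `indices`, dynamic_partition places row `i` of `data` into partition
    `indices[i]`; this sketch mirrors that behavior for in-range indices."""
    data = np.asarray(data)
    indices = np.asarray(indices)
    return [data[indices == i] for i in range(num_partitions)]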
@test_util.run_deprecated_v1
def testSimpleTwoDimensional(self):
with self.session(use_gpu=True) as sess:
data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14], [15, 16, 17]],
dtype=dtypes.float32)
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = self.evaluate(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], partition_vals[0])
self.assertAllEqual([[15, 16, 17]], partition_vals[1])
self.assertAllEqual([[6, 7, 8], [12, 13, 14]], partition_vals[2])
self.assertAllEqual([[9, 10, 11]], partition_vals[3])
      # Matrix data input to DynamicPartition results in
      # `num_partitions` matrices with an unknown number of rows, and 3 columns.
self.assertEqual([None, 3], partitions[0].get_shape().as_list())
self.assertEqual([None, 3], partitions[1].get_shape().as_list())
self.assertEqual([None, 3], partitions[2].get_shape().as_list())
self.assertEqual([None, 3], partitions[3].get_shape().as_list())
def testLargeOneDimensional(self):
num = 100000
data_list = [x for x in range(num)]
indices_list = [x % 2 for x in range(num)]
part1 = [x for x in range(num) if x % 2 == 0]
part2 = [x for x in range(num) if x % 2 == 1]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=2)
partition_vals = self.evaluate(partitions)
self.assertEqual(2, len(partition_vals))
self.assertAllEqual(part1, partition_vals[0])
self.assertAllEqual(part2, partition_vals[1])
def testLargeTwoDimensional(self):
rows = 100000
cols = 100
data_list = [None] * rows
for i in range(rows):
data_list[i] = [i for _ in range(cols)]
num_partitions = 97
indices_list = [(i ** 2) % num_partitions for i in range(rows)]
parts = [[] for _ in range(num_partitions)]
for i in range(rows):
parts[(i ** 2) % num_partitions].append(data_list[i])
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=num_partitions)
partition_vals = self.evaluate(partitions)
self.assertEqual(num_partitions, len(partition_vals))
for i in range(num_partitions):
# reshape because of empty parts
        parts_np = np.array(parts[i], dtype=np.float64).reshape(-1, cols)
self.assertAllEqual(parts_np, partition_vals[i])
def testSimpleComplex(self):
data_list = [1 + 2j, 3 + 4j, 5 + 6j, 7 + 8j]
indices_list = [1, 0, 1, 0]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.complex64)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=2)
partition_vals = self.evaluate(partitions)
self.assertEqual(2, len(partition_vals))
self.assertAllEqual([3 + 4j, 7 + 8j], partition_vals[0])
self.assertAllEqual([1 + 2j, 5 + 6j], partition_vals[1])
def testScalarPartitions(self):
data_list = [10, 13, 12, 11]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float64)
indices = 3
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = self.evaluate(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual(np.array([], dtype=np.float64).reshape(-1, 4),
partition_vals[0])
self.assertAllEqual(np.array([], dtype=np.float64).reshape(-1, 4),
partition_vals[1])
self.assertAllEqual(np.array([], dtype=np.float64).reshape(-1, 4),
partition_vals[2])
self.assertAllEqual(np.array([10, 13, 12, 11],
dtype=np.float64).reshape(-1, 4),
partition_vals[3])
@test_util.run_deprecated_v1
def testHigherRank(self):
np.random.seed(7)
with self.session(use_gpu=True) as sess:
for n in 2, 3:
for shape in (4,), (4, 5), (4, 5, 2):
partitions = np.random.randint(n, size=np.prod(shape)).reshape(shape)
for extra_shape in (), (6,), (6, 7):
data = np.random.randn(*(shape + extra_shape))
partitions_t = constant_op.constant(partitions, dtype=dtypes.int32)
data_t = constant_op.constant(data)
outputs = data_flow_ops.dynamic_partition(
data_t, partitions_t, num_partitions=n)
self.assertEqual(n, len(outputs))
outputs_val = self.evaluate(outputs)
for i, output in enumerate(outputs_val):
self.assertAllEqual(output, data[partitions == i])
# Test gradients
outputs_grad = [7 * output for output in outputs_val]
grads = gradients_impl.gradients(outputs, [data_t, partitions_t],
outputs_grad)
self.assertEqual(grads[1], None) # Partitions has no gradients
self.assertAllEqual(7 * data, sess.run(grads[0]))
def testEmptyParts(self):
data_list = [1, 2, 3, 4]
indices_list = [1, 3, 1, 3]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = self.evaluate(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual([], partition_vals[0])
self.assertAllEqual([1, 3], partition_vals[1])
self.assertAllEqual([], partition_vals[2])
self.assertAllEqual([2, 4], partition_vals[3])
def testEmptyDataTwoDimensional(self):
data_list = [[], []]
indices_list = [0, 1]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=3)
partition_vals = self.evaluate(partitions)
self.assertEqual(3, len(partition_vals))
self.assertAllEqual([[]], partition_vals[0])
self.assertAllEqual([[]], partition_vals[1])
      self.assertAllEqual(np.array([], dtype=np.float64).reshape(0, 0),
partition_vals[2])
def testEmptyPartitions(self):
data_list = []
indices_list = []
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=2)
partition_vals = self.evaluate(partitions)
self.assertEqual(2, len(partition_vals))
self.assertAllEqual([], partition_vals[0])
self.assertAllEqual([], partition_vals[1])
@unittest.skip("Fails on windows.")
def testGPUTooManyParts(self):
# This test only makes sense on the GPU. There we do not check
# for errors. In this case, we should discard all but the first
# num_partitions indices.
if not test.is_gpu_available():
return
data_list = [1, 2, 3, 4, 5, 6]
indices_list = [6, 5, 4, 3, 1, 0]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=2)
partition_vals = self.evaluate(partitions)
self.assertEqual(2, len(partition_vals))
self.assertAllEqual([6], partition_vals[0])
self.assertAllEqual([5], partition_vals[1])
@unittest.skip("Fails on windows.")
def testGPUPartsTooLarge(self):
# This test only makes sense on the GPU. There we do not check
# for errors. In this case, we should discard all the values
# larger than num_partitions.
if not test.is_gpu_available():
return
data_list = [1, 2, 3, 4, 5, 6]
indices_list = [10, 11, 2, 12, 0, 1000]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=5)
partition_vals = self.evaluate(partitions)
self.assertEqual(5, len(partition_vals))
self.assertAllEqual([5], partition_vals[0])
self.assertAllEqual([], partition_vals[1])
self.assertAllEqual([3], partition_vals[2])
self.assertAllEqual([], partition_vals[3])
self.assertAllEqual([], partition_vals[4])
@unittest.skip("Fails on windows.")
def testGPUAllIndicesBig(self):
# This test only makes sense on the GPU. There we do not check
# for errors. In this case, we should discard all the values
# and have an empty output.
if not test.is_gpu_available():
return
data_list = [1.1, 2.1, 3.1, 4.1, 5.1, 6.1]
indices_list = [90, 70, 60, 100, 110, 40]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=40)
partition_vals = self.evaluate(partitions)
self.assertEqual(40, len(partition_vals))
for i in range(40):
self.assertAllEqual([], partition_vals[i])
@test_util.run_deprecated_v1
def testErrorIndexOutOfRange(self):
with self.cached_session() as sess:
data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14]])
indices = constant_op.constant([0, 2, 99, 2, 2])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
with self.assertRaisesOpError(r"partitions\[2\] = 99 is not in \[0, 4\)"):
self.evaluate(partitions)
@test_util.run_deprecated_v1
def testScalarIndexOutOfRange(self):
with self.cached_session() as sess:
bad = 17
data = np.zeros(5)
partitions = data_flow_ops.dynamic_partition(data, bad, num_partitions=7)
with self.assertRaisesOpError(r"partitions = 17 is not in \[0, 7\)"):
self.evaluate(partitions)
@test_util.run_deprecated_v1
def testHigherRankIndexOutOfRange(self):
with self.cached_session() as sess:
shape = (2, 3)
indices = array_ops.placeholder(shape=shape, dtype=np.int32)
data = np.zeros(shape + (5,))
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=7)
for i in xrange(2):
for j in xrange(3):
bad = np.zeros(shape, dtype=np.int32)
bad[i, j] = 17
with self.assertRaisesOpError(
r"partitions\[%d,%d\] = 17 is not in \[0, 7\)" % (i, j)):
sess.run(partitions, feed_dict={indices: bad})
@test_util.run_deprecated_v1
def testErrorWrongDimsIndices(self):
data = constant_op.constant([[0], [1], [2]])
indices = constant_op.constant([[0], [0]])
with self.assertRaises(ValueError):
data_flow_ops.dynamic_partition(data, indices, num_partitions=4)
# see https://github.com/tensorflow/tensorflow/issues/17106
def testCUBBug(self):
x = constant_op.constant(np.random.randn(3072))
inds = [0]*189 + [1]*184 + [2]*184 + [3]*191 + [4]*192 + [5]*195 + [6]*195
inds += [7]*195 + [8]*188 + [9]*195 + [10]*188 + [11]*202 + [12]*194
inds += [13]*194 + [14]*194 + [15]*192
self.assertEqual(len(inds), x.shape[0])
partitioned = data_flow_ops.dynamic_partition(x, inds, 16)
with self.cached_session() as sess:
res = self.evaluate(partitioned)
self.assertEqual(res[-1].shape[0], 192)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/dynamic_partition_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ExtractImagePatches op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ExtractImagePatches(test.TestCase):
"""Functional tests for ExtractImagePatches op."""
def _VerifyValues(self, image, ksizes, strides, rates, padding, patches):
"""Tests input-output pairs for the ExtractImagePatches op.
Args:
image: Input tensor with shape: [batch, in_rows, in_cols, depth].
ksizes: Patch size specified as: [ksize_rows, ksize_cols].
strides: Output strides, specified as [stride_rows, stride_cols].
rates: Atrous rates, specified as [rate_rows, rate_cols].
padding: Padding type.
patches: Expected output.
"""
ksizes = [1] + ksizes + [1]
strides = [1] + strides + [1]
rates = [1] + rates + [1]
out_tensor = array_ops.extract_image_patches(
constant_op.constant(image),
ksizes=ksizes,
strides=strides,
rates=rates,
padding=padding,
name="im2col")
self.assertAllClose(patches, self.evaluate(out_tensor))
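  def _expectedPatchDepthSketch(self, ksize_rows, ksize_cols, depth):
    """Illustrative helper, not part of the original test: each output patch
    flattens one ksize_rows x ksize_cols window per input channel, so the
    output depth is ksize_rows * ksize_cols * depth (e.g. 2 * 2 * 1 = 4 for
    the 2x2 single-channel cases below)."""
    return ksize_rows * ksize_cols * depth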
def testKsize1x1Stride1x1Rate1x1(self):
"""Verifies that for 1x1 kernel the output equals the input."""
# [2, 3, 4, 5]
image = np.reshape(range(120), [2, 3, 4, 5])
# [2, 3, 4, 5]
patches = np.reshape(range(120), [2, 3, 4, 5])
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1],
strides=[1, 1],
rates=[1, 1],
padding=padding,
patches=patches)
def testKsize1x1Stride2x3Rate1x1(self):
"""Test for 1x1 kernel and strides."""
# [2, 4, 5, 3]
image = np.reshape(range(120), [2, 4, 5, 3])
# [2, 2, 2, 3]
patches = image[:, ::2, ::3, :]
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1],
strides=[2, 3],
rates=[1, 1],
padding=padding,
patches=patches)
def testKsize2x2Stride1x1Rate1x1Valid(self):
"""Test for 2x2 kernel with VALID padding."""
# [1, 2, 2, 1]
image = [[[[1], [2]], [[3], [4]]]]
# [1, 1, 1, 4]
patches = [[[[1, 2, 3, 4]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
patches=patches)
def testKsize2x2Stride1x1Rate1x1Same(self):
"""Test for 2x2 kernel with SAME padding."""
# [1, 2, 2, 1]
image = [[[[1], [2]], [[3], [4]]]]
# [1, 2, 2, 4]
patches = [[[[1, 2, 3, 4], [2, 0, 4, 0]], [[3, 4, 0, 0], [4, 0, 0, 0]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
patches=patches)
def testKsize2x2Stride1x1Rate2x2Valid(self):
"""Test for 2x2 kernel with 2x2 dilation."""
# [1, 2, 2, 1]
image = np.arange(16).reshape(1, 4, 4, 1).astype(np.float32)
# [1, 2, 2, 4]
patches = [[[[0, 2, 8, 10], [1, 3, 9, 11]],
[[4, 6, 12, 14], [5, 7, 13, 15]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[2, 2],
padding="VALID",
patches=patches)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/extract_image_patches_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for unified pooling functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def pool_direct_single_axis(
input, # pylint: disable=redefined-builtin
axis,
window_size,
pooling_type,
padding,
dilation_rate,
stride):
"""Numpy implementation of pooling along a single axis.
This is intended for testing only, and therefore isn't particularly efficient.
See pool_direct below for the meaning of the arguments.
Args:
input: numpy array.
axis: axis along which to perform pooling.
window_size: int >= 1. Size of pooling window within axis.
pooling_type: either "MAX" or "AVG".
padding: either "SAME" or "VALID".
dilation_rate: int >= 1. Dilation factor for window, i.e. stride at which
to sample input.
stride: int >= 1. Stride at which to generate output.
Returns:
pooling output array of rank N+2.
Raises:
ValueError: if arguments are invalid.
"""
effective_window_size = (window_size - 1) * dilation_rate + 1
input_size = input.shape[axis]
if padding == "SAME":
output_size = int(math.ceil(input_size / stride))
total_padding_amount = max(
0, (output_size - 1) * stride + effective_window_size - input_size)
before_padding = total_padding_amount // 2
elif padding == "VALID":
output_size = int(
math.ceil((input_size - effective_window_size + 1) / stride))
before_padding = 0
else:
raise ValueError("Unsupported padding type: %r" % (padding,))
output_shape = input.shape[:axis] + (output_size,) + input.shape[axis + 1:]
output = np.zeros(output_shape, input.dtype)
initial_dim_selector = tuple(np.s_[:] for _ in range(axis))
if pooling_type == "MAX":
pooling_func = np.max
elif pooling_type == "AVG":
pooling_func = np.mean
else:
raise ValueError("Unsupported pooling type: %r" % (pooling_type,))
for output_pos in range(output_size):
input_start_pos = output_pos * stride - before_padding
input_end_pos = min(input_start_pos + effective_window_size, input_size)
if input_start_pos < 0:
input_start_pos += dilation_rate
input_slice = np.s_[input_start_pos:input_end_pos:dilation_rate]
output[initial_dim_selector + (output_pos,)] = pooling_func(
input[initial_dim_selector + (input_slice,)], axis=axis)
return output
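# Illustrative sketch, not part of the original file: the SAME/VALID output
# sizes computed in pool_direct_single_axis above for a concrete 1-D case.
# With input_size=5, window_size=2, dilation_rate=1, stride=2 this gives
# SAME -> ceil(5 / 2) = 3 and VALID -> ceil((5 - 2 + 1) / 2) = 2.
def _pool_output_sizes_sketch(input_size, window_size, dilation_rate, stride):
  effective_window_size = (window_size - 1) * dilation_rate + 1
  same_size = int(math.ceil(input_size / stride))
  valid_size = int(
      math.ceil((input_size - effective_window_size + 1) / stride))
  return same_size, valid_size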
def pool_direct(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
padding, # pylint: disable=redefined-builtin
dilation_rate,
strides,
data_format=None):
"""Numpy implementation of pooling.
This is intended for testing only, and therefore isn't particularly efficient.
See tensorflow.nn.pool.
Args:
input: numpy array of rank N+2.
window_shape: Sequence of N ints >= 1.
pooling_type: either "MAX" or "AVG".
padding: either "SAME" or "VALID".
dilation_rate: Sequence of N ints >= 1.
strides: Sequence of N ints >= 1.
    data_format: If specified and starts with "NC", indicates that the second
      dimension, rather than the last dimension, specifies the channel.
Returns:
pooling output array of rank N+2.
Raises:
ValueError: if arguments are invalid.
"""
if data_format is None or not data_format.startswith("NC"):
spatial_start_dim = 1
else:
spatial_start_dim = 2
output = input
for i in range(len(window_shape)):
output = pool_direct_single_axis(
input=output,
axis=i + spatial_start_dim,
window_size=window_shape[i],
pooling_type=pooling_type,
padding=padding,
dilation_rate=dilation_rate[i],
stride=strides[i])
return output
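# Illustrative usage, not part of the original file: MAX-pool a 1x1x4x1 input
# with a 1x2 window and strides [1, 2] using the numpy reference above; the
# width axis collapses from values [0, 1, 2, 3] to the window maxima [1, 3].
def _pool_direct_example_sketch():
  x = np.arange(4, dtype=np.float32).reshape(1, 1, 4, 1)
  return pool_direct(
      input=x,
      window_shape=[1, 2],
      pooling_type="MAX",
      padding="VALID",
      dilation_rate=[1, 1],
      strides=[1, 2])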
class PoolingTest(test.TestCase):
def _test(self, input_shape, **kwargs):
# Use negative numbers to make sure there isn't any zero padding getting
# used.
x = -np.arange(
np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
y1 = pool_direct(input=x, **kwargs)
y2 = nn_ops.pool(input=x, **kwargs)
self.assertAllClose(y1, self.evaluate(y2), rtol=1e-2, atol=1e-2)
def testPoolSimple(self):
with self.session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
self._test(
input_shape=[1, 1, 10, 1],
window_shape=[1, 3],
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=[1, 2])
def testPool1D(self):
with self.session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
for input_shape in [[2, 9, 2], [2, 10, 2]]:
for window_shape in [[1], [2], [3]]:
if padding != "SAME":
for dilation_rate in [[1], [2], [3]]:
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1])
for strides in [[1], [2], [3]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1],
strides=strides)
def testPool2D(self):
with self.session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
for input_shape in [[2, 9, 10, 2], [2, 10, 9, 2]]:
for window_shape in [[1, 1], [2, 1], [2, 3]]:
if padding != "SAME":
for dilation_rate in [[1, 1], [2, 1], [1, 2], [2, 3]]:
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1])
for strides in [[1, 1], [2, 1], [1, 2], [2, 3]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=strides)
def testPool3D(self):
with self.session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
for input_shape in [[2, 9, 10, 11, 2], [2, 10, 9, 11, 2]]:
for window_shape in [[1, 1, 1], [2, 1, 2], [2, 3, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1, 1], [2, 1, 2], [1, 2, 2],
[2, 3, 3]]:
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1, 1])
for strides in [[1, 1, 1], [2, 1, 2], [1, 2, 2], [2, 3, 3]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1, 1],
strides=strides)
def testPoolNC(self):
if test.is_gpu_available(cuda_only=True):
# "NC*" format is currently only supported on CUDA.
with self.session(use_gpu=True):
for padding in ["SAME", "VALID"]:
self._test(
input_shape=[2, 2, 9],
window_shape=[2],
padding=padding,
pooling_type="MAX",
strides=[1],
dilation_rate=[1],
data_format="NCW")
self._test(
input_shape=[2, 2, 9],
window_shape=[2],
padding=padding,
pooling_type="MAX",
strides=[2],
dilation_rate=[1],
data_format="NCW")
self._test(
input_shape=[2, 2, 7, 9],
window_shape=[2, 2],
padding=padding,
pooling_type="MAX",
strides=[1, 2],
dilation_rate=[1, 1],
data_format="NCHW")
self._test(
input_shape=[2, 2, 7, 5, 3],
window_shape=[2, 2, 2],
padding=padding,
pooling_type="MAX",
strides=[1, 2, 1],
dilation_rate=[1, 1, 1],
data_format="NCDHW")
self._test(
input_shape=[2, 2, 7, 9],
window_shape=[2, 2],
padding="VALID",
pooling_type="MAX",
strides=[1, 1],
dilation_rate=[2, 2],
data_format="NCHW")
def _test_gradient(self, input_shape, **kwargs):
x_val = -np.arange(
np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
output = nn_ops.pool(input=x, **kwargs)
y_shape = output.get_shape().as_list()
err = gradient_checker.compute_gradient_error(
[x], [input_shape], output, y_shape, x_init_value=[x_val])
err_tolerance = 1e-2
self.assertLess(err, err_tolerance)
@test_util.run_deprecated_v1
def testGradient1D(self):
with self.session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[2, 5, 2], [1, 4, 1]]:
for window_shape in [[1], [2]]:
if padding != "SAME":
for dilation_rate in [[1], [2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1])
for strides in [[1], [2]]:
if np.any(np.array(strides) > window_shape):
continue
                self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1],
strides=strides)
@test_util.run_deprecated_v1
def testGradient2D(self):
with self.session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[2, 4, 5, 2], [1, 5, 4, 1]]:
for window_shape in [[1, 1], [2, 1], [2, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1], [2, 1], [2, 2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1])
for strides in [[1, 1], [2, 1], [1, 2], [2, 2]]:
if np.any(np.array(strides) > window_shape):
continue
                self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=strides)
@test_util.run_deprecated_v1
def testGradient3D(self):
with self.session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[1, 3, 5, 4, 1], [1, 5, 4, 3, 1]]:
for window_shape in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1, 1])
for strides in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
if np.any(np.array(strides) > window_shape):
continue
                self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1, 1],
strides=strides)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/pool_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for summary V1 image op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import image_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
class SummaryV1ImageOpTest(test.TestCase):
def _AsSummary(self, s):
summ = summary_pb2.Summary()
summ.ParseFromString(s)
return summ
def _CheckProto(self, image_summ, shape):
"""Verify that the non-image parts of the image_summ proto match shape."""
# Only the first 3 images are returned.
for v in image_summ.value:
v.image.ClearField("encoded_image_string")
expected = "\n".join("""
value {
tag: "img/image/%d"
image { height: %d width: %d colorspace: %d }
}""" % ((i,) + shape[1:]) for i in xrange(3))
self.assertProtoEquals(expected, image_summ)
@test_util.run_deprecated_v1
def testImageSummary(self):
for depth in (1, 3, 4):
for positive in False, True:
with self.session(graph=ops.Graph()) as sess:
shape = (4, 5, 7) + (depth,)
bad_color = [255, 0, 0, 255][:depth]
# Build a mostly random image with one nan
const = np.random.randn(*shape).astype(np.float32)
const[0, 1, 2] = 0 # Make the nan entry not the max
if positive:
const = 1 + np.maximum(const, 0)
scale = 255 / const.reshape(4, -1).max(axis=1)
offset = 0
else:
scale = 127 / np.abs(const.reshape(4, -1)).max(axis=1)
offset = 128
adjusted = np.floor(scale[:, None, None, None] * const + offset)
const[0, 1, 2, depth // 2] = np.nan
# Summarize
summ = summary.image("img", const)
value = self.evaluate(summ)
self.assertEqual([], summ.get_shape())
image_summ = self._AsSummary(value)
# Decode the first image and check consistency
image = image_ops.decode_png(image_summ.value[0]
.image.encoded_image_string).eval()
self.assertAllEqual(image[1, 2], bad_color)
image[1, 2] = adjusted[0, 1, 2]
self.assertAllClose(image, adjusted[0], rtol=2e-5, atol=2e-5)
# Check the rest of the proto
self._CheckProto(image_summ, shape)
@test_util.run_deprecated_v1
def testImageSummaryUint8(self):
np.random.seed(7)
for depth in (1, 3, 4):
with self.session(graph=ops.Graph()) as sess:
shape = (4, 5, 7) + (depth,)
# Build a random uint8 image
images = np.random.randint(256, size=shape).astype(np.uint8)
tf_images = ops.convert_to_tensor(images)
self.assertEqual(tf_images.dtype, dtypes.uint8)
# Summarize
summ = summary.image("img", tf_images)
value = self.evaluate(summ)
self.assertEqual([], summ.get_shape())
image_summ = self._AsSummary(value)
# Decode the first image and check consistency.
# Since we're uint8, everything should be exact.
image = image_ops.decode_png(image_summ.value[0]
.image.encoded_image_string).eval()
self.assertAllEqual(image, images[0])
# Check the rest of the proto
self._CheckProto(image_summ, shape)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/summary_v1_image_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.linalg_grad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.platform import test as test_lib
def _AddTest(test, op_name, testcase_name, fn):
test_name = '_'.join(['test', op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError('Test %s defined more than once' % test_name)
setattr(test, test_name, fn)
class ShapeTest(test_lib.TestCase):
@test_util.run_deprecated_v1
def testBatchGradientUnknownSize(self):
with self.cached_session():
batch_size = constant_op.constant(3)
matrix_size = constant_op.constant(4)
batch_identity = array_ops.tile(
array_ops.expand_dims(
array_ops.diag(array_ops.ones([matrix_size])), 0),
[batch_size, 1, 1])
determinants = linalg_ops.matrix_determinant(batch_identity)
reduced = math_ops.reduce_sum(determinants)
sum_grad = gradients_impl.gradients(reduced, batch_identity)[0]
self.assertAllClose(batch_identity.eval(), self.evaluate(sum_grad))
class MatrixUnaryFunctorGradientTest(test_lib.TestCase):
pass # Filled in below
def _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_):
@test_util.run_v1_only('b/120545219')
def Test(self):
with self.session(use_gpu=True):
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
a = constant_op.constant(a_np)
if functor_.__name__ == 'matrix_square_root':
# Square the input matrix to ensure that its matrix square root exists
a = math_ops.matmul(a, a)
a_np = self.evaluate(a)
b = functor_(a, **kwargs_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
epsilon = np.finfo(dtype_).eps
delta = epsilon**(1.0 / 3.0)
# tolerance obtained by looking at actual differences using
# np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
tol = 1e-6 if dtype_ == np.float64 else 0.05
theoretical, numerical = gradient_checker.compute_gradient(
a,
a.get_shape().as_list(),
b,
b.get_shape().as_list(),
x_init_value=a_np,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
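# Illustrative sketch, not part of the original file: the O(epsilon^{1/3})
# central-difference step size used in the gradient tests above, roughly 5e-3
# for float32 and 6e-6 for float64.
def _central_difference_step_sketch(dtype_):
  return np.finfo(dtype_).eps**(1.0 / 3.0)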
class MatrixBinaryFunctorGradientTest(test_lib.TestCase):
pass # Filled in below
def _GetMatrixBinaryFunctorGradientTest(functor_,
dtype_,
shape_,
float32_tol_fudge=1.0,
**kwargs_):
@test_util.run_v1_only('b/120545219')
def Test(self):
# TODO(rmlarsen): Debug illegal address bug on CUDA and re-enable
# GPU test for matrix_solve.
use_gpu = False if functor_ == linalg_ops.matrix_solve else True
with self.session(use_gpu=use_gpu):
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
a = constant_op.constant(a_np)
b_np = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
b = constant_op.constant(b_np)
c = functor_(a, b, **kwargs_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
epsilon = np.finfo(dtype_).eps
delta = epsilon**(1.0 / 3.0)
# tolerance obtained by looking at actual differences using
# np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
tol = 1e-6 if dtype_ == np.float64 else float32_tol_fudge * 0.05
# The gradients for a and b may be of very different magnitudes,
# so to not get spurious failures we test them separately.
for factor, factor_init in [a, a_np], [b, b_np]:
theoretical, numerical = gradient_checker.compute_gradient(
factor,
factor.get_shape().as_list(),
c,
c.get_shape().as_list(),
x_init_value=factor_init,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
if __name__ == '__main__':
# Tests for gradients of binary matrix operations.
for dtype in np.float32, np.float64:
for size in 2, 5, 10:
# We skip the rank 4, size 10 case: it is slow and conceptually covered
# by the other cases.
for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
for adjoint in False, True:
shape = extra + (size, size)
name = '%s_%s_adj_%s' % (dtype.__name__, '_'.join(map(str, shape)),
str(adjoint))
_AddTest(MatrixBinaryFunctorGradientTest, 'MatrixSolveGradient', name,
_GetMatrixBinaryFunctorGradientTest(
linalg_ops.matrix_solve, dtype, shape, adjoint=adjoint))
for lower in True, False:
name = '%s_low_%s' % (name, lower)
_AddTest(MatrixBinaryFunctorGradientTest,
'MatrixTriangularSolveGradient', name,
_GetMatrixBinaryFunctorGradientTest(
linalg_ops.matrix_triangular_solve,
dtype,
shape,
float32_tol_fudge=4.0,
adjoint=adjoint,
lower=lower))
# Tests for gradients of unary matrix operations.
for dtype in np.float32, np.float64:
for size in 2, 5, 10:
# We skip the rank 4, size 10 case: it is slow and conceptually covered
# by the other cases.
for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
shape = extra + (size, size)
name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
_AddTest(MatrixUnaryFunctorGradientTest, 'MatrixInverseGradient', name,
_GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_inverse,
dtype, shape))
_AddTest(MatrixUnaryFunctorGradientTest, 'MatrixExponentialGradient',
name,
_GetMatrixUnaryFunctorGradientTest(
linalg_impl.matrix_exponential, dtype, shape))
_AddTest(
MatrixUnaryFunctorGradientTest, 'MatrixDeterminantGradient', name,
_GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_determinant,
dtype, shape))
_AddTest(
MatrixUnaryFunctorGradientTest, 'LogMatrixDeterminantGradient',
name,
_GetMatrixUnaryFunctorGradientTest(
lambda x: linalg_ops.log_matrix_determinant(x)[1],
dtype, shape))
# The numerical Jacobian is consistently invalid for these four shapes
# because the matrix square root of the perturbed input doesn't exist
if shape in {(2, 5, 5), (3, 5, 5), (3, 10, 10), (3, 2, 5, 5)}:
# Alternative shape that consistently produces a valid numerical Jacobian
shape = extra + (size + 1, size + 1)
name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
_AddTest(
MatrixUnaryFunctorGradientTest, 'MatrixSquareRootGradient', name,
_GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_square_root,
dtype, shape))
# Tests for gradients of matrix_solve_ls
for dtype in np.float32, np.float64:
for rows in 2, 5, 10:
for cols in 2, 5, 10:
for l2_regularization in 1e-6, 0.001, 1.0:
shape = (rows, cols)
name = '%s_%s_%s' % (dtype.__name__, '_'.join(map(str, shape)),
l2_regularization)
float32_tol_fudge = 5.1 if l2_regularization == 1e-6 else 4.0
_AddTest(
MatrixBinaryFunctorGradientTest,
'MatrixSolveLsGradient',
name,
# pylint: disable=long-lambda,g-long-lambda
_GetMatrixBinaryFunctorGradientTest(
(lambda a, b, l=l2_regularization:
linalg_ops.matrix_solve_ls(a, b, l)),
dtype,
shape,
float32_tol_fudge))
test_lib.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg_grad_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_square_root."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class SquareRootOpTest(test.TestCase):
def _verifySquareRoot(self, matrix, np_type):
matrix = matrix.astype(np_type)
# Verify that matmul(sqrtm(A), sqrtm(A)) = A
sqrt = gen_linalg_ops.matrix_square_root(matrix)
square = math_ops.matmul(sqrt, sqrt)
self.assertShapeEqual(matrix, square)
self.assertAllClose(matrix, square, rtol=1e-4, atol=1e-3)
def _verifySquareRootReal(self, x):
for np_type in [np.float32, np.float64]:
self._verifySquareRoot(x, np_type)
def _verifySquareRootComplex(self, x):
for np_type in [np.complex64, np.complex128]:
self._verifySquareRoot(x, np_type)
def _makeBatch(self, matrix1, matrix2):
matrix_batch = np.concatenate(
[np.expand_dims(matrix1, 0),
np.expand_dims(matrix2, 0)])
matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
return matrix_batch
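  def _diagonalSquareRootSketch(self):
    """Illustrative sketch, not part of the original test: for a diagonal
    matrix with non-negative entries the principal square root is elementwise,
    e.g. sqrtm(diag(4, 9)) == diag(2, 3); _verifySquareRoot checks exactly this
    property via matmul(sqrt, sqrt) == matrix."""
    matrix = np.diag([4., 9.])
    sqrt = np.diag([2., 3.])
    return matrix, sqrt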
def _testMatrices(self, matrix1, matrix2):
# Real
self._verifySquareRootReal(matrix1)
self._verifySquareRootReal(matrix2)
self._verifySquareRootReal(self._makeBatch(matrix1, matrix2))
# Complex
matrix1 = matrix1.astype(np.complex64)
matrix2 = matrix2.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 += 1j * matrix2
self._verifySquareRootComplex(matrix1)
self._verifySquareRootComplex(matrix2)
self._verifySquareRootComplex(self._makeBatch(matrix1, matrix2))
def testSymmetricPositiveDefinite(self):
matrix1 = np.array([[2., 1.], [1., 2.]])
matrix2 = np.array([[3., -1.], [-1., 3.]])
self._testMatrices(matrix1, matrix2)
def testAsymmetric(self):
matrix1 = np.array([[0., 4.], [-1., 5.]])
matrix2 = np.array([[33., 24.], [48., 57.]])
self._testMatrices(matrix1, matrix2)
def testIdentityMatrix(self):
# 2x2
identity = np.array([[1., 0], [0, 1.]])
self._verifySquareRootReal(identity)
# 3x3
identity = np.array([[1., 0, 0], [0, 1., 0], [0, 0, 1.]])
self._verifySquareRootReal(identity)
def testEmpty(self):
self._verifySquareRootReal(np.empty([0, 2, 2]))
self._verifySquareRootReal(np.empty([2, 0, 0]))
@test_util.run_v1_only("b/120545219")
def testWrongDimensions(self):
# The input to the square root should be at least a 2-dimensional tensor.
tensor = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
gen_linalg_ops.matrix_square_root(tensor)
@test_util.run_v1_only("b/120545219")
def testNotSquare(self):
with self.assertRaises(ValueError):
tensor = constant_op.constant([[1., 0., -1.], [-1., 1., 0.]])
self.evaluate(gen_linalg_ops.matrix_square_root(tensor))
@test_util.run_v1_only("b/120545219")
def testConcurrentExecutesWithoutError(self):
with test_util.use_gpu():
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
square1 = math_ops.matmul(matrix1, matrix1)
square2 = math_ops.matmul(matrix2, matrix2)
sqrt1 = gen_linalg_ops.matrix_square_root(square1)
sqrt2 = gen_linalg_ops.matrix_square_root(square2)
all_ops = [sqrt1, sqrt2]
sqrt = self.evaluate(all_ops)
self.assertAllClose(sqrt[0], sqrt[1])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/matrix_square_root_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops which manipulate lists of tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np # pylint: disable=unused-import
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_list_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def _testPushPop(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), 1.0)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
def testPushPop(self, max_num_elements):
self._testPushPop(max_num_elements)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
@test_util.run_gpu_only
def testPushPopGPU(self, max_num_elements):
with context.device("gpu:0"):
self._testPushPop(max_num_elements)
@test_util.run_deprecated_v1
def testPushInFullListFails(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[], max_num_elements=1)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Tried to push item into a full list"):
l = list_ops.tensor_list_push_back(l, 2.)
self.evaluate(l)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
@test_util.run_deprecated_v1
def testPopFromEmptyTensorListFails(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=max_num_elements)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Trying to pop from an empty list"):
l = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.evaluate(l)
def testPopUninitializedTensorUseListElementShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[2, 3], num_elements=3)
_, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(e, np.zeros((2, 3)))
def testPopUninitializedTensorUseSpecifiedElementShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[None, 3], num_elements=3)
_, e = gen_list_ops.tensor_list_pop_back(
l, element_dtype=dtypes.float32, element_shape=[4, 3])
self.assertAllEqual(e, np.zeros((4, 3)))
def testPopUninitializedTensorWithInvalidElementShapeFails(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Trying to read an uninitialized tensor but "
"element_shape is not fully defined"):
_, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.evaluate(e)
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[None, 2], num_elements=3)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Incompatible shapes during merge: \[1,3\] vs. \[\?,2\]"):
_, e = gen_list_ops.tensor_list_pop_back(
l, element_dtype=dtypes.float32, element_shape=[1, 3])
self.evaluate(e)
def testPushGetGrad(self):
with backprop.GradientTape() as tape:
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=None)
c0 = constant_op.constant(5.0)
c1 = constant_op.constant([10.0, 20.0])
tape.watch(c0)
tape.watch(c1)
l = list_ops.tensor_list_push_back(l, c0)
l = list_ops.tensor_list_push_back(l, c1)
t1 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t1), [10.0, 20.0])
# t1 == c1 so the gradient should be [0., [1., 1.]]
# This tests that the gradient of push_back correctly converts DT_INVALID
      # tensors to zeros. The list returned by the gradient of GetItem will
      # only have the tensor at index 1 set and the others set to DT_INVALID.
dt0, dt1 = tape.gradient(t1, [c0, c1])
self.assertAllEqual(self.evaluate(dt1), [1.0, 1.0])
self.assertEqual(self.evaluate(dt0), 0.0)
def _testStack(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
if not context.executing_eagerly():
self.assertAllEqual(t.shape.as_list(), [None])
self.assertAllEqual(self.evaluate(t), [1.0, 2.0])
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
def testStack(self, max_num_elements):
self._testStack(max_num_elements)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
@test_util.run_gpu_only
def testStackGPU(self, max_num_elements):
with context.device("gpu:0"):
self._testStack(max_num_elements)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 3))
@test_util.run_deprecated_v1
def testStackWithUnknownElementShape(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=None,
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [1.0, 2.0])
# Should raise an error when the element tensors do not all have the same
# shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Incompatible ranks during merge: 0 vs. 1"):
l = list_ops.tensor_list_push_back(l, constant_op.constant([3.0, 4.0]))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.evaluate(t)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 3))
@test_util.run_deprecated_v1
def testStackWithPartiallyDefinedElementShape(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[None],
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0]))
l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0]))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [[1.0], [2.0]])
# Should raise an error when the element tensors do not all have the same
# shape.
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Incompatible shapes during merge: \[1\] vs. \[2\]"):
l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0, 3.0]))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.evaluate(t)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
@test_util.run_deprecated_v1
def testStackEmptyList(self, max_num_elements):
# Should be able to stack empty lists with fully defined element_shape.
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[1, 2],
max_num_elements=max_num_elements)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t).shape, (0, 1, 2))
# Should not be able to stack empty lists with partially defined
# element_shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"non-fully-defined"):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[None, 2],
max_num_elements=max_num_elements)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.evaluate(t)
# Should not be able to stack empty lists with undefined element_shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"non-fully-defined"):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=None,
max_num_elements=max_num_elements)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.evaluate(t)
def _testStackWithUninitializedTensors(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=3)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [0., 0., 0.])
def testStackWithUninitializedTensors(self):
self._testStackWithUninitializedTensors()
@test_util.run_gpu_only
def testStackWithUninitializedTensorsGpu(self):
with context.device("gpu:0"):
self._testStackWithUninitializedTensors()
def _testStackWithUninitializedTensorsInferShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
l = list_ops.tensor_list_set_item(l, 1, [1., 2.])
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [[0., 0.], [1., 2.], [0., 0.]])
def testStackWithUninitializedTensorsInferShape(self):
self._testStackWithUninitializedTensorsInferShape()
@test_util.run_gpu_only
def testStackWithUninitializedTensorsInferShapeGpu(self):
with context.device("gpu:0"):
self._testStackWithUninitializedTensorsInferShape()
def testStackReservedListWithNoElementsAndPartialElementShapeFails(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Tried to stack list which only contains "
"uninitialized tensors and has a "
"non-fully-defined element_shape: <unknown>"):
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.evaluate(t)
def testStackUsingSpecifiedElementShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
t = gen_list_ops.tensor_list_stack(
l, element_dtype=dtypes.float32, element_shape=[])
if context.executing_eagerly():
self.assertEqual(t.shape.as_list(), [3])
else:
self.assertEqual(t.shape.as_list(), [None])
self.assertAllEqual(self.evaluate(t), np.zeros((3,)))
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
def testGatherGrad(self, max_num_elements):
with backprop.GradientTape() as tape:
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=max_num_elements)
c0 = constant_op.constant(1.0)
tape.watch(c0)
l = list_ops.tensor_list_push_back(l, c0)
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
t = list_ops.tensor_list_gather(l, [1, 0], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [2.0, 1.0])
s = (t[0] + t[1]) * (t[0] + t[1])
dt = tape.gradient(s, c0)
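    # c0 is element 0 of the list and is gathered into t[1], so
    # ds/dc0 = 2 * (t[0] + t[1]) = 2 * 3 = 6.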
self.assertAllEqual(self.evaluate(dt), 6.0)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 3))
@test_util.run_deprecated_v1
def testGatherWithUnknownElementShape(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=None,
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
l = list_ops.tensor_list_push_back(l, constant_op.constant([3.0, 4.0]))
t = list_ops.tensor_list_gather(l, [1, 0], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [2.0, 1.0])
t = list_ops.tensor_list_gather(l, [2], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [[3.0, 4.0]])
# Should raise an error when the requested tensors do not all have the same
# shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Incompatible ranks during merge: 0 vs. 1"):
t = list_ops.tensor_list_gather(l, [0, 2], element_dtype=dtypes.float32)
self.evaluate(t)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 3))
@test_util.run_deprecated_v1
def testGatherWithPartiallyDefinedElementShape(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[None],
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0]))
l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0, 3.0]))
l = list_ops.tensor_list_push_back(l, constant_op.constant([4.0, 5.0]))
t = list_ops.tensor_list_gather(l, [0], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [[1.0]])
t = list_ops.tensor_list_gather(l, [1, 2], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [[2.0, 3.0], [4.0, 5.0]])
# Should raise an error when the requested tensors do not all have the same
# shape.
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Incompatible shapes during merge: \[1\] vs. \[2\]"):
t = list_ops.tensor_list_gather(l, [0, 2], element_dtype=dtypes.float32)
self.evaluate(t)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 3))
@test_util.run_deprecated_v1
def testGatherEmptyList(self, max_num_elements):
# Should be able to gather from empty lists with fully defined
# element_shape.
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[1, 2],
max_num_elements=max_num_elements)
t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32)
self.assertAllEqual((0, 1, 2), self.evaluate(t).shape)
# Should not be able to gather from empty lists with partially defined
# element_shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"non-fully-defined"):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[None, 2],
max_num_elements=max_num_elements)
t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32)
self.evaluate(t)
# Should not be able to gather from empty lists with undefined
# element_shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"non-fully-defined"):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=None,
max_num_elements=max_num_elements)
t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32)
self.evaluate(t)
def testGatherGradWithNonContiguousIndices(self):
with backprop.GradientTape(persistent=True) as tape:
t = constant_op.constant([1.0, 2.0, 3.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=[])
c = constant_op.constant(5.0)
tape.watch(c)
l = list_ops.tensor_list_set_item(l, 1, c)
t = list_ops.tensor_list_gather(l, [1], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [5.0])
s = t[0] * t[0]
dt = tape.gradient(s, c)
self.assertAllEqual(self.evaluate(dt), 10.0)
dl = tape.gradient(t, l)
dl_length = list_ops.tensor_list_length(dl)
self.assertAllEqual(self.evaluate(dl_length), 3)
def _testGatherWithUninitializedTensors(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=3)
t = list_ops.tensor_list_gather(l, [0, 2], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [0., 0.])
def testGatherWithUninitializedTensors(self):
self._testGatherWithUninitializedTensors()
@test_util.run_gpu_only
def testGatherWithUninitializedTensorsGpu(self):
with context.device("gpu:0"):
self._testGatherWithUninitializedTensors()
def _testGatherWithUninitializedTensorsInferShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
l = list_ops.tensor_list_set_item(l, 1, [1., 2.])
t = list_ops.tensor_list_gather(l, [1, 2], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [[1., 2.], [0., 0.]])
def testGatherWithUninitializedTensorsInferShape(self):
self._testGatherWithUninitializedTensorsInferShape()
@test_util.run_gpu_only
def testGatherWithUninitializedTensorsInferShapeGpu(self):
with context.device("gpu:0"):
self._testGatherWithUninitializedTensorsInferShape()
def testGatherReservedListWithNoElementsAndPartialElementShapeFails(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Tried to gather uninitialized tensors from a"
" list with non-fully-defined element_shape"):
t = list_ops.tensor_list_gather(l, [0], element_dtype=dtypes.float32)
self.evaluate(t)
def testGatherUsingSpecifiedElementShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
t = gen_list_ops.tensor_list_gather(
l, [0, 1, 2], element_dtype=dtypes.float32, element_shape=[])
self.assertEqual(t.shape.as_list(), [3])
self.assertAllEqual(self.evaluate(t), np.zeros((3,)))
def testScatterOutputListSize(self):
c0 = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_scatter(c0, [1, 3], [])
    # TensorListScatter should return a list whose size is the largest
    # index + 1.
self.assertAllEqual(list_ops.tensor_list_length(l), 4)
def testScatterOutputListSizeWithNumElementsSpecified(self):
c0 = constant_op.constant([1.0, 2.0])
l = gen_list_ops.tensor_list_scatter_v2(
c0, [1, 3], list_ops._build_element_shape([]), num_elements=5)
# TensorListScatter should return a list with size num_elements.
self.assertAllEqual(list_ops.tensor_list_length(l), 5)
def testScatterFailsWhenIndexLargerThanNumElements(self):
c0 = constant_op.constant([1.0, 2.0])
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"TensorListScatter: Trying to scatter at index 3 in list with size 3"):
l = gen_list_ops.tensor_list_scatter_v2(
c0, [1, 3], list_ops._build_element_shape([]), num_elements=3)
self.evaluate(l)
def testScatterFailsWithInvalidNumElements(self):
c0 = constant_op.constant([1.0, 2.0])
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"TensorListScatter expects num_elements >= -1, found: -2"):
l = gen_list_ops.tensor_list_scatter_v2(
c0, [1, 3], list_ops._build_element_shape([]), num_elements=-2)
self.evaluate(l)
def testScatterWithInvalidRowsInInputTensorFails(self):
c0 = constant_op.constant([1.0, 2.0])
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Invalid number of rows in input tensor. Expected: 3 Actual: 2"):
l = list_ops.tensor_list_scatter(c0, [1, 0, 2], [])
self.evaluate(l)
def testScatterWithNegativeIndicesFails(self):
c0 = constant_op.constant([1.0, 2.0])
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Indices in TensorListScatter must all be non-negative."):
l = list_ops.tensor_list_scatter(c0, [-1, -2], element_shape=[])
self.evaluate(l)
def testScatterIntoExistingList(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=3)
l = list_ops.tensor_list_scatter(tensor=[1.], indices=[0], element_shape=[])
l = list_ops.tensor_list_scatter(
tensor=[2., 3.], indices=[1, 2], element_shape=[], input_handle=l)
self.assertAllEqual(
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32),
[1., 2., 3.])
def testScatterGrad(self):
with backprop.GradientTape() as tape:
c0 = constant_op.constant([1.0, 2.0])
tape.watch(c0)
l = list_ops.tensor_list_scatter(c0, [1, 0], element_shape=[])
t0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
t1 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t0), 2.0)
self.assertAllEqual(self.evaluate(t1), 1.0)
loss = t0 * t0 + t1 * t1
dt = tape.gradient(loss, c0)
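    # The scatter maps c0[0] to item 1 and c0[1] to item 0, so
    # d(loss)/dc0 = [2 * t1, 2 * t0] = [2., 4.].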
self.assertAllEqual(self.evaluate(dt), [2., 4.])
def testScatterWithPartialReadGrad(self):
with backprop.GradientTape() as tape:
c0 = constant_op.constant([1.0, 2.0])
tape.watch(c0)
l = list_ops.tensor_list_scatter(c0, [1, 0], element_shape=[])
t0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t0), 2.0)
loss = t0 * t0
dt = tape.gradient(loss, c0)
self.assertAllEqual(self.evaluate(dt), [0., 4.])
def testTensorListFromTensor(self):
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=[])
e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(e, 1.0)
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(e, 2.0)
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(e, 1.0)
self.assertAllEqual(list_ops.tensor_list_length(l), 0)
@test_util.run_gpu_only
def testFromTensorGPU(self):
with context.device("gpu:0"):
self.testTensorListFromTensor()
def testGetSet(self):
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=[])
e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e0), 1.0)
l = list_ops.tensor_list_set_item(l, 0, 3.0)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [3.0, 2.0])
@test_util.run_gpu_only
def testGetSetGPU(self):
with context.device("gpu:0"):
self.testGetSet()
def testGetSetReserved(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=2)
e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(e0, 0.0)
l = list_ops.tensor_list_set_item(l, 0, 3.0)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [3.0, 0.0])
@test_util.run_gpu_only
def testGetSetReservedGPU(self):
with context.device("gpu:0"):
self.testGetSetReserved()
def testSetGetGrad(self):
with backprop.GradientTape() as tape:
t = constant_op.constant(5.)
tape.watch(t)
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=3)
l = list_ops.tensor_list_set_item(l, 1, 2. * t)
e = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), 10.0)
self.assertAllEqual(self.evaluate(tape.gradient(e, t)), 2.0)
def testGetUninitializedTensorUseListElementShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=3)
l = list_ops.tensor_list_set_item(l, 0, 5.)
e1 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
e2 = list_ops.tensor_list_get_item(l, 2, element_dtype=dtypes.float32)
self.assertEqual(self.evaluate(e1), 0.)
self.assertEqual(self.evaluate(e2), 0.)
def testGetUninitializedTensorUseSpecifiedElementShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
e0 = gen_list_ops.tensor_list_get_item(
l, 0, element_shape=[], element_dtype=dtypes.float32)
e1 = gen_list_ops.tensor_list_get_item(
l, 1, element_shape=[2, 3], element_dtype=dtypes.float32)
self.assertEqual(e0.shape.as_list(), [])
self.assertEqual(e1.shape.as_list(), [2, 3])
self.assertEqual(self.evaluate(e0), 0.)
self.assertAllEqual(self.evaluate(e1), np.zeros((2, 3)))
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[None, 3], num_elements=3)
e1 = gen_list_ops.tensor_list_get_item(
l, 1, element_shape=[2, 3], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e1), np.zeros((2, 3)))
def testGetUninitializedTensorWithInvalidElementShapeFails(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Trying to read an uninitialized tensor but "
"element_shape is not fully defined"):
e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.evaluate(e0)
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[None, 2], num_elements=3)
# In eager mode the shape mismatch is caught in the TensorListGetItem
# kernel which raises an InvalidArgumentError.
# In graph mode the shape mismatch is caught in the C++ shape inference
# which raises a ValueError.
if context.executing_eagerly():
error_type = errors.InvalidArgumentError
else:
error_type = ValueError
with self.assertRaisesRegexp(error_type, r"shapes"):
e0 = gen_list_ops.tensor_list_get_item(
l, 0, element_dtype=dtypes.float32, element_shape=[1, 3])
self.evaluate(e0)
@test_util.run_deprecated_v1
@test_util.enable_control_flow_v2
def testSkipEagerSetItemIndexOutOfBounds(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[])
e0 = constant_op.constant(5.)
l = list_ops.tensor_list_set_item(
l, 0, 2. * e0, resize_if_index_out_of_bounds=True)
l = list_ops.tensor_list_set_item(
l, 1, 1., resize_if_index_out_of_bounds=True)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
grad = gradients_impl.gradients(t, e0)[0]
self.assertAllEqual(self.evaluate(grad), 2.)
@test_util.run_deprecated_v1
def testSetOnEmptyListWithMaxNumElementsFails(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[], max_num_elements=3)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Trying to modify element 0 in a list with 0 elements."):
l = list_ops.tensor_list_set_item(l, 0, 1.)
self.evaluate(l)
def testUnknownShape(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=None)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0, 2.0]))
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), [1.0, 2.0])
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), 1.0)
@test_util.run_gpu_only
def testCPUGPUCopy(self):
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=[])
with context.device("gpu:0"):
l_gpu = array_ops.identity(l)
self.assertAllEqual(
self.evaluate(
list_ops.tensor_list_pop_back(
l_gpu, element_dtype=dtypes.float32)[1]), 2.0)
l_cpu = array_ops.identity(l_gpu)
self.assertAllEqual(
self.evaluate(
list_ops.tensor_list_pop_back(
l_cpu, element_dtype=dtypes.float32)[1]), 2.0)
@test_util.run_gpu_only
def testCPUGPUCopyNested(self):
t = constant_op.constant([1.0, 2.0])
child_l = list_ops.tensor_list_from_tensor(t, element_shape=[])
l = list_ops.empty_tensor_list(
element_shape=constant_op.constant([], dtype=dtypes.int32),
element_dtype=dtypes.variant)
l = list_ops.tensor_list_push_back(l, child_l)
with context.device("gpu:0"):
l_gpu = array_ops.identity(l)
_, child_l_gpu = list_ops.tensor_list_pop_back(
l_gpu, element_dtype=dtypes.variant)
self.assertAllEqual(
self.evaluate(
list_ops.tensor_list_pop_back(
child_l_gpu, element_dtype=dtypes.float32)[1]), 2.0)
l_cpu = array_ops.identity(l_gpu)
_, child_l_cpu = list_ops.tensor_list_pop_back(
l_cpu, element_dtype=dtypes.variant)
self.assertAllEqual(
self.evaluate(
list_ops.tensor_list_pop_back(
child_l_cpu, element_dtype=dtypes.float32)[1]), 2.0)
def testGraphStack(self):
with self.cached_session():
tl = list_ops.empty_tensor_list(
element_shape=constant_op.constant([1], dtype=dtypes.int32),
element_dtype=dtypes.int32)
tl = list_ops.tensor_list_push_back(tl, [1])
self.assertAllEqual(
self.evaluate(
list_ops.tensor_list_stack(tl, element_dtype=dtypes.int32)),
[[1]])
def testSkipEagerStackInLoop(self):
with self.cached_session():
t1 = list_ops.empty_tensor_list(
element_shape=constant_op.constant([], dtype=dtypes.int32),
element_dtype=dtypes.int32)
i = constant_op.constant(0, dtype=dtypes.int32)
def body(i, t1):
t1 = list_ops.tensor_list_push_back(t1, i)
i += 1
return i, t1
i, t1 = control_flow_ops.while_loop(lambda i, t1: math_ops.less(i, 4),
body, [i, t1])
s1 = list_ops.tensor_list_stack(t1, element_dtype=dtypes.int32)
self.assertAllEqual(self.evaluate(s1), [0, 1, 2, 3])
def testSkipEagerStackSwitchDtype(self):
with self.cached_session():
list_ = list_ops.empty_tensor_list(
element_shape=constant_op.constant([], dtype=dtypes.int32),
element_dtype=dtypes.int32)
m = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
def body(list_, m):
list_ = control_flow_ops.cond(
math_ops.equal(list_ops.tensor_list_length(list_), 0),
lambda: list_ops.empty_tensor_list(m.shape, m.dtype), lambda: list_)
list_ = list_ops.tensor_list_push_back(list_, m)
return list_, m
for _ in range(2):
list_, m = body(list_, m)
s1 = list_ops.tensor_list_stack(list_, element_dtype=dtypes.float32)
np_s1 = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32)
self.assertAllEqual(self.evaluate(s1), np_s1)
def testSkipEagerStackInLoopSwitchDtype(self):
with self.cached_session():
t1 = list_ops.empty_tensor_list(
element_shape=constant_op.constant([], dtype=dtypes.int32),
element_dtype=dtypes.int32)
i = constant_op.constant(0, dtype=dtypes.float32)
m = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
def body(i, m, t1):
t1 = control_flow_ops.cond(
math_ops.equal(list_ops.tensor_list_length(t1), 0),
lambda: list_ops.empty_tensor_list(m.shape, m.dtype), lambda: t1)
t1 = list_ops.tensor_list_push_back(t1, m * i)
i += 1.0
return i, m, t1
i, m, t1 = control_flow_ops.while_loop(
lambda i, m, t1: math_ops.less(i, 4), body, [i, m, t1])
s1 = list_ops.tensor_list_stack(t1, element_dtype=dtypes.float32)
np_s1 = np.vstack([np.arange(1, 4) * i for i in range(4)])
self.assertAllEqual(self.evaluate(s1), np_s1)
def testSerialize(self):
worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
with ops.Graph().as_default(), session.Session(target=worker.target):
with ops.device("/job:worker"):
t = constant_op.constant([[1.0], [2.0]])
l = list_ops.tensor_list_from_tensor(t, element_shape=[1])
with ops.device("/job:ps"):
l_ps = array_ops.identity(l)
l_ps, e = list_ops.tensor_list_pop_back(
l_ps, element_dtype=dtypes.float32)
with ops.device("/job:worker"):
worker_e = array_ops.identity(e)
self.assertAllEqual(self.evaluate(worker_e), [2.0])
def testSerializeListWithInvalidTensors(self):
worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
with ops.Graph().as_default(), session.Session(target=worker.target):
with ops.device("/job:worker"):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=2)
l = list_ops.tensor_list_set_item(l, 0, 1.)
with ops.device("/job:ps"):
l_ps = array_ops.identity(l)
l_ps = list_ops.tensor_list_set_item(l_ps, 1, 2.)
t = list_ops.tensor_list_stack(l_ps, element_dtype=dtypes.float32)
with ops.device("/job:worker"):
worker_t = array_ops.identity(t)
self.assertAllEqual(self.evaluate(worker_t), [1.0, 2.0])
def testSerializeListWithUnknownRank(self):
worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
with ops.Graph().as_default(), session.Session(target=worker.target):
with ops.device("/job:worker"):
t = constant_op.constant([[1.0], [2.0]])
l = list_ops.tensor_list_from_tensor(t, element_shape=None)
with ops.device("/job:ps"):
l_ps = array_ops.identity(l)
element_shape = list_ops.tensor_list_element_shape(
l_ps, shape_type=dtypes.int32)
with ops.device("/job:worker"):
element_shape = array_ops.identity(element_shape)
self.assertEqual(self.evaluate(element_shape), -1)
def testSerializeListWithMaxNumElements(self):
worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
with ops.Graph().as_default(), session.Session(target=worker.target):
with ops.device("/job:worker"):
l = list_ops.empty_tensor_list(
element_shape=None,
element_dtype=dtypes.float32,
max_num_elements=2)
l = list_ops.tensor_list_push_back(l, 1.)
with ops.device("/job:ps"):
l_ps = array_ops.identity(l)
l_ps = list_ops.tensor_list_push_back(l_ps, 2.)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Tried to push item into a full list"):
with ops.device("/job:worker"):
l_worker = array_ops.identity(l_ps)
l_worker = list_ops.tensor_list_push_back(l_worker, 3.0)
self.evaluate(l_worker)
def testPushPopGradients(self):
with backprop.GradientTape() as tape:
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[])
c = constant_op.constant(1.0)
tape.watch(c)
l = list_ops.tensor_list_push_back(l, c)
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
e = 2 * e
self.assertAllEqual(self.evaluate(tape.gradient(e, [c])[0]), 2.0)
def testStackFromTensorGradients(self):
with backprop.GradientTape() as tape:
c = constant_op.constant([1.0, 2.0])
tape.watch(c)
l = list_ops.tensor_list_from_tensor(c, element_shape=[])
c2 = list_ops.tensor_list_stack(
l, element_dtype=dtypes.float32, num_elements=2)
result = c2 * 2.0
grad = tape.gradient(result, [c])[0]
self.assertAllEqual(self.evaluate(grad), [2.0, 2.0])
def testGetSetGradients(self):
with backprop.GradientTape() as tape:
c = constant_op.constant([1.0, 2.0])
tape.watch(c)
l = list_ops.tensor_list_from_tensor(c, element_shape=[])
c2 = constant_op.constant(3.0)
tape.watch(c2)
l = list_ops.tensor_list_set_item(l, 0, c2)
e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
ee = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
y = e * e + ee * ee
grad_c, grad_c2 = tape.gradient(y, [c, c2])
self.assertAllEqual(self.evaluate(grad_c), [0.0, 4.0])
self.assertAllEqual(self.evaluate(grad_c2), 6.0)
@test_util.run_deprecated_v1
def testSetOutOfBounds(self):
c = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(c, element_shape=[])
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(list_ops.tensor_list_set_item(l, 20, 3.0))
@test_util.run_deprecated_v1
def testSkipEagerSetItemWithMismatchedShapeFails(self):
with self.cached_session() as sess:
ph = array_ops.placeholder(dtypes.float32)
c = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(c, element_shape=[])
      # Use a placeholder with unknown shape so that shape inference at graph
      # building time accepts the set_item; the mismatched shape is only
      # caught when the placeholder is fed at run time.
l = list_ops.tensor_list_set_item(l, 0, ph)
l_0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"incompatible shape"):
sess.run(l_0, {ph: [3.0]})
def testResourceVariableScatterGather(self):
c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
l = list_ops.tensor_list_from_tensor(c, element_shape=[])
v = vs.get_variable("var", initializer=[l] * 10, use_resource=True)
v_r_0_stacked = list_ops.tensor_list_stack(v[0], dtypes.float32)
self.evaluate(v.initializer)
self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_0_stacked))
v_r_sparse_stacked = list_ops.tensor_list_stack(
v.sparse_read(0), dtypes.float32)
self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_sparse_stacked))
l_new_0 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])
l_new_1 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[])
updated_v = state_ops.scatter_update(v, [3, 5], [l_new_0, l_new_1])
updated_v_elems = array_ops.unstack(updated_v)
updated_v_stacked = [
list_ops.tensor_list_stack(el, dtypes.float32) for el in updated_v_elems
]
expected = ([[1.0, 2.0]] * 3 + [[3.0, 4.0], [1.0, 2.0], [5.0, 6.0]] +
[[1.0, 2.0]] * 4)
self.assertAllEqual(self.evaluate(updated_v_stacked), expected)
@test_util.run_deprecated_v1
def testConcat(self):
c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
l0 = list_ops.tensor_list_from_tensor(c, element_shape=[])
l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=[])
l_batch_0 = array_ops.stack([l0, l1])
l_batch_1 = array_ops.stack([l1, l0])
l_concat_01 = list_ops.tensor_list_concat_lists(
l_batch_0, l_batch_1, element_dtype=dtypes.float32)
l_concat_10 = list_ops.tensor_list_concat_lists(
l_batch_1, l_batch_0, element_dtype=dtypes.float32)
l_concat_00 = list_ops.tensor_list_concat_lists(
l_batch_0, l_batch_0, element_dtype=dtypes.float32)
l_concat_11 = list_ops.tensor_list_concat_lists(
l_batch_1, l_batch_1, element_dtype=dtypes.float32)
expected_00 = [[1.0, 2.0, 1.0, 2.0], [-1.0, -1.0]]
expected_01 = [[1.0, 2.0, -1.0], [-1.0, 1.0, 2.0]]
expected_10 = [[-1.0, 1.0, 2.0], [1.0, 2.0, -1.0]]
expected_11 = [[-1.0, -1.0], [1.0, 2.0, 1.0, 2.0]]
for i, (concat, expected) in enumerate(zip(
[l_concat_00, l_concat_01, l_concat_10, l_concat_11],
[expected_00, expected_01, expected_10, expected_11])):
splitted = array_ops.unstack(concat)
splitted_stacked_ret = self.evaluate(
(list_ops.tensor_list_stack(splitted[0], dtypes.float32),
list_ops.tensor_list_stack(splitted[1], dtypes.float32)))
print("Test concat %d: %s, %s, %s, %s"
% (i, expected[0], splitted_stacked_ret[0],
expected[1], splitted_stacked_ret[1]))
self.assertAllClose(expected[0], splitted_stacked_ret[0])
self.assertAllClose(expected[1], splitted_stacked_ret[1])
# Concatenating mismatched shapes fails.
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
self.evaluate(
list_ops.tensor_list_concat_lists(
l_batch_0,
list_ops.empty_tensor_list([], dtypes.float32),
element_dtype=dtypes.float32))
if context.executing_eagerly():
expected_error = (
errors.InvalidArgumentError,
"element shapes are not identical at index 0")
else:
expected_error = (ValueError, "Shapes must be equal rank")
with self.assertRaisesRegexp(*expected_error):
l_batch_of_vec_tls = array_ops.stack(
[list_ops.tensor_list_from_tensor([[1.0]], element_shape=[1])] * 2)
self.evaluate(
list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_vec_tls,
element_dtype=dtypes.float32))
if context.executing_eagerly():
expected_error = (errors.InvalidArgumentError,
r"input_b\[0\].dtype != element_dtype.")
else:
expected_error = (ValueError, "input_b.type != element_dtype")
with self.assertRaisesRegexp(*expected_error):
l_batch_of_int_tls = array_ops.stack(
[list_ops.tensor_list_from_tensor([1], element_shape=[])] * 2)
self.evaluate(
list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_int_tls,
element_dtype=dtypes.float32))
@test_util.run_deprecated_v1
def testPushBackBatch(self):
c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
l0 = list_ops.tensor_list_from_tensor(c, element_shape=[])
l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=[])
l_batch = array_ops.stack([l0, l1])
l_push = list_ops.tensor_list_push_back_batch(l_batch, [3.0, 4.0])
l_unstack = array_ops.unstack(l_push)
l0_ret = list_ops.tensor_list_stack(l_unstack[0], dtypes.float32)
l1_ret = list_ops.tensor_list_stack(l_unstack[1], dtypes.float32)
self.assertAllClose([1.0, 2.0, 3.0], self.evaluate(l0_ret))
self.assertAllClose([-1.0, 4.0], self.evaluate(l1_ret))
with ops.control_dependencies([l_push]):
l_unstack_orig = array_ops.unstack(l_batch)
l0_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[0],
dtypes.float32)
l1_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[1],
dtypes.float32)
    # Check that push_back_batch still works without aliasing, and that it
    # does not modify the input.
l0_r_v, l1_r_v, l0_orig_v, l1_orig_v = self.evaluate(
(l0_ret, l1_ret, l0_orig_ret, l1_orig_ret))
self.assertAllClose([1.0, 2.0, 3.0], l0_r_v)
self.assertAllClose([-1.0, 4.0], l1_r_v)
self.assertAllClose([1.0, 2.0], l0_orig_v)
self.assertAllClose([-1.0], l1_orig_v)
# Pushing back mismatched shapes fails.
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, []))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"incompatible shape to a list at index 0"):
self.evaluate(
list_ops.tensor_list_push_back_batch(l_batch, [[3.0], [4.0]]))
if context.executing_eagerly():
expected_error = (errors.InvalidArgumentError, "Invalid data type")
else:
expected_error = (ValueError, "wrong element dtype")
with self.assertRaisesRegexp(*expected_error):
self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, [3, 4]))
def testZerosLike(self):
for dtype in (dtypes.uint8, dtypes.uint16, dtypes.int8, dtypes.int16,
dtypes.int32, dtypes.int64, dtypes.float16, dtypes.float32,
dtypes.float64, dtypes.complex64, dtypes.complex128,
dtypes.bool):
l_empty = list_ops.empty_tensor_list(
element_dtype=dtype, element_shape=[])
l_empty_zeros = array_ops.zeros_like(l_empty)
t_empty_zeros = list_ops.tensor_list_stack(
l_empty_zeros, element_dtype=dtype)
l_full = list_ops.tensor_list_push_back(l_empty,
math_ops.cast(0, dtype=dtype))
l_full = list_ops.tensor_list_push_back(l_full,
math_ops.cast(1, dtype=dtype))
l_full_zeros = array_ops.zeros_like(l_full)
t_full_zeros = list_ops.tensor_list_stack(
l_full_zeros, element_dtype=dtype)
self.assertAllEqual(self.evaluate(t_empty_zeros), [])
self.assertAllEqual(
self.evaluate(t_full_zeros), np.zeros(
(2,), dtype=dtype.as_numpy_dtype))
def testZerosLikeNested(self):
for dtype in (dtypes.uint8, dtypes.uint16, dtypes.int8, dtypes.int16,
dtypes.int32, dtypes.int64, dtypes.float16, dtypes.float32,
dtypes.float64, dtypes.complex64, dtypes.complex128,
dtypes.bool):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.variant, element_shape=[])
sub_l = list_ops.empty_tensor_list(element_dtype=dtype, element_shape=[])
l = list_ops.tensor_list_push_back(l, sub_l)
sub_l = list_ops.tensor_list_push_back(sub_l, math_ops.cast(
1, dtype=dtype))
l = list_ops.tensor_list_push_back(l, sub_l)
sub_l = list_ops.tensor_list_push_back(sub_l, math_ops.cast(
2, dtype=dtype))
l = list_ops.tensor_list_push_back(l, sub_l)
# l : [[],
# [1],
# [1, 2]]
#
# l_zeros : [[],
# [0],
# [0, 0]]
l_zeros = array_ops.zeros_like(l)
outputs = []
for _ in range(3):
l_zeros, out = list_ops.tensor_list_pop_back(
l_zeros, element_dtype=dtypes.variant)
outputs.append(list_ops.tensor_list_stack(out, element_dtype=dtype))
# Note: `outputs` contains popped values so the order is reversed.
self.assertAllEqual(self.evaluate(outputs[2]), [])
self.assertAllEqual(
self.evaluate(outputs[1]), np.zeros((1,), dtype=dtype.as_numpy_dtype))
self.assertAllEqual(
self.evaluate(outputs[0]), np.zeros((2,), dtype=dtype.as_numpy_dtype))
def testElementShape(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=None)
shape = list_ops.tensor_list_element_shape(l, shape_type=dtypes.int32)
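    # tensor_list_element_shape returns the scalar -1 when the list's
    # element_shape is unknown.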
self.assertEqual(self.evaluate(shape), -1)
def testZerosLikeUninitialized(self):
l0 = list_ops.tensor_list_reserve([], 3, element_dtype=dtypes.float32)
l1 = list_ops.tensor_list_set_item(l0, 0, 1.) # [1., _, _]
zeros_1 = array_ops.zeros_like(l1) # [0., _, _]
l2 = list_ops.tensor_list_set_item(l1, 2, 2.) # [1., _, 2.]
zeros_2 = array_ops.zeros_like(l2) # [0., _, 0.]
# Gather indices with zeros in `zeros_1`.
res_1 = list_ops.tensor_list_gather(
zeros_1, [0], element_dtype=dtypes.float32)
# Gather indices with zeros in `zeros_2`.
res_2 = list_ops.tensor_list_gather(
zeros_2, [0, 2], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(res_1), [0.])
self.assertAllEqual(self.evaluate(res_2), [0., 0.])
@test_util.run_deprecated_v1
def testSkipEagerTensorListGetItemGradAggregation(self):
l = list_ops.tensor_list_reserve(
element_shape=[], num_elements=1, element_dtype=dtypes.float32)
x = constant_op.constant(1.0)
l = list_ops.tensor_list_set_item(l, 0, x)
l_read1 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
l_read2 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
grad = gradients_impl.gradients([l_read1, l_read2], [x])
with self.cached_session() as sess:
self.assertSequenceEqual(self.evaluate(grad), [2.])
@test_util.run_deprecated_v1
def testSkipEagerBuildElementShape(self):
fn = list_ops._build_element_shape
# Unknown shape -> -1.
self.assertEqual(fn(None), -1)
self.assertEqual(fn(tensor_shape.unknown_shape()), -1)
# Scalar shape -> [] with type int32.
self.assertEqual(fn([]).dtype, dtypes.int32)
self.assertEqual(fn(tensor_shape.scalar()).dtype, dtypes.int32)
self.assertAllEqual(self.evaluate(fn([])), np.array([], np.int32))
self.assertAllEqual(
self.evaluate(fn(tensor_shape.scalar())), np.array([], np.int32))
# Tensor -> Tensor
shape = constant_op.constant(1)
self.assertIs(fn(shape), shape)
# Shape with unknown dims -> shape list with -1's.
shape = [None, 5]
self.assertAllEqual(fn(shape), [-1, 5])
self.assertAllEqual(fn(tensor_shape.TensorShape(shape)), [-1, 5])
# Shape with unknown dims and tensor dims -> shape list with -1's and tensor
# dims.
t = array_ops.placeholder(dtypes.int32)
shape = [None, 5, t]
result = fn(shape)
self.assertAllEqual(result[:2], [-1, 5])
self.assertIs(result[2], t)
def testAddN(self):
l1 = list_ops.tensor_list_from_tensor([1.0, 2.0], element_shape=[])
l2 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])
l3 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[])
result = math_ops.add_n((l1, l2, l3))
result_t = list_ops.tensor_list_stack(result, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(result_t), [9., 12.])
def testAddNNestedList(self):
l1 = list_ops.tensor_list_from_tensor([1.0, 2.0], element_shape=[])
l2 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])
l3 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[])
l4 = list_ops.tensor_list_from_tensor([7.0, 8.0], element_shape=[])
a = list_ops.empty_tensor_list(
element_dtype=dtypes.variant, element_shape=[])
a = list_ops.tensor_list_push_back(a, l1)
a = list_ops.tensor_list_push_back(a, l2)
b = list_ops.empty_tensor_list(
element_dtype=dtypes.variant, element_shape=[])
b = list_ops.tensor_list_push_back(b, l3)
b = list_ops.tensor_list_push_back(b, l4)
result = math_ops.add_n((a, b))
result_0 = list_ops.tensor_list_stack(
list_ops.tensor_list_get_item(result, 0, element_dtype=dtypes.variant),
element_dtype=dtypes.float32)
result_1 = list_ops.tensor_list_stack(
list_ops.tensor_list_get_item(result, 1, element_dtype=dtypes.variant),
element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(result_0), [6., 8.])
self.assertAllEqual(self.evaluate(result_1), [10., 12.])
def testAddTensorListsFailsIfLeadingDimsMismatch(self):
l1 = list_ops.tensor_list_reserve(
element_shape=[], element_dtype=dtypes.float32, num_elements=2)
l2 = list_ops.tensor_list_reserve(
element_shape=[], element_dtype=dtypes.float32, num_elements=3)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Trying to add two lists of tensors with different lengths"):
l = math_ops.add_n([l1, l2])
self.evaluate(list_ops.tensor_list_stack(l, element_dtype=dtypes.float32))
@test_util.run_v1_only("Uses placeholders")
def testSkipEagerAddTensorListsFailsIfElementShapesMismatch(self):
with self.cached_session() as sess:
# Use placeholders instead of constant values for shapes to prevent TF's
# shape inference from catching this early.
l1_element_shape = array_ops.placeholder(dtype=dtypes.int32)
l2_element_shape = array_ops.placeholder(dtype=dtypes.int32)
l1 = list_ops.tensor_list_reserve(
element_shape=l1_element_shape,
element_dtype=dtypes.float32,
num_elements=3)
l2 = list_ops.tensor_list_reserve(
element_shape=l2_element_shape,
element_dtype=dtypes.float32,
num_elements=3)
l = math_ops.add_n([l1, l2])
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Trying to add two lists of tensors with incompatible element shapes"
):
sess.run(
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32), {
l1_element_shape: [],
l2_element_shape: [2]
})
@test_util.run_deprecated_v1
def testSkipEagerConcatShapeInference(self):
def BuildTensor(element_shape):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=element_shape)
return list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
self.assertIsNone(BuildTensor(None).shape.rank)
self.assertAllEqual(BuildTensor([None, 2, 3]).shape.as_list(), [None, 2, 3])
self.assertAllEqual(
BuildTensor([None, 2, None]).shape.as_list(), [None, 2, None])
self.assertAllEqual(BuildTensor([1, 2, 3]).shape.as_list(), [None, 2, 3])
def testConcatWithFullyDefinedElementShape(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[2, 2])
l = list_ops.tensor_list_push_back(l, [[0., 1.], [2., 3.]])
l = list_ops.tensor_list_push_back(l, [[4., 5.], [6., 7.]])
t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
self.assertAllEqual(
self.evaluate(t), [[0., 1.], [2., 3.], [4., 5.], [6., 7.]])
def testConcatWithNonFullyDefinedElementShape(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[None, 2])
l = list_ops.tensor_list_push_back(l, [[0., 1.]])
l = list_ops.tensor_list_push_back(l, [[2., 3.], [4., 5.]])
t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [[0., 1.], [2., 3.], [4., 5.]])
def testConcatWithMismatchingTensorShapesFails(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=None)
l = list_ops.tensor_list_push_back(l, [[0., 1.]])
l = list_ops.tensor_list_push_back(l, [[2.], [4.]])
with self.assertRaisesRegexp(
errors.InvalidArgumentError, r"Incompatible shapes during merge: "
r"\[2\] vs. \[1\]"):
t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
self.evaluate(t)
def testConcatEmptyListWithFullyDefinedElementShape(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[5, 2])
t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t).shape, (0, 2))
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[None, 2])
t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t).shape, (0, 2))
def testConcatEmptyListWithUnknownElementShapeFails(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=None)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"All except the first dimension must be fully"
" defined when concating an empty tensor list"):
t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
self.evaluate(t)
def testConcatEmptyListWithPartiallyDefinedElementShapeFails(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[2, None])
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"All except the first dimension must be fully"
" defined when concating an empty tensor list"):
t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
self.evaluate(t)
def testConcatListWithScalarElementShapeFails(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=tensor_shape.scalar())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Concat requires elements to be at least vectors, "
"found scalars instead"):
t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
self.evaluate(t)
def testConcatListWithScalarElementsFails(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=None)
l1 = list_ops.tensor_list_push_back(l, 1.)
with self.assertRaisesRegexp(
errors.InvalidArgumentError, "Concat saw a scalar shape at index 0"
" but requires at least vectors"):
t = list_ops.tensor_list_concat(l1, element_dtype=dtypes.float32)
self.evaluate(t)
l1 = list_ops.tensor_list_push_back(l, [1.])
l1 = list_ops.tensor_list_push_back(l1, 2.)
with self.assertRaisesRegexp(
errors.InvalidArgumentError, "Concat saw a scalar shape at index 1"
" but requires at least vectors"):
t = list_ops.tensor_list_concat(l1, element_dtype=dtypes.float32)
self.evaluate(t)
def testConcatWithUninitializedTensorsUseListElementShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[2, 3], num_elements=3)
t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
self.assertAllEqual(np.zeros((6, 3)), t)
def testConcatWithUninitializedTensorsUseProvidedElementShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
t = list_ops.tensor_list_concat(
l, element_dtype=dtypes.float32, element_shape=(2, 3))
self.assertAllEqual(np.zeros((6, 3)), t)
def testConcatWithUninitializedTensorsUseProvidedElementShapeAndLengths(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
t, _ = gen_list_ops.tensor_list_concat_v2(
l,
element_dtype=dtypes.float32,
element_shape=list_ops._build_element_shape((None, 3)),
leading_dims=[2, 3, 5])
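    # All three elements are uninitialized, so each contributes
    # leading_dims[i] rows of zeros: 2 + 3 + 5 = 10 rows in total.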
self.assertAllEqual(np.zeros((10, 3)), t)
l = list_ops.tensor_list_set_item(l, 1, [[2., 3.], [4., 5.], [6., 7.]])
t, _ = gen_list_ops.tensor_list_concat_v2(
l,
element_dtype=dtypes.float32,
element_shape=list_ops._build_element_shape((None, 2)),
leading_dims=[2, 3, 4])
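    # Element 1 supplies its own 3 rows; the uninitialized elements 0 and 2
    # fall back to leading_dims (2 and 4 rows of zeros), giving 9 rows.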
self.assertAllEqual([[0., 0.], [0., 0.], [2., 3.], [4., 5.], [6., 7.],
[0., 0.], [0., 0.], [0., 0.], [0., 0.]], t)
def testConcatWithUninitializedTensorsInferShapeFromElements(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
l = list_ops.tensor_list_set_item(l, 1, [[2., 3.], [4., 5.], [6., 7.]])
t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
self.assertAllEqual([[0., 0.], [0., 0.], [0., 0.], [2., 3.], [4., 5.],
[6., 7.], [0., 0.], [0., 0.], [0., 0.]], t)
def testConcatWithUninitializedTensorsFailsIfNoElementShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Trying to concat list with only uninitialized tensors "
r"but element_shape_except_first_dim_ is not fully defined"):
t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
self.evaluate(t)
def testConcatWithUninitializedTensorsFailsIfNoInputLengths(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[None, 3], num_elements=3)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"List contains uninitialized tensor at index 0"
r" but leading_dims has only 0 elements."):
t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
self.evaluate(t)
def testEvenSplit(self):
def RunTest(input_tensor, lengths, expected_stacked_output):
l = list_ops.tensor_list_split(
input_tensor, element_shape=None, lengths=lengths)
self.assertAllEqual(
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32),
expected_stacked_output)
RunTest([1., 2., 3.], [1, 1, 1], [[1.], [2.], [3.]])
RunTest([1., 2., 3., 4.], [2, 2], [[1., 2.], [3., 4.]])
RunTest([[1., 2.], [3., 4.]], [1, 1], [[[1., 2.]], [[3., 4.]]])
def testUnevenSplit(self):
l = list_ops.tensor_list_split([1., 2., 3., 4., 5],
element_shape=None,
lengths=[3, 2])
self.assertAllEqual(list_ops.tensor_list_length(l), 2)
self.assertAllEqual(
list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32),
[1., 2., 3.])
self.assertAllEqual(
list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32),
[4., 5.])
@test_util.run_deprecated_v1
def testSkipEagerSplitWithInvalidTensorShapeFails(self):
with self.cached_session():
tensor = array_ops.placeholder(dtype=dtypes.float32)
l = list_ops.tensor_list_split(tensor, element_shape=None, lengths=[1])
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Tensor must be at least a vector, but saw shape: \[\]"):
l.eval({tensor: 1})
@test_util.run_deprecated_v1
def testSkipEagerSplitWithInvalidLengthsShapeFails(self):
with self.cached_session():
lengths = array_ops.placeholder(dtype=dtypes.int64)
l = list_ops.tensor_list_split([1., 2.],
element_shape=None,
lengths=lengths)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Expected lengths to be a vector, received shape: \[\]"):
l.eval({lengths: 1})
def testSplitWithInvalidLengthsFails(self):
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r"Invalid value in lengths: -1"):
l = list_ops.tensor_list_split([1., 2.],
element_shape=None,
lengths=[1, -1])
self.evaluate(l)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Attempting to slice \[0, 3\] from tensor with length 2"):
l = list_ops.tensor_list_split([1., 2.], element_shape=None, lengths=[3])
self.evaluate(l)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Unused values in tensor. Length of tensor: 2 Values used: 1"):
l = list_ops.tensor_list_split([1., 2.], element_shape=None, lengths=[1])
self.evaluate(l)
@test_util.run_deprecated_v1
def testSkipEagerSplitWithScalarElementShapeFails(self):
with self.assertRaisesRegexp(ValueError,
r"Shapes must be equal rank, but are 1 and 0"):
l = list_ops.tensor_list_split([1., 2.], element_shape=[], lengths=[1, 1])
with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"TensorListSplit requires element_shape to be at least of rank 1, "
r"but saw: \[\]"):
element_shape = array_ops.placeholder(dtype=dtypes.int32)
l = list_ops.tensor_list_split([1., 2.],
element_shape=element_shape,
lengths=[1, 1])
l.eval({element_shape: []})
def testEagerOnlySplitWithScalarElementShapeFails(self):
if context.executing_eagerly():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"TensorListSplit requires element_shape to be at least of rank 1, "
r"but saw: \[\]"):
list_ops.tensor_list_split([1., 2.], element_shape=[], lengths=[1, 1])
@test_util.run_deprecated_v1
def testSkipEagerSplitWithIncompatibleTensorShapeAndElementShapeFails(self):
with self.assertRaisesRegexp(ValueError,
r"Shapes must be equal rank, but are 2 and 1"):
l = list_ops.tensor_list_split([[1.], [2.]],
element_shape=[1],
lengths=[1, 1])
with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"tensor shape \[2,1\] is not compatible with element_shape \[1\]"):
element_shape = array_ops.placeholder(dtype=dtypes.int32)
l = list_ops.tensor_list_split([[1.], [2.]],
element_shape=element_shape,
lengths=[1, 1])
l.eval({element_shape: [1]})
def testEagerOnlySplitWithIncompatibleTensorShapeAndElementShapeFails(self):
if context.executing_eagerly():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"tensor shape \[2,1\] is not compatible with element_shape \[1\]"):
list_ops.tensor_list_split([[1.], [2.]],
element_shape=[1],
lengths=[1, 1])
def testResizeGrow(self):
l = list_ops.tensor_list_from_tensor([1., 2.], element_shape=[])
l = list_ops.tensor_list_resize(l, 4)
self.assertEqual(self.evaluate(list_ops.tensor_list_length(l)), 4)
self.assertEqual(
self.evaluate(
list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)),
1.)
self.assertEqual(
self.evaluate(
list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)),
2.)
def testResizeShrink(self):
l = list_ops.tensor_list_from_tensor([1., 2., 3.], element_shape=[])
l = list_ops.tensor_list_resize(l, 2)
self.assertEqual(self.evaluate(list_ops.tensor_list_length(l)), 2)
self.assertAllEqual(
self.evaluate(
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)),
[1., 2.])
def testResizeWithInvalidSizeFails(self):
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"TensorListSlice expects size to be non-negative"):
l = list_ops.tensor_list_from_tensor([1., 2., 3.], element_shape=[])
l = list_ops.tensor_list_resize(l, -1)
self.evaluate(l)
@test_util.run_deprecated_v1
@test_util.enable_control_flow_v2
def testSkipEagerResizeGrad(self):
t = constant_op.constant([1., 2., 3.])
l = list_ops.tensor_list_from_tensor(t, element_shape=[])
l = list_ops.tensor_list_set_item(
l, 3, 4., resize_if_index_out_of_bounds=True)
t1 = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
grad = gradients_impl.gradients(t1, t)[0]
self.assertAllEqual(self.evaluate(grad), [1., 1., 1.])
def testHandleDataAcrossFunctionCall(self):
@def_function.function
def func():
t = constant_op.constant([1., 2., 3.])
l = list_ops.tensor_list_from_tensor(t, element_shape=[])
return l
tensor_list = func()
element = list_ops.tensor_list_get_item(
tensor_list, 0, element_dtype=dtypes.float32)
self.assertAllEqual(element.shape.as_list(), [])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/list_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib
class LinearOperatorDiagTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
diag = linear_operator_test_util.random_sign_uniform(
shape[:-1], minval=1., maxval=2., dtype=dtype)
if ensure_self_adjoint_and_pd:
# Abs on complex64 will result in a float32, so we cast back up.
diag = math_ops.cast(math_ops.abs(diag), dtype=dtype)
lin_op_diag = diag
if use_placeholder:
lin_op_diag = array_ops.placeholder_with_default(diag, shape=None)
operator = linalg.LinearOperatorDiag(
lin_op_diag,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
matrix = array_ops.matrix_diag(diag)
return operator, matrix
def test_assert_positive_definite_raises_for_zero_eigenvalue(self):
# Matrix with one positive eigenvalue and one zero eigenvalue.
with self.cached_session():
diag = [1.0, 0.0]
operator = linalg.LinearOperatorDiag(diag)
# is_self_adjoint should be auto-set for real diag.
self.assertTrue(operator.is_self_adjoint)
with self.assertRaisesOpError("non-positive.*not positive definite"):
operator.assert_positive_definite().run()
def test_assert_positive_definite_raises_for_negative_real_eigvalues(self):
with self.cached_session():
diag_x = [1.0, -2.0]
      diag_y = [0., 0.]  # Imaginary parts should not matter here.
diag = math_ops.complex(diag_x, diag_y)
operator = linalg.LinearOperatorDiag(diag)
# is_self_adjoint should not be auto-set for complex diag.
self.assertTrue(operator.is_self_adjoint is None)
with self.assertRaisesOpError("non-positive real.*not positive definite"):
operator.assert_positive_definite().run()
@test_util.run_deprecated_v1
def test_assert_positive_definite_does_not_raise_if_pd_and_complex(self):
with self.cached_session():
x = [1., 2.]
y = [1., 0.]
diag = math_ops.complex(x, y) # Re[diag] > 0.
# Should not fail
linalg.LinearOperatorDiag(diag).assert_positive_definite().run()
def test_assert_non_singular_raises_if_zero_eigenvalue(self):
    # Singular matrix with one positive eigenvalue and one zero eigenvalue.
with self.cached_session():
diag = [1.0, 0.0]
operator = linalg.LinearOperatorDiag(diag, is_self_adjoint=True)
with self.assertRaisesOpError("Singular operator"):
operator.assert_non_singular().run()
@test_util.run_deprecated_v1
def test_assert_non_singular_does_not_raise_for_complex_nonsingular(self):
with self.cached_session():
x = [1., 0.]
y = [0., 1.]
diag = math_ops.complex(x, y)
# Should not raise.
linalg.LinearOperatorDiag(diag).assert_non_singular().run()
def test_assert_self_adjoint_raises_if_diag_has_complex_part(self):
with self.cached_session():
x = [1., 0.]
y = [0., 1.]
diag = math_ops.complex(x, y)
operator = linalg.LinearOperatorDiag(diag)
with self.assertRaisesOpError("imaginary.*not self-adjoint"):
operator.assert_self_adjoint().run()
@test_util.run_deprecated_v1
def test_assert_self_adjoint_does_not_raise_for_diag_with_zero_imag(self):
with self.cached_session():
x = [1., 0.]
y = [0., 0.]
diag = math_ops.complex(x, y)
operator = linalg.LinearOperatorDiag(diag)
# Should not raise
operator.assert_self_adjoint().run()
def test_scalar_diag_raises(self):
with self.assertRaisesRegexp(ValueError, "must have at least 1 dimension"):
linalg.LinearOperatorDiag(1.)
def test_broadcast_matmul_and_solve(self):
# These cannot be done in the automated (base test class) tests since they
# test shapes that tf.matmul cannot handle.
    # In particular, tf.matmul does not broadcast the batch dimensions.
with self.cached_session() as sess:
x = random_ops.random_normal(shape=(2, 2, 3, 4))
# This LinearOperatorDiag will be broadcast to (2, 2, 3, 3) during solve
# and matmul with 'x' as the argument.
diag = random_ops.random_uniform(shape=(2, 1, 3))
operator = linalg.LinearOperatorDiag(diag, is_self_adjoint=True)
self.assertAllEqual((2, 1, 3, 3), operator.shape)
# Create a batch matrix with the broadcast shape of operator.
diag_broadcast = array_ops.concat((diag, diag), 1)
mat = array_ops.matrix_diag(diag_broadcast)
self.assertAllEqual((2, 2, 3, 3), mat.get_shape()) # being pedantic.
operator_matmul = operator.matmul(x)
mat_matmul = math_ops.matmul(mat, x)
self.assertAllEqual(operator_matmul.get_shape(), mat_matmul.get_shape())
self.assertAllClose(*self.evaluate([operator_matmul, mat_matmul]))
operator_solve = operator.solve(x)
mat_solve = linalg_ops.matrix_solve(mat, x)
self.assertAllEqual(operator_solve.get_shape(), mat_solve.get_shape())
self.assertAllClose(*self.evaluate([operator_solve, mat_solve]))
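  # Informal shape sketch for the broadcast test above: the operator's batch
  # shape (2, 1) broadcasts against x's batch shape (2, 2), so
  # operator.matmul(x) with x of shape (2, 2, 3, 4) returns (2, 2, 3, 4),
  # while the dense comparison builds `mat` explicitly at the broadcast shape
  # (2, 2, 3, 3) before calling math_ops.matmul.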
def test_diag_matmul(self):
operator1 = linalg_lib.LinearOperatorDiag([2., 3.])
operator2 = linalg_lib.LinearOperatorDiag([1., 2.])
operator3 = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=3.)
operator_matmul = operator1.matmul(operator2)
self.assertTrue(isinstance(
operator_matmul,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([2., 6.], self.evaluate(operator_matmul.diag))
operator_matmul = operator2.matmul(operator1)
self.assertTrue(isinstance(
operator_matmul,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([2., 6.], self.evaluate(operator_matmul.diag))
operator_matmul = operator1.matmul(operator3)
self.assertTrue(isinstance(
operator_matmul,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([6., 9.], self.evaluate(operator_matmul.diag))
operator_matmul = operator3.matmul(operator1)
self.assertTrue(isinstance(
operator_matmul,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([6., 9.], self.evaluate(operator_matmul.diag))
def test_diag_solve(self):
operator1 = linalg_lib.LinearOperatorDiag([2., 3.], is_non_singular=True)
operator2 = linalg_lib.LinearOperatorDiag([1., 2.], is_non_singular=True)
operator3 = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=3., is_non_singular=True)
operator_solve = operator1.solve(operator2)
self.assertTrue(isinstance(
operator_solve,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([0.5, 2 / 3.], self.evaluate(operator_solve.diag))
operator_solve = operator2.solve(operator1)
self.assertTrue(isinstance(
operator_solve,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([2., 3 / 2.], self.evaluate(operator_solve.diag))
operator_solve = operator1.solve(operator3)
self.assertTrue(isinstance(
operator_solve,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([3 / 2., 1.], self.evaluate(operator_solve.diag))
operator_solve = operator3.solve(operator1)
self.assertTrue(isinstance(
operator_solve,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([2 / 3., 1.], self.evaluate(operator_solve.diag))
def test_diag_adjoint_type(self):
diag = [1., 3., 5., 8.]
operator = linalg.LinearOperatorDiag(diag, is_non_singular=True)
self.assertIsInstance(operator.adjoint(), linalg.LinearOperatorDiag)
def test_diag_cholesky_type(self):
diag = [1., 3., 5., 8.]
operator = linalg.LinearOperatorDiag(
diag,
is_positive_definite=True,
is_self_adjoint=True,
)
self.assertIsInstance(operator.cholesky(), linalg.LinearOperatorDiag)
def test_diag_inverse_type(self):
diag = [1., 3., 5., 8.]
operator = linalg.LinearOperatorDiag(diag, is_non_singular=True)
self.assertIsInstance(operator.inverse(), linalg.LinearOperatorDiag)
if __name__ == "__main__":
linear_operator_test_util.add_tests(LinearOperatorDiagTest)
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_block_diag as block_diag
from tensorflow.python.ops.linalg import linear_operator_lower_triangular as lower_triangular
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(0)
def _block_diag_dense(expected_shape, blocks):
"""Convert a list of blocks, into a dense block diagonal matrix."""
rows = []
num_cols = 0
for block in blocks:
# Get the batch shape for the block.
batch_row_shape = array_ops.shape(block)[:-1]
zeros_to_pad_before_shape = array_ops.concat(
[batch_row_shape, [num_cols]], axis=-1)
zeros_to_pad_before = array_ops.zeros(
shape=zeros_to_pad_before_shape, dtype=block.dtype)
num_cols += array_ops.shape(block)[-1]
zeros_to_pad_after_shape = array_ops.concat(
[batch_row_shape, [expected_shape[-2] - num_cols]], axis=-1)
zeros_to_pad_after = array_ops.zeros(
zeros_to_pad_after_shape, dtype=block.dtype)
rows.append(array_ops.concat(
[zeros_to_pad_before, block, zeros_to_pad_after], axis=-1))
return array_ops.concat(rows, axis=-2)
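# Illustrative-only sketch (not part of the TensorFlow test suite): a
# hypothetical helper, never called by the tests, that mirrors the
# pad-and-concatenate idea of _block_diag_dense above with plain NumPy for the
# unbatched 2-D case.
def _np_block_diag_sketch(blocks):
  """Assemble a dense block-diagonal matrix from unbatched 2-D NumPy blocks."""
  total_cols = sum(block.shape[-1] for block in blocks)
  rows = []
  offset = 0
  for block in blocks:
    num_cols = block.shape[-1]
    # Pad each block with zeros on the left and right so that the blocks land
    # on the diagonal once the padded rows are stacked.
    left = np.zeros((block.shape[0], offset))
    right = np.zeros((block.shape[0], total_cols - offset - num_cols))
    rows.append(np.concatenate([left, block, right], axis=-1))
    offset += num_cols
  return np.concatenate(rows, axis=-2)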
class SquareLinearOperatorBlockDiagTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def setUp(self):
# Increase from 1e-6 to 1e-4
self._atol[dtypes.float32] = 1e-4
self._atol[dtypes.complex64] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._rtol[dtypes.complex64] = 1e-4
@property
def operator_shape_infos(self):
shape_info = linear_operator_test_util.OperatorShapeInfo
return [
shape_info((0, 0)),
shape_info((1, 1)),
shape_info((1, 3, 3)),
shape_info((5, 5), blocks=[(2, 2), (3, 3)]),
shape_info((3, 7, 7), blocks=[(1, 2, 2), (3, 2, 2), (1, 3, 3)]),
shape_info((2, 1, 5, 5), blocks=[(2, 1, 2, 2), (1, 3, 3)]),
]
def operator_and_matrix(
self, shape_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(shape_info.shape)
expected_blocks = (
shape_info.__dict__["blocks"] if "blocks" in shape_info.__dict__
else [shape])
matrices = [
linear_operator_test_util.random_positive_definite_matrix(
block_shape, dtype, force_well_conditioned=True)
for block_shape in expected_blocks
]
lin_op_matrices = matrices
if use_placeholder:
lin_op_matrices = [
array_ops.placeholder_with_default(
matrix, shape=None) for matrix in matrices]
operator = block_diag.LinearOperatorBlockDiag(
[linalg.LinearOperatorFullMatrix(
l,
is_square=True,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
for l in lin_op_matrices])
# Should be auto-set.
self.assertTrue(operator.is_square)
# Broadcast the shapes.
expected_shape = list(shape_info.shape)
matrices = linear_operator_util.broadcast_matrix_batch_dims(matrices)
block_diag_dense = _block_diag_dense(expected_shape, matrices)
if not use_placeholder:
block_diag_dense.set_shape(
expected_shape[:-2] + [expected_shape[-1], expected_shape[-1]])
return operator, block_diag_dense
def test_is_x_flags(self):
# Matrix with two positive eigenvalues, 1 and 1.
# The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = block_diag.LinearOperatorBlockDiag(
[linalg.LinearOperatorFullMatrix(matrix)],
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
def test_block_diag_adjoint_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = block_diag.LinearOperatorBlockDiag(
[
linalg.LinearOperatorFullMatrix(
matrix,
is_non_singular=True,
),
linalg.LinearOperatorFullMatrix(
matrix,
is_non_singular=True,
),
],
is_non_singular=True,
)
adjoint = operator.adjoint()
self.assertIsInstance(
adjoint,
block_diag.LinearOperatorBlockDiag)
self.assertEqual(2, len(adjoint.operators))
def test_block_diag_cholesky_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = block_diag.LinearOperatorBlockDiag(
[
linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_self_adjoint=True,
),
linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_self_adjoint=True,
),
],
is_positive_definite=True,
is_self_adjoint=True,
)
cholesky_factor = operator.cholesky()
self.assertIsInstance(
cholesky_factor,
block_diag.LinearOperatorBlockDiag)
self.assertEqual(2, len(cholesky_factor.operators))
self.assertIsInstance(
cholesky_factor.operators[0],
lower_triangular.LinearOperatorLowerTriangular)
self.assertIsInstance(
cholesky_factor.operators[1],
lower_triangular.LinearOperatorLowerTriangular
)
def test_block_diag_inverse_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = block_diag.LinearOperatorBlockDiag(
[
linalg.LinearOperatorFullMatrix(
matrix,
is_non_singular=True,
),
linalg.LinearOperatorFullMatrix(
matrix,
is_non_singular=True,
),
],
is_non_singular=True,
)
inverse = operator.inverse()
self.assertIsInstance(
inverse,
block_diag.LinearOperatorBlockDiag)
self.assertEqual(2, len(inverse.operators))
def test_is_non_singular_auto_set(self):
# Matrix with two positive eigenvalues, 11 and 8.
# The matrix values do not affect auto-setting of the flags.
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator_2 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator = block_diag.LinearOperatorBlockDiag(
[operator_1, operator_2],
is_positive_definite=False, # No reason it HAS to be False...
is_non_singular=None)
self.assertFalse(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
with self.assertRaisesRegexp(ValueError, "always non-singular"):
block_diag.LinearOperatorBlockDiag(
[operator_1, operator_2], is_non_singular=False)
def test_name(self):
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, name="left")
operator_2 = linalg.LinearOperatorFullMatrix(matrix, name="right")
operator = block_diag.LinearOperatorBlockDiag([operator_1, operator_2])
self.assertEqual("left_ds_right", operator.name)
def test_different_dtypes_raises(self):
operators = [
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3)),
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3).astype(np.float32))
]
with self.assertRaisesRegexp(TypeError, "same dtype"):
block_diag.LinearOperatorBlockDiag(operators)
def test_non_square_operator_raises(self):
operators = [
linalg.LinearOperatorFullMatrix(rng.rand(3, 4), is_square=False),
linalg.LinearOperatorFullMatrix(rng.rand(3, 3))
]
with self.assertRaisesRegexp(ValueError, "square matrices"):
block_diag.LinearOperatorBlockDiag(operators)
def test_empty_operators_raises(self):
with self.assertRaisesRegexp(ValueError, "non-empty"):
block_diag.LinearOperatorBlockDiag([])
if __name__ == "__main__":
linear_operator_test_util.add_tests(SquareLinearOperatorBlockDiagTest)
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg/linear_operator_block_diag_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib
class SquareLinearOperatorFullMatrixTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
matrix = linear_operator_test_util.random_positive_definite_matrix(
shape, dtype)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
# Set the hints to none to test non-symmetric PD code paths.
operator = linalg.LinearOperatorFullMatrix(
lin_op_matrix,
is_square=True,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
return operator, matrix
def test_is_x_flags(self):
# Matrix with two positive eigenvalues.
matrix = [[1., 0.], [1., 11.]]
operator = linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
# Auto-detected.
self.assertTrue(operator.is_square)
@test_util.run_deprecated_v1
def test_assert_non_singular_raises_if_cond_too_big_but_finite(self):
with self.cached_session():
tril = linear_operator_test_util.random_tril_matrix(
shape=(50, 50), dtype=np.float32)
diag = np.logspace(-2, 2, 50).astype(np.float32)
tril = array_ops.matrix_set_diag(tril, diag)
matrix = math_ops.matmul(tril, tril, transpose_b=True).eval()
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.assertRaisesOpError("Singular matrix"):
# Ensure that we have finite condition number...just HUGE.
cond = np.linalg.cond(matrix)
self.assertTrue(np.isfinite(cond))
self.assertGreater(cond, 1e12)
operator.assert_non_singular().run()
def test_assert_non_singular_raises_if_cond_infinite(self):
with self.cached_session():
matrix = [[1., 1.], [1., 1.]]
# We don't pass the is_self_adjoint hint here, which means we take the
# generic code path.
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.assertRaisesOpError("Singular matrix"):
operator.assert_non_singular().run()
def test_assert_self_adjoint(self):
matrix = [[0., 1.], [0., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.cached_session():
with self.assertRaisesOpError("not equal to its adjoint"):
operator.assert_self_adjoint().run()
def test_assert_positive_definite(self):
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix, is_self_adjoint=True)
with self.cached_session():
with self.assertRaises(errors.InvalidArgumentError):
operator.assert_positive_definite().run()
class SquareLinearOperatorFullMatrixSymmetricPositiveDefiniteTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest.
In this test, the operator is constructed with hints that invoke the use of
a Cholesky decomposition for solves/determinant.
"""
def setUp(self):
# Increase atol/rtol from 1e-6 to 1e-5. This loss of precision happens,
# presumably, because we take different code paths for the operator and the
# matrix: the operator uses a Cholesky factorization, the matrix uses a
# standard solve.
self._atol[dtypes.float32] = 1e-5
self._rtol[dtypes.float32] = 1e-5
self._atol[dtypes.float64] = 1e-10
self._rtol[dtypes.float64] = 1e-10
@staticmethod
def dtypes_to_test():
return [dtypes.float32, dtypes.float64]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
# Matrix is always symmetric and positive definite in this class.
del ensure_self_adjoint_and_pd
shape = list(build_info.shape)
matrix = linear_operator_test_util.random_positive_definite_matrix(
shape, dtype, force_well_conditioned=True)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
operator = linalg.LinearOperatorFullMatrix(
lin_op_matrix,
is_square=True,
is_self_adjoint=True,
is_positive_definite=True)
return operator, matrix
def test_is_x_flags(self):
# Matrix with two positive eigenvalues.
matrix = [[1., 0.], [0., 7.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_positive_definite=True, is_self_adjoint=True)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_self_adjoint)
# Should be auto-set
self.assertTrue(operator.is_non_singular)
self.assertTrue(operator._can_use_cholesky)
self.assertTrue(operator.is_square)
def test_assert_non_singular(self):
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
with self.cached_session():
# Cholesky decomposition may fail, so the error is not specific to
# non-singularity.
with self.assertRaisesOpError(""):
operator.assert_non_singular().run()
def test_assert_self_adjoint(self):
matrix = [[0., 1.], [0., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
with self.cached_session():
with self.assertRaisesOpError("not equal to its adjoint"):
operator.assert_self_adjoint().run()
def test_assert_positive_definite(self):
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
with self.cached_session():
# Cholesky decomposition may fail, so the error is not specific to
# non-singularity.
with self.assertRaisesOpError(""):
operator.assert_positive_definite().run()
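# Illustrative-only sketch (not part of the TensorFlow test suite): a
# hypothetical helper, never called by the tests, showing with plain NumPy the
# Cholesky-based solve path that the is_self_adjoint / is_positive_definite
# hints in the class above enable. For a symmetric positive-definite A with
# Cholesky factor L (A = L L^T), two triangular solves reproduce the general
# solve.
def _np_cholesky_solve_sketch():
  a = np.array([[4., 1.], [1., 3.]])  # Symmetric positive-definite matrix.
  b = np.array([[1.], [2.]])
  chol = np.linalg.cholesky(a)  # Lower-triangular factor L.
  y = np.linalg.solve(chol, b)  # Solve L y = b.
  x = np.linalg.solve(chol.T, y)  # Solve L^T x = y.
  np.testing.assert_allclose(np.linalg.solve(a, b), x)
  return x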
class NonSquareLinearOperatorFullMatrixTest(
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
matrix = linear_operator_test_util.random_normal(shape, dtype=dtype)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
operator = linalg.LinearOperatorFullMatrix(lin_op_matrix, is_square=True)
return operator, matrix
def test_is_x_flags(self):
matrix = [[3., 2., 1.], [1., 1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix,
is_self_adjoint=False)
self.assertEqual(operator.is_positive_definite, None)
self.assertEqual(operator.is_non_singular, None)
self.assertFalse(operator.is_self_adjoint)
self.assertFalse(operator.is_square)
def test_matrix_must_have_at_least_two_dims_or_raises(self):
with self.assertRaisesRegexp(ValueError, "at least 2 dimensions"):
linalg.LinearOperatorFullMatrix([1.])
if __name__ == "__main__":
linear_operator_test_util.add_tests(SquareLinearOperatorFullMatrixTest)
linear_operator_test_util.add_tests(NonSquareLinearOperatorFullMatrixTest)
linear_operator_test_util.add_tests(
SquareLinearOperatorFullMatrixSymmetricPositiveDefiniteTest)
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_kronecker as kronecker
from tensorflow.python.ops.linalg import linear_operator_lower_triangular as lower_triangular
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(0)
def _kronecker_dense(factors):
"""Convert a list of factors, into a dense Kronecker product."""
product = factors[0]
for factor in factors[1:]:
product = product[..., array_ops.newaxis, :, array_ops.newaxis]
factor_to_mul = factor[..., array_ops.newaxis, :, array_ops.newaxis, :]
product *= factor_to_mul
product = array_ops.reshape(
product,
shape=array_ops.concat(
[array_ops.shape(product)[:-4],
[array_ops.shape(product)[-4] * array_ops.shape(product)[-3],
array_ops.shape(product)[-2] * array_ops.shape(product)[-1]]
], axis=0))
return product
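# Illustrative-only cross check (not part of the TensorFlow test suite): a
# hypothetical helper, never called by the tests, showing that for unbatched
# 2-D factors the reshape-based _kronecker_dense above computes the same
# matrix as repeated np.kron, which is the easier-to-read definition.
def _np_kronecker_sketch(factors):
  """Dense Kronecker product of unbatched 2-D NumPy factors via np.kron."""
  product = factors[0]
  for factor in factors[1:]:
    product = np.kron(product, factor)
  return product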
class KroneckerDenseTest(test.TestCase):
@test_util.run_deprecated_v1
def testKroneckerDenseMatrix(self):
x = ops.convert_to_tensor([[2., 3.], [1., 2.]], dtype=dtypes.float32)
y = ops.convert_to_tensor([[1., 2.], [5., -1.]], dtype=dtypes.float32)
# From explicitly writing out the Kronecker product of x and y.
z = ops.convert_to_tensor([
[2., 4., 3., 6.],
[10., -2., 15., -3.],
[1., 2., 2., 4.],
[5., -1., 10., -2.]], dtype=dtypes.float32)
# From explicitly writing out the Kronecker product of y and x.
w = ops.convert_to_tensor([
[2., 3., 4., 6.],
[1., 2., 2., 4.],
[10., 15., -2., -3.],
[5., 10., -1., -2.]], dtype=dtypes.float32)
with self.cached_session():
self.assertAllClose(_kronecker_dense([x, y]).eval(), self.evaluate(z))
self.assertAllClose(_kronecker_dense([y, x]).eval(), self.evaluate(w))
class SquareLinearOperatorKroneckerTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def setUp(self):
# Increase from 1e-6 to 1e-4
self._atol[dtypes.float32] = 1e-4
self._atol[dtypes.complex64] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._rtol[dtypes.complex64] = 1e-4
@staticmethod
def operator_shapes_infos():
shape_info = linear_operator_test_util.OperatorShapesInfo
return [
shape_info((1, 1), factors=[(1, 1), (1, 1)]),
shape_info((8, 8), factors=[(2, 2), (2, 2), (2, 2)]),
shape_info((12, 12), factors=[(2, 2), (3, 3), (2, 2)]),
shape_info((1, 3, 3), factors=[(1, 1), (1, 3, 3)]),
shape_info((3, 6, 6), factors=[(3, 1, 1), (1, 2, 2), (1, 3, 3)]),
]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
# Kronecker products constructed below will be from symmetric
# positive-definite matrices.
del ensure_self_adjoint_and_pd
shape = list(build_info.shape)
expected_factors = build_info.__dict__["factors"]
matrices = [
linear_operator_test_util.random_positive_definite_matrix(
block_shape, dtype, force_well_conditioned=True)
for block_shape in expected_factors
]
lin_op_matrices = matrices
if use_placeholder:
lin_op_matrices = [
array_ops.placeholder_with_default(m, shape=None) for m in matrices]
operator = kronecker.LinearOperatorKronecker(
[linalg.LinearOperatorFullMatrix(
l,
is_square=True,
is_self_adjoint=True,
is_positive_definite=True)
for l in lin_op_matrices])
matrices = linear_operator_util.broadcast_matrix_batch_dims(matrices)
kronecker_dense = _kronecker_dense(matrices)
if not use_placeholder:
kronecker_dense.set_shape(shape)
return operator, kronecker_dense
def test_is_x_flags(self):
# Matrix with two positive eigenvalues, 1 and 1.
# The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = kronecker.LinearOperatorKronecker(
[linalg.LinearOperatorFullMatrix(matrix),
linalg.LinearOperatorFullMatrix(matrix)],
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
def test_is_non_singular_auto_set(self):
# Matrix with two positive eigenvalues, 11 and 8.
# The matrix values do not affect auto-setting of the flags.
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator_2 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator = kronecker.LinearOperatorKronecker(
[operator_1, operator_2],
is_positive_definite=False, # No reason it HAS to be False...
is_non_singular=None)
self.assertFalse(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
with self.assertRaisesRegexp(ValueError, "always non-singular"):
kronecker.LinearOperatorKronecker(
[operator_1, operator_2], is_non_singular=False)
def test_name(self):
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, name="left")
operator_2 = linalg.LinearOperatorFullMatrix(matrix, name="right")
operator = kronecker.LinearOperatorKronecker([operator_1, operator_2])
self.assertEqual("left_x_right", operator.name)
def test_different_dtypes_raises(self):
operators = [
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3)),
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3).astype(np.float32))
]
with self.assertRaisesRegexp(TypeError, "same dtype"):
kronecker.LinearOperatorKronecker(operators)
def test_empty_or_one_operators_raises(self):
with self.assertRaisesRegexp(ValueError, ">=1 operators"):
kronecker.LinearOperatorKronecker([])
def test_kronecker_adjoint_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = kronecker.LinearOperatorKronecker(
[
linalg.LinearOperatorFullMatrix(
matrix, is_non_singular=True),
linalg.LinearOperatorFullMatrix(
matrix, is_non_singular=True),
],
is_non_singular=True,
)
adjoint = operator.adjoint()
self.assertIsInstance(
adjoint,
kronecker.LinearOperatorKronecker)
self.assertEqual(2, len(adjoint.operators))
def test_kronecker_cholesky_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = kronecker.LinearOperatorKronecker(
[
linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_self_adjoint=True,
),
linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_self_adjoint=True,
),
],
is_positive_definite=True,
is_self_adjoint=True,
)
cholesky_factor = operator.cholesky()
self.assertIsInstance(
cholesky_factor,
kronecker.LinearOperatorKronecker)
self.assertEqual(2, len(cholesky_factor.operators))
self.assertIsInstance(
cholesky_factor.operators[0],
lower_triangular.LinearOperatorLowerTriangular)
self.assertIsInstance(
cholesky_factor.operators[1],
lower_triangular.LinearOperatorLowerTriangular)
def test_kronecker_inverse_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = kronecker.LinearOperatorKronecker(
[
linalg.LinearOperatorFullMatrix(
matrix, is_non_singular=True),
linalg.LinearOperatorFullMatrix(
matrix, is_non_singular=True),
],
is_non_singular=True,
)
inverse = operator.inverse()
self.assertIsInstance(
inverse,
kronecker.LinearOperatorKronecker)
self.assertEqual(2, len(inverse.operators))
if __name__ == "__main__":
linear_operator_test_util.add_tests(SquareLinearOperatorKroneckerTest)
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg/linear_operator_kronecker_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_inversion
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib
LinearOperatorInversion = linear_operator_inversion.LinearOperatorInversion # pylint: disable=invalid-name
class LinearOperatorInversionTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def setUp(self):
self._atol[dtypes.complex64] = 1e-5
self._rtol[dtypes.complex64] = 1e-5
def operator_and_matrix(self,
build_info,
dtype,
use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
if ensure_self_adjoint_and_pd:
matrix = linear_operator_test_util.random_positive_definite_matrix(
shape, dtype, force_well_conditioned=True)
else:
matrix = linear_operator_test_util.random_tril_matrix(
shape, dtype, force_well_conditioned=True, remove_upper=True)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
if ensure_self_adjoint_and_pd:
operator = LinearOperatorInversion(
linalg.LinearOperatorFullMatrix(
lin_op_matrix, is_positive_definite=True, is_self_adjoint=True))
else:
operator = LinearOperatorInversion(
linalg.LinearOperatorLowerTriangular(lin_op_matrix))
return operator, linalg.inv(matrix)
def test_base_operator_hint_used(self):
# The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
operator_inv = LinearOperatorInversion(operator)
self.assertTrue(operator_inv.is_positive_definite)
self.assertTrue(operator_inv.is_non_singular)
self.assertFalse(operator_inv.is_self_adjoint)
def test_supplied_hint_used(self):
# The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix)
operator_inv = LinearOperatorInversion(
operator,
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator_inv.is_positive_definite)
self.assertTrue(operator_inv.is_non_singular)
self.assertFalse(operator_inv.is_self_adjoint)
def test_contradicting_hints_raise(self):
# The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_positive_definite=False)
with self.assertRaisesRegexp(ValueError, "positive-definite"):
LinearOperatorInversion(operator, is_positive_definite=True)
operator = linalg.LinearOperatorFullMatrix(matrix, is_self_adjoint=False)
with self.assertRaisesRegexp(ValueError, "self-adjoint"):
LinearOperatorInversion(operator, is_self_adjoint=True)
def test_singular_raises(self):
# The matrix values do not affect auto-setting of the flags.
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=False)
with self.assertRaisesRegexp(ValueError, "is_non_singular"):
LinearOperatorInversion(operator)
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.assertRaisesRegexp(ValueError, "is_non_singular"):
LinearOperatorInversion(operator, is_non_singular=False)
def test_name(self):
matrix = [[11., 0.], [1., 8.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, name="my_operator", is_non_singular=True)
operator = LinearOperatorInversion(operator)
self.assertEqual("my_operator_inv", operator.name)
if __name__ == "__main__":
linear_operator_test_util.add_tests(LinearOperatorInversionTest)
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg/linear_operator_inversion_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_adjoint
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib
LinearOperatorAdjoint = linear_operator_adjoint.LinearOperatorAdjoint # pylint: disable=invalid-name
class LinearOperatorAdjointTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def setUp(self):
self._atol[dtypes.complex64] = 1e-5
self._rtol[dtypes.complex64] = 1e-5
def operator_and_matrix(self,
build_info,
dtype,
use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
if ensure_self_adjoint_and_pd:
matrix = linear_operator_test_util.random_positive_definite_matrix(
shape, dtype, force_well_conditioned=True)
else:
matrix = linear_operator_test_util.random_tril_matrix(
shape, dtype, force_well_conditioned=True, remove_upper=True)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
if ensure_self_adjoint_and_pd:
operator = LinearOperatorAdjoint(
linalg.LinearOperatorFullMatrix(
lin_op_matrix, is_positive_definite=True, is_self_adjoint=True))
else:
operator = LinearOperatorAdjoint(
linalg.LinearOperatorLowerTriangular(lin_op_matrix))
return operator, linalg.adjoint(matrix)
def test_base_operator_hint_used(self):
# The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
operator_adjoint = LinearOperatorAdjoint(operator)
self.assertTrue(operator_adjoint.is_positive_definite)
self.assertTrue(operator_adjoint.is_non_singular)
self.assertFalse(operator_adjoint.is_self_adjoint)
def test_supplied_hint_used(self):
# The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix)
operator_adjoint = LinearOperatorAdjoint(
operator,
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator_adjoint.is_positive_definite)
self.assertTrue(operator_adjoint.is_non_singular)
self.assertFalse(operator_adjoint.is_self_adjoint)
def test_contradicting_hints_raise(self):
# The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_positive_definite=False)
with self.assertRaisesRegexp(ValueError, "positive-definite"):
LinearOperatorAdjoint(operator, is_positive_definite=True)
operator = linalg.LinearOperatorFullMatrix(matrix, is_self_adjoint=False)
with self.assertRaisesRegexp(ValueError, "self-adjoint"):
LinearOperatorAdjoint(operator, is_self_adjoint=True)
def test_name(self):
matrix = [[11., 0.], [1., 8.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, name="my_operator", is_non_singular=True)
operator = LinearOperatorAdjoint(operator)
self.assertEqual("my_operator_adjoint", operator.name)
def test_matmul_adjoint_operator(self):
matrix1 = np.random.randn(4, 4)
matrix2 = np.random.randn(4, 4)
full_matrix1 = linalg.LinearOperatorFullMatrix(matrix1)
full_matrix2 = linalg.LinearOperatorFullMatrix(matrix2)
self.assertAllClose(
np.matmul(matrix1, matrix2.T),
self.evaluate(
full_matrix1.matmul(full_matrix2, adjoint_arg=True).to_dense()))
self.assertAllClose(
np.matmul(matrix1.T, matrix2),
self.evaluate(
full_matrix1.matmul(full_matrix2, adjoint=True).to_dense()))
self.assertAllClose(
np.matmul(matrix1.T, matrix2.T),
self.evaluate(
full_matrix1.matmul(
full_matrix2, adjoint=True, adjoint_arg=True).to_dense()))
def test_matmul_adjoint_complex_operator(self):
matrix1 = np.random.randn(4, 4) + 1j * np.random.randn(4, 4)
matrix2 = np.random.randn(4, 4) + 1j * np.random.randn(4, 4)
full_matrix1 = linalg.LinearOperatorFullMatrix(matrix1)
full_matrix2 = linalg.LinearOperatorFullMatrix(matrix2)
self.assertAllClose(
np.matmul(matrix1, matrix2.conj().T),
self.evaluate(
full_matrix1.matmul(full_matrix2, adjoint_arg=True).to_dense()))
self.assertAllClose(
np.matmul(matrix1.conj().T, matrix2),
self.evaluate(
full_matrix1.matmul(full_matrix2, adjoint=True).to_dense()))
self.assertAllClose(
np.matmul(matrix1.conj().T, matrix2.conj().T),
self.evaluate(
full_matrix1.matmul(
full_matrix2, adjoint=True, adjoint_arg=True).to_dense()))
def test_solve_adjoint_operator(self):
matrix1 = self.evaluate(
linear_operator_test_util.random_tril_matrix(
[4, 4], dtype=dtypes.float64, force_well_conditioned=True))
matrix2 = np.random.randn(4, 4)
full_matrix1 = linalg.LinearOperatorLowerTriangular(
matrix1, is_non_singular=True)
full_matrix2 = linalg.LinearOperatorFullMatrix(matrix2)
self.assertAllClose(
self.evaluate(linalg.triangular_solve(matrix1, matrix2.T)),
self.evaluate(
full_matrix1.solve(full_matrix2, adjoint_arg=True).to_dense()))
self.assertAllClose(
self.evaluate(
linalg.triangular_solve(
matrix1.T, matrix2, lower=False)),
self.evaluate(
full_matrix1.solve(full_matrix2, adjoint=True).to_dense()))
self.assertAllClose(
self.evaluate(
linalg.triangular_solve(matrix1.T, matrix2.T, lower=False)),
self.evaluate(
full_matrix1.solve(
full_matrix2, adjoint=True, adjoint_arg=True).to_dense()))
def test_solve_adjoint_complex_operator(self):
matrix1 = self.evaluate(linear_operator_test_util.random_tril_matrix(
[4, 4], dtype=dtypes.complex128, force_well_conditioned=True) +
1j * linear_operator_test_util.random_tril_matrix(
[4, 4], dtype=dtypes.complex128,
force_well_conditioned=True))
matrix2 = np.random.randn(4, 4) + 1j * np.random.randn(4, 4)
full_matrix1 = linalg.LinearOperatorLowerTriangular(
matrix1, is_non_singular=True)
full_matrix2 = linalg.LinearOperatorFullMatrix(matrix2)
self.assertAllClose(
self.evaluate(linalg.triangular_solve(matrix1, matrix2.conj().T)),
self.evaluate(
full_matrix1.solve(full_matrix2, adjoint_arg=True).to_dense()))
self.assertAllClose(
self.evaluate(
linalg.triangular_solve(
matrix1.conj().T, matrix2, lower=False)),
self.evaluate(
full_matrix1.solve(full_matrix2, adjoint=True).to_dense()))
self.assertAllClose(
self.evaluate(
linalg.triangular_solve(
matrix1.conj().T, matrix2.conj().T, lower=False)),
self.evaluate(
full_matrix1.solve(
full_matrix2, adjoint=True, adjoint_arg=True).to_dense()))
class LinearOperatorAdjointNonSquareTest(
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
"""Tests done in the base class NonSquareLinearOperatorDerivedClassTest."""
def operator_and_matrix(self, build_info, dtype, use_placeholder):
shape_before_adjoint = list(build_info.shape)
# We need to swap the last two dimensions because we are taking the adjoint
# of this operator.
shape_before_adjoint[-1], shape_before_adjoint[-2] = (
shape_before_adjoint[-2], shape_before_adjoint[-1])
matrix = linear_operator_test_util.random_normal(
shape_before_adjoint, dtype=dtype)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
operator = LinearOperatorAdjoint(
linalg.LinearOperatorFullMatrix(lin_op_matrix))
return operator, linalg.adjoint(matrix)
if __name__ == "__main__":
linear_operator_test_util.add_tests(LinearOperatorAdjointTest)
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class AssertZeroImagPartTest(test.TestCase):
@test_util.run_deprecated_v1
def test_real_tensor_doesnt_raise(self):
x = ops.convert_to_tensor([0., 2, 3])
with self.cached_session():
# Should not raise.
linear_operator_util.assert_zero_imag_part(x, message="ABC123").run()
@test_util.run_deprecated_v1
def test_complex_tensor_with_imag_zero_doesnt_raise(self):
x = ops.convert_to_tensor([1., 0, 3])
y = ops.convert_to_tensor([0., 0, 0])
z = math_ops.complex(x, y)
with self.cached_session():
# Should not raise.
linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
def test_complex_tensor_with_nonzero_imag_raises(self):
x = ops.convert_to_tensor([1., 2, 0])
y = ops.convert_to_tensor([1., 2, 0])
z = math_ops.complex(x, y)
with self.cached_session():
with self.assertRaisesOpError("ABC123"):
linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
class AssertNoEntriesWithModulusZeroTest(test.TestCase):
@test_util.run_deprecated_v1
def test_nonzero_real_tensor_doesnt_raise(self):
x = ops.convert_to_tensor([1., 2, 3])
with self.cached_session():
# Should not raise.
linear_operator_util.assert_no_entries_with_modulus_zero(
x, message="ABC123").run()
@test_util.run_deprecated_v1
def test_nonzero_complex_tensor_doesnt_raise(self):
x = ops.convert_to_tensor([1., 0, 3])
y = ops.convert_to_tensor([1., 2, 0])
z = math_ops.complex(x, y)
with self.cached_session():
# Should not raise.
linear_operator_util.assert_no_entries_with_modulus_zero(
z, message="ABC123").run()
def test_zero_real_tensor_raises(self):
x = ops.convert_to_tensor([1., 0, 3])
with self.cached_session():
with self.assertRaisesOpError("ABC123"):
linear_operator_util.assert_no_entries_with_modulus_zero(
x, message="ABC123").run()
def test_zero_complex_tensor_raises(self):
x = ops.convert_to_tensor([1., 2, 0])
y = ops.convert_to_tensor([1., 2, 0])
z = math_ops.complex(x, y)
with self.cached_session():
with self.assertRaisesOpError("ABC123"):
linear_operator_util.assert_no_entries_with_modulus_zero(
z, message="ABC123").run()
class BroadcastMatrixBatchDimsTest(test.TestCase):
def test_zero_batch_matrices_returned_as_empty_list(self):
self.assertAllEqual([],
linear_operator_util.broadcast_matrix_batch_dims([]))
def test_one_batch_matrix_returned_after_tensor_conversion(self):
arr = rng.rand(2, 3, 4)
tensor, = linear_operator_util.broadcast_matrix_batch_dims([arr])
self.assertTrue(isinstance(tensor, ops.Tensor))
with self.cached_session():
self.assertAllClose(arr, self.evaluate(tensor))
@test_util.run_deprecated_v1
def test_static_dims_broadcast(self):
# x.batch_shape = [3, 1, 2]
# y.batch_shape = [4, 1]
# broadcast batch shape = [3, 4, 2]
x = rng.rand(3, 1, 2, 1, 5)
y = rng.rand(4, 1, 3, 7)
batch_of_zeros = np.zeros((3, 4, 2, 1, 1))
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x, y])
with self.cached_session() as sess:
self.assertAllEqual(x_bc_expected.shape, x_bc.get_shape())
self.assertAllEqual(y_bc_expected.shape, y_bc.get_shape())
x_bc_, y_bc_ = self.evaluate([x_bc, y_bc])
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
def test_static_dims_broadcast_second_arg_higher_rank(self):
# x.batch_shape = [1, 2]
# y.batch_shape = [1, 3, 1]
# broadcast batch shape = [1, 3, 2]
x = rng.rand(1, 2, 1, 5)
y = rng.rand(1, 3, 2, 3, 7)
batch_of_zeros = np.zeros((1, 3, 2, 1, 1))
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x, y])
with self.cached_session() as sess:
self.assertAllEqual(x_bc_expected.shape, x_bc.get_shape())
self.assertAllEqual(y_bc_expected.shape, y_bc.get_shape())
x_bc_, y_bc_ = self.evaluate([x_bc, y_bc])
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
@test_util.run_deprecated_v1
def test_dynamic_dims_broadcast_32bit(self):
# x.batch_shape = [3, 1, 2]
# y.batch_shape = [4, 1]
# broadcast batch shape = [3, 4, 2]
x = rng.rand(3, 1, 2, 1, 5).astype(np.float32)
y = rng.rand(4, 1, 3, 7).astype(np.float32)
batch_of_zeros = np.zeros((3, 4, 2, 1, 1)).astype(np.float32)
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_ph = array_ops.placeholder(dtypes.float32)
y_ph = array_ops.placeholder(dtypes.float32)
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x_ph, y_ph])
with self.cached_session() as sess:
x_bc_, y_bc_ = sess.run([x_bc, y_bc], feed_dict={x_ph: x, y_ph: y})
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
@test_util.run_deprecated_v1
def test_dynamic_dims_broadcast_32bit_second_arg_higher_rank(self):
# x.batch_shape = [1, 2]
# y.batch_shape = [3, 4, 1]
# broadcast batch shape = [3, 4, 2]
x = rng.rand(1, 2, 1, 5).astype(np.float32)
y = rng.rand(3, 4, 1, 3, 7).astype(np.float32)
batch_of_zeros = np.zeros((3, 4, 2, 1, 1)).astype(np.float32)
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_ph = array_ops.placeholder(dtypes.float32)
y_ph = array_ops.placeholder(dtypes.float32)
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x_ph, y_ph])
with self.cached_session() as sess:
x_bc_, y_bc_ = sess.run([x_bc, y_bc], feed_dict={x_ph: x, y_ph: y})
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
def test_less_than_two_dims_raises_static(self):
x = rng.rand(3)
y = rng.rand(1, 1)
with self.assertRaisesRegexp(ValueError, "at least two dimensions"):
linear_operator_util.broadcast_matrix_batch_dims([x, y])
with self.assertRaisesRegexp(ValueError, "at least two dimensions"):
linear_operator_util.broadcast_matrix_batch_dims([y, x])
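# Illustrative-only sketch (not part of the TensorFlow test suite): a
# hypothetical helper, never called by the tests, showing with plain NumPy the
# "add a batch of zeros" trick the tests above use to build expected broadcast
# results. Adding zeros shaped like the broadcast batch shape plus trailing
# [1, 1] matrix dims broadcasts a stack of matrices to the common batch shape
# without changing any values.
def _np_broadcast_batch_sketch():
  x = rng.rand(3, 1, 2, 1, 5)  # Batch shape [3, 1, 2], matrix shape [1, 5].
  y = rng.rand(4, 1, 3, 7)  # Batch shape [4, 1], matrix shape [3, 7].
  batch_of_zeros = np.zeros((3, 4, 2, 1, 1))  # Broadcast batch shape [3, 4, 2].
  x_bc = x + batch_of_zeros  # Shape (3, 4, 2, 1, 5).
  y_bc = y + batch_of_zeros  # Shape (3, 4, 2, 3, 7).
  return x_bc, y_bc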
class CholeskySolveWithBroadcastTest(test.TestCase):
@test_util.run_deprecated_v1
def test_static_dims_broadcast(self):
# batch_shape = [2]
chol = rng.rand(3, 3)
rhs = rng.rand(2, 3, 7)
chol_broadcast = chol + np.zeros((2, 1, 1))
with self.cached_session():
result = linear_operator_util.cholesky_solve_with_broadcast(chol, rhs)
self.assertAllEqual((2, 3, 7), result.get_shape())
expected = linalg_ops.cholesky_solve(chol_broadcast, rhs)
self.assertAllClose(expected.eval(), self.evaluate(result))
@test_util.run_deprecated_v1
def test_dynamic_dims_broadcast_64bit(self):
# batch_shape = [2, 2]
chol = rng.rand(2, 3, 3)
rhs = rng.rand(2, 1, 3, 7)
chol_broadcast = chol + np.zeros((2, 2, 1, 1))
rhs_broadcast = rhs + np.zeros((2, 2, 1, 1))
chol_ph = array_ops.placeholder(dtypes.float64)
rhs_ph = array_ops.placeholder(dtypes.float64)
with self.cached_session() as sess:
result, expected = sess.run(
[
linear_operator_util.cholesky_solve_with_broadcast(
chol_ph, rhs_ph),
linalg_ops.cholesky_solve(chol_broadcast, rhs_broadcast)
],
feed_dict={
chol_ph: chol,
rhs_ph: rhs,
})
self.assertAllClose(expected, result)
class MatrixSolveWithBroadcastTest(test.TestCase):
@test_util.run_deprecated_v1
def test_static_dims_broadcast_matrix_has_extra_dims(self):
# batch_shape = [2]
matrix = rng.rand(2, 3, 3)
rhs = rng.rand(3, 7)
rhs_broadcast = rhs + np.zeros((2, 1, 1))
with self.cached_session():
result = linear_operator_util.matrix_solve_with_broadcast(
matrix, rhs)
self.assertAllEqual((2, 3, 7), result.get_shape())
expected = linalg_ops.matrix_solve(matrix, rhs_broadcast)
self.assertAllClose(expected.eval(), self.evaluate(result))
@test_util.run_deprecated_v1
def test_static_dims_broadcast_rhs_has_extra_dims(self):
# Since the second arg has extra (batch) dims and the domain dim of the first
# arg is larger than the number of right-hand-side columns, the code will
# "flip" the extra dims of the second arg to the far right, treating them as
# extra right-hand-side columns (then call the matrix function, then flip
# back).
# We have verified that this optimization indeed happens. How? We stepped
# through with a debugger.
# batch_shape = [2]
matrix = rng.rand(3, 3)
rhs = rng.rand(2, 3, 2)
matrix_broadcast = matrix + np.zeros((2, 1, 1))
with self.cached_session():
result = linear_operator_util.matrix_solve_with_broadcast(matrix, rhs)
self.assertAllEqual((2, 3, 2), result.get_shape())
expected = linalg_ops.matrix_solve(matrix_broadcast, rhs)
self.assertAllClose(expected.eval(), self.evaluate(result))
@test_util.run_deprecated_v1
def test_static_dims_broadcast_rhs_has_extra_dims_dynamic(self):
# Since the second arg has extra (batch) dims and the domain dim of the first
# arg is larger than the number of right-hand-side columns, the code will
# "flip" the extra dims of the second arg to the far right, treating them as
# extra right-hand-side columns (then call the matrix function, then flip
# back).
# We have verified that this optimization indeed happens. How? We stepped
# through with a debugger.
# batch_shape = [2]
matrix = rng.rand(3, 3)
rhs = rng.rand(2, 3, 2)
matrix_broadcast = matrix + np.zeros((2, 1, 1))
matrix_ph = array_ops.placeholder(dtypes.float64, shape=[None, None])
rhs_ph = array_ops.placeholder(dtypes.float64, shape=[None, None, None])
with self.cached_session():
result = linear_operator_util.matrix_solve_with_broadcast(matrix_ph,
rhs_ph)
self.assertAllEqual(3, result.shape.ndims)
expected = linalg_ops.matrix_solve(matrix_broadcast, rhs)
self.assertAllClose(
self.evaluate(expected),
result.eval(feed_dict={
matrix_ph: matrix,
rhs_ph: rhs
}))
@test_util.run_deprecated_v1
def test_static_dims_broadcast_rhs_has_extra_dims_and_adjoint(self):
# Since the second arg has extra (batch) dims and the domain dim of the first
# arg is larger than the number of right-hand-side columns, the code will
# "flip" the extra dims of the second arg to the far right, treating them as
# extra right-hand-side columns (then call the matrix function, then flip
# back).
# We have verified that this optimization indeed happens. How? We stepped
# through with a debugger.
# batch_shape = [2]
matrix = rng.rand(3, 3)
rhs = rng.rand(2, 3, 2)
matrix_broadcast = matrix + np.zeros((2, 1, 1))
with self.cached_session():
result = linear_operator_util.matrix_solve_with_broadcast(
matrix, rhs, adjoint=True)
self.assertAllEqual((2, 3, 2), result.get_shape())
expected = linalg_ops.matrix_solve(matrix_broadcast, rhs, adjoint=True)
self.assertAllClose(expected.eval(), self.evaluate(result))
@test_util.run_deprecated_v1
def test_dynamic_dims_broadcast_64bit(self):
# batch_shape = [2, 2]
matrix = rng.rand(2, 3, 3)
rhs = rng.rand(2, 1, 3, 7)
matrix_broadcast = matrix + np.zeros((2, 2, 1, 1))
rhs_broadcast = rhs + np.zeros((2, 2, 1, 1))
matrix_ph = array_ops.placeholder(dtypes.float64)
rhs_ph = array_ops.placeholder(dtypes.float64)
with self.cached_session() as sess:
result, expected = sess.run(
[
linear_operator_util.matrix_solve_with_broadcast(
matrix_ph, rhs_ph),
linalg_ops.matrix_solve(matrix_broadcast, rhs_broadcast)
],
feed_dict={
matrix_ph: matrix,
rhs_ph: rhs,
})
self.assertAllClose(expected, result)
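# Illustrative-only sketch (not part of the TensorFlow test suite): a
# hypothetical helper, never called by the tests, showing with plain NumPy the
# "flip" described in the comments above. When the matrix has no batch dims
# but the rhs does, the extra rhs batch dims can be folded into the column
# dimension, solved in a single call, and folded back.
def _np_flip_solve_sketch():
  matrix = rng.rand(3, 3)  # Unbatched system matrix.
  rhs = rng.rand(2, 3, 4)  # Rhs with one extra batch dim.
  # Fold the batch dim into the columns: shape (3, 2 * 4).
  rhs_flat = np.transpose(rhs, [1, 0, 2]).reshape(3, -1)
  solution_flat = np.linalg.solve(matrix, rhs_flat)
  # Unfold back to shape (2, 3, 4).
  solution = np.transpose(solution_flat.reshape(3, 2, 4), [1, 0, 2])
  # Same result as solving each batch member separately.
  expected = np.stack([np.linalg.solve(matrix, rhs[i]) for i in range(2)])
  np.testing.assert_allclose(expected, solution)
  return solution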
class MatrixTriangularSolveWithBroadcastTest(test.TestCase):
@test_util.run_deprecated_v1
def test_static_dims_broadcast_matrix_has_extra_dims(self):
# batch_shape = [2]
matrix = rng.rand(2, 3, 3)
rhs = rng.rand(3, 7)
rhs_broadcast = rhs + np.zeros((2, 1, 1))
with self.cached_session():
result = linear_operator_util.matrix_triangular_solve_with_broadcast(
matrix, rhs)
self.assertAllEqual((2, 3, 7), result.get_shape())
expected = linalg_ops.matrix_triangular_solve(matrix, rhs_broadcast)
self.assertAllClose(expected.eval(), self.evaluate(result))
@test_util.run_deprecated_v1
def test_static_dims_broadcast_rhs_has_extra_dims(self):
# Since the second arg has extra (batch) dims and the domain dim of the first
# arg is larger than the number of right-hand-side columns, the code will
# "flip" the extra dims of the second arg to the far right, treating them as
# extra right-hand-side columns (then call the matrix function, then flip
# back).
# We have verified that this optimization indeed happens. How? We stepped
# through with a debugger.
# batch_shape = [2]
matrix = rng.rand(3, 3)
rhs = rng.rand(2, 3, 2)
matrix_broadcast = matrix + np.zeros((2, 1, 1))
with self.cached_session():
result = linear_operator_util.matrix_triangular_solve_with_broadcast(
matrix, rhs)
self.assertAllEqual((2, 3, 2), result.get_shape())
expected = linalg_ops.matrix_triangular_solve(matrix_broadcast, rhs)
self.assertAllClose(expected.eval(), self.evaluate(result))
@test_util.run_deprecated_v1
def test_static_dims_broadcast_rhs_has_extra_dims_and_adjoint(self):
# Since the second arg has extra (batch) dims and the domain dim of the first
# arg is larger than the number of right-hand-side columns, the code will
# "flip" the extra dims of the second arg to the far right, treating them as
# extra right-hand-side columns (then call the matrix function, then flip
# back).
# We have verified that this optimization indeed happens. How? We stepped
# through with a debugger.
# batch_shape = [2]
matrix = rng.rand(3, 3)
rhs = rng.rand(2, 3, 2)
matrix_broadcast = matrix + np.zeros((2, 1, 1))
with self.cached_session():
result = linear_operator_util.matrix_triangular_solve_with_broadcast(
matrix, rhs, adjoint=True)
self.assertAllEqual((2, 3, 2), result.get_shape())
expected = linalg_ops.matrix_triangular_solve(
matrix_broadcast, rhs, adjoint=True)
self.assertAllClose(expected.eval(), self.evaluate(result))
@test_util.run_deprecated_v1
def test_dynamic_dims_broadcast_64bit(self):
# batch_shape = [2]
matrix = rng.rand(2, 3, 3)
rhs = rng.rand(3, 7)
rhs_broadcast = rhs + np.zeros((2, 1, 1))
matrix_ph = array_ops.placeholder(dtypes.float64)
rhs_ph = array_ops.placeholder(dtypes.float64)
with self.cached_session() as sess:
result, expected = sess.run(
[
linear_operator_util.matrix_triangular_solve_with_broadcast(
matrix_ph, rhs_ph),
linalg_ops.matrix_triangular_solve(matrix, rhs_broadcast)
],
feed_dict={
matrix_ph: matrix,
rhs_ph: rhs,
})
self.assertAllClose(expected, result)
class DomainDimensionStubOperator(object):
def __init__(self, domain_dimension):
self._domain_dimension = ops.convert_to_tensor(domain_dimension)
def domain_dimension_tensor(self):
return self._domain_dimension
class AssertCompatibleMatrixDimensionsTest(test.TestCase):
@test_util.run_deprecated_v1
def test_compatible_dimensions_do_not_raise(self):
with self.cached_session():
x = ops.convert_to_tensor(rng.rand(2, 3, 4))
operator = DomainDimensionStubOperator(3)
# Should not raise
linear_operator_util.assert_compatible_matrix_dimensions(
operator, x).run() # pyformat: disable
def test_incompatible_dimensions_raise(self):
with self.cached_session():
x = ops.convert_to_tensor(rng.rand(2, 4, 4))
operator = DomainDimensionStubOperator(3)
with self.assertRaisesOpError("Incompatible matrix dimensions"):
linear_operator_util.assert_compatible_matrix_dimensions(
operator, x).run() # pyformat: disable
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Kernel tests for tf.linalg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.ops.linalg import linear_operator_circulant
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
_to_complex = linear_operator_circulant._to_complex
class LinearOperatorCirculantBaseTest(object):
"""Common class for circulant tests."""
_atol = {
dtypes.float16: 1e-3,
dtypes.float32: 1e-6,
dtypes.float64: 1e-7,
dtypes.complex64: 1e-6,
dtypes.complex128: 1e-7
}
_rtol = {
dtypes.float16: 1e-3,
dtypes.float32: 1e-6,
dtypes.float64: 1e-7,
dtypes.complex64: 1e-6,
dtypes.complex128: 1e-7
}
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""We overwrite the FFT operation mapping for testing."""
with test.TestCase._constrain_devices_and_set_default(
self, sess, use_gpu, force_gpu) as sess:
with spectral_ops_test_util.fft_kernel_label_map():
yield sess
def _shape_to_spectrum_shape(self, shape):
# If spectrum.shape = batch_shape + [N],
# this creates an operator of shape batch_shape + [N, N]
return shape[:-1]
def _spectrum_to_circulant_1d(self, spectrum, shape, dtype):
"""Creates a circulant matrix from a spectrum.
Intentionally done in an explicit yet inefficient way. This provides a
cross check to the main code that uses fancy reshapes.
Args:
spectrum: Float or complex `Tensor`.
shape: Python list. Desired shape of returned matrix.
dtype: Type to cast the returned matrix to.
Returns:
Circulant (batch) matrix of desired `dtype`.
"""
spectrum = _to_complex(spectrum)
spectrum_shape = self._shape_to_spectrum_shape(shape)
domain_dimension = spectrum_shape[-1]
if not domain_dimension:
return array_ops.zeros(shape, dtype)
# Explicitly compute the action of spectrum on basis vectors.
matrix_rows = []
for m in range(domain_dimension):
x = np.zeros([domain_dimension])
# x is a basis vector.
x[m] = 1.0
fft_x = fft_ops.fft(math_ops.cast(x, spectrum.dtype))
h_convolve_x = fft_ops.ifft(spectrum * fft_x)
matrix_rows.append(h_convolve_x)
matrix = array_ops.stack(matrix_rows, axis=-1)
return math_ops.cast(matrix, dtype)
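# Illustrative NumPy-only sketch of the same 1-D construction as
# `_spectrum_to_circulant_1d` above (a rough cross check, not used by the
# tests): the convolution kernel is the IFFT of the spectrum, and column m of
# the dense circulant matrix is that kernel cyclically shifted by m.
def _np_circulant_1d_from_spectrum(spectrum):
  spectrum = np.asarray(spectrum, dtype=np.complex128)
  kernel = np.fft.ifft(spectrum)  # First column of the circulant matrix.
  n = kernel.shape[-1]
  # Column m satisfies matrix[k, m] = kernel[(k - m) % n].
  return np.stack([np.roll(kernel, m) for m in range(n)], axis=-1)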
class LinearOperatorCirculantTestSelfAdjointOperator(
LinearOperatorCirculantBaseTest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Test of LinearOperatorCirculant when operator is self-adjoint.
Real spectrum <==> Self adjoint operator.
Note that when the spectrum is real, the operator may still be complex.
"""
@staticmethod
def dtypes_to_test():
# This operator will always be complex because, although the spectrum is
# real, the matrix will not be real.
return [dtypes.complex64, dtypes.complex128]
def operator_and_matrix(
self, shape_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = shape_info.shape
# For this test class, we are creating real spectrums.
# We also want the spectrum to have eigenvalues bounded away from zero.
#
# spectrum is bounded away from zero.
spectrum = linear_operator_test_util.random_sign_uniform(
shape=self._shape_to_spectrum_shape(shape),
minval=1.,
maxval=2.)
if ensure_self_adjoint_and_pd:
spectrum = math_ops.abs(spectrum)
# If dtype is complex, cast spectrum to complex. The imaginary part will be
# zero, so the operator will still be self-adjoint.
spectrum = math_ops.cast(spectrum, dtype)
lin_op_spectrum = spectrum
if use_placeholder:
lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)
operator = linalg.LinearOperatorCirculant(
lin_op_spectrum,
is_self_adjoint=True,
is_positive_definite=True if ensure_self_adjoint_and_pd else None,
input_output_dtype=dtype)
mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
return operator, mat
@test_util.run_deprecated_v1
@test_util.disable_xla("No registered Const")
def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
with self.cached_session():
spectrum = math_ops.cast([1., 1j, -1j], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
matrix = operator.to_dense()
imag_matrix = math_ops.imag(matrix)
eps = np.finfo(np.float32).eps
np.testing.assert_allclose(
0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3)
class LinearOperatorCirculantTestHermitianSpectrum(
LinearOperatorCirculantBaseTest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Test of LinearOperatorCirculant when the spectrum is Hermitian.
Hermitian spectrum <==> Real valued operator. We test both real and complex
dtypes here though. So in some cases the matrix will be complex but with
zero imaginary part.
"""
def operator_and_matrix(
self, shape_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = shape_info.shape
# For this test class, we are creating Hermitian spectrums.
# We also want the spectrum to have eigenvalues bounded away from zero.
#
# pre_spectrum is bounded away from zero.
pre_spectrum = linear_operator_test_util.random_uniform(
shape=self._shape_to_spectrum_shape(shape),
dtype=dtype,
minval=1.,
maxval=2.)
pre_spectrum = math_ops.cast(math_ops.abs(pre_spectrum), dtype=dtype)
pre_spectrum_c = _to_complex(pre_spectrum)
# Real{IFFT[pre_spectrum]}
# = IFFT[EvenPartOf[pre_spectrum]]
# is the IFFT of something that is also bounded away from zero.
# Therefore, FFT[pre_h] would be a well-conditioned spectrum.
pre_h = fft_ops.ifft(pre_spectrum_c)
# A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
# So we will make spectrum = FFT[h], for real valued h.
h = math_ops.real(pre_h)
h_c = _to_complex(h)
spectrum = fft_ops.fft(h_c)
lin_op_spectrum = spectrum
if use_placeholder:
lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)
operator = linalg.LinearOperatorCirculant(
lin_op_spectrum,
input_output_dtype=dtype,
is_positive_definite=True if ensure_self_adjoint_and_pd else None,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
)
mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
return operator, mat
@test_util.run_deprecated_v1
@test_util.disable_xla("No registered Const")
def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
with self.cached_session():
spectrum = math_ops.cast([1., 1j, -1j], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
matrix = operator.to_dense()
imag_matrix = math_ops.imag(matrix)
eps = np.finfo(np.float32).eps
np.testing.assert_allclose(
0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3)
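# Illustrative NumPy check (not used by the tests) of the claim in
# `operator_and_matrix` above: a 1-D spectrum is Hermitian exactly when it is
# the DFT of a real convolution kernel, i.e. when its inverse DFT has
# (numerically) zero imaginary part.
def _np_spectrum_is_hermitian(spectrum, tol=1e-6):
  kernel = np.fft.ifft(np.asarray(spectrum, dtype=np.complex128))
  return np.max(np.abs(np.imag(kernel))) < tol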
class LinearOperatorCirculantTestNonHermitianSpectrum(
LinearOperatorCirculantBaseTest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Test of LinearOperatorCirculant when the spectrum is not Hermitian.
Non-Hermitian spectrum <==> Complex valued operator.
We test only complex dtypes here.
"""
@staticmethod
def dtypes_to_test():
return [dtypes.complex64, dtypes.complex128]
# Skip Cholesky since we are explicitly testing non-hermitian
# spectra.
@staticmethod
def tests_to_skip():
return ["cholesky"]
def operator_and_matrix(
self, shape_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
del ensure_self_adjoint_and_pd
shape = shape_info.shape
# Will be well conditioned enough to get accurate solves.
spectrum = linear_operator_test_util.random_sign_uniform(
shape=self._shape_to_spectrum_shape(shape),
dtype=dtype,
minval=1.,
maxval=2.)
lin_op_spectrum = spectrum
if use_placeholder:
lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)
operator = linalg.LinearOperatorCirculant(
lin_op_spectrum, input_output_dtype=dtype)
mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
return operator, mat
@test_util.run_deprecated_v1
@test_util.disable_xla("No registered Const")
def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
with self.cached_session():
spectrum = math_ops.cast([1., 1j, -1j], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
matrix = operator.to_dense()
imag_matrix = math_ops.imag(matrix)
eps = np.finfo(np.float32).eps
np.testing.assert_allclose(
0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3)
@test_util.run_deprecated_v1
def test_simple_positive_real_spectrum_gives_self_adjoint_pos_def_oper(self):
with self.cached_session() as sess:
spectrum = math_ops.cast([6., 4, 2], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
matrix, matrix_h = sess.run(
[operator.to_dense(),
linalg.adjoint(operator.to_dense())])
self.assertAllClose(matrix, matrix_h)
operator.assert_positive_definite().run() # Should not fail
operator.assert_self_adjoint().run() # Should not fail
@test_util.run_deprecated_v1
def test_defining_operator_using_real_convolution_kernel(self):
with self.cached_session():
convolution_kernel = [1., 2., 1.]
spectrum = fft_ops.fft(
math_ops.cast(convolution_kernel, dtypes.complex64))
# spectrum is shape [3] ==> operator is shape [3, 3]
# spectrum is Hermitian ==> operator is real.
operator = linalg.LinearOperatorCirculant(spectrum)
# Allow for complex output so we can make sure it has zero imag part.
self.assertEqual(operator.dtype, dtypes.complex64)
matrix = operator.to_dense().eval()
np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)
@test_util.run_v1_only("currently failing on v2")
def test_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
with self.cached_session():
# Make spectrum the FFT of a real convolution kernel h. This ensures that
# spectrum is Hermitian.
h = linear_operator_test_util.random_normal(shape=(3, 4))
spectrum = fft_ops.fft(math_ops.cast(h, dtypes.complex64))
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
matrix = operator.to_dense()
imag_matrix = math_ops.imag(matrix)
eps = np.finfo(np.float32).eps
np.testing.assert_allclose(
0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3 * 4)
@test_util.run_deprecated_v1
def test_convolution_kernel_same_as_first_row_of_to_dense(self):
spectrum = [[3., 2., 1.], [2., 1.5, 1.]]
with self.cached_session():
operator = linalg.LinearOperatorCirculant(spectrum)
h = operator.convolution_kernel()
c = operator.to_dense()
self.assertAllEqual((2, 3), h.get_shape())
self.assertAllEqual((2, 3, 3), c.get_shape())
self.assertAllClose(h.eval(), self.evaluate(c)[:, :, 0])
@test_util.run_deprecated_v1
def test_assert_non_singular_fails_for_singular_operator(self):
spectrum = math_ops.cast([0, 4, 2j + 2], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(spectrum)
with self.cached_session():
with self.assertRaisesOpError("Singular operator"):
operator.assert_non_singular().run()
@test_util.run_deprecated_v1
def test_assert_non_singular_does_not_fail_for_non_singular_operator(self):
spectrum = math_ops.cast([-3j, 4, 2j + 2], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(spectrum)
with self.cached_session():
operator.assert_non_singular().run() # Should not fail
@test_util.run_deprecated_v1
def test_assert_positive_definite_fails_for_non_positive_definite(self):
spectrum = math_ops.cast([6., 4, 2j], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(spectrum)
with self.cached_session():
with self.assertRaisesOpError("Not positive definite"):
operator.assert_positive_definite().run()
@test_util.run_deprecated_v1
def test_assert_positive_definite_does_not_fail_when_pos_def(self):
spectrum = math_ops.cast([6., 4, 2j + 2], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(spectrum)
with self.cached_session():
operator.assert_positive_definite().run() # Should not fail
def test_real_spectrum_and_not_self_adjoint_hint_raises(self):
spectrum = [1., 2.]
with self.assertRaisesRegexp(ValueError, "real.*always.*self-adjoint"):
linalg.LinearOperatorCirculant(spectrum, is_self_adjoint=False)
def test_real_spectrum_auto_sets_is_self_adjoint_to_true(self):
spectrum = [1., 2.]
operator = linalg.LinearOperatorCirculant(spectrum)
self.assertTrue(operator.is_self_adjoint)
class LinearOperatorCirculant2DBaseTest(object):
"""Common class for 2D circulant tests."""
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""We overwrite the FFT operation mapping for testing."""
with test.TestCase._constrain_devices_and_set_default(
self, sess, use_gpu, force_gpu) as sess:
with spectral_ops_test_util.fft_kernel_label_map():
yield sess
@staticmethod
def operator_shapes_infos():
shape_info = linear_operator_test_util.OperatorShapesInfo
# non-batch operators (n, n) and batch operators.
return [
shape_info((0, 0)),
shape_info((1, 1)),
shape_info((1, 6, 6)),
shape_info((3, 4, 4)),
shape_info((2, 1, 3, 3))
]
def _shape_to_spectrum_shape(self, shape):
"""Get a spectrum shape that will make an operator of desired shape."""
# This 2D block circulant operator takes a spectrum of shape
# batch_shape + [N0, N1],
    # and creates an operator of shape
# batch_shape + [N0*N1, N0*N1]
if shape == (0, 0):
return (0, 0)
elif shape == (1, 1):
return (1, 1)
elif shape == (1, 6, 6):
return (1, 2, 3)
elif shape == (3, 4, 4):
return (3, 2, 2)
elif shape == (2, 1, 3, 3):
return (2, 1, 3, 1)
else:
raise ValueError("Unhandled shape: %s" % shape)
def _spectrum_to_circulant_2d(self, spectrum, shape, dtype):
"""Creates a block circulant matrix from a spectrum.
Intentionally done in an explicit yet inefficient way. This provides a
cross check to the main code that uses fancy reshapes.
Args:
spectrum: Float or complex `Tensor`.
shape: Python list. Desired shape of returned matrix.
dtype: Type to cast the returned matrix to.
Returns:
Block circulant (batch) matrix of desired `dtype`.
"""
spectrum = _to_complex(spectrum)
spectrum_shape = self._shape_to_spectrum_shape(shape)
domain_dimension = spectrum_shape[-1]
if not domain_dimension:
return array_ops.zeros(shape, dtype)
block_shape = spectrum_shape[-2:]
# Explicitly compute the action of spectrum on basis vectors.
matrix_rows = []
for n0 in range(block_shape[0]):
for n1 in range(block_shape[1]):
x = np.zeros(block_shape)
# x is a basis vector.
x[n0, n1] = 1.0
fft_x = fft_ops.fft2d(math_ops.cast(x, spectrum.dtype))
h_convolve_x = fft_ops.ifft2d(spectrum * fft_x)
# We want the flat version of the action of the operator on a basis
# vector, not the block version.
h_convolve_x = array_ops.reshape(h_convolve_x, shape[:-1])
matrix_rows.append(h_convolve_x)
matrix = array_ops.stack(matrix_rows, axis=-1)
return math_ops.cast(matrix, dtype)
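# Illustrative NumPy-only sketch mirroring `_spectrum_to_circulant_2d` above
# for a single (non-batch) [N0, N1] spectrum; a rough cross check that is not
# used by the tests. The operator is applied to every one-hot block "basis
# vector" and the flattened results become the columns of the dense matrix.
def _np_block_circulant_2d_from_spectrum(spectrum):
  spectrum = np.asarray(spectrum, dtype=np.complex128)
  n0, n1 = spectrum.shape
  columns = []
  for m0 in range(n0):
    for m1 in range(n1):
      x = np.zeros((n0, n1))
      x[m0, m1] = 1.0
      column = np.fft.ifft2(spectrum * np.fft.fft2(x))
      columns.append(column.reshape(-1))
  return np.stack(columns, axis=-1)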
class LinearOperatorCirculant2DTestHermitianSpectrum(
LinearOperatorCirculant2DBaseTest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Test of LinearOperatorCirculant2D when the spectrum is Hermitian.
Hermitian spectrum <==> Real valued operator. We test both real and complex
dtypes here though. So in some cases the matrix will be complex but with
zero imaginary part.
"""
def operator_and_matrix(
self, shape_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = shape_info.shape
# For this test class, we are creating Hermitian spectrums.
# We also want the spectrum to have eigenvalues bounded away from zero.
#
# pre_spectrum is bounded away from zero.
pre_spectrum = linear_operator_test_util.random_uniform(
shape=self._shape_to_spectrum_shape(shape),
dtype=dtype,
minval=1.,
maxval=2.)
pre_spectrum_c = _to_complex(pre_spectrum)
# Real{IFFT[pre_spectrum]}
# = IFFT[EvenPartOf[pre_spectrum]]
# is the IFFT of something that is also bounded away from zero.
# Therefore, FFT[pre_h] would be a well-conditioned spectrum.
pre_h = fft_ops.ifft2d(pre_spectrum_c)
# A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
# So we will make spectrum = FFT[h], for real valued h.
h = math_ops.real(pre_h)
h_c = _to_complex(h)
spectrum = fft_ops.fft2d(h_c)
lin_op_spectrum = spectrum
if use_placeholder:
lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)
operator = linalg.LinearOperatorCirculant2D(
lin_op_spectrum,
is_positive_definite=True if ensure_self_adjoint_and_pd else None,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
input_output_dtype=dtype)
mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)
return operator, mat
class LinearOperatorCirculant2DTestNonHermitianSpectrum(
LinearOperatorCirculant2DBaseTest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Test of LinearOperatorCirculant when the spectrum is not Hermitian.
Non-Hermitian spectrum <==> Complex valued operator.
We test only complex dtypes here.
"""
@staticmethod
def dtypes_to_test():
return [dtypes.complex64, dtypes.complex128]
@staticmethod
def tests_to_skip():
return ["cholesky"]
def operator_and_matrix(
self, shape_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
del ensure_self_adjoint_and_pd
shape = shape_info.shape
# Will be well conditioned enough to get accurate solves.
spectrum = linear_operator_test_util.random_sign_uniform(
shape=self._shape_to_spectrum_shape(shape),
dtype=dtype,
minval=1.,
maxval=2.)
lin_op_spectrum = spectrum
if use_placeholder:
lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)
operator = linalg.LinearOperatorCirculant2D(
lin_op_spectrum, input_output_dtype=dtype)
mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)
return operator, mat
@test_util.run_deprecated_v1
def test_real_hermitian_spectrum_gives_real_symmetric_operator(self):
with self.cached_session() as sess:
# This is a real and hermitian spectrum.
spectrum = [[1., 2., 2.], [3., 4., 4.], [3., 4., 4.]]
operator = linalg.LinearOperatorCirculant(spectrum)
matrix_tensor = operator.to_dense()
self.assertEqual(matrix_tensor.dtype, dtypes.complex64)
matrix_t = array_ops.matrix_transpose(matrix_tensor)
imag_matrix = math_ops.imag(matrix_tensor)
matrix, matrix_transpose, imag_matrix = sess.run(
[matrix_tensor, matrix_t, imag_matrix])
np.testing.assert_allclose(0, imag_matrix, atol=1e-6)
self.assertAllClose(matrix, matrix_transpose, atol=0)
@test_util.run_v1_only("b/120545219")
def test_real_spectrum_gives_self_adjoint_operator(self):
with self.cached_session():
      # This is a real spectrum, so the operator will be self-adjoint.
spectrum = linear_operator_test_util.random_normal(
shape=(3, 3), dtype=dtypes.float32)
operator = linalg.LinearOperatorCirculant2D(spectrum)
matrix_tensor = operator.to_dense()
self.assertEqual(matrix_tensor.dtype, dtypes.complex64)
matrix_h = linalg.adjoint(matrix_tensor)
matrix, matrix_h = self.evaluate([matrix_tensor, matrix_h])
self.assertAllClose(matrix, matrix_h, atol=0)
@test_util.run_deprecated_v1
def test_assert_non_singular_fails_for_singular_operator(self):
spectrum = math_ops.cast([[0, 4], [2j + 2, 3.]], dtypes.complex64)
operator = linalg.LinearOperatorCirculant2D(spectrum)
with self.cached_session():
with self.assertRaisesOpError("Singular operator"):
operator.assert_non_singular().run()
@test_util.run_deprecated_v1
def test_assert_non_singular_does_not_fail_for_non_singular_operator(self):
spectrum = math_ops.cast([[-3j, 4], [2j + 2, 3.]], dtypes.complex64)
operator = linalg.LinearOperatorCirculant2D(spectrum)
with self.cached_session():
operator.assert_non_singular().run() # Should not fail
@test_util.run_deprecated_v1
def test_assert_positive_definite_fails_for_non_positive_definite(self):
spectrum = math_ops.cast([[6., 4], [2j, 3.]], dtypes.complex64)
operator = linalg.LinearOperatorCirculant2D(spectrum)
with self.cached_session():
with self.assertRaisesOpError("Not positive definite"):
operator.assert_positive_definite().run()
@test_util.run_deprecated_v1
def test_assert_positive_definite_does_not_fail_when_pos_def(self):
spectrum = math_ops.cast([[6., 4], [2j + 2, 3.]], dtypes.complex64)
operator = linalg.LinearOperatorCirculant2D(spectrum)
with self.cached_session():
operator.assert_positive_definite().run() # Should not fail
def test_real_spectrum_and_not_self_adjoint_hint_raises(self):
spectrum = [[1., 2.], [3., 4]]
with self.assertRaisesRegexp(ValueError, "real.*always.*self-adjoint"):
linalg.LinearOperatorCirculant2D(spectrum, is_self_adjoint=False)
def test_real_spectrum_auto_sets_is_self_adjoint_to_true(self):
spectrum = [[1., 2.], [3., 4]]
operator = linalg.LinearOperatorCirculant2D(spectrum)
self.assertTrue(operator.is_self_adjoint)
def test_invalid_rank_raises(self):
spectrum = array_ops.constant(np.float32(rng.rand(2)))
with self.assertRaisesRegexp(ValueError, "must have at least 2 dimensions"):
linalg.LinearOperatorCirculant2D(spectrum)
class LinearOperatorCirculant3DTest(test.TestCase):
"""Simple test of the 3D case. See also the 1D and 2D tests."""
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""We overwrite the FFT operation mapping for testing."""
with test.TestCase._constrain_devices_and_set_default(
self, sess, use_gpu, force_gpu) as sess:
with spectral_ops_test_util.fft_kernel_label_map():
yield sess
@test_util.run_deprecated_v1
def test_real_spectrum_gives_self_adjoint_operator(self):
with self.cached_session():
      # This is a real spectrum, so the operator will be self-adjoint.
spectrum = linear_operator_test_util.random_normal(
shape=(2, 2, 3, 5), dtype=dtypes.float32)
operator = linalg.LinearOperatorCirculant3D(spectrum)
self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), operator.shape)
matrix_tensor = operator.to_dense()
self.assertEqual(matrix_tensor.dtype, dtypes.complex64)
matrix_h = linalg.adjoint(matrix_tensor)
matrix, matrix_h = self.evaluate([matrix_tensor, matrix_h])
self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), matrix.shape)
self.assertAllClose(matrix, matrix_h)
@test_util.run_deprecated_v1
def test_defining_operator_using_real_convolution_kernel(self):
with self.cached_session():
convolution_kernel = linear_operator_test_util.random_normal(
shape=(2, 2, 3, 5), dtype=dtypes.float32)
# Convolution kernel is real ==> spectrum is Hermitian.
spectrum = fft_ops.fft3d(
math_ops.cast(convolution_kernel, dtypes.complex64))
# spectrum is Hermitian ==> operator is real.
operator = linalg.LinearOperatorCirculant3D(spectrum)
self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), operator.shape)
# Allow for complex output so we can make sure it has zero imag part.
self.assertEqual(operator.dtype, dtypes.complex64)
matrix = operator.to_dense().eval()
self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), matrix.shape)
np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)
@test_util.run_deprecated_v1
def test_defining_spd_operator_by_taking_real_part(self):
with self.cached_session() as sess:
# S is real and positive.
s = linear_operator_test_util.random_uniform(
shape=(10, 2, 3, 4), dtype=dtypes.float32, minval=1., maxval=2.)
# Let S = S1 + S2, the Hermitian and anti-hermitian parts.
# S1 = 0.5 * (S + S^H), S2 = 0.5 * (S - S^H),
# where ^H is the Hermitian transpose of the function:
# f(n0, n1, n2)^H := ComplexConjugate[f(N0-n0, N1-n1, N2-n2)].
# We want to isolate S1, since
# S1 is Hermitian by construction
# S1 is real since S is
# S1 is positive since it is the sum of two positive kernels
# IDFT[S] = IDFT[S1] + IDFT[S2]
# = H1 + H2
# where H1 is real since it is Hermitian,
# and H2 is imaginary since it is anti-Hermitian.
ifft_s = fft_ops.ifft3d(math_ops.cast(s, dtypes.complex64))
# Throw away H2, keep H1.
real_ifft_s = math_ops.real(ifft_s)
# This is the perfect spectrum!
# spectrum = DFT[H1]
# = S1,
fft_real_ifft_s = fft_ops.fft3d(
math_ops.cast(real_ifft_s, dtypes.complex64))
# S1 is Hermitian ==> operator is real.
# S1 is real ==> operator is self-adjoint.
# S1 is positive ==> operator is positive-definite.
operator = linalg.LinearOperatorCirculant3D(fft_real_ifft_s)
# Allow for complex output so we can check operator has zero imag part.
self.assertEqual(operator.dtype, dtypes.complex64)
matrix, matrix_t = sess.run([
operator.to_dense(),
array_ops.matrix_transpose(operator.to_dense())
])
operator.assert_positive_definite().run() # Should not fail.
np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)
self.assertAllClose(matrix, matrix_t)
# Just to test the theory, get S2 as well.
# This should create an imaginary operator.
# S2 is anti-Hermitian ==> operator is imaginary.
# S2 is real ==> operator is self-adjoint.
imag_ifft_s = math_ops.imag(ifft_s)
fft_imag_ifft_s = fft_ops.fft3d(
1j * math_ops.cast(imag_ifft_s, dtypes.complex64))
operator_imag = linalg.LinearOperatorCirculant3D(fft_imag_ifft_s)
matrix, matrix_h = sess.run([
operator_imag.to_dense(),
array_ops.matrix_transpose(math_ops.conj(operator_imag.to_dense()))
])
self.assertAllClose(matrix, matrix_h)
np.testing.assert_allclose(0, np.real(matrix), atol=1e-7)
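# Illustrative NumPy sketch (not used by the tests) of the decomposition in
# `test_defining_spd_operator_by_taking_real_part` above: the Hermitian part
# S1 of a spectrum S is recovered by dropping the imaginary part of its
# inverse DFT, i.e. S1 = FFT[Real[IFFT[S]]].
def _np_hermitian_part_of_spectrum(s):
  s = np.asarray(s, dtype=np.complex128)
  return np.fft.fftn(np.real(np.fft.ifftn(s)))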
if __name__ == "__main__":
linear_operator_test_util.add_tests(
LinearOperatorCirculantTestSelfAdjointOperator)
linear_operator_test_util.add_tests(
LinearOperatorCirculantTestHermitianSpectrum)
linear_operator_test_util.add_tests(
LinearOperatorCirculantTestNonHermitianSpectrum)
linear_operator_test_util.add_tests(
LinearOperatorCirculant2DTestHermitianSpectrum)
linear_operator_test_util.add_tests(
LinearOperatorCirculant2DTestNonHermitianSpectrum)
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for registration mechanisms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops.linalg import cholesky_registrations # pylint: disable=unused-import
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import matmul_registrations # pylint: disable=unused-import
from tensorflow.python.ops.linalg import solve_registrations # pylint: disable=unused-import
from tensorflow.python.platform import test
# pylint: disable=protected-access
_ADJOINTS = linear_operator_algebra._ADJOINTS
_registered_adjoint = linear_operator_algebra._registered_adjoint
_CHOLESKY_DECOMPS = linear_operator_algebra._CHOLESKY_DECOMPS
_registered_cholesky = linear_operator_algebra._registered_cholesky
_INVERSES = linear_operator_algebra._INVERSES
_registered_inverse = linear_operator_algebra._registered_inverse
_MATMUL = linear_operator_algebra._MATMUL
_registered_matmul = linear_operator_algebra._registered_matmul
_SOLVE = linear_operator_algebra._SOLVE
_registered_solve = linear_operator_algebra._registered_solve
# pylint: enable=protected-access
class AdjointTest(test.TestCase):
def testRegistration(self):
class CustomLinOp(linear_operator.LinearOperator):
def _matmul(self, a):
pass
def _shape(self):
return tensor_shape.TensorShape([1, 1])
def _shape_tensor(self):
pass
    # Register Adjoint to a function that just returns a marker string.
@linear_operator_algebra.RegisterAdjoint(CustomLinOp)
def _adjoint(a): # pylint: disable=unused-argument,unused-variable
return "OK"
self.assertEqual("OK", CustomLinOp(dtype=None).adjoint())
def testRegistrationFailures(self):
class CustomLinOp(linear_operator.LinearOperator):
pass
with self.assertRaisesRegexp(TypeError, "must be callable"):
linear_operator_algebra.RegisterAdjoint(CustomLinOp)("blah")
# First registration is OK
linear_operator_algebra.RegisterAdjoint(CustomLinOp)(lambda a: None)
# Second registration fails
with self.assertRaisesRegexp(ValueError, "has already been registered"):
linear_operator_algebra.RegisterAdjoint(CustomLinOp)(lambda a: None)
def testExactAdjointRegistrationsAllMatch(self):
for (k, v) in _ADJOINTS.items():
self.assertEqual(v, _registered_adjoint(k[0]))
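# Illustrative sketch (not used by the tests) of the decorator-based registry
# pattern exercised throughout this file: a module-level dict keyed by
# operator class, filled by a decorator factory that rejects non-callables
# and duplicate registrations. The names below are hypothetical.
_EXAMPLE_REGISTRY = {}
def _register_example(op_cls):
  """Stand-in for a RegisterAdjoint-style decorator; illustration only."""
  def _decorator(fn):
    if not callable(fn):
      raise TypeError("fn must be callable.")
    if op_cls in _EXAMPLE_REGISTRY:
      raise ValueError("%s has already been registered." % op_cls)
    _EXAMPLE_REGISTRY[op_cls] = fn
    return fn
  return _decorator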
class CholeskyTest(test.TestCase):
def testRegistration(self):
class CustomLinOp(linear_operator.LinearOperator):
def _matmul(self, a):
pass
def _shape(self):
return tensor_shape.TensorShape([1, 1])
def _shape_tensor(self):
pass
    # Register Cholesky to a function that just returns a marker string.
@linear_operator_algebra.RegisterCholesky(CustomLinOp)
def _cholesky(a): # pylint: disable=unused-argument,unused-variable
return "OK"
with self.assertRaisesRegexp(ValueError, "positive definite"):
CustomLinOp(dtype=None, is_self_adjoint=True).cholesky()
with self.assertRaisesRegexp(ValueError, "self adjoint"):
CustomLinOp(dtype=None, is_positive_definite=True).cholesky()
custom_linop = CustomLinOp(
dtype=None, is_self_adjoint=True, is_positive_definite=True)
self.assertEqual("OK", custom_linop.cholesky())
def testRegistrationFailures(self):
class CustomLinOp(linear_operator.LinearOperator):
pass
with self.assertRaisesRegexp(TypeError, "must be callable"):
linear_operator_algebra.RegisterCholesky(CustomLinOp)("blah")
# First registration is OK
linear_operator_algebra.RegisterCholesky(CustomLinOp)(lambda a: None)
# Second registration fails
with self.assertRaisesRegexp(ValueError, "has already been registered"):
linear_operator_algebra.RegisterCholesky(CustomLinOp)(lambda a: None)
def testExactCholeskyRegistrationsAllMatch(self):
for (k, v) in _CHOLESKY_DECOMPS.items():
self.assertEqual(v, _registered_cholesky(k[0]))
class MatmulTest(test.TestCase):
def testRegistration(self):
class CustomLinOp(linear_operator.LinearOperator):
def _matmul(self, a):
pass
def _shape(self):
return tensor_shape.TensorShape([1, 1])
def _shape_tensor(self):
pass
    # Register Matmul to a function that just returns a marker string.
@linear_operator_algebra.RegisterMatmul(CustomLinOp, CustomLinOp)
def _matmul(a, b): # pylint: disable=unused-argument,unused-variable
return "OK"
custom_linop = CustomLinOp(
dtype=None, is_self_adjoint=True, is_positive_definite=True)
self.assertEqual("OK", custom_linop.matmul(custom_linop))
def testRegistrationFailures(self):
class CustomLinOp(linear_operator.LinearOperator):
pass
with self.assertRaisesRegexp(TypeError, "must be callable"):
linear_operator_algebra.RegisterMatmul(CustomLinOp, CustomLinOp)("blah")
# First registration is OK
linear_operator_algebra.RegisterMatmul(
CustomLinOp, CustomLinOp)(lambda a: None)
# Second registration fails
with self.assertRaisesRegexp(ValueError, "has already been registered"):
linear_operator_algebra.RegisterMatmul(
CustomLinOp, CustomLinOp)(lambda a: None)
def testExactMatmulRegistrationsAllMatch(self):
for (k, v) in _MATMUL.items():
self.assertEqual(v, _registered_matmul(k[0], k[1]))
class SolveTest(test.TestCase):
def testRegistration(self):
class CustomLinOp(linear_operator.LinearOperator):
def _matmul(self, a):
pass
def _solve(self, a):
pass
def _shape(self):
return tensor_shape.TensorShape([1, 1])
def _shape_tensor(self):
pass
    # Register Solve to a function that just returns a marker string.
@linear_operator_algebra.RegisterSolve(CustomLinOp, CustomLinOp)
def _solve(a, b): # pylint: disable=unused-argument,unused-variable
return "OK"
custom_linop = CustomLinOp(
dtype=None, is_self_adjoint=True, is_positive_definite=True)
self.assertEqual("OK", custom_linop.solve(custom_linop))
def testRegistrationFailures(self):
class CustomLinOp(linear_operator.LinearOperator):
pass
with self.assertRaisesRegexp(TypeError, "must be callable"):
linear_operator_algebra.RegisterSolve(CustomLinOp, CustomLinOp)("blah")
# First registration is OK
linear_operator_algebra.RegisterSolve(
CustomLinOp, CustomLinOp)(lambda a: None)
# Second registration fails
with self.assertRaisesRegexp(ValueError, "has already been registered"):
linear_operator_algebra.RegisterSolve(
CustomLinOp, CustomLinOp)(lambda a: None)
def testExactSolveRegistrationsAllMatch(self):
for (k, v) in _SOLVE.items():
self.assertEqual(v, _registered_solve(k[0], k[1]))
class InverseTest(test.TestCase):
def testRegistration(self):
class CustomLinOp(linear_operator.LinearOperator):
def _matmul(self, a):
pass
def _shape(self):
return tensor_shape.TensorShape([1, 1])
def _shape_tensor(self):
pass
    # Register Inverse to a function that just returns a marker string.
@linear_operator_algebra.RegisterInverse(CustomLinOp)
def _inverse(a): # pylint: disable=unused-argument,unused-variable
return "OK"
with self.assertRaisesRegexp(ValueError, "singular"):
CustomLinOp(dtype=None, is_non_singular=False).inverse()
self.assertEqual("OK", CustomLinOp(
dtype=None, is_non_singular=True).inverse())
def testRegistrationFailures(self):
class CustomLinOp(linear_operator.LinearOperator):
pass
with self.assertRaisesRegexp(TypeError, "must be callable"):
linear_operator_algebra.RegisterInverse(CustomLinOp)("blah")
# First registration is OK
linear_operator_algebra.RegisterInverse(CustomLinOp)(lambda a: None)
# Second registration fails
with self.assertRaisesRegexp(ValueError, "has already been registered"):
linear_operator_algebra.RegisterInverse(CustomLinOp)(lambda a: None)
def testExactRegistrationsAllMatch(self):
for (k, v) in _INVERSES.items():
self.assertEqual(v, _registered_inverse(k[0]))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg/linear_operator_algebra_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib
class LinearOperatorLowerTriangularTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
@staticmethod
def tests_to_skip():
# Cholesky does not make sense for triangular matrices.
return ["cholesky"]
def operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
# Upper triangle will be nonzero, but ignored.
# Use a diagonal that ensures this matrix is well conditioned.
tril = linear_operator_test_util.random_tril_matrix(
shape, dtype=dtype, force_well_conditioned=True, remove_upper=False)
lin_op_tril = tril
if use_placeholder:
lin_op_tril = array_ops.placeholder_with_default(lin_op_tril, shape=None)
operator = linalg.LinearOperatorLowerTriangular(lin_op_tril)
matrix = array_ops.matrix_band_part(tril, -1, 0)
return operator, matrix
def test_assert_non_singular(self):
    # Singular matrix with one positive eigenvalue and one zero eigenvalue.
with self.cached_session():
tril = [[1., 0.], [1., 0.]]
operator = linalg.LinearOperatorLowerTriangular(tril)
with self.assertRaisesOpError("Singular operator"):
operator.assert_non_singular().run()
def test_is_x_flags(self):
# Matrix with two positive eigenvalues.
tril = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorLowerTriangular(
tril,
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
def test_tril_must_have_at_least_two_dims_or_raises(self):
with self.assertRaisesRegexp(ValueError, "at least 2 dimensions"):
linalg.LinearOperatorLowerTriangular([1.])
def test_triangular_diag_matmul(self):
operator1 = linalg_lib.LinearOperatorLowerTriangular(
[[1., 0., 0.], [2., 1., 0.], [2., 3., 3.]])
operator2 = linalg_lib.LinearOperatorDiag([2., 2., 3.])
operator_matmul = operator1.matmul(operator2)
self.assertTrue(isinstance(
operator_matmul,
linalg_lib.LinearOperatorLowerTriangular))
self.assertAllClose(
math_ops.matmul(
operator1.to_dense(),
operator2.to_dense()),
self.evaluate(operator_matmul.to_dense()))
operator_matmul = operator2.matmul(operator1)
self.assertTrue(isinstance(
operator_matmul,
linalg_lib.LinearOperatorLowerTriangular))
self.assertAllClose(
math_ops.matmul(
operator2.to_dense(),
operator1.to_dense()),
self.evaluate(operator_matmul.to_dense()))
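# Illustrative sketch (assumes NumPy, not used by the tests): the
# matrix_band_part(tril, -1, 0) call in operator_and_matrix above keeps only
# the lower triangle of the matrix, including the diagonal.
def _np_lower_triangular_part(mat):
  import numpy as np  # Local import; illustration only.
  return np.tril(np.asarray(mat))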
if __name__ == "__main__":
linear_operator_test_util.add_tests(LinearOperatorLowerTriangularTest)
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
rng = np.random.RandomState(2016)
class LinearOperatorIdentityTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
@staticmethod
def dtypes_to_test():
# TODO(langmore) Test tf.float16 once tf.linalg.solve works in
# 16bit.
return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
# Identity matrix is already Hermitian Positive Definite.
del ensure_self_adjoint_and_pd
shape = list(build_info.shape)
assert shape[-1] == shape[-2]
batch_shape = shape[:-2]
num_rows = shape[-1]
operator = linalg_lib.LinearOperatorIdentity(
num_rows, batch_shape=batch_shape, dtype=dtype)
mat = linalg_ops.eye(num_rows, batch_shape=batch_shape, dtype=dtype)
return operator, mat
@test_util.run_deprecated_v1
def test_assert_positive_definite(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorIdentity(num_rows=2)
operator.assert_positive_definite().run() # Should not fail
@test_util.run_deprecated_v1
def test_assert_non_singular(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorIdentity(num_rows=2)
operator.assert_non_singular().run() # Should not fail
@test_util.run_deprecated_v1
def test_assert_self_adjoint(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorIdentity(num_rows=2)
operator.assert_self_adjoint().run() # Should not fail
def test_float16_matmul(self):
# float16 cannot be tested by base test class because tf.linalg.solve does
# not work with float16.
with self.cached_session():
operator = linalg_lib.LinearOperatorIdentity(
num_rows=2, dtype=dtypes.float16)
x = rng.randn(2, 3).astype(np.float16)
y = operator.matmul(x)
self.assertAllClose(x, self.evaluate(y))
def test_non_scalar_num_rows_raises_static(self):
with self.assertRaisesRegexp(ValueError, "must be a 0-D Tensor"):
linalg_lib.LinearOperatorIdentity(num_rows=[2])
def test_non_integer_num_rows_raises_static(self):
with self.assertRaisesRegexp(TypeError, "must be integer"):
linalg_lib.LinearOperatorIdentity(num_rows=2.)
def test_negative_num_rows_raises_static(self):
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
linalg_lib.LinearOperatorIdentity(num_rows=-2)
def test_non_1d_batch_shape_raises_static(self):
with self.assertRaisesRegexp(ValueError, "must be a 1-D"):
linalg_lib.LinearOperatorIdentity(num_rows=2, batch_shape=2)
def test_non_integer_batch_shape_raises_static(self):
with self.assertRaisesRegexp(TypeError, "must be integer"):
linalg_lib.LinearOperatorIdentity(num_rows=2, batch_shape=[2.])
def test_negative_batch_shape_raises_static(self):
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
linalg_lib.LinearOperatorIdentity(num_rows=2, batch_shape=[-2])
@test_util.run_deprecated_v1
def test_non_scalar_num_rows_raises_dynamic(self):
with self.cached_session():
num_rows = array_ops.placeholder(dtypes.int32)
operator = linalg_lib.LinearOperatorIdentity(
num_rows, assert_proper_shapes=True)
with self.assertRaisesOpError("must be a 0-D Tensor"):
operator.to_dense().eval(feed_dict={num_rows: [2]})
@test_util.run_deprecated_v1
def test_negative_num_rows_raises_dynamic(self):
with self.cached_session():
num_rows = array_ops.placeholder(dtypes.int32)
operator = linalg_lib.LinearOperatorIdentity(
num_rows, assert_proper_shapes=True)
with self.assertRaisesOpError("must be non-negative"):
operator.to_dense().eval(feed_dict={num_rows: -2})
@test_util.run_deprecated_v1
def test_non_1d_batch_shape_raises_dynamic(self):
with self.cached_session():
batch_shape = array_ops.placeholder(dtypes.int32)
operator = linalg_lib.LinearOperatorIdentity(
num_rows=2, batch_shape=batch_shape, assert_proper_shapes=True)
with self.assertRaisesOpError("must be a 1-D"):
operator.to_dense().eval(feed_dict={batch_shape: 2})
@test_util.run_deprecated_v1
def test_negative_batch_shape_raises_dynamic(self):
with self.cached_session():
batch_shape = array_ops.placeholder(dtypes.int32)
operator = linalg_lib.LinearOperatorIdentity(
num_rows=2, batch_shape=batch_shape, assert_proper_shapes=True)
with self.assertRaisesOpError("must be non-negative"):
operator.to_dense().eval(feed_dict={batch_shape: [-2]})
def test_wrong_matrix_dimensions_raises_static(self):
operator = linalg_lib.LinearOperatorIdentity(num_rows=2)
x = rng.randn(3, 3).astype(np.float32)
with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
operator.matmul(x)
@test_util.run_deprecated_v1
def test_wrong_matrix_dimensions_raises_dynamic(self):
num_rows = array_ops.placeholder(dtypes.int32)
x = array_ops.placeholder(dtypes.float32)
with self.cached_session():
operator = linalg_lib.LinearOperatorIdentity(
num_rows, assert_proper_shapes=True)
y = operator.matmul(x)
with self.assertRaisesOpError("Incompatible.*dimensions"):
y.eval(feed_dict={num_rows: 2, x: rng.rand(3, 3)})
def test_default_batch_shape_broadcasts_with_everything_static(self):
# These cannot be done in the automated (base test class) tests since they
# test shapes that tf.batch_matmul cannot handle.
# In particular, tf.batch_matmul does not broadcast.
with self.cached_session() as sess:
x = random_ops.random_normal(shape=(1, 2, 3, 4))
operator = linalg_lib.LinearOperatorIdentity(num_rows=3, dtype=x.dtype)
operator_matmul = operator.matmul(x)
expected = x
self.assertAllEqual(operator_matmul.get_shape(), expected.get_shape())
self.assertAllClose(*self.evaluate([operator_matmul, expected]))
@test_util.run_deprecated_v1
def test_default_batch_shape_broadcasts_with_everything_dynamic(self):
# These cannot be done in the automated (base test class) tests since they
# test shapes that tf.batch_matmul cannot handle.
# In particular, tf.batch_matmul does not broadcast.
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
operator = linalg_lib.LinearOperatorIdentity(num_rows=3, dtype=x.dtype)
operator_matmul = operator.matmul(x)
expected = x
feed_dict = {x: rng.randn(1, 2, 3, 4)}
self.assertAllClose(
*sess.run([operator_matmul, expected], feed_dict=feed_dict))
def test_broadcast_matmul_static_shapes(self):
# These cannot be done in the automated (base test class) tests since they
# test shapes that tf.batch_matmul cannot handle.
# In particular, tf.batch_matmul does not broadcast.
with self.cached_session() as sess:
# Given this x and LinearOperatorIdentity shape of (2, 1, 3, 3), the
# broadcast shape of operator and 'x' is (2, 2, 3, 4)
x = random_ops.random_normal(shape=(1, 2, 3, 4))
operator = linalg_lib.LinearOperatorIdentity(
num_rows=3, batch_shape=(2, 1), dtype=x.dtype)
# Batch matrix of zeros with the broadcast shape of x and operator.
zeros = array_ops.zeros(shape=(2, 2, 3, 4), dtype=x.dtype)
# Expected result of matmul and solve.
expected = x + zeros
operator_matmul = operator.matmul(x)
self.assertAllEqual(operator_matmul.get_shape(), expected.get_shape())
self.assertAllClose(*self.evaluate([operator_matmul, expected]))
@test_util.run_deprecated_v1
def test_broadcast_matmul_dynamic_shapes(self):
# These cannot be done in the automated (base test class) tests since they
# test shapes that tf.batch_matmul cannot handle.
# In particular, tf.batch_matmul does not broadcast.
with self.cached_session() as sess:
# Given this x and LinearOperatorIdentity shape of (2, 1, 3, 3), the
# broadcast shape of operator and 'x' is (2, 2, 3, 4)
x = array_ops.placeholder(dtypes.float32)
num_rows = array_ops.placeholder(dtypes.int32)
batch_shape = array_ops.placeholder(dtypes.int32)
operator = linalg_lib.LinearOperatorIdentity(
num_rows, batch_shape=batch_shape)
feed_dict = {x: rng.rand(1, 2, 3, 4), num_rows: 3, batch_shape: (2, 1)}
# Batch matrix of zeros with the broadcast shape of x and operator.
zeros = array_ops.zeros(shape=(2, 2, 3, 4), dtype=x.dtype)
# Expected result of matmul and solve.
expected = x + zeros
operator_matmul = operator.matmul(x)
self.assertAllClose(
*sess.run([operator_matmul, expected], feed_dict=feed_dict))
def test_is_x_flags(self):
# The is_x flags are by default all True.
operator = linalg_lib.LinearOperatorIdentity(num_rows=2)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertTrue(operator.is_self_adjoint)
    # Setting any of these flags to False raises, because the identity is
    # always self-adjoint, non-singular, and positive definite.
with self.assertRaisesRegexp(ValueError, "is always non-singular"):
operator = linalg_lib.LinearOperatorIdentity(
num_rows=2,
is_non_singular=None,
)
def test_identity_adjoint_type(self):
operator = linalg_lib.LinearOperatorIdentity(
num_rows=2, is_non_singular=True)
self.assertIsInstance(
operator.adjoint(), linalg_lib.LinearOperatorIdentity)
def test_identity_cholesky_type(self):
operator = linalg_lib.LinearOperatorIdentity(
num_rows=2,
is_positive_definite=True,
is_self_adjoint=True,
)
self.assertIsInstance(
operator.cholesky(), linalg_lib.LinearOperatorIdentity)
def test_identity_inverse_type(self):
operator = linalg_lib.LinearOperatorIdentity(
num_rows=2, is_non_singular=True)
self.assertIsInstance(
operator.inverse(), linalg_lib.LinearOperatorIdentity)
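# Illustrative NumPy sketch (not used by the tests) of the broadcasting the
# tests above check: matmul by an identity operator with batch shape (2, 1)
# returns x unchanged in value, broadcast up to the combined batch shape.
def _np_identity_broadcast_matmul(x, operator_batch_shape=(2, 1), num_rows=3):
  x = np.asarray(x)
  zeros = np.zeros(operator_batch_shape + (num_rows, 1), dtype=x.dtype)
  return x + zeros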
class LinearOperatorScaledIdentityTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
@staticmethod
def dtypes_to_test():
# TODO(langmore) Test tf.float16 once tf.linalg.solve works in
# 16bit.
return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
assert shape[-1] == shape[-2]
batch_shape = shape[:-2]
num_rows = shape[-1]
    # Uniform values with magnitude at least 1 (bounded away from the
    # origin), which keeps the operator well conditioned.
# Shape batch_shape
multiplier = linear_operator_test_util.random_sign_uniform(
shape=batch_shape, minval=1., maxval=2., dtype=dtype)
if ensure_self_adjoint_and_pd:
# Abs on complex64 will result in a float32, so we cast back up.
multiplier = math_ops.cast(math_ops.abs(multiplier), dtype=dtype)
# Nothing to feed since LinearOperatorScaledIdentity takes no Tensor args.
lin_op_multiplier = multiplier
if use_placeholder:
lin_op_multiplier = array_ops.placeholder_with_default(
multiplier, shape=None)
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows,
lin_op_multiplier,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
multiplier_matrix = array_ops.expand_dims(
array_ops.expand_dims(multiplier, -1), -1)
matrix = multiplier_matrix * linalg_ops.eye(
num_rows, batch_shape=batch_shape, dtype=dtype)
return operator, matrix
@test_util.run_deprecated_v1
def test_assert_positive_definite_does_not_raise_when_positive(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=1.)
operator.assert_positive_definite().run() # Should not fail
def test_assert_positive_definite_raises_when_negative(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=-1.)
with self.assertRaisesOpError("not positive definite"):
operator.assert_positive_definite().run()
@test_util.run_deprecated_v1
def test_assert_non_singular_does_not_raise_when_non_singular(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=[1., 2., 3.])
operator.assert_non_singular().run() # Should not fail
def test_assert_non_singular_raises_when_singular(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=[1., 2., 0.])
with self.assertRaisesOpError("was singular"):
operator.assert_non_singular().run()
@test_util.run_deprecated_v1
def test_assert_self_adjoint_does_not_raise_when_self_adjoint(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=[1. + 0J])
operator.assert_self_adjoint().run() # Should not fail
def test_assert_self_adjoint_raises_when_not_self_adjoint(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=[1. + 1J])
with self.assertRaisesOpError("not self-adjoint"):
operator.assert_self_adjoint().run()
def test_float16_matmul(self):
# float16 cannot be tested by base test class because tf.linalg.solve does
# not work with float16.
with self.cached_session():
multiplier = rng.rand(3).astype(np.float16)
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=multiplier)
x = rng.randn(2, 3).astype(np.float16)
y = operator.matmul(x)
self.assertAllClose(multiplier[..., None, None] * x, self.evaluate(y))
def test_non_scalar_num_rows_raises_static(self):
# Many "test_...num_rows" tests are performed in LinearOperatorIdentity.
with self.assertRaisesRegexp(ValueError, "must be a 0-D Tensor"):
linalg_lib.LinearOperatorScaledIdentity(
num_rows=[2], multiplier=123.)
def test_wrong_matrix_dimensions_raises_static(self):
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=2.2)
x = rng.randn(3, 3).astype(np.float32)
with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
operator.matmul(x)
@test_util.run_deprecated_v1
def test_wrong_matrix_dimensions_raises_dynamic(self):
num_rows = array_ops.placeholder(dtypes.int32)
x = array_ops.placeholder(dtypes.float32)
with self.cached_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows, multiplier=[1., 2], assert_proper_shapes=True)
y = operator.matmul(x)
with self.assertRaisesOpError("Incompatible.*dimensions"):
y.eval(feed_dict={num_rows: 2, x: rng.rand(3, 3)})
def test_broadcast_matmul_and_solve(self):
# These cannot be done in the automated (base test class) tests since they
# test shapes that tf.batch_matmul cannot handle.
# In particular, tf.batch_matmul does not broadcast.
with self.cached_session() as sess:
# Given this x and LinearOperatorScaledIdentity shape of (2, 1, 3, 3), the
# broadcast shape of operator and 'x' is (2, 2, 3, 4)
x = random_ops.random_normal(shape=(1, 2, 3, 4))
# operator is 2.2 * identity (with a batch shape).
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=3, multiplier=2.2 * array_ops.ones((2, 1)))
# Batch matrix of zeros with the broadcast shape of x and operator.
zeros = array_ops.zeros(shape=(2, 2, 3, 4), dtype=x.dtype)
# Test matmul
expected = x * 2.2 + zeros
operator_matmul = operator.matmul(x)
self.assertAllEqual(operator_matmul.get_shape(), expected.get_shape())
self.assertAllClose(*self.evaluate([operator_matmul, expected]))
# Test solve
expected = x / 2.2 + zeros
operator_solve = operator.solve(x)
self.assertAllEqual(operator_solve.get_shape(), expected.get_shape())
self.assertAllClose(*self.evaluate([operator_solve, expected]))
def test_broadcast_matmul_and_solve_scalar_scale_multiplier(self):
# These cannot be done in the automated (base test class) tests since they
# test shapes that tf.batch_matmul cannot handle.
# In particular, tf.batch_matmul does not broadcast.
with self.cached_session() as sess:
# Given this x and LinearOperatorScaledIdentity shape of (3, 3), the
# broadcast shape of operator and 'x' is (1, 2, 3, 4), which is the same
# shape as x.
x = random_ops.random_normal(shape=(1, 2, 3, 4))
      # operator is 2.2 * identity (no batch shape).
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=3, multiplier=2.2)
# Test matmul
expected = x * 2.2
operator_matmul = operator.matmul(x)
self.assertAllEqual(operator_matmul.get_shape(), expected.get_shape())
self.assertAllClose(*self.evaluate([operator_matmul, expected]))
# Test solve
expected = x / 2.2
operator_solve = operator.solve(x)
self.assertAllEqual(operator_solve.get_shape(), expected.get_shape())
self.assertAllClose(*self.evaluate([operator_solve, expected]))
def test_is_x_flags(self):
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=1.,
is_positive_definite=False, is_non_singular=True)
self.assertFalse(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertTrue(operator.is_self_adjoint) # Auto-set due to real multiplier
def test_identity_matmul(self):
operator1 = linalg_lib.LinearOperatorIdentity(num_rows=2)
operator2 = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=3.)
self.assertTrue(isinstance(
operator1.matmul(operator1),
linalg_lib.LinearOperatorIdentity))
self.assertTrue(isinstance(
operator1.matmul(operator1),
linalg_lib.LinearOperatorIdentity))
operator_matmul = operator1.matmul(operator2)
self.assertTrue(isinstance(
operator_matmul,
linalg_lib.LinearOperatorScaledIdentity))
self.assertAllClose(3., self.evaluate(operator_matmul.multiplier))
operator_matmul = operator2.matmul(operator1)
self.assertTrue(isinstance(
operator_matmul,
linalg_lib.LinearOperatorScaledIdentity))
self.assertAllClose(3., self.evaluate(operator_matmul.multiplier))
def test_identity_solve(self):
operator1 = linalg_lib.LinearOperatorIdentity(num_rows=2)
operator2 = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=3.)
self.assertTrue(isinstance(
operator1.solve(operator1),
linalg_lib.LinearOperatorIdentity))
operator_solve = operator1.solve(operator2)
self.assertTrue(isinstance(
operator_solve,
linalg_lib.LinearOperatorScaledIdentity))
self.assertAllClose(3., self.evaluate(operator_solve.multiplier))
def test_scaled_identity_cholesky_type(self):
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2,
multiplier=3.,
is_positive_definite=True,
is_self_adjoint=True,
)
self.assertIsInstance(
operator.cholesky(),
linalg_lib.LinearOperatorScaledIdentity)
def test_scaled_identity_inverse_type(self):
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2,
multiplier=3.,
is_non_singular=True,
)
self.assertIsInstance(
operator.inverse(),
linalg_lib.LinearOperatorScaledIdentity)
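# Illustrative NumPy sketch (not used by the tests) of how the scaled
# identity acts in the tests above: matmul scales the batched right-hand side
# elementwise and solve divides, with the multiplier broadcast over the
# trailing two (matrix) dimensions.
def _np_scaled_identity_matmul_and_solve(multiplier, x):
  m = np.asarray(multiplier)[..., None, None]
  x = np.asarray(x)
  return m * x, x / m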
if __name__ == "__main__":
linear_operator_test_util.add_tests(LinearOperatorIdentityTest)
linear_operator_test_util.add_tests(LinearOperatorScaledIdentityTest)
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg/linear_operator_identity_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_householder as householder
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib
class LinearOperatorHouseholderTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
@staticmethod
def operator_shapes_infos():
shape_info = linear_operator_test_util.OperatorShapesInfo
return [
shape_info((1, 1)),
shape_info((1, 3, 3)),
shape_info((3, 4, 4)),
shape_info((2, 1, 4, 4))]
@staticmethod
def tests_to_skip():
    # A Householder reflection has an eigenvalue of -1, so this operator is
    # never positive definite; skip the Cholesky tests.
return ["cholesky"]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
reflection_axis = linear_operator_test_util.random_sign_uniform(
shape[:-1], minval=1., maxval=2., dtype=dtype)
    # Normalize the reflection axis to unit norm.
reflection_axis = reflection_axis / linalg_ops.norm(
reflection_axis, axis=-1, keepdims=True)
lin_op_reflection_axis = reflection_axis
if use_placeholder:
lin_op_reflection_axis = array_ops.placeholder_with_default(
reflection_axis, shape=None)
operator = householder.LinearOperatorHouseholder(lin_op_reflection_axis)
mat = reflection_axis[..., array_ops.newaxis]
matrix = -2 * math_ops.matmul(mat, mat, adjoint_b=True)
matrix = array_ops.matrix_set_diag(
matrix, 1. + array_ops.matrix_diag_part(matrix))
return operator, matrix
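  # Illustrative sketch, not part of the original test; the helper name is
  # hypothetical. For a unit vector v the Householder reflection is
  # H = I - 2 v v^T, which is what operator_and_matrix builds above by forming
  # -2 v v^T and then adding 1 to its diagonal.
  def _sketch_householder_matrix(self):
    import numpy as np  # Local import; numpy is not imported at module level.
    v = np.array([3., 4.]) / 5.  # Unit-norm reflection axis.
    h = np.eye(2) - 2. * np.outer(v, v)
    # H is a symmetric involution (H H = I) and it negates the axis (H v = -v).
    np.testing.assert_allclose(np.dot(h, h), np.eye(2), atol=1e-12)
    np.testing.assert_allclose(np.dot(h, v), -v, atol=1e-12)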
def test_scalar_reflection_axis_raises(self):
with self.assertRaisesRegexp(ValueError, "must have at least 1 dimension"):
householder.LinearOperatorHouseholder(1.)
def test_householder_adjoint_type(self):
reflection_axis = [1., 3., 5., 8.]
operator = householder.LinearOperatorHouseholder(reflection_axis)
self.assertIsInstance(
operator.adjoint(), householder.LinearOperatorHouseholder)
def test_householder_inverse_type(self):
reflection_axis = [1., 3., 5., 8.]
operator = householder.LinearOperatorHouseholder(reflection_axis)
self.assertIsInstance(
operator.inverse(), householder.LinearOperatorHouseholder)
if __name__ == "__main__":
linear_operator_test_util.add_tests(LinearOperatorHouseholderTest)
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg/linear_operator_householder_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(0)
class SquareLinearOperatorCompositionTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def setUp(self):
    # Increase tolerances from the default 1e-6 to 1e-4.
self._atol[dtypes.float32] = 1e-4
self._atol[dtypes.complex64] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._rtol[dtypes.complex64] = 1e-4
@staticmethod
def tests_to_skip():
# Cholesky not implemented.
return ["cholesky"]
def operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
    # Compose either 1 or 2 matrices, chosen at random.
num_operators = rng.randint(low=1, high=3)
matrices = [
linear_operator_test_util.random_positive_definite_matrix(
shape, dtype, force_well_conditioned=True)
for _ in range(num_operators)
]
lin_op_matrices = matrices
if use_placeholder:
lin_op_matrices = [
array_ops.placeholder_with_default(
matrix, shape=None) for matrix in matrices]
operator = linalg.LinearOperatorComposition(
[linalg.LinearOperatorFullMatrix(l) for l in lin_op_matrices],
is_square=True)
matmul_order_list = list(reversed(matrices))
mat = matmul_order_list[0]
for other_mat in matmul_order_list[1:]:
mat = math_ops.matmul(other_mat, mat)
return operator, mat
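  # Illustrative sketch, not part of the original file; the helper name is
  # hypothetical. LinearOperatorComposition([A1, A2]) represents the product
  # A1 @ A2, which is why the loop above accumulates the dense matrices in
  # reversed order.
  def _sketch_composition_order(self):
    a1 = rng.rand(2, 2).astype(np.float32)
    a2 = rng.rand(2, 2).astype(np.float32)
    composed = linalg.LinearOperatorComposition(
        [linalg.LinearOperatorFullMatrix(a1),
         linalg.LinearOperatorFullMatrix(a2)])
    self.assertAllClose(np.dot(a1, a2), self.evaluate(composed.to_dense()))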
def test_is_x_flags(self):
    # Matrix with two positive eigenvalues, 1 and 1.
    # The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorComposition(
[linalg.LinearOperatorFullMatrix(matrix)],
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
def test_is_non_singular_auto_set(self):
# Matrix with two positive eigenvalues, 11 and 8.
    # The matrix values do not affect auto-setting of the flags.
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator_2 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator = linalg.LinearOperatorComposition(
[operator_1, operator_2],
is_positive_definite=False, # No reason it HAS to be False...
is_non_singular=None)
self.assertFalse(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
with self.assertRaisesRegexp(ValueError, "always non-singular"):
linalg.LinearOperatorComposition(
[operator_1, operator_2], is_non_singular=False)
def test_name(self):
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, name="left")
operator_2 = linalg.LinearOperatorFullMatrix(matrix, name="right")
operator = linalg.LinearOperatorComposition([operator_1, operator_2])
self.assertEqual("left_o_right", operator.name)
def test_different_dtypes_raises(self):
operators = [
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3)),
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3).astype(np.float32))
]
with self.assertRaisesRegexp(TypeError, "same dtype"):
linalg.LinearOperatorComposition(operators)
def test_empty_operators_raises(self):
with self.assertRaisesRegexp(ValueError, "non-empty"):
linalg.LinearOperatorComposition([])
class NonSquareLinearOperatorCompositionTest(
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def setUp(self):
    # Increase tolerances from the default 1e-6 to 1e-4.
self._atol[dtypes.float32] = 1e-4
self._atol[dtypes.complex64] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._rtol[dtypes.complex64] = 1e-4
def operator_and_matrix(self, build_info, dtype, use_placeholder):
sess = ops.get_default_session()
shape = list(build_info.shape)
# Test only the case of 2 matrices.
# The Square test uses either 1 or 2, so we have tested the case of 1 matrix
# sufficiently.
num_operators = 2
# Create 2 matrices/operators, A1, A2, which becomes A = A1 A2.
# Use inner dimension of 2.
k = 2
batch_shape = shape[:-2]
shape_1 = batch_shape + [shape[-2], k]
shape_2 = batch_shape + [k, shape[-1]]
matrices = [
linear_operator_test_util.random_normal(
shape_1, dtype=dtype), linear_operator_test_util.random_normal(
shape_2, dtype=dtype)
]
lin_op_matrices = matrices
if use_placeholder:
lin_op_matrices = [
array_ops.placeholder_with_default(
matrix, shape=None) for matrix in matrices]
operator = linalg.LinearOperatorComposition(
[linalg.LinearOperatorFullMatrix(l) for l in lin_op_matrices])
matmul_order_list = list(reversed(matrices))
mat = matmul_order_list[0]
for other_mat in matmul_order_list[1:]:
mat = math_ops.matmul(other_mat, mat)
return operator, mat
@test_util.run_deprecated_v1
def test_static_shapes(self):
operators = [
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 4)),
linalg.LinearOperatorFullMatrix(rng.rand(2, 4, 5))
]
operator = linalg.LinearOperatorComposition(operators)
self.assertAllEqual((2, 3, 5), operator.shape)
@test_util.run_deprecated_v1
def test_shape_tensors_when_statically_available(self):
operators = [
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 4)),
linalg.LinearOperatorFullMatrix(rng.rand(2, 4, 5))
]
operator = linalg.LinearOperatorComposition(operators)
with self.cached_session():
self.assertAllEqual((2, 3, 5), operator.shape_tensor().eval())
@test_util.run_deprecated_v1
def test_shape_tensors_when_only_dynamically_available(self):
mat_1 = rng.rand(1, 2, 3, 4)
mat_2 = rng.rand(1, 2, 4, 5)
mat_ph_1 = array_ops.placeholder(dtypes.float64)
mat_ph_2 = array_ops.placeholder(dtypes.float64)
feed_dict = {mat_ph_1: mat_1, mat_ph_2: mat_2}
operators = [
linalg.LinearOperatorFullMatrix(mat_ph_1),
linalg.LinearOperatorFullMatrix(mat_ph_2)
]
operator = linalg.LinearOperatorComposition(operators)
with self.cached_session():
self.assertAllEqual(
(1, 2, 3, 5), operator.shape_tensor().eval(feed_dict=feed_dict))
if __name__ == "__main__":
linear_operator_test_util.add_tests(SquareLinearOperatorCompositionTest)
linear_operator_test_util.add_tests(NonSquareLinearOperatorCompositionTest)
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg/linear_operator_composition_test.py
|