# -*- coding: utf-8 -*-
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for py_func op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import re
import numpy as np
from six.moves import queue
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
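# Helper functions shared by the tests below: np_func exercises a pure-NumPy
# computation through py_func, while matmul wraps a TensorFlow op for the
# eager_py_func tests.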
def np_func(x, y):
return np.sinh(x) + np.cosh(y)
def matmul(x, y):
return math_ops.matmul(x, y)
class PyFuncTest(test.TestCase):
"""Encapsulates tests for py_func and eager_py_func."""
# ----- Tests for py_func -----
def testRealDataTypes(self):
def sum_func(x, y):
return x + y
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.uint8, dtypes.int8, dtypes.uint16, dtypes.int16,
dtypes.int32, dtypes.int64]:
with self.cached_session():
x = constant_op.constant(1, dtype=dtype)
y = constant_op.constant(2, dtype=dtype)
z = self.evaluate(script_ops.py_func(sum_func, [x, y], dtype))
self.assertEqual(z, 3)
def testComplexDataTypes(self):
def sub_func(x, y):
return x - y
for dtype in [dtypes.complex64, dtypes.complex128]:
with self.cached_session():
x = constant_op.constant(1 + 1j, dtype=dtype)
y = constant_op.constant(2 - 2j, dtype=dtype)
z = self.evaluate(script_ops.py_func(sub_func, [x, y], dtype))
self.assertEqual(z, -1 + 3j)
def testBoolDataTypes(self):
def and_func(x, y):
return x and y
dtype = dtypes.bool
with self.cached_session():
x = constant_op.constant(True, dtype=dtype)
y = constant_op.constant(False, dtype=dtype)
z = self.evaluate(script_ops.py_func(and_func, [x, y], dtype))
self.assertEqual(z, False)
def testSingleType(self):
with self.cached_session():
x = constant_op.constant(1.0, dtypes.float32)
y = constant_op.constant(2.0, dtypes.float32)
z = self.evaluate(script_ops.py_func(np_func, [x, y], dtypes.float32))
self.assertEqual(z, np_func(1.0, 2.0).astype(np.float32))
def testScalar(self):
with self.cached_session():
x = constant_op.constant(1.0, dtypes.float32)
y = constant_op.constant(2.0, dtypes.float32)
z = self.evaluate(
script_ops.eager_py_func(np_func, [x, y], [dtypes.float32]))
self.assertEqual(z[0], np_func(1.0, 2.0).astype(np.float32))
@test_util.run_v1_only("b/120545219")
def testArray(self):
with self.cached_session():
x = constant_op.constant([1.0, 2.0], dtypes.float64)
y = constant_op.constant([2.0, 3.0], dtypes.float64)
z = self.evaluate(script_ops.py_func(np_func, [x, y], [dtypes.float64]))
self.assertAllEqual(z[0],
np_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))
def testComplexType(self):
with self.cached_session():
x = constant_op.constant(1 + 2j, dtypes.complex64)
y = constant_op.constant(3 + 4j, dtypes.complex64)
z = self.evaluate(script_ops.py_func(np_func, [x, y], dtypes.complex64))
self.assertAllClose(z, np_func(1 + 2j, 3 + 4j))
def testRFFT(self):
with self.cached_session():
x = constant_op.constant([1., 2., 3., 4.], dtypes.float32)
def rfft(x):
return np.fft.rfft(x).astype(np.complex64)
y = self.evaluate(script_ops.py_func(rfft, [x], dtypes.complex64))
self.assertAllClose(y, np.fft.rfft([1., 2., 3., 4.]))
def testPythonLiteral(self):
with self.cached_session():
def literal(x):
return 1.0 if float(x) == 0.0 else 0.0
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(script_ops.py_func(literal, [x], dtypes.float64))
self.assertAllClose(y, 1.0)
def testList(self):
with self.cached_session():
def list_func(x):
return [x, x + 1]
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(
script_ops.py_func(list_func, [x], [dtypes.float64] * 2))
self.assertAllClose(y, [0.0, 1.0])
def testTuple(self):
# returns a tuple
with self.cached_session():
def tuple_func(x):
return x, x + 1
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(
script_ops.py_func(tuple_func, [x], [dtypes.float64] * 2))
self.assertAllClose(y, [0.0, 1.0])
# returns a tuple, Tout and inp a tuple
with self.cached_session():
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(
script_ops.py_func(tuple_func, (x,),
(dtypes.float64, dtypes.float64)))
self.assertAllClose(y, [0.0, 1.0])
@test_util.run_v1_only("b/120545219")
def testStrings(self):
def read_fixed_length_numpy_strings():
return np.array([b" there"])
def read_and_return_strings(x, y):
return x + y
with self.cached_session():
x = constant_op.constant([b"hello", b"hi"], dtypes.string)
y = self.evaluate(
script_ops.py_func(read_fixed_length_numpy_strings, [],
dtypes.string))
z = self.evaluate(
script_ops.py_func(read_and_return_strings, [x, y], dtypes.string))
self.assertAllEqual(z, [b"hello there", b"hi there"])
@test_util.run_v1_only("b/120545219")
def testStringsAreConvertedToBytes(self):
def read_fixed_length_numpy_strings():
return np.array([" there"])
def read_and_return_strings(x, y):
return x + y
with self.cached_session():
x = constant_op.constant(["hello", "hi"], dtypes.string)
y = self.evaluate(
script_ops.py_func(read_fixed_length_numpy_strings, [],
dtypes.string))
z = self.evaluate(
script_ops.py_func(read_and_return_strings, [x, y], dtypes.string))
self.assertAllEqual(z, [b"hello there", b"hi there"])
@test_util.run_v1_only("b/120545219")
def testObjectArraysAreConvertedToBytes(self):
def read_object_array():
return np.array([b" there", u" ya"], dtype=np.object)
def read_and_return_strings(x, y):
return x + y
with self.cached_session():
x = constant_op.constant(["hello", "hi"], dtypes.string)
y, = script_ops.py_func(read_object_array, [],
[dtypes.string])
z, = script_ops.py_func(read_and_return_strings, [x, y], [dtypes.string])
self.assertListEqual(list(z.eval()), [b"hello there", b"hi ya"])
@test_util.run_v1_only("b/120545219")
def testStringPadding(self):
correct = [b"this", b"is", b"a", b"test"]
with self.cached_session():
s, = script_ops.py_func(lambda: [correct], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct)
@test_util.run_v1_only("b/120545219")
def testStringPaddingAreConvertedToBytes(self):
inp = ["this", "is", "a", "test"]
correct = [b"this", b"is", b"a", b"test"]
with self.cached_session():
s, = script_ops.py_func(lambda: [inp], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct)
@test_util.run_v1_only("b/120545219")
def testNulTerminatedStrings(self):
inp = np.array(["this\0", "is\0\0", "a\0", "test\0\0"], dtype=np.str_)
correct = [b"this", b"is", b"a", b"test"]
with self.cached_session():
s, = script_ops.py_func(lambda: [inp], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct)
@test_util.run_v1_only("b/120545219")
def testLarge(self):
with self.cached_session() as sess:
x = array_ops.zeros([1000000], dtype=np.float32)
y = script_ops.py_func(lambda x: x + 1, [x], [dtypes.float32])
z = script_ops.py_func(lambda x: x * 2, [x], [dtypes.float32])
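      # Run both ops repeatedly to stress the NumPy <-> Tensor roundtrip on a
      # large input.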
for _ in xrange(100):
sess.run([y[0].op, z[0].op])
def testNoInput(self):
with self.cached_session():
x = self.evaluate(script_ops.py_func(lambda: 42.0, [], dtypes.float64))
self.assertAllClose(x, 42.0)
@test_util.run_v1_only("b/120545219")
def testAlias(self):
with self.cached_session():
np_array = np.array([1.0, 2.0], dtype=np.float32)
tf_array = script_ops.py_func(lambda: np_array, [], [dtypes.float32])
value = tf_array + constant_op.constant([2.0, 3.0], dtype=dtypes.float32)
value.op.run()
self.assertAllEqual(np_array, [1.0, 2.0])
@test_util.run_v1_only("b/120545219")
def testReturnUnicodeString(self):
with self.cached_session():
correct = u"你好 世界"
def unicode_string():
return correct
z, = script_ops.py_func(unicode_string, [], [dtypes.string])
self.assertEqual(z.eval(), correct.encode("utf8"))
@test_util.run_v1_only("b/120545219")
def testBadNumpyReturnType(self):
with self.cached_session():
def bad():
# Structured numpy arrays aren't supported.
return np.array([], dtype=[("foo", np.float32)])
y, = script_ops.py_func(bad, [], [dtypes.float32])
with self.assertRaisesRegexp(errors.InternalError,
"Unsupported numpy data type"):
self.evaluate(y)
@test_util.run_v1_only("b/120545219")
def testBadReturnType(self):
with self.cached_session():
def bad():
# Non-string python objects aren't supported.
return {"foo": dtypes.float32}
z, = script_ops.py_func(bad, [], [dtypes.int64])
with self.assertRaisesRegexp(errors.InternalError,
"Unsupported object type"):
self.evaluate(z)
@test_util.run_v1_only("b/120545219")
def testReturnInput(self):
with self.cached_session():
def ident(x):
return x[0]
p = array_ops.placeholder(dtypes.float32)
# Create a numpy array aliasing a tensor and a tensor aliasing this array
z, = script_ops.py_func(ident, [p], [dtypes.float32])
z += 0.0 # Makes sure we release the tensor aliasing the numpy array x[0]
# above instead of using its memory as the return value of
# session.run
self.assertEqual(0.0, z.eval(feed_dict={p: [0.0]}))
def testStateful(self):
# Not using self.cached_session(), which disables optimization.
with session_lib.Session() as sess:
producer = iter(range(3))
x, = script_ops.py_func(lambda: next(producer), [], [dtypes.int64])
self.assertEqual(self.evaluate(x), 0)
self.assertEqual(self.evaluate(x), 1)
self.assertEqual(self.evaluate(x), 2)
@test_util.enable_tf_xla_constant_folding("b/134376434")
def testStateless(self):
# Not using self.cached_session(), which disables optimization.
with session_lib.Session() as sess:
producer = iter(range(3))
x, = script_ops.py_func(
lambda: next(producer), [], [dtypes.int64], stateful=False)
self.assertEqual(self.evaluate(x), 0)
self.assertEqual(self.evaluate(x), 0)
self.assertEqual(self.evaluate(x), 0)
@test_util.run_v1_only("b/120545219")
def testGradientFunction(self):
    # An input to tf.compat.v1.py_func is necessary here; otherwise
    # get_gradient_function() returns None by default.
a = constant_op.constant(0)
x, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64])
y, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64], stateful=False)
self.assertEqual(None, ops.get_gradient_function(x.op))
self.assertEqual(None, ops.get_gradient_function(y.op))
@test_util.run_v1_only("b/120545219")
def testCOrder(self):
with self.cached_session():
val = [[1, 2], [3, 4]]
x, = script_ops.py_func(lambda: np.array(val, order="F"), [],
[dtypes.int64])
self.assertAllEqual(val, self.evaluate(x))
@test_util.run_v1_only("b/120545219")
def testParallel(self):
    # Tests that tf.compat.v1.py_func ops can run in parallel if they release
    # the GIL.
with self.cached_session() as session:
q = queue.Queue(1)
def blocking_put():
q.put(42)
q.join() # Wait for task_done().
return 42
def blocking_get():
v = q.get(block=True) # Wait for put().
q.task_done()
return v
x, = script_ops.py_func(blocking_put, [], [dtypes.int64])
y, = script_ops.py_func(blocking_get, [], [dtypes.int64])
      # This will deadlock if the py_func ops don't run in parallel.
session.run([x, y])
def testNoReturnValueStateful(self):
class State(object):
def __init__(self):
self._value = np.array([1], np.int64)
def _increment(self, diff):
self._value += diff
def increment(self, diff):
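        # An empty Tout list produces an op with no outputs, so evaluating it
        # returns None (checked below).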
return script_ops.py_func(self._increment, [diff], [], stateful=True)
@property
def value(self):
return self._value
with self.cached_session():
s = State()
op = s.increment(constant_op.constant(2, dtypes.int64))
ret = self.evaluate(op)
self.assertIsNone(ret)
self.assertAllEqual([3], s.value)
@test_util.run_v1_only("b/120545219")
def testNoReturnValueStateless(self):
def do_nothing(unused_x):
pass
f = script_ops.py_func(
do_nothing, [constant_op.constant(3, dtypes.int64)], [], stateful=False)
with self.cached_session() as sess:
self.assertEqual(self.evaluate(f), [])
def _testExceptionHandling(self, py_exp, tf_exp, eager=False):
def inner_exception():
raise py_exp("blah") # pylint: disable=not-callable
def raise_exception():
inner_exception()
expected_regexp = r": blah.*" # Error at the top
expected_regexp += r"in raise_exception.*" # Stacktrace outer
expected_regexp += r"in inner_exception.*" # Stacktrace inner
expected_regexp += r": blah" # Stacktrace of raise
def expected_error_check(exception):
return re.search(expected_regexp, str(exception), re.DOTALL)
if eager:
if context.executing_eagerly():
with self.assertRaisesWithPredicateMatch(tf_exp, expected_error_check):
f = script_ops.eager_py_func(raise_exception, [], [])
return
else:
f = script_ops.eager_py_func(raise_exception, [], [])
else:
f = script_ops.py_func(raise_exception, [], [])
with self.assertRaisesWithPredicateMatch(tf_exp, expected_error_check):
self.evaluate(f)
@test_util.run_v1_only("b/120545219")
def testExceptionHandling(self):
with self.cached_session():
self._testExceptionHandling(ValueError, errors.InvalidArgumentError)
self._testExceptionHandling(TypeError, errors.InvalidArgumentError)
self._testExceptionHandling(StopIteration, errors.OutOfRangeError)
self._testExceptionHandling(MemoryError, errors.ResourceExhaustedError)
self._testExceptionHandling(NotImplementedError,
errors.UnimplementedError)
class WeirdError(Exception):
pass
self._testExceptionHandling(WeirdError, errors.UnknownError)
# ----- Tests shared by py_func and eager_py_func -----
def testCleanup(self):
# Delete everything created by previous tests to avoid side effects.
ops.reset_default_graph()
gc.collect()
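    # script_ops._py_funcs is the global registry of Python callables backing
    # py_func ops; its size should return to this baseline once the graphs
    # created below are garbage-collected.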
initial_size = script_ops._py_funcs.size()
# Encapsulate the graph generation, so locals can be deleted.
def make_graphs():
for _ in xrange(1000):
g = ops.Graph()
with g.as_default():
c = constant_op.constant([1.], dtypes.float32)
_ = script_ops.py_func(lambda x: x + 1, [c], [dtypes.float32])
_ = script_ops.eager_py_func(lambda x: x + 1, [c], [dtypes.float32])
          # These ops hold a reference to 'c', which in turn references the
          # graph. Check that the functions are deleted even though the graph
          # is referenced from them (see #18292).
          _ = script_ops.py_func(lambda x: x + c.shape[0], [c],
                                 [dtypes.float32])
          _ = script_ops.eager_py_func(lambda x: x + c.shape[0], [c],
                                       [dtypes.float32])
# Call garbage collector to enforce deletion.
make_graphs()
ops.reset_default_graph()
gc.collect()
self.assertEqual(initial_size, script_ops._py_funcs.size())
# ----- Tests for eager_py_func -----
@test_util.run_in_graph_and_eager_modes
def testEagerSingleOutputInt32(self):
a = array_ops.ones((3, 3), dtype=dtypes.int32)
x = array_ops.ones((3, 1), dtype=dtypes.int32)
output = script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.int32)
ret = self.evaluate(output)
self.assertAllEqual(ret, [[3], [3], [3]])
@test_util.run_in_graph_and_eager_modes
def testRenamedDeviceInTestClusterCorrectlyIdentifiedAsLocalhost(self):
if context.executing_eagerly():
self.skipTest("b/126565353: We don't test eager's remote execution.")
workers, _ = test_util.create_local_cluster(num_workers=1, num_ps=0)
worker = workers[0]
session = session_lib.Session(worker.target)
with ops.device("/job:worker/task:0/cpu:0"):
a = array_ops.ones((3, 3), dtype=dtypes.float32)
x = array_ops.ones((3, 1), dtype=dtypes.float32)
output = script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.float32)
ret = session.run(output)
self.assertAllClose(ret, [[3.0], [3.0], [3.0]])
@test_util.run_in_graph_and_eager_modes
def testEagerSingleOutputFloat32(self):
with test_util.device(use_gpu=True):
a = array_ops.ones((3, 3), dtype=dtypes.float32)
x = array_ops.ones((3, 1), dtype=dtypes.float32)
output = script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.float32)
ret = self.evaluate(output)
self.assertAllClose(ret, [[3.0], [3.0], [3.0]])
@test_util.run_in_graph_and_eager_modes
def testEagerArrayOutput(self):
with test_util.device(use_gpu=True):
a = array_ops.ones((3, 3), dtype=dtypes.float32)
x = array_ops.ones((3, 1), dtype=dtypes.float32)
output = script_ops.eager_py_func(
lambda a, x: [matmul(a, x)], inp=[a, x], Tout=[dtypes.float32])
ret = self.evaluate(output)
self.assertAllEqual(ret, [[[3.0], [3.0], [3.0]]])
@test_util.run_in_graph_and_eager_modes
def testEagerReturnNone(self):
with test_util.device(use_gpu=True):
def no_return_value():
return
output = script_ops.eager_py_func(no_return_value, inp=[], Tout=[])
ret = self.evaluate(output)
if context.executing_eagerly():
        self.assertEqual(len(ret), 0)
else:
self.assertIsNone(ret)
@test_util.run_in_graph_and_eager_modes
def testEagerPyFuncInDefun(self):
with test_util.device(use_gpu=True):
def wrapper():
a = array_ops.ones((3, 3), dtype=dtypes.float32)
x = array_ops.ones((3, 1), dtype=dtypes.float32)
return script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.float32)
wrapped = function.defun(wrapper)
ret = self.evaluate(wrapped())
self.assertAllEqual(ret, [[3.0], [3.0], [3.0]])
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testEagerExceptionHandling(self):
with test_util.device(use_gpu=True):
self._testExceptionHandling(
ValueError, errors.InvalidArgumentError, eager=True)
self._testExceptionHandling(
TypeError, errors.InvalidArgumentError, eager=True)
self._testExceptionHandling(
StopIteration, errors.OutOfRangeError, eager=True)
self._testExceptionHandling(
MemoryError, errors.ResourceExhaustedError, eager=True)
self._testExceptionHandling(
NotImplementedError, errors.UnimplementedError, eager=True)
class WeirdError(Exception):
pass
self._testExceptionHandling(WeirdError, errors.UnknownError, eager=True)
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testEagerReturningVariableRaisesError(self):
def return_variable():
return resource_variable_ops.ResourceVariable(0.0)
with self.assertRaisesRegexp(errors.UnknownError,
"Attempting to return a variable"):
output = script_ops.eager_py_func(
return_variable, inp=[], Tout=dtypes.float32)
self.evaluate(output)
@test_util.run_in_graph_and_eager_modes
def testEagerGradientTape(self):
def f(x):
return x**2
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
tape.watch(x)
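      # The tape records the EagerPyFunc op; differentiating it calls back
      # into Python.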
y = script_ops.eager_py_func(f, inp=[x], Tout=dtypes.float32)
dy_dx = tape.gradient(y, x)
self.assertEqual(self.evaluate(dy_dx), 6.0)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraph(self):
def f(x):
return x**2
x = constant_op.constant(3.0)
y = script_ops.eager_py_func(f, inp=[x], Tout=dtypes.float32)
dy_dx = gradients_impl.gradients(y, x)[0]
self.assertEqual(self.evaluate(dy_dx), 6.0)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraphTwoOutputs(self):
def f(x, y):
return x * y, x / y
x = constant_op.constant(3.0)
y = constant_op.constant(2.0)
fa, fb = script_ops.eager_py_func(f, inp=[x, y],
Tout=[dtypes.float32, dtypes.float32])
dy_dx = gradients_impl.gradients(fa + fb, x)[0]
self.assertEqual(self.evaluate(dy_dx), 2.5)
@test_util.run_in_graph_and_eager_modes
def testEagerGradientTapeMultipleArgs(self):
def f(x, y):
return x**2 + y**2
x = constant_op.constant(3.0)
y = constant_op.constant(4.0)
with backprop.GradientTape() as tape:
tape.watch(x)
tape.watch(y)
z = script_ops.eager_py_func(f, inp=[x, y], Tout=dtypes.float32)
dz_dx, dz_dy = tape.gradient(z, [x, y])
self.assertEqual(self.evaluate(dz_dx), 6.0)
self.assertEqual(self.evaluate(dz_dy), 8.0)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraphMultipleArgs(self):
def f(x, y):
return x**2 + y**2
x = constant_op.constant(3.0)
y = constant_op.constant(4.0)
z = script_ops.eager_py_func(f, inp=[x, y], Tout=dtypes.float32)
dz_dx, dz_dy = gradients_impl.gradients(z, [x, y])
self.assertEqual(self.evaluate(dz_dx), 6.0)
self.assertEqual(self.evaluate(dz_dy), 8.0)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraphLogHuber(self):
def log_huber(x, m):
if math_ops.abs(x) <= m:
return x**2
else:
return m**2 * (1 - 2 * math_ops.log(m) + math_ops.log(x**2))
x = array_ops.placeholder(dtypes.float32)
m = array_ops.placeholder(dtypes.float32)
y = script_ops.eager_py_func(
func=log_huber, inp=[x, m], Tout=dtypes.float32)
dy_dx = gradients_impl.gradients(y, x)[0]
with self.cached_session() as sess:
# Takes the first branch of log_huber.
y, dy_dx = sess.run([y, dy_dx], feed_dict={x: 1.0, m: 2.0})
self.assertEqual(y, 1.0)
self.assertEqual(dy_dx, 2.0)
@test_util.run_v1_only("b/120545219")
  def testEagerRespectsDevicePlacementOfOp(self):
def f(x):
return math_ops.square(x)
def g(x):
return math_ops.add(x, x)
with ops.device("/CPU:0"):
# Explicitly ask for the py_funcs to execute on CPU, even if
# a GPU is available.
x = array_ops.placeholder(dtypes.float32)
y = script_ops.eager_py_func(func=f, inp=[x], Tout=dtypes.float32)
z = script_ops.eager_py_func(func=g, inp=[y], Tout=dtypes.float32)
with self.session(use_gpu=True) as sess:
output = sess.run(z, feed_dict={x: 3.0})
self.assertEqual(output, 18.0)
if __name__ == "__main__":
test.main()
# ==== tensorflow-master :: tensorflow/python/kernel_tests/py_func_test.py ====
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Conv3DTransposeTest(test.TestCase):
def testConv3DTransposeSingleStride(self):
with self.cached_session():
strides = [1, 1, 1, 1, 1]
# Input, output: [batch, depth, height, width, channel]
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 5, 6, 4, 2]
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
# We count the number of cells being added at the locations in the output.
# At the center, #cells = kernel_depth * kernel_height * kernel_width
# At the corners, #cells = ceil(kernel_depth/2) * ceil(kernel_height/2)
# * ceil(kernel_width/2)
# At the edges, #cells =
# kernel_depth * ceil(kernel_height/2) * ceil(kernel_width/2) or
# ceil(kernel_depth/2) * kernel_height * ceil(kernel_width/2) or
# ceil(kernel_depth/2) * ceil(kernel_height/2) * kernel_width
# At the borders, #cells =
# ceil(kernel_depth/2) * kernel_height * kernel_width or
# kernel_depth * ceil(kernel_height/2) * kernel_width or
# kernel_depth * kernel_height * ceil(kernel_width/2)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[3]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
for d in xrange(y_shape[1]):
d_in = d > 0 and d < y_shape[1] - 1
h_in = h > 0 and h < y_shape[2] - 1
w_in = w > 0 and w < y_shape[3] - 1
if d_in + h_in + w_in == 3:
target = 27 * 3.0
elif d_in + h_in + w_in == 2:
target = 18 * 3.0
elif d_in or h_in or w_in:
target = 12 * 3.0
else:
target = 8 * 3.0
self.assertAllClose(target, value[n, d, h, w, k])
def testConv3DTransposeSame(self):
with self.cached_session():
strides = [1, 2, 2, 2, 1]
      # Input, output: [batch, depth, height, width, channel]
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 10, 12, 8, 2]
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[3]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
for d in xrange(y_shape[1]):
# We add a case for locations divisible by the stride.
d_in = d % strides[1] == 0 and 0 < d < y_shape[1] - 1
h_in = h % strides[2] == 0 and 0 < h < y_shape[2] - 1
w_in = w % strides[3] == 0 and 0 < w < y_shape[3] - 1
if d_in + h_in + w_in == 3:
target = 8 * 3.0
elif d_in + h_in + w_in == 2:
target = 4 * 3.0
elif d_in or h_in or w_in:
target = 2 * 3.0
else:
target = 3.0
self.assertAllClose(target, value[n, d, h, w, k])
@test_util.run_deprecated_v1
def testConv3DTransposeShapeMismatch(self):
# Test case for GitHub issue 18460
x_shape = [2, 2, 3, 4, 3]
f_shape = [3, 3, 3, 2, 2]
y_shape = [2, 2, 6, 8, 6]
strides = [1, 1, 2, 2, 2]
np.random.seed(1)
x_value = np.random.random_sample(x_shape).astype(np.float64)
f_value = np.random.random_sample(f_shape).astype(np.float64)
    nn_ops.conv3d_transpose(
        x_value, f_value, y_shape, strides, data_format="NCDHW")
def testConv3DTransposeOutputShapeType(self):
# Test case for GitHub issue 18887
for dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session():
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 5, 6, 4, 2]
f_shape = [3, 3, 3, 2, 3]
strides = [1, 1, 1, 1, 1]
x_value = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f_value = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x_value, f_value, constant_op.constant(y_shape, dtype=dtype),
strides=strides, padding="SAME")
self.evaluate(output)
def testConv3DTransposeValid(self):
with self.cached_session():
strides = [1, 2, 2, 2, 1]
      # Input, output: [batch, depth, height, width, channel]
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 11, 13, 9, 2]
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="VALID")
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[3]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
for d in xrange(y_shape[1]):
# We add a case for locations divisible by the stride.
d_in = d % strides[1] == 0 and pad < d < y_shape[1] - 1 - pad
h_in = h % strides[2] == 0 and pad < h < y_shape[2] - 1 - pad
w_in = w % strides[3] == 0 and pad < w < y_shape[3] - 1 - pad
if d_in + h_in + w_in == 3:
target = 8 * 3.0
elif d_in + h_in + w_in == 2:
target = 4 * 3.0
elif d_in or h_in or w_in:
target = 2 * 3.0
else:
target = 3.0
cache_values[n, d, h, w, k] = target
# copy values in the border
cache_values[n, :, :, 0, k] = cache_values[n, :, :, 1, k]
cache_values[n, :, :, -1, k] = cache_values[n, :, :, -2, k]
cache_values[n, :, 0, :, k] = cache_values[n, :, 1, :, k]
cache_values[n, :, -1, :, k] = cache_values[n, :, -2, :, k]
cache_values[n, 0, :, :, k] = cache_values[n, 1, :, :, k]
cache_values[n, -1, :, :, k] = cache_values[n, -2, :, :, k]
self.assertAllClose(cache_values, value)
@test_util.run_deprecated_v1
def testGradient(self):
x_shape = [2, 3, 4, 3, 2]
f_shape = [3, 3, 3, 2, 2]
y_shape = [2, 6, 8, 6, 2]
strides = [1, 2, 2, 2, 1]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
f_val = np.random.random_sample(f_shape).astype(np.float64)
with self.cached_session():
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
print("conv3d_transpose gradient err = %g " % err)
err_tolerance = 0.0005
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
test.main()
# ==== tensorflow-master :: tensorflow/python/kernel_tests/conv3d_transpose_test.py ====
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for UnicodeEncode op from ragged_string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl as errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import test
class UnicodeEncodeOpTest(test.TestCase, parameterized.TestCase):
def assertRaggedEqual(self, rt, expected):
with self.cached_session() as sess:
value = sess.run(rt)
if isinstance(value, np.ndarray):
value = value.tolist()
elif isinstance(value, ragged_tensor_value.RaggedTensorValue):
value = value.to_list()
self.assertEqual(value, expected)
def testScalar(self):
with self.cached_session():
with self.assertRaises(ValueError):
ragged_string_ops.unicode_encode(72, "UTF-8")
with self.cached_session():
with self.assertRaises(ValueError):
ragged_string_ops.unicode_encode(constant_op.constant(72), "UTF-8")
def testRequireParams(self):
with self.cached_session():
with self.assertRaises(TypeError):
ragged_string_ops.unicode_encode()
with self.cached_session():
with self.assertRaises(TypeError):
ragged_string_ops.unicode_encode(72)
with self.cached_session():
with self.assertRaises(TypeError):
ragged_string_ops.unicode_encode(encoding="UTF-8")
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
def testStrictErrors(self, encoding):
test_value = np.array([72, 101, 2147483647, -1, 111], np.int32)
with self.cached_session() as session:
with self.assertRaises(errors.InvalidArgumentError):
session.run(
ragged_string_ops.unicode_encode(test_value, encoding, "strict"))
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testIgnoreErrors(self, encoding):
test_value = np.array([72, 101, 2147483647, -1, 111], np.int32)
expected_value = u"Heo".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"ignore")
with self.cached_session() as session:
result = session.run(unicode_encode_op)
self.assertIsInstance(result, bytes)
self.assertAllEqual(result, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testReplaceErrors(self, encoding):
test_value = np.array([72, 101, 2147483647, -1, 111], np.int32)
expected_value = u"He\U0000fffd\U0000fffdo".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"replace")
self.assertRaggedEqual(unicode_encode_op, expected_value)
# Test custom replacement character
test_value = np.array([72, 101, 2147483647, -1, 111], np.int32)
expected_value = u"Heooo".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"replace", 111)
self.assertRaggedEqual(unicode_encode_op, expected_value)
# Verify "replace" is default
test_value = np.array([72, 101, 2147483647, -1, 111], np.int32)
expected_value = u"He\U0000fffd\U0000fffdo".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertRaggedEqual(unicode_encode_op, expected_value)
# Replacement_char must be within range
test_value = np.array([72, 101, 2147483647, -1, 111], np.int32)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"replace", 1114112)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(unicode_encode_op)
# -- regular Tensor tests -- #
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testVector(self, encoding):
test_value = np.array([72, 101, 108, 108, 111], np.int32)
expected_value = u"Hello".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertRaggedEqual(unicode_encode_op, expected_value)
test_value = np.array([72, 101, 195, 195, 128516], np.int32)
expected_value = u"He\xc3\xc3\U0001f604".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertRaggedEqual(unicode_encode_op, expected_value)
# Single character string
test_value = np.array([72], np.int32)
expected_value = u"H".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertRaggedEqual(unicode_encode_op, expected_value)
test_value = np.array([128516], np.int32)
expected_value = u"\U0001f604".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertRaggedEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testMatrix(self, encoding):
test_value = np.array(
[[72, 128516, 108, 108, 111], [87, 128516, 114, 108, 100]], np.int32)
expected_value = [
u"H\U0001f604llo".encode(encoding), u"W\U0001f604rld".encode(encoding)
]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test3DimMatrix(self, encoding):
test_value = constant_op.constant(
[[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100]],
[[102, 105, 120, 101, 100], [119, 111, 114, 100, 115]],
[[72, 121, 112, 101, 114], [99, 117, 98, 101, 46]]], np.int32)
expected_value = [[u"Hello".encode(encoding), u"World".encode(encoding)],
[u"fixed".encode(encoding), u"words".encode(encoding)],
[u"Hyper".encode(encoding), u"cube.".encode(encoding)]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertRaggedEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test4DimMatrix(self, encoding):
test_value = constant_op.constant(
[[[[72, 101, 108, 108, 111]], [[87, 111, 114, 108, 100]]],
[[[102, 105, 120, 101, 100]], [[119, 111, 114, 100, 115]]],
[[[72, 121, 112, 101, 114]], [[99, 117, 98, 101, 46]]]], np.int32)
expected_value = [[[u"Hello".encode(encoding)],
[u"World".encode(encoding)]],
[[u"fixed".encode(encoding)],
[u"words".encode(encoding)]],
[[u"Hyper".encode(encoding)],
[u"cube.".encode(encoding)]]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertRaggedEqual(unicode_encode_op, expected_value)
# -- Ragged Tensor tests -- #
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testRaggedMatrix(self, encoding):
test_value = ragged_factory_ops.constant(
[[72, 195, 108, 108, 111], [87, 128516, 114, 108, 100, 46]], np.int32)
expected_value = [
u"H\xc3llo".encode(encoding), u"W\U0001f604rld.".encode(encoding)
]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertRaggedEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test3DimMatrixWithRagged2ndDim(self, encoding):
test_value = ragged_factory_ops.constant(
[[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100]],
[[102, 105, 120, 101, 100]],
[[72, 121, 112, 101, 114], [119, 111, 114, 100, 115],
[99, 117, 98, 101, 46]]], np.int32)
expected_value = [[u"Hello".encode(encoding), u"World".encode(encoding)],
[u"fixed".encode(encoding)],
[
u"Hyper".encode(encoding), u"words".encode(encoding),
u"cube.".encode(encoding)
]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertRaggedEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test3DimMatrixWithRagged3rdDim(self, encoding):
test_value = ragged_factory_ops.constant(
[[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100, 46]],
[[68, 111, 110, 39, 116], [119, 195, 114, 114, 121, 44, 32, 98, 101]],
[[128516], []]], np.int32)
expected_value = [[u"Hello".encode(encoding), u"World.".encode(encoding)],
[
u"Don't".encode(encoding),
u"w\xc3rry, be".encode(encoding)
], [u"\U0001f604".encode(encoding), u"".encode(encoding)]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertRaggedEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test3DimMatrixWithRagged2ndAnd3rdDim(self, encoding):
test_value = ragged_factory_ops.constant(
[[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100, 46]], [],
[[128516]]], np.int32)
expected_value = [[u"Hello".encode(encoding), u"World.".encode(encoding)],
[], [u"\U0001f604".encode(encoding)]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertRaggedEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test4DimRaggedMatrix(self, encoding):
test_value = ragged_factory_ops.constant(
[[[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100]]],
[[[]], [[72, 121, 112, 101]]]], np.int32)
expected_value = [[[u"Hello".encode(encoding), u"World".encode(encoding)]],
[[u"".encode(encoding)], [u"Hype".encode(encoding)]]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertRaggedEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testRaggedMatrixWithMultiDimensionInnerValues(self, encoding):
test_flat_values = constant_op.constant([[[72, 101, 108, 108, 111],
[87, 111, 114, 108, 100]],
[[102, 105, 120, 101, 100],
[119, 111, 114, 100, 115]],
[[72, 121, 112, 101, 114],
[99, 117, 98, 101, 46]]])
test_row_splits = [
constant_op.constant([0, 2, 3], dtype=np.int64),
constant_op.constant([0, 1, 1, 3], dtype=np.int64)
]
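    # The nested row splits add two ragged dimensions around the [3, 2, 5]
    # flat values; unicode_encode collapses the final code-point axis into
    # strings.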
test_value = ragged_tensor.RaggedTensor.from_nested_row_splits(
test_flat_values, test_row_splits)
expected_value = [[[[u"Hello".encode(encoding), u"World".encode(encoding)]],
[]],
[[[u"fixed".encode(encoding), u"words".encode(encoding)],
[u"Hyper".encode(encoding),
u"cube.".encode(encoding)]]]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertRaggedEqual(unicode_encode_op, expected_value)
if __name__ == "__main__":
test.main()
# ==== tensorflow-master :: tensorflow/python/kernel_tests/unicode_encode_op_test.py ====
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for decode_image."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
prefix_path = "tensorflow/core/lib"
class DecodeImageOpTest(test.TestCase):
def testBmp(self):
# Read a real bmp and verify shape
path = os.path.join(prefix_path, "bmp", "testdata", "lena.bmp")
with self.session(use_gpu=True) as sess:
bmp0 = io_ops.read_file(path)
image0 = image_ops.decode_image(bmp0)
image1 = image_ops.decode_bmp(bmp0)
bmp0, image0, image1 = self.evaluate([bmp0, image0, image1])
self.assertEqual(len(bmp0), 4194)
self.assertAllEqual(image0, image1)
@test_util.run_deprecated_v1
def testGif(self):
# Read some real GIFs
path = os.path.join(prefix_path, "gif", "testdata", "scan.gif")
width = 20
height = 40
stride = 5
shape = (12, height, width, 3)
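    # scan.gif holds 12 frames of a bar that sweeps across the image,
    # advancing `stride` pixels per frame; the loop below reconstructs each
    # expected frame.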
with self.session(use_gpu=True) as sess:
gif0 = io_ops.read_file(path)
image0 = image_ops.decode_image(gif0)
image1 = image_ops.decode_gif(gif0)
gif0, image0, image1 = self.evaluate([gif0, image0, image1])
self.assertEqual(image0.shape, shape)
self.assertAllEqual(image0, image1)
for frame_idx, frame in enumerate(image0):
gt = np.zeros(shape[1:], dtype=np.uint8)
start = frame_idx * stride
end = (frame_idx + 1) * stride
if end <= width:
gt[:, start:end, :] = 255
else:
start -= width
end -= width
gt[start:end, :, :] = 255
self.assertAllClose(frame, gt)
bad_channels = image_ops.decode_image(gif0, channels=1)
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(bad_channels)
@test_util.run_deprecated_v1
def testJpeg(self):
# Read a real jpeg and verify shape
path = os.path.join(prefix_path, "jpeg", "testdata", "jpeg_merge_test1.jpg")
with self.session(use_gpu=True) as sess:
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_image(jpeg0)
image1 = image_ops.decode_jpeg(jpeg0)
jpeg0, image0, image1 = self.evaluate([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertAllEqual(image0, image1)
bad_channels = image_ops.decode_image(jpeg0, channels=4)
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(bad_channels)
def testPng(self):
# Read some real PNGs, converting to different channel numbers
inputs = [(1, "lena_gray.png")]
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
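        # channels=0 keeps the channel count of the file itself (channels_in).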
with self.cached_session(use_gpu=True) as sess:
path = os.path.join(prefix_path, "png", "testdata", filename)
png0 = io_ops.read_file(path)
image0 = image_ops.decode_image(png0, channels=channels)
image1 = image_ops.decode_png(png0, channels=channels)
png0, image0, image1 = self.evaluate([png0, image0, image1])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
self.assertAllEqual(image0, image1)
@test_util.run_deprecated_v1
def testInvalidBytes(self):
image_bytes = b"ThisIsNotAnImage!"
decode = image_ops.decode_image(image_bytes)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(decode)
if __name__ == "__main__":
test.main()
# ==== tensorflow-master :: tensorflow/python/kernel_tests/decode_image_op_test.py ====
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.PaddingFIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
@test_util.run_v1_only("PaddingFIFOQueue removed from v2")
class PaddingFIFOQueueTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((None,),), name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list { shape { dim { size: -1 } } } } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32), ((), ()),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list { shape { } shape { } } } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((3, 2),))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
def testParallelEnqueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(3, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
self.evaluate(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(self.evaluate(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32),
((), ()))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = self.evaluate(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, self.evaluate(size))
dequeued_t.op.run()
self.assertEqual(0, self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, (
(None, None),))
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], self.evaluate(size_t))
enqueue_op.run()
self.assertEqual([0], self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, shapes=((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithDynamicShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpToWithDynamicShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testConstructPaddingFIFOQueueWithNoShape(self):
with self.cached_session():
with self.assertRaisesRegexp(
ValueError,
r"When providing partial shapes, a list of shapes must be provided."):
data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32,
None).queue_ref.eval()
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.float32, dtypes_lib.int32),
((), (2,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testMultiEnqueueManyWithPartiallyKnownShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testDequeueUpToNoBlocking(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueManyWithPartiallyKnownShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertTrue(
tensor_shape.TensorShape(float_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertTrue(
tensor_shape.TensorShape(float_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueManyWithPartiallyKnownShapesAndVariableSizeInput(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.string, dtypes_lib.int32),
shapes=((None,), (1, None)))
str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_many(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
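      # dequeue_many pads each variable-size component up to the longest
      # element in the dequeued batch: strings pad with b"" and ints pad
      # with 0, as the expected values below show.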
string_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
[b"abc", b"", b""], [b"abc", b"d", b""],
[b"abc", b"d", b"e"]], string_val)
self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
[[1, 2, 3]]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
string_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueUpToPartiallyKnownShapesAndVariableInputNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.string, dtypes_lib.int32),
shapes=((None,), (1, None)))
str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_up_to(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
[b"abc", b"", b""], [b"abc", b"d", b""],
[b"abc", b"d", b"e"]], string_val)
self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
[[1, 2, 3]]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
string_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, ((4, 4, 4, 4),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testPartiallyKnownHighDimension(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, (
(4, None, 4, None),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32),
((), (2,)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
dtypes_lib.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(
([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many(
(array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
enq = q.enqueue_many(([], []))
self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
with self.assertRaises(ValueError):
q.enqueue((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
def testEnqueueWrongPartiallyKnownShapeAtRuntime(self):
with self.cached_session() as sess:
      # The first dimension of the second component is unknown; the second
      # dimension must be 3.
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (None, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Expected \[\?,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongPartiallyKnownShape(self):
with self.cached_session() as sess:
      # The first dimension of the second component is unknown; the second
      # dimension must be 3.
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (None, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,\?,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
self.evaluate(dequeued_t)
def testParallelEnqueueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
self.evaluate(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(101)
enqueue_op.run()
close_op.run()
# Dequeue up to 101 items in parallel on 10 threads, from closed queue.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(50, dtypes_lib.float32, shapes=((),))
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
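      # With 49 of the 50 slots filled, there is at most one free slot, so
      # the enqueue and dequeue threads are forced to interleave: each
      # enqueue blocks until a dequeue frees a slot.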
def enqueue():
for _ in xrange(100):
self.evaluate(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(self.evaluate(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = array_ops.placeholder(
dtypes_lib.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
        # With equal probability, run enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(
elements_enqueued, elements_enqueued + count, dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
self.evaluate(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
        # With equal probability, run dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, self.evaluate(dequeued_t))
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(
elements_dequeued, elements_dequeued + count, dtype=np.int32)
self.assertAllEqual(expected_range,
dequeuemany_t.eval({
count_placeholder: count
}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.PaddingFIFOQueue(100, dtypes_lib.int32, ((),))
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.PaddingFIFOQueue(total_count, dtypes_lib.int32, ((),))
enqueue_elems_op = q.enqueue_many((elems,))
      # Define a subgraph that first dequeues a count, then uses
      # dequeue_many to dequeue that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
self.assertAllEqual(elems[3:], self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
self.evaluate(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], self.evaluate(dequeued_t))
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeued_t)
self.assertEqual(elems[3], self.evaluate(cleanup_dequeue_t))
def close():
self.evaluate(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, (dtypes_lib.float32,
dtypes_lib.float32), ((), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = self.evaluate([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
self.assertEqual([50.0], self.evaluate(dequeued_t))
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
time.sleep(0.01)
self.assertEqual([50.0], self.evaluate(dequeued_t))
self.assertEqual([60.0], self.evaluate(dequeued_t))
# Make sure the thread finishes before exiting.
thread.join()
def testBlockingEnqueueBeforeClose(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
self.evaluate(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
self.evaluate(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
self.evaluate(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
def testDoesNotLoseValue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(1, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
with self.cached_session():
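      # Two queue objects constructed with the same shared_name share one
      # underlying queue, so state changed through either handle is visible
      # through both.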
q1 = data_flow_ops.PaddingFIFOQueue(
1, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = data_flow_ops.PaddingFIFOQueue(
1, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
with self.cached_session():
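      # Constructing a second queue with the same shared_name but a
      # different capacity, component types, or component shapes must fail
      # when its queue_ref op runs.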
q_a_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_a")
q_a_2 = data_flow_ops.PaddingFIFOQueue(
15, dtypes_lib.float32, ((),), shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_b")
q_b_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.int32, ((),), shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_c")
q_c_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_f")
q_f_2 = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), ((), ()),
shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = []
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),)))
      # Enqueue into and dequeue from a dynamically selected queue.
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.PaddingFIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
q2 = data_flow_ops.PaddingFIFOQueue(15, dtypes_lib.float32, ((),))
enq_q = data_flow_ops.PaddingFIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_many_op)
@test_util.run_deprecated_v1
def testResetOfBlockingOperation(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q_empty = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testBigEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
self.evaluate(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(2, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(self.evaluate(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
self.evaluate(enq)
      # Enough elements have been enqueued to unblock the dequeue.
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.cached_session() as sess:
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
dtypes_lib.bool, dtypes_lib.complex64, dtypes_lib.complex128
]
shape = (32, 4, 128)
q = data_flow_ops.PaddingFIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes_lib.bool:
np_array = np_array > 0
elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = self.evaluate(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testUnknownRank(self):
with self.assertRaisesRegexp(ValueError, "must have a defined rank"):
data_flow_ops.PaddingFIFOQueue(32, [dtypes_lib.float32],
[tensor_shape.TensorShape(None)])
class QueueFromListTest(test.TestCase):
def testQueueFromListShapes(self):
which = constant_op.constant(1)
def _cmp(expected, *shapes):
qs = [
data_flow_ops.PaddingFIFOQueue(10, [dtypes_lib.float32],
[tensor_shape.TensorShape(s)])
for s in shapes
]
s_expected = tensor_shape.TensorShape(expected)
s = data_flow_ops.QueueBase.from_list(which, qs).shapes[0]
if s_expected.ndims is None:
self.assertEqual(s_expected.ndims, s.ndims)
else:
self.assertEqual(s_expected.as_list(), s.as_list())
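    # Each case below checks that from_list merges the per-queue shapes
    # into their most specific common shape: mismatched known dimensions
    # become None, and mismatched ranks yield an unknown rank (None).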
_cmp(None, [1, None], [None])
_cmp([None], [1], [2])
_cmp([1, None], [1, 1], [1, 2])
_cmp([1, None], [1, 1], [1, None])
_cmp([None, None], [None, 1], [1, None])
_cmp([1], [1], [1], [1])
_cmp([None], [1], [None], [1])
_cmp(None, [1, None], [1], [1])
def testQueueFromListShapesMultipleComponents(self):
q_u_u = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([None]), tensor_shape.TensorShape([None])])
q_u_f = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([None]), tensor_shape.TensorShape([1, 2])])
q_f_f = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([3, 4]), tensor_shape.TensorShape([1, 2])])
which = constant_op.constant(1)
s_cmp_1 = data_flow_ops.QueueBase.from_list(which,
[q_u_u, q_u_u, q_u_u]).shapes
self.assertEqual([1, 1], [x.ndims for x in s_cmp_1])
self.assertEqual([None, None], [x.as_list()[0] for x in s_cmp_1])
s_cmp_2 = data_flow_ops.QueueBase.from_list(which,
[q_u_u, q_u_u, q_u_f]).shapes
self.assertEqual([1, None], [x.ndims for x in s_cmp_2])
self.assertEqual([None], s_cmp_2[0].as_list())
s_cmp_3 = data_flow_ops.QueueBase.from_list(which, [q_f_f, q_f_f]).shapes
self.assertEqual([2, 2], [x.ndims for x in s_cmp_3])
self.assertEqual([[3, 4], [1, 2]], [x.as_list() for x in s_cmp_3])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/padding_fifo_queue_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for SpaceToBatch and BatchToSpace ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def space_to_batch_direct(input_array, block_shape, paddings):
"""Direct Python implementation of space-to-batch conversion.
This is used for tests only.
Args:
input_array: N-D array
block_shape: 1-D array of shape [num_block_dims].
paddings: 2-D array of shape [num_block_dims, 2].
Returns:
Converted tensor.
"""
input_array = np.array(input_array)
block_shape = np.array(block_shape)
num_block_dims = len(block_shape)
paddings = np.array(paddings).reshape((len(block_shape), 2))
padded = np.pad(input_array,
pad_width=([[0, 0]] + list(paddings) + [[0, 0]] *
(input_array.ndim - 1 - num_block_dims)),
mode="constant")
reshaped_padded_shape = [input_array.shape[0]]
output_shape = [input_array.shape[0] * np.prod(block_shape)]
for block_dim, block_shape_value in enumerate(block_shape):
reduced_size = padded.shape[block_dim + 1] // block_shape_value
reshaped_padded_shape.append(reduced_size)
output_shape.append(reduced_size)
reshaped_padded_shape.append(block_shape_value)
reshaped_padded_shape.extend(input_array.shape[num_block_dims + 1:])
output_shape.extend(input_array.shape[num_block_dims + 1:])
reshaped_padded = padded.reshape(reshaped_padded_shape)
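  # Move the block-offset axes to the front, followed by the original batch
  # axis, then the reduced spatial axes and any remaining axes; the final
  # reshape folds the block offsets into the batch dimension.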
permuted_reshaped_padded = np.transpose(reshaped_padded, (
list(np.arange(num_block_dims) * 2 + 2) + [0] +
list(np.arange(num_block_dims) * 2 + 1) + list(
np.arange(input_array.ndim - num_block_dims - 1) + 1 + num_block_dims
* 2)))
return permuted_reshaped_padded.reshape(output_shape)
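# A minimal worked example of the helper above (the same case as
# testSmallInput2x2 below): a [1, 2, 2, 1] input with block_shape=[2, 2] and
# zero paddings becomes a [4, 1, 1, 1] output in which each element of the
# 2x2 spatial block lands in its own batch entry:
#   space_to_batch_direct([[[[1], [2]], [[3], [4]]]], [2, 2],
#                         [[0, 0], [0, 0]])
#   => [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]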
class PythonOpImpl(object):
@staticmethod
def space_to_batch(*args, **kwargs):
return array_ops.space_to_batch(*args, **kwargs)
@staticmethod
def batch_to_space(*args, **kwargs):
return array_ops.batch_to_space(*args, **kwargs)
class CppOpImpl(object):
@staticmethod
def space_to_batch(*args, **kwargs):
return gen_array_ops.space_to_batch(*args, **kwargs)
@staticmethod
def batch_to_space(*args, **kwargs):
return gen_array_ops.batch_to_space(*args, **kwargs)
class SpaceToBatchTest(test.TestCase, PythonOpImpl):
"""Tests input-output pairs for the SpaceToBatch and BatchToSpace ops.
This uses the Python compatibility wrapper that forwards to space_to_batch_nd.
"""
def _testPad(self, inputs, paddings, block_size, outputs):
with self.cached_session(use_gpu=True):
# outputs = space_to_batch(inputs)
x_tf = self.space_to_batch(
math_ops.cast(inputs, dtypes.float32),
paddings,
block_size=block_size)
self.assertAllEqual(x_tf.eval(), outputs)
# inputs = batch_to_space(outputs)
x_tf = self.batch_to_space(
math_ops.cast(outputs, dtypes.float32),
paddings,
block_size=block_size)
self.assertAllEqual(x_tf.eval(), inputs)
def _testOne(self, inputs, block_size, outputs):
paddings = np.zeros((2, 2), dtype=np.int32)
self._testPad(inputs, paddings, block_size, outputs)
# [1, 2, 2, 1] <-> [4, 1, 1, 1]
@test_util.run_deprecated_v1
def testSmallInput2x2(self):
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 2
x_out = [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
self._testOne(x_np, block_size, x_out)
# [1, 2, 2, 1] <-> [1, 3, 3, 1] (padding) <-> [9, 1, 1, 1]
@test_util.run_deprecated_v1
def testSmallInput2x2Pad1x0(self):
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.array([[1, 0], [1, 0]], dtype=np.int32)
block_size = 3
x_out = [[[[0]]], [[[0]]], [[[0]]], [[[0]]], [[[1]]], [[[2]]], [[[0]]],
[[[3]]], [[[4]]]]
self._testPad(x_np, paddings, block_size, x_out)
# Test with depth larger than 1.
# [1, 2, 2, 3] <-> [4, 1, 1, 3]
@test_util.run_deprecated_v1
def testDepthInput2x2(self):
x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
block_size = 2
x_out = [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
self._testOne(x_np, block_size, x_out)
# Test for larger input dimensions.
# [1, 4, 4, 1] <-> [4, 2, 2, 1]
@test_util.run_deprecated_v1
def testLargerInput2x2(self):
x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]],
[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
block_size = 2
x_out = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]],
[[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]]
self._testOne(x_np, block_size, x_out)
# Test with batch larger than 1.
# [2, 2, 4, 1] <-> [8, 1, 2, 1]
@test_util.run_deprecated_v1
def testBatchInput2x2(self):
x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]],
[[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
block_size = 2
x_out = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
[[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input spatial dimensions AND batch larger than 1, to ensure
# that elements are correctly laid out spatially and properly interleaved
# along the batch dimension.
# [2, 4, 4, 1] <-> [8, 2, 2, 1]
@test_util.run_deprecated_v1
def testLargerInputBatch2x2(self):
x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]],
[[9], [10], [11], [12]], [[13], [14], [15], [16]]],
[[[17], [18], [19], [20]], [[21], [22], [23], [24]],
[[25], [26], [27], [28]], [[29], [30], [31], [32]]]]
x_out = [[[[1], [3]], [[9], [11]]], [[[17], [19]], [[25], [27]]],
[[[2], [4]], [[10], [12]]], [[[18], [20]], [[26], [28]]],
[[[5], [7]], [[13], [15]]], [[[21], [23]], [[29], [31]]],
[[[6], [8]], [[14], [16]]], [[[22], [24]], [[30], [32]]]]
block_size = 2
self._testOne(x_np, block_size, x_out)
class SpaceToBatchCppTest(SpaceToBatchTest, CppOpImpl):
"""Tests input-output pairs for the SpaceToBatch and BatchToSpace ops.
This uses the C++ ops.
"""
pass
class SpaceToBatchNDTest(test.TestCase):
"""Tests input-output pairs for the SpaceToBatchND and BatchToSpaceND ops."""
def _testPad(self, inputs, block_shape, paddings, outputs):
block_shape = np.array(block_shape)
paddings = np.array(paddings).reshape((len(block_shape), 2))
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
# outputs = space_to_batch(inputs)
x_tf = array_ops.space_to_batch_nd(
math_ops.cast(inputs, dtypes.float32), block_shape, paddings)
self.assertAllEqual(x_tf.eval(), outputs)
# inputs = batch_to_space(outputs)
x_tf = array_ops.batch_to_space_nd(
math_ops.cast(outputs, dtypes.float32), block_shape, paddings)
self.assertAllEqual(x_tf.eval(), inputs)
def _testDirect(self, input_shape, block_shape, paddings):
inputs = np.arange(np.prod(input_shape), dtype=np.float32)
inputs = inputs.reshape(input_shape)
self._testPad(inputs, block_shape, paddings,
space_to_batch_direct(inputs, block_shape, paddings))
@test_util.run_deprecated_v1
def testZeroBlockDimsZeroRemainingDims(self):
self._testPad(
inputs=[1, 2],
block_shape=[],
paddings=[],
outputs=[1, 2],)
@test_util.run_deprecated_v1
def testZeroBlockDimsOneRemainingDim(self):
self._testPad(
inputs=[[1, 2], [3, 4]],
block_shape=[],
paddings=[],
outputs=[[1, 2], [3, 4]])
# Same thing, but with a no-op block dim.
self._testPad(
inputs=[[1, 2], [3, 4]],
block_shape=[1],
paddings=[[0, 0]],
outputs=[[1, 2], [3, 4]])
@test_util.run_deprecated_v1
def testZeroBlockDimsTwoRemainingDims(self):
self._testPad(
inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
block_shape=[],
paddings=[],
outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
# Same thing, but with a no-op block dim.
self._testPad(
inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
block_shape=[1],
paddings=[[0, 0]],
outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
# Same thing, but with two no-op block dims.
self._testPad(
inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
block_shape=[1, 1],
paddings=[[0, 0], [0, 0]],
outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
@test_util.run_deprecated_v1
def testOneBlockDimZeroRemainingDims(self):
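    # Padding one zero in front of each row gives [[0, 1, 2, 3], [0, 4, 5, 6]];
    # splitting that length-4 dimension into two blocks of size 2 interleaves
    # the block offsets into the batch: offset 0 yields rows [0, 2] and
    # [0, 5], offset 1 yields rows [1, 3] and [4, 6].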
self._testPad(
inputs=[[1, 2, 3], [4, 5, 6]],
block_shape=[2],
paddings=[1, 0],
outputs=[[0, 2], [0, 5], [1, 3], [4, 6]])
@test_util.run_deprecated_v1
def testOneBlockDimOneRemainingDim(self):
self._testPad(
inputs=[[[1, 11], [2, 21], [3, 31]], [[4, 41], [5, 51], [6, 61]]],
block_shape=[2],
paddings=[1, 0],
outputs=[[[0, 0], [2, 21]], [[0, 0], [5, 51]], [[1, 11], [3, 31]],
[[4, 41], [6, 61]]])
@test_util.run_deprecated_v1
def testDirect(self):
# Test with zero-size remaining dimension.
self._testDirect(
input_shape=[3, 1, 2, 0], block_shape=[3], paddings=[[0, 2]])
# Test with zero-size blocked dimension.
self._testDirect(
input_shape=[3, 0, 2, 5], block_shape=[3], paddings=[[0, 0]])
# Test with padding up from zero size.
self._testDirect(
input_shape=[3, 0, 2, 5], block_shape=[3], paddings=[[1, 2]])
self._testDirect(
input_shape=[3, 3, 4, 5, 2],
block_shape=[3, 4, 2],
paddings=[[1, 2], [0, 0], [3, 0]])
self._testDirect(
input_shape=[3, 3, 4, 5, 2],
block_shape=[3, 4, 2, 2],
paddings=[[1, 2], [0, 0], [3, 0], [0, 0]])
self._testDirect(
input_shape=[3, 2, 2, 3, 4, 5, 2, 5],
block_shape=[1, 1, 3, 4, 2, 2],
paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0]])
self._testDirect(
input_shape=[3, 2, 2, 3, 4, 5, 2, 5],
block_shape=[1, 1, 3, 4, 2, 2, 1],
paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0], [0, 0]])
class SpaceToBatchSpaceToDepth(test.TestCase, PythonOpImpl):
# Verifies that: space_to_batch(x) = transpose(space_to_depth(transpose(x)))
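  # The transposes use permutation [3, 1, 2, 0], which swaps the batch and
  # depth axes; space_to_depth then packs the spatial blocks into the last
  # axis (the original batch), so transposing back leaves them in the batch
  # dimension, which is exactly what space_to_batch does.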
@test_util.run_deprecated_v1
def testSpaceToDepthTranspose(self):
x = np.arange(5 * 10 * 16 * 7, dtype=np.float32).reshape([5, 10, 16, 7])
block_size = 2
paddings = np.zeros((2, 2), dtype=np.int32)
y1 = self.space_to_batch(x, paddings, block_size=block_size)
y2 = array_ops.transpose(
array_ops.space_to_depth(
array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size),
[3, 1, 2, 0])
with self.session(use_gpu=True):
self.assertAllEqual(y1.eval(), y2.eval())
class SpaceToBatchSpaceToDepthCpp(SpaceToBatchSpaceToDepth, CppOpImpl):
pass
class SpaceToBatchErrorHandlingTest(test.TestCase, PythonOpImpl):
@test_util.run_deprecated_v1
def testInputWrongDimMissingBatch(self):
# The input is missing the first dimension ("batch")
x_np = [[[1], [2]], [[3], [4]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 2
with self.assertRaises(ValueError):
_ = self.space_to_batch(x_np, paddings, block_size)
@test_util.run_deprecated_v1
def testBlockSize0(self):
# The block size is 0.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 0
with self.assertRaises(ValueError):
out_tf = self.space_to_batch(x_np, paddings, block_size)
out_tf.eval()
@test_util.run_deprecated_v1
def testBlockSizeOne(self):
    # The block size is 1, but it must be greater than 1.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 1
with self.assertRaises(ValueError):
out_tf = self.space_to_batch(x_np, paddings, block_size)
out_tf.eval()
@test_util.run_deprecated_v1
def testBlockSizeLarger(self):
# The block size is too large for this input.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 10
with self.assertRaises(ValueError):
out_tf = self.space_to_batch(x_np, paddings, block_size)
out_tf.eval()
@test_util.run_deprecated_v1
def testBlockSizeNotDivisibleWidth(self):
# The block size divides width but not height.
x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 3
with self.assertRaises(ValueError):
_ = self.space_to_batch(x_np, paddings, block_size)
@test_util.run_deprecated_v1
def testBlockSizeNotDivisibleHeight(self):
# The block size divides height but not width.
x_np = [[[[1], [2]], [[3], [4]], [[5], [6]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 3
with self.assertRaises(ValueError):
_ = self.space_to_batch(x_np, paddings, block_size)
@test_util.run_deprecated_v1
def testBlockSizeNotDivisibleBoth(self):
    # The block size divides neither width nor height.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 3
with self.assertRaises(ValueError):
_ = self.space_to_batch(x_np, paddings, block_size)
@test_util.run_deprecated_v1
def testUnknownShape(self):
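    # Even with a fully unknown input shape and paddings, the output rank of
    # the 2-D space_to_batch wrapper is statically known to be 4.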
t = self.space_to_batch(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32),
block_size=4)
self.assertEqual(4, t.get_shape().ndims)
class SpaceToBatchErrorHandlingCppTest(SpaceToBatchErrorHandlingTest,
CppOpImpl):
pass
class SpaceToBatchNDErrorHandlingTest(test.TestCase):
def _testStaticShape(self, input_shape, block_shape, paddings, error):
block_shape = np.array(block_shape)
paddings = np.array(paddings)
# Try with sizes known at graph construction time.
with self.assertRaises(error):
_ = array_ops.space_to_batch_nd(
np.zeros(input_shape, np.float32), block_shape, paddings)
def _testDynamicShape(self, input_shape, block_shape, paddings):
block_shape = np.array(block_shape)
paddings = np.array(paddings)
# Try with sizes unknown at graph construction time.
input_placeholder = array_ops.placeholder(dtypes.float32)
block_shape_placeholder = array_ops.placeholder(
dtypes.int32, shape=block_shape.shape)
paddings_placeholder = array_ops.placeholder(dtypes.int32)
t = array_ops.space_to_batch_nd(input_placeholder, block_shape_placeholder,
paddings_placeholder)
with self.assertRaises(ValueError):
_ = t.eval({
input_placeholder: np.zeros(input_shape, np.float32),
block_shape_placeholder: block_shape,
paddings_placeholder: paddings
})
def _testShape(self, input_shape, block_shape, paddings, error):
self._testStaticShape(input_shape, block_shape, paddings, error)
self._testDynamicShape(input_shape, block_shape, paddings)
@test_util.run_deprecated_v1
def testBlockSize0(self):
# The block size is 0.
self._testShape([1, 2, 2], [0, 2], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testBlockSizeNegative(self):
self._testShape([1, 2, 2], [-1, 2], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testNegativePadding(self):
# The padding is negative.
self._testShape([1, 2, 2], [1, 1], [[0, -1], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testBlockSizeNotDivisible(self):
# The padded size is not divisible by the block size.
self._testShape([1, 2, 3, 1], [3, 3], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testBlockDimsMismatch(self):
# Shape of block_shape does not match shape of paddings.
self._testStaticShape([1, 3, 3, 1], [3, 3], [[0, 0]], ValueError)
@test_util.run_deprecated_v1
def testUnknown(self):
# Verify that input shape and paddings shape can be unknown.
_ = array_ops.space_to_batch_nd(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
    # Only the number of input dimensions is known.
t = array_ops.space_to_batch_nd(
array_ops.placeholder(
dtypes.float32, shape=(None, None, None, None)),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
self.assertEqual(4, t.get_shape().ndims)
# Dimensions are partially known.
t = array_ops.space_to_batch_nd(
array_ops.placeholder(
dtypes.float32, shape=(None, None, None, 2)),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
t = array_ops.space_to_batch_nd(
array_ops.placeholder(
dtypes.float32, shape=(3, None, None, 2)), [2, 3],
array_ops.placeholder(dtypes.int32))
self.assertEqual([3 * 2 * 3, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
t = array_ops.space_to_batch_nd(
array_ops.placeholder(
dtypes.float32, shape=(3, None, 2, 2)), [2, 3], [[1, 1], [0, 1]])
self.assertEqual([3 * 2 * 3, None, 1, 2], t.get_shape().as_list())
# Dimensions are fully known.
t = array_ops.space_to_batch_nd(
array_ops.placeholder(
dtypes.float32, shape=(3, 2, 3, 2)), [2, 3], [[1, 1], [0, 0]])
self.assertEqual([3 * 2 * 3, 2, 1, 2], t.get_shape().as_list())
class SpaceToBatchGradientTest(test.TestCase, PythonOpImpl):
# Check the gradients.
def _checkGrad(self, x, paddings, block_size):
assert 4 == x.ndim
with self.cached_session(use_gpu=True):
tf_x = ops.convert_to_tensor(x)
tf_y = self.space_to_batch(tf_x, paddings, block_size)
epsilon = 1e-5
      (x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
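  # Note: gradient_checker.compute_gradient returns a (theoretical,
  # numerical) Jacobian pair, so the check above asserts that the analytic
  # gradient agrees with a finite-difference estimate of step size `delta`.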
# Tests a gradient for space_to_batch of x which is a four dimensional
# tensor of shape [b, h * block_size, w * block_size, d].
def _compare(self, b, h, w, d, block_size, pad_beg, pad_end):
block_size_sq = block_size * block_size
x = np.random.normal(0, 1, b * h * w * d *
block_size_sq).astype(np.float32).reshape(
[b, h * block_size, w * block_size, d])
paddings = np.array(
[[pad_beg, pad_end], [pad_beg, pad_end]], dtype=np.int32)
self._checkGrad(x, paddings, block_size)
  # Don't use very large numbers as dimensions here, as the result is a
  # tensor with the cartesian product of the dimensions.
@test_util.run_deprecated_v1
def testSmall(self):
block_size = 2
pad_beg = 0
pad_end = 0
self._compare(1, 2, 3, 5, block_size, pad_beg, pad_end)
@test_util.run_deprecated_v1
def testSmall2(self):
block_size = 2
pad_beg = 0
pad_end = 0
self._compare(2, 4, 3, 2, block_size, pad_beg, pad_end)
@test_util.run_deprecated_v1
def testSmallPad1x1(self):
block_size = 2
pad_beg = 1
pad_end = 1
self._compare(1, 2, 3, 5, block_size, pad_beg, pad_end)
class SpaceToBatchGradientCppTest(SpaceToBatchGradientTest, CppOpImpl):
pass
class SpaceToBatchNDGradientTest(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_shape, paddings):
block_shape = np.array(block_shape)
paddings = np.array(paddings).reshape((len(block_shape), 2))
with self.cached_session():
tf_x = ops.convert_to_tensor(x)
tf_y = array_ops.space_to_batch_nd(tf_x, block_shape, paddings)
epsilon = 1e-5
      (x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
def _compare(self, input_shape, block_shape, paddings):
x = np.random.normal(
0, 1, np.prod(input_shape)).astype(np.float32).reshape(input_shape)
self._checkGrad(x, block_shape, paddings)
  # Don't use very large numbers as dimensions here, as the result is a
  # tensor with the cartesian product of the dimensions.
@test_util.run_deprecated_v1
def testSmall(self):
self._compare([1, 4, 6, 5], [2, 2], [[0, 0], [0, 0]])
@test_util.run_deprecated_v1
def testSmall2(self):
self._compare([2, 8, 6, 2], [2, 2], [[0, 0], [0, 0]])
@test_util.run_deprecated_v1
def testSmallPad1(self):
self._compare([2, 4, 6, 2], [2, 2], [[1, 1], [1, 1]])
@test_util.run_deprecated_v1
def testSmallPadThreeBlockDims(self):
self._compare([2, 2, 4, 3, 2], [2, 2, 2], [[1, 1], [1, 1], [1, 0]])
class RequiredSpaceToBatchPaddingsTest(test.TestCase):
def _checkProperties(self, input_shape, block_shape, base_paddings, paddings,
crops):
"""Checks that `paddings` and `crops` satisfy invariants."""
num_block_dims = len(block_shape)
self.assertEqual(len(input_shape), num_block_dims)
if base_paddings is None:
base_paddings = np.zeros((num_block_dims, 2), np.int32)
self.assertEqual(base_paddings.shape, (num_block_dims, 2))
self.assertEqual(paddings.shape, (num_block_dims, 2))
self.assertEqual(crops.shape, (num_block_dims, 2))
for i in range(num_block_dims):
self.assertEqual(paddings[i, 0], base_paddings[i, 0])
self.assertLessEqual(0, paddings[i, 1] - base_paddings[i, 1])
self.assertLess(paddings[i, 1] - base_paddings[i, 1], block_shape[i])
self.assertEqual(
(input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i],
0)
self.assertEqual(crops[i, 0], 0)
self.assertEqual(crops[i, 1], paddings[i, 1] - base_paddings[i, 1])
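  # Worked example of the invariants above: for input_shape=[4],
  # block_shape=[3], base_paddings=[[1, 0]], the padded extent 4 + 1 + 0 = 5
  # is rounded up to the next multiple of 3, giving paddings=[[1, 1]], and
  # the one extra padded element is cropped back out: crops=[[0, 1]].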
def _test(self, input_shape, block_shape, base_paddings):
input_shape = np.array(input_shape)
block_shape = np.array(block_shape)
if base_paddings is not None:
base_paddings = np.array(base_paddings)
# Check with constants.
paddings, crops = array_ops.required_space_to_batch_paddings(input_shape,
block_shape,
base_paddings)
paddings_const = tensor_util.constant_value(paddings)
crops_const = tensor_util.constant_value(crops)
self.assertIsNotNone(paddings_const)
self.assertIsNotNone(crops_const)
self._checkProperties(input_shape, block_shape, base_paddings,
paddings_const, crops_const)
# Check with non-constants.
assignments = {}
input_shape_placeholder = array_ops.placeholder(dtypes.int32)
assignments[input_shape_placeholder] = input_shape
block_shape_placeholder = array_ops.placeholder(dtypes.int32,
[len(block_shape)])
assignments[block_shape_placeholder] = block_shape
if base_paddings is not None:
base_paddings_placeholder = array_ops.placeholder(dtypes.int32,
[len(block_shape), 2])
assignments[base_paddings_placeholder] = base_paddings
else:
base_paddings_placeholder = None
t_paddings, t_crops = array_ops.required_space_to_batch_paddings(
input_shape_placeholder, block_shape_placeholder,
base_paddings_placeholder)
with self.cached_session():
paddings_result = t_paddings.eval(assignments)
crops_result = t_crops.eval(assignments)
self.assertAllEqual(paddings_result, paddings_const)
self.assertAllEqual(crops_result, crops_const)
@test_util.run_deprecated_v1
def testSimple(self):
self._test(
input_shape=np.zeros((0,), np.int32),
block_shape=np.zeros((0,), np.int32),
base_paddings=None)
self._test(
input_shape=np.zeros((0,), np.int32),
block_shape=np.zeros((0,), np.int32),
base_paddings=np.zeros((0, 2), np.int32))
self._test(input_shape=[1], block_shape=[2], base_paddings=None)
self._test(input_shape=[1], block_shape=[2], base_paddings=[[1, 0]])
self._test(input_shape=[3], block_shape=[1], base_paddings=[[1, 2]])
self._test(input_shape=[1], block_shape=[2], base_paddings=[[2, 3]])
self._test(input_shape=[4, 5], block_shape=[3, 2], base_paddings=None)
self._test(
input_shape=[4, 5], block_shape=[3, 2], base_paddings=[[0, 0], [0, 1]])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/spacetobatch_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.control_flow_util_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.platform import test
class ControlFlowUtilV2Test(test.TestCase):
def setUp(self):
self._enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
def tearDown(self):
control_flow_util.ENABLE_CONTROL_FLOW_V2 = self._enable_control_flow_v2_old
def _create_control_flow(self, expect_in_defun):
"""Helper method for testInDefun."""
def body(i):
def branch():
self.assertEqual(control_flow_util_v2.in_defun(), expect_in_defun)
return i + 1
return control_flow_ops.cond(constant_op.constant(True),
branch, lambda: 0)
return control_flow_ops.while_loop(lambda i: i < 4, body,
[constant_op.constant(0)])
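  # Note: in_defun() is expected to ignore the FuncGraphs that control flow
  # v2 itself creates (cond branches and while bodies) and report True only
  # inside a user-defined function such as the defun in testInDefun.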
@test_util.run_in_graph_and_eager_modes
def testInDefun(self):
self._create_control_flow(False)
@function.defun
def defun():
self._create_control_flow(True)
defun()
self.assertFalse(control_flow_util_v2.in_defun())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/control_flow_util_v2_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for local response normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class LRNOpTest(test.TestCase):
def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,
beta=0.5):
"""Compute expected result."""
output = copy.deepcopy(input_image)
batch_size = input_image.shape[0]
rows = input_image.shape[1]
cols = input_image.shape[2]
depth = input_image.shape[3]
for b in range(batch_size):
for r in range(rows):
for c in range(cols):
for d in range(depth):
begin = max(0, d - lrn_depth_radius)
end = min(depth, d + lrn_depth_radius + 1)
patch = input_image[b, r, c, begin:end]
output[b, r, c, d] /= (
np.power(bias + alpha * np.sum(patch * patch), beta))
return output
def _RunAndVerify(self, dtype):
with self.cached_session(use_gpu=True):
# random shape
shape = np.random.randint(1, 16, size=4)
      # Make depth at least 2 so the normalization is meaningful.
shape[3] += 1
p = array_ops.placeholder(dtype, shape=shape)
# random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
# be in [1, 7].
lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
bias = 1.0 + np.random.rand()
alpha = 2.0 * np.random.rand()
# cuDNN requires beta >= 0.01.
beta = 0.01 + 2.0 * np.random.rand()
lrn_t = nn.local_response_normalization(
p,
name="lrn",
depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
params = {p: np.random.rand(*shape).astype("f")}
result = lrn_t.eval(feed_dict=params)
expected = self._LRN(
params[p],
lrn_depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
err = np.amax(np.abs(result - expected))
print("LRN error for bias ", bias, "alpha ", alpha, " beta ", beta, " is ",
err)
      if dtype == dtypes.float32:
        self.assertLess(err, 1e-4)
      else:
        self.assertLess(err, 1e-2)
self.assertShapeEqual(expected, lrn_t)
@test_util.run_deprecated_v1
def testCompute(self):
for _ in range(2):
self._RunAndVerify(dtypes.float32)
# Enable when LRN supports tf.float16 on GPU.
if not test.is_gpu_available():
self._RunAndVerify(dtypes.float16)
@test_util.run_deprecated_v1
def testGradientsZeroInput(self):
with self.session(use_gpu=True):
shape = [4, 4, 4, 4]
p = array_ops.placeholder(dtypes.float32, shape=shape)
inp_array = np.zeros(shape).astype("f")
lrn_op = nn.local_response_normalization(p, 2, 1.0, 0.0, 1.0, name="lrn")
grad = gradients_impl.gradients([lrn_op], [p])[0]
params = {p: inp_array}
r = grad.eval(feed_dict=params)
expected = np.ones(shape).astype("f")
self.assertAllClose(r, expected)
self.assertShapeEqual(expected, grad)
def _RunAndVerifyGradients(self, dtype):
with self.cached_session(use_gpu=True):
# random shape
shape = np.random.randint(1, 5, size=4)
      # Make depth at least 2 so the normalization is meaningful.
shape[3] += 1
# random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
# be in [1, 7].
lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
bias = 1.0 + np.random.rand()
alpha = 1.0 * np.random.rand()
# cuDNN requires beta >= 0.01.
beta = 0.01 + 1.0 * np.random.rand()
if dtype == dtypes.float32:
inp_array = np.random.rand(*shape).astype(np.float32)
else:
inp_array = np.random.rand(*shape).astype(np.float16)
inp = constant_op.constant(
list(inp_array.ravel(order="C")), shape=shape, dtype=dtype)
lrn_op = nn.local_response_normalization(
inp,
name="lrn",
depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
err = gradient_checker.compute_gradient_error(inp, shape, lrn_op, shape)
print("LRN Gradient error for bias ", bias, "alpha ", alpha, " beta ", beta,
" is ", err)
if dtype == dtypes.float32:
self.assertLess(err, 1e-4)
else:
self.assertLess(err, 1.0)
@test_util.run_deprecated_v1
def testGradients(self):
for _ in range(2):
self._RunAndVerifyGradients(dtypes.float32)
# Enable when LRN supports tf.float16 on GPU.
if not test.is_gpu_available():
self._RunAndVerifyGradients(dtypes.float16)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/lrn_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for BiasAdd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class BiasAddTest(test.TestCase):
def _npBias(self, inputs, bias):
assert len(bias.shape) == 1
assert inputs.shape[-1] == bias.shape[0]
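    # Reshape the bias to rank len(inputs.shape) with leading 1s so that it
    # broadcasts along every dimension except the last (channel) one.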
return inputs + bias.reshape(([1] * (len(inputs.shape) - 1)) +
[bias.shape[0]])
def testNpBias(self):
self.assertAllClose(
np.array([[11, 22, 33], [41, 52, 63]]),
self._npBias(
np.array([[10, 20, 30], [40, 50, 60]]), np.array([1, 2, 3])))
def _testBias(self, np_inputs, np_bias, use_gpu=False):
np_val = self._npBias(np_inputs, np_bias)
with self.cached_session(use_gpu=use_gpu):
tf_val = nn_ops.bias_add(np_inputs, np_bias).eval()
self.assertAllCloseAccordingToType(np_val, tf_val)
def _AtLeast3d(self, np_value):
    # Pad the input value to at least 3 dimensions.
if np_value.ndim < 3:
return np.reshape(np_value, (1,) * (3 - np_value.ndim) + np_value.shape)
return np_value
def _NHWCToNCHW(self, np_value):
    # Pad the input value to at least 3 dimensions.
    np_value = self._AtLeast3d(np_value)
    # Move the last dimension to the second position.
np_dim = list(range(np_value.ndim))
np_dim_new = list(np_dim[0:1]) + list(np_dim[-1:]) + list(np_dim[1:-1])
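    # For a 4-D NHWC value this yields transpose axes (0, 3, 1, 2), e.g.
    # shape (2, 4, 4, 3) becomes NCHW shape (2, 3, 4, 4).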
return np.transpose(np_value, np_dim_new)
def _NCHWToNHWC(self, np_value):
assert len(np_value.shape) >= 3
np_dim = list(range(np_value.ndim))
    # Move the second dimension to the last position.
np_dim_new = list(np_dim[0:1]) + list(np_dim[2:]) + list(np_dim[1:2])
return np.transpose(np_value, np_dim_new)
def _testBiasNCHW(self, np_inputs, np_bias, use_gpu):
np_val = self._npBias(np_inputs, np_bias)
np_inputs = self._NHWCToNCHW(np_inputs)
with self.cached_session(use_gpu=use_gpu):
tf_val = nn_ops.bias_add(np_inputs, np_bias, data_format="NCHW").eval()
tf_val = self._NCHWToNHWC(tf_val)
self.assertAllCloseAccordingToType(self._AtLeast3d(np_val), tf_val)
def _testAll(self, np_inputs, np_bias):
self._testBias(np_inputs, np_bias, use_gpu=False)
self._testBiasNCHW(np_inputs, np_bias, use_gpu=False)
if np_inputs.dtype in [np.float16, np.float32, np.float64]:
self._testBias(np_inputs, np_bias, use_gpu=True)
self._testBiasNCHW(np_inputs, np_bias, use_gpu=True)
@test_util.run_deprecated_v1
def testInputDims(self):
with self.assertRaises(ValueError):
nn_ops.bias_add([1, 2], [1])
@test_util.run_deprecated_v1
def testBiasVec(self):
with self.assertRaises(ValueError):
nn_ops.bias_add(
array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1, 2], shape=[1, 2]))
@test_util.run_deprecated_v1
def testBiasInputsMatch(self):
with self.assertRaises(ValueError):
nn_ops.bias_add(
array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1], shape=[1]))
@test_util.run_deprecated_v1
def testIntTypes(self):
for t in [np.int8, np.int16, np.int32, np.int64]:
self._testAll(
np.array([[10, 20, 30], [40, 50, 60]]).astype(t),
np.array([1, 2, 3]).astype(t))
@test_util.run_deprecated_v1
def testFloatTypes(self):
for t in [np.float16, np.float32, np.float64]:
self._testAll(
np.random.rand(4, 3, 3).astype(t), np.random.rand(3).astype(t))
@test_util.run_deprecated_v1
def test4DFloatTypes(self):
for t in [np.float16, np.float32, np.float64]:
self._testAll(
np.random.rand(4, 3, 2, 3).astype(t),
np.random.rand(3).astype(t))
self._testAll(
np.random.rand(2048, 4, 4, 4).astype(t),
np.random.rand(4).astype(t))
self._testAll(
np.random.rand(4, 4, 4, 2048).astype(t),
np.random.rand(2048).astype(t))
@test_util.run_deprecated_v1
def test5DFloatTypes(self):
for t in [np.float16, np.float32, np.float64]:
self._testAll(
np.random.rand(4, 3, 2, 3, 4).astype(t),
np.random.rand(4).astype(t))
def _testGradient(self, np_input, bias, dtype, data_format, use_gpu):
with self.cached_session(use_gpu=use_gpu):
if data_format == "NCHW":
np_input = self._NHWCToNCHW(np_input)
input_tensor = constant_op.constant(
np_input, shape=np_input.shape, dtype=dtype)
bias_tensor = constant_op.constant(bias, shape=bias.shape, dtype=dtype)
output_tensor = nn_ops.bias_add(
input_tensor, bias_tensor, data_format=data_format)
tensor_jacob_t, tensor_jacob_n = gradient_checker.compute_gradient(
input_tensor, np_input.shape, output_tensor, np_input.shape)
bias_jacob_t, bias_jacob_n = gradient_checker.compute_gradient(
bias_tensor, bias.shape, output_tensor, np_input.shape)
# Test gradient of BiasAddGrad
bias_add_grad = gradients_impl.gradients(
nn_ops.l2_loss(output_tensor), bias_tensor)[0]
grad_jacob_t, grad_jacob_n = gradient_checker.compute_gradient(
output_tensor, np_input.shape, bias_add_grad, bias.shape)
if dtype == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
input_tensor = constant_op.constant(
np_input, shape=np_input.shape, dtype=np.float32)
bias_tensor = constant_op.constant(
bias, shape=bias.shape, dtype=np.float32)
output_tensor = nn_ops.bias_add(
input_tensor, bias_tensor, data_format=data_format)
_, tensor_jacob_n = gradient_checker.compute_gradient(input_tensor,
np_input.shape,
output_tensor,
np_input.shape)
_, bias_jacob_n = gradient_checker.compute_gradient(bias_tensor,
bias.shape,
output_tensor,
np_input.shape)
bias_add_grad = gradients_impl.gradients(
nn_ops.l2_loss(output_tensor), bias_tensor)[0]
_, grad_jacob_n = gradient_checker.compute_gradient(output_tensor,
np_input.shape,
bias_add_grad,
bias.shape)
threshold = 5e-3
if dtype == dtypes.float64:
threshold = 1e-10
self.assertAllClose(tensor_jacob_t, tensor_jacob_n, threshold, threshold)
self.assertAllClose(bias_jacob_t, bias_jacob_n, threshold, threshold)
self.assertAllClose(grad_jacob_t, grad_jacob_n, threshold, threshold)
@test_util.run_deprecated_v1
def testGradientTensor2D(self):
for (data_format, use_gpu) in ("NHWC", False), ("NHWC", True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
np_input = np.array(
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
dtype=dtype.as_numpy_dtype).reshape(3, 2)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
@test_util.run_deprecated_v1
def testGradientTensor3D(self):
for (data_format, use_gpu) in [("NHWC", False), ("NHWC", True),
("NCHW", False), ("NCHW", True)]:
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
np_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
dtype=dtype.as_numpy_dtype).reshape(1, 3, 2)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
@test_util.run_deprecated_v1
def testGradientTensor4D(self):
for (data_format, use_gpu) in [("NHWC", False)]:
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
np_input = np.arange(
1.0, 49.0, dtype=dtype.as_numpy_dtype).reshape(
[2, 3, 4, 2]).astype(np.float32)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
np_input = np.arange(
1.0, 513.0, dtype=dtype.as_numpy_dtype).reshape(
[64, 2, 2, 2]).astype(np.float32)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
np_input = np.arange(
1.0, 513.0, dtype=dtype.as_numpy_dtype).reshape(
[2, 2, 2, 64]).astype(np.float32)
self._testGradient(np_input,
np.random.rand(64).astype(dtype.as_numpy_dtype),
dtype, data_format, use_gpu)
@test_util.run_deprecated_v1
def testGradientTensor5D(self):
for (data_format, use_gpu) in [("NHWC", False), ("NHWC", True),
("NCHW", False), ("NCHW", True)]:
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
np_input = np.arange(
1.0, 49.0, dtype=dtype.as_numpy_dtype).reshape(
[1, 2, 3, 4, 2]).astype(np.float32)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
@test_util.run_deprecated_v1
def testEmpty(self):
np.random.seed(7)
for shape in (0, 0), (2, 0), (0, 2), (4, 3, 0), (4, 0, 3), (0, 4, 3):
self._testAll(np.random.randn(*shape), np.random.randn(shape[-1]))
@test_util.run_deprecated_v1
def testEmptyGradient(self):
for (data_format, use_gpu) in ("NHWC", False), ("NHWC", True):
for shape in (0, 0), (2, 0), (0, 2):
self._testGradient(
np.random.randn(*shape), np.random.randn(shape[-1]), dtypes.float64,
data_format, use_gpu)
for (data_format, use_gpu) in [("NHWC", False), ("NHWC", True),
("NCHW", False), ("NCHW", True)]:
for shape in (4, 3, 0), (4, 0, 3), (0, 4, 3):
self._testGradient(
np.random.randn(*shape),
np.random.randn(shape[-1]), dtypes.float64, data_format, use_gpu)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/bias_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for as_string_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class AsStringOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testFloat(self):
float_inputs_ = [
0, 1, -1, 0.5, 0.25, 0.125, float("INF"), float("NAN"), float("-INF")
]
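    # The as_string options map onto printf-style formats: shortest -> %g,
    # scientific -> %e, and the default -> %f, with width, fill and
    # precision composing as in the format strings asserted below.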
with self.cached_session():
for dtype in (dtypes.float32, dtypes.float64):
input_ = array_ops.placeholder(dtype)
output = string_ops.as_string(input_, shortest=True)
result = output.eval(feed_dict={input_: float_inputs_})
s = lambda strs: [x.decode("ascii") for x in strs]
self.assertAllEqual(s(result), ["%g" % x for x in float_inputs_])
output = string_ops.as_string(input_, scientific=True)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%e" % x for x in float_inputs_])
output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%f" % x for x in float_inputs_])
output = string_ops.as_string(input_, width=3)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%3f" % x for x in float_inputs_])
output = string_ops.as_string(input_, width=3, fill="0")
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%03f" % x for x in float_inputs_])
output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%03g" % x for x in float_inputs_])
output = string_ops.as_string(input_, precision=10, width=3)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%03.10f" % x for x in float_inputs_])
output = string_ops.as_string(
input_, precision=10, width=3, fill="0", shortest=True)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%03.10g" % x for x in float_inputs_])
with self.assertRaisesOpError("Cannot select both"):
output = string_ops.as_string(input_, scientific=True, shortest=True)
output.eval(feed_dict={input_: float_inputs_})
with self.assertRaisesOpError("Fill string must be one or fewer"):
output = string_ops.as_string(input_, fill="ab")
output.eval(feed_dict={input_: float_inputs_})
@test_util.run_deprecated_v1
def testInt(self):
# Cannot use values outside -128..127 for test, because we're also
# testing int8
int_inputs_ = [0, -1, 1, -128, 127, -101, 101, -0]
s = lambda strs: [x.decode("ascii") for x in strs]
with self.cached_session():
for dtype in (dtypes.int32, dtypes.int64, dtypes.int8):
input_ = array_ops.placeholder(dtype)
output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
output = string_ops.as_string(input_, width=3)
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%3d" % x for x in int_inputs_])
output = string_ops.as_string(input_, width=3, fill="0")
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%03d" % x for x in int_inputs_])
with self.assertRaisesOpError("scientific and shortest"):
output = string_ops.as_string(input_, scientific=True)
output.eval(feed_dict={input_: int_inputs_})
with self.assertRaisesOpError("scientific and shortest"):
output = string_ops.as_string(input_, shortest=True)
output.eval(feed_dict={input_: int_inputs_})
with self.assertRaisesOpError("precision not supported"):
output = string_ops.as_string(input_, precision=0)
output.eval(feed_dict={input_: int_inputs_})
@test_util.run_deprecated_v1
def testLargeInt(self):
# Cannot use values outside -128..127 for test, because we're also
# testing int8
s = lambda strs: [x.decode("ascii") for x in strs]
with self.cached_session():
input_ = array_ops.placeholder(dtypes.int32)
int_inputs_ = [np.iinfo(np.int32).min, np.iinfo(np.int32).max]
output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
input_ = array_ops.placeholder(dtypes.int64)
int_inputs_ = [np.iinfo(np.int64).min, np.iinfo(np.int64).max]
output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
@test_util.run_deprecated_v1
def testHalfInt(self):
s = lambda strs: [x.decode("ascii") for x in strs]
with self.cached_session():
input_ = array_ops.placeholder(dtypes.int16)
int_inputs_ = [np.iinfo(np.int16).min, np.iinfo(np.int16).max]
output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
@test_util.run_deprecated_v1
def testBool(self):
bool_inputs_ = [False, True]
s = lambda strs: [x.decode("ascii") for x in strs]
with self.cached_session():
for dtype in (dtypes.bool,):
input_ = array_ops.placeholder(dtype)
output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: bool_inputs_})
self.assertAllEqual(s(result), ["false", "true"])
@test_util.run_deprecated_v1
def testComplex(self):
float_inputs_ = [
0, 1, -1, 0.5, 0.25, 0.125, complex("INF"), complex("NAN"),
complex("-INF")
]
complex_inputs_ = [(x + (x + 1) * 1j) for x in float_inputs_]
with self.cached_session():
for dtype in (dtypes.complex64, dtypes.complex128):
input_ = array_ops.placeholder(dtype)
def clean_nans(s_l):
return [s.decode("ascii").replace("-nan", "nan") for s in s_l]
output = string_ops.as_string(input_, shortest=True)
result = output.eval(feed_dict={input_: complex_inputs_})
self.assertAllEqual(
clean_nans(result),
["(%g,%g)" % (x.real, x.imag) for x in complex_inputs_])
output = string_ops.as_string(input_, scientific=True)
result = output.eval(feed_dict={input_: complex_inputs_})
self.assertAllEqual(
clean_nans(result),
["(%e,%e)" % (x.real, x.imag) for x in complex_inputs_])
output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: complex_inputs_})
self.assertAllEqual(
clean_nans(result),
["(%f,%f)" % (x.real, x.imag) for x in complex_inputs_])
output = string_ops.as_string(input_, width=3)
result = output.eval(feed_dict={input_: complex_inputs_})
self.assertAllEqual(
clean_nans(result),
["(%03f,%03f)" % (x.real, x.imag) for x in complex_inputs_])
output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
result = output.eval(feed_dict={input_: complex_inputs_})
self.assertAllEqual(
clean_nans(result),
["(%03g,%03g)" % (x.real, x.imag) for x in complex_inputs_])
output = string_ops.as_string(input_, precision=10, width=3)
result = output.eval(feed_dict={input_: complex_inputs_})
self.assertAllEqual(
clean_nans(result),
["(%03.10f,%03.10f)" % (x.real, x.imag) for x in complex_inputs_])
output = string_ops.as_string(
input_, precision=10, width=3, fill="0", shortest=True)
result = output.eval(feed_dict={input_: complex_inputs_})
self.assertAllEqual(
clean_nans(result),
["(%03.10g,%03.10g)" % (x.real, x.imag) for x in complex_inputs_])
with self.assertRaisesOpError("Cannot select both"):
output = string_ops.as_string(input_, scientific=True, shortest=True)
output.eval(feed_dict={input_: complex_inputs_})
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/as_string_op_test.py
|
# -*- coding: utf-8 -*-
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.python.framework import test_util
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class IoOpsTest(test.TestCase):
@test_util.run_deprecated_v1
def testReadFile(self):
cases = ['', 'Some contents', 'Неки садржаји на српском']
for contents in cases:
contents = compat.as_bytes(contents)
with tempfile.NamedTemporaryFile(
prefix='ReadFileTest', dir=self.get_temp_dir(), delete=False) as temp:
temp.write(contents)
with self.cached_session():
read = io_ops.read_file(temp.name)
self.assertEqual([], read.get_shape())
self.assertEqual(read.eval(), contents)
os.remove(temp.name)
def testWriteFile(self):
cases = ['', 'Some contents']
for contents in cases:
contents = compat.as_bytes(contents)
with tempfile.NamedTemporaryFile(
prefix='WriteFileTest', dir=self.get_temp_dir(),
delete=False) as temp:
pass
with self.cached_session() as sess:
w = io_ops.write_file(temp.name, contents)
self.evaluate(w)
with open(temp.name, 'rb') as f:
file_contents = f.read()
self.assertEqual(file_contents, contents)
os.remove(temp.name)
def testWriteFileCreateDir(self):
cases = ['', 'Some contents']
for contents in cases:
contents = compat.as_bytes(contents)
subdir = os.path.join(self.get_temp_dir(), 'subdir1')
filepath = os.path.join(subdir, 'subdir2', 'filename')
with self.cached_session() as sess:
w = io_ops.write_file(filepath, contents)
self.evaluate(w)
with open(filepath, 'rb') as f:
file_contents = f.read()
self.assertEqual(file_contents, contents)
shutil.rmtree(subdir)
def _subset(self, files, indices):
return set(
compat.as_bytes(files[i].name) for i in range(len(files))
if i in indices)
@test_util.run_deprecated_v1
def testMatchingFiles(self):
cases = [
'ABcDEF.GH', 'ABzDEF.GH', 'ABasdfjklDEF.GH', 'AB3DEF.GH', 'AB4DEF.GH',
'ABDEF.GH', 'XYZ'
]
files = [
tempfile.NamedTemporaryFile(
prefix=c, dir=self.get_temp_dir(), delete=True) for c in cases
]
with self.cached_session():
# Test exact match without wildcards.
for f in files:
self.assertEqual(
io_ops.matching_files(f.name).eval(), compat.as_bytes(f.name))
# We will look for files matching "ABxDEF.GH*" where "x" is some wildcard.
directory_path = files[0].name[:files[0].name.find(cases[0])]
pattern = directory_path + 'AB%sDEF.GH*'
self.assertEqual(
set(io_ops.matching_files(pattern % 'z').eval()),
self._subset(files, [1]))
self.assertEqual(
set(io_ops.matching_files(pattern % '?').eval()),
self._subset(files, [0, 1, 3, 4]))
self.assertEqual(
set(io_ops.matching_files(pattern % '*').eval()),
self._subset(files, [0, 1, 2, 3, 4, 5]))
# NOTE(mrry): Windows uses PathMatchSpec to match file patterns, which
# does not support the following expressions.
if os.name != 'nt':
self.assertEqual(
set(io_ops.matching_files(pattern % '[cxz]').eval()),
self._subset(files, [0, 1]))
self.assertEqual(
set(io_ops.matching_files(pattern % '[0-9]').eval()),
self._subset(files, [3, 4]))
# Test an empty list input.
self.assertItemsEqual(io_ops.matching_files([]).eval(), [])
# Test multiple exact filenames.
self.assertItemsEqual(
io_ops.matching_files([
files[0].name, files[1].name, files[2].name]).eval(),
self._subset(files, [0, 1, 2]))
# Test multiple globs.
self.assertItemsEqual(
io_ops.matching_files([
pattern % '?', directory_path + 'X?Z*']).eval(),
self._subset(files, [0, 1, 3, 4, 6]))
for f in files:
f.close()
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/io_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for custom user ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.framework import load_library
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class DuplicateOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
library_filename = os.path.join(resource_loader.get_data_files_path(),
'duplicate_op.so')
duplicate = load_library.load_op_library(library_filename)
self.assertEqual(len(duplicate.OP_LIST.op), 0)
with self.cached_session():
self.assertEqual(math_ops.add(1, 41).eval(), 42)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/duplicate_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.MatrixDeterminant."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class DeterminantOpTest(test.TestCase):
def _compareDeterminantBase(self, matrix_x, tf_ans):
out = self.evaluate(tf_ans)
shape = matrix_x.shape
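    # By convention the determinant of a 0x0 matrix is 1 (the empty
    # product), so an empty batch is compared against an array of ones.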
if shape[-1] == 0 and shape[-2] == 0:
np_ans = np.ones(shape[:-2]).astype(matrix_x.dtype)
else:
np_ans = np.array(np.linalg.det(matrix_x)).astype(matrix_x.dtype)
self.assertShapeEqual(np_ans, tf_ans)
self.assertAllClose(np_ans, out, atol=5e-5)
def _compareLogDeterminantBase(self, matrix_x, tf_ans):
sign_tf, abs_log_det_tf = tf_ans
shape = matrix_x.shape
if shape[-1] == 0 or shape[-2] == 0:
np_sign, np_ans = (1.0, np.zeros(shape[:-2]).astype(matrix_x.dtype))
else:
np_sign, np_ans = np.linalg.slogdet(matrix_x)
np_ans = np_ans.astype(matrix_x.dtype)
self.assertShapeEqual(np_ans, abs_log_det_tf)
sign_tf_val = self.evaluate(sign_tf)
abs_log_det_tf_val = self.evaluate(abs_log_det_tf)
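    # Compare via the identity det(A) = sign * exp(log|det(A)|), the
    # decomposition returned by both np.linalg.slogdet and
    # log_matrix_determinant.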
self.assertAllClose(
sign_tf_val * np.exp(abs_log_det_tf_val),
np_sign * np.exp(np_ans),
atol=5e-5)
def _compareDeterminant(self, matrix_x):
with test_util.use_gpu():
self._compareDeterminantBase(matrix_x,
linalg_ops.matrix_determinant(matrix_x))
self._compareLogDeterminantBase(
matrix_x, gen_linalg_ops.log_matrix_determinant(matrix_x))
def testBasic(self):
# 2x2 matrices
self._compareDeterminant(np.array([[2., 3.], [3., 4.]]).astype(np.float32))
self._compareDeterminant(np.array([[0., 0.], [0., 0.]]).astype(np.float32))
# 5x5 matrices (Eigen forces LU decomposition)
self._compareDeterminant(
np.array([[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [
2., 5., 8., 3., 8.
], [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.float32))
# A multidimensional batch of 2x2 matrices
self._compareDeterminant(np.random.rand(3, 4, 5, 2, 2).astype(np.float32))
def testBasicDouble(self):
# 2x2 matrices
self._compareDeterminant(np.array([[2., 3.], [3., 4.]]).astype(np.float64))
self._compareDeterminant(np.array([[0., 0.], [0., 0.]]).astype(np.float64))
# 5x5 matrices (Eigen forces LU decomposition)
self._compareDeterminant(
np.array([[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [
2., 5., 8., 3., 8.
], [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.float64))
# A multidimensional batch of 2x2 matrices
self._compareDeterminant(np.random.rand(3, 4, 5, 2, 2).astype(np.float64))
def testBasicComplex64(self):
# 2x2 matrices
self._compareDeterminant(
np.array([[2., 3.], [3., 4.]]).astype(np.complex64))
self._compareDeterminant(
np.array([[0., 0.], [0., 0.]]).astype(np.complex64))
self._compareDeterminant(
np.array([[1. + 1.j, 1. - 1.j], [-1. + 1.j, -1. - 1.j]]).astype(
np.complex64))
# 5x5 matrices (Eigen forces LU decomposition)
self._compareDeterminant(
np.array([[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [
2., 5., 8., 3., 8.
], [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.complex64))
# A multidimensional batch of 2x2 matrices
self._compareDeterminant(np.random.rand(3, 4, 5, 2, 2).astype(np.complex64))
def testBasicComplex128(self):
# 2x2 matrices
self._compareDeterminant(
np.array([[2., 3.], [3., 4.]]).astype(np.complex128))
self._compareDeterminant(
np.array([[0., 0.], [0., 0.]]).astype(np.complex128))
self._compareDeterminant(
np.array([[1. + 1.j, 1. - 1.j], [-1. + 1.j, -1. - 1.j]]).astype(
np.complex128))
# 5x5 matrices (Eigen forces LU decomposition)
self._compareDeterminant(
np.array([[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [
2., 5., 8., 3., 8.
], [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.complex128))
# A multidimensional batch of 2x2 matrices
self._compareDeterminant(
np.random.rand(3, 4, 5, 2, 2).astype(np.complex128))
def testInfiniteDeterminant(self):
max_double = np.finfo("d").max
huge_matrix = np.array([[max_double, 0.0], [0.0, max_double]])
self._compareDeterminant(huge_matrix)
@test_util.run_v1_only("b/120545219")
def testNonSquareMatrix(self):
    # Attempting the determinant of a non-square matrix should raise an
    # error.
with self.assertRaises(ValueError):
linalg_ops.matrix_determinant(
np.array([[1., 2., 3.], [3., 5., 4.]]).astype(np.float32))
@test_util.run_v1_only("b/120545219")
def testWrongDimensions(self):
# The input to the determinant should be a 2-dimensional tensor.
tensor1 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_ops.matrix_determinant(tensor1)
def testEmpty(self):
self._compareDeterminant(np.empty([0, 2, 2]))
self._compareDeterminant(np.empty([2, 0, 0]))
@test_util.run_v1_only("b/120545219")
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
det1 = linalg_ops.matrix_determinant(matrix1)
det2 = linalg_ops.matrix_determinant(matrix2)
det1_val, det2_val = self.evaluate([det1, det2])
self.assertEqual(det1_val, det2_val)
class MatrixDeterminantBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
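    # The identity plus a scaled all-ones matrix is diagonally dominant; its
    # eigenvalues are 1 (n - 1 times) and 1 + n / (2n), so the determinant
    # is 1.5 regardless of n, keeping the benchmark numerically stable.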
matrix = np.ones(shape).astype(np.float32) / (
2.0 * n) + np.diag(np.ones(n).astype(np.float32))
return variables.Variable(np.tile(matrix, batch_shape + (1, 1)))
def benchmarkMatrixDeterminantOp(self):
for shape in self.shapes:
with ops.Graph().as_default(), session.Session(
config=benchmark.benchmark_config()) as sess, ops.device("/cpu:0"):
matrix = self._GenerateMatrix(shape)
d = linalg_ops.matrix_determinant(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(
d,),
min_iters=25,
name="matrix_determinant_cpu_{shape}".format(shape=shape))
if test.is_gpu_available(True):
with ops.Graph().as_default(), session.Session(
config=benchmark.benchmark_config()) as sess, ops.device("/gpu:0"):
matrix = self._GenerateMatrix(shape)
d = linalg_ops.matrix_determinant(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(
d,),
min_iters=25,
name="matrix_determinant_gpu_{shape}".format(shape=shape))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/determinant_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.edit_distance_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def ConstantOf(x):
x = np.asarray(x)
# Convert to int64 if it's not a string or unicode
if x.dtype.char not in "SU":
x = np.asarray(x, dtype=np.int64)
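  # For example, ConstantOf([1, 2]) yields an int64 constant, while
  # ConstantOf(["a", "b"]) keeps the string dtype.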
return constant_op.constant(x)
class EditDistanceTest(test.TestCase):
def _testEditDistanceST(self,
hypothesis_st,
truth_st,
normalize,
expected_output,
expected_shape,
expected_err_re=None):
edit_distance = array_ops.edit_distance(
hypothesis=hypothesis_st, truth=truth_st, normalize=normalize)
if expected_err_re is None:
self.assertEqual(edit_distance.get_shape(), expected_shape)
output = self.evaluate(edit_distance)
self.assertAllClose(output, expected_output)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(edit_distance)
def _testEditDistance(self,
hypothesis,
truth,
normalize,
expected_output,
expected_err_re=None):
# Shape inference figures out the shape from the shape variables
# Explicit tuple() needed since zip returns an iterator in Python 3.
expected_shape = [
max(h, t) for h, t in tuple(zip(hypothesis[2], truth[2]))[:-1]
]
# SparseTensorValue inputs.
with ops.Graph().as_default() as g, self.session(g):
# hypothesis and truth are (index, value, shape) tuples
self._testEditDistanceST(
hypothesis_st=sparse_tensor.SparseTensorValue(
*[ConstantOf(x) for x in hypothesis]),
truth_st=sparse_tensor.SparseTensorValue(
*[ConstantOf(x) for x in truth]),
normalize=normalize,
expected_output=expected_output,
expected_shape=expected_shape,
expected_err_re=expected_err_re)
# SparseTensor inputs.
with ops.Graph().as_default() as g, self.session(g):
# hypothesis and truth are (index, value, shape) tuples
self._testEditDistanceST(
hypothesis_st=sparse_tensor.SparseTensor(
*[ConstantOf(x) for x in hypothesis]),
truth_st=sparse_tensor.SparseTensor(*[ConstantOf(x) for x in truth]),
normalize=normalize,
expected_output=expected_output,
expected_shape=expected_shape,
expected_err_re=expected_err_re)
def testEditDistanceNormalized(self):
hypothesis_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
hypothesis_values = [0, 1, 1, -1]
hypothesis_shape = [2, 2]
truth_indices = [[0, 0], [1, 0], [1, 1]]
truth_values = [0, 1, 1]
truth_shape = [2, 2]
expected_output = [1.0, 0.5]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceUnnormalized(self):
hypothesis_indices = [[0, 0], [1, 0], [1, 1]]
hypothesis_values = [10, 10, 11]
hypothesis_shape = [2, 2]
truth_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
truth_values = [1, 2, 1, -1]
truth_shape = [2, 3]
expected_output = [2.0, 2.0]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=False,
expected_output=expected_output)
def testEditDistanceProperDistance(self):
# In this case, the values are individual characters stored in the
# SparseTensor (type DT_STRING)
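    # The Levenshtein distance between "algorithm" and "altruistic" is 6,
    # which drives both the unnormalized and normalized expectations below.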
hypothesis_indices = ([[0, i] for i, _ in enumerate("algorithm")] +
[[1, i] for i, _ in enumerate("altruistic")])
hypothesis_values = [x for x in "algorithm"] + [x for x in "altruistic"]
hypothesis_shape = [2, 11]
truth_indices = ([[0, i] for i, _ in enumerate("altruistic")] +
[[1, i] for i, _ in enumerate("algorithm")])
truth_values = [x for x in "altruistic"] + [x for x in "algorithm"]
truth_shape = [2, 11]
expected_unnormalized = [6.0, 6.0]
expected_normalized = [6.0 / len("altruistic"), 6.0 / len("algorithm")]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=False,
expected_output=expected_unnormalized)
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_normalized)
def testEditDistance3D(self):
hypothesis_indices = [[0, 0, 0], [1, 0, 0]]
hypothesis_values = [0, 1]
hypothesis_shape = [2, 1, 1]
truth_indices = [[0, 1, 0], [1, 0, 0], [1, 1, 0]]
truth_values = [0, 1, 1]
truth_shape = [2, 2, 1]
    expected_output = [
        [np.inf, 1.0],  # (0,0): no truth, (0,1): no hypothesis
        [0.0, 1.0]  # (1,0): match, (1,1): no hypothesis
    ]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceZeroLengthHypothesis(self):
hypothesis_indices = np.empty((0, 2), dtype=np.int64)
hypothesis_values = []
hypothesis_shape = [1, 0]
truth_indices = [[0, 0]]
truth_values = [0]
truth_shape = [1, 1]
expected_output = [1.0]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceZeroLengthTruth(self):
hypothesis_indices = [[0, 0]]
hypothesis_values = [0]
hypothesis_shape = [1, 1]
truth_indices = np.empty((0, 2), dtype=np.int64)
truth_values = []
truth_shape = [1, 0]
expected_output = [np.inf] # Normalized, loss is 1/0 = inf
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceZeroLengthHypothesisAndTruth(self):
hypothesis_indices = np.empty((0, 2), dtype=np.int64)
hypothesis_values = []
hypothesis_shape = [1, 0]
truth_indices = np.empty((0, 2), dtype=np.int64)
truth_values = []
truth_shape = [1, 0]
expected_output = [0] # Normalized is 0 because of exact match
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/edit_distance_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.check_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class AssertV2Asserts(test.TestCase):
def test_passes_when_it_should(self):
    # This is a v2 test and needs to run eagerly.
with context.eager_mode():
c1 = constant_op.constant(-1, name="minus_one", dtype=dtypes.int32)
c2 = constant_op.constant(2, name="two", dtype=dtypes.int32)
c3 = constant_op.constant([3., 3.], name="three", dtype=dtypes.float32)
c4 = constant_op.constant([3., 3.5], name="three_and_a_half",
dtype=dtypes.float32)
scalar = c1
non_scalar = c3
integer = c1
non_integer = c3
positive = c2
negative = c1
cases = [
(check_ops.assert_equal_v2, (c1, c1), (c1, c2)),
(check_ops.assert_less_v2, (c1, c2), (c1, c1)),
(check_ops.assert_near_v2, (c3, c3), (c3, c4)),
(check_ops.assert_greater_v2, (c2, c1), (c1, c1)),
(check_ops.assert_negative_v2, (negative,), (positive,)),
(check_ops.assert_positive_v2, (positive,), (negative,)),
(check_ops.assert_less_equal_v2, (c1, c1), (c2, c1)),
(check_ops.assert_none_equal_v2, (c1, c2), (c3, c4)),
(check_ops.assert_non_negative_v2, (positive,), (negative,)),
(check_ops.assert_non_positive_v2, (negative,), (positive,)),
(check_ops.assert_greater_equal_v2, (c1, c1), (c1, c2)),
(check_ops.assert_type_v2, (c1, dtypes.int32), (c1, dtypes.float32),
TypeError),
(check_ops.assert_integer_v2, (integer,), (non_integer,),
TypeError),
(check_ops.assert_scalar_v2, (scalar,), (non_scalar,),
ValueError),
(check_ops.assert_rank_v2, (c1, 0), (c3, 2), ValueError),
(check_ops.assert_rank_in_v2, (c1, [0, 1]), (c1, [1, 2]),
ValueError),
(check_ops.assert_rank_at_least_v2, (non_scalar, 1), (scalar, 1),
ValueError),
]
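      # Each case is (fn, passing_args, failing_args[, expected_error]);
      # when no error type is given, InvalidArgumentError is expected.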
for case in cases:
fn = case[0]
passing_args = case[1]
failing_args = case[2]
error = errors.InvalidArgumentError if len(case) < 4 else case[3]
print("Testing %s passing properly." % fn)
fn(*passing_args)
print("Testing %s failing properly." % fn)
@def_function.function
def failing_fn():
fn(*failing_args, message="fail") # pylint: disable=cell-var-from-loop
with self.assertRaisesRegexp(error, "fail"):
failing_fn()
del failing_fn
class AssertProperIterableTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_single_tensor_raises(self):
tensor = constant_op.constant(1)
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(tensor)
@test_util.run_in_graph_and_eager_modes
def test_single_sparse_tensor_raises(self):
ten = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(ten)
@test_util.run_in_graph_and_eager_modes
def test_single_ndarray_raises(self):
array = np.array([1, 2, 3])
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(array)
@test_util.run_in_graph_and_eager_modes
def test_single_string_raises(self):
mystr = "hello"
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(mystr)
@test_util.run_in_graph_and_eager_modes
def test_non_iterable_object_raises(self):
non_iterable = 1234
with self.assertRaisesRegexp(TypeError, "to be iterable"):
check_ops.assert_proper_iterable(non_iterable)
@test_util.run_in_graph_and_eager_modes
def test_list_does_not_raise(self):
list_of_stuff = [
constant_op.constant([11, 22]), constant_op.constant([1, 2])
]
check_ops.assert_proper_iterable(list_of_stuff)
@test_util.run_in_graph_and_eager_modes
def test_generator_does_not_raise(self):
    generator_of_stuff = (t for t in [constant_op.constant([11, 22]),
                                      constant_op.constant([1, 2])])
check_ops.assert_proper_iterable(generator_of_stuff)
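# A compact restatement of the contract exercised above, as an illustrative
# helper: assert_proper_iterable rejects "iterable-looking" singletons
# (Tensor, SparseTensor, ndarray, string) and non-iterables, while accepting
# genuine collections such as lists, tuples, and generators.
def _example_assert_proper_iterable_contract():
  check_ops.assert_proper_iterable([constant_op.constant([1])])  # list: OK
  check_ops.assert_proper_iterable((1, 2, 3))  # tuple: OK
  for bad in (constant_op.constant(1), np.array([1]), "hello", 1234):
    try:
      check_ops.assert_proper_iterable(bad)
    except TypeError:
      pass  # each of these raises, matching the tests above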
class AssertEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with ops.control_dependencies([check_ops.assert_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_scalar_comparison(self):
const_true = constant_op.constant(True, name="true")
const_false = constant_op.constant(False, name="false")
with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(const_true, const_false, message="fail")
def test_returns_none_with_eager(self):
with context.eager_mode():
small = constant_op.constant([1, 2], name="small")
x = check_ops.assert_equal(small, small)
assert x is None
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_greater(self):
# Static check
static_small = constant_op.constant([1, 2], name="small")
static_big = constant_op.constant([3, 4], name="big")
with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(static_big, static_small, message="fail")
@test_util.run_deprecated_v1
def test_raises_when_greater_dynamic(self):
with self.cached_session():
small = array_ops.placeholder(dtypes.int32, name="small")
big = array_ops.placeholder(dtypes.int32, name="big")
with ops.control_dependencies(
[check_ops.assert_equal(big, small, message="fail")]):
out = array_ops.identity(small)
with self.assertRaisesOpError("fail.*big.*small"):
out.eval(feed_dict={small: [1, 2], big: [3, 4]})
def test_error_message_eager(self):
expected_error_msg_full = r"""big does not equal small
Condition x == y did not hold.
Indices of first 3 different values:
\[\[0 0\]
\[1 1\]
\[2 0\]\]
Corresponding x values:
\[2 3 6\]
Corresponding y values:
\[20 30 60\]
First 6 elements of x:
\[2 2 3 3 6 6\]
First 6 elements of y:
\[20 2 3 30 60 6\]
"""
expected_error_msg_default = r"""big does not equal small
Condition x == y did not hold.
Indices of first 3 different values:
\[\[0 0\]
\[1 1\]
\[2 0\]\]
Corresponding x values:
\[2 3 6\]
Corresponding y values:
\[20 30 60\]
First 3 elements of x:
\[2 2 3\]
First 3 elements of y:
\[20 2 3\]
"""
expected_error_msg_short = r"""big does not equal small
Condition x == y did not hold.
Indices of first 2 different values:
\[\[0 0\]
\[1 1\]\]
Corresponding x values:
\[2 3\]
Corresponding y values:
\[20 30\]
First 2 elements of x:
\[2 2\]
First 2 elements of y:
\[20 2\]
"""
with context.eager_mode():
big = constant_op.constant([[2, 2], [3, 3], [6, 6]])
small = constant_op.constant([[20, 2], [3, 30], [60, 6]])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
expected_error_msg_full):
check_ops.assert_equal(big, small, message="big does not equal small",
summarize=10)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
expected_error_msg_default):
check_ops.assert_equal(big, small, message="big does not equal small")
with self.assertRaisesRegexp(errors.InvalidArgumentError,
expected_error_msg_short):
check_ops.assert_equal(big, small, message="big does not equal small",
summarize=2)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_less(self):
# Static check
static_small = constant_op.constant([3, 1], name="small")
static_big = constant_op.constant([4, 2], name="big")
with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(static_big, static_small, message="fail")
@test_util.run_deprecated_v1
def test_raises_when_less_dynamic(self):
with self.cached_session():
small = array_ops.placeholder(dtypes.int32, name="small")
big = array_ops.placeholder(dtypes.int32, name="big")
with ops.control_dependencies([check_ops.assert_equal(small, big)]):
out = array_ops.identity(small)
with self.assertRaisesOpError("small.*big"):
out.eval(feed_dict={small: [3, 1], big: [4, 2]})
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_equal_and_broadcastable_shapes(self):
small = constant_op.constant([[1, 2], [1, 2]], name="small")
small_2 = constant_op.constant([1, 2], name="small_2")
with ops.control_dependencies([check_ops.assert_equal(small, small_2)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raises_when_equal_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
small_2 = constant_op.constant([1, 1], name="small_2")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(errors.InvalidArgumentError, ValueError),
(r"Incompatible shapes: \[3\] vs. \[2\]|"
r"Dimensions must be equal, but are 3 and 2")):
with ops.control_dependencies([check_ops.assert_equal(small, small_2)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raises_when_not_equal_and_broadcastable_shapes(self):
cond = constant_op.constant([True, False], name="small")
with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(cond, False, message="fail")
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies([check_ops.assert_equal(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
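# The graph-vs-eager pattern used throughout this file, isolated as an
# illustrative helper: in graph mode the assert is an op that must be wired
# in via control_dependencies so it executes before the output; in eager mode
# it checks immediately and returns None.
def _example_assert_equal_graph_vs_eager():
  with ops.Graph().as_default():
    x = constant_op.constant([1, 2])
    x2 = constant_op.constant([1, 2])
    assertion = check_ops.assert_equal(x, x2)  # an op; nothing has run yet
    with ops.control_dependencies([assertion]):
      out = array_ops.identity(x)
    with session.Session() as sess:
      sess.run(out)  # runs the assert, then the identity
  with context.eager_mode():
    y = constant_op.constant([1, 2])
    assert check_ops.assert_equal(y, y) is None  # checked on the spot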
class AssertNoneEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_not_equal(self):
small = constant_op.constant([1, 2], name="small")
    big = constant_op.constant([10, 20], name="big")
with ops.control_dependencies(
[check_ops.assert_none_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_equal(self):
small = constant_op.constant([3, 1], name="small")
with self.assertRaisesOpError("x != y did not hold"):
with ops.control_dependencies(
[check_ops.assert_none_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_not_equal_and_broadcastable_shapes(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3], name="big")
with ops.control_dependencies(
[check_ops.assert_none_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raises_when_not_equal_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
big = constant_op.constant([10, 10], name="big")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(ValueError, errors.InvalidArgumentError),
(r"Incompatible shapes: \[3\] vs. \[2\]|"
r"Dimensions must be equal, but are 3 and 2")):
with ops.control_dependencies(
[check_ops.assert_none_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies(
[check_ops.assert_none_equal(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
def test_returns_none_with_eager(self):
with context.eager_mode():
t1 = constant_op.constant([1, 2])
t2 = constant_op.constant([3, 4])
x = check_ops.assert_none_equal(t1, t2)
assert x is None
def test_error_message_eager(self):
# Note that the following three strings are regexes
expected_error_msg_full = r"""0.0, 1.0, 2.0, 3.0, 4.0, 5.0"""
expected_error_msg_default = r"""0.0, 1.0, 2.0, \.\.\."""
expected_error_msg_short = r"""0.0, 1.0, \.\.\."""
with context.eager_mode():
t = constant_op.constant(
np.array(range(6)), shape=[2, 3], dtype=np.float32)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
expected_error_msg_full):
check_ops.assert_none_equal(
t, t, message="This is the error message.", summarize=10)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
expected_error_msg_full):
check_ops.assert_none_equal(
t, t, message="This is the error message.", summarize=-1)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
expected_error_msg_default):
check_ops.assert_none_equal(t, t, message="This is the error message.")
with self.assertRaisesRegexp(errors.InvalidArgumentError,
expected_error_msg_short):
check_ops.assert_none_equal(
t, t, message="This is the error message.", summarize=2)
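# A short sketch of the summarize parameter that the error-message tests
# above rely on (an illustrative helper): summarize caps how many offending
# elements the failure message lists; -1 means all of them, and the default
# prints the first three.
def _example_summarize_parameter():
  with context.eager_mode():
    t = constant_op.constant([0., 1., 2.])
    try:
      check_ops.assert_none_equal(t, t, message="demo", summarize=2)
    except errors.InvalidArgumentError as e:
      assert "demo" in str(e)  # message prefix survives; 2 elements listed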
class AssertAllCloseTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_equal(self):
x = constant_op.constant(1., name="x")
y = constant_op.constant(1., name="y")
with ops.control_dependencies(
[check_ops.assert_near(x, y, message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_close_enough_32_bit_due_to_default_rtol(self):
eps = np.finfo(np.float32).eps
# Default rtol/atol is 10*eps
x = constant_op.constant(1., name="x")
y = constant_op.constant(1. + 2 * eps, name="y", dtype=np.float32)
with ops.control_dependencies(
[check_ops.assert_near(x, y, atol=0., message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_close_enough_32_bit_due_to_default_atol(self):
eps = np.finfo(np.float32).eps
# Default rtol/atol is 10*eps
x = constant_op.constant(0., name="x")
y = constant_op.constant(0. + 2 * eps, name="y", dtype=np.float32)
with ops.control_dependencies(
[check_ops.assert_near(x, y, rtol=0., message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_close_enough_64_bit_due_to_default_rtol(self):
eps = np.finfo(np.float64).eps
# Default rtol/atol is 10*eps
x = constant_op.constant(1., name="x", dtype=np.float64)
y = constant_op.constant(1. + 2 * eps, name="y", dtype=np.float64)
with ops.control_dependencies(
[check_ops.assert_near(x, y, atol=0., message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_close_enough_64_bit_due_to_default_atol(self):
eps = np.finfo(np.float64).eps
# Default rtol/atol is 10*eps
x = constant_op.constant(0., name="x", dtype=np.float64)
y = constant_op.constant(0. + 2 * eps, name="y", dtype=np.float64)
with ops.control_dependencies(
[check_ops.assert_near(x, y, rtol=0., message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_close_enough_due_to_custom_rtol(self):
x = constant_op.constant(1., name="x")
y = constant_op.constant(1.1, name="y")
with ops.control_dependencies(
[check_ops.assert_near(x, y, atol=0., rtol=0.5,
message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_close_enough_due_to_custom_atol(self):
x = constant_op.constant(0., name="x")
y = constant_op.constant(0.1, name="y", dtype=np.float32)
with ops.control_dependencies(
[check_ops.assert_near(x, y, atol=0.5, rtol=0.,
message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies([check_ops.assert_near(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raises_when_atol_violated(self):
x = constant_op.constant(10., name="x")
y = constant_op.constant(10.2, name="y")
with self.assertRaisesOpError("x and y not equal to tolerance"):
with ops.control_dependencies(
[check_ops.assert_near(x, y, atol=0.1,
message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raises_when_default_rtol_violated(self):
x = constant_op.constant(0.1, name="x")
y = constant_op.constant(0.0, name="y")
with self.assertRaisesOpError("x and y not equal to tolerance"):
with ops.control_dependencies(
[check_ops.assert_near(x, y, message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
def test_returns_none_with_eager(self):
with context.eager_mode():
t1 = constant_op.constant([1., 2.])
t2 = constant_op.constant([1., 2.])
x = check_ops.assert_near(t1, t2)
assert x is None
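# A numeric sketch of the tolerance rule the tests above exercise (the values
# are illustrative): assert_near passes when, elementwise,
#   |x - y| <= atol + rtol * |y|,
# with rtol and atol both defaulting to 10 * eps for the dtype
# (roughly 1.2e-6 for float32).
def _example_assert_near_tolerance():
  with context.eager_mode():
    eps = np.finfo(np.float32).eps
    x = constant_op.constant(1.)
    y = constant_op.constant(1. + 2 * eps)
    check_ops.assert_near(x, y, atol=0.)  # |x - y| = 2*eps <= 10*eps * |y|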
class AssertLessTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with self.assertRaisesOpError("failure message.*\n*.* x < y did not hold"):
with ops.control_dependencies(
[check_ops.assert_less(
small, small, message="failure message")]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_greater(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
with self.assertRaisesOpError("x < y did not hold"):
with ops.control_dependencies([check_ops.assert_less(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_less(self):
small = constant_op.constant([3, 1], name="small")
big = constant_op.constant([4, 2], name="big")
with ops.control_dependencies([check_ops.assert_less(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_less_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 2], name="big")
with ops.control_dependencies([check_ops.assert_less(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raises_when_less_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
big = constant_op.constant([3, 2], name="big")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(ValueError, errors.InvalidArgumentError),
(r"Incompatible shapes: \[3\] vs. \[2\]|"
"Dimensions must be equal, but are 3 and 2")):
with ops.control_dependencies([check_ops.assert_less(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies([check_ops.assert_less(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
def test_returns_none_with_eager(self):
with context.eager_mode():
t1 = constant_op.constant([1, 2])
t2 = constant_op.constant([3, 4])
x = check_ops.assert_less(t1, t2)
assert x is None
class AssertLessEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with ops.control_dependencies(
[check_ops.assert_less_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_greater(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_less_equal(
big, small, message="fail")]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_less_equal(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 2], name="big")
with ops.control_dependencies([check_ops.assert_less_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_less_equal_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 1], name="big")
with ops.control_dependencies([check_ops.assert_less_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
small = constant_op.constant([3, 1], name="small")
big = constant_op.constant([1, 1, 1], name="big")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(errors.InvalidArgumentError, ValueError),
(r"Incompatible shapes: \[2\] vs. \[3\]|"
r"Dimensions must be equal, but are 2 and 3")):
with ops.control_dependencies(
[check_ops.assert_less_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies(
[check_ops.assert_less_equal(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
class AssertGreaterTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_greater(
small, small, message="fail")]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_less(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
with self.assertRaisesOpError("x > y did not hold"):
with ops.control_dependencies([check_ops.assert_greater(small, big)]):
out = array_ops.identity(big)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_greater(self):
small = constant_op.constant([3, 1], name="small")
big = constant_op.constant([4, 2], name="big")
with ops.control_dependencies([check_ops.assert_greater(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_greater_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 2], name="big")
with ops.control_dependencies([check_ops.assert_greater(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raises_when_greater_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
big = constant_op.constant([3, 2], name="big")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(errors.InvalidArgumentError, ValueError),
(r"Incompatible shapes: \[2\] vs. \[3\]|"
r"Dimensions must be equal, but are 2 and 3")):
with ops.control_dependencies([check_ops.assert_greater(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies([check_ops.assert_greater(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
class AssertGreaterEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with ops.control_dependencies(
[check_ops.assert_greater_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_less(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_greater_equal(
small, big, message="fail")]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_greater_equal(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 2], name="big")
with ops.control_dependencies(
[check_ops.assert_greater_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_greater_equal_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 1], name="big")
with ops.control_dependencies(
[check_ops.assert_greater_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
  def test_raises_when_greater_equal_but_non_broadcastable_shapes(self):
    small = constant_op.constant([1, 1, 1], name="small")
    big = constant_op.constant([3, 1], name="big")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(errors.InvalidArgumentError, ValueError),
(r"Incompatible shapes: \[2\] vs. \[3\]|"
r"Dimensions must be equal, but are 2 and 3")):
with ops.control_dependencies(
[check_ops.assert_greater_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies(
[check_ops.assert_greater_equal(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
class AssertNegativeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_negative(self):
frank = constant_op.constant([-1, -2], name="frank")
with ops.control_dependencies([check_ops.assert_negative(frank)]):
out = array_ops.identity(frank)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_positive(self):
doug = constant_op.constant([1, 2], name="doug")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_negative(
doug, message="fail")]):
out = array_ops.identity(doug)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_zero(self):
claire = constant_op.constant([0], name="claire")
with self.assertRaisesOpError("x < 0 did not hold"):
with ops.control_dependencies([check_ops.assert_negative(claire)]):
out = array_ops.identity(claire)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_empty_tensor_doesnt_raise(self):
    # A tensor is negative when it satisfies:
    #   For every element x_i in x, x_i < 0
    # An empty tensor has no elements, so the condition holds vacuously
    # (a universally quantified statement over an empty set is true).
empty = constant_op.constant([], name="empty")
with ops.control_dependencies([check_ops.assert_negative(empty)]):
out = array_ops.identity(empty)
self.evaluate(out)
class AssertPositiveTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_negative(self):
freddie = constant_op.constant([-1, -2], name="freddie")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_positive(
freddie, message="fail")]):
out = array_ops.identity(freddie)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_positive(self):
remmy = constant_op.constant([1, 2], name="remmy")
with ops.control_dependencies([check_ops.assert_positive(remmy)]):
out = array_ops.identity(remmy)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_zero(self):
meechum = constant_op.constant([0], name="meechum")
with self.assertRaisesOpError("x > 0 did not hold"):
with ops.control_dependencies([check_ops.assert_positive(meechum)]):
out = array_ops.identity(meechum)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_empty_tensor_doesnt_raise(self):
    # A tensor is positive when it satisfies:
    #   For every element x_i in x, x_i > 0
    # An empty tensor has no elements, so the condition holds vacuously
    # (a universally quantified statement over an empty set is true).
empty = constant_op.constant([], name="empty")
with ops.control_dependencies([check_ops.assert_positive(empty)]):
out = array_ops.identity(empty)
self.evaluate(out)
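# A one-line illustration (as a hypothetical helper) of the vacuous-truth
# convention spelled out in the comments above: elementwise asserts hold
# trivially on empty tensors because no element can violate them.
def _example_empty_tensor_vacuous():
  with context.eager_mode():
    empty = constant_op.constant([])
    check_ops.assert_positive(empty)  # passes: no element violates x > 0
    check_ops.assert_negative(empty)  # passes for the same reason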
class EnsureShapeTest(test.TestCase):
# Static shape inference
@test_util.run_deprecated_v1
def testStaticShape(self):
placeholder = array_ops.placeholder(dtypes.int32)
ensure_shape_op = check_ops.ensure_shape(placeholder, (3, 3, 3))
self.assertEqual(ensure_shape_op.get_shape(), (3, 3, 3))
@test_util.run_deprecated_v1
def testStaticShape_MergesShapes(self):
placeholder = array_ops.placeholder(dtypes.int32, shape=(None, None, 3))
ensure_shape_op = check_ops.ensure_shape(placeholder, (5, 4, None))
self.assertEqual(ensure_shape_op.get_shape(), (5, 4, 3))
@test_util.run_deprecated_v1
def testStaticShape_RaisesErrorWhenRankIncompatible(self):
placeholder = array_ops.placeholder(dtypes.int32, shape=(None, None, 3))
with self.assertRaises(ValueError):
check_ops.ensure_shape(placeholder, (2, 3))
@test_util.run_deprecated_v1
def testStaticShape_RaisesErrorWhenDimIncompatible(self):
placeholder = array_ops.placeholder(dtypes.int32, shape=(None, None, 3))
with self.assertRaises(ValueError):
check_ops.ensure_shape(placeholder, (2, 2, 4))
@test_util.run_deprecated_v1
def testStaticShape_CanSetUnknownShape(self):
placeholder = array_ops.placeholder(dtypes.int32)
derived = placeholder / 3
ensure_shape_op = check_ops.ensure_shape(derived, None)
self.assertEqual(ensure_shape_op.get_shape(), None)
# Dynamic shape check
@test_util.run_deprecated_v1
  @test_util.disable_xla(
      "b/123337890")  # Dynamic shapes are not currently supported with XLA.
def testEnsuresDynamicShape_RaisesError(self):
placeholder = array_ops.placeholder(dtypes.int32)
derived = math_ops.divide(placeholder, 3, name="MyDivide")
derived = check_ops.ensure_shape(derived, (3, 3, 3))
feed_val = [[1], [2]]
with self.cached_session() as sess:
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
r"Shape of tensor MyDivide \[2,1\] is not compatible with "
r"expected shape \[3,3,3\]."):
sess.run(derived, feed_dict={placeholder: feed_val})
@test_util.run_deprecated_v1
  @test_util.disable_xla(
      "b/123337890")  # Dynamic shapes are not currently supported with XLA.
def testEnsuresDynamicShape_RaisesErrorDimUnknown(self):
placeholder = array_ops.placeholder(dtypes.int32)
derived = placeholder / 3
derived = check_ops.ensure_shape(derived, (None, None, 3))
feed_val = [[1], [2]]
with self.cached_session() as sess:
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
r"Shape of tensor [A-Za-z_]* \[2,1\] is not compatible with "
r"expected shape \[\?,\?,3\]."):
sess.run(derived, feed_dict={placeholder: feed_val})
@test_util.run_deprecated_v1
def testEnsuresDynamicShape(self):
placeholder = array_ops.placeholder(dtypes.int32)
derived = placeholder / 3
derived = check_ops.ensure_shape(derived, (2, 1))
feed_val = [[1], [2]]
with self.cached_session() as sess:
sess.run(derived, feed_dict={placeholder: feed_val})
@test_util.run_deprecated_v1
def testEnsuresDynamicShape_WithUnknownDims(self):
placeholder = array_ops.placeholder(dtypes.int32)
derived = placeholder / 3
derived = check_ops.ensure_shape(derived, (None, None))
feed_val = [[1], [2]]
with self.cached_session() as sess:
sess.run(derived, feed_dict={placeholder: feed_val})
@test_util.run_deprecated_v1
def testGradient(self):
placeholder = array_ops.placeholder(dtypes.float32)
derived = check_ops.ensure_shape(placeholder, (None, None))
gradient = gradients.gradients(derived, placeholder)
feed_val = [[4.0], [-1.0]]
with self.cached_session() as sess:
gradient_values, = sess.run(gradient, feed_dict={placeholder: feed_val})
expected = [[1.0], [1.0]]
self.assertAllEqual(gradient_values, expected)
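# A minimal usage sketch of ensure_shape outside the test harness (names here
# are illustrative): it merges the declared shape into the static shape where
# dimensions are known, and leaves a runtime check for the rest.
def _example_ensure_shape_usage():
  with ops.Graph().as_default():
    x = array_ops.placeholder(dtypes.float32, shape=(None, 3))
    y = check_ops.ensure_shape(x, (2, None))
    assert y.get_shape().as_list() == [2, 3]  # (None, 3) merged with (2, None)
    with session.Session() as sess:
      sess.run(y, feed_dict={x: np.ones([2, 3])})  # passes the runtime check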
class EnsureShapeBenchmark(test.Benchmark):
def _grappler_all_off_config(self):
config = config_pb2.ConfigProto()
off = rewriter_config_pb2.RewriterConfig.OFF
config.graph_options.optimizer_options.opt_level = -1
config.graph_options.rewrite_options.disable_model_pruning = 1
config.graph_options.rewrite_options.constant_folding = off
config.graph_options.rewrite_options.layout_optimizer = off
config.graph_options.rewrite_options.arithmetic_optimization = off
config.graph_options.rewrite_options.dependency_optimization = off
return config
def _run(self, op, feed_dict=None, num_iters=5000, name=None, **kwargs):
config = self._grappler_all_off_config()
with session.Session(config=config) as sess:
deltas = []
# Warm up the session
for _ in range(5):
sess.run(op, feed_dict=feed_dict)
for _ in range(num_iters):
start = time.time()
sess.run(op, feed_dict=feed_dict)
end = time.time()
deltas.append(end - start)
      median_time = np.median(deltas)  # the median resists stray slow runs
      median_us = median_time * 1e6
      self.report_benchmark(
          name=name,
          wall_time=median_us,
          extras=kwargs,
      )
def benchmark_const_op(self):
# In this case, we expect that the overhead of a `session.run` call
# far outweighs the time taken to execute the op...
shape = (3, 3, 100)
input_op = random_ops.random_normal(shape)
self._run(array_ops.identity(input_op), name="SingleConstOp")
def benchmark_single_ensure_op(self):
# In this case, we expect that the overhead of a `session.run` call
# far outweighs the time taken to execute the op...
shape = (3, 3, 100)
input_op = random_ops.random_normal(shape)
ensure_shape_op = check_ops.ensure_shape(input_op, shape)
self._run(ensure_shape_op, name="SingleEnsureShapeOp")
def _apply_n_times(self, op, target, n=1000):
for _ in range(n):
target = op(target)
return target
def benchmark_n_ops(self):
shape = (1000,)
input_op = random_ops.random_normal(shape)
n_ops = self._apply_n_times(array_ops.identity, input_op)
self._run(n_ops, name="NIdentityOps_1000")
def benchmark_n_ensure_ops(self):
shape = (1000,)
input_op = random_ops.random_normal(shape)
n_ensure_ops = self._apply_n_times(
lambda x: check_ops.ensure_shape(array_ops.identity(x), shape),
input_op)
self._run(n_ensure_ops, name="NEnsureShapeAndIdentityOps_1000")
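# The timing discipline used by the benchmark above, isolated as a sketch
# (fn, num_iters, and warmup are hypothetical parameters): warm up to
# amortize one-time costs, time each iteration individually, and report the
# median so stray slow runs do not skew the result.
def _example_median_timing(fn, num_iters=100, warmup=5):
  for _ in range(warmup):
    fn()
  deltas = []
  for _ in range(num_iters):
    start = time.time()
    fn()
    deltas.append(time.time() - start)
  return np.median(deltas) * 1e6  # wall time per iteration, in microseconds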
class AssertRankTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 1
with self.assertRaisesRegexp(ValueError,
"fail.*must have rank 1"):
with ops.control_dependencies(
[check_ops.assert_rank(
tensor, desired_rank, message="fail")]):
self.evaluate(array_ops.identity(tensor))
@test_util.run_deprecated_v1
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.cached_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank(
tensor, desired_rank, message="fail")]):
with self.assertRaisesOpError("fail.*my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
@test_util.run_deprecated_v1
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.cached_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_raises_if_rank_too_large_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 0
with self.assertRaisesRegexp(ValueError, "rank"):
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
@test_util.run_deprecated_v1
def test_rank_one_tensor_raises_if_rank_too_large_dynamic_rank(self):
with self.cached_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
@test_util.run_deprecated_v1
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.cached_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "rank"):
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
@test_util.run_deprecated_v1
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.cached_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 2
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes
def test_raises_if_rank_is_not_scalar_static(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
check_ops.assert_rank(tensor, np.array([], dtype=np.int32))
@test_util.run_deprecated_v1
def test_raises_if_rank_is_not_scalar_dynamic(self):
with self.cached_session():
tensor = constant_op.constant(
[1, 2], dtype=dtypes.float32, name="my_tensor")
rank_tensor = array_ops.placeholder(dtypes.int32, name="rank_tensor")
with self.assertRaisesOpError("Rank must be a scalar"):
with ops.control_dependencies(
[check_ops.assert_rank(tensor, rank_tensor)]):
array_ops.identity(tensor).eval(feed_dict={rank_tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes
def test_raises_if_rank_is_not_integer_static(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
check_ops.assert_rank(tensor, .5)
@test_util.run_deprecated_v1
def test_raises_if_rank_is_not_integer_dynamic(self):
with self.cached_session():
tensor = constant_op.constant(
[1, 2], dtype=dtypes.float32, name="my_tensor")
rank_tensor = array_ops.placeholder(dtypes.float32, name="rank_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
with ops.control_dependencies(
[check_ops.assert_rank(tensor, rank_tensor)]):
array_ops.identity(tensor).eval(feed_dict={rank_tensor: .5})
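# A sketch of the static/dynamic split the class above exercises: with a
# statically known rank, assert_rank raises ValueError while the graph is
# being built; with an unknown-rank placeholder it becomes a runtime op that
# fails with InvalidArgumentError only when executed.
def _example_assert_rank_static_vs_dynamic():
  with ops.Graph().as_default():
    static = constant_op.constant([1, 2])  # rank 1, known statically
    try:
      check_ops.assert_rank(static, 2)
    except ValueError:
      pass  # raised at graph-construction time, no session needed
    dynamic = array_ops.placeholder(dtypes.float32)  # rank unknown
    assertion = check_ops.assert_rank(dynamic, 2)
    with ops.control_dependencies([assertion]):
      out = array_ops.identity(dynamic)
    with session.Session() as sess:
      try:
        sess.run(out, feed_dict={dynamic: [1., 2.]})  # rank 1 at run time
      except errors.InvalidArgumentError:
        pass  # the same violation now surfaces at execution time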
class AssertRankInTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_rank_zero_tensor_raises_if_rank_mismatch_static_rank(self):
tensor_rank0 = constant_op.constant(42, name="my_tensor")
with self.assertRaisesRegexp(
ValueError, "fail.*must have rank.*in.*1.*2"):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, (1, 2), message="fail")]):
self.evaluate(array_ops.identity(tensor_rank0))
@test_util.run_deprecated_v1
def test_rank_zero_tensor_raises_if_rank_mismatch_dynamic_rank(self):
with self.cached_session():
tensor_rank0 = array_ops.placeholder(dtypes.float32, name="my_tensor")
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, (1, 2), message="fail")]):
with self.assertRaisesOpError("fail.*my_tensor.*rank"):
array_ops.identity(tensor_rank0).eval(feed_dict={tensor_rank0: 42.0})
@test_util.run_in_graph_and_eager_modes
def test_rank_zero_tensor_doesnt_raise_if_rank_matches_static_rank(self):
tensor_rank0 = constant_op.constant(42, name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, desired_ranks)]):
self.evaluate(array_ops.identity(tensor_rank0))
@test_util.run_deprecated_v1
def test_rank_zero_tensor_doesnt_raise_if_rank_matches_dynamic_rank(self):
with self.cached_session():
tensor_rank0 = array_ops.placeholder(dtypes.float32, name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, desired_ranks)]):
array_ops.identity(tensor_rank0).eval(feed_dict={tensor_rank0: 42.0})
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_doesnt_raise_if_rank_matches_static_rank(self):
tensor_rank1 = constant_op.constant([42, 43], name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, desired_ranks)]):
self.evaluate(array_ops.identity(tensor_rank1))
@test_util.run_deprecated_v1
def test_rank_one_tensor_doesnt_raise_if_rank_matches_dynamic_rank(self):
with self.cached_session():
tensor_rank1 = array_ops.placeholder(dtypes.float32, name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, desired_ranks)]):
array_ops.identity(tensor_rank1).eval(feed_dict={
tensor_rank1: (42.0, 43.0)
})
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_raises_if_rank_mismatches_static_rank(self):
tensor_rank1 = constant_op.constant((42, 43), name="my_tensor")
with self.assertRaisesRegexp(ValueError, "rank"):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, (0, 2))]):
self.evaluate(array_ops.identity(tensor_rank1))
@test_util.run_deprecated_v1
def test_rank_one_tensor_raises_if_rank_mismatches_dynamic_rank(self):
with self.cached_session():
tensor_rank1 = array_ops.placeholder(dtypes.float32, name="my_tensor")
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, (0, 2))]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor_rank1).eval(feed_dict={
tensor_rank1: (42.0, 43.0)
})
@test_util.run_in_graph_and_eager_modes
def test_raises_if_rank_is_not_scalar_static(self):
tensor = constant_op.constant((42, 43), name="my_tensor")
desired_ranks = (
np.array(1, dtype=np.int32),
np.array((2, 1), dtype=np.int32))
with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
check_ops.assert_rank_in(tensor, desired_ranks)
@test_util.run_deprecated_v1
def test_raises_if_rank_is_not_scalar_dynamic(self):
with self.cached_session():
tensor = constant_op.constant(
(42, 43), dtype=dtypes.float32, name="my_tensor")
desired_ranks = (
array_ops.placeholder(dtypes.int32, name="rank0_tensor"),
array_ops.placeholder(dtypes.int32, name="rank1_tensor"))
with self.assertRaisesOpError("Rank must be a scalar"):
with ops.control_dependencies(
(check_ops.assert_rank_in(tensor, desired_ranks),)):
array_ops.identity(tensor).eval(feed_dict={
desired_ranks[0]: 1,
desired_ranks[1]: [2, 1],
})
@test_util.run_in_graph_and_eager_modes
def test_raises_if_rank_is_not_integer_static(self):
tensor = constant_op.constant((42, 43), name="my_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
check_ops.assert_rank_in(tensor, (1, .5,))
@test_util.run_deprecated_v1
def test_raises_if_rank_is_not_integer_dynamic(self):
with self.cached_session():
tensor = constant_op.constant(
(42, 43), dtype=dtypes.float32, name="my_tensor")
rank_tensor = array_ops.placeholder(dtypes.float32, name="rank_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
with ops.control_dependencies(
[check_ops.assert_rank_in(tensor, (1, rank_tensor))]):
array_ops.identity(tensor).eval(feed_dict={rank_tensor: .5})
class AssertRankAtLeastTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 1
with self.assertRaisesRegexp(ValueError, "rank at least 1"):
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
@test_util.run_deprecated_v1
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.cached_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
@test_util.run_deprecated_v1
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.cached_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes
  def test_rank_one_tensor_doesnt_raise_if_rank_too_large_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
@test_util.run_deprecated_v1
  def test_rank_one_tensor_doesnt_raise_if_rank_too_large_dynamic_rank(self):
with self.cached_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
@test_util.run_deprecated_v1
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.cached_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "rank at least 2"):
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
@test_util.run_deprecated_v1
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.cached_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 2
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
class AssertNonNegativeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_negative(self):
zoe = constant_op.constant([-1, -2], name="zoe")
with self.assertRaisesOpError("x >= 0 did not hold"):
with ops.control_dependencies([check_ops.assert_non_negative(zoe)]):
out = array_ops.identity(zoe)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_zero_and_positive(self):
lucas = constant_op.constant([0, 2], name="lucas")
with ops.control_dependencies([check_ops.assert_non_negative(lucas)]):
out = array_ops.identity(lucas)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_empty_tensor_doesnt_raise(self):
    # A tensor is non-negative when it satisfies:
    #   For every element x_i in x, x_i >= 0
    # An empty tensor has no elements, so the condition holds vacuously
    # (a universally quantified statement over an empty set is true).
empty = constant_op.constant([], name="empty")
with ops.control_dependencies([check_ops.assert_non_negative(empty)]):
out = array_ops.identity(empty)
self.evaluate(out)
class AssertNonPositiveTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_zero_and_negative(self):
tom = constant_op.constant([0, -2], name="tom")
with ops.control_dependencies([check_ops.assert_non_positive(tom)]):
out = array_ops.identity(tom)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_positive(self):
rachel = constant_op.constant([0, 2], name="rachel")
with self.assertRaisesOpError("x <= 0 did not hold"):
with ops.control_dependencies([check_ops.assert_non_positive(rachel)]):
out = array_ops.identity(rachel)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_empty_tensor_doesnt_raise(self):
    # A tensor is non-positive when it satisfies:
    #   For every element x_i in x, x_i <= 0
    # An empty tensor has no elements, so the condition holds vacuously
    # (a universally quantified statement over an empty set is true).
empty = constant_op.constant([], name="empty")
with ops.control_dependencies([check_ops.assert_non_positive(empty)]):
out = array_ops.identity(empty)
self.evaluate(out)
class AssertIntegerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_integer(self):
integers = constant_op.constant([1, 2], name="integers")
with ops.control_dependencies([check_ops.assert_integer(integers)]):
out = array_ops.identity(integers)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raises_when_float(self):
floats = constant_op.constant([1.0, 2.0], name="floats")
with self.assertRaisesRegexp(TypeError, "Expected.*integer"):
check_ops.assert_integer(floats)
class AssertTypeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_correct_type(self):
integers = constant_op.constant([1, 2], dtype=dtypes.int64)
with ops.control_dependencies([
check_ops.assert_type(integers, dtypes.int64)]):
out = array_ops.identity(integers)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raises_when_wrong_type(self):
floats = constant_op.constant([1.0, 2.0], dtype=dtypes.float16)
with self.assertRaisesRegexp(TypeError, "must be of type.*float32"):
check_ops.assert_type(floats, dtypes.float32)
class AssertShapesTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_raise_static_shape_mismatch(self):
x = array_ops.ones([3, 2], name="x")
y = array_ops.ones([2, 3], name="y")
shapes = {
x: ("N", "Q"),
y: ("N", "D"),
}
regex = (r"Specified by tensor .* dimension 0. "
r"Tensor .* dimension 0 must have size 3. "
r"Received size 2")
self.raises_static_error(shapes=shapes, regex=regex)
def test_raise_dynamic_shape_mismatch(self):
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, [None, 2], name="x")
y = array_ops.placeholder(dtypes.float32, [None, 3], name="y")
shapes = {
x: ("N", "Q"),
y: ("N", "D"),
}
regex = (r"\[Specified by tensor x.* dimension 0\] "
r"\[Tensor y.* dimension\] \[0\] \[must have size\] \[3\]")
feed_dict = {x: np.ones([3, 2]), y: np.ones([2, 3])}
self.raises_dynamic_error(shapes=shapes, regex=regex, feed_dict=feed_dict)
@test_util.run_in_graph_and_eager_modes
def test_raise_static_shape_explicit_mismatch(self):
x = array_ops.ones([3, 2], name="x")
y = array_ops.ones([2, 3], name="y")
shapes = {
x: (3, "Q"),
y: (3, "D"),
}
regex = (r"Specified explicitly. "
r"Tensor .* dimension 0 must have size 3. "
r"Received size 2")
self.raises_static_error(shapes=shapes, regex=regex)
@test_util.run_in_graph_and_eager_modes
def test_rank_zero_rank_one_size_one_equivalence(self):
rank_one_size_one = array_ops.ones([1], name="rank_one_size_one")
rank_zero = array_ops.constant(5, name="rank_zero")
check_ops.assert_shapes({
rank_one_size_one: (),
rank_zero: (),
})
check_ops.assert_shapes({
rank_one_size_one: (1,),
rank_zero: (1,),
})
@test_util.run_in_graph_and_eager_modes
def test_raise_static_rank_1_size_not_1_mismatch_scalar(self):
x = array_ops.constant([2, 2], name="x")
shapes = {
x: (),
}
regex = (r"Specified explicitly. "
r"Tensor .* dimension 0 must have size 1. "
r"Received size 2")
self.raises_static_error(shapes=shapes, regex=regex)
@test_util.run_in_graph_and_eager_modes
def test_raise_static_scalar_mismatch_rank_1_size_not_1(self):
x = array_ops.constant(2, name="x")
shapes = {
x: (2,),
}
regex = (r"Specified explicitly. "
r"Tensor .* dimension 0 must have size 2. "
r"Received size 1")
self.raises_static_error(shapes=shapes, regex=regex)
@test_util.run_in_graph_and_eager_modes
def test_scalar_implies_size_one(self):
scalar = array_ops.constant(5, name="rank_zero")
x = array_ops.ones([2, 2], name="x")
shapes = {scalar: ("a",), x: ("a", 2)}
regex = (r"Specified by tensor .* dimension 0. "
r"Tensor .* dimension 0 must have size 1. "
r"Received size 2")
self.raises_static_error(shapes=shapes, regex=regex)
@test_util.run_in_graph_and_eager_modes
def test_raise_not_iterable(self):
x = array_ops.constant([1, 2], name="x")
shapes = {x: 2}
regex = (r"Tensor .*. "
r"Specified shape must be an iterable. "
r"An iterable has the attribute `__iter__` or `__getitem__`. "
r"Received specified shape: 2")
self.raises_static_error(shapes=shapes, regex=regex)
def test_raise_dynamic_shape_explicit_mismatch(self):
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, [None, 2], name="xa")
y = array_ops.placeholder(dtypes.float32, [None, 3], name="y")
shapes = {
x: (3, "Q"),
y: (3, "D"),
}
regex = (r"\[Specified explicitly\] "
r"\[Tensor y.* dimension\] \[0\] \[must have size\] \[3\]")
feed_dict = {x: np.ones([3, 2]), y: np.ones([2, 3])}
self.raises_dynamic_error(shapes=shapes, regex=regex, feed_dict=feed_dict)
@test_util.run_in_graph_and_eager_modes
def test_no_op_when_specified_as_unknown(self):
x = array_ops.constant([1, 1], name="x")
assertion = check_ops.assert_shapes({x: None})
with ops.control_dependencies([assertion]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raises_static_incorrect_rank(self):
rank_two_shapes = [
(1, 1),
(1, 3),
("a", "b"),
(None, None),
]
rank_three_shapes = [
(1, 1, 1),
("a", "b", "c"),
(None, None, None),
(1, "b", None),
]
def raises_static_rank_error(shapes, x, correct_rank, actual_rank):
for shape in shapes:
regex = (r"Tensor .* must have rank %d. Received rank %d" %
(correct_rank, actual_rank))
self.raises_static_error(shapes={x: shape}, regex=regex)
raises_static_rank_error(
rank_two_shapes, array_ops.ones([1]), correct_rank=2, actual_rank=1)
raises_static_rank_error(
rank_three_shapes,
array_ops.ones([1, 1]),
correct_rank=3,
actual_rank=2)
raises_static_rank_error(
rank_three_shapes, array_ops.constant(1), correct_rank=3, actual_rank=0)
def test_raises_dynamic_incorrect_rank(self):
x_value = 5
rank_two_shapes = [(1, 1), (1, 3), ("a", "b"), (None, None)]
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, None)
for shape in rank_two_shapes:
regex = r"Tensor .* must have rank\] \[2\]"
self.raises_dynamic_error(
shapes={x: shape}, regex=regex, feed_dict={x: x_value})
@test_util.run_in_graph_and_eager_modes
def test_correctly_matching(self):
u = array_ops.constant(1, name="u")
v = array_ops.ones([1, 2], name="v")
w = array_ops.ones([3], name="w")
x = array_ops.ones([1, 2, 3], name="x")
y = array_ops.ones([3, 1, 2], name="y")
z = array_ops.ones([2, 3, 1], name="z")
assertion = check_ops.assert_shapes({
x: ("a", "b", "c"),
y: ("c", "a", "b"),
z: ("b", "c", "a"),
v: ("a", "b"),
w: ("c",),
u: "a"
})
with ops.control_dependencies([assertion]):
out = array_ops.identity(x)
self.evaluate(out)
assertion = check_ops.assert_shapes({
x: (1, "b", "c"),
y: ("c", "a", 2),
z: ("b", 3, "a"),
v: ("a", 2),
w: (3,),
u: ()
})
with ops.control_dependencies([assertion]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_variable_length_symbols(self):
x = array_ops.ones([4, 1], name="x")
y = array_ops.ones([4, 2], name="y")
assertion = check_ops.assert_shapes({
x: ("num_observations", "input_dim"),
y: ("num_observations", "output_dim"),
})
with ops.control_dependencies([assertion]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raise_implicit_mismatch_using_iterable_alternatives(self):
x = array_ops.ones([2, 2], name="x")
y = array_ops.ones([1, 3], name="y")
styles = [{
x: ("A", "B"),
y: ("A", "C"),
}, {
x: "AB",
y: "AC"
}, {
x: ["A", "B"],
y: ["A", "C"],
}, {
x: np.array(["A", "B"]),
y: np.array(["A", "C"])
}, {
x: ("A", "B"),
y: "AC"
}]
for shapes in styles:
self.raises_static_error(
shapes=shapes,
regex=(r"Specified by tensor .* dimension 0. "
"Tensor .* dimension 0 must have size 2. "
"Received size 1"))
@test_util.run_in_graph_and_eager_modes
def test_raise_explicit_mismatch_using_iterable_alternatives(self):
x = array_ops.ones([2, 2], name="x")
y = array_ops.ones([1, 3], name="y")
styles = [{
x: (2, 2),
y: (2, 3),
}, {
x: "22",
y: "23"
}, {
x: [2, 2],
y: [2, 3],
}, {
x: np.array([2, 2]),
y: np.array([2, 3])
}, {
x: (2, 2),
y: "23"
}]
for shapes in styles:
self.raises_static_error(
shapes=shapes,
regex=(r"Specified explicitly. "
"Tensor .* dimension 0 must have size 2. "
"Received size 1"))
@test_util.run_in_graph_and_eager_modes
def test_dim_size_specified_as_unknown(self):
x = array_ops.ones([1, 2, 3], name="x")
y = array_ops.ones([2, 1], name="y")
a1 = check_ops.assert_shapes({
x: (None, 2, None),
y: (None, 1),
})
a2 = check_ops.assert_shapes({
x: (".", 2, "."),
y: (".", 1),
})
a3 = check_ops.assert_shapes({
x: ".2.",
y: ".1",
})
with ops.control_dependencies([a1, a2, a3]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raise_static_shape_explicit_mismatch_innermost_dims(self):
x = array_ops.ones([3, 2], name="x")
y = array_ops.ones([2, 3], name="y")
s1 = {
x: (3, "Q"),
y: (Ellipsis, 3, "D"),
}
s2 = {
x: "3Q",
y: "*3D",
}
regex = (r"Specified explicitly. "
r"Tensor .* dimension -2 must have size 3. "
r"Received size 2")
self.raises_static_error(shapes=s1, regex=regex)
self.raises_static_error(shapes=s2, regex=regex)
@test_util.run_in_graph_and_eager_modes
def test_correctly_matching_innermost_dims(self):
x = array_ops.ones([1, 2, 3, 2], name="x")
y = array_ops.ones([2, 3, 3], name="y")
a1 = check_ops.assert_shapes({
x: (Ellipsis, "N", "Q"),
y: (Ellipsis, "N", "D"),
})
a2 = check_ops.assert_shapes({
x: "*NQ",
y: "*ND",
})
with ops.control_dependencies([a1, a2]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raise_variable_num_outer_dims_prefix_misuse(self):
x = array_ops.ones([1, 2], name="x")
s1 = {
x: ("N", Ellipsis, "Q"),
}
s2 = {
x: "N*Q",
}
regex = (r"Tensor .* specified shape index .*. "
r"Symbol `...` or `\*` for a variable number of "
r"unspecified dimensions is only allowed as the first entry")
self.raises_static_error(shapes=s1, regex=regex)
self.raises_static_error(shapes=s2, regex=regex)
@test_util.run_in_graph_and_eager_modes
def test_empty_shapes_dict_no_op(self):
assertion = check_ops.assert_shapes({})
with ops.control_dependencies([assertion]):
out = array_ops.identity(0)
self.evaluate(out)
def raises_static_error(self, shapes, regex):
with self.assertRaisesRegexp(ValueError, regex):
check_ops.assert_shapes(shapes)
def raises_dynamic_error(self, shapes, regex, feed_dict):
with self.session() as sess:
with self.assertRaisesRegexp(errors.InvalidArgumentError, regex):
assertion = check_ops.assert_shapes(shapes)
with ops.control_dependencies([assertion]):
out = array_ops.identity(0)
sess.run(out, feed_dict=feed_dict)
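# Added sketch (not part of the original suite): the usage pattern the tests
# above exercise. check_ops.assert_shapes returns an assertion op; gating a
# downstream tensor on it with control_dependencies makes the shape contract
# explicit in both graph and eager modes.
def _example_assert_shapes_usage():
  x = array_ops.ones([4, 3])
  y = array_ops.ones([4, 7])
  # Reusing the symbol "N" ties the leading dimensions of x and y together.
  assertion = check_ops.assert_shapes({x: ("N", "D_in"), y: ("N", "D_out")})
  with ops.control_dependencies([assertion]):
    return array_ops.identity(x)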
class IsStrictlyIncreasingTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_constant_tensor_is_not_strictly_increasing(self):
self.assertFalse(self.evaluate(check_ops.is_strictly_increasing([1, 1, 1])))
@test_util.run_in_graph_and_eager_modes
def test_decreasing_tensor_is_not_strictly_increasing(self):
self.assertFalse(self.evaluate(
check_ops.is_strictly_increasing([1, 0, -1])))
@test_util.run_in_graph_and_eager_modes
def test_2d_decreasing_tensor_is_not_strictly_increasing(self):
self.assertFalse(
self.evaluate(check_ops.is_strictly_increasing([[1, 3], [2, 4]])))
@test_util.run_in_graph_and_eager_modes
def test_increasing_tensor_is_increasing(self):
self.assertTrue(self.evaluate(check_ops.is_strictly_increasing([1, 2, 3])))
@test_util.run_in_graph_and_eager_modes
def test_increasing_rank_two_tensor(self):
self.assertTrue(
self.evaluate(check_ops.is_strictly_increasing([[-1, 2], [3, 4]])))
@test_util.run_in_graph_and_eager_modes
def test_tensor_with_one_element_is_strictly_increasing(self):
self.assertTrue(self.evaluate(check_ops.is_strictly_increasing([1])))
@test_util.run_in_graph_and_eager_modes
def test_empty_tensor_is_strictly_increasing(self):
self.assertTrue(self.evaluate(check_ops.is_strictly_increasing([])))
class IsNonDecreasingTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_constant_tensor_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([1, 1, 1])))
@test_util.run_in_graph_and_eager_modes
def test_decreasing_tensor_is_not_non_decreasing(self):
self.assertFalse(self.evaluate(check_ops.is_non_decreasing([3, 2, 1])))
@test_util.run_in_graph_and_eager_modes
def test_2d_decreasing_tensor_is_not_non_decreasing(self):
self.assertFalse(self.evaluate(
check_ops.is_non_decreasing([[1, 3], [2, 4]])))
@test_util.run_in_graph_and_eager_modes
def test_increasing_rank_one_tensor_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([1, 2, 3])))
@test_util.run_in_graph_and_eager_modes
def test_increasing_rank_two_tensor(self):
self.assertTrue(self.evaluate(
check_ops.is_non_decreasing([[-1, 2], [3, 3]])))
@test_util.run_in_graph_and_eager_modes
def test_tensor_with_one_element_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([1])))
@test_util.run_in_graph_and_eager_modes
def test_empty_tensor_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([])))
class FloatDTypeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_assert_same_float_dtype(self):
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype(None, None))
self.assertIs(dtypes.float32, check_ops.assert_same_float_dtype([], None))
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype([], dtypes.float32))
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype(None, dtypes.float32))
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype([None, None], None))
self.assertIs(
dtypes.float32,
check_ops.assert_same_float_dtype([None, None], dtypes.float32))
const_float = constant_op.constant(3.0, dtype=dtypes.float32)
self.assertIs(
dtypes.float32,
check_ops.assert_same_float_dtype([const_float], dtypes.float32))
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[const_float], dtypes.int32)
sparse_float = sparse_tensor.SparseTensor(
constant_op.constant([[111], [232]], dtypes.int64),
constant_op.constant([23.4, -43.2], dtypes.float32),
constant_op.constant([500], dtypes.int64))
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype([sparse_float],
dtypes.float32))
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[sparse_float], dtypes.int32)
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[const_float, None, sparse_float], dtypes.float64)
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype(
[const_float, sparse_float]))
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype(
[const_float, sparse_float], dtypes.float32))
const_int = constant_op.constant(3, dtype=dtypes.int32)
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[sparse_float, const_int])
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[sparse_float, const_int], dtypes.int32)
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[sparse_float, const_int], dtypes.float32)
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[const_int])
class AssertScalarTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_assert_scalar(self):
check_ops.assert_scalar(constant_op.constant(3))
check_ops.assert_scalar(constant_op.constant("foo"))
check_ops.assert_scalar(3)
check_ops.assert_scalar("foo")
with self.assertRaisesRegexp(ValueError, "Expected scalar"):
check_ops.assert_scalar(constant_op.constant([3, 4]))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/check_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for UnicodeScript op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class UnicodeScriptOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testValidScripts(self):
inputs = [
ord("a"),
0x0411, # CYRILLIC CAPITAL LETTER BE
0x82b8, # CJK UNIFIED IDEOGRAPH-82B8
ord(",")
]
with self.cached_session():
input_vector = constant_op.constant(inputs, dtypes.int32)
outputs = string_ops.unicode_script(input_vector).eval()
self.assertAllEqual(
outputs,
[
25, # USCRIPT_LATIN (LATN)
8, # USCRIPT_CYRILLIC (CYRL)
17, # USCRIPT_HAN (HANI)
0 # USCRIPT_COMMON (ZYYY)
])
@test_util.run_deprecated_v1
def testInvalidScript(self):
inputs = [-100, 0xffffff]
with self.cached_session():
input_vector = constant_op.constant(inputs, dtypes.int32)
outputs = string_ops.unicode_script(input_vector).eval()
self.assertAllEqual(outputs, [-1, -1])
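# Added sketch (not part of the original file): unicode_script maps Unicode
# code points to ICU UScriptCode values, so mixed-script text can be bucketed
# per character.
def _example_script_of_text(text):
  codepoints = constant_op.constant([ord(c) for c in text], dtypes.int32)
  return string_ops.unicode_script(codepoints)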
class UnicodeScriptBenchmarks(test.Benchmark):
  # Generate some random-ish input by jumping around in Unicode characters.
def _generateBenchmarkInput(self, size):
chars = []
i = 0
offset = 0
continuity_size = 20
while i < size:
chars.append(ord("a") + offset)
i += 1
offset += 1
if i % continuity_size == 0:
offset += 100
if offset > 0x1F940:
offset = 0
return chars
def benchmark_unicode_script(self):
with session.Session(config=benchmark.benchmark_config()) as sess:
chars = self._generateBenchmarkInput(1000000)
script = string_ops.unicode_script(chars)
self.run_op_benchmark(sess, script.op, min_iters=100)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/unicode_script_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for draw_bounding_box_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class DrawBoundingBoxOpTest(test.TestCase):
def _fillBorder(self, image, color):
"""Fill the border of the image.
Args:
image: Numpy array of shape [height, width, depth].
      color: Numpy array of shape [depth] holding an RGB or RGBA color.
Returns:
image of original shape with border filled with "color".
Raises:
      ValueError: Depths of image and color don't match.
"""
height, width, depth = image.shape
if depth != color.shape[0]:
raise ValueError("Image (%d) and color (%d) depths must match." %
(depth, color.shape[0]))
image[0:height, 0, 0:depth] = color
image[0:height, width - 1, 0:depth] = color
image[0, 0:width, 0:depth] = color
image[height - 1, 0:width, 0:depth] = color
return image
def _testDrawBoundingBoxColorCycling(self, img, colors=None):
"""Tests if cycling works appropriately.
Args:
      img: 3-D numpy image on which to draw.
      colors: Optional [num_colors, 4] numpy color table; if None, the
        default table from draw_bounding_box_op.cc is used.
    """
color_table = colors
if colors is None:
# THIS TABLE MUST MATCH draw_bounding_box_op.cc
      color_table = np.asarray([[1, 1, 0, 1], [0, 0, 1, 1], [1, 0, 0, 1],
                                [0, 1, 0, 1], [0.5, 0, 0.5, 1],
                                [0.5, 0.5, 0, 1], [0.5, 0, 0, 1],
                                [0, 0, 0.5, 1], [0, 1, 1, 1], [1, 0, 1, 1]])
assert len(img.shape) == 3
depth = img.shape[2]
assert depth <= color_table.shape[1]
assert depth == 1 or depth == 3 or depth == 4
    # Set the red channel to 1 if the image is grayscale (GRY, depth 1).
if depth == 1:
color_table[:, 0] = 1
num_colors = color_table.shape[0]
for num_boxes in range(1, num_colors + 2):
# Generate draw_bounding_box_op drawn image
image = np.copy(img)
color = color_table[(num_boxes - 1) % num_colors, 0:depth]
test_drawn_image = self._fillBorder(image, color)
bboxes = np.asarray([0, 0, 1, 1])
bboxes = np.vstack([bboxes for _ in range(num_boxes)])
bboxes = math_ops.cast(bboxes, dtypes.float32)
bboxes = array_ops.expand_dims(bboxes, 0)
image = ops.convert_to_tensor(image)
image = image_ops_impl.convert_image_dtype(image, dtypes.float32)
image = array_ops.expand_dims(image, 0)
image = image_ops.draw_bounding_boxes(image, bboxes, colors=colors)
with self.cached_session(use_gpu=False) as sess:
op_drawn_image = np.squeeze(sess.run(image), 0)
self.assertAllEqual(test_drawn_image, op_drawn_image)
def testDrawBoundingBoxRGBColorCycling(self):
"""Test if RGB color cycling works correctly."""
image = np.zeros([10, 10, 3], "float32")
self._testDrawBoundingBoxColorCycling(image)
def testDrawBoundingBoxRGBAColorCycling(self):
"""Test if RGBA color cycling works correctly."""
image = np.zeros([10, 10, 4], "float32")
self._testDrawBoundingBoxColorCycling(image)
def testDrawBoundingBoxGRY(self):
"""Test if drawing bounding box on a GRY image works."""
image = np.zeros([4, 4, 1], "float32")
self._testDrawBoundingBoxColorCycling(image)
def testDrawBoundingBoxRGBColorCyclingWithColors(self):
"""Test if RGB color cycling works correctly with provided colors."""
image = np.zeros([10, 10, 3], "float32")
colors = np.asarray([[1, 1, 0, 1], [0, 0, 1, 1], [0.5, 0, 0.5, 1],
[0.5, 0.5, 0, 1], [0, 1, 1, 1], [1, 0, 1, 1]])
self._testDrawBoundingBoxColorCycling(image, colors=colors)
def testDrawBoundingBoxRGBAColorCyclingWithColors(self):
"""Test if RGBA color cycling works correctly with provided colors."""
image = np.zeros([10, 10, 4], "float32")
colors = np.asarray([[0.5, 0, 0.5, 1], [0.5, 0.5, 0, 1], [0.5, 0, 0, 1],
[0, 0, 0.5, 1]])
self._testDrawBoundingBoxColorCycling(image, colors=colors)
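# Added sketch (not part of the original suite): draw_bounding_boxes takes a
# batch of float images and normalized [y_min, x_min, y_max, x_max] boxes; a
# full-extent box reproduces the border fill that _fillBorder computes.
def _example_draw_full_border():
  image = array_ops.zeros([1, 10, 10, 3], dtype=dtypes.float32)
  boxes = ops.convert_to_tensor([[[0.0, 0.0, 1.0, 1.0]]], dtypes.float32)
  return image_ops.draw_bounding_boxes(image, boxes)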
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/draw_bounding_box_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
GRADIENT_TESTS_DTYPES = (dtypes.float16, dtypes.float32, dtypes.float64)
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _FlatInnerDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape([
functools.reduce(lambda x, y: x * y, shape[:-ndims + 1], 1)
] + shape[-ndims + 1:])
def _FlatOuterDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape(shape[:ndims - 1] + [
functools.reduce(lambda x, y: x * y, shape[ndims - 1:], 1)
])
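# Added sketch (not in the original file): what the flattening helpers do.
# For a tensor of shape (2, 3, 4), _FlatInnerDims collapses everything but
# the last axis into (6, 4), while _FlatOuterDims collapses everything but
# the first axis into (2, 12).
def _demo_flatten_helpers():
  t = np.arange(24).reshape(2, 3, 4)
  assert _FlatInnerDims(t).shape == (6, 4)
  assert _FlatOuterDims(t).shape == (2, 12)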
def _NumpyScatterNd(ref, indices, updates, op):
ixdim = indices.shape[-1]
num_updates = indices.size // ixdim
total_nd = len(ref.shape)
slice_size = 1
for i in range(ixdim, total_nd):
slice_size *= ref.shape[i]
flat_indices = _FlatInnerDims(indices)
flat_updates = updates.reshape((num_updates, slice_size))
output_flat = _FlatOuterDims(ref, ixdim + 1)
for ix_updates, ix_output in enumerate(flat_indices):
ix_output = tuple(ix_output)
output_flat[ix_output] = op(output_flat[ix_output],
flat_updates[ix_updates])
return output_flat.reshape(ref.shape)
def _NumpyUpdate(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
def _NumpyAdd(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p + u)
def _NumpySub(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p - u)
def _NumpyMul(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p * u)
def _NumpyDiv(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p / u)
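# Added sketch (not in the original file): the reference semantics used by
# the tests below. Each row of `indices` is an index vector into `ref`, and
# _NumpyUpdate writes the matching row of `updates` at that location.
def _demo_numpy_update():
  ref = np.zeros([6], dtype=np.float32)
  indices = np.array([[2], [0]])
  updates = np.array([10., 20.], dtype=np.float32)
  out = _NumpyUpdate(ref, indices, updates)
  assert list(out) == [20., 0., 10., 0., 0., 0.]
  return out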
class StatefulScatterNdTest(test.TestCase):
def _VariableRankTest(self,
np_scatter,
tf_scatter,
vtype,
itype,
repeat_indices=False):
np.random.seed(8)
ref_shapes = [(3, 6), (3, 6), (3, 6, 9), (3, 6, 9), (3, 6, 9), (3, 6, 9)]
indices_shapes = [(2,), (2, 2), (2,), (2, 2), (2, 3), (2, 3, 3)]
with self.cached_session(use_gpu=True):
for ref_shape, indices_shape in zip(ref_shapes, indices_shapes):
num_updates = indices_shape[0]
ixdim = indices_shape[-1]
indexable_area_shape = ()
for i in range(ixdim):
indexable_area_shape += (ref_shape[i],)
all_indices = [
list(coord)
for coord, _ in np.ndenumerate(
np.empty(indexable_area_shape, vtype))
]
np.random.shuffle(all_indices)
indices = np.array(all_indices[:num_updates])
if num_updates > 1 and repeat_indices:
indices = indices[:num_updates // 2]
for _ in range(num_updates - num_updates // 2):
indices = np.append(
indices, [indices[np.random.randint(num_updates // 2)]], axis=0)
np.random.shuffle(indices)
indices = _AsType(indices[:num_updates], itype)
updates_shape = (num_updates,)
for i in range(ixdim, len(ref_shape)):
updates_shape += (ref_shape[i],)
updates = _AsType(np.random.randn(*(updates_shape)), vtype)
ref = _AsType(np.random.randn(*(ref_shape)), vtype)
# Scatter via numpy
new = ref.copy()
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref_var = variables.VariableV1(ref)
ref_var.initializer.run()
tf_scatter(ref_var, indices, updates).eval()
# Compare
self.assertAllClose(new, self.evaluate(ref_var))
def _VariableRankTests(self, np_scatter, tf_scatter):
for vtype in (np.int32, np.float16, np.float32, np.float64, np.complex64,
np.complex128):
for itype in (np.int32, np.int64):
self._VariableRankTest(np_scatter, tf_scatter, vtype, itype)
def testSimple(self):
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
ref = variables.Variable([0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32)
expected = np.array([0, 11, 0, 10, 9, 0, 0, 12])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
self.evaluate(init)
result = self.evaluate(scatter)
self.assertAllClose(result, expected)
@test_util.run_deprecated_v1
def testSimpleResource(self):
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
ref = resource_variable_ops.ResourceVariable(
[0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32)
expected = np.array([0, 11, 0, 10, 9, 0, 0, 12])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
self.evaluate(init)
self.evaluate(scatter)
self.assertAllClose(ref.eval(), expected)
def testSimple2(self):
indices = constant_op.constant([[1, 0], [1, 1]], dtype=dtypes.int32)
updates = constant_op.constant([11., 12.], dtype=dtypes.float32)
ref = variables.Variable(
[[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
self.evaluate(init)
result = self.evaluate(scatter)
self.assertAllClose(result, expected)
def testSimple3(self):
indices = constant_op.constant([[1]], dtype=dtypes.int32)
updates = constant_op.constant([[11., 12.]], dtype=dtypes.float32)
ref = variables.Variable(
[[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
self.evaluate(init)
result = self.evaluate(scatter)
self.assertAllClose(result, expected)
@test_util.run_deprecated_v1
def testVariableRankUpdate(self):
self._VariableRankTests(_NumpyUpdate, state_ops.scatter_nd_update)
@test_util.run_deprecated_v1
def testVariableRankAdd(self):
self._VariableRankTests(_NumpyAdd, state_ops.scatter_nd_add)
@test_util.run_deprecated_v1
def testVariableRankSub(self):
self._VariableRankTests(_NumpySub, state_ops.scatter_nd_sub)
# TODO(ebrevdo): Re-enable when we need ScatterNdMul.
# def testVariableRankMul(self):
# self._VariableRankTests(_NumpyMul, state_ops.scatter_nd_mul)
# TODO(ebrevdo): Re-enable when we need ScatterNdDiv.
# def testVariableRankDiv(self):
# self._VariableRankTests(_NumpyDiv, state_ops.scatter_nd_div)
def _ScatterRepeatIndicesTest(self, np_scatter, tf_scatter):
for vtype in (np.int32, np.float16, np.float32, np.float64):
for itype in (np.int32, np.int64):
self._VariableRankTest(
np_scatter, tf_scatter, vtype, itype, repeat_indices=True)
@test_util.run_v1_only("b/120545219")
def testScatterRepeatIndices(self):
"""This tests scatter_add using indices that repeat."""
self._ScatterRepeatIndicesTest(_NumpyAdd, state_ops.scatter_nd_add)
self._ScatterRepeatIndicesTest(_NumpySub, state_ops.scatter_nd_sub)
# TODO(ebrevdo): Re-enable when we need ScatterNdMul and ScatterNdDiv.
# self._ScatterRepeatIndicesTest(_NumpyMul, state_ops.scatter_nd_mul)
# self._ScatterRepeatIndicesTest(_NumpyDiv, state_ops.scatter_nd_div)
# TODO(simister): Re-enable once binary size increase due to
# extra templating is back under control and this op is re-enabled
# def testBooleanScatterUpdate(self):
# with self.session(use_gpu=False) as session:
# var = tf.Variable([True, False])
# update0 = tf.compat.v1.scatter_nd_update(var, [[1]], [True])
# update1 = tf.compat.v1.scatter_nd_update(
# var, tf.constant(
# [[0]], dtype=tf.int64), [False])
# var.initializer.run()
# session.run([update0, update1])
# self.assertAllEqual([False, True], self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testScatterOutOfRangeCpu(self):
# TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control.
# tf.scatter_nd_mul, tf.scatter_nd_div,
for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
state_ops.scatter_nd_update):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
with self.cached_session(use_gpu=False):
ref = variables.VariableV1(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([[2], [0], [5]])
op(ref, indices, updates).eval()
# Test some out of range errors.
indices = np.array([[-1], [0], [5]])
with self.assertRaisesOpError(
r"indices\[0\] = \[-1\] does not index into shape \[6\]"):
op(ref, indices, updates).eval()
indices = np.array([[2], [0], [6]])
with self.assertRaisesOpError(
r"indices\[2\] = \[6\] does not index into shape \[6\]"):
op(ref, indices, updates).eval()
def testRank3ValidShape(self):
indices = array_ops.zeros([2, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
self.assertAllEqual(
state_ops.scatter_nd_update(ref, indices,
updates).get_shape().as_list(), shape)
@test_util.run_v1_only("b/120545219")
@test_util.disable_xla("b/123337890") # Error messages differ
def testResVarInvalidOutputShape(self):
res = variables.Variable(
initial_value=lambda: array_ops.zeros(shape=[], dtype=dtypes.float32),
dtype=dtypes.float32)
with self.cached_session():
res.initializer.run()
with self.assertRaisesOpError("Output must be at least 1-D"):
state_ops.scatter_nd_update(res, [[0]], [0.22]).eval()
@test_util.run_deprecated_v1
def testExtraIndicesDimensions(self):
indices = array_ops.zeros([1, 1, 2], dtypes.int32)
updates = array_ops.zeros([1, 1], dtypes.int32)
shape = np.array([2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
scatter_update = state_ops.scatter_nd_update(ref, indices, updates)
self.assertAllEqual(scatter_update.get_shape().as_list(), shape)
expected_result = np.zeros([2, 2], dtype=np.int32)
with self.cached_session():
ref.initializer.run()
self.assertAllEqual(expected_result, self.evaluate(scatter_update))
@test_util.run_deprecated_v1
def testRank3InvalidShape1(self):
indices = array_ops.zeros([3, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
with self.assertRaisesWithPredicateMatch(
ValueError, r"The outer \d+ dimensions of indices\.shape="):
state_ops.scatter_nd_update(ref, indices, updates)
@test_util.run_deprecated_v1
def testRank3InvalidShape2(self):
indices = array_ops.zeros([2, 2, 1], dtypes.int32)
updates = array_ops.zeros([2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
with self.assertRaisesWithPredicateMatch(
ValueError, r"The inner \d+ dimensions of input\.shape="):
state_ops.scatter_nd_update(ref, indices, updates)
@test_util.run_deprecated_v1
def testConcurrentUpdates(self):
num_updates = 10000
update_values = np.random.rand(num_updates)
ref = variables.Variable(np.zeros([2, 2]), dtype=dtypes.float64)
indices = constant_op.constant([[0, 1]] * num_updates, dtype=dtypes.int32)
updates = constant_op.constant(update_values, dtype=dtypes.float64)
expected_result = np.zeros([2, 2], dtype=np.float64)
expected_result[0, 1] = np.sum(update_values)
scatter = state_ops.scatter_nd_add(ref, indices, updates)
init = variables.global_variables_initializer()
with session.Session() as sess:
self.evaluate(init)
result = self.evaluate(scatter)
      self.assertAllClose(result, expected_result)
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
if not test.IsBuiltWithCuda():
return
# TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control.
# tf.scatter_nd_mul, tf.scatter_nd_div,
for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
state_ops.scatter_nd_update):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
      # We don't test the implementation; we just check that nothing fails.
with self.cached_session(force_gpu=True):
ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
# Indices out of range should not fail.
indices = np.array([-1, 0, 5])
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
op(ref, indices, updates).eval()
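# Added sketch (not part of the original suite): the stateless counterpart
# exercised by the class below. array_ops.scatter_nd builds a dense tensor of
# the given shape from scratch and sums updates at duplicate indices instead
# of overwriting them.
def _example_stateless_scatter_nd():
  indices = constant_op.constant([[1], [1], [3]])
  updates = constant_op.constant([1.0, 2.0, 4.0])
  # Duplicate index 1 accumulates, giving [0., 3., 0., 4.].
  return array_ops.scatter_nd(indices, updates, shape=[4])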
class ScatterNdTest(test.TestCase):
non_aliasing_add_test = False
def scatter_nd(self, indices, updates, shape, input_=None):
del input_ # input_ is not used in scatter_nd
return array_ops.scatter_nd(indices, updates, shape)
@test_util.run_in_graph_and_eager_modes
def testBool(self):
indices = constant_op.constant(
[[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant(
[False, True, False, True], dtype=dtypes.bool)
expected = np.array(
[False, False, False, True, False, False, False, True])
scatter = self.scatter_nd(indices, updates, shape=(8,))
result = self.evaluate(scatter)
self.assertAllEqual(expected, result)
    # Same index is updated twice with the same value.
indices = constant_op.constant(
[[4], [3], [3], [7]], dtype=dtypes.int32)
updates = constant_op.constant(
[False, True, True, True], dtype=dtypes.bool)
expected = np.array([
False, False, False, True, False, False, False, True])
scatter = self.scatter_nd(indices, updates, shape=(8,))
result = self.evaluate(scatter)
self.assertAllEqual(expected, result)
@test_util.run_in_graph_and_eager_modes
def testInvalidShape(self):
# TODO(apassos) figure out how to unify these errors
with self.assertRaises(errors.InvalidArgumentError
if context.executing_eagerly() else ValueError):
array_ops.scatter_nd(indices=[0], # this should be indices=[[0]]
updates=[0.0],
shape=[1])
def testString(self):
indices = constant_op.constant([[4], [3], [1], [7]],
dtype=dtypes.int32)
updates = constant_op.constant(["four", "three", "one", "seven"],
dtype=dtypes.string)
expected = np.array([b"", b"one", b"", b"three", b"four",
b"", b"", b"seven"])
scatter = self.scatter_nd(indices, updates, shape=(8,))
with self.cached_session() as sess:
result = self.evaluate(scatter)
self.assertAllEqual(expected, result)
    # Same index is updated twice with the same value.
indices = constant_op.constant([[4], [3], [3], [7]],
dtype=dtypes.int32)
updates = constant_op.constant(["a", "b", "b", "c"],
dtype=dtypes.string)
expected = np.array([b"", b"", b"", b"bb", b"a", b"", b"", b"c"])
scatter = self.scatter_nd(indices, updates, shape=(8,))
with self.cached_session() as sess:
result = self.evaluate(scatter)
self.assertAllEqual(expected, result)
    # Same index is updated twice with different values.
indices = constant_op.constant([[4], [3], [3], [7]],
dtype=dtypes.int32)
updates = constant_op.constant(["a", "b", "c", "d"],
dtype=dtypes.string)
expected = [np.array([b"", b"", b"", b"bc", b"a", b"", b"", b"d"]),
np.array([b"", b"", b"", b"cb", b"a", b"", b"", b"d"])]
scatter = self.scatter_nd(indices, updates, shape=(8,))
with self.cached_session() as sess:
result = self.evaluate(scatter)
self.assertTrue(np.array_equal(result, expected[0]) or
np.array_equal(result, expected[1]))
def testRank3ValidShape(self):
indices = array_ops.zeros([2, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
self.assertAllEqual(
self.scatter_nd(indices, updates, shape).get_shape().as_list(), shape)
@test_util.run_deprecated_v1
def testExtraIndicesDimensions(self):
indices = array_ops.zeros([1, 1, 2], dtypes.int32)
updates = array_ops.zeros([1, 1], dtypes.int32)
shape = np.array([2, 2])
scatter = self.scatter_nd(indices, updates, shape)
self.assertAllEqual(scatter.get_shape().as_list(), shape)
expected_result = np.zeros([2, 2], dtype=np.int32)
with self.cached_session():
self.assertAllEqual(expected_result, self.evaluate(scatter))
@test_util.run_deprecated_v1
def testUndefinedIndicesShape(self):
indices = array_ops.placeholder(dtypes.int32, shape=None)
updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
shape = constant_op.constant([2, 2, 2], dtypes.int32)
self.scatter_nd(indices, updates, shape)
@test_util.run_deprecated_v1
def testUndefinedUpdatesShape(self):
indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
updates = array_ops.placeholder(dtypes.int32, shape=None)
shape = constant_op.constant([2, 2, 2], dtypes.int32)
self.scatter_nd(indices, updates, shape)
@test_util.run_deprecated_v1
def testUndefinedOutputShape(self):
indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
shape = array_ops.placeholder(dtypes.int32, shape=[None])
self.scatter_nd(indices, updates, shape)
@test_util.run_deprecated_v1
def testEmptyOutputShape1(self):
indices = array_ops.zeros([2, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = constant_op.constant([0, 3, 2], dtypes.int32)
with self.assertRaisesWithPredicateMatch(
ValueError, "Indices and updates specified for empty output shape"):
self.scatter_nd(indices, updates, shape)
@test_util.run_v1_only("b/120545219")
def testEmptyOutputShape2(self):
indices = array_ops.placeholder(dtypes.int32, shape=None)
updates = array_ops.placeholder(dtypes.int32, shape=None)
shape = constant_op.constant([0, 3, 2], dtypes.int32)
with self.cached_session():
with self.assertRaisesOpError(
"Indices and updates specified for empty output"):
self.scatter_nd(indices, updates, shape).eval(feed_dict={
indices: np.zeros([2, 2, 2], dtype=np.int32),
updates: np.zeros([2, 2, 2], dtype=np.int32)
})
@test_util.run_deprecated_v1
def testEmptyOutputShape3(self):
indices = array_ops.zeros([0], dtypes.int32)
updates = array_ops.zeros([0], dtypes.int32)
shape = constant_op.constant([0], dtypes.int32)
scatter = self.scatter_nd(indices, updates, shape)
with self.cached_session():
self.assertEqual(scatter.eval().size, 0)
@test_util.run_deprecated_v1
def testRank3InvalidShape1(self):
indices = array_ops.zeros([3, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
with self.assertRaisesWithPredicateMatch(
ValueError, r"The outer \d+ dimensions of indices\.shape="):
self.scatter_nd(indices, updates, shape)
@test_util.run_deprecated_v1
def testRank3InvalidShape2(self):
indices = array_ops.zeros([2, 2, 1], dtypes.int32)
updates = array_ops.zeros([2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
with self.assertRaisesWithPredicateMatch(
ValueError, r"The inner \d+ dimensions of (input|output)\.shape="):
self.scatter_nd(indices, updates, shape)
@test_util.run_deprecated_v1
def testGradientsRank2ElementUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant([[0, 0], [1, 1]], dtype=dtypes.int32)
updates = constant_op.constant([1, 4], dtype=dtype)
shape = constant_op.constant([2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array([1, 4], dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array([[1, 2], [3, 4]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
@test_util.run_deprecated_v1
def testGradientsRank2SliceUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant([[1], [0]], dtype=dtypes.int32)
updates = constant_op.constant([[3, 4], [1, 2]], dtype=dtype)
shape = constant_op.constant([2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant([[3, 4], [1, 2]], dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array([[1, 2], [3, 4]],
dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array([[3, 4], [1, 2]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
@test_util.run_deprecated_v1
def testGradientsRank3SliceUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant([[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
dtype=dtypes.int32)
updates = constant_op.constant([[[5, 7], [2, 4]], [[1, 3], [6, 8]]],
dtype=dtype)
shape = constant_op.constant([2, 2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array([[[3, 4], [5, 6]], [[1, 2], [7, 8]]],
dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
@test_util.run_deprecated_v1
def testGradientsRank7SliceUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant(
[[[[[[[0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0]]]],
[[[[0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1]]]]]]],
dtype=dtypes.int32)
updates = constant_op.constant(
[[[[[[[5, 6], [2, 4]]]], [[[[1, 3], [6, 8]]]]]]], dtype=dtype)
shape = constant_op.constant([1, 1, 2, 1, 1, 2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant(
[[[[[[[1, 2], [3, 4]]]], [[[[5, 6], [7, 8]]]]]]], dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array(
[[[[[[[3, 4], [5, 6]]]], [[[[1, 2], [7, 8]]]]]]],
dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array(
[[[[[[[1, 2], [3, 4]]]], [[[[5, 6], [7, 8]]]]]]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
@test_util.run_deprecated_v1
  def testScatterNdRepeatedIndicesAdd(self):
indices = array_ops.zeros([100000, 1], dtypes.int32)
values = np.random.randn(100000)
shape = [1]
with self.cached_session():
val = self.scatter_nd(indices, values, shape).eval()
self.assertAllClose([np.sum(values)], val)
@test_util.run_deprecated_v1
def testSmokeScatterNdBatch2DSliceDim2(self):
with self.cached_session():
indices = array_ops.zeros([3, 5, 2], dtype=dtypes.int32)
values = array_ops.zeros([3, 5, 7])
shape = [4, 6, 7]
self.scatter_nd(indices, values, shape).eval()
@test_util.run_deprecated_v1
def testSmokeScatterNdBatch1DSliceDim2(self):
with self.cached_session():
indices = array_ops.zeros([0, 2], dtype=dtypes.int32)
values = array_ops.zeros([0, 7])
shape = [4, 6, 7]
self.scatter_nd(indices, values, shape).eval()
@test_util.run_deprecated_v1
def testSmokeScatterNdBatch1DSliceDim3ShapeRank7(self):
with self.cached_session():
indices = array_ops.zeros([1, 3], dtype=dtypes.int32)
values = array_ops.zeros([1, 6, 7, 8, 9])
shape = [3, 4, 5, 6, 7, 8, 9]
self.scatter_nd(indices, values, shape).eval()
@test_util.run_deprecated_v1
def testSmokeScatterNdBatch2DSliceDim3ShapeRank7(self):
with self.cached_session():
indices = array_ops.zeros([1, 2, 3], dtype=dtypes.int32)
values = array_ops.zeros([1, 2, 6, 7, 8, 9])
shape = [3, 4, 5, 6, 7, 8, 9]
self.scatter_nd(indices, values, shape).eval()
class ScatterNdNonAliasingAddTest(ScatterNdTest):
non_aliasing_add_test = True
def scatter_nd(self, indices, updates, shape, input_=None):
input_ = (input_ if input_ is not None else array_ops.zeros(
shape, dtype=updates.dtype))
return array_ops.scatter_nd_non_aliasing_add(input_, indices, updates)
def testString(self):
# Not supported yet.
pass
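# Added sketch (not part of the original suite): scatter_nd_non_aliasing_add
# returns a fresh tensor equal to `input_` with `updates` added at `indices`,
# leaving `input_` itself untouched (hence "non-aliasing").
def _example_non_aliasing_add():
  t = array_ops.ones([4])
  indices = constant_op.constant([[0], [2]])
  updates = constant_op.constant([10., 20.])
  # Result is [11., 1., 21., 1.]; t itself still evaluates to all ones.
  return array_ops.scatter_nd_non_aliasing_add(t, indices, updates)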
class ScatterNdTensorTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testUpdateAddSub(self):
indices = constant_op.constant([[4], [3], [1], [7]])
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
t = array_ops.ones([8], dtype=dtypes.float32)
assigned = array_ops.tensor_scatter_update(t, indices, updates)
added = array_ops.tensor_scatter_add(t, indices, updates)
subbed = array_ops.tensor_scatter_sub(t, indices, updates)
self.assertAllEqual(assigned,
constant_op.constant([1, 11, 1, 10, 9, 1, 1, 12]))
self.assertAllEqual(added,
constant_op.constant([1, 12, 1, 11, 10, 1, 1, 13]))
self.assertAllEqual(subbed,
constant_op.constant([1, -10, 1, -9, -8, 1, 1, -11]))
@test_util.run_v1_only("b/120545219")
def testUpdateAddSubGradients(self):
with self.cached_session():
indices = constant_op.constant([[3], [1]])
updates = constant_op.constant([9, 10], dtype=dtypes.float32)
x = array_ops.ones([4], dtype=dtypes.float32)
assigned = array_ops.tensor_scatter_update(x, indices, updates)
added = array_ops.tensor_scatter_add(x, indices, updates)
subbed = array_ops.tensor_scatter_sub(x, indices, updates)
err_assigned = gradient_checker.compute_gradient_error(
x, [4], assigned, [4])
err_added = gradient_checker.compute_gradient_error(x, [4], added, [4])
err_subbed = gradient_checker.compute_gradient_error(x, [4], subbed, [4])
self.assertLess(err_assigned, 2e-4)
self.assertLess(err_added, 2e-4)
self.assertLess(err_subbed, 2e-4)
err_assigned_wrt_updates = gradient_checker.compute_gradient_error(
updates, [2], assigned, [4])
err_added_wrt_updates = gradient_checker.compute_gradient_error(
updates, [2], added, [4])
err_subbed_wrt_updates = gradient_checker.compute_gradient_error(
updates, [2], subbed, [4])
self.assertLess(err_assigned_wrt_updates, 2e-4)
self.assertLess(err_added_wrt_updates, 2e-4)
self.assertLess(err_subbed_wrt_updates, 2e-4)
def testTensorScatterUpdateWithForwarding(self):
@def_function.function
def _TestFn():
indices = constant_op.constant([[4], [3], [1], [7]])
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
t = array_ops.ones([8], dtype=dtypes.float32)
return array_ops.tensor_scatter_update(t, indices, updates)
self.assertAllEqual(_TestFn(), [1, 11, 1, 10, 9, 1, 1, 12])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/scatter_nd_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import test
class SaveTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testRelativePath(self):
os.chdir(self.get_temp_dir())
self.evaluate(io_ops.save_v2(
"ckpt", ["x"], [""], [constant_op.constant(100.)]))
self.assertAllEqual([100.],
self.evaluate(io_ops.restore_v2(
"ckpt", ["x"], [""], [dtypes.float32])))
class ShardedFileOpsTest(test.TestCase):
def testShardedFileName(self):
with session.Session(
target="", config=config_pb2.ConfigProto(device_count={"CPU": 2})):
self.assertEqual(
gen_io_ops.sharded_filename("foo", 4, 100).eval(),
b"foo-00004-of-00100")
self.assertEqual(
gen_io_ops.sharded_filespec("foo", 100).eval(), b"foo-?????-of-00100")
class ShapeInferenceTest(test.TestCase):
@test_util.run_deprecated_v1
def testRestoreV2WithSliceInput(self):
op = io_ops.restore_v2("model", ["var1", "var2"], ["", "3 4 0,1:-"],
[dtypes.float32, dtypes.float32])
self.assertEqual(2, len(op))
self.assertFalse(op[0].get_shape().is_fully_defined())
self.assertEqual([1, 4], op[1].get_shape())
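    # Added note (not in the original file): the slice spec "3 4 0,1:-" reads
    # as: full variable shape [3, 4]; dim 0 restores the length-1 slice at
    # offset 0, and "-" takes dim 1 whole, hence the restored shape [1, 4].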
@test_util.run_deprecated_v1
def testRestoreV2NumSlicesNotMatch(self):
with self.assertRaises(ValueError):
io_ops.restore_v2("model", ["var1", "var2", "var3"], ["", "3 4 0,1:-"],
[dtypes.float32, dtypes.float32])
@test_util.run_deprecated_v1
def testRestoreSlice(self):
op = gen_io_ops.restore_slice("model", "var", "3 4 0,1:-", dtypes.float32)
self.assertEqual([1, 4], op.get_shape())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/save_restore_ops_test.py
|
# -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_lower_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringLowerOpTest(test.TestCase):
"""Test cases for tf.strings.lower."""
def test_string_lower(self):
strings = ["Pigs on The Wing", "aNimals"]
with self.cached_session():
output = string_ops.string_lower(strings)
output = self.evaluate(output)
self.assertAllEqual(output, [b"pigs on the wing", b"animals"])
def test_string_lower_2d(self):
strings = [["pigS on THE wIng", "aniMals"], [" hello ", "\n\tWorld! \r \n"]]
with self.cached_session():
output = string_ops.string_lower(strings)
output = self.evaluate(output)
self.assertAllEqual(output, [[b"pigs on the wing", b"animals"],
[b" hello ", b"\n\tworld! \r \n"]])
  def test_string_lower_unicode(self):
strings = [["ÓÓSSCHLOË"]]
with self.cached_session():
output = string_ops.string_lower(strings, encoding="utf-8")
output = self.evaluate(output)
# output: "óósschloë"
self.assertAllEqual(output, [[b"\xc3\xb3\xc3\xb3sschlo\xc3\xab"]])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/string_lower_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class BaseReductionTest(test.TestCase):
def _tf_reduce(self, x, reduction_axes, keepdims):
raise NotImplementedError()
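# Added note (not part of the original file): subclasses override the
# _tf_reduce_* hooks, and the size lists in the tests below straddle
# power-of-two boundaries (e.g. 1024/1025, 4096/4097) so that every kernel
# invocation path gets exercised.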
class BigReductionTest(BaseReductionTest):
"""Test reductions for sum and boolean all over a wide range of shapes."""
def _tf_reduce_max(self, x, reduction_axes, keepdims):
return math_ops.reduce_max(x, reduction_axes, keepdims)
def _tf_reduce_all(self, x, reduction_axes, keepdims):
return math_ops.reduce_all(x, reduction_axes, keepdims)
def _tf_reduce_mean(self, x, reduction_axes, keepdims):
return math_ops.reduce_mean(x, reduction_axes, keepdims)
def _tf_reduce_sum(self, x, reduction_axes, keepdims):
return math_ops.reduce_sum(x, reduction_axes, keepdims)
@test_util.run_deprecated_v1
def testFloat32Sum(self):
# make sure we test all possible kernel invocations
# logic is the same for all ops, test just float32 for brevity
arr_ = np.ones([4097, 4097], dtype=np.float32)
for size_x in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
for size_y in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
arr = arr_[0:size_x, 0:size_y]
col_sum = np.ones([size_y], dtype=np.float32) * size_x
row_sum = np.ones([size_x], dtype=np.float32) * size_y
full_sum = np.ones([], dtype=np.float32) * size_x * size_y
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(dtype=np.float32,
shape=(size_x, size_y))
tf_row_sum = self._tf_reduce_sum(arr_placeholder, 1, False)
tf_col_sum = self._tf_reduce_sum(arr_placeholder, 0, False)
tf_full_sum = self._tf_reduce_sum(arr_placeholder, [0, 1], False)
tf_out_row, tf_out_col, tf_out_full = sess.run(
[tf_row_sum, tf_col_sum, tf_full_sum], {arr_placeholder: arr})
self.assertAllClose(col_sum, tf_out_col)
self.assertAllClose(row_sum, tf_out_row)
self.assertAllClose(full_sum, tf_out_full)
arr_ = np.ones([130, 130, 130], dtype=np.float32)
for size_x in range(1, 130, 13):
for size_y in range(1, 130, 13):
for size_z in range(1, 130, 13):
arr = arr_[0:size_x, 0:size_y, 0:size_z]
sum_y = np.ones([size_x, size_z], dtype=np.float32)
sum_xz = np.ones([size_y], dtype=np.float32)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(
dtype=np.float32, shape=(size_x, size_y, size_z))
tf_sum_xz = self._tf_reduce_mean(arr_placeholder, [0, 2], False)
tf_sum_y = self._tf_reduce_mean(arr_placeholder, 1, False)
tf_out_sum_xz, tf_out_sum_y = sess.run([tf_sum_xz, tf_sum_y],
{arr_placeholder: arr})
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
@test_util.run_deprecated_v1
def testFloat32Max(self):
# make sure we test all possible kernel invocations
# logic is the same for all ops, test just float32 for brevity
arr_ = np.random.uniform(
low=-3, high=-1, size=[4105, 4105]).astype(np.float32)
for size_x in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
for size_y in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
arr = arr_[0:size_x, 0:size_y]
col_max = np.max(arr, axis=0)
row_max = np.max(arr, axis=1)
full_max = np.max(col_max)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(dtype=np.float32,
shape=(size_x, size_y))
tf_row_max = self._tf_reduce_max(arr_placeholder, 1, False)
tf_col_max = self._tf_reduce_max(arr_placeholder, 0, False)
tf_full_max = self._tf_reduce_max(arr_placeholder, [0, 1], False)
tf_out_row, tf_out_col, tf_out_full = sess.run(
[tf_row_max, tf_col_max, tf_full_max], {arr_placeholder: arr})
self.assertAllClose(col_max, tf_out_col)
self.assertAllClose(row_max, tf_out_row)
self.assertAllClose(full_max, tf_out_full)
arr_ = np.random.uniform(
low=-3, high=-1, size=[130, 130, 130]).astype(np.float32)
for size_x in range(1, 130, 13):
for size_y in range(1, 130, 13):
for size_z in range(1, 130, 13):
arr = arr_[0:size_x, 0:size_y, 0:size_z]
sum_y = np.max(arr, axis=1)
sum_xz = np.max(arr, axis=(0, 2))
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(
dtype=np.float32, shape=(size_x, size_y, size_z))
tf_sum_xz = self._tf_reduce_max(arr_placeholder, [0, 2], False)
tf_sum_y = self._tf_reduce_max(arr_placeholder, 1, False)
tf_out_sum_xz, tf_out_sum_y = sess.run(
[tf_sum_xz, tf_sum_y], {arr_placeholder: arr})
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
@test_util.run_deprecated_v1
def testBooleanAll(self):
# make sure we test all possible kernel invocations
# test operation where T(0) is not the identity
arr_ = np.ones([4097, 4097], dtype=np.bool)
for size_x in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
for size_y in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
arr = arr_[0:size_x, 0:size_y]
col_sum = np.ones([size_y], dtype=np.bool)
row_sum = np.ones([size_x], dtype=np.bool)
full_sum = np.ones([1], dtype=np.bool).reshape([])
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(dtype=np.bool,
shape=(size_x, size_y))
tf_row_sum = self._tf_reduce_all(arr_placeholder, 1, False)
tf_col_sum = self._tf_reduce_all(arr_placeholder, 0, False)
tf_full_sum = self._tf_reduce_all(arr_placeholder, [0, 1], False)
tf_out_row, tf_out_col, tf_out_full = sess.run(
[tf_row_sum, tf_col_sum, tf_full_sum], {arr_placeholder: arr})
self.assertAllClose(col_sum, tf_out_col)
self.assertAllClose(row_sum, tf_out_row)
self.assertAllClose(full_sum, tf_out_full)
arr_ = np.ones([130, 130, 130], dtype=np.bool)
for size_x in range(1, 130, 13):
for size_y in range(1, 130, 13):
for size_z in range(1, 130, 13):
arr = arr_[0:size_x, 0:size_y, 0:size_z]
sum_y = np.ones([size_x, size_z], dtype=np.bool)
sum_xz = np.ones([size_y], dtype=np.bool)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(
dtype=np.bool, shape=(size_x, size_y, size_z))
tf_sum_xz = self._tf_reduce_all(arr_placeholder, [0, 2], False)
tf_sum_y = self._tf_reduce_all(arr_placeholder, 1, False)
tf_out_sum_xz, tf_out_sum_y = sess.run(
[tf_sum_xz, tf_sum_y], {arr_placeholder: arr})
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/reduction_ops_test_big.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.stack_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class StackOpTest(test.TestCase):
def _testStackPushPop(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push_v2(h, [[4.0, 5.0]])
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
self.assertAllClose([[4.0, 5.0]], self.evaluate(c1))
@test_util.run_deprecated_v1
def testStackPushPop(self):
self._testStackPushPop(use_gpu=False)
self._testStackPushPop(use_gpu=True)
def _testStackPushPopSwap(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
a = np.arange(2000)
x = constant_op.constant(a, dtype=dtypes.float32)
h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push_v2(h, x, swap_memory=True)
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
self.assertAllClose(a, self.evaluate(c1))
@test_util.run_deprecated_v1
def testStackPushPopSwap(self):
self._testStackPushPopSwap(use_gpu=False)
self._testStackPushPopSwap(use_gpu=True)
def _testStackWhileSwap(self, use_gpu):
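    # Added note (not in the original file): the first while_loop pushes ten
    # ones(2000) tensors with swap_memory=True (letting them spill to host
    # memory); the second loop pops them back and accumulates, so ry should
    # equal 10 * ones(2000).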
with self.cached_session(use_gpu=use_gpu):
n = constant_op.constant(0)
h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
def c(x):
return math_ops.less(x, 10)
def b(x):
with ops.control_dependencies([x]):
a = constant_op.constant(np.ones(2000), dtype=dtypes.float32)
v = gen_data_flow_ops.stack_push_v2(h, a, swap_memory=True)
with ops.control_dependencies([v]):
return math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n])
v = constant_op.constant(np.zeros(2000), dtype=dtypes.float32)
def c1(x, y):
del y
return math_ops.greater(x, 0)
def b1(x, y):
nx = math_ops.subtract(x, 1)
ny = y + gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
return [nx, ny]
_, ry = control_flow_ops.while_loop(
c1, b1, [r, v], [r.get_shape(), tensor_shape.unknown_shape()])
self.assertAllClose(np.ones(2000) * 10.0, self.evaluate(ry))
@test_util.run_v1_only("b/120545219")
def testStackWhileSwap(self):
self._testStackWhileSwap(use_gpu=False)
self._testStackWhileSwap(use_gpu=True)
def _testMultiStack(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
h1 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_push_v2(h1, 4.0)
with ops.control_dependencies([c1]):
c1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
h2 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="bar")
c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
with ops.control_dependencies([c2]):
c2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
r = c1 + c2
self.assertAllClose(9.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testMultiStack(self):
self._testMultiStack(use_gpu=False)
self._testMultiStack(use_gpu=True)
def _testSameNameStacks(self, use_gpu):
"""Different stacks with the same name do not interfere."""
    with self.cached_session(use_gpu=use_gpu):
h1 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
h2 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_push_v2(h1, 4.0)
with ops.control_dependencies([c1]):
c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
with ops.control_dependencies([c2]):
pop1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
pop2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
out1, out2 = self.evaluate([pop1, pop2])
self.assertAllClose(out1, 4.0)
self.assertAllClose(out2, 5.0)
@test_util.run_deprecated_v1
def testSameNameStacks(self):
self._testSameNameStacks(use_gpu=False)
self._testSameNameStacks(use_gpu=True)
def _testCloseStack(self, use_gpu):
    with self.cached_session(use_gpu=use_gpu):
h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_close_v2(h)
self.evaluate(c1)
@test_util.run_deprecated_v1
def testCloseStack(self):
self._testCloseStack(use_gpu=False)
self._testCloseStack(use_gpu=True)
def _testPushCloseStack(self, use_gpu):
    with self.cached_session(use_gpu=use_gpu):
h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push_v2(h, [[4.0, 5.0]])
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_close_v2(h)
self.evaluate(c1)
@test_util.run_deprecated_v1
def testPushCloseStack(self):
self._testPushCloseStack(use_gpu=False)
self._testPushCloseStack(use_gpu=True)
class StackOpRefTest(test.TestCase):
"""Tests for deprecated non-resource variant of stack ops."""
def _testStackPushPop(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push(h, [[4.0, 5.0]])
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_pop(h, dtypes.float32)
self.assertAllClose([[4.0, 5.0]], self.evaluate(c1))
@test_util.run_deprecated_v1
def testStackPushPop(self):
self._testStackPushPop(use_gpu=False)
self._testStackPushPop(use_gpu=True)
def _testStackPushPopSwap(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
a = np.arange(2000)
x = constant_op.constant(a, dtype=dtypes.float32)
h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push(h, x, swap_memory=True)
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_pop(h, dtypes.float32)
self.assertAllClose(a, self.evaluate(c1))
@test_util.run_deprecated_v1
def testStackPushPopSwap(self):
self._testStackPushPopSwap(use_gpu=False)
self._testStackPushPopSwap(use_gpu=True)
def _testMultiStack(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_push(h1, 4.0)
with ops.control_dependencies([c1]):
c1 = gen_data_flow_ops.stack_pop(h1, dtypes.float32)
h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="bar")
c2 = gen_data_flow_ops.stack_push(h2, 5.0)
with ops.control_dependencies([c2]):
c2 = gen_data_flow_ops.stack_pop(h2, dtypes.float32)
r = c1 + c2
self.assertAllClose(9.0, self.evaluate(r))
def _testStackWhileSwap(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
n = constant_op.constant(0)
h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
def c(x):
return math_ops.less(x, 10)
def b(x):
with ops.control_dependencies([x]):
a = constant_op.constant(np.ones(2000), dtype=dtypes.float32)
v = gen_data_flow_ops.stack_push(h, a, swap_memory=True)
with ops.control_dependencies([v]):
return math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n])
v = constant_op.constant(np.zeros(2000), dtype=dtypes.float32)
def c1(x, y):
del y
return math_ops.greater(x, 0)
def b1(x, y):
nx = math_ops.subtract(x, 1)
ny = y + gen_data_flow_ops.stack_pop(h, dtypes.float32)
return [nx, ny]
_, ry = control_flow_ops.while_loop(
c1, b1, [r, v], [r.get_shape(), tensor_shape.unknown_shape()])
self.assertAllClose(np.ones(2000) * 10.0, self.evaluate(ry))
@test_util.run_v1_only("b/120545219")
def testStackWhileSwap(self):
self._testStackWhileSwap(use_gpu=False)
self._testStackWhileSwap(use_gpu=True)
@test_util.run_deprecated_v1
def testMultiStack(self):
self._testMultiStack(use_gpu=False)
self._testMultiStack(use_gpu=True)
def _testSameNameStacks(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_push(h1, 4.0)
h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c2 = gen_data_flow_ops.stack_push(h2, 5.0)
_ = c1 + c2
self.assertNotEqual(h1.eval()[1], self.evaluate(h2)[1])
@test_util.run_deprecated_v1
def testSameNameStacks(self):
self._testSameNameStacks(use_gpu=False)
self._testSameNameStacks(use_gpu=True)
def _testCloseStack(self, use_gpu):
    with self.cached_session(use_gpu=use_gpu):
h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_close(h)
self.evaluate(c1)
@test_util.run_deprecated_v1
def testCloseStack(self):
self._testCloseStack(use_gpu=False)
self._testCloseStack(use_gpu=True)
def _testPushCloseStack(self, use_gpu):
    with self.cached_session(use_gpu=use_gpu):
h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push(h, [[4.0, 5.0]])
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_close(h)
self.evaluate(c1)
@test_util.run_deprecated_v1
def testPushCloseStack(self):
self._testPushCloseStack(use_gpu=False)
self._testPushCloseStack(use_gpu=True)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/stack_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_length_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
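# Illustrative sketch (not part of the original test file): how the two
# units differ. For the UTF-8 encoding of u"\u00e9" (one character, two
# bytes), unit="BYTE" reports 2 while unit="UTF8_CHAR" reports 1.
def _example_string_length_units():
  s = [u"\u00e9".encode("utf-8")]
  byte_len = string_ops.string_length(s, unit="BYTE")  # [2]
  char_len = string_ops.string_length(s, unit="UTF8_CHAR")  # [1]
  return byte_len, char_len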
class StringLengthOpTest(test.TestCase):
def testStringLength(self):
strings = [[["1", "12"], ["123", "1234"], ["12345", "123456"]]]
    with self.cached_session():
lengths = string_ops.string_length(strings)
values = self.evaluate(lengths)
self.assertAllEqual(values, [[[1, 2], [3, 4], [5, 6]]])
@test_util.run_deprecated_v1
def testUnit(self):
unicode_strings = [u"H\xc3llo", u"\U0001f604"]
utf8_strings = [s.encode("utf-8") for s in unicode_strings]
expected_utf8_byte_lengths = [6, 4]
expected_utf8_char_lengths = [5, 1]
    with self.session():
utf8_byte_lengths = string_ops.string_length(utf8_strings, unit="BYTE")
utf8_char_lengths = string_ops.string_length(
utf8_strings, unit="UTF8_CHAR")
self.assertAllEqual(
self.evaluate(utf8_byte_lengths), expected_utf8_byte_lengths)
self.assertAllEqual(
self.evaluate(utf8_char_lengths), expected_utf8_char_lengths)
with self.assertRaisesRegexp(
ValueError, "Attr 'unit' of 'StringLength' Op passed string 'XYZ' "
'not in: "BYTE", "UTF8_CHAR"'):
string_ops.string_length(utf8_strings, unit="XYZ")
@test_util.run_deprecated_v1
def testLegacyPositionalName(self):
# Code that predates the 'unit' parameter may have used a positional
# argument for the 'name' parameter. Check that we don't break such code.
strings = [[["1", "12"], ["123", "1234"], ["12345", "123456"]]]
lengths = string_ops.string_length(strings, "some_name")
with self.session():
self.assertAllEqual(lengths.eval(), [[[1, 2], [3, 4], [5, 6]]])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/string_length_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.one_hot_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
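# Illustrative sketch (not part of the original test file): one_hot maps
# each index to a depth-length vector; indices outside [0, depth), such as
# -1, receive off_value everywhere, and axis controls where the new
# dimension is inserted.
def _example_one_hot_defaults():
  # With the defaults (on_value=1, off_value=0, axis=-1, dtype=float32)
  # this evaluates to the 3x3 identity matrix.
  return array_ops.one_hot([0, 1, 2], depth=3)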
class OneHotTest(test.TestCase):
def _testOneHot(self,
truth,
use_gpu=False,
expected_err_re=None,
raises=None,
**inputs):
with self.cached_session(use_gpu=use_gpu):
if raises is not None:
with self.assertRaises(raises):
array_ops.one_hot(**inputs)
else:
ans = array_ops.one_hot(**inputs)
if expected_err_re is None:
tf_ans = self.evaluate(ans)
self.assertAllEqual(tf_ans, truth)
self.assertEqual(tf_ans.shape, ans.get_shape())
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def _testBothOneHot(self, truth, expected_err_re=None, raises=None, **inputs):
self._testOneHot(truth, True, expected_err_re, raises, **inputs)
self._testOneHot(truth, False, expected_err_re, raises, **inputs)
def _testBasic(self, dtype):
indices = np.asarray([0, 2, -1, 1], dtype=np.int64)
depth = 3
on_value = np.asarray(1.0, dtype=dtype)
off_value = np.asarray(-1.0, dtype=dtype)
truth = np.asarray(
[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0]],
dtype=dtype)
# axis == -1
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
dtype=dtype,
truth=truth)
# axis == 0
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
axis=0,
dtype=dtype,
        truth=truth.T)  # Output is the transposed version in this case
def _testDefaultBasic(self, dtype):
indices = np.asarray([0, 2, -1, 1], dtype=np.int64)
depth = 3
truth = np.asarray(
[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
dtype=dtype)
# axis == -1
self._testBothOneHot(indices=indices, depth=depth, truth=truth)
# axis == 0
self._testBothOneHot(
indices=indices, depth=depth, axis=0,
        truth=truth.T)  # Output is the transposed version in this case
def testFloatBasic(self):
self._testBasic(np.float32)
self._testDefaultBasic(np.float32)
def testDoubleBasic(self):
self._testBasic(np.float64)
self._testDefaultBasic(np.float64)
def testInt32Basic(self):
self._testBasic(np.int32)
self._testDefaultBasic(np.int32)
def testInt64Basic(self):
self._testBasic(np.int64)
self._testDefaultBasic(np.int64)
def testComplex64Basic(self):
self._testBasic(np.complex64)
self._testDefaultBasic(np.complex64)
def testComplex128Basic(self):
self._testBasic(np.complex128)
self._testDefaultBasic(np.complex128)
def _testBatch(self, dtype):
indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)
depth = 3
on_value = np.asarray(1.0, dtype=dtype)
off_value = np.asarray(-1.0, dtype=dtype)
truth = np.asarray(
[[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]],
dtype=dtype)
# axis == -1
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
dtype=dtype,
truth=truth)
# axis == 1
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
axis=1,
dtype=dtype,
truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def _testDefaultValuesBatch(self, dtype):
indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)
depth = 3
truth = np.asarray(
[[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]],
dtype=dtype)
# axis == -1
self._testBothOneHot(indices=indices, depth=depth, dtype=dtype, truth=truth)
# axis == 1
self._testBothOneHot(
indices=indices,
depth=depth,
axis=1,
dtype=dtype,
truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def _testValueTypeBatch(self, dtype):
indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)
depth = 3
on_value = np.asarray(1.0, dtype=dtype)
off_value = np.asarray(-1.0, dtype=dtype)
truth = np.asarray(
[[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]],
dtype=dtype)
# axis == -1
self._testBothOneHot(
indices=indices,
on_value=on_value,
off_value=off_value,
depth=depth,
dtype=dtype,
truth=truth)
# axis == 1
self._testBothOneHot(
indices=indices,
on_value=on_value,
off_value=off_value,
depth=depth,
axis=1,
dtype=dtype,
truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def _testEmpty(self, dtype):
indices = np.zeros((0, 16), dtype=np.int64)
depth = 3
on_value = np.asarray(1.0, dtype=dtype)
off_value = np.asarray(-1.0, dtype=dtype)
truth = np.empty((0, 16, 3), dtype=dtype)
# axis == -1
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
dtype=dtype,
truth=truth)
def testHalfBatch(self):
self._testEmpty(np.float16)
self._testBatch(np.float16)
self._testDefaultValuesBatch(np.float16)
self._testValueTypeBatch(np.float16)
def testFloatBatch(self):
self._testEmpty(np.float32)
self._testBatch(np.float32)
self._testDefaultValuesBatch(np.float32)
self._testValueTypeBatch(np.float32)
def testDoubleBatch(self):
self._testEmpty(np.float64)
self._testBatch(np.float64)
self._testDefaultValuesBatch(np.float64)
self._testValueTypeBatch(np.float64)
def testInt32Batch(self):
self._testEmpty(np.int32)
self._testBatch(np.int32)
self._testDefaultValuesBatch(np.int32)
self._testValueTypeBatch(np.int32)
def testInt64Batch(self):
self._testEmpty(np.int64)
self._testBatch(np.int64)
self._testDefaultValuesBatch(np.int64)
self._testValueTypeBatch(np.int64)
def testComplexBatch(self):
self._testEmpty(np.complex64)
self._testBatch(np.complex64)
# self._testDefaultValuesBatch(np.complex64)
self._testValueTypeBatch(np.complex64)
def testSimpleCases(self):
indices = [0, 1, 2]
depth = 3
truth = np.asarray(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float32)
self._testBothOneHot(indices=indices, depth=depth, truth=truth)
indices = [0, 1, 2]
depth = 3
truth = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.int32)
self._testBothOneHot(
indices=indices, depth=depth, dtype=np.int32, truth=truth)
indices = [0, 1, 2]
depth = 3
truth = np.asarray([[1, -1, -1], [-1, 1, -1], [-1, -1, 1]], dtype=np.int32)
self._testBothOneHot(
indices=indices, depth=depth, on_value=1, off_value=-1, truth=truth)
def testSingleValueGiven(self):
# Only on_value provided
indices = [0, 1, 2]
depth = 3
truth = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.int32)
self._testBothOneHot(indices=indices, depth=depth, on_value=1, truth=truth)
# Only off_value provided
indices = [0, 1, 2]
depth = 3
truth = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)
self._testBothOneHot(
indices=indices, depth=depth, off_value=0.0, truth=truth)
def testString(self):
indices = [0, 1, 2]
depth = 3
truth = np.asarray([[b"1.0", b"0.0", b"0.0"], [b"0.0", b"1.0", b"0.0"],
[b"0.0", b"0.0", b"1.0"]])
on_value = np.asarray(b"1.0")
off_value = np.asarray(b"0.0")
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
dtype=dtypes.string,
truth=truth)
on_value = constant_op.constant(b"1.0")
off_value = constant_op.constant(b"0.0")
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
dtype=dtypes.string,
truth=truth)
on_value = b"1.0"
off_value = b"0.0"
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
dtype=dtypes.string,
truth=truth)
def testIndicesTypes(self):
tf_types = [dtypes.uint8, dtypes.int32, dtypes.int64]
np_types = [np.int32, np.int64]
for itype in tf_types + np_types:
      # Note: to keep the tests simple, in the case of uint8 the index -1
      # below wraps to 255, which is out of the depth range just as -1 is.
if itype in tf_types:
indices = constant_op.constant(
[[0, 2, -1, 1], [1, 0, 1, -1]], dtype=itype)
elif itype in np_types:
indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=itype)
depth = 3
on_value = np.asarray(1.0, dtype=np.float32)
off_value = np.asarray(-1.0, dtype=np.float32)
truth = np.asarray(
[[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]],
dtype=np.float32)
# axis == -1
self._testBothOneHot(
indices=indices,
on_value=on_value,
off_value=off_value,
depth=depth,
truth=truth)
# axis == 1
self._testBothOneHot(
indices=indices,
on_value=on_value,
off_value=off_value,
depth=depth,
axis=1,
truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def testPrefixDimOverflow(self):
for itype in [dtypes.int32, dtypes.int64, dtypes.uint8]:
prefix_dim_size = 65536
depth = 2
x = [i % depth for i in range(prefix_dim_size)]
indices = constant_op.constant(x, dtype=itype)
truth = np.zeros((prefix_dim_size, depth), np.float32)
for i in range(prefix_dim_size):
truth[i, x[i]] = 1.0
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=1.0,
off_value=0.0,
truth=truth)
def testOnOffMismatchTypeError(self):
indices = [0, 1, 2]
depth = 3
on_value = np.asarray(1.0, np.float64)
off_value = np.asarray(0.0, np.float32)
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
truth=None,
raises=TypeError)
def testDtypeMismatchTypeError(self):
indices = [0, 1, 2]
depth = 3
on_value = np.asarray(1.0, np.float32)
off_value = np.asarray(0.0, np.float32)
dtype = np.int32
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
dtype=dtype,
truth=None,
raises=TypeError)
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=off_value,
dtype=dtype,
truth=None,
raises=TypeError)
def testOneHotUint8WithLargeArray(self):
with self.cached_session(use_gpu=False) as sess:
matrix = np.random.rand(256) * 10
tensor = constant_op.constant(matrix, dtypes.uint8, shape=matrix.shape)
tensor_one_hot = array_ops.one_hot(tensor, depth=10, axis=0)
self.assertEqual(sess.run(tensor_one_hot).shape, (10, 256))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/one_hot_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.layers import convolutional
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Returns true iff the two initializers produce the same tensor to
# within a tiny tolerance.
def identicaltest(tc, init1, init2, shape=None):
"""Tests if two initializations are identical to within tiny tolerances.
Args:
tc: An instance of TensorFlowTestCase.
init1: An Initializer that generates a tensor of a given shape
init2: An Initializer that generates a tensor of a given shape
shape: Shape of the tensor to initialize or `None` to use a vector of length
100.
Returns:
True or False as determined by test.
"""
if shape is None:
shape = [100]
with tc.test_session(graph=ops.Graph()):
t1 = init1(shape).eval()
with tc.test_session(graph=ops.Graph()):
t2 = init2(shape).eval()
return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)
def duplicated_initializer(tc, init, graph_seed, shape=None):
"""Tests duplicated random initializer within the same graph.
This test generates two random kernels from the same initializer to the same
graph, and checks if the results are close enough. Even given the same global,
seed, two different instances of random kernels should generate different
results.
Args:
tc: An instance of TensorFlowTestCase.
init: An Initializer that generates a tensor of a given shape
graph_seed: A graph-level seed to use.
shape: Shape of the tensor to initialize or `None` to use a vector of length
100.
Returns:
True or False as determined by test.
"""
if shape is None:
shape = [100]
with tc.test_session(graph=ops.Graph()):
random_seed.set_random_seed(graph_seed)
t1 = init(shape).eval()
t2 = init(shape).eval()
return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)
def _init_sampler(tc, init, num):
"""Returns a func to generate a random tensor of shape [num].
Args:
tc: An instance of TensorFlowTestCase.
init: An Initializer that generates a tensor of a given shape
num: Size of 1D tensor to create.
Returns:
Function to generate a random tensor.
"""
def func():
with tc.test_session(use_gpu=True):
return init([num]).eval()
return func
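# Illustrative sketch (not part of the original helpers): what the seed
# arguments pin down. An op-level seed makes an initializer reproducible
# across fresh graphs (checked by identicaltest), while a graph-level seed
# alone still gives distinct draws to distinct ops (checked by
# duplicated_initializer).
def _example_seeded_initializer():
  init = init_ops.random_normal_initializer(0.0, 1.0, seed=1)
  with ops.Graph().as_default():
    return init([4])  # Same values every time this graph is rebuilt.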
class ConstantInitializersTest(test.TestCase):
@test_util.run_deprecated_v1
def testZerosInitializer(self):
with self.session(use_gpu=True):
shape = [2, 3]
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.zeros_initializer())
x.initializer.run()
self.assertAllEqual(x.eval(), np.zeros(shape))
@test_util.run_deprecated_v1
def testOnesInitializer(self):
with self.session(use_gpu=True):
shape = [2, 3]
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.ones_initializer())
x.initializer.run()
self.assertAllEqual(x.eval(), np.ones(shape))
@test_util.run_deprecated_v1
def testConstantZeroInitializer(self):
with self.session(use_gpu=True):
shape = [2, 3]
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.constant_initializer(0.0))
x.initializer.run()
self.assertAllEqual(x.eval(), np.zeros(shape))
@test_util.run_deprecated_v1
def testConstantOneInitializer(self):
with self.session(use_gpu=True):
shape = [2, 3]
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.constant_initializer(1.0))
x.initializer.run()
self.assertAllEqual(x.eval(), np.ones(shape))
@test_util.run_deprecated_v1
def testConstantIntInitializer(self):
with self.session(use_gpu=True):
shape = [2, 3]
x = variable_scope.get_variable(
"x",
shape=shape,
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(7))
x.initializer.run()
self.assertEqual(x.dtype.base_dtype, dtypes.int32)
self.assertAllEqual(x.eval(), 7 * np.ones(shape, dtype=np.int32))
@test_util.run_deprecated_v1
def testConstantTupleInitializer(self):
with self.session(use_gpu=True):
shape = [3]
x = variable_scope.get_variable(
"x",
shape=shape,
dtype=dtypes.int32,
initializer=init_ops.constant_initializer((10, 20, 30)))
x.initializer.run()
self.assertEqual(x.dtype.base_dtype, dtypes.int32)
self.assertAllEqual(x.eval(), [10, 20, 30])
def _testNDimConstantInitializer(self, name, value, shape, expected):
with self.cached_session(use_gpu=True):
init = init_ops.constant_initializer(value, dtype=dtypes.int32)
x = variable_scope.get_variable(name, shape=shape, initializer=init)
x.initializer.run()
actual = array_ops.reshape(x, [-1]).eval()
self.assertEqual(len(actual), len(expected))
for a, e in zip(actual, expected):
self.assertEqual(a, e)
@test_util.run_deprecated_v1
def testNDimConstantInitializer(self):
value = [0, 1, 2, 3, 4, 5]
shape = [2, 3]
expected = list(value)
self._testNDimConstantInitializer("list", value, shape, expected)
self._testNDimConstantInitializer("ndarray",
np.asarray(value), shape, expected)
self._testNDimConstantInitializer("2D-ndarray",
np.asarray(value).reshape(tuple(shape)),
shape, expected)
def _testNDimConstantInitializerLessValues(self, name, value, shape,
expected):
with self.cached_session(use_gpu=True):
init = init_ops.constant_initializer(value, dtype=dtypes.int32)
x = variable_scope.get_variable(name, shape=shape, initializer=init)
x.initializer.run()
actual = array_ops.reshape(x, [-1]).eval()
self.assertGreater(len(actual), len(expected))
for i in xrange(len(actual)):
a = actual[i]
e = expected[i] if i < len(expected) else expected[-1]
self.assertEqual(a, e)
@test_util.run_deprecated_v1
def testNDimConstantInitializerLessValues(self):
value = [0, 1, 2, 3, 4, 5]
shape = [2, 4]
expected = list(value)
self._testNDimConstantInitializerLessValues("list", value, shape, expected)
self._testNDimConstantInitializerLessValues("ndarray",
np.asarray(value), shape,
expected)
self._testNDimConstantInitializerLessValues(
"2D-ndarray", np.asarray(value).reshape(tuple([2, 3])), shape, expected)
def _testNDimConstantInitializerMoreValues(self, value, shape):
ops.reset_default_graph()
with self.cached_session(use_gpu=True):
init = init_ops.constant_initializer(value, dtype=dtypes.int32)
self.assertRaises(
ValueError,
variable_scope.get_variable,
"x",
shape=shape,
initializer=init)
@test_util.run_deprecated_v1
def testNDimConstantInitializerMoreValues(self):
value = [0, 1, 2, 3, 4, 5, 6, 7]
shape = [2, 3]
self._testNDimConstantInitializerMoreValues(value, shape)
self._testNDimConstantInitializerMoreValues(np.asarray(value), shape)
self._testNDimConstantInitializerMoreValues(
np.asarray(value).reshape(tuple([2, 4])), shape)
def testInvalidValueTypeForConstantInitializerCausesTypeError(self):
c = constant_op.constant([1.0, 2.0, 3.0])
with self.assertRaisesRegexp(
TypeError, r"Invalid type for initial value: .*Tensor.*"):
init_ops.constant_initializer(c, dtype=dtypes.float32)
v = variables.Variable([3.0, 2.0, 1.0])
with self.assertRaisesRegexp(
TypeError, r"Invalid type for initial value: .*Variable.*"):
init_ops.constant_initializer(v, dtype=dtypes.float32)
class RandomNormalInitializationTest(test.TestCase):
@test_util.run_deprecated_v1
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
init2 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2))
@test_util.run_deprecated_v1
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
init2 = init_ops.random_normal_initializer(0.0, 1.0, seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2))
@test_util.run_deprecated_v1
def testDuplicatedInitializer(self):
init = init_ops.random_normal_initializer(0.0, 1.0)
self.assertFalse(duplicated_initializer(self, init, 1))
def testInvalidDataType(self):
self.assertRaises(
ValueError,
init_ops.random_normal_initializer,
0.0,
1.0,
dtype=dtypes.string)
class TruncatedNormalInitializationTest(test.TestCase):
@test_util.run_deprecated_v1
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.truncated_normal_initializer(
0.0, 1.0, seed=1, dtype=dtype)
init2 = init_ops.truncated_normal_initializer(
0.0, 1.0, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2))
@test_util.run_deprecated_v1
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.truncated_normal_initializer(
0.0, 1.0, seed=1, dtype=dtype)
init2 = init_ops.truncated_normal_initializer(
0.0, 1.0, seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2))
@test_util.run_deprecated_v1
def testDuplicatedInitializer(self):
init = init_ops.truncated_normal_initializer(0.0, 1.0)
self.assertFalse(duplicated_initializer(self, init, 1))
def testInvalidDataType(self):
self.assertRaises(
ValueError,
init_ops.truncated_normal_initializer,
0.0,
1.0,
dtype=dtypes.string)
class RandomUniformInitializationTest(test.TestCase):
@test_util.run_deprecated_v1
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64, dtypes.int64]:
init1 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
init2 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2))
@test_util.run_deprecated_v1
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64]:
init1 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
init2 = init_ops.random_uniform_initializer(0, 7, seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2))
@test_util.run_deprecated_v1
def testDuplicatedInitializer(self):
init = init_ops.random_uniform_initializer(0.0, 1.0)
self.assertFalse(duplicated_initializer(self, init, 1))
class UniformUnitScalingInitializationTest(test.TestCase):
@test_util.run_deprecated_v1
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
init2 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2))
init3 = init_ops.uniform_unit_scaling_initializer(
1.5, seed=1, dtype=dtype)
init4 = init_ops.uniform_unit_scaling_initializer(
1.5, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init3, init4))
@test_util.run_deprecated_v1
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
init2 = init_ops.uniform_unit_scaling_initializer(seed=2, dtype=dtype)
init3 = init_ops.uniform_unit_scaling_initializer(
1.5, seed=1, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2))
self.assertFalse(identicaltest(self, init1, init3))
self.assertFalse(identicaltest(self, init2, init3))
@test_util.run_deprecated_v1
def testZeroSize(self):
shape = [0, 2]
with self.cached_session():
x = variable_scope.get_variable(
"x",
shape=shape,
initializer=init_ops.uniform_unit_scaling_initializer())
variables.global_variables_initializer().run()
self.assertAllEqual(shape, self.evaluate(x).shape)
@test_util.run_deprecated_v1
def testDuplicatedInitializer(self):
init = init_ops.uniform_unit_scaling_initializer()
self.assertFalse(duplicated_initializer(self, init, 1))
def testInvalidDataType(self):
self.assertRaises(
ValueError,
init_ops.uniform_unit_scaling_initializer,
dtype=dtypes.string)
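# Illustrative sketch (not part of the original test file): the statistic
# the distribution tests below assert. With the default scale=1.0 and
# mode="fan_in", a [100, 100] kernel targets variance 1/100 whichever
# distribution is sampled from.
def _example_variance_scaling_target():
  init = init_ops.variance_scaling_initializer(distribution='uniform')
  return init([100, 100])  # np.var of the result should be near 0.01.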
class VarianceScalingInitializationTest(test.TestCase):
@test_util.run_deprecated_v1
def testTruncatedNormalDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops.variance_scaling_initializer(
distribution='truncated_normal')
with self.session(use_gpu=True), \
test.mock.patch.object(
random_ops, 'truncated_normal', wraps=random_ops.truncated_normal) \
as mock_truncated_normal:
x = init(shape).eval()
self.assertTrue(mock_truncated_normal.called)
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
@test_util.run_deprecated_v1
def testNormalDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops.variance_scaling_initializer(distribution='normal')
with self.session(use_gpu=True), \
test.mock.patch.object(
random_ops, 'truncated_normal', wraps=random_ops.truncated_normal) \
as mock_truncated_normal:
x = init(shape).eval()
self.assertTrue(mock_truncated_normal.called)
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
@test_util.run_deprecated_v1
def testUntruncatedNormalDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops.variance_scaling_initializer(
distribution='untruncated_normal')
with self.session(use_gpu=True), \
test.mock.patch.object(
random_ops, 'random_normal', wraps=random_ops.random_normal) \
as mock_random_normal:
x = init(shape).eval()
self.assertTrue(mock_random_normal.called)
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
@test_util.run_deprecated_v1
def testUniformDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops.variance_scaling_initializer(distribution='uniform')
with self.session(use_gpu=True):
x = init(shape).eval()
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
# TODO(vrv): move to sequence_ops_test?
class RangeTest(test.TestCase):
def _Range(self, start, limit, delta):
with self.cached_session(use_gpu=True):
tf_ans = math_ops.range(start, limit, delta, name="range")
self.assertEqual([len(np.arange(start, limit, delta))],
tf_ans.get_shape())
return self.evaluate(tf_ans)
def testBasic(self):
self.assertTrue(
np.array_equal(self._Range(0, 5, 1), np.array([0, 1, 2, 3, 4])))
self.assertTrue(np.array_equal(self._Range(0, 5, 2), np.array([0, 2, 4])))
self.assertTrue(np.array_equal(self._Range(0, 6, 2), np.array([0, 2, 4])))
self.assertTrue(
np.array_equal(self._Range(13, 32, 7), np.array([13, 20, 27])))
self.assertTrue(
np.array_equal(
self._Range(100, 500, 100), np.array([100, 200, 300, 400])))
self.assertEqual(math_ops.range(0, 5, 1).dtype, dtypes.int32)
@test_util.run_deprecated_v1
def testLimitOnly(self):
with self.session(use_gpu=True):
self.assertAllEqual(np.arange(5), math_ops.range(5).eval())
def testEmpty(self):
for start in 0, 5:
self.assertTrue(np.array_equal(self._Range(start, start, 1), []))
def testNonInteger(self):
self.assertTrue(
np.allclose(self._Range(0, 2, 0.5), np.array([0, 0.5, 1, 1.5])))
self.assertTrue(np.allclose(self._Range(0, 5, 2.5), np.array([0, 2.5])))
self.assertTrue(
np.allclose(self._Range(0, 3, 0.9), np.array([0, 0.9, 1.8, 2.7])))
self.assertTrue(
np.allclose(
self._Range(100., 500., 100.), np.array([100, 200, 300, 400])))
self.assertEqual(math_ops.range(0., 5., 1.).dtype, dtypes.float32)
def testNegativeDelta(self):
self.assertTrue(
np.array_equal(self._Range(5, -1, -1), np.array([5, 4, 3, 2, 1, 0])))
self.assertTrue(
np.allclose(self._Range(2.5, 0, -0.5), np.array([2.5, 2, 1.5, 1, 0.5])))
self.assertTrue(
np.array_equal(self._Range(-5, -10, -3), np.array([-5, -8])))
def testDType(self):
zero_int32 = math_ops.cast(0, dtypes.int32)
zero_int64 = math_ops.cast(0, dtypes.int64)
zero_float32 = math_ops.cast(0, dtypes.float32)
zero_float64 = math_ops.cast(0, dtypes.float64)
self.assertEqual(math_ops.range(zero_int32, 0, 1).dtype, dtypes.int32)
self.assertEqual(math_ops.range(zero_int64, 0, 1).dtype, dtypes.int64)
self.assertEqual(math_ops.range(zero_float32, 0, 1).dtype, dtypes.float32)
self.assertEqual(math_ops.range(zero_float64, 0, 1).dtype, dtypes.float64)
self.assertEqual(
math_ops.range(zero_int32, zero_int64, 1).dtype, dtypes.int64)
self.assertEqual(
math_ops.range(zero_int64, zero_float32, 1).dtype, dtypes.float32)
self.assertEqual(
math_ops.range(zero_float32, zero_float64, 1).dtype, dtypes.float64)
self.assertEqual(
math_ops.range(zero_float64, zero_int32, 1).dtype, dtypes.float64)
self.assertEqual(
math_ops.range(
0, 0, 1, dtype=dtypes.int32).dtype, dtypes.int32)
self.assertEqual(
math_ops.range(
0, 0, 1, dtype=dtypes.int64).dtype, dtypes.int64)
self.assertEqual(
math_ops.range(
0, 0, 1, dtype=dtypes.float32).dtype, dtypes.float32)
self.assertEqual(
math_ops.range(
0, 0, 1, dtype=dtypes.float64).dtype, dtypes.float64)
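# Illustrative sketch (not part of the original test file): the dtype
# inference rules RangeTest.testDType exercises. Plain Python ints default
# to int32, and mixed start/limit dtypes promote toward the wider or
# floating-point type.
def _example_range_dtypes():
  r_int = math_ops.range(0, 5, 1)  # dtype == dtypes.int32
  r_float = math_ops.range(0., 5., 1.)  # dtype == dtypes.float32
  return r_int, r_float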
# TODO(vrv): move to sequence_ops_test?
class LinSpaceTest(test.TestCase):
def _gpu_modes(self):
if test.is_gpu_available():
return [False, True]
else:
return [False]
def _LinSpace(self, start, stop, num):
with ops.Graph().as_default() as graph:
with self.session(graph=graph, force_gpu=self.force_gpu):
tf_ans = math_ops.linspace(start, stop, num, name="linspace")
self.assertEqual([num], tf_ans.get_shape())
return self.evaluate(tf_ans)
def testPositive(self):
for self.force_gpu in self._gpu_modes():
self.assertArrayNear(self._LinSpace(1., 5., 1), np.array([1.]), 1e-5)
self.assertArrayNear(self._LinSpace(1., 5., 2), np.array([1., 5.]), 1e-5)
self.assertArrayNear(
self._LinSpace(1., 5., 3), np.array([1., 3., 5.]), 1e-5)
self.assertArrayNear(
self._LinSpace(1., 5., 4), np.array([1., 7. / 3., 11. / 3., 5.]),
1e-5)
def testNegative(self):
for self.force_gpu in self._gpu_modes():
self.assertArrayNear(self._LinSpace(-1., -5., 1), np.array([-1.]), 1e-5)
self.assertArrayNear(
self._LinSpace(-1., -5., 2), np.array([-1., -5.]), 1e-5)
self.assertArrayNear(
self._LinSpace(-1., -5., 3), np.array([-1., -3., -5.]), 1e-5)
self.assertArrayNear(
self._LinSpace(-1., -5., 4),
np.array([-1., -7. / 3., -11. / 3., -5.]), 1e-5)
def testNegativeToPositive(self):
for self.force_gpu in self._gpu_modes():
self.assertArrayNear(self._LinSpace(-1., 5., 1), np.array([-1.]), 1e-5)
self.assertArrayNear(
self._LinSpace(-1., 5., 2), np.array([-1., 5.]), 1e-5)
self.assertArrayNear(
self._LinSpace(-1., 5., 3), np.array([-1., 2., 5.]), 1e-5)
self.assertArrayNear(
self._LinSpace(-1., 5., 4), np.array([-1., 1., 3., 5.]), 1e-5)
def testPoint(self):
for self.force_gpu in self._gpu_modes():
self.assertArrayNear(self._LinSpace(5., 5., 1), np.array([5.]), 1e-5)
self.assertArrayNear(self._LinSpace(5., 5., 2), np.array([5.] * 2), 1e-5)
self.assertArrayNear(self._LinSpace(5., 5., 3), np.array([5.] * 3), 1e-5)
self.assertArrayNear(self._LinSpace(5., 5., 4), np.array([5.] * 4), 1e-5)
def testEndpointsAreExact(self):
for self.force_gpu in self._gpu_modes():
# Test some cases that produce last values not equal to "stop" when
# computed via start + (num - 1) * ((stop - start) / (num - 1)), since
# float arithmetic will introduce error through precision loss.
self.assertAllEqual(
self._LinSpace(0., 1., 42)[[0, -1]], np.array([0., 1.], np.float32))
self.assertAllEqual(
self._LinSpace(-1., 0., 42)[[0, -1]], np.array([-1., 0.], np.float32))
self.assertAllEqual(
self._LinSpace(.1, .2, 4)[[0, -1]], np.array([.1, .2], np.float32))
# Check a case for float64 error too.
self.assertAllEqual(
self._LinSpace(np.array(0., np.float64), .1, 12)[[0, -1]],
np.array([0., .1], np.float64))
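# Illustrative sketch (not part of the original test file): why
# testEndpointsAreExact matters. Computing the last point as
# start + (num - 1) * ((stop - start) / (num - 1)) loses precision in
# float arithmetic, so linspace pins the first and last entries to start
# and stop exactly.
def _example_linspace_endpoints():
  pts = math_ops.linspace(0., 1., 42)
  return pts  # pts[0] == 0.0 and pts[-1] == 1.0 exactly.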
class DeviceTest(test.TestCase):
def testNoDevice(self):
with ops.Graph().as_default():
var = variables.Variable([[1.0, 1.0]])
self.assertDeviceEqual(None, var.device)
self.assertDeviceEqual(None, var.initializer.device)
def testDevice(self):
with ops.Graph().as_default():
with ops.device("/job:ps"):
var = variables.Variable([[1.0, 1.0]])
self.assertDeviceEqual("/job:ps", var.device)
self.assertDeviceEqual("/job:ps", var.initializer.device)
class OrthogonalInitializerTest(test.TestCase):
@test_util.run_deprecated_v1
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
init2 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2, (10, 10)))
@test_util.run_deprecated_v1
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
init2 = init_ops.orthogonal_initializer(seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2, (10, 10)))
@test_util.run_deprecated_v1
def testDuplicatedInitializer(self):
init = init_ops.orthogonal_initializer()
self.assertFalse(duplicated_initializer(self, init, 1, (10, 10)))
def testInvalidDataType(self):
self.assertRaises(
ValueError, init_ops.orthogonal_initializer, dtype=dtypes.string)
def testInvalidShape(self):
init1 = init_ops.orthogonal_initializer()
with self.session(graph=ops.Graph(), use_gpu=True):
self.assertRaises(ValueError, init1, shape=[5])
@test_util.run_deprecated_v1
def testGain(self):
shape = (10, 10)
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
init2 = init_ops.orthogonal_initializer(gain=3.14, seed=1, dtype=dtype)
with self.session(graph=ops.Graph(), use_gpu=True):
t1 = init1(shape).eval()
t2 = init2(shape).eval()
self.assertAllClose(t1, t2 / 3.14)
@test_util.run_deprecated_v1
def testShapesValues(self):
for dtype in [dtypes.float32, dtypes.float64]:
for shape in [(10, 10), (10, 9, 8), (100, 5, 5), (50, 40), (40, 50)]:
init = init_ops.orthogonal_initializer(dtype=dtype)
tol = 1e-5 if dtype == dtypes.float32 else 1e-12
with self.session(graph=ops.Graph(), use_gpu=True):
# Check the shape
t = init(shape).eval()
self.assertAllEqual(shape, t.shape)
# Check orthogonality by computing the inner product
t = t.reshape((np.prod(t.shape[:-1]), t.shape[-1]))
if t.shape[0] > t.shape[1]:
self.assertAllClose(
np.dot(t.T, t), np.eye(t.shape[1]), rtol=tol, atol=tol)
else:
self.assertAllClose(
np.dot(t, t.T), np.eye(t.shape[0]), rtol=tol, atol=tol)
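# Illustrative sketch (not part of the original test file): the
# inner-product check used in testShapesValues above. After flattening all
# but the last axis, an orthogonal initialization satisfies t.T @ t == I
# when tall and t @ t.T == I when wide.
def _example_orthogonality_product(t):
  t = t.reshape((np.prod(t.shape[:-1]), t.shape[-1]))
  if t.shape[0] > t.shape[1]:
    return np.dot(t.T, t)  # Should be close to np.eye(t.shape[1]).
  return np.dot(t, t.T)  # Should be close to np.eye(t.shape[0]).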
class ConvolutionDeltaOrthogonalInitializerTest(test.TestCase):
@test_util.run_deprecated_v1
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
init2 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2, (3, 3, 10, 10)))
@test_util.run_deprecated_v1
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
init2 = init_ops.convolutional_delta_orthogonal(seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2, (3, 3, 10, 10)))
@test_util.run_deprecated_v1
def testDuplicatedInitializer(self):
init = init_ops.convolutional_delta_orthogonal()
self.assertFalse(duplicated_initializer(self, init, 1, (3, 3, 10, 10)))
def testInvalidDataType(self):
self.assertRaises(
ValueError, init_ops.convolutional_delta_orthogonal,
dtype=dtypes.string)
def testInvalidShape(self):
init1 = init_ops.convolutional_delta_orthogonal()
with self.session(graph=ops.Graph(), use_gpu=True):
self.assertRaises(ValueError, init1, shape=[3, 3, 6, 5])
@test_util.run_deprecated_v1
def testGain(self):
shape = (3, 3, 10, 10)
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
init2 = init_ops.convolutional_delta_orthogonal(gain=3.14,
seed=1, dtype=dtype)
with self.session(graph=ops.Graph(), use_gpu=True):
t1 = init1(shape).eval()
t2 = init2(shape).eval()
self.assertAllClose(t1, t2 / 3.14)
@test_util.run_deprecated_v1
def testShapesValues(self):
gain = 3.14
for dtype in [dtypes.float32]:
for kernel_size in [[3], [8], [3, 5], [2, 4], [3, 3, 3], [2, 2, 2]]:
tol = 1e-2
        # Check orthogonality by computing the ratio between
        # the 2-norms of the inputs and outputs.
if len(kernel_size) == 1:
shape = [4, 32, 64]
convolution = convolutional.conv1d
elif len(kernel_size) == 2:
convolution = convolutional.conv2d
shape = [4, 32, 32, 64]
else:
shape = [4, 16, 16, 16, 64]
convolution = convolutional.conv3d
inputs = random_ops.random_normal(shape, dtype=dtype)
inputs_2norm = linalg_ops.norm(inputs)
outputs = convolution(
inputs, padding="same", filters=128,
kernel_size=kernel_size, use_bias=False,
kernel_initializer=init_ops.convolutional_delta_orthogonal(
gain=gain))
outputs_shape = shape[0:-1] + [128]
outputs_2norm = linalg_ops.norm(outputs)
ratio = outputs_2norm / inputs_2norm
my_ops = variables.global_variables_initializer()
        with self.session(use_gpu=True):
self.evaluate(my_ops)
# Check the shape of the outputs
t = self.evaluate(outputs)
self.assertAllEqual(t.shape, outputs_shape)
# Check isometry of the delta-orthogonal kernel.
self.assertAllClose(self.evaluate(ratio), gain, rtol=tol, atol=tol)
@test_util.run_deprecated_v1
def testNonuniformity(self):
value = 0
abs_value = 0
shape = [3, 3, 10, 10]
count = 70
tol = 1e-5
with self.session(use_gpu=True):
for i in range(count):
x = variable_scope.get_variable("{}".format(i), shape=shape,
initializer=
init_ops.convolutional_delta_orthogonal)
x.initializer.run()
y = self.evaluate(x)[1, 1, :, :]
determinant = np.linalg.det(y)
value += determinant
abs_value += np.abs(determinant)
# Check there is some variation in the signs of the determinants
self.assertLess(value, count - tol)
self.assertLess(-count + tol, value)
# Check all determinants have absolute value 1
# Compute the sum of the absolute values of 'count' determinants
self.assertAllClose(abs_value, count, rtol=tol, atol=tol)
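# Illustrative sketch (not part of the original test file): the norm-ratio
# isometry check the convolutional-orthogonal tests share. A kernel that is
# orthogonal up to a factor `gain` preserves 2-norms up to that factor, so
# the ratio below should be close to the gain.
def _example_isometry_ratio(inputs, outputs):
  return linalg_ops.norm(outputs) / linalg_ops.norm(inputs)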
class ConvolutionOrthogonal1dInitializerTest(test.TestCase):
@test_util.run_deprecated_v1
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.convolutional_orthogonal_1d(seed=1, dtype=dtype)
init2 = init_ops.convolutional_orthogonal_1d(seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2, (3, 10, 10)))
@test_util.run_deprecated_v1
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.convolutional_orthogonal_1d(seed=1, dtype=dtype)
init2 = init_ops.convolutional_orthogonal_1d(seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2, (3, 10, 10)))
@test_util.run_deprecated_v1
def testDuplicatedInitializer(self):
init = init_ops.convolutional_orthogonal_1d()
self.assertFalse(duplicated_initializer(self, init, 1, (3, 10, 10)))
def testInvalidDataType(self):
self.assertRaises(
ValueError, init_ops.convolutional_orthogonal_1d,
dtype=dtypes.string)
def testInvalidShape(self):
init1 = init_ops.convolutional_orthogonal_1d()
with self.session(graph=ops.Graph(), use_gpu=True):
self.assertRaises(ValueError, init1, shape=[3, 6, 5])
@test_util.run_deprecated_v1
def testGain(self):
shape = (3, 10, 10)
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.convolutional_orthogonal_1d(seed=1, dtype=dtype)
init2 = init_ops.convolutional_orthogonal_1d(gain=3.14,
seed=1, dtype=dtype)
with self.session(graph=ops.Graph(), use_gpu=True):
t1 = init1(shape).eval()
t2 = init2(shape).eval()
self.assertAllClose(t1, t2 / 3.14)
@test_util.run_deprecated_v1
def testNonuniformity(self):
value = 0
abs_value = 0
shape = [3, 10, 10]
count = 70
tol = 1e-5
with self.session(use_gpu=True):
for i in range(count):
x = variable_scope.get_variable("{}".format(i), shape=shape,
initializer=
init_ops.convolutional_orthogonal_1d)
x.initializer.run()
y = np.sum(x.eval(), axis=0)
determinant = np.linalg.det(y)
value += determinant
abs_value += np.abs(determinant)
# Check there is some variation in the signs of the determinants.
self.assertLess(value, count - tol)
self.assertLess(-count + tol, value)
# Check all determinants have absolute value 1
# Compute the sum of the absolute values of 'count' determinants
self.assertAllClose(abs_value, count, rtol=tol, atol=tol)
@test_util.run_deprecated_v1
def testShapesValues(self):
def circular_pad(input_, width, kernel_size):
"""Pad input_ for computing (circular) convolution.
Args:
input_: the input tensor
width: the width of the tensor.
kernel_size: the kernel size of the filter.
Returns:
a tensor whose width is (width + kernel_size - 1).
"""
beginning = kernel_size // 2
end = kernel_size - 1 - beginning
tmp_up = array_ops.slice(input_, [0, width - beginning, 0],
[-1, beginning, -1])
tmp_down = array_ops.slice(input_, [0, 0, 0], [-1, end, -1])
tmp = array_ops.concat([tmp_up, input_, tmp_down], 1)
return tmp
cout = 64
shape = [10, 20, 32]
outputs_shape = shape[0:-1] + [cout]
dtype = dtypes.float32
tol = 1e-3
gain = 3.14
# Check orthogonality/isometry by computing the ratio between
# the 2-norms of the inputs and outputs.
for kernel_size in [[1], [2], [3], [4], [5], [6]]:
convolution = convolutional.conv1d
inputs = random_ops.random_normal(shape, dtype=dtype)
inputs_2norm = linalg_ops.norm(inputs)
input_with_circular_pad = circular_pad(inputs, shape[1], kernel_size[0])
outputs = convolution(
input_with_circular_pad, padding="valid", filters=cout,
kernel_size=kernel_size[0], use_bias=False,
kernel_initializer=init_ops.convolutional_orthogonal_1d(gain=gain))
outputs_2norm = linalg_ops.norm(outputs)
ratio = outputs_2norm / inputs_2norm
my_ops = variables.global_variables_initializer()
      with self.session(use_gpu=True):
self.evaluate(my_ops)
# Check the shape of the outputs
t = self.evaluate(outputs)
self.assertAllEqual(t.shape, outputs_shape)
# Check isometry of the orthogonal kernel.
self.assertAllClose(self.evaluate(ratio), gain, rtol=tol, atol=tol)
class ConvolutionOrthogonal2dInitializerTest(test.TestCase):
@test_util.run_deprecated_v1
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype)
init2 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2, (3, 3, 10, 10)))
@test_util.run_deprecated_v1
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype)
init2 = init_ops.convolutional_orthogonal_2d(seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2, (3, 3, 10, 10)))
@test_util.run_deprecated_v1
def testDuplicatedInitializer(self):
init = init_ops.convolutional_orthogonal_2d()
self.assertFalse(duplicated_initializer(self, init, 1, (3, 3, 10, 10)))
def testInvalidDataType(self):
self.assertRaises(
ValueError, init_ops.convolutional_orthogonal_2d,
dtype=dtypes.string)
def testInvalidShape(self):
init1 = init_ops.convolutional_orthogonal_2d()
with self.session(graph=ops.Graph(), use_gpu=True):
self.assertRaises(ValueError, init1, shape=[3, 3, 6, 5])
@test_util.run_deprecated_v1
def testGain(self):
shape = (3, 3, 10, 10)
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype)
init2 = init_ops.convolutional_orthogonal_2d(gain=3.14,
seed=1, dtype=dtype)
with self.session(graph=ops.Graph(), use_gpu=True):
t1 = init1(shape).eval()
t2 = init2(shape).eval()
self.assertAllClose(t1, t2 / 3.14)
@test_util.run_deprecated_v1
def testShapesValues(self):
def circular_pad(input_, width, kernel_size):
"""Pad input_ for computing (circular) convolution.
Args:
input_: the input tensor
width: the width of the tensor.
kernel_size: the kernel size of the filter.
Returns:
a tensor whose width is (width + kernel_size - 1).
"""
beginning = kernel_size // 2
end = kernel_size - 1 - beginning
tmp_up = array_ops.slice(input_, [0, width - beginning, 0, 0],
[-1, beginning, width, -1])
tmp_down = array_ops.slice(input_, [0, 0, 0, 0], [-1, end, width, -1])
tmp = array_ops.concat([tmp_up, input_, tmp_down], 1)
new_width = width + kernel_size - 1
tmp_left = array_ops.slice(tmp, [0, 0, width - beginning, 0],
[-1, new_width, beginning, -1])
tmp_right = array_ops.slice(tmp, [0, 0, 0, 0], [-1, new_width, end, -1])
final = array_ops.concat([tmp_left, tmp, tmp_right], 2)
return final
cout = 45
shape = [64, 28, 28, 32]
outputs_shape = shape[0:-1] + [cout]
dtype = dtypes.float32
tol = 1e-3
gain = 3.14
# Check orthogonality/isometry by computing the ratio between
# the 2-norms of the inputs and outputs.
for kernel_size in [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]:
convolution = convolutional.conv2d
inputs = random_ops.random_normal(shape, dtype=dtype)
inputs_2norm = linalg_ops.norm(inputs)
input_with_circular_pad = circular_pad(inputs, shape[1], kernel_size[0])
outputs = convolution(
input_with_circular_pad, padding="valid", filters=cout,
kernel_size=kernel_size, use_bias=False,
kernel_initializer=init_ops.convolutional_orthogonal_2d(gain=gain))
outputs_2norm = linalg_ops.norm(outputs)
ratio = outputs_2norm / inputs_2norm
my_ops = variables.global_variables_initializer()
      with self.session(use_gpu=True):
self.evaluate(my_ops)
# Check the shape of the outputs
t = self.evaluate(outputs)
self.assertAllEqual(t.shape, outputs_shape)
# Check isometry of the orthogonal kernel.
self.assertAllClose(self.evaluate(ratio), gain, rtol=tol, atol=tol)
class ConvolutionOrthogonal3dInitializerTest(test.TestCase):
@test_util.run_deprecated_v1
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype)
init2 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2, (3, 3, 3, 10, 10)))
@test_util.run_deprecated_v1
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype)
init2 = init_ops.convolutional_orthogonal_3d(seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2, (3, 3, 3, 10, 10)))
@test_util.run_deprecated_v1
def testDuplicatedInitializer(self):
init = init_ops.convolutional_orthogonal_3d()
self.assertFalse(duplicated_initializer(self, init, 1, (3, 3, 3, 10, 10)))
def testInvalidDataType(self):
self.assertRaises(
ValueError, init_ops.convolutional_orthogonal_3d,
dtype=dtypes.string)
def testInvalidShape(self):
init1 = init_ops.convolutional_orthogonal_3d()
with self.session(graph=ops.Graph(), use_gpu=True):
self.assertRaises(ValueError, init1, shape=[3, 3, 3, 6, 5])
@test_util.run_deprecated_v1
def testGain(self):
shape = (3, 3, 3, 10, 10)
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype)
init2 = init_ops.convolutional_orthogonal_3d(gain=3.14,
seed=1, dtype=dtype)
with self.session(graph=ops.Graph(), use_gpu=True):
t1 = init1(shape).eval()
t2 = init2(shape).eval()
self.assertAllClose(t1, t2 / 3.14)
@test_util.run_deprecated_v1
def testNonuniformity(self):
value = 0
abs_value = 0
shape = [3, 3, 3, 5, 5]
count = 20
tol = 1e-5
with self.session(use_gpu=True):
for i in range(count):
x = variable_scope.get_variable("{}".format(i), shape=shape,
initializer=
init_ops.convolutional_orthogonal_3d)
x.initializer.run()
y = np.sum(x.eval(), axis=(0, 1, 2))
determinant = np.linalg.det(y)
value += determinant
abs_value += np.abs(determinant)
# Check there is some variation in the signs of the determinants
self.assertLess(value, count - tol)
self.assertLess(-count + tol, value)
# Check all determinants have absolute value 1
# Compute the sum of the absolute values of 'count' determinants
self.assertAllClose(abs_value, count, rtol=tol, atol=tol)
@test_util.run_deprecated_v1
def testShapesValues(self):
def circular_pad(input_, width, kernel_size):
"""Padding input_ for computing circular convolution.
Args:
input_: the input tensor
width: the width of the tensor.
kernel_size: the kernel size of the filter.
Returns:
a tensor whose width is (width + kernel_size - 1).
"""
beginning = kernel_size // 2
end = kernel_size - 1 - beginning
tmp_up = array_ops.slice(input_, [0, width - beginning, 0, 0, 0],
[-1, beginning, -1, -1, -1])
tmp_down = array_ops.slice(input_, [0, 0, 0, 0, 0],
[-1, end, -1, -1, -1])
tmp = array_ops.concat([tmp_up, input_, tmp_down], 1)
tmp_left = array_ops.slice(tmp, [0, 0, width - beginning, 0, 0],
[-1, -1, beginning, -1, -1])
tmp_right = array_ops.slice(tmp, [0, 0, 0, 0, 0],
[-1, -1, end, -1, -1])
tmp = array_ops.concat([tmp_left, tmp, tmp_right], 2)
tmp_front = array_ops.slice(tmp, [0, 0, 0, width - beginning, 0],
[-1, -1, -1, beginning, -1])
tmp_back = array_ops.slice(tmp, [0, 0, 0, 0, 0], [-1, -1, -1, end, -1])
return array_ops.concat([tmp_front, tmp, tmp_back], 3)
cout = 32
shape = [1, 7, 7, 7, 16]
outputs_shape = shape[0:-1] + [cout]
dtype = dtypes.float32
tol = 1e-3
gain = 3.14
# Check orthogonality/isometry by computing the ratio between
# the 2-norms of the inputs and outputs.
for kernel_size in [[1, 1, 1], [2, 2, 2], [3, 3, 3]]:
convolution = convolutional.conv3d
inputs = random_ops.random_normal(shape, dtype=dtype)
inputs_2norm = linalg_ops.norm(inputs)
input_with_circular_pad = circular_pad(inputs, shape[1], kernel_size[0])
outputs = convolution(
input_with_circular_pad, padding="valid", filters=cout,
kernel_size=kernel_size[0], use_bias=False,
kernel_initializer=init_ops.convolutional_orthogonal_3d(gain=gain))
outputs_2norm = linalg_ops.norm(outputs)
ratio = outputs_2norm / inputs_2norm
my_ops = variables.global_variables_initializer()
with self.cached_session(use_gpu=True) as sess:
self.evaluate(my_ops)
# Check the shape of the outputs
t = self.evaluate(outputs)
self.assertAllEqual(t.shape, outputs_shape)
# Check isometry of the orthogonal kernel.
self.assertAllClose(self.evaluate(ratio), gain, rtol=tol, atol=tol)
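# A small numpy sketch (not part of the original tests) of the 1-D circular
# padding idea behind the circular_pad helpers above: wrapping
# kernel_size - 1 entries around the signal makes a subsequent "valid"
# convolution behave like a circular convolution over the original input.
def _circular_pad_1d_sketch(x, kernel_size):
  """Pads a 1-D array x to length len(x) + kernel_size - 1 by wrapping."""
  beginning = kernel_size // 2
  end = kernel_size - 1 - beginning
  return np.concatenate([x[len(x) - beginning:], x, x[:end]])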
class IdentityInitializerTest(test.TestCase):
def testInvalidDataType(self):
    self.assertRaises(
        ValueError, init_ops.identity_initializer, dtype=dtypes.string)
def testInvalidShape(self):
init = init_ops.identity_initializer()
with self.session(graph=ops.Graph(), use_gpu=True):
self.assertRaises(ValueError, init, shape=[5, 7, 7])
self.assertRaises(ValueError, init, shape=[5])
self.assertRaises(ValueError, init, shape=[])
@test_util.run_deprecated_v1
def testNonSquare(self):
init = init_ops.identity_initializer()
shape = (10, 5)
with self.session(graph=ops.Graph(), use_gpu=True):
self.assertAllClose(init(shape).eval(), np.eye(*shape))
@test_util.run_deprecated_v1
def testGain(self):
shape = (10, 10)
for dtype in [dtypes.float32, dtypes.float64]:
init_default = init_ops.identity_initializer(dtype=dtype)
init_custom = init_ops.identity_initializer(gain=0.9, dtype=dtype)
with self.session(graph=ops.Graph(), use_gpu=True):
self.assertAllClose(init_default(shape).eval(), np.eye(*shape))
with self.session(graph=ops.Graph(), use_gpu=True):
self.assertAllClose(init_custom(shape).eval(), np.eye(*shape) * 0.9)
@test_util.run_deprecated_v1
def testPartitions(self):
shape = (10, 10)
init = init_ops.identity_initializer()
partitioner = partitioned_variables.variable_axis_size_partitioner(1)
with self.session(graph=ops.Graph(), use_gpu=True):
with variable_scope.variable_scope(
"foo", partitioner=partitioner, initializer=init):
v = array_ops.identity(variable_scope.get_variable("bar", shape=shape))
variables.global_variables_initializer().run()
self.assertAllClose(v.eval(), np.eye(*shape))
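# A tiny numpy sketch (not part of the original tests) of the property
# testPartitions relies on: splitting an identity matrix into shards along
# axis 0 and re-concatenating them reproduces the identity, which is why the
# partitioned variable still reads back as np.eye. The shard count here is
# arbitrary and chosen only to divide n evenly.
def _partitioned_identity_sketch(n=10, num_shards=5):
  shards = np.split(np.eye(n), num_shards, axis=0)
  return np.allclose(np.concatenate(shards, axis=0), np.eye(n))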
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/init_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for denormal handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import platform
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class DenormalTest(test.TestCase):
def testPythonHasDenormals(self):
"""Non-tf numpy code should treat denormals correctly."""
for dtype in np.float32, np.float64:
tiny = np.finfo(dtype).tiny
self.assertEqual(tiny, tiny / 16 * 16)
def _flushDenormalsTest(self, use_gpu, dtypes):
if platform.machine() == "ppc64le" or platform.machine(
) == "s390x" or platform.machine() == "aarch64":
      # Denormal flushing is disabled on the ppc64le/s390x/aarch64 platforms.
      # See the relevant discussion at
      # https://github.com/tensorflow/tensorflow/issues/11902
return
with self.cached_session(use_gpu=use_gpu):
array_ops.identity(7).eval()
for dtype in dtypes:
tiny = np.finfo(dtype).tiny
# Small shape to test main thread, large shape to test thread pool
for shape in (), (1 << 20,):
flush = 0.1 * constant_op.constant(tiny, shape=shape)
self.assertAllEqual(flush.eval(), np.zeros(shape))
# Make sure the flags don't leak out
self.testPythonHasDenormals()
@test_util.run_deprecated_v1
def testFlushDenormalsCPU(self):
# On CPUs, the processor flags flush for both single and double precision.
self._flushDenormalsTest(use_gpu=False, dtypes=(np.float32, np.float64))
@test_util.run_deprecated_v1
def testFlushDenormalsGPU(self):
# On GPUs, only single precision can flush to zero.
self._flushDenormalsTest(use_gpu=True, dtypes=(np.float32,))
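# A hedged numpy sketch (not part of the original tests) of the property the
# tests above depend on: plain numpy preserves denormals, so pushing the
# smallest normal value below the normal range and scaling it back is
# lossless. Under flush-to-zero semantics, 0.1 * tiny would evaluate to 0.
def _denormals_preserved_sketch(dtype=np.float32):
  tiny = np.finfo(dtype).tiny  # smallest positive *normal* value
  return tiny / 16 * 16 == tiny  # True when denormals are honored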
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/denormal_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SparseMaskTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
values = np.random.rand(4, 4).astype(np.single)
indices = np.array([0, 2, 3, 4], dtype=np.int32)
mask_indices = np.array([0], dtype=np.int32)
out_values = values[1:, :]
out_indices = np.array([2, 3, 4], dtype=np.int32)
with self.cached_session() as sess:
values_tensor = ops.convert_to_tensor(values)
indices_tensor = ops.convert_to_tensor(indices)
mask_indices_tensor = ops.convert_to_tensor(mask_indices)
t = ops.IndexedSlices(values_tensor, indices_tensor)
masked_t = array_ops.sparse_mask(t, mask_indices_tensor)
tf_out_values, tf_out_indices = sess.run(
[masked_t.values, masked_t.indices])
self.assertAllEqual(tf_out_values, out_values)
self.assertAllEqual(tf_out_indices, out_indices)
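# A minimal numpy sketch (not part of the original test) of sparse_mask's
# semantics: slices whose index appears in mask_indices are dropped, and the
# surviving values/indices pairs are returned unchanged.
def _sparse_mask_sketch(values, indices, mask_indices):
  """Returns (masked_values, masked_indices) as plain numpy arrays."""
  keep = ~np.isin(indices, mask_indices)
  return values[keep], indices[keep]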
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparsemask_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReshape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseReshapeTest(test.TestCase):
def _SparseTensorPlaceholder(self):
return sparse_tensor.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.float64),
array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.float64)
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_2x3x4(self):
ind = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 1],
[1, 1, 3], [1, 2, 2]])
val = np.array([1, 10, 12, 103, 111, 113, 122])
shape = np.array([2, 3, 4])
return sparse_tensor.SparseTensorValue(ind, val, shape)
def testStaticShapeInfoPreserved(self):
sp_input = sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_5x6())
self.assertAllEqual((5, 6), sp_input.get_shape())
sp_output = sparse_ops.sparse_reshape(sp_input, shape=(1, 5, 2, 3))
self.assertAllEqual((1, 5, 2, 3), sp_output.get_shape())
def testStaticShapeInfoPreservedWithInferredDims(self):
sp_input = sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_2x3x4())
self.assertAllEqual((2, 3, 4), sp_input.get_shape())
sp_output = sparse_ops.sparse_reshape(sp_input, shape=(2, -1))
self.assertAllEqual((2, 3 * 4), sp_output.get_shape())
@test_util.run_deprecated_v1
def testRaisesIfMoreThanOneInferredDim(self):
sp_input = sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_2x3x4())
with self.assertRaisesRegexp(ValueError, "At most one dimension can"):
sparse_ops.sparse_reshape(sp_input, shape=(-1, 2, -1))
@test_util.run_deprecated_v1
def testRaisesIfInferredShapeNotPossible(self):
sp_input = sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_2x3x4())
with self.assertRaisesRegexp(ValueError, "Cannot reshape"):
sparse_ops.sparse_reshape(sp_input, shape=(-1, 7))
def testSameShape(self):
with self.session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(input_val, [5, 6])
output_val = self.evaluate(sp_output)
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
@test_util.run_deprecated_v1
def testFeedSameShape(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [5, 6])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
@test_util.run_deprecated_v1
def testWorksWellWithTfShape(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
shape = array_ops.shape(sp_input) # tf.shape generates int32 output
sp_output = sparse_ops.sparse_reshape(sp_input, shape)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
@test_util.run_deprecated_v1
def testFeedSameShapeWithInferredDim(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [-1, 6])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
@test_util.run_deprecated_v1
def testFeedNewShapeSameRank(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [3, 10])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0], [0, 6], [0, 9], [1, 0], [2, 0],
[2, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [3, 10])
@test_util.run_deprecated_v1
def testFeedNewShapeSameRankWithInferredDim(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [3, -1])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0], [0, 6], [0, 9], [1, 0], [2, 0],
[2, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [3, 10])
def testUpRank(self):
with self.session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(input_val, [2, 3, 5])
output_val = self.evaluate(sp_output)
self.assertAllEqual(output_val.indices,
np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
[1, 1, 0], [1, 1, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [2, 3, 5])
@test_util.run_deprecated_v1
def testFeedUpRank(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
[1, 1, 0], [1, 1, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [2, 3, 5])
@test_util.run_deprecated_v1
def testFeedUpRankWithInferredDim(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [2, -1, 5])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
[1, 1, 0], [1, 1, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [2, 3, 5])
@test_util.run_deprecated_v1
def testFeedDownRank(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_2x3x4()
sp_output = sparse_ops.sparse_reshape(sp_input, [6, 4])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 1], [1, 0], [1, 2], [3, 3], [4, 1],
[4, 3], [5, 2]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [6, 4])
@test_util.run_deprecated_v1
def testFeedDownRankWithInferredDim(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_2x3x4()
sp_output = sparse_ops.sparse_reshape(sp_input, [6, -1])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 1], [1, 0], [1, 2], [3, 3], [4, 1],
[4, 3], [5, 2]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [6, 4])
@test_util.run_deprecated_v1
def testFeedMultipleInferredDims(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [4, -1, -1])
with self.assertRaisesOpError("only one output dimension may be -1"):
sess.run(sp_output, {sp_input: input_val})
@test_util.run_deprecated_v1
def testProvideStaticallyMismatchedSizes(self):
input_val = self._SparseTensorValue_5x6()
sp_input = sparse_tensor.SparseTensor.from_value(input_val)
with self.assertRaisesRegexp(ValueError, "Cannot reshape"):
sparse_ops.sparse_reshape(sp_input, [4, 7])
@test_util.run_deprecated_v1
def testFeedMismatchedSizes(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [4, 7])
with self.assertRaisesOpError(
"Input to reshape is a tensor with 30 dense values"):
sess.run(sp_output, {sp_input: input_val})
@test_util.run_deprecated_v1
def testFeedMismatchedSizesWithInferredDim(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [4, -1])
with self.assertRaisesOpError("requested shape requires a multiple"):
sess.run(sp_output, {sp_input: input_val})
@test_util.run_deprecated_v1
def testFeedPartialShapes(self):
with self.session(use_gpu=False):
# Incorporate new rank into shape information if known
sp_input = self._SparseTensorPlaceholder()
sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
self.assertListEqual(sp_output.indices.get_shape().as_list(), [None, 3])
self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [3])
# Incorporate known shape information about input indices in output
# indices
sp_input = self._SparseTensorPlaceholder()
sp_input.indices.set_shape([5, None])
sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, 3])
self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [3])
# Even if new_shape has no shape information, we know the ranks of
# output indices and shape
sp_input = self._SparseTensorPlaceholder()
sp_input.indices.set_shape([5, None])
new_shape = array_ops.placeholder(dtypes.int64)
sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)
self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, None])
self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [None])
@test_util.run_deprecated_v1
def testFeedDenseReshapeSemantics(self):
with self.session(use_gpu=False) as sess:
# Compute a random rank-5 initial shape and new shape, randomly sparsify
# it, and check that the output of SparseReshape has the same semantics
# as a dense reshape.
factors = np.array([2] * 4 + [3] * 4 + [5] * 4) # 810k total elements
orig_rank = np.random.randint(2, 7)
orig_map = np.random.randint(orig_rank, size=factors.shape)
orig_shape = [np.prod(factors[orig_map == d]) for d in range(orig_rank)]
new_rank = np.random.randint(2, 7)
new_map = np.random.randint(new_rank, size=factors.shape)
new_shape = [np.prod(factors[new_map == d]) for d in range(new_rank)]
orig_dense = np.random.uniform(size=orig_shape)
orig_indices = np.transpose(np.nonzero(orig_dense < 0.5))
orig_values = orig_dense[orig_dense < 0.5]
new_dense = np.reshape(orig_dense, new_shape)
new_indices = np.transpose(np.nonzero(new_dense < 0.5))
new_values = new_dense[new_dense < 0.5]
sp_input = self._SparseTensorPlaceholder()
input_val = sparse_tensor.SparseTensorValue(orig_indices, orig_values,
orig_shape)
sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, new_indices)
self.assertAllEqual(output_val.values, new_values)
self.assertAllEqual(output_val.dense_shape, new_shape)
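# A compact numpy sketch (not part of the original tests) of the dense-reshape
# semantics verified above: each nonzero's multi-index is linearized against
# the old shape and unraveled against the new one, exactly where np.reshape
# would move that element.
def _sparse_reshape_indices_sketch(indices, old_shape, new_shape):
  """Maps an [N, old_rank] index array to [N, new_rank] for a reshape."""
  flat = np.ravel_multi_index(indices.T, old_shape)
  return np.stack(np.unravel_index(flat, new_shape), axis=1)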
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_reshape_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.gather."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_TEST_TYPES = (dtypes.int64, dtypes.float32,
dtypes.complex64, dtypes.complex128)
# TODO(virimia): Add a benchmark for gather_v2, with batch_dims and axis set.
class GatherTest(test.TestCase, parameterized.TestCase):
def _buildParams(self, data, dtype):
data = data.astype(dtype.as_numpy_dtype)
# For complex types, add an index-dependent imaginary component so we can
# tell we got the right value.
if dtype.is_complex:
return data + 10j * data
return data
def testScalar1D(self):
with self.cached_session(use_gpu=True):
data = np.array([0, 1, 2, 3, 7, 5])
for dtype in _TEST_TYPES:
for indices in 4, [1, 2, 2, 4, 5]:
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices_tf = constant_op.constant(indices)
gather_t = array_ops.gather(params, indices_tf)
gather_val = self.evaluate(gather_t)
np_val = params_np[indices]
self.assertAllEqual(np_val, gather_val)
self.assertEqual(np_val.shape, gather_t.get_shape())
def testScalar2D(self):
with self.session(use_gpu=True):
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
for dtype in _TEST_TYPES:
for axis in range(data.ndim):
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices = constant_op.constant(2)
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = self.evaluate(gather_t)
self.assertAllEqual(np.take(params_np, 2, axis=axis), gather_val)
expected_shape = data.shape[:axis] + data.shape[axis + 1:]
self.assertEqual(expected_shape, gather_t.get_shape())
def testSimpleTwoD32(self):
with self.session(use_gpu=True):
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
for dtype in _TEST_TYPES:
for axis in range(data.ndim):
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
# The indices must be in bounds for any axis.
indices = constant_op.constant([0, 1, 0, 2])
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = self.evaluate(gather_t)
self.assertAllEqual(np.take(params_np, [0, 1, 0, 2], axis=axis),
gather_val)
expected_shape = data.shape[:axis] + (4,) + data.shape[axis + 1:]
self.assertEqual(expected_shape, gather_t.get_shape())
@test_util.run_deprecated_v1
def testHigherRank(self):
# We check that scalar and empty indices shapes work as well
shape = (2, 1, 3, 2)
for indices_shape in (), (0,), (2, 0), (2, 3):
for dtype in _TEST_TYPES:
for axis in range(len(shape)):
params = self._buildParams(np.random.randn(*shape), dtype)
indices = np.random.randint(shape[axis], size=indices_shape)
with self.cached_session(use_gpu=True) as sess:
tf_params = constant_op.constant(params)
tf_indices = constant_op.constant(indices)
# Check that both positive and negative indices for axis work.
tf_axis = constant_op.constant(axis)
tf_negative_axis = constant_op.constant(-len(shape) + axis)
gather = array_ops.gather(tf_params, tf_indices, axis=tf_axis)
gather_negative_axis = array_ops.gather(
tf_params, tf_indices, axis=tf_negative_axis)
gather_value, gather_negative_axis_value = sess.run(
[gather, gather_negative_axis])
gather_np = np.take(params, indices, axis)
self.assertAllEqual(gather_np, gather_value)
self.assertAllEqual(gather_np, gather_negative_axis_value)
expected_shape = (params.shape[:axis] + indices.shape +
params.shape[axis + 1:])
self.assertEqual(expected_shape, gather.shape)
self.assertEqual(expected_shape, gather_negative_axis.shape)
# Test gradients
gather_grad = np.random.randn(
*gather.get_shape().as_list()).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
gather_grad -= 1j * gather_grad
params_grad, indices_grad, axis_grad = gradients_impl.gradients(
gather, [tf_params, tf_indices, tf_axis], gather_grad)
self.assertEqual(indices_grad, None)
self.assertEqual(axis_grad, None)
if dtype.is_integer:
self.assertEqual(params_grad, None)
continue
# For axis 0, we are able to create an efficient IndexedSlices for
# the gradient.
if axis == 0:
self.assertEqual(type(params_grad), ops.IndexedSlices)
params_grad = ops.convert_to_tensor(params_grad)
correct_params_grad = np.zeros(shape).astype(dtype.as_numpy_dtype)
outer_dims = axis
inner_dims = len(shape) - axis - 1
gather_grad = gather_grad.reshape(
shape[:axis] + (indices.size,) + shape[axis + 1:])
for source_index, dest_index in enumerate(indices.flat):
dest_slice = ((slice(None),) * outer_dims + (dest_index,) +
(slice(None),) * inner_dims)
source_slice = ((slice(None),) * outer_dims + (source_index,) +
(slice(None),) * inner_dims)
correct_params_grad[dest_slice] += gather_grad[source_slice]
self.assertAllClose(
correct_params_grad,
self.evaluate(params_grad),
atol=2e-6,
rtol=2e-6)
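  # A hedged numpy sketch (not part of the original tests) of the gather
  # gradient checked above for the axis-0, 1-D indices case: the upstream
  # gradient rows are scatter-added back to the gathered positions, which is
  # also why an IndexedSlices gradient suffices there.
  def _gatherGradAxis0Sketch(self, params_shape, indices, gather_grad):
    grad = np.zeros(params_shape, dtype=gather_grad.dtype)
    for src, dst in enumerate(indices):
      grad[dst] += gather_grad[src]
    return grad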
@test_util.run_deprecated_v1
def testString(self):
params = np.array([[b"asdf", b"zxcv"], [b"qwer", b"uiop"]])
with self.cached_session():
self.assertAllEqual([b"qwer", b"uiop"],
array_ops.gather(params, 1, axis=0).eval())
self.assertAllEqual([b"asdf", b"qwer"],
array_ops.gather(params, 0, axis=1).eval())
@test_util.run_deprecated_v1
def testUInt32AndUInt64(self):
for unsigned_type in (dtypes.uint32, dtypes.uint64):
params = self._buildParams(
np.array([[1, 2, 3], [7, 8, 9]]), unsigned_type)
with self.cached_session():
self.assertAllEqual([7, 8, 9],
array_ops.gather(params, 1, axis=0).eval())
self.assertAllEqual([1, 7], array_ops.gather(params, 0, axis=1).eval())
@test_util.run_deprecated_v1
def testUnknownIndices(self):
params = constant_op.constant([[0, 1, 2]])
indices = array_ops.placeholder(dtypes.int32)
gather_t = array_ops.gather(params, indices)
self.assertEqual(None, gather_t.get_shape())
@test_util.run_deprecated_v1
def testUnknownAxis(self):
params = constant_op.constant([[0, 1, 2]])
indices = constant_op.constant([[0, 0], [0, 0]])
axis = array_ops.placeholder(dtypes.int32)
gather_t = array_ops.gather(params, indices, axis=axis)
# Rank 2 params with rank 2 indices results in a rank 3 shape.
self.assertEqual([None, None, None], gather_t.shape.as_list())
# If indices is also unknown the result rank is unknown.
indices = array_ops.placeholder(dtypes.int32)
gather_t = array_ops.gather(params, indices, axis=axis)
self.assertEqual(None, gather_t.shape)
def testBadIndicesCPU(self):
with test_util.force_cpu():
params = [[0, 1, 2], [3, 4, 5]]
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
self.evaluate(array_ops.gather(params, [[7]], axis=0))
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 3\)"):
self.evaluate(array_ops.gather(params, [[7]], axis=1))
def _disabledTestBadIndicesGPU(self):
    # TODO: disabled due to different behavior on GPU and CPU.
    # On GPU, bad indices do not raise an error but instead fetch 0 values.
if not test.is_gpu_available():
return
with self.session(use_gpu=True):
params = [[0, 1, 2], [3, 4, 5]]
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
array_ops.gather(params, [[7]], axis=0).eval()
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 3\)"):
array_ops.gather(params, [[7]], axis=1).eval()
@test_util.run_deprecated_v1
def testBadAxis(self):
with self.session(use_gpu=True):
params = [0, 1, 2]
params_ph = array_ops.placeholder(dtypes.int32)
indices = 0
for bad_axis in (1, 2, -2):
# Shape inference can validate axis for known params rank.
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape must be at least rank . but is rank 1"):
array_ops.gather(params, indices, axis=bad_axis)
# If params rank is unknown, an op error occurs.
with self.assertRaisesOpError(
r"Expected axis in the range \[-1, 1\), but got %s" % bad_axis):
array_ops.gather(params_ph, indices, axis=bad_axis).eval(
feed_dict={params_ph: params})
@test_util.run_deprecated_v1
def testEmptySlices(self):
with self.session(use_gpu=True):
for dtype in _TEST_TYPES:
for itype in np.int32, np.int64:
# Leading axis gather.
params = np.zeros((7, 0, 0), dtype=dtype.as_numpy_dtype)
indices = np.array([3, 4], dtype=itype)
gather = array_ops.gather(params, indices, axis=0)
self.assertAllEqual(gather.eval(), np.zeros((2, 0, 0)))
# Middle axis gather.
params = np.zeros((0, 7, 0), dtype=dtype.as_numpy_dtype)
gather = array_ops.gather(params, indices, axis=1)
self.assertAllEqual(gather.eval(), np.zeros((0, 2, 0)))
# Trailing axis gather.
params = np.zeros((0, 0, 7), dtype=dtype.as_numpy_dtype)
gather = array_ops.gather(params, indices, axis=2)
self.assertAllEqual(gather.eval(), np.zeros((0, 0, 2)))
@parameterized.parameters([
# batch_dims=0 (equivalent to tf.gather)
dict( # 2D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[2, 1], [0, 3]],
expected=[[8, 7], [6, 9]]),
dict( # 3D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[9, 7], [8, 6]], [[6, 9], [8, 8]]]),
dict( # 4D indices
batch_dims=0,
params=[8, 9],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[8, 9], [9, 8]], [[8, 8], [9, 9]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
# batch_dims=indices.shape.ndims - 1
# (equivalent to tf.compat.v1.batch_gather)
dict( # 2D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[2, 1], [0, 3]],
expected=[[12, 11], [20, 23]]),
dict( # 3D indices (2 batch dims)
batch_dims=2,
params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
dict( # 2D indices (1 batch dim)
batch_dims=-1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[2, 1], [0, 3]],
expected=[[12, 11], [20, 23]]),
dict( # 3D indices (2 batch dims)
batch_dims=-1,
params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
# 0 < batch_dims < indices.shape.ndims - 1
dict( # 3D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[13, 11], [12, 10]], [[20, 23], [22, 22]]]),
dict( # 4D indices (1 batch dim)
batch_dims=1,
params=[[6, 7], [8, 9]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[6, 7], [7, 6]], [[6, 6], [7, 7]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
dict( # 4D indices (2 batch dims)
batch_dims=2,
params=[[[2, 3], [4, 5]], [[6, 7], [8, 9]]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[2, 3], [3, 2]], [[4, 4], [5, 5]]],
[[[7, 7], [6, 6]], [[8, 9], [9, 8]]]]),
# axis > 0
dict( # 3D indices, batch_dims=1, axis=2
# params.shape = [I1, J1, J2] = [2, 2, 3]
# indices.shape = [I1, K1, K2] = [2, 1, 5]
# result.shape = [I1, J1, K1, K2] = [2, 2, 1, 5]
batch_dims=1,
axis=2,
params=[[[10, 11, 12], [13, 14, 15]], [[20, 21, 22], [23, 24, 25]]],
indices=[[[0, 1, 2, 1, 0]], [[0, 1, 2, 1, 0]]],
expected=[[[[10, 11, 12, 11, 10]], [[13, 14, 15, 14, 13]]],
[[[20, 21, 22, 21, 20]], [[23, 24, 25, 24, 23]]]]),
dict( # 3D indices, batch_dims=None, axis=1
batch_dims=None,
axis=1,
params=[[10, 11, 12], [13, 14, 15]],
indices=[1, 0],
expected=[[11, 10], [14, 13]]),
])
@test_util.run_in_graph_and_eager_modes
def testBatchDims(self, params, indices, batch_dims, expected=None,
axis=None):
result = array_ops.gather(params, indices, axis=axis, batch_dims=batch_dims)
self.assertAllEqual(expected, result)
with compat.forward_compatibility_horizon(2019, 6, 11):
result = array_ops.gather(
params, indices, axis=axis, batch_dims=batch_dims)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=2,
output_shape=[2, 3, 8, 9, 10, 5, 6, 7]
# = params.shape[:2] + indices.shape[2:] + params.shape[3:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=3,
output_shape=[2, 3, 4, 8, 9, 10, 6, 7]
# = params.shape[:3] + indices.shape[2:] + params.shape[4:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=4,
output_shape=[2, 3, 4, 5, 8, 9, 10, 7]
# = params.shape[:4] + indices.shape[2:] + params.shape[5:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=5,
output_shape=[2, 3, 4, 5, 6, 8, 9, 10]
# = params.shape[:5] + indices.shape[2:] + params.shape[6:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=-4,
output_shape=[2, 3, 8, 9, 10, 5, 6, 7]
# = params.shape[:2] + indices.shape[2:] + params.shape[3:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=-3,
output_shape=[2, 3, 4, 8, 9, 10, 6, 7]
# = params.shape[:3] + indices.shape[2:] + params.shape[4:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=-2,
output_shape=[2, 3, 4, 5, 8, 9, 10, 7]
# = params.shape[:4] + indices.shape[2:] + params.shape[5:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=-1,
output_shape=[2, 3, 4, 5, 6, 8, 9, 10]
# = params.shape[:5] + indices.shape[2:] + params.shape[6:]
),
])
@test_util.run_in_graph_and_eager_modes
def testBatchDimsMatchesPythonBatching(self, params_shape, indices_shape,
batch_dims, axis, output_shape):
"""Checks that batch_dims matches multiple calls to tf.gather()."""
# Generate a `params` tensor with the indicated shape.
params_size = np.prod(params_shape)
params = np.reshape(np.arange(params_size), params_shape)
# Generate an `indices` tensor with the indicated shape, where each index
# is within the appropriate range.
indices_size = np.prod(indices_shape)
indices = np.reshape(np.arange(indices_size), indices_shape)
indices = indices % params_shape[axis]
# Perform repeated (batched) gather operations with numpy, to find the
# expected result.
expected = self._batchNumpyGather(params, indices, axis, batch_dims)
# On Windows, we get an exception if we pass in the transformed numpy
# arrays ("Failed to convert numpy ndarray to a Tensor (Unsupported
# feed type)."); so convert them back to lists before calling tf.gather.
params = params.tolist()
indices = indices.tolist()
result = array_ops.gather(params, indices, axis=axis, batch_dims=batch_dims)
self.assertAllEqual(output_shape, result.shape.as_list())
self.assertAllEqual(expected, result)
with compat.forward_compatibility_horizon(2019, 6, 11):
result = array_ops.gather(
params, indices, axis=axis, batch_dims=batch_dims)
self.assertAllEqual(output_shape, result.shape.as_list())
self.assertAllEqual(expected, result)
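  # A hedged helper (not part of the original tests) spelling out the static
  # output-shape rule the shape comments in the cases above encode: the
  # gathered axis of params is replaced by the non-batch dims of indices.
  # It assumes a non-negative, already-normalized `axis`.
  def _gatherOutputShapeSketch(self, params_shape, indices_shape, axis,
                               batch_dims):
    return (list(params_shape[:axis]) + list(indices_shape[batch_dims:]) +
            list(params_shape[axis + 1:]))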
def _batchNumpyGather(self, params, indices, axis, batch_dims):
"""Performs a batch gather by making recursive calls to np.take().
This is used by testBatchDims() to construct the expected value.
Args:
params: A numpy array
indices: A numpy array
axis: An integer
batch_dims: An integer
Returns:
A numpy array
"""
if batch_dims == 0:
return np.take(params, indices, axis=axis)
self.assertEqual(params.shape[0], indices.shape[0])
if axis > 0:
axis -= 1
return np.stack([
self._batchNumpyGather(params[i], indices[i], axis, batch_dims - 1)
for i in range(params.shape[0])
])
@test_util.run_v1_only("RefVariable is not supported in v2")
def testGatherRefVariable(self):
with self.cached_session():
v = variables.RefVariable(constant_op.constant([[1, 2], [3, 4], [5, 6]]))
self.evaluate(variables.global_variables_initializer())
gather = array_ops.gather(v, [0, 2])
if not context.executing_eagerly(): # .op doesn't make sense in Eager
self.assertEqual("GatherV2", gather.op.name)
self.assertAllEqual([[1, 2], [5, 6]], gather)
@test_util.run_in_graph_and_eager_modes
def testGatherResourceVariable(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(
constant_op.constant([[1, 2], [3, 4], [5, 6]]))
self.evaluate(variables.global_variables_initializer())
gather = array_ops.gather(v, [0, 2])
if not context.executing_eagerly(): # .op doesn't make sense in Eager
self.assertEqual("ResourceGather", gather.op.inputs[0].op.type)
self.assertAllEqual([[1, 2], [5, 6]], gather)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/gather_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for slice op that consume a lot of GPU memory."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SliceTest(test.TestCase):
def testInt64Slicing(self):
with self.cached_session(force_gpu=test.is_gpu_available()):
a_large = array_ops.tile(
constant_op.constant(np.array([False, True] * 4)), [2**29 + 3])
slice_t = array_ops.slice(a_large, np.asarray([3]).astype(np.int64), [3])
slice_val = self.evaluate(slice_t)
self.assertAllEqual([True, False, True], slice_val)
slice_t = array_ops.slice(
          a_large, constant_op.constant([2**32 + 3], dtype=dtypes.int64),
[3])
slice_val = self.evaluate(slice_t)
      self.assertAllEqual([True, False, True], slice_val)
if __name__ == "__main__":
  test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/huge_slice_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.gather."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
_TEST_TYPES = (dtypes.int64, dtypes.float32,
dtypes.complex64, dtypes.complex128)
class GatherTest(test.TestCase, parameterized.TestCase):
def _buildParams(self, data, dtype):
data = data.astype(dtype.as_numpy_dtype)
# For complex types, add an index-dependent imaginary component so we can
# tell we got the right value.
if dtype.is_complex:
return data + 10j * data
return data
@parameterized.parameters(dtypes.int32, dtypes.int64)
def testSimpleGather(self, indices_dtype):
data = np.array([0, 1, 2, 3, 7, 5, 8, 9, 10, 11, 15, 13])
indices = [3, 4]
with self.session(use_gpu=True):
for dtype in _TEST_TYPES:
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices_tf = constant_op.constant(indices, dtype=indices_dtype)
gather_t = array_ops.batch_gather(params, indices_tf)
expected_result = np.array([3, 7])
np_val = self._buildParams(expected_result, dtype)
gather_val = self.evaluate(gather_t)
self.assertAllEqual(np_val, gather_val)
self.assertEqual(np_val.shape, gather_t.get_shape())
@parameterized.parameters(dtypes.int32, dtypes.int64)
def test2DArray(self, indices_dtype):
data = np.array([[0, 1, 2, 3, 7, 5], [8, 9, 10, 11, 15, 13]])
indices = [[3], [4]]
with self.session(use_gpu=True):
for dtype in _TEST_TYPES:
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices_tf = constant_op.constant(indices, dtype=indices_dtype)
gather_t = array_ops.batch_gather(params, indices_tf)
expected_result = np.array([[3], [15]])
np_val = self._buildParams(expected_result, dtype)
gather_val = self.evaluate(gather_t)
self.assertAllEqual(np_val, gather_val)
self.assertEqual(np_val.shape, gather_t.get_shape())
def testHigherRank(self):
data = np.array([[[0, 1, 2], [3, 7, 5]], [[8, 9, 10], [11, 15, 13]]])
indices = [[[2, 0], [1, 2]], [[2, 0], [0, 1]]]
with self.session(use_gpu=True):
for dtype in _TEST_TYPES:
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices_tf = constant_op.constant(indices)
gather_t = array_ops.batch_gather(params, indices_tf)
gather_val = self.evaluate(gather_t)
expected_result = np.array([[[2, 0], [7, 5]], [[10, 8], [11, 15]]])
np_val = self._buildParams(expected_result, dtype)
self.assertAllEqual(np_val, gather_val)
self.assertEqual(np_val.shape, gather_t.get_shape())
def testString(self):
params = np.array([[b"asdf", b"zxcv"], [b"qwer", b"uiop"]])
with self.cached_session():
indices_tf = constant_op.constant([1])
self.assertAllEqual(
[[b"qwer", b"uiop"]],
self.evaluate(array_ops.batch_gather(params, indices_tf)))
@test_util.run_deprecated_v1
def testUnknownIndices(self):
params = constant_op.constant([[0, 1, 2]])
indices = array_ops.placeholder(dtypes.int32, shape=[None, None])
gather_t = array_ops.batch_gather(params, indices)
self.assertEqual([1, None], gather_t.get_shape().as_list())
def testBadIndicesCPU(self):
with self.session(use_gpu=False):
params = [[0, 1, 2], [3, 4, 5]]
with self.assertRaisesOpError(r"indices\[0\] = 7 is not in \[0, 2\)"):
self.evaluate(array_ops.batch_gather(params, [7]))
def testEmptySlices(self):
with self.session(use_gpu=True):
for dtype in _TEST_TYPES:
for itype in np.int32, np.int64:
params = np.zeros((7, 0, 0), dtype=dtype.as_numpy_dtype)
indices = np.array([3, 4], dtype=itype)
self.assertAllEqual(
self.evaluate(array_ops.batch_gather(params, indices)),
np.zeros((2, 0, 0)))
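# A minimal numpy sketch (not part of the original tests) of batch_gather's
# 2-D semantics: row i of the result is params[i] indexed by indices[i], i.e.
# a per-batch gather along the last axis.
def _batch_gather_2d_sketch(params, indices):
  params, indices = np.asarray(params), np.asarray(indices)
  return np.stack([params[i][indices[i]] for i in range(len(params))])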
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/batch_gather_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Concat Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class LargeConcatOpTest(test.TestCase):
"""Tests that belong in concat_op_test.py, but run over large tensors."""
def testConcatLargeTensors(self):
# CPU-only test, because it fails on GPUs with <= 4GB memory.
with ops.device("/cpu:0"):
a = array_ops.ones([2**31 + 6], dtype=dtypes.int8)
b = array_ops.zeros([1024], dtype=dtypes.int8)
onezeros = array_ops.concat([a, b], 0)
with self.session(use_gpu=False):
# TODO(dga): Add more depth to this test to validate correctness,
# not just non-crashingness, once other large tensor fixes have gone in.
_ = self.evaluate(onezeros)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/large_concat_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
TIMEOUT = 1
class MapStageTest(test.TestCase):
@test_util.run_deprecated_v1
def testSimple(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([dtypes.float32])
stage = stager.put(pi, [v], [0])
k, y = stager.get(gi)
y = math_ops.reduce_max(math_ops.matmul(y, y))
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1, pi: 0})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})
self.assertAllClose(4 * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
@test_util.run_deprecated_v1
def testMultiple(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([dtypes.float32, dtypes.float32])
stage = stager.put(pi, [x, v], [0, 1])
k, (z, y) = stager.get(gi)
y = math_ops.reduce_max(z * math_ops.matmul(y, y))
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1, pi: 0})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})
self.assertAllClose(
4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
@test_util.run_deprecated_v1
def testDictionary(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32],
shapes=[[], [128, 128]],
names=['x', 'v'])
stage = stager.put(pi, {'x': x, 'v': v})
key, ret = stager.get(gi)
z = ret['x']
y = ret['v']
y = math_ops.reduce_max(z * math_ops.matmul(y, y))
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1, pi: 0})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})
self.assertAllClose(
4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
def testColocation(self):
gpu_dev = test.gpu_device_name()
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(gpu_dev):
stager = data_flow_ops.MapStagingArea([dtypes.float32])
y = stager.put(1, [v], [0])
expected_name = gpu_dev if 'gpu' not in gpu_dev else '/device:GPU:0'
self.assertEqual(y.device, expected_name)
with ops.device('/cpu:0'):
_, x = stager.get(1)
y = stager.peek(1)[0]
_, z = stager.get()
self.assertEqual(x[0].device, '/device:CPU:0')
self.assertEqual(y.device, '/device:CPU:0')
self.assertEqual(z[0].device, '/device:CPU:0')
G.finalize()
@test_util.run_deprecated_v1
def testPeek(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
p = array_ops.placeholder(dtypes.int32, name='p')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[
dtypes.int32,
], shapes=[[]])
stage = stager.put(pi, [x], [0])
peek = stager.peek(gi)
size = stager.size()
G.finalize()
n = 10
with self.session(use_gpu=True, graph=G) as sess:
for i in range(n):
sess.run(stage, feed_dict={x: i, pi: i})
for i in range(n):
self.assertTrue(sess.run(peek, feed_dict={gi: i})[0] == i)
self.assertTrue(sess.run(size) == 10)
@test_util.run_deprecated_v1
def testSizeAndClear(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32, name='x')
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32],
shapes=[[], [128, 128]],
names=['x', 'v'])
stage = stager.put(pi, {'x': x, 'v': v})
size = stager.size()
clear = stager.clear()
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1, pi: 3})
self.assertEqual(sess.run(size), 1)
sess.run(stage, feed_dict={x: -1, pi: 1})
self.assertEqual(sess.run(size), 2)
sess.run(clear)
self.assertEqual(sess.run(size), 0)
@test_util.run_deprecated_v1
def testCapacity(self):
capacity = 3
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
pi = array_ops.placeholder(dtypes.int64, name='pi')
gi = array_ops.placeholder(dtypes.int64, name='gi')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[
dtypes.int32,
], capacity=capacity, shapes=[[]])
stage = stager.put(pi, [x], [0])
get = stager.get()
size = stager.size()
G.finalize()
from six.moves import queue as Queue
import threading
queue = Queue.Queue()
n = 8
with self.session(use_gpu=True, graph=G) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
def thread_run():
for i in range(n):
sess.run(stage, feed_dict={x: i, pi: i})
queue.put(0)
t = threading.Thread(target=thread_run)
t.daemon = True
t.start()
# Get tokens from the queue until a timeout occurs
try:
for i in range(n):
queue.get(timeout=TIMEOUT)
except Queue.Empty:
pass
# Should've timed out on the iteration 'capacity'
if not i == capacity:
self.fail("Expected to timeout on iteration '{}' "
"but instead timed out on iteration '{}' "
"Staging Area size is '{}' and configured "
"capacity is '{}'.".format(capacity, i, sess.run(size),
capacity))
# Should have capacity elements in the staging area
self.assertTrue(sess.run(size) == capacity)
# Clear the staging area completely
for i in range(n):
sess.run(get)
self.assertTrue(sess.run(size) == 0)
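  # A hedged standalone sketch (not part of the original tests) of the
  # producer-blocking pattern exercised in testCapacity: with a bounded
  # buffer the producer thread stalls once `capacity` items are in flight,
  # so only `capacity` tokens arrive before the consumer's timeout fires.
  def _boundedBufferSketch(self, capacity=3, n=8, timeout=0.05):
    import threading
    from six.moves import queue as Queue
    buf = Queue.Queue(maxsize=capacity)  # put() blocks once full
    tokens = Queue.Queue()
    def producer():
      for i in range(n):
        buf.put(i)  # blocks after `capacity` unconsumed items
        tokens.put(i)
    t = threading.Thread(target=producer)
    t.daemon = True
    t.start()
    count = 0
    try:
      for _ in range(n):
        tokens.get(timeout=timeout)
        count += 1
    except Queue.Empty:
      pass
    return count  # == capacity, mirroring the assertion in testCapacity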
@test_util.run_deprecated_v1
def testMemoryLimit(self):
memory_limit = 512 * 1024 # 512K
    chunk = 200 * 1024  # 200K
capacity = memory_limit // chunk
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.uint8, name='x')
pi = array_ops.placeholder(dtypes.int64, name='pi')
gi = array_ops.placeholder(dtypes.int64, name='gi')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[dtypes.uint8], memory_limit=memory_limit, shapes=[[]])
stage = stager.put(pi, [x], [0])
get = stager.get()
size = stager.size()
G.finalize()
from six.moves import queue as Queue
import threading
import numpy as np
queue = Queue.Queue()
n = 8
with self.session(use_gpu=True, graph=G) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
def thread_run():
for i in range(n):
data = np.full(chunk, i, dtype=np.uint8)
sess.run(stage, feed_dict={x: data, pi: i})
queue.put(0)
t = threading.Thread(target=thread_run)
t.daemon = True
t.start()
# Get tokens from the queue until a timeout occurs
try:
for i in range(n):
queue.get(timeout=TIMEOUT)
except Queue.Empty:
pass
# Should've timed out on the iteration 'capacity'
if not i == capacity:
self.fail("Expected to timeout on iteration '{}' "
"but instead timed out on iteration '{}' "
"Staging Area size is '{}' and configured "
"capacity is '{}'.".format(capacity, i, sess.run(size),
capacity))
# Should have capacity elements in the staging area
self.assertTrue(sess.run(size) == capacity)
# Clear the staging area completely
for i in range(n):
sess.run(get)
self.assertTrue(sess.run(size) == 0)
@test_util.run_deprecated_v1
def testOrdering(self):
    import six
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
pi = array_ops.placeholder(dtypes.int64, name='pi')
gi = array_ops.placeholder(dtypes.int64, name='gi')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[
dtypes.int32,
], shapes=[[]], ordered=True)
stage = stager.put(pi, [x], [0])
get = stager.get()
size = stager.size()
G.finalize()
n = 10
with self.session(use_gpu=True, graph=G) as sess:
# Keys n-1..0
keys = list(reversed(six.moves.range(n)))
for i in keys:
sess.run(stage, feed_dict={pi: i, x: i})
self.assertTrue(sess.run(size) == n)
# Check that key, values come out in ascending order
for i, k in enumerate(reversed(keys)):
get_key, values = sess.run(get)
self.assertTrue(i == k == get_key == values)
self.assertTrue(sess.run(size) == 0)
@test_util.run_deprecated_v1
def testPartialDictInsert(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
f = array_ops.placeholder(dtypes.float32)
v = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
with ops.device(test.gpu_device_name()):
# Test barrier with dictionary
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32, dtypes.float32],
names=['x', 'v', 'f'])
stage_xf = stager.put(pi, {'x': x, 'f': f})
stage_v = stager.put(pi, {'v': v})
key, ret = stager.get(gi)
size = stager.size()
isize = stager.incomplete_size()
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
# 0 complete and incomplete entries
self.assertTrue(sess.run([size, isize]) == [0, 0])
# Stage key 0, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2})
self.assertTrue(sess.run([size, isize]) == [0, 1])
# Stage key 1, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2})
self.assertTrue(sess.run([size, isize]) == [0, 2])
# Now complete key 0 with tuple entry v
sess.run(stage_v, feed_dict={pi: 0, v: 1})
# 1 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [1, 1])
# We can now obtain tuple associated with key 0
self.assertTrue(
sess.run([key, ret], feed_dict={
gi: 0
}) == [0, {
'x': 1,
'f': 2,
'v': 1
}])
# 0 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [0, 1])
# Now complete key 1 with tuple entry v
sess.run(stage_v, feed_dict={pi: 1, v: 3})
# We can now obtain tuple associated with key 1
self.assertTrue(
sess.run([key, ret], feed_dict={
gi: 1
}) == [1, {
'x': 1,
'f': 2,
'v': 3
}])
@test_util.run_deprecated_v1
def testPartialIndexInsert(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
f = array_ops.placeholder(dtypes.float32)
v = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32, dtypes.float32])
stage_xf = stager.put(pi, [x, f], [0, 2])
stage_v = stager.put(pi, [v], [1])
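        # Indices [0, 2] and [1] together cover the whole tuple, so an entry
        # becomes complete once both put ops have run for its key.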
key, ret = stager.get(gi)
size = stager.size()
isize = stager.incomplete_size()
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
      # 0 complete and 0 incomplete entries
self.assertTrue(sess.run([size, isize]) == [0, 0])
# Stage key 0, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2})
self.assertTrue(sess.run([size, isize]) == [0, 1])
# Stage key 1, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2})
self.assertTrue(sess.run([size, isize]) == [0, 2])
# Now complete key 0 with tuple entry v
sess.run(stage_v, feed_dict={pi: 0, v: 1})
# 1 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [1, 1])
# We can now obtain tuple associated with key 0
self.assertTrue(sess.run([key, ret], feed_dict={gi: 0}) == [0, [1, 1, 2]])
# 0 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [0, 1])
# Now complete key 1 with tuple entry v
sess.run(stage_v, feed_dict={pi: 1, v: 3})
# We can now obtain tuple associated with key 1
self.assertTrue(sess.run([key, ret], feed_dict={gi: 1}) == [1, [1, 3, 2]])
@test_util.run_deprecated_v1
def testPartialDictGetsAndPeeks(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
f = array_ops.placeholder(dtypes.float32)
v = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
pei = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
with ops.device(test.gpu_device_name()):
# Test barrier with dictionary
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32, dtypes.float32],
names=['x', 'v', 'f'])
stage_xf = stager.put(pi, {'x': x, 'f': f})
stage_v = stager.put(pi, {'v': v})
peek_xf = stager.peek(pei, ['x', 'f'])
peek_v = stager.peek(pei, ['v'])
key_xf, get_xf = stager.get(gi, ['x', 'f'])
key_v, get_v = stager.get(gi, ['v'])
pop_key_xf, pop_xf = stager.get(indices=['x', 'f'])
pop_key_v, pop_v = stager.get(pi, ['v'])
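        # peek() leaves values in place, while get() with indices removes
        # only the requested values; the entry itself is dropped once every
        # one of its values has been taken.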
size = stager.size()
isize = stager.incomplete_size()
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
      # 0 complete and 0 incomplete entries
self.assertTrue(sess.run([size, isize]) == [0, 0])
# Stage key 0, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2})
self.assertTrue(sess.run([size, isize]) == [0, 1])
# Stage key 1, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2})
self.assertTrue(sess.run([size, isize]) == [0, 2])
# Now complete key 0 with tuple entry v
sess.run(stage_v, feed_dict={pi: 0, v: 1})
# 1 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [1, 1])
# We can now peek at 'x' and 'f' values associated with key 0
self.assertTrue(sess.run(peek_xf, feed_dict={pei: 0}) == {'x': 1, 'f': 2})
# Peek at 'v' value associated with key 0
self.assertTrue(sess.run(peek_v, feed_dict={pei: 0}) == {'v': 1})
# 1 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [1, 1])
# We can now obtain 'x' and 'f' values associated with key 0
self.assertTrue(
sess.run([key_xf, get_xf], feed_dict={
gi: 0
}) == [0, {
'x': 1,
'f': 2
}])
# Still have 1 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [1, 1])
# We can no longer get 'x' and 'f' from key 0
with self.assertRaises(errors.InvalidArgumentError) as cm:
sess.run([key_xf, get_xf], feed_dict={gi: 0})
      exc_str = ("Tensor at index '0' for key '0' "
                 "has already been removed.")
self.assertTrue(exc_str in cm.exception.message)
# Obtain 'v' value associated with key 0
self.assertTrue(
sess.run([key_v, get_v], feed_dict={
gi: 0
}) == [0, {
'v': 1
}])
# 0 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [0, 1])
# Now complete key 1 with tuple entry v
sess.run(stage_v, feed_dict={pi: 1, v: 1})
      # 1 complete and 0 incomplete entries
self.assertTrue(sess.run([size, isize]) == [1, 0])
# Pop without key to obtain 'x' and 'f' values associated with key 1
self.assertTrue(sess.run([pop_key_xf, pop_xf]) == [1, {'x': 1, 'f': 2}])
      # still 1 complete and 0 incomplete entries
self.assertTrue(sess.run([size, isize]) == [1, 0])
      # We can now obtain the 'v' value associated with key 1
self.assertTrue(
sess.run([pop_key_v, pop_v], feed_dict={
pi: 1
}) == [1, {
'v': 1
}])
# Nothing is left
self.assertTrue(sess.run([size, isize]) == [0, 0])
@test_util.run_deprecated_v1
def testPartialIndexGets(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
f = array_ops.placeholder(dtypes.float32)
v = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
pei = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
with ops.device(test.gpu_device_name()):
# Test again with partial index gets
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32, dtypes.float32])
stage_xvf = stager.put(pi, [x, v, f], [0, 1, 2])
key_xf, get_xf = stager.get(gi, [0, 2])
key_v, get_v = stager.get(gi, [1])
size = stager.size()
isize = stager.incomplete_size()
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
# Stage complete tuple
sess.run(stage_xvf, feed_dict={pi: 0, x: 1, f: 2, v: 3})
self.assertTrue(sess.run([size, isize]) == [1, 0])
# Partial get using indices
self.assertTrue(
sess.run([key_xf, get_xf], feed_dict={
gi: 0
}) == [0, [1, 2]])
# Still some of key 0 left
self.assertTrue(sess.run([size, isize]) == [1, 0])
# Partial get of remaining index
self.assertTrue(sess.run([key_v, get_v], feed_dict={gi: 0}) == [0, [3]])
# All gone
self.assertTrue(sess.run([size, isize]) == [0, 0])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/map_stage_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.PriorityQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import random
import threading
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
@test_util.run_v1_only("PriorityQueue removed from v2")
class PriorityQueueTest(test.TestCase):
def testRoundTripInsertReadOnceSorts(self):
with self.cached_session() as sess:
q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
(), ()))
elem = np.random.randint(-5, 5, size=100).astype(np.int64)
side_value_0 = np.random.rand(100).astype(bytes)
side_value_1 = np.random.rand(100).astype(bytes)
enq_list = [
q.enqueue((e, constant_op.constant(v0), constant_op.constant(v1)))
for e, v0, v1 in zip(elem, side_value_0, side_value_1)
]
for enq in enq_list:
enq.run()
deq = q.dequeue_many(100)
deq_elem, deq_value_0, deq_value_1 = self.evaluate(deq)
allowed = {}
missed = set()
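      # Entries that share a priority may be dequeued in any relative order,
      # so we only check that each dequeued (v0, v1) pair was enqueued under
      # its priority and that every enqueued pair eventually comes out.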
for e, v0, v1 in zip(elem, side_value_0, side_value_1):
if e not in allowed:
allowed[e] = set()
allowed[e].add((v0, v1))
missed.add((v0, v1))
self.assertAllEqual(deq_elem, sorted(elem))
for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
self.assertTrue((dv0, dv1) in allowed[e])
missed.remove((dv0, dv1))
self.assertEqual(missed, set())
def testRoundTripInsertMultiThreadedReadOnceSorts(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
(), ()))
elem = np.random.randint(-5, 5, size=100).astype(np.int64)
side_value_0 = np.random.rand(100).astype(bytes)
side_value_1 = np.random.rand(100).astype(bytes)
enqueue_ops = [
q.enqueue((e, constant_op.constant(v0), constant_op.constant(v1)))
for e, v0, v1 in zip(elem, side_value_0, side_value_1)
]
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
dequeue_op = q.dequeue_many(100)
enqueue_threads = [
self.checkedThread(
target=enqueue, args=(op,)) for op in enqueue_ops
]
for t in enqueue_threads:
t.start()
deq_elem, deq_value_0, deq_value_1 = self.evaluate(dequeue_op)
for t in enqueue_threads:
t.join()
allowed = {}
missed = set()
for e, v0, v1 in zip(elem, side_value_0, side_value_1):
if e not in allowed:
allowed[e] = set()
allowed[e].add((v0, v1))
missed.add((v0, v1))
self.assertAllEqual(deq_elem, sorted(elem))
for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
self.assertTrue((dv0, dv1) in allowed[e])
missed.remove((dv0, dv1))
self.assertEqual(missed, set())
def testRoundTripFillsCapacityMultiThreadedEnqueueAndDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PriorityQueue(10, (dtypes.int64), (()))
num_threads = 40
enqueue_counts = np.random.randint(10, size=num_threads)
enqueue_values = [
np.random.randint(
5, size=count) for count in enqueue_counts
]
enqueue_ops = [
q.enqueue_many((values, values)) for values in enqueue_values
]
shuffled_counts = copy.deepcopy(enqueue_counts)
random.shuffle(shuffled_counts)
dequeue_ops = [q.dequeue_many(count) for count in shuffled_counts]
all_enqueued_values = np.hstack(enqueue_values)
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
dequeued = []
def dequeue(dequeue_op):
(dequeue_indices, dequeue_values) = self.evaluate(dequeue_op)
self.assertAllEqual(dequeue_indices, dequeue_values)
dequeued.extend(dequeue_indices)
enqueue_threads = [
self.checkedThread(
target=enqueue, args=(op,)) for op in enqueue_ops
]
dequeue_threads = [
self.checkedThread(
target=dequeue, args=(op,)) for op in dequeue_ops
]
# Dequeue and check
for t in dequeue_threads:
t.start()
for t in enqueue_threads:
t.start()
for t in enqueue_threads:
t.join()
for t in dequeue_threads:
t.join()
self.assertAllEqual(sorted(dequeued), sorted(all_enqueued_values))
def testRoundTripInsertManyMultiThreadedReadManyMultithreadedSorts(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
num_threads = 40
enqueue_counts = np.random.randint(10, size=num_threads)
enqueue_values = [
np.random.randint(
5, size=count) for count in enqueue_counts
]
enqueue_ops = [
q.enqueue_many((values, values)) for values in enqueue_values
]
shuffled_counts = copy.deepcopy(enqueue_counts)
random.shuffle(shuffled_counts)
dequeue_ops = [q.dequeue_many(count) for count in shuffled_counts]
all_enqueued_values = np.hstack(enqueue_values)
dequeue_wait = threading.Condition()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
def dequeue(dequeue_op, dequeued):
(dequeue_indices, dequeue_values) = self.evaluate(dequeue_op)
self.assertAllEqual(dequeue_indices, dequeue_values)
dequeue_wait.acquire()
dequeued.extend(dequeue_indices)
dequeue_wait.release()
dequeued = []
enqueue_threads = [
self.checkedThread(
target=enqueue, args=(op,)) for op in enqueue_ops
]
dequeue_threads = [
self.checkedThread(
target=dequeue, args=(op, dequeued)) for op in dequeue_ops
]
for t in enqueue_threads:
t.start()
for t in enqueue_threads:
t.join()
# Dequeue and check
for t in dequeue_threads:
t.start()
for t in dequeue_threads:
t.join()
# We can't guarantee full sorting because we can't guarantee
# that the dequeued.extend() call runs immediately after the
# self.evaluate() call. Here we're just happy everything came out.
self.assertAllEqual(set(dequeued), set(all_enqueued_values))
def testRoundTripInsertManyMultiThreadedReadOnceSorts(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
(), ()))
elem = np.random.randint(-5, 5, size=100).astype(np.int64)
side_value_0 = np.random.rand(100).astype(bytes)
side_value_1 = np.random.rand(100).astype(bytes)
batch = 5
enqueue_ops = [
q.enqueue_many((elem[i * batch:(i + 1) * batch],
side_value_0[i * batch:(i + 1) * batch],
side_value_1[i * batch:(i + 1) * batch]))
for i in range(20)
]
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
dequeue_op = q.dequeue_many(100)
enqueue_threads = [
self.checkedThread(
target=enqueue, args=(op,)) for op in enqueue_ops
]
for t in enqueue_threads:
t.start()
deq_elem, deq_value_0, deq_value_1 = self.evaluate(dequeue_op)
for t in enqueue_threads:
t.join()
allowed = {}
missed = set()
for e, v0, v1 in zip(elem, side_value_0, side_value_1):
if e not in allowed:
allowed[e] = set()
allowed[e].add((v0, v1))
missed.add((v0, v1))
self.assertAllEqual(deq_elem, sorted(elem))
for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
self.assertTrue((dv0, dv1) in allowed[e])
missed.remove((dv0, dv1))
self.assertEqual(missed, set())
def testRoundTripInsertOnceReadOnceSorts(self):
with self.cached_session() as sess:
q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
(), ()))
elem = np.random.randint(-100, 100, size=1000).astype(np.int64)
side_value_0 = np.random.rand(1000).astype(bytes)
side_value_1 = np.random.rand(1000).astype(bytes)
q.enqueue_many((elem, side_value_0, side_value_1)).run()
deq = q.dequeue_many(1000)
deq_elem, deq_value_0, deq_value_1 = self.evaluate(deq)
allowed = {}
for e, v0, v1 in zip(elem, side_value_0, side_value_1):
if e not in allowed:
allowed[e] = set()
allowed[e].add((v0, v1))
self.assertAllEqual(deq_elem, sorted(elem))
for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
self.assertTrue((dv0, dv1) in allowed[e])
def testRoundTripInsertOnceReadManySorts(self):
with self.cached_session():
q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
elem = np.random.randint(-100, 100, size=1000).astype(np.int64)
q.enqueue_many((elem, elem)).run()
      deq_values = np.hstack(
          [q.dequeue_many(100)[0].eval() for _ in range(10)])
self.assertAllEqual(deq_values, sorted(elem))
def testRoundTripInsertOnceReadOnceLotsSorts(self):
with self.cached_session():
q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
elem = np.random.randint(-100, 100, size=1000).astype(np.int64)
q.enqueue_many((elem, elem)).run()
dequeue_op = q.dequeue()
      deq_values = np.hstack([dequeue_op[0].eval() for _ in range(1000)])
self.assertAllEqual(deq_values, sorted(elem))
def testInsertingNonInt64Fails(self):
with self.cached_session():
q = data_flow_ops.PriorityQueue(2000, (dtypes.string), (()))
with self.assertRaises(TypeError):
q.enqueue_many((["a", "b", "c"], ["a", "b", "c"])).run()
def testInsertingNonScalarFails(self):
with self.cached_session() as sess:
input_priority = array_ops.placeholder(dtypes.int64)
input_other = array_ops.placeholder(dtypes.string)
q = data_flow_ops.PriorityQueue(2000, (dtypes.string,), (()))
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
r"Shape mismatch in tuple component 0. Expected \[\], got \[2\]"):
sess.run([q.enqueue((input_priority, input_other))],
feed_dict={
input_priority: np.array(
[0, 2], dtype=np.int64),
input_other: np.random.rand(3, 5).astype(bytes)
})
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
r"Shape mismatch in tuple component 0. Expected \[2\], got \[2,2\]"):
sess.run(
[q.enqueue_many((input_priority, input_other))],
feed_dict={
input_priority: np.array(
[[0, 2], [3, 4]], dtype=np.int64),
input_other: np.random.rand(2, 3).astype(bytes)
})
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/priority_queue_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ExtractVolumePatches op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ExtractVolumePatches(test.TestCase):
"""Functional tests for ExtractVolumePatches op."""
def _VerifyValues(self, image, ksizes, strides, padding, patches):
"""Tests input-output pairs for the ExtractVolumePatches op.
Args:
image: Input tensor with shape:
[batch, in_planes, in_rows, in_cols, depth].
ksizes: Patch size specified as: [ksize_planes, ksize_rows, ksize_cols].
strides: Output strides, specified as:
[stride_planes, stride_rows, stride_cols].
padding: Padding type.
patches: Expected output.
Note:
      rates are not supported at this time.
"""
ksizes = [1] + ksizes + [1]
strides = [1] + strides + [1]
out_tensor = array_ops.extract_volume_patches(
constant_op.constant(image),
ksizes=ksizes,
strides=strides,
padding=padding,
name="im2col_3d")
self.assertAllClose(patches, self.evaluate(out_tensor))
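  # For reference (a sketch, not used by the tests): per spatial dimension
  # the expected output size follows the usual convolution arithmetic,
  #   VALID: out = ceil((in - ksize + 1) / stride)
  #   SAME:  out = ceil(in / stride)
  # and each output location packs ksize_planes * ksize_rows * ksize_cols
  # * depth values into the last dimension.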
# pylint: disable=bad-whitespace
def testKsize1x1x1Stride1x1x1(self):
"""Verifies that for 1x1x1 kernel the output equals the input."""
image = np.arange(2 * 3 * 4 * 5 * 6).reshape([2, 3, 4, 5, 6]) + 1
patches = image
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1, 1],
strides=[1, 1, 1],
padding=padding,
patches=patches)
def testKsize1x1x1Stride2x3x4(self):
"""Test for 1x1x1 kernel and strides."""
image = np.arange(6 * 2 * 4 * 5 * 3).reshape([6, 2, 4, 5, 3]) + 1
patches = image[:, ::2, ::3, ::4, :]
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1, 1],
strides=[2, 3, 4],
padding=padding,
patches=patches)
def testKsize1x1x2Stride2x2x3(self):
"""Test for 1x1x2 kernel and strides."""
image = np.arange(45).reshape([1, 3, 3, 5, 1]) + 1
patches = np.array([[[[[ 1, 2],
[ 4, 5]],
[[11, 12],
[14, 15]]],
[[[31, 32],
[34, 35]],
[[41, 42],
[44, 45]]]]])
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1, 2],
strides=[2, 2, 3],
padding=padding,
patches=patches)
def testKsize2x2x2Stride1x1x1Valid(self):
"""Test for 2x2x2 kernel with VALID padding."""
image = np.arange(8).reshape([1, 2, 2, 2, 1]) + 1
patches = np.array([[[[[1, 2, 3, 4, 5, 6, 7, 8]]]]])
self._VerifyValues(
image,
ksizes=[2, 2, 2],
strides=[1, 1, 1],
padding="VALID",
patches=patches)
def testKsize2x2x2Stride1x1x1Same(self):
"""Test for 2x2x2 kernel with SAME padding."""
image = np.arange(8).reshape([1, 2, 2, 2, 1]) + 1
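    # With SAME padding a 2x2x2 window is anchored at every input position;
    # window elements that fall outside the input read as zeros, which is
    # where the zeros in the expected patches below come from.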
patches = np.array([[[[[1, 2, 3, 4, 5, 6, 7, 8],
[2, 0, 4, 0, 6, 0, 8, 0]],
[[3, 4, 0, 0, 7, 8, 0, 0],
[4, 0, 0, 0, 8, 0, 0, 0]]],
[[[5, 6, 7, 8, 0, 0, 0, 0],
[6, 0, 8, 0, 0, 0, 0, 0]],
[[7, 8, 0, 0, 0, 0, 0, 0],
[8, 0, 0, 0, 0, 0, 0, 0]]]]])
self._VerifyValues(
image,
ksizes=[2, 2, 2],
strides=[1, 1, 1],
padding="SAME",
patches=patches)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/extract_volume_patches_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SerializeSparse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SerializeSparseTest(test.TestCase):
def _SparseTensorPlaceholder(self, dtype=None):
if dtype is None:
dtype = dtypes.int32
return sparse_tensor_lib.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_3x4(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
[2, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([3, 4]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _testSerializeDeserializeHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input = self._SparseTensorValue_5x6(np.arange(6))
serialized = serialize_fn(sp_input, out_type=out_type)
sp_deserialized = deserialize_fn(serialized, dtype=dtypes.int32)
indices, values, shape = self.evaluate(sp_deserialized)
self.assertAllEqual(indices, sp_input[0])
self.assertAllEqual(values, sp_input[1])
self.assertAllEqual(shape, sp_input[2])
def testSerializeDeserialize(self):
self._testSerializeDeserializeHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse)
def testVariantSerializeDeserialize(self):
self._testSerializeDeserializeHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse,
dtypes.variant)
def _testSerializeDeserializeBatchHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input = self._SparseTensorValue_5x6(np.arange(6))
serialized = serialize_fn(sp_input, out_type=out_type)
serialized = array_ops.stack([serialized, serialized])
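      # Deserializing a rank-1 batch of serialized handles yields a single
      # SparseTensor with a new leading minibatch dimension, so the combined
      # dense shape checked below is [2, 5, 6].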
sp_deserialized = deserialize_fn(serialized, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input[0])
self.assertAllEqual(combined_values[:6], sp_input[1])
self.assertAllEqual(combined_values[6:], sp_input[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
@test_util.run_deprecated_v1
def testSerializeDeserializeBatch(self):
self._testSerializeDeserializeBatchHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testSerializeDeserializeManyBatch(self):
self._testSerializeDeserializeBatchHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
@test_util.run_deprecated_v1
def testVariantSerializeDeserializeBatch(self):
self._testSerializeDeserializeBatchHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse,
dtypes.variant)
def _testSerializeDeserializeBatchInconsistentShapeHelper(
self, serialize_fn, deserialize_fn, out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
serialized0 = serialize_fn(sp_input0, out_type=out_type)
serialized1 = serialize_fn(sp_input1, out_type=out_type)
serialized = array_ops.stack([serialized0, serialized1])
sp_deserialized = deserialize_fn(serialized, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0])
self.assertAllEqual(combined_values[:6], sp_input0[1])
self.assertAllEqual(combined_values[6:], sp_input1[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
@test_util.run_deprecated_v1
def testSerializeDeserializeBatchInconsistentShape(self):
self._testSerializeDeserializeBatchInconsistentShapeHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testVariantSerializeDeserializeBatchInconsistentShape(self):
self._testSerializeDeserializeBatchInconsistentShapeHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse,
dtypes.variant)
def _testSerializeDeserializeNestedBatchHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input = self._SparseTensorValue_5x6(np.arange(6))
serialized = serialize_fn(sp_input, out_type=out_type)
serialized = array_ops.stack([serialized, serialized])
serialized = array_ops.stack([serialized, serialized])
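      # Stacking twice produces a [2, 2] batch of serialized handles;
      # deserialize_fn prepends both batch dimensions, giving the combined
      # dense shape [2, 2, 5, 6] checked below.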
sp_deserialized = deserialize_fn(serialized, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized)
# minibatch 0
self.assertAllEqual(combined_indices[:6, :2], [[0, 0]] * 6)
self.assertAllEqual(combined_indices[:6, 2:], sp_input[0])
self.assertAllEqual(combined_values[:6], sp_input[1])
# minibatch 1
self.assertAllEqual(combined_indices[6:12, :2], [[0, 1]] * 6)
self.assertAllEqual(combined_indices[6:12, 2:], sp_input[0])
self.assertAllEqual(combined_values[6:12], sp_input[1])
# minibatch 2
self.assertAllEqual(combined_indices[12:18, :2], [[1, 0]] * 6)
self.assertAllEqual(combined_indices[12:18, 2:], sp_input[0])
self.assertAllEqual(combined_values[12:18], sp_input[1])
# minibatch 3
self.assertAllEqual(combined_indices[18:, :2], [[1, 1]] * 6)
self.assertAllEqual(combined_indices[18:, 2:], sp_input[0])
self.assertAllEqual(combined_values[18:], sp_input[1])
self.assertAllEqual(combined_shape, [2, 2, 5, 6])
@test_util.run_deprecated_v1
def testSerializeDeserializeNestedBatch(self):
self._testSerializeDeserializeNestedBatchHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testVariantSerializeDeserializeNestedBatch(self):
self._testSerializeDeserializeNestedBatchHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse,
dtypes.variant)
def _testFeedSerializeDeserializeBatchHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
serialized0 = serialize_fn(sp_input0, out_type=out_type)
serialized1 = serialize_fn(sp_input1, out_type=out_type)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = deserialize_fn(serialized_concat, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized, {sp_input0: input0_val,
sp_input1: input1_val})
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], input1_val[0])
self.assertAllEqual(combined_values[:6], input0_val[1])
self.assertAllEqual(combined_values[6:], input1_val[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
@test_util.run_deprecated_v1
def testFeedSerializeDeserializeBatch(self):
self._testFeedSerializeDeserializeBatchHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testFeedSerializeDeserializeManyBatch(self):
self._testFeedSerializeDeserializeBatchHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
@test_util.run_deprecated_v1
def testFeedVariantSerializeDeserializeBatch(self):
self._testFeedSerializeDeserializeBatchHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse,
dtypes.variant)
def _testSerializeManyShapeHelper(self,
serialize_many_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
serialized = serialize_many_fn(sparse_tensor, out_type=out_type)
serialized_value = sess.run(
serialized,
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
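      # serialize_many_sparse emits one row per minibatch entry (N == 4),
      # each with 3 columns: serialized indices, values and shape.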
self.assertEqual(serialized_value.shape, (4, 3))
@test_util.run_deprecated_v1
def testSerializeManyShape(self):
self._testSerializeManyShapeHelper(sparse_ops.serialize_many_sparse)
def testVariantSerializeManyShape(self):
# NOTE: The following test is a no-op as it is currently not possible to
# convert the serialized variant value to a numpy value.
pass
def _testSerializeManyDeserializeBatchHelper(self,
serialize_many_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
serialized = serialize_many_fn(sparse_tensor, out_type=out_type)
deserialized = deserialize_fn(serialized, dtype=dtypes.string)
deserialized_value = sess.run(
deserialized,
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertAllEqual(deserialized_value.indices, indices_value)
self.assertAllEqual(deserialized_value.values, values_value)
self.assertAllEqual(deserialized_value.dense_shape, shape_value)
@test_util.run_deprecated_v1
def testSerializeManyDeserializeBatch(self):
self._testSerializeManyDeserializeBatchHelper(
sparse_ops.serialize_many_sparse, sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testSerializeManyDeserializeManyBatch(self):
self._testSerializeManyDeserializeBatchHelper(
sparse_ops.serialize_many_sparse, sparse_ops.deserialize_many_sparse)
@test_util.run_deprecated_v1
def testVariantSerializeManyDeserializeBatch(self):
self._testSerializeManyDeserializeBatchHelper(
sparse_ops.serialize_many_sparse, sparse_ops.deserialize_sparse,
dtypes.variant)
@test_util.run_deprecated_v1
def testVariantSerializeDeserializeScalar(self):
with self.session(use_gpu=False) as sess:
indices_value = np.array([[]], dtype=np.int64)
values_value = np.array([37], dtype=np.int32)
shape_value = np.array([], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder()
serialized = sparse_ops.serialize_sparse(
sparse_tensor, out_type=dtypes.variant)
deserialized = sparse_ops.deserialize_sparse(
serialized, dtype=dtypes.int32)
deserialized_value = sess.run(
deserialized,
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertAllEqual(deserialized_value.indices, indices_value)
self.assertAllEqual(deserialized_value.values, values_value)
self.assertAllEqual(deserialized_value.dense_shape, shape_value)
@test_util.run_deprecated_v1
def testVariantSerializeDeserializeScalarBatch(self):
with self.session(use_gpu=False) as sess:
indices_value = np.array([[]], dtype=np.int64)
values_value = np.array([37], dtype=np.int32)
shape_value = np.array([], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder()
serialized = sparse_ops.serialize_sparse(
sparse_tensor, out_type=dtypes.variant)
stacked = array_ops.stack([serialized, serialized])
deserialized = sparse_ops.deserialize_sparse(stacked, dtype=dtypes.int32)
deserialized_value = sess.run(
deserialized,
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertAllEqual(deserialized_value.indices,
np.array([[0], [1]], dtype=np.int64))
self.assertAllEqual(deserialized_value.values,
np.array([37, 37], dtype=np.int32))
self.assertAllEqual(deserialized_value.dense_shape,
np.array([2], dtype=np.int64))
def _testDeserializeFailsWrongTypeHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
serialized0 = serialize_fn(sp_input0, out_type=out_type)
serialized1 = serialize_fn(sp_input1, out_type=out_type)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = deserialize_fn(serialized_concat, dtype=dtypes.int64)
with self.assertRaisesOpError(
r"Requested SparseTensor of type int64 but "
r"SparseTensor\[0\].values.dtype\(\) == int32"):
sess.run(sp_deserialized,
{sp_input0: input0_val,
sp_input1: input1_val})
@test_util.run_deprecated_v1
def testDeserializeFailsWrongType(self):
self._testDeserializeFailsWrongTypeHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testDeserializeManyFailsWrongType(self):
self._testDeserializeFailsWrongTypeHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
@test_util.run_deprecated_v1
def testVariantDeserializeFailsWrongType(self):
self._testDeserializeFailsWrongTypeHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse,
dtypes.variant)
def _testDeserializeFailsInconsistentRankHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_1x1x1()
serialized0 = serialize_fn(sp_input0, out_type=out_type)
serialized1 = serialize_fn(sp_input1, out_type=out_type)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = deserialize_fn(serialized_concat, dtype=dtypes.int32)
with self.assertRaisesOpError(
r"Inconsistent shape across SparseTensors: rank prior to "
r"SparseTensor\[1\] was: 2 but rank of SparseTensor\[1\] is: 3"):
sess.run(sp_deserialized,
{sp_input0: input0_val,
sp_input1: input1_val})
@test_util.run_deprecated_v1
def testDeserializeFailsInconsistentRank(self):
self._testDeserializeFailsInconsistentRankHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testDeserializeManyFailsInconsistentRank(self):
self._testDeserializeFailsInconsistentRankHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
@test_util.run_deprecated_v1
def testVariantDeserializeFailsInconsistentRank(self):
self._testDeserializeFailsInconsistentRankHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse,
dtypes.variant)
def _testDeserializeFailsInvalidProtoHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
serialized0 = serialize_fn(sp_input0, out_type=out_type)
serialized1 = ["a", "b", "c"]
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = deserialize_fn(serialized_concat, dtype=dtypes.int32)
with self.assertRaisesOpError(r"Could not parse serialized proto"):
sess.run(sp_deserialized, {sp_input0: input0_val})
@test_util.run_deprecated_v1
def testDeserializeFailsInvalidProto(self):
self._testDeserializeFailsInvalidProtoHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testDeserializeManyFailsInvalidProto(self):
self._testDeserializeFailsInvalidProtoHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_serialization_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for make_template."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import traceback
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def variable_scoped_function(trainable=True):
return variable_scope.get_variable(
"dummy", shape=[1], trainable=trainable,
initializer=init_ops.zeros_initializer())
def internally_variable_scoped_function(scope_name):
with variable_scope.variable_scope(scope_name):
return variable_scope.get_variable(
"dummy", shape=[1], initializer=init_ops.zeros_initializer())
def function_with_create(trainable):
"""Creates a variable as a side effect using tf.Variable."""
variables.Variable(0, trainable=trainable)
return variable_scope.get_variable(
"dummy", shape=[1], initializer=init_ops.zeros_initializer())
def function_with_side_create(trainable, name="side"):
"""Creates a variable as a side effect using tf.get_variable."""
variable_scope.get_variable(name, shape=[1], trainable=trainable)
return variable_scope.get_variable(
"dummy", shape=[1], initializer=init_ops.zeros_initializer())
def variable_scoped_function_with_local_variable():
variable_scope.get_local_variable(
"local", shape=[1], initializer=init_ops.zeros_initializer())
return variable_scope.get_variable(
"dummy", shape=[1], initializer=init_ops.zeros_initializer())
class TemplateTest(test.TestCase):
@test_util.run_deprecated_v1
def test_end_to_end(self):
"""This test shows a very simple line model with test_loss.
The template is used to share parameters between a training and test model.
"""
# y = 2x + 1
training_input, training_output = ([1., 2., 3., 4.], [2.8, 5.1, 7.2, 8.7])
test_input, test_output = ([5., 6., 7., 8.], [11, 13, 15, 17])
random_seed.set_random_seed(1234)
def test_line(x):
m = variable_scope.get_variable(
"w", shape=[], initializer=init_ops.truncated_normal_initializer())
b = variable_scope.get_variable(
"b", shape=[], initializer=init_ops.truncated_normal_initializer())
return x * m + b
line_template = template.make_template("line", test_line)
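    # Both applications below run through the same template, so they share
    # the same 'w' and 'b' variables; only the inputs differ.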
train_prediction = line_template(training_input)
test_prediction = line_template(test_input)
train_loss = math_ops.reduce_mean(
math_ops.square(train_prediction - training_output))
test_loss = math_ops.reduce_mean(
math_ops.square(test_prediction - test_output))
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
train_op = optimizer.minimize(train_loss)
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
initial_test_loss = self.evaluate(test_loss)
self.evaluate(train_op)
final_test_loss = self.evaluate(test_loss)
# Parameters are tied, so the loss should have gone down when we trained it.
self.assertLess(final_test_loss, initial_test_loss)
def test_end_to_end_eager(self):
"""This test shows a very simple line model with test_loss in eager mode.
The template is used to share parameters between a training and test model.
"""
with context.eager_mode():
# y = 2x + 1
training_input, training_output = ([1., 2., 3., 4.], [2.8, 5.1, 7.2, 8.7])
test_input, test_output = ([5., 6., 7., 8.], [11, 13, 15, 17])
random_seed.set_random_seed(1234)
def test_line(x):
m = variable_scope.get_variable(
"w", shape=[], initializer=init_ops.truncated_normal_initializer())
b = variable_scope.get_variable(
"b", shape=[], initializer=init_ops.truncated_normal_initializer())
return x * m + b
line_template = template.make_template("line", test_line)
def train_loss():
train_prediction = line_template(training_input)
return math_ops.reduce_mean(
math_ops.square(train_prediction - training_output))
def test_loss():
test_prediction = line_template(test_input)
return math_ops.reduce_mean(
math_ops.square(test_prediction - test_output))
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
initial_test_loss = test_loss()
optimizer.minimize(train_loss)
final_test_loss = test_loss()
# Parameters are tied, so the loss should have gone down after training.
self.assertLess(final_test_loss.numpy(), initial_test_loss.numpy())
@test_util.run_in_graph_and_eager_modes
def test_skip_stack_frames(self):
first = traceback.format_stack()
second = traceback.format_stack()
result = template._skip_common_stack_elements(first, second)
self.assertEqual(1, len(result))
self.assertNotEqual(len(first), len(result))
@test_util.run_in_graph_and_eager_modes
def test_template_with_empty_name(self):
tpl = template.make_template("", variable_scoped_function)
with variable_scope.variable_scope("outer"):
x = variable_scope.get_variable("x", [])
v = tpl()
self.assertEqual("outer/", tpl.variable_scope_name)
self.assertEqual("outer//dummy:0", v.name)
if context.executing_eagerly():
# In eager mode `x` is not visible to the template since the template does
# not rely on global collections.
self.assertEqual([v], tpl.variables)
else:
self.assertEqual([x, v], tpl.variables)
@test_util.run_in_graph_and_eager_modes
def test_template_with_name(self):
tmpl1 = template.make_template("s1", variable_scoped_function)
tmpl2 = template.make_template("s1", variable_scoped_function)
v1 = tmpl1()
v2 = tmpl1()
v3 = tmpl2()
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
self.assertEqual("s1/dummy:0", v1.name)
self.assertEqual("s1_1/dummy:0", v3.name)
@test_util.run_deprecated_v1
def test_same_unique_name_raise_error(self):
tmpl1 = template.make_template(
"_", variable_scoped_function, unique_name_="s1")
tmpl1()
tmpl2 = template.make_template(
"_", variable_scoped_function, unique_name_="s1")
with self.assertRaisesRegexp(
ValueError, "Variable s1/dummy already exists, disallowed.*"):
tmpl2()
def test_unique_name_raise_error_in_eager(self):
with context.eager_mode():
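      # The regex below matches the library's error message verbatim,
      # including its 'exeuction' spelling.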
with self.assertRaisesRegexp(
ValueError,
"unique_name_ cannot be used when eager exeuction is enabled."):
template.make_template(
"_", variable_scoped_function, unique_name_="s1")
@test_util.run_deprecated_v1
def test_unique_name_and_reuse(self):
tmpl1 = template.make_template(
"_", variable_scoped_function, unique_name_="s1")
v1 = tmpl1()
v2 = tmpl1()
variable_scope.get_variable_scope().reuse_variables()
tmpl2 = template.make_template(
"_", variable_scoped_function, unique_name_="s1")
v3 = tmpl2()
self.assertEqual(v1, v2)
self.assertEqual(v1, v3)
self.assertEqual("s1/dummy:0", v1.name)
@test_util.run_in_graph_and_eager_modes
def test_template_in_scope(self):
tmpl1 = template.make_template("s1", variable_scoped_function)
tmpl2 = template.make_template("s1", variable_scoped_function)
with variable_scope.variable_scope("scope"):
v1 = tmpl1()
v3 = tmpl2()
# The template contract requires the following to ignore scope2.
with variable_scope.variable_scope("scope2"):
v2 = tmpl1()
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
self.assertEqual("scope/s1/dummy:0", v1.name)
self.assertEqual("scope/s1_1/dummy:0", v3.name)
@test_util.run_in_graph_and_eager_modes
def test_template_with_internal_reuse(self):
tmpl1 = template.make_template("s1", internally_variable_scoped_function)
tmpl2 = template.make_template("s1", internally_variable_scoped_function)
v1 = tmpl1("test")
v2 = tmpl1("test")
v3 = tmpl2("test")
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
self.assertEqual("s1/test/dummy:0", v1.name)
self.assertEqual("s1_1/test/dummy:0", v3.name)
with self.assertRaises(ValueError):
tmpl1("not_test")
@test_util.run_in_graph_and_eager_modes
def test_template_without_name(self):
with self.assertRaisesRegexp(
ValueError, "name cannot be None."):
template.make_template(None, variable_scoped_function)
@test_util.run_in_graph_and_eager_modes
def test_make_template(self):
    # Test that we can call it with both positional and keyword arguments.
tmpl1 = template.make_template(
"s1", internally_variable_scoped_function, scope_name="test")
tmpl2 = template.make_template(
"s1", internally_variable_scoped_function, scope_name="test")
v1 = tmpl1()
v2 = tmpl1()
v3 = tmpl2()
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
self.assertEqual("s1/test/dummy:0", v1.name)
self.assertEqual("s1_1/test/dummy:0", v3.name)
@test_util.run_deprecated_v1
def test_enforces_no_extra_trainable_variables(self):
tmpl = template.make_template("s", function_with_create, trainable=True)
tmpl()
with self.assertRaises(ValueError):
tmpl()
@test_util.run_in_graph_and_eager_modes
def test_enforces_no_extra_trainable_variables_eager(self):
tmpl = template.make_template("s",
function_with_side_create,
trainable=True)
tmpl(name="1")
with self.assertRaises(ValueError):
tmpl(name="2")
def test_permits_extra_non_trainable_variables(self):
tmpl = template.make_template("s", function_with_create, trainable=False)
self.assertEqual(tmpl(), tmpl())
def test_permits_extra_non_trainable_variables_eager(self):
with context.eager_mode():
tmpl = template.make_template("s",
function_with_side_create,
trainable=False)
self.assertEqual(tmpl(name="1"), tmpl(name="2"))
@test_util.run_in_graph_and_eager_modes
def test_internal_variable_reuse(self):
def nested():
with variable_scope.variable_scope("nested") as vs:
v1 = variable_scope.get_variable(
"x", initializer=init_ops.zeros_initializer(), shape=[])
with variable_scope.variable_scope(vs, reuse=True):
v2 = variable_scope.get_variable("x")
self.assertEqual(v1, v2)
return v1
tmpl1 = template.make_template("s1", nested)
tmpl2 = template.make_template("s1", nested)
v1 = tmpl1()
v2 = tmpl1()
v3 = tmpl2()
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
self.assertEqual("s1/nested/x:0", v1.name)
self.assertEqual("s1_1/nested/x:0", v3.name)
@test_util.run_in_graph_and_eager_modes
def test_nested_templates(self):
def nested_template():
nested1 = template.make_template("nested", variable_scoped_function)
nested2 = template.make_template("nested", variable_scoped_function)
v1 = nested1()
v2 = nested2()
# nested1 and nested2 should not share variables
self.assertNotEqual(v1, v2)
# Variables created by nested1 should be isolated from variables
# created by nested2.
self.assertEqual(nested1.variables, [v1])
self.assertEqual(nested2.variables, [v2])
self.assertEqual(nested1.trainable_variables, [v1])
self.assertEqual(nested2.trainable_variables, [v2])
self.assertEqual(len(nested1.non_trainable_variables), 0)
self.assertEqual(len(nested2.non_trainable_variables), 0)
return v1, v2
tmpl1 = template.make_template("s1", nested_template)
tmpl2 = template.make_template("s1", nested_template)
v1, v2 = tmpl1()
v3, v4 = tmpl1()
v5, v6 = tmpl2()
# The second invocation of tmpl1 should reuse the variables
# created in the first invocation.
self.assertEqual([v1, v2], [v3, v4])
self.assertEqual(tmpl1.variables, [v1, v2])
self.assertEqual(tmpl1.trainable_variables, [v1, v2])
self.assertEqual(len(tmpl1.non_trainable_variables), 0)
# tmpl1 and tmpl2 should not share variables.
self.assertNotEqual([v1, v2], [v5, v6])
self.assertSequenceEqual(tmpl2.variables, [v5, v6])
self.assertSequenceEqual(tmpl2.trainable_variables, [v5, v6])
self.assertEqual(len(tmpl2.non_trainable_variables), 0)
self.assertEqual("s1/nested/dummy:0", v1.name)
self.assertEqual("s1/nested_1/dummy:0", v2.name)
self.assertEqual("s1_1/nested/dummy:0", v5.name)
self.assertEqual("s1_1/nested_1/dummy:0", v6.name)
self.assertEqual(2, len(tmpl1._checkpoint_dependencies))
self.assertEqual("nested", tmpl1._checkpoint_dependencies[0].name)
self.assertEqual("nested_1", tmpl1._checkpoint_dependencies[1].name)
model = training.Model()
model.template = tmpl1
self.assertEqual(model.variables, [v1, v2])
self.assertEqual(model.trainable_variables, [v1, v2])
self.assertEqual(len(model.non_trainable_variables), 0)
model.templates = [tmpl2]
self.assertEqual(model.variables, [v1, v2, v5, v6])
self.assertEqual(model.trainable_variables, [v1, v2, v5, v6])
self.assertEqual(len(model.non_trainable_variables), 0)
# Make sure losses, layers, and updates aren't broken by having a Template
# in the mix, which does not expose any updates or losses.
self.assertEqual([], model.layers)
self.assertEqual([], model.updates)
self.assertEqual([], model.losses)
self.assertEqual([], model.templates.layers)
self.assertEqual([], model.templates.updates)
self.assertEqual([], model.templates.losses)
@test_util.run_in_graph_and_eager_modes
def test_nested_templates_with_defun(self):
def variable_scoped_function_no_return_value(trainable=True):
# defun cannot compile functions that return non-Tensor objects
_ = variable_scope.get_variable(
"dummy",
shape=[1],
trainable=trainable,
initializer=init_ops.zeros_initializer())
def nested_template():
nested1 = template.make_template_internal(
"nested",
variable_scoped_function_no_return_value,
create_graph_function_=True)
nested2 = template.make_template_internal(
"nested",
variable_scoped_function_no_return_value,
create_graph_function_=True)
nested1()
nested2()
v1 = nested1.variables
v2 = nested2.variables
# nested1 and nested2 should not share variables
self.assertNotEqual(v1, v2)
# Variables created by nested1 should be isolated from variables
# created by nested2.
self.assertEqual(nested1.variables, v1)
self.assertEqual(nested2.variables, v2)
self.assertEqual(nested1.trainable_variables, v1)
self.assertEqual(nested2.trainable_variables, v2)
self.assertEqual(len(nested1.non_trainable_variables), 0)
self.assertEqual(len(nested2.non_trainable_variables), 0)
tmpl1 = template.make_template("s1", nested_template)
tmpl2 = template.make_template("s1", nested_template)
tmpl1()
v1 = tmpl1.variables
tmpl1()
v2 = tmpl1.variables
tmpl2()
v3 = tmpl2.variables
# The second invocation of tmpl1 should reuse the variables
# created in the first invocation.
self.assertSequenceEqual(v1, v2)
# tmpl1 and tmpl2 should not share variables.
self.assertNotEqual(v1, v3)
self.assertEqual("s1/nested/dummy:0", v1[0].name)
self.assertEqual("s1/nested_1/dummy:0", v1[1].name)
self.assertEqual("s1_1/nested/dummy:0", v3[0].name)
self.assertEqual("s1_1/nested_1/dummy:0", v3[1].name)
def test_graph_function_no_name(self):
with context.eager_mode():
def f(_, y):
return y + 1
partial = functools.partial(f, 1.0)
tmpl = template.make_template_internal(
"a", partial, create_graph_function_=True)
self.assertAllEqual(tmpl(ops.convert_to_tensor(1.0)), 2.0)
@test_util.run_in_graph_and_eager_modes
def test_immediate_scope_creation(self):
    # Create templates in one scope, then call them in another. By default
    # make_template captures the enclosing variable scope the first time the
    # template is called; passing create_scope_now_=True captures the scope
    # at construction time instead.
    with variable_scope.variable_scope("ctor_scope"):
      # Create scope here:
      tmpl_immed = template.make_template(
          "a", variable_scoped_function, create_scope_now_=True)
      # default: create scope at __call__
      tmpl_defer = template.make_template(
          "b", variable_scoped_function, create_scope_now_=False)
with variable_scope.variable_scope("call_scope"):
inner_imm_var = tmpl_immed()
inner_defer_var = tmpl_defer()
outer_imm_var = tmpl_immed()
outer_defer_var = tmpl_defer()
self.assertNotEqual(inner_imm_var, inner_defer_var)
self.assertEqual(outer_imm_var, inner_imm_var)
self.assertEqual(outer_defer_var, inner_defer_var)
self.assertEqual("ctor_scope/a/dummy:0", inner_imm_var.name)
self.assertEqual("call_scope/b/dummy:0", inner_defer_var.name)
@test_util.run_in_graph_and_eager_modes
def test_scope_access(self):
# Ensure that we can access the scope inside the template, because the name
# of that scope may be different from the name we pass to make_template, due
# to having been made unique by variable_scope.
with variable_scope.variable_scope("foo"):
# Create two templates with the same name, ensure scopes are made unique.
ta = template.make_template("bar", variable_scoped_function, True)
tb = template.make_template("bar", variable_scoped_function, True)
# Ensure we can get the scopes before either template is actually called.
self.assertEqual(ta.variable_scope.name, "foo/bar")
self.assertEqual(tb.variable_scope.name, "foo/bar_1")
with variable_scope.variable_scope("foo_2"):
# Create a template which defers scope creation.
tc = template.make_template("blah", variable_scoped_function, False)
# Before we call the template, the scope property will be set to None.
self.assertEqual(tc.variable_scope, None)
tc()
# Template is called at the top level, so there is no preceding "foo_2".
self.assertEqual(tc.variable_scope.name, "blah")
@test_util.run_in_graph_and_eager_modes
def test_custom_getter(self):
# Custom getter that maintains call count and forwards to true getter
custom_getter_count = [0]
def custom_getter(getter, name, *args, **kwargs):
custom_getter_count[0] += 1
return getter(name, *args, **kwargs)
# Test that custom getter is called both when variables are created and
# subsequently accessed
tmpl1 = template.make_template(
"s1", variable_scoped_function, custom_getter_=custom_getter)
self.assertEqual(custom_getter_count[0], 0)
tmpl1()
self.assertEqual(custom_getter_count[0], 1)
tmpl1()
self.assertEqual(custom_getter_count[0], 2)
# Test that custom getter is called when the variable scope is created
# during construction
custom_getter_count[0] = 0
tmpl2 = template.make_template(
"s2",
variable_scoped_function,
custom_getter_=custom_getter,
create_scope_now_=True)
self.assertEqual(custom_getter_count[0], 0)
tmpl2()
self.assertEqual(custom_getter_count[0], 1)
tmpl2()
self.assertEqual(custom_getter_count[0], 2)
@test_util.run_in_graph_and_eager_modes
def test_fails_gracefully(self):
for create_scope_now in [True, False]:
def module_function_with_one_arg(inputs):
w = variable_scope.get_variable(
"w", shape=[1], initializer=init_ops.zeros_initializer())
return inputs * w
templatized_function = template.make_template(
"f1", module_function_with_one_arg,
create_scope_now_=create_scope_now)
data = array_ops.zeros([1])
try:
# Try to connect with a kwarg which is unsupported.
templatized_function(data, is_training=True)
except TypeError:
pass
# The failed __call__ hasn't modified the inner state.
self.assertFalse(templatized_function._variables_created)
templatized_function(data)
self.assertTrue(templatized_function._variables_created)
@test_util.run_in_graph_and_eager_modes
def test_name_scopes_for_variable_scopes(self):
# Test that name scopes are not unnecessarily uniquified (but are
# still uniquified when necessary).
def linear_module(x, output_size):
w = variable_scope.get_variable(
"w", shape=[x.get_shape()[1], output_size],
initializer=init_ops.zeros_initializer())
b = variable_scope.get_variable(
"b", shape=[output_size],
initializer=init_ops.zeros_initializer())
return (math_ops.matmul(x, w) + b), w
def make_linear_module(output_size, name):
return template.make_template(
name,
linear_module,
output_size=output_size,
create_scope_now_=True)
inputs = array_ops.ones((3, 4))
linear1 = make_linear_module(output_size=2, name="foo")
outputs_a, w1 = linear1(inputs)
outputs_b, _ = linear1(inputs)
self.assertEquals("foo", linear1.variable_scope.name)
self.assertEquals("foo/w:0", w1.name)
if not context.executing_eagerly():
self.assertEquals("foo/add:0", outputs_a.name,
"First application of template should get "
"same name scope as variables.")
self.assertEquals("foo_1/add:0", outputs_b.name,
"Second application of template should get "
"a freshly uniquified name scope.")
linear2 = make_linear_module(output_size=2, name="foo")
outputs_c, w2 = linear2(inputs)
outputs_d, _ = linear2(inputs)
self.assertEquals("foo_1", linear2.variable_scope.name,
"New template gets a freshly uniquified variable scope "
"because 'foo' is already taken.")
self.assertEquals("foo_1/w:0", w2.name)
if not context.executing_eagerly():
self.assertEquals("foo_1_1/add:0", outputs_c.name,
"First application of template would get "
"same name scope as variables, but 'foo_1' is already "
"a name scope.")
self.assertEquals("foo_1_2/add:0", outputs_d.name,
"Second application of template should also get "
"a freshly uniquified name scope.")
@test_util.run_in_graph_and_eager_modes
def test_global_variables(self):
# Make sure global_variables are created.
with variable_scope.variable_scope("foo"):
      # Create two templates, picking a variable-creating function for tb
      # that works in the current execution mode.
ta = template.make_template("bar", variable_scoped_function, True)
if context.executing_eagerly():
tb = template.make_template("s", function_with_side_create,
trainable=False)
else:
tb = template.make_template("s", function_with_create, trainable=False)
    # Initially there are no variables created.
    self.assertEqual([], list(ta.global_variables))
    self.assertEqual([], list(tb.global_variables))
    # After calling, variables have been created.
    ta()
    tb()
    # Check the global variables created by the calls.
    self.assertEqual(1, len(ta.global_variables))
    self.assertEqual(2, len(tb.global_variables))
@test_util.run_in_graph_and_eager_modes
def test_trainable_variables(self):
# Make sure trainable_variables are created.
with variable_scope.variable_scope("foo2"):
# Create two templates with the same name, ensure scopes are made unique.
ta = template.make_template("bar", variable_scoped_function, True)
tb = template.make_template("bar", variable_scoped_function, True)
    # Initially there are no variables created.
    self.assertEqual([], list(ta.trainable_variables))
    self.assertEqual([], list(tb.trainable_variables))
    # After calling, variables have been created.
    ta()
    tb()
    # Check the trainable variables created by the calls.
    self.assertEqual(1, len(ta.trainable_variables))
    self.assertEqual(1, len(tb.trainable_variables))
    # No non-trainable variables were created.
    self.assertEqual([], list(ta.non_trainable_variables))
    self.assertEqual([], list(tb.non_trainable_variables))
# Ensure variables returns all the variables.
self.assertEqual(1, len(ta.variables))
self.assertEqual(1, len(tb.variables))
@test_util.run_in_graph_and_eager_modes
def test_non_trainable_variables(self):
# Make sure non_trainable_variables are created.
with variable_scope.variable_scope("foo2"):
ta = template.make_template("a", variable_scoped_function,
trainable=True)
tb = template.make_template("b", variable_scoped_function,
trainable=False)
    # Initially there are no variables created.
    self.assertEqual([], list(ta.variables))
    self.assertEqual([], list(tb.variables))
    # After calling, variables have been created.
ta()
tb()
# Check the trainable and non_trainable variables.
self.assertEqual(1, len(ta.trainable_variables))
self.assertEqual([], list(ta.non_trainable_variables))
self.assertEqual([], list(tb.trainable_variables))
self.assertEqual(1, len(tb.non_trainable_variables))
# Ensure variables returns all the variables.
self.assertEqual(1, len(ta.variables))
self.assertEqual(1, len(tb.variables))
# TODO(apassos) handle local variables in Eager
@test_util.run_deprecated_v1
def test_local_variables(self):
    # Make sure local_variables are created.
with variable_scope.variable_scope("foo3"):
# Create two templates with the same name, ensure scopes are made unique.
ta = template.make_template("bar", variable_scoped_function, True)
tb = template.make_template("bar",
variable_scoped_function_with_local_variable)
    # Initially there are no variables created.
    self.assertEqual([], list(ta.local_variables))
    self.assertEqual([], list(tb.local_variables))
    # After calling, variables have been created.
    ta()
    tb()
    # Check the local variables created by the calls.
self.assertEqual(0, len(ta.local_variables))
self.assertEqual(1, len(tb.local_variables))
@test_util.run_in_graph_and_eager_modes
def test_make_template_with_defun(self):
def variable_scoped_function_no_return_value(scope_name):
# defun cannot compile functions that return non-Tensor objects
with variable_scope.variable_scope(scope_name):
_ = variable_scope.get_variable(
"dummy", shape=[1], initializer=init_ops.zeros_initializer())
tmpl = template.make_template_internal(
"s1",
variable_scoped_function_no_return_value,
create_graph_function_=True,
scope_name="test")
    # The first invocation of tmpl creates variables, the second should
    # be executed as a graph function.
tmpl()
v1 = tmpl.variables
tmpl()
v2 = tmpl.variables
self.assertSequenceEqual(v1, v2)
self.assertEqual("s1/test/dummy:0", v1[0].name)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/template_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for aggregate_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class AddNTest(test.TestCase):
# AddN special-cases adding the first M inputs to make (N - M) divisible by 8,
# after which it adds the remaining (N - M) tensors 8 at a time in a loop.
# Test N in [1, 10] so we check each special-case from 1 to 9 and one
# iteration of the loop.
_MAX_N = 10
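  # For example (an illustration inferred from the comment above, not from
  # the kernel source): with N = 10, the first M = 2 inputs are summed by a
  # special case so that the remaining N - M = 8 tensors can be folded in
  # by a single pass of the loop.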
def _supported_types(self):
if test.is_gpu_available():
return [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64,
dtypes.complex128, dtypes.int64
]
return [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64,
dtypes.complex128]
def _buildData(self, shape, dtype):
data = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
    # For complex types, add a value-dependent imaginary component (10x the
    # real part) so we can tell we got the right value.
if dtype.is_complex:
return data + 10j * data
return data
def testAddN(self):
np.random.seed(12345)
with self.session(use_gpu=True) as sess:
for dtype in self._supported_types():
for count in range(1, self._MAX_N + 1):
data = [self._buildData((2, 2), dtype) for _ in range(count)]
actual = self.evaluate(math_ops.add_n(data))
expected = np.sum(np.vstack(
[np.expand_dims(d, 0) for d in data]), axis=0)
tol = 5e-3 if dtype == dtypes.float16 else 5e-7
self.assertAllClose(expected, actual, rtol=tol, atol=tol)
@test_util.run_deprecated_v1
def testUnknownShapes(self):
np.random.seed(12345)
with self.session(use_gpu=True) as sess:
for dtype in self._supported_types():
data = self._buildData((2, 2), dtype)
for count in range(1, self._MAX_N + 1):
data_ph = array_ops.placeholder(dtype=dtype)
actual = sess.run(math_ops.add_n([data_ph] * count), {data_ph: data})
expected = np.sum(np.vstack([np.expand_dims(data, 0)] * count),
axis=0)
tol = 5e-3 if dtype == dtypes.float16 else 5e-7
self.assertAllClose(expected, actual, rtol=tol, atol=tol)
@test_util.run_deprecated_v1
def testVariant(self):
def create_constant_variant(value):
return constant_op.constant(
tensor_pb2.TensorProto(
dtype=dtypes.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(value, dtype=np.int32).tobytes())
]))
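    # Note: each VariantTensorDataProto carries a type_name plus opaque
    # metadata bytes; here the metadata is an int32 value serialized with
    # tobytes(), which the "int" decoder registered in
    # variant_op_registry.cc knows how to read back.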
# TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant
# copying between CPU and GPU is supported.
with self.session(use_gpu=False):
variant_const_3 = create_constant_variant(3)
variant_const_4 = create_constant_variant(4)
variant_const_5 = create_constant_variant(5)
# 3 + 3 + 5 + 4 = 15.
result = math_ops.add_n((variant_const_3, variant_const_3,
variant_const_5, variant_const_4))
# Smoke test -- ensure this executes without trouble.
# Right now, non-numpy-compatible objects cannot be returned from a
# session.run call; similarly, objects that can't be converted to
# native numpy types cannot be passed to ops.convert_to_tensor.
# For now, run the test and examine the output to see that the result is
# equal to 15.
result_op = logging_ops.Print(
result, [variant_const_3, variant_const_4, variant_const_5, result],
message=("Variants stored an int: c(3), c(4), c(5), "
"add_n(c(3), c(3), c(5), c(4)): ")).op
result_op.run()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/aggregate_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import timeit
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import network as keras_network
from tensorflow.python.layers import base as base_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_lib
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops.losses import losses
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.training import training
from tensorflow.python.util import nest
class Plus1RNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 5
@property
def state_size(self):
return 5
def call(self, input_, state, scope=None):
return (input_ + 1, state + 1)
class ScalarStateRNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 1
@property
def state_size(self):
return tensor_shape.TensorShape([])
def zero_state(self, batch_size, dtype):
return array_ops.zeros([], dtype=dtypes.int32)
def call(self, input_, state, scope=None):
return (input_, state + 1)
class UnbalancedOutputRNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return tensor_shape.TensorShape(1), tensor_shape.TensorShape((2))
@property
def state_size(self):
return tensor_shape.TensorShape([])
def zero_state(self, batch_size, dtype):
return array_ops.zeros([], dtype=dtypes.int32)
def call(self, input_, state, scope=None):
concatenated = array_ops.concat((input_, input_), axis=-1)
return (input_, concatenated), state + 1
class TensorArrayStateRNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell its state as a TensorArray."""
@property
def output_size(self):
return 1
@property
def state_size(self):
return (tensor_shape.TensorShape([]), ())
def zero_state(self, batch_size, dtype):
return (array_ops.zeros([], dtype=dtypes.int32),
tensor_array_ops.TensorArray(
dtype=dtype, size=0, dynamic_size=True))
def call(self, input_, state, scope=None):
new_array = state[1].write(state[0], input_)
return (input_, (state[0] + 1, new_array))
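# A minimal sketch (added for illustration, not part of the original tests)
# of the functional TensorArray pattern TensorArrayStateRNNCell threads
# through its state: each write returns a new TensorArray handle that must
# replace the old one.
def _tensor_array_accumulate_example():
  ta = tensor_array_ops.TensorArray(
      dtype=dtypes.float32, size=0, dynamic_size=True)
  ta = ta.write(0, constant_op.constant([1.0]))  # write() returns a new handle.
  ta = ta.write(1, constant_op.constant([2.0]))
  return ta.stack()  # A [2, 1] tensor: [[1.0], [2.0]].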
class KerasNetworkTFRNNs(keras_network.Network):
def __init__(self, name=None):
super(KerasNetworkTFRNNs, self).__init__(name=name)
self._cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.LSTMCell(1) for _ in range(2)])
def call(self, inputs):
return self._cell(inputs, self._cell.get_initial_state(inputs))
class KerasNetworkKerasRNNs(keras_network.Network):
def __init__(self, name=None):
super(KerasNetworkKerasRNNs, self).__init__(name=name)
self._cell = keras.layers.StackedRNNCells(
[keras.layers.LSTMCell(1) for _ in range(2)])
def call(self, inputs):
return self._cell(inputs, self._cell.get_initial_state(inputs))
class RNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
@test_util.run_in_graph_and_eager_modes
def testInvalidSequenceLengthShape(self):
cell = Plus1RNNCell()
if context.executing_eagerly():
inputs = [constant_op.constant(np.ones((3, 4)))]
else:
inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
with self.assertRaisesRegexp(ValueError, "must be a vector"):
rnn.dynamic_rnn(
cell,
array_ops.stack(inputs),
dtype=dtypes.float32,
sequence_length=[[4]])
@test_util.run_in_graph_and_eager_modes
def testInvalidDtype(self):
if context.executing_eagerly():
inputs = np.zeros((3, 4, 5), dtype=np.int32)
else:
inputs = array_ops.placeholder(dtypes.int32, shape=(3, 4, 5))
cells = [
rnn_cell_impl.BasicRNNCell,
rnn_cell_impl.GRUCell,
rnn_cell_impl.BasicLSTMCell,
rnn_cell_impl.LSTMCell,
]
for cell_cls in cells:
with self.cached_session():
with self.assertRaisesRegexp(
ValueError, "RNN cell only supports floating"):
cell = cell_cls(2, dtype=dtypes.int32)
rnn.dynamic_rnn(cell, inputs, dtype=dtypes.int32)
@test_util.run_in_graph_and_eager_modes
def testBatchSizeFromInput(self):
cell = Plus1RNNCell()
in_eager_mode = context.executing_eagerly()
# With static batch size
if in_eager_mode:
inputs = np.zeros((3, 4, 5), dtype=np.float32)
initial_state = np.zeros((3, 5), dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(3, 4, 5))
initial_state = array_ops.placeholder(dtypes.float32, shape=(3, 5))
# - Without initial_state
outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(3, outputs.shape[0])
self.assertEqual(3, state.shape[0])
# - With initial_state
outputs, state = rnn.dynamic_rnn(
cell, inputs, initial_state=initial_state)
self.assertEqual(3, outputs.shape[0])
self.assertEqual(3, state.shape[0])
# Without static batch size
# Tensor shapes are fully determined with eager execution enabled,
# so only run this test for graph construction.
if not in_eager_mode:
inputs = array_ops.placeholder(dtypes.float32, shape=(None, 4, 5))
# - Without initial_state
outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(None, outputs.shape.dims[0].value)
self.assertEqual(None, state.shape.dims[0].value)
# - With initial_state
outputs, state = rnn.dynamic_rnn(
cell,
inputs,
initial_state=array_ops.placeholder(dtypes.float32, shape=(None, 5)))
self.assertEqual(None, outputs.shape.dims[0].value)
self.assertEqual(None, state.shape.dims[0].value)
@test_util.run_in_graph_and_eager_modes
def testScalarStateIsAccepted(self):
cell = ScalarStateRNNCell()
in_eager_mode = context.executing_eagerly()
if in_eager_mode:
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
with self.cached_session(use_gpu=True) as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
if not in_eager_mode:
outputs, state = sess.run(
[outputs, state], feed_dict={inputs: [[[1], [2], [3], [4]]]})
self.assertAllEqual([[[1], [2], [3], [4]]], outputs)
self.assertAllEqual(4, state)
@test_util.run_in_graph_and_eager_modes
def testUnbalancedOutputIsAccepted(self):
cell = UnbalancedOutputRNNCell()
in_eager_mode = context.executing_eagerly()
if in_eager_mode:
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
with self.cached_session(use_gpu=True) as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
if not in_eager_mode:
outputs, state = sess.run(
[outputs, state], feed_dict={inputs: [[[1], [2], [3], [4]]]})
self.assertIsInstance(outputs, tuple)
self.assertAllEqual([[[1], [2], [3], [4]]], outputs[0])
self.assertAllEqual([[[1, 1], [2, 2], [3, 3], [4, 4]]], outputs[1])
self.assertAllEqual(4, state)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testEagerMemory(self):
with context.eager_mode():
cell = TensorArrayStateRNNCell()
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=[4])
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testTensorArrayStateIsAccepted(self):
cell = TensorArrayStateRNNCell()
in_eager_mode = context.executing_eagerly()
if in_eager_mode:
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
with self.cached_session(use_gpu=True) as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
state = (state[0], state[1].stack())
if not in_eager_mode:
outputs, state = sess.run(
[outputs, state], feed_dict={
inputs: [[[1], [2], [3], [4]]]
})
self.assertAllEqual([[[1], [2], [3], [4]]], outputs)
self.assertAllEqual(4, state[0])
self.assertAllEqual([[[1]], [[2]], [[3]], [[4]]], state[1])
@test_util.run_deprecated_v1
def testCellGetInitialState(self):
cell = rnn_cell_impl.BasicRNNCell(5)
with self.assertRaisesRegexp(
ValueError, "batch_size and dtype cannot be None"):
cell.get_initial_state(None, None, None)
inputs = array_ops.placeholder(dtypes.float32, shape=(None, 4, 1))
with self.assertRaisesRegexp(
ValueError, "batch size from input tensor is different from"):
cell.get_initial_state(inputs=inputs, batch_size=50, dtype=None)
with self.assertRaisesRegexp(
ValueError, "batch size from input tensor is different from"):
cell.get_initial_state(
inputs=inputs, batch_size=constant_op.constant(50), dtype=None)
with self.assertRaisesRegexp(
ValueError, "dtype from input tensor is different from"):
cell.get_initial_state(inputs=inputs, batch_size=None, dtype=dtypes.int16)
initial_state = cell.get_initial_state(
inputs=inputs, batch_size=None, dtype=None)
self.assertEqual(initial_state.shape.as_list(), [None, 5])
self.assertEqual(initial_state.dtype, inputs.dtype)
batch = array_ops.shape(inputs)[0]
dtype = inputs.dtype
initial_state = cell.get_initial_state(None, batch, dtype)
self.assertEqual(initial_state.shape.as_list(), [None, 5])
self.assertEqual(initial_state.dtype, inputs.dtype)
def _assert_cell_builds(self, cell_class, dtype, batch_size, in_size,
out_size):
cell = cell_class(out_size, dtype=dtype)
in_shape = tensor_shape.TensorShape((batch_size, in_size))
cell.build(in_shape)
state_output = cell.get_initial_state(
inputs=None, batch_size=batch_size, dtype=dtype)
cell_output, _ = cell(array_ops.zeros(in_shape, dtype), state_output)
self.assertAllEqual([batch_size, out_size], cell_output.shape.as_list())
@test_util.run_in_graph_and_eager_modes
def testCellsBuild(self):
f32 = dtypes.float32
f64 = dtypes.float64
self._assert_cell_builds(rnn_cell_impl.BasicRNNCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.BasicRNNCell, f64, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.BasicLSTMCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.BasicLSTMCell, f64, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.GRUCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.GRUCell, f64, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.LSTMCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.LSTMCell, f64, 5, 7, 3)
@test_util.run_deprecated_v1
def testRNNWithKerasSimpleRNNCell(self):
with self.cached_session() as sess:
input_shape = 10
output_shape = 5
timestep = 4
batch = 100
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
y_train = keras.utils.to_categorical(y_train)
cell = keras.layers.SimpleRNNCell(output_shape)
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
predict = array_ops.placeholder(
dtypes.float32, shape=(None, output_shape))
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
self.assertEqual(state.shape.as_list(), [None, output_shape])
loss = losses.softmax_cross_entropy(predict, state)
train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
sess.run([variables_lib.global_variables_initializer()])
_, outputs, state = sess.run(
[train_op, outputs, state], {inputs: x_train, predict: y_train})
self.assertEqual(len(outputs), batch)
self.assertEqual(len(state), batch)
@test_util.run_deprecated_v1
def testRNNWithKerasGRUCell(self):
with self.cached_session() as sess:
input_shape = 10
output_shape = 5
timestep = 4
batch = 100
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
y_train = keras.utils.to_categorical(y_train)
cell = keras.layers.GRUCell(output_shape)
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
predict = array_ops.placeholder(
dtypes.float32, shape=(None, output_shape))
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
self.assertEqual(state.shape.as_list(), [None, output_shape])
loss = losses.softmax_cross_entropy(predict, state)
train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
sess.run([variables_lib.global_variables_initializer()])
_, outputs, state = sess.run(
[train_op, outputs, state], {inputs: x_train, predict: y_train})
self.assertEqual(len(outputs), batch)
self.assertEqual(len(state), batch)
@test_util.run_deprecated_v1
def testRNNWithKerasLSTMCell(self):
with self.cached_session() as sess:
input_shape = 10
output_shape = 5
timestep = 4
batch = 100
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
y_train = keras.utils.to_categorical(y_train)
cell = keras.layers.LSTMCell(output_shape)
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
predict = array_ops.placeholder(
dtypes.float32, shape=(None, output_shape))
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
self.assertEqual(len(state), 2)
self.assertEqual(state[0].shape.as_list(), [None, output_shape])
self.assertEqual(state[1].shape.as_list(), [None, output_shape])
loss = losses.softmax_cross_entropy(predict, state[0])
train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
sess.run([variables_lib.global_variables_initializer()])
_, outputs, state = sess.run(
[train_op, outputs, state], {inputs: x_train, predict: y_train})
self.assertEqual(len(outputs), batch)
self.assertEqual(len(state), 2)
self.assertEqual(len(state[0]), batch)
self.assertEqual(len(state[1]), batch)
@test_util.run_deprecated_v1
def testRNNWithStackKerasCell(self):
with self.cached_session() as sess:
input_shape = 10
output_shape = 5
timestep = 4
batch = 100
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
y_train = keras.utils.to_categorical(y_train)
cell = keras.layers.StackedRNNCells(
[keras.layers.LSTMCell(2 * output_shape),
keras.layers.LSTMCell(output_shape)])
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
predict = array_ops.placeholder(
dtypes.float32, shape=(None, output_shape))
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
self.assertEqual(len(state), 2)
state = nest.flatten(state)
self.assertEqual(len(state), 4)
self.assertEqual(state[0].shape.as_list(), [None, 2 * output_shape])
self.assertEqual(state[1].shape.as_list(), [None, 2 * output_shape])
self.assertEqual(state[2].shape.as_list(), [None, output_shape])
self.assertEqual(state[3].shape.as_list(), [None, output_shape])
loss = losses.softmax_cross_entropy(predict, state[2])
train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
sess.run([variables_lib.global_variables_initializer()])
_, outputs, state = sess.run(
[train_op, outputs, state], {inputs: x_train, predict: y_train})
self.assertEqual(len(outputs), batch)
self.assertEqual(len(state), 4)
for s in state:
self.assertEqual(len(s), batch)
@test_util.run_deprecated_v1
def testStaticRNNWithKerasSimpleRNNCell(self):
with self.cached_session() as sess:
input_shape = 10
output_shape = 5
timestep = 4
batch = 100
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
x_train = np.transpose(x_train, (1, 0, 2))
y_train = keras.utils.to_categorical(y_train)
cell = keras.layers.SimpleRNNCell(output_shape)
inputs = [array_ops.placeholder(
dtypes.float32, shape=(None, input_shape))] * timestep
predict = array_ops.placeholder(
dtypes.float32, shape=(None, output_shape))
outputs, state = rnn.static_rnn(
cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), timestep)
self.assertEqual(outputs[0].shape.as_list(), [None, output_shape])
self.assertEqual(state.shape.as_list(), [None, output_shape])
loss = losses.softmax_cross_entropy(predict, state)
train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
sess.run([variables_lib.global_variables_initializer()])
feed_dict = {i: d for i, d in zip(inputs, x_train)}
feed_dict[predict] = y_train
_, outputs, state = sess.run(
[train_op, outputs, state], feed_dict)
self.assertEqual(len(outputs), timestep)
self.assertEqual(len(outputs[0]), batch)
self.assertEqual(len(state), batch)
@test_util.run_deprecated_v1
def testKerasAndTFRNNLayerOutputComparison(self):
input_shape = 10
output_shape = 5
timestep = 4
batch = 20
(x_train, _), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
fix_weights_generator = keras.layers.SimpleRNNCell(output_shape)
fix_weights_generator.build((None, input_shape))
weights = fix_weights_generator.get_weights()
with self.session(graph=ops_lib.Graph()) as sess:
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
cell = keras.layers.SimpleRNNCell(output_shape)
tf_out, tf_state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
cell.set_weights(weights)
[tf_out, tf_state] = sess.run([tf_out, tf_state], {inputs: x_train})
with self.session(graph=ops_lib.Graph()) as sess:
k_input = keras.Input(shape=(timestep, input_shape),
dtype=dtypes.float32)
cell = keras.layers.SimpleRNNCell(output_shape)
layer = keras.layers.RNN(cell, return_sequences=True, return_state=True)
keras_out = layer(k_input)
cell.set_weights(weights)
k_out, k_state = sess.run(keras_out, {k_input: x_train})
self.assertAllClose(tf_out, k_out)
self.assertAllClose(tf_state, k_state)
@test_util.run_deprecated_v1
def testSimpleRNNCellAndBasicRNNCellComparison(self):
input_shape = 10
output_shape = 5
timestep = 4
batch = 20
(x_train, _), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
fix_weights_generator = keras.layers.SimpleRNNCell(output_shape)
fix_weights_generator.build((None, input_shape))
    # The SimpleRNNCell contains 3 weights: kernel, recurrent_kernel, and bias.
    # The BasicRNNCell contains 2 weights: kernel and bias, where its kernel
    # is the concatenation of SimpleRNNCell's [kernel, recurrent_kernel].
keras_weights = fix_weights_generator.get_weights()
kernel, recurrent_kernel, bias = keras_weights
tf_weights = [np.concatenate((kernel, recurrent_kernel)), bias]
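    # (Shape note: kernel is (input_dim, units) and recurrent_kernel is
    # (units, units), so the concatenation is (input_dim + units, units),
    # matching BasicRNNCell's single fused kernel.)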
with self.session(graph=ops_lib.Graph()) as sess:
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
cell = keras.layers.SimpleRNNCell(output_shape)
k_out, k_state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
cell.set_weights(keras_weights)
[k_out, k_state] = sess.run([k_out, k_state], {inputs: x_train})
with self.session(graph=ops_lib.Graph()) as sess:
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
cell = rnn_cell_impl.BasicRNNCell(output_shape)
tf_out, tf_state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
cell.set_weights(tf_weights)
[tf_out, tf_state] = sess.run([tf_out, tf_state], {inputs: x_train})
self.assertAllClose(tf_out, k_out, atol=1e-5)
self.assertAllClose(tf_state, k_state, atol=1e-5)
@test_util.run_deprecated_v1
def testBasicLSTMCellInterchangeWithLSTMCell(self):
with self.session(graph=ops_lib.Graph()) as sess:
basic_cell = rnn_cell_impl.BasicLSTMCell(1)
basic_cell(array_ops.ones([1, 1]),
state=basic_cell.get_initial_state(inputs=None,
batch_size=1,
dtype=dtypes.float32))
self.evaluate([v.initializer for v in basic_cell.variables])
self.evaluate(basic_cell._bias.assign([10.] * 4))
save = saver.Saver()
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = save.save(sess, prefix)
with self.session(graph=ops_lib.Graph()) as sess:
lstm_cell = rnn_cell_impl.LSTMCell(1, name="basic_lstm_cell")
lstm_cell(array_ops.ones([1, 1]),
state=lstm_cell.get_initial_state(inputs=None,
batch_size=1,
dtype=dtypes.float32))
self.evaluate([v.initializer for v in lstm_cell.variables])
save = saver.Saver()
save.restore(sess, save_path)
self.assertAllEqual([10.] * 4, self.evaluate(lstm_cell._bias))
  # TODO(scottzhu): Look into updating for V2 Initializers.
@test_util.run_deprecated_v1
def testRNNCellSerialization(self):
for cell in [
rnn_cell_impl.LSTMCell(32, use_peepholes=True, cell_clip=True),
rnn_cell_impl.BasicLSTMCell(32, dtype=dtypes.float32),
rnn_cell_impl.BasicRNNCell(32, activation="relu", dtype=dtypes.float32),
rnn_cell_impl.GRUCell(32, dtype=dtypes.float32)
]:
with self.cached_session():
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer="rmsprop", loss="mse")
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
        # The custom_objects argument is important here since rnn_cell_impl
        # is not visible as a Keras layer, and its cell names conflict with
        # keras.layers.LSTMCell and GRUCell.
layer = keras.layers.RNN.from_config(
config,
custom_objects={
"BasicRNNCell": rnn_cell_impl.BasicRNNCell,
"GRUCell": rnn_cell_impl.GRUCell,
"LSTMCell": rnn_cell_impl.LSTMCell,
"BasicLSTMCell": rnn_cell_impl.BasicLSTMCell
})
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
def testRNNCellActsLikeKerasRNNCellInProperScope(self):
with base_layers.keras_style_scope():
kn1 = KerasNetworkTFRNNs(name="kn1")
kn2 = KerasNetworkKerasRNNs(name="kn2")
z = array_ops.zeros((2, 3))
kn1(z)
kn2(z)
# pylint: disable=protected-access
self.assertTrue(all("kn1" in v.name for v in kn1._cell.variables))
self.assertTrue(all("kn2" in v.name for v in kn2._cell.variables))
with base_layers.keras_style_scope():
kn1_new = KerasNetworkTFRNNs(name="kn1_new")
kn2_new = KerasNetworkKerasRNNs(name="kn2_new")
kn2_new(z)
# Most importantly, this doesn't fail due to variable scope reuse issues.
kn1_new(z)
self.assertTrue(all("kn1_new" in v.name for v in kn1_new._cell.variables))
self.assertTrue(all("kn2_new" in v.name for v in kn2_new._cell.variables))
######### Benchmarking RNN code
def _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell_impl.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + [final_state],
trainable_variables)
return control_flow_ops.group(final_state, *(gradients + outputs))
def _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell_impl.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.dynamic_rnn(
cell, inputs_t, sequence_length=sequence_length, dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
return control_flow_ops.group(final_state, outputs, *gradients)
def graph_creation_static_vs_dynamic_rnn_benchmark(max_time):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# These parameters don't matter
batch_size = 512
num_units = 512
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
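  # (np.dstack yields shape (batch, depth, time); the transpose reorders it
  # to (batch, time, depth), the batch-major layout dynamic_rnn expects by
  # default.)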
def _create_static_rnn():
with session.Session(config=config, graph=ops_lib.Graph()):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
_static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length)
def _create_dynamic_rnn():
with session.Session(config=config, graph=ops_lib.Graph()):
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
_static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
delta_static = timeit.timeit(_create_static_rnn, number=5)
delta_dynamic = timeit.timeit(_create_dynamic_rnn, number=5)
print("%d \t %f \t %f \t %f" %
(max_time, delta_static, delta_dynamic, delta_dynamic / delta_static))
return delta_static, delta_dynamic
def _timer(sess, ops):
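  """Returns the average wall time, in seconds, of one sess.run(ops).

  Performs two untimed warm-up runs, then averages over 20 timed runs.
  """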
  # Warm-up runs.
for _ in range(2):
sess.run(ops)
# Timing run
runs = 20
start = time.time()
for _ in range(runs):
sess.run(ops)
end = time.time()
return (end - start) / float(runs)
def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# Using rnn()
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
variables_lib.global_variables_initializer().run()
delta_static = _timer(sess, ops)
# Using dynamic_rnn()
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
variables_lib.global_variables_initializer().run()
delta_dynamic = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f \t %f" %
(batch_size, max_time, num_units, use_gpu, delta_static, delta_dynamic,
delta_dynamic / delta_static))
return delta_static, delta_dynamic
def _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell_impl.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + [final_state],
trainable_variables)
return control_flow_ops.group(final_state, *(gradients + outputs))
def half_seq_len_vs_unroll_half_rnn_benchmark(batch_size, max_time, num_units,
use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
# Halve the sequence length, full static unroll
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t,
sequence_length / 2)
variables_lib.global_variables_initializer().run()
delta_half_seq_len = _timer(sess, ops)
# Halve the unroll size, don't use sequence length
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(
inputs_list_t[:(max_time // 2)], sequence_length / 2)
variables_lib.global_variables_initializer().run()
delta_unroll_half = _timer(sess, ops)
print("%d \t %d \t\t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_half_seq_len,
delta_unroll_half, delta_half_seq_len / delta_unroll_half))
return delta_half_seq_len, delta_unroll_half
def _concat_state_vs_tuple_state_rnn_benchmark(inputs_list_t, sequence_length,
state_is_tuple):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell_impl.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, final_state = rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
final_state = list(final_state) if state_is_tuple else [final_state]
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + final_state,
trainable_variables)
return control_flow_ops.group(*(final_state + gradients + outputs))
def concat_state_vs_tuple_state_rnn_benchmark(batch_size, max_time, num_units,
use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
# Run with concatenated states (default)
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=False)
variables_lib.global_variables_initializer().run()
delta_concat_state = _timer(sess, ops)
# Run with tuple states (new)
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=True)
variables_lib.global_variables_initializer().run()
delta_tuple_state = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_concat_state,
delta_tuple_state, delta_concat_state / delta_tuple_state))
return delta_concat_state, delta_tuple_state
def _dynamic_rnn_swap_memory_benchmark(inputs_t, sequence_length, swap_memory):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell_impl.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.dynamic_rnn(
cell,
inputs_t,
sequence_length=sequence_length,
swap_memory=swap_memory,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
return control_flow_ops.group(final_state, outputs, *gradients)
def dynamic_rnn_swap_memory_benchmark(batch_size, max_time, num_units):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# No memory swap
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=False)
variables_lib.global_variables_initializer().run()
no_swap = _timer(sess, ops)
# Memory swap
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=True)
variables_lib.global_variables_initializer().run()
swap = _timer(sess, ops)
print("%d \t %d \t %d \t %f \t %f \t %f" %
(batch_size, max_time, num_units, no_swap, swap, swap / no_swap))
return no_swap, swap
def rnn_long_sequence_benchmark(batch_size, seqlen, num_units, dynamic,
swap_memory, nn):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = [seqlen for _ in range(batch_size)]
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(seqlen)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
for _ in range(nn):
if dynamic:
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=swap_memory)
variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
else:
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f" % (batch_size, seqlen, num_units,
dynamic, elapsed,
elapsed / seqlen))
class BenchmarkRNN(test.Benchmark):
def benchmarkGraphCreationStaticVsDynamicLSTM(self):
print("Graph Creation: Static Unroll vs. Dynamic Unroll LSTM")
print("max_t \t dt(static) \t dt(dynamic) \t dt(dynamic)/dt(static)")
for max_time in (1, 25, 50):
s_dt, d_dt = graph_creation_static_vs_dynamic_rnn_benchmark(max_time)
self.report_benchmark(
name="graph_creation_time_static_T%02d" % max_time,
iters=5,
wall_time=s_dt)
self.report_benchmark(
name="graph_creation_time_dynamic_T%02d" % max_time,
iters=5,
wall_time=d_dt)
def benchmarkStaticUnrollVsDynamicFlowLSTM(self):
print("Calculation: Static Unroll with Dynamic Flow LSTM "
"vs. Dynamic Unroll LSTM")
print("batch \t max_t \t units \t gpu \t dt(static) \t dt(dynamic) "
"\t dt(dynamic)/dt(static)")
for batch_size in (256,):
for max_time in (50,):
for num_units in (512, 256, 128):
for use_gpu in (False, True):
s_dt, d_dt = static_vs_dynamic_rnn_benchmark(batch_size, max_time,
num_units, use_gpu)
self.report_benchmark(
name="static_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=s_dt)
self.report_benchmark(
name="dynamic_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=d_dt)
def benchmarkDynamicLSTMNoMemorySwapVsMemorySwap(self):
print("Calculation: Dynamic LSTM No Memory Swap vs. Memory Swap")
print("batch \t max_t \t units \t no_swap \t swap \t swap/no_swap")
for batch_size in (256, 512):
for max_time in (100,):
for num_units in (512, 256, 128):
no_swap, swap = dynamic_rnn_swap_memory_benchmark(batch_size,
max_time, num_units)
self.report_benchmark(
name="dynamic_lstm_no_memory_swap_T%02d_B%03d_N%03d" %
(max_time, batch_size, num_units),
iters=20,
wall_time=no_swap)
self.report_benchmark(
name="dynamic_lstm_with_memory_swap_T%02d_B%03d_N%03d" %
(max_time, batch_size, num_units),
iters=20,
wall_time=swap)
def benchmarkStaticUnrollHalfSequenceLengthVsHalfUnroll(self):
print("Calculation: Static Unroll with Halved Sequence Length "
"vs. Half Static Unroll")
print("batch \t full_t \t units \t gpu \t dt(half_seq_len) "
"\t dt(unroll_half) \t dt(half_seq_len)/dt(unroll_half)")
for batch_size in (128,):
for max_time in (50,):
for num_units in (256,):
for use_gpu in (False, True):
s_dt, d_dt = half_seq_len_vs_unroll_half_rnn_benchmark(batch_size,
max_time,
num_units,
use_gpu)
self.report_benchmark(
name="half_seq_len_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=s_dt)
self.report_benchmark(
name="unroll_half_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=d_dt)
def benchmarkStaticUnrollStateConcatVsStateTuple(self):
print("Calculation: Static Unroll with Concatenated State "
"vs. Tuple State")
print("batch \t time \t units \t gpu \t dt(concat_state) "
"\t dt(tuple_state) \t dt(concat_state)/dt(tuple_state)")
    for batch_size in (16, 128):
for max_time in (50,):
        for num_units in (16, 128):
for use_gpu in (False, True):
c_dt, t_dt = concat_state_vs_tuple_state_rnn_benchmark(batch_size,
max_time,
num_units,
use_gpu)
self.report_benchmark(
name="concat_state_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=c_dt)
self.report_benchmark(
name="tuple_state_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=t_dt)
def _benchmarkDynamicLSTMMemorySwapLongSeq(self):
"""The memory swapping test for the SOSP submission."""
print("Calculation: Long LSTM Sequence")
print("batch \t len \t units \t dynamic \t elapsed_t \t elapsed_t/len")
batch_size = 512
seqlen = 800
num_units = 512
dynamic = True
swap_memory = True
# Some warming up.
if swap_memory:
rnn_long_sequence_benchmark(batch_size, seqlen, num_units,
dynamic, swap_memory, 2)
# Measure the performance.
for slen in xrange(100, 1100, 100):
rnn_long_sequence_benchmark(batch_size, slen, num_units, dynamic,
swap_memory, 3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/rnn_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for integer division by zero."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ZeroDivisionTest(test.TestCase):
@test_util.run_deprecated_v1
def testZeros(self):
with test_util.use_gpu():
for dtype in dtypes.uint8, dtypes.int16, dtypes.int32, dtypes.int64:
zero = constant_op.constant(0, dtype=dtype)
one = constant_op.constant(1, dtype=dtype)
bads = [one // zero]
if dtype in (dtypes.int32, dtypes.int64):
bads.append(one % zero)
for bad in bads:
try:
result = self.evaluate(bad)
except errors_impl.OpError as e:
# Ideally, we'd get a nice exception. In theory, this should only
# happen on CPU, but 32 bit integer GPU division is actually on
# CPU due to a placer bug.
# TODO(irving): Make stricter once the placer bug is fixed.
self.assertIn('Integer division by zero', str(e))
else:
# On the GPU, integer division by zero produces all bits set.
# But apparently on some GPUs "all bits set" for 64 bit division
# means 32 bits set, so we allow 0xffffffff as well. This isn't
# very portable, so we may need to expand this list if other GPUs
# do different things.
#
# XLA constant folds integer division by zero to 1.
self.assertTrue(test.is_gpu_available())
self.assertIn(result, (-1, 1, 0xff, 0xffffffff))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/zero_division_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Stack and ParallelStack Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def np_split_squeeze(array, axis):
axis_len = array.shape[axis]
return [
np.squeeze(
arr, axis=(axis,)) for arr in np.split(
array, axis_len, axis=axis)
]
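# A small usage sketch of np_split_squeeze (added for illustration; not part
# of the original test file): splitting a (2, 3) array along axis 0 yields
# two arrays of shape (3,).
def _np_split_squeeze_example():
  arr = np.arange(6).reshape(2, 3)
  parts = np_split_squeeze(arr, axis=0)
  # parts == [array([0, 1, 2]), array([3, 4, 5])]
  return parts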
class StackOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testSimple(self):
np.random.seed(7)
with self.session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
for dtype in [np.bool, np.float32, np.int32, np.int64]:
data = np.random.randn(*shape).astype(dtype)
# Convert [data[0], data[1], ...] separately to tensorflow
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
# Stack back into a single tensorflow tensor
c = array_ops.stack(xs)
self.assertAllEqual(c.eval(), data)
@test_util.run_deprecated_v1
def testSimpleParallelCPU(self):
np.random.seed(7)
with self.session(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
data = np.random.randn(*shape).astype(np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c.eval(), data)
@test_util.run_deprecated_v1
def testSimpleParallelGPU(self):
np.random.seed(7)
with self.session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
data = np.random.randn(*shape).astype(np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c.eval(), data)
@test_util.run_deprecated_v1
def testConst(self):
np.random.seed(7)
with self.session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
for dtype in [np.bool, np.float32, np.int16, np.int32, np.int64]:
data = np.random.randn(*shape).astype(dtype)
# Stack back into a single tensorflow tensor directly using np array
c = array_ops.stack(data)
# This is implemented via a Const:
self.assertEqual(c.op.type, "Const")
self.assertAllEqual(c.eval(), data)
          # Python lists also work for the 1-D case:
if len(shape) == 1:
data_list = list(data)
cl = array_ops.stack(data_list)
self.assertEqual(cl.op.type, "Const")
self.assertAllEqual(cl.eval(), data)
# Verify that shape induction works with shapes produced via const stack
a = constant_op.constant([1, 2, 3, 4, 5, 6])
b = array_ops.reshape(a, array_ops.stack([2, 3]))
self.assertAllEqual(b.get_shape(), [2, 3])
@test_util.run_deprecated_v1
def testConstParallelCPU(self):
np.random.seed(7)
with self.session(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape).astype(np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl.eval(), data)
data = np.random.randn(*shape).astype(np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c.eval(), data)
@test_util.run_deprecated_v1
def testConstParallelGPU(self):
np.random.seed(7)
with self.session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape).astype(np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl.eval(), data)
data = np.random.randn(*shape).astype(np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c.eval(), data)
@test_util.run_deprecated_v1
def testGradientsAxis0(self):
np.random.seed(7)
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
shapes = [shape[1:]] * shape[0]
with self.cached_session(use_gpu=True):
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
c = array_ops.stack(xs)
err = gradient_checker.compute_gradient_error(xs, shapes, c, shape)
self.assertLess(err, 1e-6)
@test_util.run_deprecated_v1
def testGradientsAxis1(self):
np.random.seed(7)
for shape in (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
shapes = [shape[1:]] * shape[0]
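      # Stacking along axis=1 inserts the new (list) dimension at position 1
      # of the output shape, e.g. inputs of shape (3,) stack to (3, len(xs)).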
out_shape = list(shape[1:])
out_shape.insert(1, shape[0])
with self.cached_session(use_gpu=True):
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
c = array_ops.stack(xs, axis=1)
err = gradient_checker.compute_gradient_error(xs, shapes, c, out_shape)
self.assertLess(err, 1e-6)
@test_util.run_deprecated_v1
def testZeroSizeCPU(self):
# Verify that stack doesn't crash for zero size inputs
with self.session(use_gpu=False):
for shape in (0,), (3, 0), (0, 3):
x = np.zeros((2,) + shape).astype(np.int32)
p = array_ops.stack(list(x)).eval()
self.assertAllEqual(p, x)
p = array_ops.parallel_stack(list(x)).eval()
self.assertAllEqual(p, x)
@test_util.run_deprecated_v1
def testZeroSizeGPU(self):
# Verify that stack doesn't crash for zero size inputs
with self.session(use_gpu=True):
for shape in (0,), (3, 0), (0, 3):
x = np.zeros((2,) + shape).astype(np.int32)
p = array_ops.stack(list(x)).eval()
self.assertAllEqual(p, x)
p = array_ops.parallel_stack(list(x)).eval()
self.assertAllEqual(p, x)
@test_util.run_deprecated_v1
def testAxis0DefaultCPU(self):
with self.session(use_gpu=False):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = array_ops.stack(t).eval()
parallel_stacked = array_ops.parallel_stack(t).eval()
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
@test_util.run_deprecated_v1
def testAxis0DefaultGPU(self):
with self.session(use_gpu=True):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = array_ops.stack(t).eval()
parallel_stacked = array_ops.parallel_stack(t).eval()
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
def testAgainstNumpy(self):
# For 1 to 5 dimensions.
for i in range(1, 6):
expected = np.random.random(np.random.permutation(i) + 1)
      # For all the possible axes to split along, including negative indices.
for j in range(-i, i):
test_arrays = np_split_squeeze(expected, j)
with self.cached_session(use_gpu=True):
actual_pack = array_ops.stack(test_arrays, axis=j)
self.assertEqual(expected.shape, actual_pack.get_shape())
actual_pack = self.evaluate(actual_pack)
actual_stack = array_ops.stack(test_arrays, axis=j)
self.assertEqual(expected.shape, actual_stack.get_shape())
actual_stack = self.evaluate(actual_stack)
self.assertNDArrayNear(expected, actual_stack, 1e-6)
def testDimOutOfRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
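    # Stacking rank-1 tensors yields a rank-2 result, so valid axes lie in
    # [-2, 2).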
with self.assertRaisesRegexp(ValueError, r"axis = 2 not in \[-2, 2\)"):
array_ops.stack(t, axis=2)
def testDimOutOfNegativeRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegexp(ValueError, r"axis = -3 not in \[-2, 2\)"):
array_ops.stack(t, axis=-3)
class AutomaticStackingTest(test.TestCase):
@test_util.run_deprecated_v1
def testSimple(self):
with self.session(use_gpu=True):
self.assertAllEqual(
[1, 0, 2],
ops.convert_to_tensor([1, constant_op.constant(0), 2]).eval())
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor(
[[0, 0, 0], [0, constant_op.constant(1), 0],
[0, 0, 0]]).eval())
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor(
[[0, 0, 0], constant_op.constant([0, 1, 0]),
[0, 0, 0]]).eval())
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([
constant_op.constant([0, 0, 0]),
constant_op.constant([0, 1, 0]),
constant_op.constant([0, 0, 0])
]).eval())
def testWithNDArray(self):
with self.session(use_gpu=True):
result = ops.convert_to_tensor([[[0., 0.],
constant_op.constant([1., 1.])],
np.array(
[[2., 2.], [3., 3.]],
dtype=np.float32)])
self.assertAllEqual([[[0., 0.], [1., 1.]], [[2., 2.], [3., 3.]]],
self.evaluate(result))
@test_util.run_deprecated_v1
def testVariable(self):
with self.session(use_gpu=True):
v = variables.Variable(17)
result = ops.convert_to_tensor([[0, 0, 0], [0, v, 0], [0, 0, 0]])
v.initializer.run()
self.assertAllEqual([[0, 0, 0], [0, 17, 0], [0, 0, 0]],
self.evaluate(result))
v.assign(38).op.run()
self.assertAllEqual([[0, 0, 0], [0, 38, 0], [0, 0, 0]],
self.evaluate(result))
def testDtype(self):
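    # With no explicit dtype, a tensor dtype in the list fixes the result
    # dtype; mixing two different tensor dtypes without an explicit dtype
    # raises a TypeError (exercised at the end of this test).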
t_0 = ops.convert_to_tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])
self.assertEqual(dtypes.float32, t_0.dtype)
t_1 = ops.convert_to_tensor([[0., 0., 0.], constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]])
self.assertEqual(dtypes.float64, t_1.dtype)
t_2 = ops.convert_to_tensor(
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t_2.dtype)
t_3 = ops.convert_to_tensor(
[[0., 0., 0.],
constant_op.constant([0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
],
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, t_3.dtype)
t_4 = ops.convert_to_tensor(
[constant_op.constant([0., 0., 0.], dtype=dtypes.float64)],
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, t_4.dtype)
with self.assertRaises(TypeError):
ops.convert_to_tensor([
constant_op.constant(
[0., 0., 0.], dtype=dtypes.float32), constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
])
def testDtypeConversionWhenTensorDtypeMismatch(self):
t_0 = ops.convert_to_tensor([0., 0., 0.])
self.assertEqual(dtypes.float32, t_0.dtype)
t_1 = ops.convert_to_tensor([0, 0, 0])
self.assertEqual(dtypes.int32, t_1.dtype)
t_2 = ops.convert_to_tensor([t_0, t_0, t_1], dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t_2.dtype)
@test_util.run_deprecated_v1
def testPlaceholder(self):
with self.session(use_gpu=True):
# Test using placeholder with a defined shape.
ph_0 = array_ops.placeholder(dtypes.int32, shape=[])
result_0 = ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
self.assertAllEqual(
[[0, 0, 0], [0, 1, 0], [0, 0, 0]], result_0.eval(feed_dict={ph_0: 1}))
self.assertAllEqual(
[[0, 0, 0], [0, 2, 0], [0, 0, 0]], result_0.eval(feed_dict={ph_0: 2}))
# Test using placeholder with an undefined shape.
ph_1 = array_ops.placeholder(dtypes.int32)
result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
self.assertAllEqual(
[[0, 0, 0], [0, 1, 0], [0, 0, 0]], result_1.eval(feed_dict={ph_1: 1}))
self.assertAllEqual(
[[0, 0, 0], [0, 2, 0], [0, 0, 0]], result_1.eval(feed_dict={ph_1: 2}))
@test_util.run_deprecated_v1
def testShapeErrors(self):
# Static shape error.
ph_0 = array_ops.placeholder(dtypes.int32, shape=[1])
with self.assertRaises(ValueError):
ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
# Dynamic shape error.
ph_1 = array_ops.placeholder(dtypes.int32)
result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
with self.session(use_gpu=True):
with self.assertRaises(errors_impl.InvalidArgumentError):
result_1.eval(feed_dict={ph_1: [1]})
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/stack_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class TraceTest(test.TestCase):
def setUp(self):
    np.random.seed(0)
def compare(self, x):
np_ans = np.trace(x, axis1=-2, axis2=-1)
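    # np.trace over the last two axes mirrors math_ops.trace, which computes
    # the trace of the innermost 2-D matrices for batched inputs.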
with self.cached_session(use_gpu=True):
tf_ans = math_ops.trace(x).eval()
self.assertAllClose(tf_ans, np_ans)
@test_util.run_deprecated_v1
def testTrace(self):
for dtype in [np.int32, np.float32, np.float64]:
for shape in [[2, 2], [2, 3], [3, 2], [2, 3, 2], [2, 2, 2, 3]]:
x = np.random.rand(np.prod(shape)).astype(dtype).reshape(shape)
self.compare(x)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/trace_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for depthwise convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def ConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
convolution parameters.
"""
input_sizes = [[4, 5, 5, 48], [4, 8, 8, 84], [4, 17, 17, 48], [4, 9, 27, 8],
[4, 31, 31, 7], [4, 35, 35, 2], [4, 147, 147, 2],
[3, 299, 299, 3], [5, 183, 183, 1]]
  filter_sizes = [[1, 1, 48, 2], [1, 3, 84, 1], [3, 1, 48, 4], [3, 3, 8, 1],
                  [3, 3, 7, 1], [5, 5, 2, 1], [3, 3, 2, 8], [2, 2, 3, 8],
                  [5, 5, 1, 2]]
out_sizes = [[4, 5, 5, 96], [4, 8, 8, 84], [4, 17, 17, 192], [4, 9, 27, 8],
[4, 31, 31, 7], [4, 35, 35, 2], [4, 49, 49, 16],
[3, 150, 150, 24], [5, 92, 92, 2]]
strides = [1, 1, 1, 1, 1, 1, 3, 2, 2]
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
  paddings = [SAME, SAME, SAME, SAME, SAME, SAME, VALID, SAME, SAME]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
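# For reference, the first config yielded above is:
#   ([4, 5, 5, 48], [1, 1, 48, 2], [4, 5, 5, 96], 1, "SAME")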
def CheckGradConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
  compute_gradient_error() is very expensive, so the configs should be
  relatively small.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
convolution parameters.
"""
input_sizes = [[2, 5, 8, 1], [4, 5, 5, 1], [2, 4, 4, 2], [1, 15, 15, 2],
[2, 15, 16, 1]]
filter_sizes = [[4, 4, 1, 2], [2, 2, 1, 2], [3, 1, 2, 2], [1, 3, 2, 1],
[3, 3, 1, 2]]
out_sizes = [[2, 5, 8, 2], [4, 2, 2, 2], [2, 4, 4, 4], [1, 15, 15, 2],
[2, 5, 5, 2]]
strides = [1, 2, 1, 1, 3]
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [SAME, VALID, SAME, SAME, VALID]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
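# For reference, the first config yielded above is:
#   ([2, 5, 8, 1], [4, 4, 1, 2], [2, 5, 8, 2], 1, "SAME")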
class DepthwiseConv2DTest(test.TestCase):
  # This tests that depthwise_conv2d and depthwise_conv2d_native produce the
  # same results. It also tests that the NCHW and NHWC formats agree, by
  # checking that depthwise_conv2d_native with 'NCHW' format (with
  # transposition) matches the 'NHWC' format used by the higher level
  # interface.
def _VerifyValues(self,
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
data_type,
use_gpu,
grouped_conv=False,
data_format="NHWC"):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
data_type: The data type to use.
use_gpu: Whether to use GPU.
grouped_conv: Whether to use cuDNN 7's grouped convolution.
data_format: The data_format of the input. "NHWC" or "NCHW".
"""
input_size = 1
filter_size = 1
for s in tensor_in_sizes:
input_size *= s
for s in filter_in_sizes:
filter_size *= s
# Initializes the input and filter tensor with numbers incrementing from 1.
x1 = [f * 1.0 / input_size for f in range(1, input_size + 1)]
x2 = [f * 1.0 / filter_size for f in range(1, filter_size + 1)]
ops.reset_default_graph()
graph = ops.get_default_graph()
with self.session(graph=graph, use_gpu=use_gpu) as sess:
tolerance = {
dtypes.float16: 4e-2,
dtypes.float32: 1e-5,
dtypes.float64: 1e-12,
}[data_type]
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=data_type)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=data_type)
native_t1 = t1
strides = [1, stride, stride, 1]
if data_format == "NCHW":
# Transpose from NHWC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
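      # _kernel_label_map (a private graph API) forces the
      # cudnn_grouped_convolution kernel variant when grouped_conv is set.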
with sess.graph._kernel_label_map({
"DepthwiseConv2dNative": "cudnn_grouped_convolution"
} if grouped_conv else {}):
conv_native = nn_ops.depthwise_conv2d_native(
native_t1,
t2,
strides=strides,
data_format=data_format,
padding=padding)
if data_format == "NCHW":
# Transpose back from NCHW to NHWC
conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
try:
native_result = self.evaluate(conv_native)
except errors.InvalidArgumentError as e:
# Grouped convolution kernel is only registered for cuDNN 7. Silently
# return when we are running on an earlier version or without GPU.
if e.message.startswith(
"No OpKernel was registered to support Op 'DepthwiseConv2dNative'"):
tf_logging.warn("Skipping grouped convolution test")
return
raise e
conv_interface = nn_impl.depthwise_conv2d(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
interface_result = self.evaluate(conv_interface)
tf_logging.info(
"data_type: %r, use_gpu: %r, grouped_conv: %r, max diff = %f",
data_type, use_gpu, grouped_conv,
np.amax(np.absolute(native_result - interface_result)))
self.assertArrayNear(
np.ravel(native_result), np.ravel(interface_result), tolerance)
self.assertShapeEqual(native_result, conv_native)
self.assertShapeEqual(native_result, conv_interface)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2D(self):
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2D, %dth config: %r * %r, stride: %d, padding: "
"%s", index, input_size, filter_size, stride, padding)
for data_type in [dtypes.float32, dtypes.float64]:
tf_logging.info("Testing without grouped_conv")
self._VerifyValues(
input_size, filter_size, stride, padding, data_type, use_gpu=True)
tf_logging.info("Testing with grouped_conv")
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
data_type,
use_gpu=True,
grouped_conv=True)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DWithUnknownShape(self):
# GitHub issue 22110.
if not test.is_gpu_available():
return
with self.session(use_gpu=True):
x = array_ops.placeholder(dtypes.float32)
f = np.ones([1, 1, 1, 1], np.float32)
v = nn_impl.depthwise_conv2d(
x, f, [1, 1, 1, 1], "VALID", rate=[2, 1], data_format="NCHW")
self.assertAllEqual(
np.ones([1, 1, 1, 1], np.float32),
v.eval(feed_dict={x: np.ones([1, 1, 1, 1], np.float32)}))
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DFormat(self):
if not test.is_gpu_available():
return
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DFormat, %dth config: %r * %r, stride: %d, "
"padding: %s", index, input_size, filter_size, stride, padding)
for data_type in [dtypes.float32, dtypes.float64]:
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
data_type,
use_gpu=True,
data_format="NCHW")
# This is testing against hand calculated results.
def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected, use_gpu):
"""Verifies the output values of the depthwise convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
use_gpu: Whether to use GPU.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
    # Initializes the input tensors with arrays containing numbers
    # incrementing from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.cached_session(use_gpu=use_gpu) as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
conv = nn_ops.depthwise_conv2d_native(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = self.evaluate(conv)
tf_logging.info("value = %r", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output,
use_gpu=False)
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output,
use_gpu=True)
# Gradient checkers. This tests depthwise gradient computations for both
# BackpropFilter and BackpropInput by comparing gradients computed by the
  # depthwise gradient ops with the gradients computed numerically (details
  # can be found in compute_gradient_error()).
  # Note this check is very expensive, so the input should not be too big.
def _ConstructAndTestGradient(self,
input_shape,
filter_shape,
output_shape,
stride,
padding,
data_type,
test_input,
use_gpu,
grouped_conv=False,
data_format="NHWC"):
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
ops.reset_default_graph()
graph = ops.get_default_graph()
with self.session(graph=graph, use_gpu=use_gpu) as sess:
tolerance = {
dtypes.float16: 4e-0,
dtypes.float32: 8e-4,
dtypes.float64: 1e-12,
}[data_type]
input_tensor = constant_op.constant(
input_data, shape=input_shape, dtype=data_type, name="input")
filter_tensor = constant_op.constant(
filter_data, shape=filter_shape, dtype=data_type, name="filter")
native_input = input_tensor
strides = [1, stride, stride, 1]
if data_format == "NCHW":
# Transpose from NHWC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_input = array_ops.transpose(input_tensor, [0, 3, 1, 2])
input_shape = [
input_shape[0], input_shape[3], input_shape[1], input_shape[2]
]
output_shape = [
output_shape[0], output_shape[3], output_shape[1], output_shape[2]
]
strides = [1, 1, stride, stride]
with sess.graph._kernel_label_map({
"DepthwiseConv2dNative": "cudnn_grouped_convolution",
"DepthwiseConv2dNativeBackpropInput": "cudnn_grouped_convolution",
"DepthwiseConv2dNativeBackpropFilter": "cudnn_grouped_convolution",
} if grouped_conv else {}):
depthwise_conv2d = nn_ops.depthwise_conv2d_native(
native_input,
filter_tensor,
strides,
padding,
data_format=data_format,
name="depthwise_conv2d")
self.assertEqual(output_shape, depthwise_conv2d.get_shape())
try:
if test_input:
err = gradient_checker.compute_gradient_error(
native_input, input_shape, depthwise_conv2d, output_shape)
else:
err = gradient_checker.compute_gradient_error(
filter_tensor, filter_shape, depthwise_conv2d, output_shape)
except errors.InvalidArgumentError as e:
# Grouped convolution kernel is only registered for cuDNN 7. Silently
# return when we are running on an earlier version or without GPU.
if grouped_conv and e.message.startswith(
"No OpKernel was registered to support Op 'DepthwiseConv2dNative'"):
tf_logging.warn("Skipping grouped convolution test")
return
raise e
tf_logging.info(
"data_type: %r, use_gpu: %r, grouped_conv: %r, error = %f", data_type,
use_gpu, grouped_conv, err)
self.assertLess(err, tolerance)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DInputGrad(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DInputGrad, %dth config: %r * %r, stride: %d, "
"padding: %s", index, input_size, filter_size, stride, padding)
for data_type in [dtypes.float32, dtypes.float64]:
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=True,
use_gpu=True)
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=True,
use_gpu=True,
grouped_conv=True)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DInputGradFormat(self):
if not test.is_gpu_available():
return
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DInputGradFormat, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
for data_type in [dtypes.float32, dtypes.float64]:
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=True,
use_gpu=True,
data_format="NCHW")
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DFilterGrad(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DFilterGrad, %dth config: %r * %r, stride: "
"%d, padding: %s", index, input_size, filter_size, stride, padding)
for data_type in [dtypes.float32, dtypes.float64]:
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=False,
use_gpu=True)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DFilterGradFormat(self):
if not test.is_gpu_available():
return
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DFilterGradFormat, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
for data_type in [dtypes.float32, dtypes.float64]:
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=False,
use_gpu=True,
data_format="NCHW")
def _CompareBackpropInputFloat(self, input_sizes, filter_sizes, output_sizes,
stride, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_gpu):
with self.cached_session(use_gpu=use_gpu):
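        # t0 carries the input *shape*: depthwise_conv2d_native_backprop_input
        # takes input_sizes rather than the input values.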
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = self.evaluate(backprop)
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_gpu=True)
cpu_value = _GetVal(use_gpu=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def _CompareBackpropInputDouble(self, input_sizes, filter_sizes, output_sizes,
stride, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float64)
x2 = np.random.rand(*output_sizes).astype(np.float64)
def _GetVal(use_gpu):
with self.cached_session(use_gpu=use_gpu):
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = self.evaluate(backprop)
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_gpu=True)
cpu_value = _GetVal(use_gpu=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def testDepthwiseConv2DInputGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DInputGradCompare, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
self._CompareBackpropInputFloat(input_size, filter_size, output_size,
stride, padding)
self._CompareBackpropInputDouble(input_size, filter_size, output_size,
stride, padding)
def _CompareBackpropFilterFloat(self, input_sizes, filter_sizes, output_sizes,
stride, padding):
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_gpu):
with self.cached_session(use_gpu=use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = self.evaluate(backprop)
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_gpu=True)
cpu_value = _GetVal(use_gpu=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def _CompareBackpropFilterDouble(self, input_sizes, filter_sizes,
output_sizes, stride, padding):
x0 = np.random.rand(*input_sizes).astype(np.float64)
x2 = np.random.rand(*output_sizes).astype(np.float64)
def _GetVal(use_gpu):
with self.cached_session(use_gpu=use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = self.evaluate(backprop)
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_gpu=True)
cpu_value = _GetVal(use_gpu=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def testDepthwiseConv2DFilterGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DFilterGradCompare, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
self._CompareBackpropFilterFloat(input_size, filter_size, output_size,
stride, padding)
self._CompareBackpropFilterDouble(input_size, filter_size, output_size,
stride, padding)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/depthwise_conv_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SoftmaxOp and LogSoftmaxOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class SoftmaxTest(test.TestCase):
def _npSoftmax(self, features, dim=-1, log=False):
if dim == -1:
dim = len(features.shape) - 1
one_only_on_dim = list(features.shape)
one_only_on_dim[dim] = 1
is_fp16 = features.dtype == np.float16
if is_fp16:
      # Do the compute in fp32 and cast the result back to fp16.
features = features.astype(np.float32)
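    # Subtracting the per-dim max before exp() keeps it from overflowing;
    # e.g. a row of large equal values still yields uniform probabilities
    # instead of NaN.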
    max_on_dim = np.reshape(np.amax(features, axis=dim), one_only_on_dim)
    e = np.exp(features - max_on_dim)
softmax = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
if log:
res = np.log(softmax)
else:
res = softmax
if is_fp16:
res = res.astype(np.float16)
return res
def _testSoftmax(self, np_features, dim=-1, log=False, use_gpu=False):
    # A previous version of the code checked the op name rather than the op
    # type to distinguish between log and non-log. Use an arbitrary name to
    # catch this bug in the future.
name = "arbitrary"
np_softmax = self._npSoftmax(np_features, dim=dim, log=log)
with self.cached_session(use_gpu=use_gpu):
if log:
tf_softmax = nn_ops.log_softmax(np_features, axis=dim, name=name)
else:
tf_softmax = nn_ops.softmax(np_features, axis=dim, name=name)
out = self.evaluate(tf_softmax)
self.assertAllCloseAccordingToType(np_softmax, out)
self.assertShapeEqual(np_softmax, tf_softmax)
if not log:
# Bonus check: the softmaxes should add to one in dimension dim.
sum_along_dim = np.sum(out, axis=dim)
self.assertAllCloseAccordingToType(
np.ones(sum_along_dim.shape), sum_along_dim)
def _testAll(self, features):
self._testSoftmax(features, use_gpu=True)
self._testSoftmax(features, log=True, use_gpu=True)
self._testOverflow(use_gpu=True)
def testNpSoftmax(self):
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
# Batch 0: All exps are 1. The expected result is
# Softmaxes = [0.25, 0.25, 0.25, 0.25]
# LogSoftmaxes = [-1.386294, -1.386294, -1.386294, -1.386294]
#
# Batch 1:
# exps = [1., 2.718, 7.389, 20.085]
# sum = 31.192
# Softmaxes = exps / sum = [0.0320586, 0.08714432, 0.23688282, 0.64391426]
# LogSoftmaxes = [-3.44019 , -2.44019 , -1.44019 , -0.44019]
np_sm = self._npSoftmax(np.array(features))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, 0.25],
[0.0320586, 0.08714432, 0.23688282, 0.64391426]]),
np_sm,
rtol=1.e-5,
atol=1.e-5)
np_lsm = self._npSoftmax(np.array(features), log=True)
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[-3.4401897, -2.4401897, -1.4401897, -0.4401897]]),
np_lsm,
rtol=1.e-5,
atol=1.e-5)
def _testOverflow(self, use_gpu=False):
if use_gpu:
type = np.float32 # pylint: disable=redefined-builtin
else:
type = np.float64 # pylint: disable=redefined-builtin
max = np.finfo(type).max # pylint: disable=redefined-builtin
features = np.array([[1., 1., 1., 1.], [max, 1., 2., 3.]]).astype(type)
with self.cached_session(use_gpu=use_gpu):
tf_log_softmax = nn_ops.log_softmax(features)
out = self.evaluate(tf_log_softmax)
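      # With one entry at the dtype max, exp(x - max) is 1 for that entry and
      # underflows to 0 for the rest, so log_softmax is ~0 at the max entry
      # and ~(x - max) ~= -max elsewhere.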
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[0, -max, -max, -max]]),
out,
rtol=1.e-5,
atol=1.e-5)
def testFloat(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32))
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testFloatGPU(self):
if test.is_gpu_available(cuda_only=True):
rows = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
cols = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
for row, col in zip(rows, cols):
logging.info("Testing softmax float dtype in shape [%d, %d]", row, col)
data = np.random.rand(row, col)
self._testAll(data.astype(np.float32))
def testHalf(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16))
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testHalfGPU(self):
if test.is_gpu_available(cuda_only=True):
rows = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
cols = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
for row, col in zip(rows, cols):
logging.info("Testing softmax half dtype in shape [%d, %d]", row, col)
data = np.random.rand(row, col)
self._testAll(data.astype(np.float16))
def testDouble(self):
self._testSoftmax(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64))
self._testOverflow()
def test1DTensorAsInput(self):
self._testSoftmax(
np.array([3., 2., 3., 9.]).astype(np.float64), use_gpu=False)
self._testOverflow(use_gpu=False)
def test1DTensorAsInputNoReshape(self):
self._testSoftmax(
np.array([3., 2., 3., 9.]).astype(np.float64), use_gpu=False)
self._testOverflow(use_gpu=False)
def test3DTensorAsInput(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
use_gpu=False)
self._testOverflow(use_gpu=False)
def test3DTensorAsInputNoReshape(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
use_gpu=False)
self._testOverflow(use_gpu=False)
def testAlongFirstDimension(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
dim=0,
use_gpu=False)
self._testOverflow(use_gpu=False)
def testAlongSecondDimension(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
dim=1,
use_gpu=False)
self._testOverflow(use_gpu=False)
def testAlongNegativeDimension(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
dim=-2,
use_gpu=False)
self._testOverflow(use_gpu=False)
def testShapeInference(self):
op = nn_ops.softmax([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]])
self.assertEqual([3, 2, 4], op.get_shape())
@test_util.run_deprecated_v1
def testEmptyInput(self):
with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape=[0, 3])
self.assertEqual(0, array_ops.size(x).eval())
# reshape would raise if logits is empty
with self.assertRaises(errors_impl.InvalidArgumentError):
nn_ops.softmax(x, axis=0).eval()
def testDimTooLarge(self):
with self.cached_session():
      # Use a placeholder to make sure we get a runtime error instead of a
      # shape inference error.
dim = array_ops.placeholder_with_default(100, shape=[])
with self.assertRaises(errors_impl.InvalidArgumentError):
nn_ops.softmax([1., 2., 3., 4.], axis=dim).eval()
def testInvalidAxis(self):
# Test case for GitHub issue 22793.
with self.cached_session():
ones = array_ops.ones(shape=[2, 3])
with self.assertRaises(errors_impl.InvalidArgumentError):
nn_ops.softmax(ones, axis=2).eval()
@test_util.run_deprecated_v1
def testLargeDims(self):
# Make sure that we properly handle large inputs. See
# https://github.com/tensorflow/tensorflow/issues/4425 for details
for dims in [129, 256]:
ones = np.random.rand(dims, dims).astype(np.float32)
np_softmax = self._npSoftmax(ones)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu) as sess:
x = array_ops.placeholder(dtypes.float32)
y = nn_ops.softmax(x)
tf_softmax = sess.run(y, feed_dict={x: ones})
self.assertAllClose(tf_softmax, np_softmax)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/softmax_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.training.tracking import util as trackable_utils
from tensorflow.python.util import nest
class Plus1RNNCell(rnn_cell.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 5
@property
def state_size(self):
return 5
def __call__(self, input_, state, scope=None):
return (input_ + 1, state + 1)
class DummyMultiDimensionalLSTM(rnn_cell.RNNCell):
"""LSTM Cell generating (output, new_state) = (input + 1, state + 1).
  The input to this cell may have an arbitrary number of dimensions following
  the leading 'Time' and 'Batch' dimensions.
"""
def __init__(self, dims):
"""Initialize the Multi-dimensional LSTM cell.
Args:
dims: tuple that contains the dimensions of the output of the cell,
without including 'Time' or 'Batch' dimensions.
"""
if not isinstance(dims, tuple):
raise TypeError("The dimensions passed to DummyMultiDimensionalLSTM "
"should be a tuple of ints.")
self._dims = dims
self._output_size = tensor_shape.TensorShape(self._dims)
self._state_size = (tensor_shape.TensorShape(self._dims),
tensor_shape.TensorShape(self._dims))
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def __call__(self, input_, state, scope=None):
h, c = state
return (input_ + 1, (h + 1, c + 1))
class NestedRNNCell(rnn_cell.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1).
  The input, output and state of this cell are each a tuple of two tensors.
"""
@property
def output_size(self):
return (5, 5)
@property
def state_size(self):
return (6, 6)
def __call__(self, input_, state, scope=None):
h, c = state
x, y = input_
return ((x + 1, y + 1), (h + 1, c + 1))
class TestStateSaver(object):
def __init__(self, batch_size, state_size):
self._batch_size = batch_size
self._state_size = state_size
self.saved_state = {}
def state(self, name):
if isinstance(self._state_size, dict):
state_size = self._state_size[name]
else:
state_size = self._state_size
if isinstance(state_size, int):
state_size = (state_size,)
elif isinstance(state_size, tuple):
pass
else:
raise TypeError("state_size should either be an int or a tuple")
return array_ops.zeros((self._batch_size,) + state_size)
def save_state(self, name, state):
self.saved_state[name] = state
return array_ops.identity(state)
@property
def batch_size(self):
return self._batch_size
@property
def state_size(self):
return self._state_size
class TestStateSaverWithCounters(TestStateSaver):
"""Class wrapper around TestStateSaver.
  A dummy class used for testing static_state_saving_rnn. It helps test
  whether the save_state and state functions are called the same number of
  times when we evaluate the RNN cell's output and state, together or
  separately. It inherits from TestStateSaver and adds counters for those
  calls.
"""
@test_util.run_v1_only("b/124229375")
def __init__(self, batch_size, state_size):
super(TestStateSaverWithCounters, self).__init__(batch_size, state_size)
self._num_state_calls = variables_lib.VariableV1(0)
self._num_save_state_calls = variables_lib.VariableV1(0)
def state(self, name):
with ops.control_dependencies(
[state_ops.assign_add(self._num_state_calls, 1)]):
return super(TestStateSaverWithCounters, self).state(name)
def save_state(self, name, state):
with ops.control_dependencies([state_ops.assign_add(
self._num_save_state_calls, 1)]):
return super(TestStateSaverWithCounters, self).save_state(name, state)
@property
def num_state_calls(self):
return self._num_state_calls
@property
def num_save_state_calls(self):
return self._num_save_state_calls
class RNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
@test_util.run_v1_only("b/124229375")
def testInvalidSequenceLengthShape(self):
cell = Plus1RNNCell()
inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
with self.assertRaisesRegexp(ValueError, "must be a vector"):
rnn.static_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=4)
@test_util.run_v1_only("b/124229375")
def testRNN(self):
cell = Plus1RNNCell()
batch_size = 2
input_size = 5
max_length = 8 # unrolled up to this length
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape(), inp.get_shape())
self.assertEqual(out.dtype, inp.dtype)
with self.session(use_gpu=True) as sess:
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
# Outputs
for v in values[:-1]:
self.assertAllClose(v, input_value + 1.0)
# Final state
self.assertAllClose(values[-1],
max_length * np.ones(
(batch_size, input_size), dtype=np.float32))
@test_util.run_v1_only("b/124229375")
def testDropout(self):
cell = Plus1RNNCell()
full_dropout_cell = rnn_cell.DropoutWrapper(
cell, input_keep_prob=1e-6, seed=0)
(name, dep), = full_dropout_cell._checkpoint_dependencies
self.assertIs(dep, cell)
self.assertEqual("cell", name)
batch_size = 2
input_size = 5
max_length = 8
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
with variable_scope.variable_scope("share_scope"):
outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
with variable_scope.variable_scope("drop_scope"):
dropped_outputs, _ = rnn.static_rnn(
full_dropout_cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
self.assertEqual(out.dtype, inp.dtype)
with self.session(use_gpu=True) as sess:
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
full_dropout_values = sess.run(
dropped_outputs, feed_dict={
inputs[0]: input_value
})
for v in values[:-1]:
self.assertAllClose(v, input_value + 1.0)
for d_v in full_dropout_values[:-1]: # Add 1.0 to dropped_out (all zeros)
self.assertAllClose(d_v, np.ones_like(input_value))
@test_util.run_v1_only("b/124229375")
def testDynamicCalculation(self):
cell = Plus1RNNCell()
sequence_length = array_ops.placeholder(dtypes.int64)
batch_size = 2
input_size = 5
max_length = 8
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
with variable_scope.variable_scope("drop_scope"):
dynamic_outputs, dynamic_state = rnn.static_rnn(
cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)
self.assertEqual(len(dynamic_outputs), len(inputs))
with self.session(use_gpu=True) as sess:
input_value = np.random.randn(batch_size, input_size)
dynamic_values = sess.run(
dynamic_outputs,
feed_dict={
inputs[0]: input_value,
sequence_length: [2, 3]
})
dynamic_state_value = sess.run(
[dynamic_state],
feed_dict={
inputs[0]: input_value,
sequence_length: [2, 3]
})
# outputs are fully calculated for t = 0, 1
for v in dynamic_values[:2]:
self.assertAllClose(v, input_value + 1.0)
# outputs at t = 2 are zero for entry 0, calculated for entry 1
self.assertAllClose(dynamic_values[2],
np.vstack((np.zeros((input_size)),
1.0 + input_value[1, :])))
# outputs at t = 3+ are zero
for v in dynamic_values[3:]:
self.assertAllEqual(v, np.zeros_like(input_value))
# the final states are:
# entry 0: the values from the calculation at t=1
# entry 1: the values from the calculation at t=2
self.assertAllEqual(dynamic_state_value[0],
np.vstack((1.0 * (1 + 1) * np.ones((input_size)),
1.0 * (2 + 1) * np.ones((input_size)))))
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
with self.session(use_gpu=True, graph=ops.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
      # Check that all the variable names start with the proper scope.
variables_lib.global_variables_initializer()
all_vars = variables_lib.global_variables()
prefix = prefix or "rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf_logging.info("RNN with scope: %s (%s)" %
(prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
@test_util.run_v1_only("b/124229375")
def testScope(self):
def factory(scope):
cell = Plus1RNNCell()
batch_size = 2
input_size = 5
max_length = 8 # unrolled up to this length
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
return rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
class LSTMTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def testDType(self):
# Test case for GitHub issue 16228
# Not passing dtype in constructor results in default float32
lstm = rnn_cell.LSTMCell(10)
input_tensor = array_ops.ones([10, 50])
lstm.build(input_tensor.get_shape())
self.assertEqual(lstm._bias.dtype.base_dtype, dtypes.float32)
# Explicitly pass dtype in constructor
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
lstm = rnn_cell.LSTMCell(10, dtype=dtype)
input_tensor = array_ops.ones([10, 50])
lstm.build(input_tensor.get_shape())
self.assertEqual(lstm._bias.dtype.base_dtype, dtype)
@test_util.run_v1_only("b/124229375")
def testNoProjNoSharding(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
cell = rnn_cell.LSTMCell(
num_units, initializer=initializer, state_is_tuple=False)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
sess.run(outputs, feed_dict={inputs[0]: input_value})
@test_util.run_v1_only("b/124229375")
def testCellClipping(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
cell_clip=0.0,
initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs, feed_dict={inputs[0]: input_value})
for value in values:
# if cell c is clipped to 0, tanh(c) = 0 => m==0
self.assertAllEqual(value, np.zeros((batch_size, num_units)))
@test_util.run_v1_only("b/124229375")
def testNoProjNoShardingSimpleStateSaver(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, 2 * num_units)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=False,
initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
with variable_scope.variable_scope("share_scope"):
outputs, state = rnn.static_state_saving_rnn(
cell, inputs, state_saver=state_saver, state_name="save_lstm")
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
(last_state_value, saved_state_value) = sess.run(
[state, state_saver.saved_state["save_lstm"]],
feed_dict={
inputs[0]: input_value
})
self.assertAllEqual(last_state_value, saved_state_value)
@test_util.run_v1_only("b/124229375")
def testNoProjNoShardingTupleStateSaver(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, num_units)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=False,
initializer=initializer,
state_is_tuple=True)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
with variable_scope.variable_scope("share_scope"):
outputs, state = rnn.static_state_saving_rnn(
cell, inputs, state_saver=state_saver, state_name=("c", "m"))
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
last_and_saved_states = sess.run(
state + (state_saver.saved_state["c"], state_saver.saved_state["m"]),
feed_dict={
inputs[0]: input_value
})
self.assertEqual(4, len(last_and_saved_states))
self.assertAllEqual(last_and_saved_states[:2], last_and_saved_states[2:])
@test_util.run_v1_only("b/124229375")
def testNoProjNoShardingNestedTupleStateSaver(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(
batch_size, {
"c0": num_units,
"m0": num_units,
"c1": num_units + 1,
"m1": num_units + 1,
"c2": num_units + 2,
"m2": num_units + 2,
"c3": num_units + 3,
"m3": num_units + 3
})
def _cell(i):
return rnn_cell.LSTMCell(
num_units + i,
use_peepholes=False,
initializer=initializer,
state_is_tuple=True)
# This creates a state tuple which has 4 sub-tuples of length 2 each.
cell = rnn_cell.MultiRNNCell(
[_cell(i) for i in range(4)], state_is_tuple=True)
self.assertEqual(len(cell.state_size), 4)
for i in range(4):
self.assertEqual(len(cell.state_size[i]), 2)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
state_names = (("c0", "m0"), ("c1", "m1"), ("c2", "m2"), ("c3", "m3"))
with variable_scope.variable_scope("share_scope"):
outputs, state = rnn.static_state_saving_rnn(
cell, inputs, state_saver=state_saver, state_name=state_names)
self.assertEqual(len(outputs), len(inputs))
# Final output comes from _cell(3) which has state size num_units + 3
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units + 3])
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
last_states = sess.run(
list(nest.flatten(state)), feed_dict={
inputs[0]: input_value
})
saved_states = sess.run(
list(state_saver.saved_state.values()),
feed_dict={
inputs[0]: input_value
})
self.assertEqual(8, len(last_states))
self.assertEqual(8, len(saved_states))
flat_state_names = nest.flatten(state_names)
named_saved_states = dict(
zip(state_saver.saved_state.keys(), saved_states))
for i in range(8):
self.assertAllEqual(last_states[i],
named_saved_states[flat_state_names[i]])
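# Sketch: nest.flatten traverses nested structures depth-first, left to
# right, which is why the flattened final states above can be matched
# positionally against nest.flatten(state_names).
def _nest_flatten_order_sketch():
  names = (("c0", "m0"), ("c1", "m1"))
  assert nest.flatten(names) == ["c0", "m0", "c1", "m1"]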
@test_util.run_v1_only("b/124229375")
def testProjNoSharding(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=False)
outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
sess.run(outputs, feed_dict={inputs[0]: input_value})
def _testStateTupleWithProjAndSequenceLength(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
sequence_length = [4, 6]
with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
cell_notuple = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=False)
cell_tuple = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=True)
with variable_scope.variable_scope("root") as scope:
outputs_notuple, state_notuple = rnn.static_rnn(
cell_notuple,
inputs,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
scope.reuse_variables()
# TODO(ebrevdo): For this test, we ensure values are identical and
# therefore the weights here are tied. In the future, we may consider
# making the state_is_tuple property mutable so we can avoid
# having to do this - especially if users ever need to reuse
# the parameters from different RNNCell instances. Right now,
# this seems an unrealistic use case except for testing.
cell_tuple._scope = cell_notuple._scope # pylint: disable=protected-access
outputs_tuple, state_tuple = rnn.static_rnn(
cell_tuple,
inputs,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
self.assertEqual(len(outputs_notuple), len(inputs))
self.assertEqual(len(outputs_tuple), len(inputs))
self.assertTrue(isinstance(state_tuple, tuple))
self.assertTrue(isinstance(state_notuple, ops.Tensor))
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
outputs_notuple_v = sess.run(
outputs_notuple, feed_dict={
inputs[0]: input_value
})
outputs_tuple_v = sess.run(
outputs_tuple, feed_dict={
inputs[0]: input_value
})
self.assertAllEqual(outputs_notuple_v, outputs_tuple_v)
(state_notuple_v,) = sess.run(
(state_notuple,), feed_dict={
inputs[0]: input_value
})
state_tuple_v = sess.run(state_tuple, feed_dict={inputs[0]: input_value})
self.assertAllEqual(state_notuple_v, np.hstack(state_tuple_v))
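# Sketch of the identity checked above: with state_is_tuple=False the LSTM
# state is the concatenation [c, h] along axis 1, so np.hstack over the
# (c, h) state tuple reproduces the non-tuple state. Shapes below are
# illustrative only (num_units=3, num_proj=4, batch_size=2).
def _tuple_vs_concat_state_sketch():
  c = np.zeros((2, 3))  # cell state: [batch, num_units]
  h = np.zeros((2, 4))  # projected output: [batch, num_proj]
  assert np.hstack((c, h)).shape == (2, 7)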
@test_util.run_v1_only("b/124229375")
def testProjSharding(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
sess.run(outputs, feed_dict={inputs[0]: input_value})
@test_util.run_v1_only("b/124229375")
def testDoubleInput(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(dtypes.float64, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
outputs, _ = rnn.static_rnn(
cell,
inputs,
initial_state=cell.zero_state(batch_size, dtypes.float64))
self.assertEqual(len(outputs), len(inputs))
variables_lib.global_variables_initializer().run()
input_value = np.asarray(
np.random.randn(batch_size, input_size), dtype=np.float64)
values = sess.run(outputs, feed_dict={inputs[0]: input_value})
self.assertEqual(values[0].dtype, input_value.dtype)
@test_util.run_v1_only("b/124229375")
def testShardNoShardEquivalentOutput(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
initializer = init_ops.constant_initializer(0.001)
cell_noshard = rnn_cell.LSTMCell(
num_units,
num_proj=num_proj,
use_peepholes=True,
initializer=initializer,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
state_is_tuple=False)
cell_shard = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
initializer=initializer,
num_proj=num_proj,
state_is_tuple=False)
with variable_scope.variable_scope("noshard_scope"):
outputs_noshard, state_noshard = rnn.static_rnn(
cell_noshard, inputs, dtype=dtypes.float32)
with variable_scope.variable_scope("shard_scope"):
outputs_shard, state_shard = rnn.static_rnn(
cell_shard, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs_noshard), len(inputs))
self.assertEqual(len(outputs_noshard), len(outputs_shard))
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
feeds = dict((x, input_value) for x in inputs)
values_noshard = sess.run(outputs_noshard, feed_dict=feeds)
values_shard = sess.run(outputs_shard, feed_dict=feeds)
state_values_noshard = sess.run([state_noshard], feed_dict=feeds)
state_values_shard = sess.run([state_shard], feed_dict=feeds)
self.assertEqual(len(values_noshard), len(values_shard))
self.assertEqual(len(state_values_noshard), len(state_values_shard))
for (v_noshard, v_shard) in zip(values_noshard, values_shard):
self.assertAllClose(v_noshard, v_shard, atol=1e-3)
for (s_noshard, s_shard) in zip(state_values_noshard, state_values_shard):
self.assertAllClose(s_noshard, s_shard, atol=1e-3)
@test_util.run_v1_only("b/124229375")
def testDoubleInputWithDropoutAndDynamicCalculation(self):
"""Smoke test for using LSTM with doubles, dropout, dynamic calculation."""
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
sequence_length = array_ops.placeholder(dtypes.int64)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(dtypes.float64, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
dropout_cell = rnn_cell.DropoutWrapper(cell, 0.5, seed=0)
outputs, state = rnn.static_rnn(
dropout_cell,
inputs,
sequence_length=sequence_length,
initial_state=cell.zero_state(batch_size, dtypes.float64))
self.assertEqual(len(outputs), len(inputs))
variables_lib.global_variables_initializer().run(feed_dict={
sequence_length: [2, 3]
})
input_value = np.asarray(
np.random.randn(batch_size, input_size), dtype=np.float64)
values = sess.run(
outputs, feed_dict={
inputs[0]: input_value,
sequence_length: [2, 3]
})
state_value = sess.run(
[state], feed_dict={
inputs[0]: input_value,
sequence_length: [2, 3]
})
self.assertEqual(values[0].dtype, input_value.dtype)
self.assertEqual(state_value[0].dtype, input_value.dtype)
@test_util.run_v1_only("b/124229375")
def testSharingWeightsWithReuse(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
initializer_d = init_ops.random_uniform_initializer(
-1, 1, seed=self._seed + 1)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=False)
cell_d = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer_d,
state_is_tuple=False)
with variable_scope.variable_scope("share_scope"):
outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
with variable_scope.variable_scope("share_scope", reuse=True):
outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
with variable_scope.variable_scope("diff_scope"):
outputs2, _ = rnn.static_rnn(cell_d, inputs, dtype=dtypes.float32)
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
output_values = sess.run(
outputs0 + outputs1 + outputs2, feed_dict={
inputs[0]: input_value
})
outputs0_values = output_values[:max_length]
outputs1_values = output_values[max_length:2 * max_length]
outputs2_values = output_values[2 * max_length:]
self.assertEqual(len(outputs0_values), len(outputs1_values))
self.assertEqual(len(outputs0_values), len(outputs2_values))
for o1, o2, o3 in zip(outputs0_values, outputs1_values, outputs2_values):
# Same weights used by both RNNs so outputs should be the same.
self.assertAllEqual(o1, o2)
# Different weights used so outputs should be different.
self.assertTrue(np.linalg.norm(o1 - o3) > 1e-6)
@test_util.run_v1_only("b/124229375")
def testSharingWeightsWithDifferentNamescope(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=False)
with ops.name_scope("scope0"):
with variable_scope.variable_scope("share_scope"):
outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
with ops.name_scope("scope1"):
with variable_scope.variable_scope("share_scope", reuse=True):
outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
output_values = sess.run(
outputs0 + outputs1, feed_dict={
inputs[0]: input_value
})
outputs0_values = output_values[:max_length]
outputs1_values = output_values[max_length:]
self.assertEqual(len(outputs0_values), len(outputs1_values))
for out0, out1 in zip(outputs0_values, outputs1_values):
self.assertAllEqual(out0, out1)
@test_util.run_v1_only("b/124229375")
def testDynamicRNNAllowsUnknownTimeDimension(self):
inputs = array_ops.placeholder(dtypes.float32, shape=[1, None, 20])
cell = rnn_cell.GRUCell(30)
# Smoke test, this should not raise an error
rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
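# Sketch of why the smoke test above suffices: the placeholder leaves the
# time dimension (axis 1) unknown, and dynamic_rnn sizes its internal
# TensorArray from the runtime shape, so graph construction alone exercises
# the unknown-time-dimension path.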
@test_util.run_in_graph_and_eager_modes
def testDynamicRNNWithTupleStates(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
sequence_length = [4, 6]
in_graph_mode = not context.executing_eagerly()
with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
if in_graph_mode:
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
else:
inputs = max_length * [
constant_op.constant(
np.random.randn(batch_size, input_size).astype(np.float32))
]
inputs_c = array_ops.stack(inputs)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=True)
with variable_scope.variable_scope("root") as scope:
outputs_static, state_static = rnn.static_rnn(
cell,
inputs,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
scope.reuse_variables()
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs_c,
dtype=dtypes.float32,
time_major=True,
sequence_length=sequence_length,
scope=scope)
self.assertTrue(isinstance(state_static, rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(state_dynamic, rnn_cell.LSTMStateTuple))
self.assertEqual(state_static[0], state_static.c)
self.assertEqual(state_static[1], state_static.h)
self.assertEqual(state_dynamic[0], state_dynamic.c)
self.assertEqual(state_dynamic[1], state_dynamic.h)
if in_graph_mode:
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
outputs_static = sess.run(
outputs_static, feed_dict={
inputs[0]: input_value
})
outputs_dynamic = sess.run(
outputs_dynamic, feed_dict={
inputs[0]: input_value
})
state_static = sess.run(
state_static, feed_dict={
inputs[0]: input_value
})
state_dynamic = sess.run(
state_dynamic, feed_dict={
inputs[0]: input_value
})
if in_graph_mode:
self.assertAllEqual(outputs_static, outputs_dynamic)
else:
self.assertAllEqual(array_ops.stack(outputs_static), outputs_dynamic)
self.assertAllEqual(np.hstack(state_static), np.hstack(state_dynamic))
@test_util.run_in_graph_and_eager_modes
def testDynamicRNNWithNestedTupleStates(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
sequence_length = [4, 6]
in_graph_mode = not context.executing_eagerly()
with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
if in_graph_mode:
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
else:
inputs = max_length * [
constant_op.constant(
np.random.randn(batch_size, input_size).astype(np.float32))
]
inputs_c = array_ops.stack(inputs)
def _cell(i):
return rnn_cell.LSTMCell(
num_units + i,
use_peepholes=True,
num_proj=num_proj + i,
initializer=initializer,
state_is_tuple=True)
# This creates a state tuple which has 4 sub-tuples of length 2 each.
cell = rnn_cell.MultiRNNCell(
[_cell(i) for i in range(4)], state_is_tuple=True)
self.assertEqual(len(cell.state_size), 4)
for i in range(4):
self.assertEqual(len(cell.state_size[i]), 2)
test_zero = cell.zero_state(1, dtypes.float32)
self.assertEqual(len(test_zero), 4)
for i in range(4):
self.assertEqual(test_zero[i][0].get_shape()[1], cell.state_size[i][0])
self.assertEqual(test_zero[i][1].get_shape()[1], cell.state_size[i][1])
with variable_scope.variable_scope("root") as scope:
outputs_static, state_static = rnn.static_rnn(
cell,
inputs,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
scope.reuse_variables()
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs_c,
dtype=dtypes.float32,
time_major=True,
sequence_length=sequence_length,
scope=scope)
if in_graph_mode:
input_value = np.random.randn(batch_size, input_size)
variables_lib.global_variables_initializer().run()
outputs_static = sess.run(
outputs_static, feed_dict={
inputs[0]: input_value
})
outputs_dynamic = sess.run(
outputs_dynamic, feed_dict={
inputs[0]: input_value
})
state_static = sess.run(
nest.flatten(state_static), feed_dict={
inputs[0]: input_value
})
state_dynamic = sess.run(
nest.flatten(state_dynamic), feed_dict={
inputs[0]: input_value
})
if in_graph_mode:
self.assertAllEqual(outputs_static, outputs_dynamic)
else:
self.assertAllEqual(array_ops.stack(outputs_static), outputs_dynamic)
state_static = nest.flatten(state_static)
state_dynamic = nest.flatten(state_dynamic)
self.assertAllEqual(np.hstack(state_static), np.hstack(state_dynamic))
def _testDynamicEquivalentToStaticRNN(self, use_sequence_length):
time_steps = 8
num_units = 3
num_proj = 4
input_size = 5
batch_size = 2
input_values = np.random.randn(time_steps, batch_size, input_size).astype(
np.float32)
if use_sequence_length:
sequence_length = np.random.randint(0, time_steps, size=batch_size)
else:
sequence_length = None
in_graph_mode = not context.executing_eagerly()
# TODO(b/68017812): Eager ignores operation seeds, so we need to create a
# single cell and reuse it across the static and dynamic RNNs. Remove this
# special case once the bug is fixed.
if not in_graph_mode:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
initializer=initializer,
num_proj=num_proj,
state_is_tuple=False)
########## Step 1: Run static graph and generate readouts
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
if in_graph_mode:
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
else:
concat_inputs = constant_op.constant(input_values)
inputs = array_ops.unstack(concat_inputs)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
# TODO(akshayka): Remove special case once b/68017812 is fixed.
if in_graph_mode:
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
initializer=initializer,
num_proj=num_proj,
state_is_tuple=False)
with variable_scope.variable_scope("dynamic_scope"):
outputs_static, state_static = rnn.static_rnn(
cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)
if in_graph_mode:
# Generate gradients and run sessions to obtain outputs
feeds = {concat_inputs: input_values}
# Initialize
variables_lib.global_variables_initializer().run(feed_dict=feeds)
# Generate gradients of sum of outputs w.r.t. inputs
static_gradients = gradients_impl.gradients(
outputs_static + [state_static], [concat_inputs])
# Generate gradients of individual outputs w.r.t. inputs
static_individual_gradients = nest.flatten([
gradients_impl.gradients(y, [concat_inputs])
for y in [outputs_static[0], outputs_static[-1], state_static]
])
# Generate gradients of individual variables w.r.t. inputs
trainable_variables = ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
assert len(trainable_variables) > 1, (
"Count of trainable variables: %d" % len(trainable_variables))
# pylint: disable=bad-builtin
static_individual_variable_gradients = nest.flatten([
gradients_impl.gradients(y, trainable_variables)
for y in [outputs_static[0], outputs_static[-1], state_static]
])
# Test forward pass
values_static = sess.run(outputs_static, feed_dict=feeds)
(state_value_static,) = sess.run((state_static,), feed_dict=feeds)
# Test gradients to inputs and variables w.r.t. outputs & final state
static_grad_values = sess.run(static_gradients, feed_dict=feeds)
static_individual_grad_values = sess.run(
static_individual_gradients, feed_dict=feeds)
static_individual_var_grad_values = sess.run(
static_individual_variable_gradients, feed_dict=feeds)
########## Step 2: Run dynamic graph and generate readouts
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
if in_graph_mode:
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
else:
concat_inputs = constant_op.constant(input_values)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
# TODO(akshayka): Remove this special case once b/68017812 is
# fixed.
if in_graph_mode:
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
initializer=initializer,
num_proj=num_proj,
state_is_tuple=False)
with variable_scope.variable_scope("dynamic_scope"):
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs=concat_inputs,
sequence_length=sequence_length,
time_major=True,
dtype=dtypes.float32)
split_outputs_dynamic = array_ops.unstack(outputs_dynamic, time_steps)
if in_graph_mode:
feeds = {concat_inputs: input_values}
# Initialize
variables_lib.global_variables_initializer().run(feed_dict=feeds)
# Generate gradients of sum of outputs w.r.t. inputs
dynamic_gradients = gradients_impl.gradients(
split_outputs_dynamic + [state_dynamic], [concat_inputs])
# Generate gradients of several individual outputs w.r.t. inputs
dynamic_individual_gradients = nest.flatten([
gradients_impl.gradients(y, [concat_inputs])
for y in [
split_outputs_dynamic[0], split_outputs_dynamic[-1],
state_dynamic
]
])
# Generate gradients of individual variables w.r.t. inputs
trainable_variables = ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
assert len(trainable_variables) > 1, (
"Count of trainable variables: %d" % len(trainable_variables))
dynamic_individual_variable_gradients = nest.flatten([
gradients_impl.gradients(y, trainable_variables)
for y in [
split_outputs_dynamic[0], split_outputs_dynamic[-1],
state_dynamic
]
])
# Test forward pass
values_dynamic = sess.run(split_outputs_dynamic, feed_dict=feeds)
(state_value_dynamic,) = sess.run((state_dynamic,), feed_dict=feeds)
# Test gradients to inputs and variables w.r.t. outputs & final state
dynamic_grad_values = sess.run(dynamic_gradients, feed_dict=feeds)
dynamic_individual_grad_values = sess.run(
dynamic_individual_gradients, feed_dict=feeds)
dynamic_individual_var_grad_values = sess.run(
dynamic_individual_variable_gradients, feed_dict=feeds)
########## Step 3: Comparisons
if not in_graph_mode:
values_static = outputs_static
values_dynamic = split_outputs_dynamic
state_value_static = state_static
state_value_dynamic = state_dynamic
self.assertEqual(len(values_static), len(values_dynamic))
for (value_static, value_dynamic) in zip(values_static, values_dynamic):
self.assertAllClose(value_static, value_dynamic)
self.assertAllClose(state_value_static, state_value_dynamic)
if in_graph_mode:
self.assertAllClose(static_grad_values, dynamic_grad_values)
self.assertEqual(
len(static_individual_grad_values),
len(dynamic_individual_grad_values))
self.assertEqual(
len(static_individual_var_grad_values),
len(dynamic_individual_var_grad_values))
for i, (a, b) in enumerate(
zip(static_individual_grad_values, dynamic_individual_grad_values)):
tf_logging.info("Comparing individual gradients iteration %d" % i)
self.assertAllClose(a, b)
for i, (a, b) in enumerate(
zip(static_individual_var_grad_values,
dynamic_individual_var_grad_values)):
tf_logging.info(
"Comparing individual variable gradients iteration %d" % i)
self.assertAllClose(a, b)
@test_util.run_in_graph_and_eager_modes
def testDynamicEquivalentToStaticRNN(self):
self._testDynamicEquivalentToStaticRNN(use_sequence_length=False)
@test_util.run_in_graph_and_eager_modes
def testDynamicEquivalentToStaticRNNWithSequenceLength(self):
self._testDynamicEquivalentToStaticRNN(use_sequence_length=True)
class BidirectionalRNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _createBidirectionalRNN(self, use_shape, use_sequence_length, scope=None):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
sequence_length = array_ops.placeholder(
dtypes.int64) if use_sequence_length else None
cell_fw = rnn_cell.LSTMCell(
num_units, input_size, initializer=initializer, state_is_tuple=False)
cell_bw = rnn_cell.LSTMCell(
num_units, input_size, initializer=initializer, state_is_tuple=False)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32,
shape=(batch_size, input_size) if use_shape else (None, input_size))
]
outputs, state_fw, state_bw = rnn.static_bidirectional_rnn(
cell_fw,
cell_bw,
inputs,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(),
[batch_size if use_shape else None, 2 * num_units])
input_value = np.random.randn(batch_size, input_size)
outputs = array_ops.stack(outputs)
return input_value, inputs, outputs, state_fw, state_bw, sequence_length
def _testBidirectionalRNN(self, use_shape):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createBidirectionalRNN(use_shape, True))
variables_lib.global_variables_initializer().run()
# Run with pre-specified sequence length of 2, 3
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw],
feed_dict={
inputs[0]: input_value,
sequence_length: [2, 3]
})
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward output has to be the same,
# but reversed in time. The format is output[time][batch][depth], and
# due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
# - backward output: out[][][depth] for 3 <= depth < 6
#
# First sequence in batch is length=2
# Check that the time=0 forward output is equal to time=1 backward output
self.assertAllClose(out[0][0][0], out[1][0][3])
self.assertAllClose(out[0][0][1], out[1][0][4])
self.assertAllClose(out[0][0][2], out[1][0][5])
# Check that the time=1 forward output is equal to time=0 backward output
self.assertAllClose(out[1][0][0], out[0][0][3])
self.assertAllClose(out[1][0][1], out[0][0][4])
self.assertAllClose(out[1][0][2], out[0][0][5])
# Second sequence in batch is length=3
# Check that the time=0 forward output is equal to time=2 backward output
self.assertAllClose(out[0][1][0], out[2][1][3])
self.assertAllClose(out[0][1][1], out[2][1][4])
self.assertAllClose(out[0][1][2], out[2][1][5])
# Check that the time=1 forward output is equal to time=1 backward output
self.assertAllClose(out[1][1][0], out[1][1][3])
self.assertAllClose(out[1][1][1], out[1][1][4])
self.assertAllClose(out[1][1][2], out[1][1][5])
# Check that the time=2 forward output is equal to time=0 backward output
self.assertAllClose(out[2][1][0], out[0][1][3])
self.assertAllClose(out[2][1][1], out[0][1][4])
self.assertAllClose(out[2][1][2], out[0][1][5])
# Via the reasoning above, the forward and backward final state should be
# exactly the same
self.assertAllClose(s_fw, s_bw)
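# Sketch of the time-reversal identity asserted above: when the forward and
# backward cells share weights, the backward output at time t equals the
# forward output at time (length - 1 - t), and depth concatenation puts the
# forward pass in out[..., :num_units] and the backward pass in
# out[..., num_units:]. Illustrative numpy only; not a real RNN.
def _bidi_symmetry_sketch(length=3, num_units=3):
  fw = np.random.randn(length, num_units)
  bw = fw[::-1]  # weight sharing makes the backward pass a time reversal
  out = np.concatenate([fw, bw], axis=1)
  for t in range(length):
    assert np.allclose(out[t, :num_units], out[length - 1 - t, num_units:])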
def _testBidirectionalRNNWithoutSequenceLength(self, use_shape):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, _ = (
self._createBidirectionalRNN(use_shape, False))
variables_lib.global_variables_initializer().run()
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw], feed_dict={
inputs[0]: input_value
})
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward output has to be the same,
# but reversed in time. The format is output[time][batch][depth], and
# due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
# - backward output: out[][][depth] for 3 <= depth < 6
#
# Both sequences in batch are length=8. Check that the time=i
# forward output is equal to time=8-1-i backward output
for i in range(8):
self.assertAllClose(out[i][0][0:3], out[8 - 1 - i][0][3:6])
self.assertAllClose(out[i][1][0:3], out[8 - 1 - i][1][3:6])
# Via the reasoning above, the forward and backward final state should be
# exactly the same
self.assertAllClose(s_fw, s_bw)
@test_util.run_v1_only("b/124229375")
def testBidirectionalRNN(self):
self._testBidirectionalRNN(use_shape=False)
self._testBidirectionalRNN(use_shape=True)
@test_util.run_v1_only("b/124229375")
def testBidirectionalRNNWithoutSequenceLength(self):
self._testBidirectionalRNNWithoutSequenceLength(use_shape=False)
self._testBidirectionalRNNWithoutSequenceLength(use_shape=True)
def _createBidirectionalDynamicRNN(self,
use_shape,
use_state_tuple,
use_time_major,
use_sequence_length,
scope=None):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
sequence_length = (
array_ops.placeholder(dtypes.int64) if use_sequence_length else None)
cell_fw = rnn_cell.LSTMCell(
num_units, initializer=initializer, state_is_tuple=use_state_tuple)
cell_bw = rnn_cell.LSTMCell(
num_units, initializer=initializer, state_is_tuple=use_state_tuple)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32,
shape=(batch_size if use_shape else None, input_size))
]
inputs_c = array_ops.stack(inputs)
if not use_time_major:
inputs_c = array_ops.transpose(inputs_c, [1, 0, 2])
outputs, states = rnn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
inputs_c,
sequence_length,
dtype=dtypes.float32,
time_major=use_time_major,
scope=scope)
outputs = array_ops.concat(outputs, 2)
state_fw, state_bw = states
outputs_shape = [None, max_length, 2 * num_units]
if use_shape:
outputs_shape[0] = batch_size
if use_time_major:
outputs_shape[0], outputs_shape[1] = outputs_shape[1], outputs_shape[0]
self.assertEqual(outputs.get_shape().as_list(), outputs_shape)
input_value = np.random.randn(batch_size, input_size)
return input_value, inputs, outputs, state_fw, state_bw, sequence_length
def _testBidirectionalDynamicRNN(self, use_shape, use_state_tuple,
use_time_major, use_sequence_length):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createBidirectionalDynamicRNN(
use_shape, use_state_tuple, use_time_major, use_sequence_length))
variables_lib.global_variables_initializer().run()
# Run with pre-specified sequence length of 2, 3
feed_dict = ({sequence_length: [2, 3]} if use_sequence_length else {})
feed_dict.update({inputs[0]: input_value})
if use_state_tuple:
out, c_fw, m_fw, c_bw, m_bw = sess.run(
[outputs, state_fw[0], state_fw[1], state_bw[0], state_bw[1]],
feed_dict=feed_dict)
s_fw = (c_fw, m_fw)
s_bw = (c_bw, m_bw)
else:
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw], feed_dict=feed_dict)
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward output has to be the same,
# but reversed in time. The format is output[time][batch][depth], and
# due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
# - backward output: out[][][depth] for 3 <= depth < 6
#
if not use_time_major:
out = np.swapaxes(out, 0, 1)
if use_sequence_length:
# First sequence in batch is length=2
# Check that the t=0 forward output is equal to t=1 backward output
self.assertEqual(out[0][0][0], out[1][0][3])
self.assertEqual(out[0][0][1], out[1][0][4])
self.assertEqual(out[0][0][2], out[1][0][5])
# Check that the t=1 forward output is equal to t=0 backward output
self.assertEqual(out[1][0][0], out[0][0][3])
self.assertEqual(out[1][0][1], out[0][0][4])
self.assertEqual(out[1][0][2], out[0][0][5])
# Second sequence in batch is length=3
# Check that the t=0 forward output is equal to t=2 backward output
self.assertEqual(out[0][1][0], out[2][1][3])
self.assertEqual(out[0][1][1], out[2][1][4])
self.assertEqual(out[0][1][2], out[2][1][5])
# Check that the t=1 forward output is equal to t=1 backward output
self.assertEqual(out[1][1][0], out[1][1][3])
self.assertEqual(out[1][1][1], out[1][1][4])
self.assertEqual(out[1][1][2], out[1][1][5])
# Check that the t=2 forward output is equal to t=0 backward output
self.assertEqual(out[2][1][0], out[0][1][3])
self.assertEqual(out[2][1][1], out[0][1][4])
self.assertEqual(out[2][1][2], out[0][1][5])
# Via the reasoning above, the forward and backward final state should
# be exactly the same
self.assertAllClose(s_fw, s_bw)
else: # not use_sequence_length
max_length = 8  # from _createBidirectionalDynamicRNN
for t in range(max_length):
self.assertAllEqual(out[t, :, 0:3], out[max_length - t - 1, :, 3:6])
self.assertAllClose(s_fw, s_bw)
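# Sketch: with time_major=False, bidirectional_dynamic_rnn emits
# [batch, time, depth], so the np.swapaxes(out, 0, 1) above restores the
# [time, batch, depth] layout that the per-timestep index checks assume.
def _swap_to_time_major_sketch():
  batch_major = np.zeros((2, 8, 6))  # [batch, time, depth]
  assert np.swapaxes(batch_major, 0, 1).shape == (8, 2, 6)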
@test_util.run_v1_only("b/124229375")
def testBidirectionalDynamicRNN(self):
# Generate 2^4 option values
# from (True, True, True, True) to (False, False, False, False)
options = itertools.product([True, False], repeat=4)
for option in options:
self._testBidirectionalDynamicRNN(
use_shape=option[0],
use_state_tuple=option[1],
use_time_major=option[2],
use_sequence_length=option[3])
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
# REMARKS: factory(scope) is a function accepting a scope argument; the
# scope may be None, a string, or a VariableScope instance.
with self.session(use_gpu=True, graph=ops.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
# Check that all the variable names start with the proper scope.
variables_lib.global_variables_initializer()
all_vars = variables_lib.global_variables()
prefix = prefix or "bidirectional_rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf_logging.info("BiRNN with scope: %s (%s)" %
(prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
@test_util.run_v1_only("b/124229375")
def testBidirectionalRNNScope(self):
def factory(scope):
return self._createBidirectionalRNN(
use_shape=True, use_sequence_length=True, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
@test_util.run_v1_only("b/124229375")
def testBidirectionalDynamicRNNScope(self):
def get_factory(use_time_major):
def factory(scope):
return self._createBidirectionalDynamicRNN(
use_shape=True,
use_state_tuple=True,
use_sequence_length=True,
use_time_major=use_time_major,
scope=scope)
return factory
self._testScope(get_factory(True), use_outer_scope=True)
self._testScope(get_factory(True), use_outer_scope=False)
self._testScope(get_factory(True), prefix=None, use_outer_scope=False)
self._testScope(get_factory(False), use_outer_scope=True)
self._testScope(get_factory(False), use_outer_scope=False)
self._testScope(get_factory(False), prefix=None, use_outer_scope=False)
class MultiDimensionalLSTMTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
@test_util.run_v1_only("b/124229375")
def testMultiDimensionalLSTMAllRNNContainers(self):
feature_dims = (3, 4, 5)
input_size = feature_dims
batch_size = 2
max_length = 8
sequence_length = [4, 6]
with self.session(graph=ops.Graph()) as sess:
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None,) + input_size)
]
inputs_using_dim = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(batch_size,) + input_size)
]
inputs_c = array_ops.stack(inputs)
# Create a cell for the whole test. This is fine because the cell has no
# variables.
cell = DummyMultiDimensionalLSTM(feature_dims)
state_saver = TestStateSaver(batch_size, input_size)
outputs_static, state_static = rnn.static_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs_c,
dtype=dtypes.float32,
time_major=True,
sequence_length=sequence_length)
outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(
cell,
cell,
inputs_using_dim,
dtype=dtypes.float32,
sequence_length=sequence_length)
outputs_sav, state_sav = rnn.static_state_saving_rnn(
cell,
inputs_using_dim,
sequence_length=sequence_length,
state_saver=state_saver,
state_name=("h", "c"))
self.assertEqual(outputs_dynamic.get_shape().as_list(),
inputs_c.get_shape().as_list())
for out, inp in zip(outputs_static, inputs):
self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
for out, inp in zip(outputs_bid, inputs_using_dim):
input_shape_list = inp.get_shape().as_list()
# fwd and bwd activations are concatenated along the second dim.
input_shape_list[1] *= 2
self.assertEqual(out.get_shape().as_list(), input_shape_list)
variables_lib.global_variables_initializer().run()
input_total_size = (batch_size,) + input_size
input_value = np.random.randn(*input_total_size)
outputs_static_v = sess.run(
outputs_static, feed_dict={
inputs[0]: input_value
})
outputs_dynamic_v = sess.run(
outputs_dynamic, feed_dict={
inputs[0]: input_value
})
outputs_bid_v = sess.run(
outputs_bid, feed_dict={
inputs_using_dim[0]: input_value
})
outputs_sav_v = sess.run(
outputs_sav, feed_dict={
inputs_using_dim[0]: input_value
})
self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
self.assertAllEqual(outputs_static_v, outputs_sav_v)
outputs_static_array = np.array(outputs_static_v)
outputs_static_array_double = np.concatenate(
(outputs_static_array, outputs_static_array), axis=2)
outputs_bid_array = np.array(outputs_bid_v)
self.assertAllEqual(outputs_static_array_double, outputs_bid_array)
state_static_v = sess.run(
state_static, feed_dict={
inputs[0]: input_value
})
state_dynamic_v = sess.run(
state_dynamic, feed_dict={
inputs[0]: input_value
})
state_bid_fw_v = sess.run(
state_fw, feed_dict={
inputs_using_dim[0]: input_value
})
state_bid_bw_v = sess.run(
state_bw, feed_dict={
inputs_using_dim[0]: input_value
})
state_sav_v = sess.run(
state_sav, feed_dict={
inputs_using_dim[0]: input_value
})
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))
class NestedLSTMTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
@test_util.run_v1_only("b/124229375")
def testNestedIOLSTMAllRNNContainers(self):
input_size = 5
batch_size = 2
state_size = 6
max_length = 8
sequence_length = [4, 6]
with self.session(graph=ops.Graph()) as sess:
state_saver = TestStateSaver(batch_size, state_size)
single_input = (array_ops.placeholder(
dtypes.float32, shape=(None, input_size)),
array_ops.placeholder(
dtypes.float32, shape=(None, input_size)))
inputs = max_length * [single_input]
inputs_c = (array_ops.stack([input_[0] for input_ in inputs]),
array_ops.stack([input_[1] for input_ in inputs]))
single_input_using_dim = (array_ops.placeholder(
dtypes.float32, shape=(batch_size, input_size)),
array_ops.placeholder(
dtypes.float32,
shape=(batch_size, input_size)))
inputs_using_dim = max_length * [single_input_using_dim]
# Create a cell for the whole test. This is fine because the cell has no
# variables.
cell = NestedRNNCell()
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs_c,
dtype=dtypes.float32,
time_major=True,
sequence_length=sequence_length)
outputs_static, state_static = rnn.static_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(
cell,
cell,
inputs_using_dim,
dtype=dtypes.float32,
sequence_length=sequence_length)
outputs_sav, state_sav = rnn.static_state_saving_rnn(
cell,
inputs_using_dim,
sequence_length=sequence_length,
state_saver=state_saver,
state_name=("h", "c"))
def _assert_same_shape(input1, input2, double=False):
flat_input1 = nest.flatten(input1)
flat_input2 = nest.flatten(input2)
for inp1, inp2 in zip(flat_input1, flat_input2):
input_shape = inp1.get_shape().as_list()
if double:
input_shape[1] *= 2
self.assertEqual(input_shape, inp2.get_shape().as_list())
_assert_same_shape(inputs_c, outputs_dynamic)
_assert_same_shape(inputs, outputs_static)
_assert_same_shape(inputs_using_dim, outputs_sav)
_assert_same_shape(inputs_using_dim, outputs_bid, double=True)
variables_lib.global_variables_initializer().run()
input_total_size = (batch_size, input_size)
input_value = (np.random.randn(*input_total_size),
np.random.randn(*input_total_size))
outputs_dynamic_v = sess.run(
outputs_dynamic, feed_dict={
single_input: input_value
})
outputs_static_v = sess.run(
outputs_static, feed_dict={
single_input: input_value
})
outputs_sav_v = sess.run(
outputs_sav, feed_dict={
single_input_using_dim: input_value
})
outputs_bid_v = sess.run(
outputs_bid, feed_dict={
single_input_using_dim: input_value
})
self.assertAllEqual(outputs_static_v,
np.transpose(outputs_dynamic_v, (1, 0, 2, 3)))
self.assertAllEqual(outputs_static_v, outputs_sav_v)
outputs_static_array = np.array(outputs_static_v)
outputs_static_array_double = np.concatenate(
(outputs_static_array, outputs_static_array), axis=3)
outputs_bid_array = np.array(outputs_bid_v)
self.assertAllEqual(outputs_static_array_double, outputs_bid_array)
state_dynamic_v = sess.run(
state_dynamic, feed_dict={
single_input: input_value
})
state_static_v = sess.run(
state_static, feed_dict={
single_input: input_value
})
state_bid_fw_v = sess.run(
state_fw, feed_dict={
single_input_using_dim: input_value
})
state_bid_bw_v = sess.run(
state_bw, feed_dict={
single_input_using_dim: input_value
})
state_sav_v = sess.run(
state_sav, feed_dict={
single_input_using_dim: input_value
})
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))
class StateSaverRNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _factory(self, scope, state_saver):
num_units = state_saver.state_size // 2
batch_size = state_saver.batch_size
input_size = 5
max_length = 8
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=False,
initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
array_ops.zeros(dtype=dtypes.float32, shape=(batch_size, input_size))
]
out, state = rnn.static_state_saving_rnn(
cell,
inputs,
state_saver=state_saver,
state_name="save_lstm",
scope=scope)
return out, state, state_saver
def _testScope(self, prefix="prefix", use_outer_scope=True):
num_units = 3
batch_size = 2
state_saver = TestStateSaver(batch_size, 2 * num_units)
with self.session(use_gpu=True, graph=ops.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
self._factory(scope=scope, state_saver=state_saver)
else:
self._factory(scope=prefix, state_saver=state_saver)
variables_lib.global_variables_initializer()
# Check that all the variable names start with the proper scope.
all_vars = variables_lib.global_variables()
prefix = prefix or "rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf_logging.info("RNN with scope: %s (%s)" %
(prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testStateSaverRNNScope(self):
self._testScope(use_outer_scope=True)
self._testScope(use_outer_scope=False)
self._testScope(prefix=None, use_outer_scope=False)
def testStateSaverCallsSaveState(self):
"""Test that number of calls to state and save_state is equal.
Test if the order of actual evaluating or skipping evaluation of out,
state tensors, which are the output tensors from static_state_saving_rnn,
have influence on number of calls to save_state and state methods of
state_saver object (the number of calls should be same.)
"""
self.skipTest("b/124196246 Breakage for sess.run([out, ...]): 2 != 1")
num_units = 3
batch_size = 2
state_saver = TestStateSaverWithCounters(batch_size, 2 * num_units)
out, state, state_saver = self._factory(scope=None, state_saver=state_saver)
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
sess.run(variables_lib.local_variables_initializer())
_, _, num_state_calls, num_save_state_calls = sess.run([
out,
state,
state_saver.num_state_calls,
state_saver.num_save_state_calls])
self.assertEqual(num_state_calls, num_save_state_calls)
_, num_state_calls, num_save_state_calls = sess.run([
out,
state_saver.num_state_calls,
state_saver.num_save_state_calls])
self.assertEqual(num_state_calls, num_save_state_calls)
_, num_state_calls, num_save_state_calls = sess.run([
state,
state_saver.num_state_calls,
state_saver.num_save_state_calls])
self.assertEqual(num_state_calls, num_save_state_calls)
class GRUTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
@test_util.run_v1_only("b/124229375")
def testDynamic(self):
time_steps = 8
num_units = 3
input_size = 5
batch_size = 2
input_values = np.random.randn(time_steps, batch_size, input_size)
sequence_length = np.random.randint(0, time_steps, size=batch_size)
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
cell = rnn_cell.GRUCell(num_units=num_units)
with variable_scope.variable_scope("dynamic_scope"):
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs=concat_inputs,
sequence_length=sequence_length,
time_major=True,
dtype=dtypes.float32)
feeds = {concat_inputs: input_values}
# Initialize
variables_lib.global_variables_initializer().run(feed_dict=feeds)
sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds)
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
with self.session(use_gpu=True, graph=ops.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
variables_lib.global_variables_initializer()
# Check that all the variable names start with the proper scope.
all_vars = variables_lib.global_variables()
prefix = prefix or "rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf_logging.info("RNN with scope: %s (%s)" %
(prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
@test_util.run_v1_only("b/124229375")
def testDynamicScope(self):
time_steps = 8
num_units = 3
input_size = 5
batch_size = 2
sequence_length = np.random.randint(0, time_steps, size=batch_size)
def factory(scope):
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
cell = rnn_cell.GRUCell(num_units=num_units)
return rnn.dynamic_rnn(
cell,
inputs=concat_inputs,
sequence_length=sequence_length,
time_major=True,
dtype=dtypes.float32,
scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
class RawRNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
@test_util.run_v1_only("b/124229375")
def _testRawRNN(self, max_time):
with self.session(graph=ops.Graph()) as sess:
batch_size = 16
input_depth = 4
num_units = 3
inputs = array_ops.placeholder(
shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)
sequence_length = array_ops.placeholder(
shape=(batch_size,), dtype=dtypes.int32)
inputs_ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, unused_loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
next_state = cell_state # copy state through
elements_finished = (time_ >= sequence_length)
finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = control_flow_ops.cond(
finished,
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output, None)
reuse_scope = variable_scope.get_variable_scope()
outputs_ta, final_state, _ = rnn.raw_rnn(cell, loop_fn, scope=reuse_scope)
outputs = outputs_ta.stack()
reuse_scope.reuse_variables()
outputs_dynamic_rnn, final_state_dynamic_rnn = rnn.dynamic_rnn(
cell,
inputs,
time_major=True,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=reuse_scope)
variables = variables_lib.trainable_variables()
gradients = gradients_impl.gradients([outputs, final_state],
[inputs] + variables)
gradients_dynamic_rnn = gradients_impl.gradients(
[outputs_dynamic_rnn, final_state_dynamic_rnn], [inputs] + variables)
variables_lib.global_variables_initializer().run()
rand_input = np.random.randn(max_time, batch_size, input_depth)
if max_time == 0:
rand_seq_len = np.zeros(batch_size)
else:
rand_seq_len = np.random.randint(max_time, size=batch_size)
# To ensure same output lengths for dynamic_rnn and raw_rnn
rand_seq_len[0] = max_time
(outputs_val, outputs_dynamic_rnn_val, final_state_val,
final_state_dynamic_rnn_val) = sess.run(
[outputs, outputs_dynamic_rnn, final_state, final_state_dynamic_rnn],
feed_dict={
inputs: rand_input,
sequence_length: rand_seq_len
})
self.assertAllClose(outputs_dynamic_rnn_val, outputs_val)
self.assertAllClose(final_state_dynamic_rnn_val, final_state_val)
# NOTE: With 0 time steps, raw_rnn has no shape information about the
# input, so evaluating the gradients would fail; the gradients comparison
# is therefore skipped in that case.
if max_time > 0:
self.assertEqual(len(gradients), len(gradients_dynamic_rnn))
gradients_val = sess.run(
gradients,
feed_dict={
inputs: rand_input,
sequence_length: rand_seq_len
})
gradients_dynamic_rnn_val = sess.run(
gradients_dynamic_rnn,
feed_dict={
inputs: rand_input,
sequence_length: rand_seq_len
})
self.assertEqual(len(gradients_val), len(gradients_dynamic_rnn_val))
input_gradients_val = gradients_val[0]
input_gradients_dynamic_rnn_val = gradients_dynamic_rnn_val[0]
self.assertAllClose(input_gradients_val,
input_gradients_dynamic_rnn_val)
for i in range(1, len(gradients_val)):
self.assertAllClose(gradients_dynamic_rnn_val[i], gradients_val[i])
@test_util.run_v1_only("b/124229375")
def testRawRNNZeroLength(self):
# NOTE: With 0 time steps, raw_rnn has no shape information about the
# input, so evaluating the gradients would fail; this case therefore
# skips the gradients test.
self._testRawRNN(max_time=0)
def testRawRNN(self):
self._testRawRNN(max_time=10)
@test_util.run_v1_only("b/124229375")
def testLoopState(self):
with self.session(graph=ops.Graph()):
max_time = 10
batch_size = 16
input_depth = 4
num_units = 3
inputs = np.random.randn(max_time, batch_size, input_depth)
inputs_ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, loop_state):
if cell_output is None:
loop_state = constant_op.constant([0])
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
loop_state = array_ops.stack([array_ops.squeeze(loop_state) + 1])
next_state = cell_state
emit_output = cell_output # == None for time == 0
elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = control_flow_ops.cond(
finished,
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output,
loop_state)
r = rnn.raw_rnn(cell, loop_fn)
loop_state = r[-1]
self.assertEqual([10], loop_state.eval())
@test_util.run_v1_only("b/124229375")
def testLoopStateWithTensorArray(self):
with self.session(graph=ops.Graph()):
max_time = 4
batch_size = 16
input_depth = 4
num_units = 3
inputs = np.random.randn(max_time, batch_size, input_depth)
inputs_ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, loop_state):
if cell_output is None:
loop_state = tensor_array_ops.TensorArray(
dynamic_size=True,
size=0,
dtype=dtypes.int32,
clear_after_read=False)
loop_state = loop_state.write(0, 1)
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
loop_state = loop_state.write(time_,
loop_state.read(time_ - 1) + time_)
next_state = cell_state
emit_output = cell_output # == None for time == 0
elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = control_flow_ops.cond(
finished,
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output,
loop_state)
r = rnn.raw_rnn(cell, loop_fn)
loop_state = r[-1]
loop_state = loop_state.stack()
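# Worked accumulation for the assertion below (max_time == 4): the first
# loop_fn call seeds entry 0 with 1, and each later call at time t writes
# read(t - 1) + t, giving 1, 1+1=2, 2+2=4, 4+3=7, 7+4=11. loop_fn runs for
# times 0..max_time inclusive, hence the five entries.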
self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state.eval())
@test_util.run_v1_only("b/124229375")
def testEmitDifferentStructureThanCellOutput(self):
with self.session(graph=ops.Graph()) as sess:
max_time = 10
batch_size = 16
input_depth = 4
num_units = 3
inputs = np.random.randn(max_time, batch_size, input_depth)
inputs_ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
# Verify emit shapes may be unknown by feeding a placeholder that
# determines an emit shape.
unknown_dim = array_ops.placeholder(dtype=dtypes.int32)
cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, _):
if cell_output is None:
emit_output = (array_ops.zeros([2, 3], dtype=dtypes.int32),
array_ops.zeros([unknown_dim], dtype=dtypes.int64))
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
emit_output = (array_ops.ones([batch_size, 2, 3], dtype=dtypes.int32),
array_ops.ones(
[batch_size, unknown_dim], dtype=dtypes.int64))
next_state = cell_state
elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = control_flow_ops.cond(
finished,
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output, None)
r = rnn.raw_rnn(cell, loop_fn)
output_ta = r[0]
self.assertEqual(2, len(output_ta))
self.assertEqual([dtypes.int32, dtypes.int64],
[ta.dtype for ta in output_ta])
output = [ta.stack() for ta in output_ta]
output_vals = sess.run(output, feed_dict={unknown_dim: 1})
self.assertAllEqual(
np.ones((max_time, batch_size, 2, 3), np.int32), output_vals[0])
self.assertAllEqual(
np.ones((max_time, batch_size, 1), np.int64), output_vals[1])
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
with self.session(use_gpu=True, graph=ops.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
variables_lib.global_variables_initializer()
# Check that all the variable names start with the proper scope.
all_vars = variables_lib.global_variables()
prefix = prefix or "rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf_logging.info("RNN with scope: %s (%s)" %
(prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
@test_util.run_v1_only("b/124229375")
def testRawRNNScope(self):
max_time = 10
batch_size = 16
input_depth = 4
num_units = 3
def factory(scope):
inputs = array_ops.placeholder(
shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)
sequence_length = array_ops.placeholder(
shape=(batch_size,), dtype=dtypes.int32)
inputs_ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, unused_loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
next_state = cell_state
elements_finished = (time_ >= sequence_length)
finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = control_flow_ops.cond(
finished,
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output, None)
return rnn.raw_rnn(cell, loop_fn, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
class DeviceWrapperCell(rnn_cell.RNNCell):
"""Class to ensure cell calculation happens on a specific device."""
def __init__(self, cell, device):
self._cell = cell
self._device = device
@property
def output_size(self):
return self._cell.output_size
@property
def state_size(self):
return self._cell.state_size
def __call__(self, input_, state, scope=None):
if self._device is not None:
with ops.device(self._device):
return self._cell(input_, state, scope=scope)
else:
return self._cell(input_, state, scope=scope)
class TensorArrayOnCorrectDeviceTest(test.TestCase):
def _execute_rnn_on(self,
rnn_device=None,
cell_device=None,
input_device=None):
batch_size = 3
time_steps = 7
input_size = 5
num_units = 10
cell = rnn_cell.LSTMCell(num_units, use_peepholes=True)
gpu_cell = DeviceWrapperCell(cell, cell_device)
inputs = np.random.randn(batch_size, time_steps, input_size).astype(
np.float32)
sequence_length = np.random.randint(0, time_steps, size=batch_size)
if input_device is not None:
with ops.device(input_device):
inputs = constant_op.constant(inputs)
if rnn_device is not None:
with ops.device(rnn_device):
outputs, _ = rnn.dynamic_rnn(
gpu_cell,
inputs,
sequence_length=sequence_length,
dtype=dtypes.float32)
else:
outputs, _ = rnn.dynamic_rnn(
gpu_cell,
inputs,
sequence_length=sequence_length,
dtype=dtypes.float32)
with self.session(use_gpu=True) as sess:
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
variables_lib.global_variables_initializer().run()
sess.run(outputs, options=opts, run_metadata=run_metadata)
return run_metadata
def _retrieve_cpu_gpu_stats(self, run_metadata):
cpu_stats = None
gpu_stats = None
step_stats = run_metadata.step_stats
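    # Device strings end in ".../cpu:0" or ".../gpu:0", so comparing the last
    # five characters is enough to identify the device type.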
for ds in step_stats.dev_stats:
if "cpu:0" in ds.device[-5:].lower():
cpu_stats = ds.node_stats
if "gpu:0" == ds.device[-5:].lower():
gpu_stats = ds.node_stats
return cpu_stats, gpu_stats
@test_util.run_v1_only("b/124229375")
def testRNNOnCPUCellOnGPU(self):
if not test.is_gpu_available():
return # Test requires access to a GPU
gpu_dev = test.gpu_device_name()
run_metadata = self._execute_rnn_on(
rnn_device="/cpu:0", cell_device=gpu_dev)
cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
def _assert_in(op_str, in_stats, out_stats):
self.assertTrue(any(op_str in s.node_name for s in in_stats))
self.assertFalse(any(op_str in s.node_name for s in out_stats))
# Writes happen at output of RNN cell
_assert_in("TensorArrayWrite", gpu_stats, cpu_stats)
# Gather happens on final TensorArray
_assert_in("TensorArrayGather", gpu_stats, cpu_stats)
# Reads happen at input to RNN cell
_assert_in("TensorArrayRead", cpu_stats, gpu_stats)
# Scatters happen to get initial input into TensorArray
_assert_in("TensorArrayScatter", cpu_stats, gpu_stats)
@test_util.run_v1_only("b/124229375")
def testRNNOnCPUCellOnCPU(self):
if not test.is_gpu_available():
return # Test requires access to a GPU
gpu_dev = test.gpu_device_name()
run_metadata = self._execute_rnn_on(
rnn_device="/cpu:0", cell_device="/cpu:0", input_device=gpu_dev)
cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
def _assert_in(op_str, in_stats, out_stats):
self.assertTrue(any(op_str in s.node_name for s in in_stats))
self.assertFalse(any(op_str in s.node_name for s in out_stats))
# All TensorArray operations happen on CPU
_assert_in("TensorArray", cpu_stats, gpu_stats)
@test_util.run_v1_only("b/124229375")
def testInputOnGPUCellNotDeclared(self):
if not test.is_gpu_available():
return # Test requires access to a GPU
gpu_dev = test.gpu_device_name()
run_metadata = self._execute_rnn_on(input_device=gpu_dev)
cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
def _assert_in(op_str, in_stats, out_stats):
self.assertTrue(any(op_str in s.node_name for s in in_stats))
self.assertFalse(any(op_str in s.node_name for s in out_stats))
# Everything happens on GPU
_assert_in("TensorArray", gpu_stats, cpu_stats)
class RNNCellTest(test.TestCase, parameterized.TestCase):
@test_util.run_v1_only("b/124229375")
def testBasicRNNCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertEqual([
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x: np.array([[1., 1.]]),
m: np.array([[0.1, 0.1]])
})
self.assertEqual(res[0].shape, (1, 2))
@test_util.run_v1_only("b/124229375")
def testBasicRNNCellNotTrainable(self):
with self.cached_session() as sess:
def not_trainable_getter(getter, *args, **kwargs):
kwargs["trainable"] = False
return getter(*args, **kwargs)
with variable_scope.variable_scope(
"root",
initializer=init_ops.constant_initializer(0.5),
custom_getter=not_trainable_getter):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertFalse(cell.trainable_variables)
self.assertEqual([
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.non_trainable_variables])
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x: np.array([[1., 1.]]),
m: np.array([[0.1, 0.1]])
})
self.assertEqual(res[0].shape, (1, 2))
@test_util.run_v1_only("b/124229375")
def testGRUCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
g, _ = rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x: np.array([[1., 1.]]),
m: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.175991, 0.175991]])
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
# Test GRUCell with input_size != num_units.
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 2])
g, _ = rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.156736, 0.156736]])
@test_util.run_v1_only("b/124229375")
def testBasicLSTMCell(self):
for dtype in [dtypes.float16, dtypes.float32]:
np_dtype = dtype.as_numpy_dtype
with self.session(graph=ops.Graph()) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2], dtype=dtype)
m = array_ops.zeros([1, 8], dtype=dtype)
cell = rnn_cell_impl.MultiRNNCell(
[
rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
for _ in range(2)
],
state_is_tuple=False)
self.assertEqual(cell.dtype, None)
self.assertEqual("cell-0", cell._checkpoint_dependencies[0].name)
self.assertEqual("cell-1", cell._checkpoint_dependencies[1].name)
cell.get_config() # Should not throw an error
g, out_m = cell(x, m)
# Layer infers the input type.
self.assertEqual(cell.dtype, dtype.name)
expected_variable_names = [
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME
]
self.assertEqual(expected_variable_names,
[v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, out_m], {
x: np.array([[1., 1.]]),
m: 0.1 * np.ones([1, 8])
})
self.assertEqual(len(res), 2)
variables = variables_lib.global_variables()
self.assertEqual(expected_variable_names, [v.name for v in variables])
# The numbers in results were not calculated, this is just a
# smoke test.
self.assertAllClose(res[0], np.array(
[[0.240, 0.240]], dtype=np_dtype), 1e-2)
expected_mem = np.array(
[[0.689, 0.689, 0.448, 0.448, 0.398, 0.398, 0.240, 0.240]],
dtype=np_dtype)
self.assertAllClose(res[1], expected_mem, 1e-2)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
# Test BasicLSTMCell with input_size != num_units.
x = array_ops.zeros([1, 3], dtype=dtype)
m = array_ops.zeros([1, 4], dtype=dtype)
g, out_m = rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m], {
x: np.array([[1., 1., 1.]], dtype=np_dtype),
m: 0.1 * np.ones([1, 4], dtype=np_dtype)
})
self.assertEqual(len(res), 2)
@test_util.run_v1_only("b/124229375")
def testBasicLSTMCellDimension0Error(self):
"""Tests that dimension 0 in both(x and m) shape must be equal."""
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
num_units = 2
state_size = num_units * 2
batch_size = 3
input_size = 4
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size - 1, state_size])
with self.assertRaises(ValueError):
g, out_m = rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
sess.run(
[g, out_m], {
x: 1 * np.ones([batch_size, input_size]),
m: 0.1 * np.ones([batch_size - 1, state_size])
})
def testBasicLSTMCellStateSizeError(self):
"""Tests that state_size must be num_units * 2."""
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
num_units = 2
state_size = num_units * 3 # state_size must be num_units * 2
batch_size = 3
input_size = 4
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
g, out_m = rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
sess.run(
[g, out_m], {
x: 1 * np.ones([batch_size, input_size]),
m: 0.1 * np.ones([batch_size, state_size])
})
@test_util.run_v1_only("b/124229375")
def testBasicLSTMCellStateTupleType(self):
with self.cached_session():
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = (array_ops.zeros([1, 2]),) * 2
m1 = (array_ops.zeros([1, 2]),) * 2
cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.BasicLSTMCell(2) for _ in range(2)],
state_is_tuple=True)
self.assertTrue(isinstance(cell.state_size, tuple))
self.assertTrue(
isinstance(cell.state_size[0], rnn_cell_impl.LSTMStateTuple))
self.assertTrue(
isinstance(cell.state_size[1], rnn_cell_impl.LSTMStateTuple))
# Pass in regular tuples
_, (out_m0, out_m1) = cell(x, (m0, m1))
self.assertTrue(isinstance(out_m0, rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, rnn_cell_impl.LSTMStateTuple))
# Pass in LSTMStateTuples
variable_scope.get_variable_scope().reuse_variables()
zero_state = cell.zero_state(1, dtypes.float32)
self.assertTrue(isinstance(zero_state, tuple))
self.assertTrue(isinstance(zero_state[0], rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(zero_state[1], rnn_cell_impl.LSTMStateTuple))
_, (out_m0, out_m1) = cell(x, zero_state)
self.assertTrue(isinstance(out_m0, rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, rnn_cell_impl.LSTMStateTuple))
@test_util.run_v1_only("b/124229375")
def testBasicLSTMCellWithStateTuple(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = array_ops.zeros([1, 4])
m1 = array_ops.zeros([1, 4])
cell = rnn_cell_impl.MultiRNNCell(
[
rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
for _ in range(2)
],
state_is_tuple=True)
g, (out_m0, out_m1) = cell(x, (m0, m1))
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m0, out_m1], {
x: np.array([[1., 1.]]),
m0: 0.1 * np.ones([1, 4]),
m1: 0.1 * np.ones([1, 4])
})
self.assertEqual(len(res), 3)
# The numbers in results were not calculated, this is just a smoke test.
# Note, however, these values should match the original
# version having state_is_tuple=False.
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
expected_mem0 = np.array(
[[0.68967271, 0.68967271, 0.44848421, 0.44848421]])
expected_mem1 = np.array(
[[0.39897051, 0.39897051, 0.24024698, 0.24024698]])
self.assertAllClose(res[1], expected_mem0)
self.assertAllClose(res[2], expected_mem1)
@test_util.run_v1_only("b/124229375")
def testLSTMCell(self):
with self.cached_session() as sess:
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
output, state = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[output, state], {
x: np.array([[1., 1.], [2., 2.], [3., 3.]]),
m: 0.1 * np.ones((batch_size, state_size))
})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_proj))
self.assertEqual(res[1].shape, (batch_size, state_size))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
@test_util.run_v1_only("b/124229375")
def testLSTMCellVariables(self):
with self.cached_session():
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
cell(x, m) # Execute to create variables
variables = variables_lib.global_variables()
        self.assertEqual(variables[0].op.name, "root/lstm_cell/kernel")
        self.assertEqual(variables[1].op.name, "root/lstm_cell/bias")
        self.assertEqual(variables[2].op.name,
                         "root/lstm_cell/projection/kernel")
@test_util.run_in_graph_and_eager_modes
def testWrapperCheckpointing(self):
for wrapper_type in [
rnn_cell_impl.DropoutWrapper,
rnn_cell_impl.ResidualWrapper,
lambda cell: rnn_cell_impl.MultiRNNCell([cell])]:
cell = rnn_cell_impl.BasicRNNCell(1)
wrapper = wrapper_type(cell)
wrapper(array_ops.ones([1, 1]),
state=wrapper.zero_state(batch_size=1, dtype=dtypes.float32))
self.evaluate([v.initializer for v in cell.variables])
checkpoint = trackable_utils.Checkpoint(wrapper=wrapper)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(cell._bias.assign([40.]))
save_path = checkpoint.save(prefix)
self.evaluate(cell._bias.assign([0.]))
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.assertAllEqual([40.], self.evaluate(cell._bias))
@test_util.run_in_graph_and_eager_modes
def testResidualWrapper(self):
wrapper_type = rnn_cell_impl.ResidualWrapper
x = ops.convert_to_tensor(np.array([[1., 1., 1.]]))
m = ops.convert_to_tensor(np.array([[0.1, 0.1, 0.1]]))
base_cell = rnn_cell_impl.GRUCell(
3, kernel_initializer=init_ops.constant_initializer(0.5),
bias_initializer=init_ops.constant_initializer(0.5))
g, m_new = base_cell(x, m)
wrapper_object = wrapper_type(base_cell)
(name, dep), = wrapper_object._checkpoint_dependencies
wrapper_object.get_config() # Should not throw an error
self.assertIs(dep, base_cell)
self.assertEqual("cell", name)
g_res, m_new_res = wrapper_object(x, m)
self.evaluate([variables_lib.global_variables_initializer()])
res = self.evaluate([g, g_res, m_new, m_new_res])
# Residual connections
self.assertAllClose(res[1], res[0] + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res[2], res[3])
@test_util.run_in_graph_and_eager_modes
def testResidualWrapperWithSlice(self):
wrapper_type = rnn_cell_impl.ResidualWrapper
x = ops.convert_to_tensor(np.array([[1., 1., 1., 1., 1.]]))
m = ops.convert_to_tensor(np.array([[0.1, 0.1, 0.1]]))
base_cell = rnn_cell_impl.GRUCell(
3, kernel_initializer=init_ops.constant_initializer(0.5),
bias_initializer=init_ops.constant_initializer(0.5))
g, m_new = base_cell(x, m)
def residual_with_slice_fn(inp, out):
inp_sliced = array_ops.slice(inp, [0, 0], [-1, 3])
return inp_sliced + out
g_res, m_new_res = wrapper_type(
base_cell, residual_with_slice_fn)(x, m)
self.evaluate([variables_lib.global_variables_initializer()])
res_g, res_g_res, res_m_new, res_m_new_res = self.evaluate(
[g, g_res, m_new, m_new_res])
# Residual connections
self.assertAllClose(res_g_res, res_g + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res_m_new, res_m_new_res)
def testDeviceWrapper(self):
wrapper_type = rnn_cell_impl.DeviceWrapper
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = rnn_cell_impl.GRUCell(3)
wrapped_cell = wrapper_type(cell, "/cpu:0")
(name, dep), = wrapped_cell._checkpoint_dependencies
wrapped_cell.get_config() # Should not throw an error
self.assertIs(dep, cell)
self.assertEqual("cell", name)
outputs, _ = wrapped_cell(x, m)
self.assertIn("cpu:0", outputs.device.lower())
def _retrieve_cpu_gpu_stats(self, run_metadata):
cpu_stats = None
gpu_stats = None
step_stats = run_metadata.step_stats
for ds in step_stats.dev_stats:
if "cpu:0" in ds.device[-5:].lower():
cpu_stats = ds.node_stats
if "gpu:0" == ds.device[-5:].lower():
gpu_stats = ds.node_stats
return cpu_stats, gpu_stats
@test_util.run_v1_only("b/124229375")
def testDeviceWrapperDynamicExecutionNodesAreAllProperlyLocated(self):
if not test.is_gpu_available():
# Can't perform this test w/o a GPU
return
gpu_dev = test.gpu_device_name()
with self.session(use_gpu=True) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1, 3])
cell = rnn_cell_impl.DeviceWrapper(rnn_cell_impl.GRUCell(3), gpu_dev)
with ops.device("/cpu:0"):
outputs, _ = rnn.dynamic_rnn(
cell=cell, inputs=x, dtype=dtypes.float32)
run_metadata = config_pb2.RunMetadata()
opts = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
sess.run([variables_lib.global_variables_initializer()])
_ = sess.run(outputs, options=opts, run_metadata=run_metadata)
cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
self.assertFalse([s for s in cpu_stats if "gru_cell" in s.node_name])
self.assertTrue([s for s in gpu_stats if "gru_cell" in s.node_name])
@test_util.run_v1_only("b/124229375")
def testMultiRNNCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 4])
multi_rnn_cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2) for _ in range(2)],
state_is_tuple=False)
_, ml = multi_rnn_cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(ml, {
x: np.array([[1., 1.]]),
m: np.array([[0.1, 0.1, 0.1, 0.1]])
})
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res, [[0.175991, 0.175991, 0.13248, 0.13248]])
self.assertEqual(len(multi_rnn_cell.weights), 2 * 4)
        self.assertTrue(
            all(x.dtype == dtypes.float32 for x in multi_rnn_cell.weights))
@test_util.run_v1_only("b/124229375")
def testMultiRNNCellWithStateTuple(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m_bad = array_ops.zeros([1, 4])
m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))
# Test incorrectness of state
with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2) for _ in range(2)],
state_is_tuple=True)(x, m_bad)
_, ml = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2) for _ in range(2)],
state_is_tuple=True)(x, m_good)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
ml, {
x: np.array([[1., 1.]]),
m_good[0]: np.array([[0.1, 0.1]]),
m_good[1]: np.array([[0.1, 0.1]])
})
# The numbers in results were not calculated, this is just a
# smoke test. However, these numbers should match those of
# the test testMultiRNNCell.
self.assertAllClose(res[0], [[0.175991, 0.175991]])
self.assertAllClose(res[1], [[0.13248, 0.13248]])
def testDeviceWrapperSerialization(self):
wrapper_cls = rnn_cell_impl.DeviceWrapper
cell = rnn_cell_impl.LSTMCell(10)
wrapper = wrapper_cls(cell, "/cpu:0")
config = wrapper.get_config()
    # Replace the cell in the config with a real cell instance to work around
    # the reverse Keras dependency issue.
config_copy = config.copy()
config_copy["cell"] = rnn_cell_impl.LSTMCell.from_config(
config_copy["cell"]["config"])
reconstructed_wrapper = wrapper_cls.from_config(config_copy)
self.assertDictEqual(config, reconstructed_wrapper.get_config())
self.assertIsInstance(reconstructed_wrapper, wrapper_cls)
def testResidualWrapperSerialization(self):
wrapper_cls = rnn_cell_impl.ResidualWrapper
cell = rnn_cell_impl.LSTMCell(10)
wrapper = wrapper_cls(cell)
config = wrapper.get_config()
    # Replace the cell in the config with a real cell instance to work around
    # the reverse Keras dependency issue.
config_copy = config.copy()
config_copy["cell"] = rnn_cell_impl.LSTMCell.from_config(
config_copy["cell"]["config"])
reconstructed_wrapper = wrapper_cls.from_config(config_copy)
self.assertDictEqual(config, reconstructed_wrapper.get_config())
self.assertIsInstance(reconstructed_wrapper, wrapper_cls)
wrapper = wrapper_cls(cell, residual_fn=lambda i, o: i + i + o)
config = wrapper.get_config()
config_copy = config.copy()
config_copy["cell"] = rnn_cell_impl.LSTMCell.from_config(
config_copy["cell"]["config"])
reconstructed_wrapper = wrapper_cls.from_config(config_copy)
# Assert the reconstructed function will perform the math correctly.
self.assertEqual(reconstructed_wrapper._residual_fn(1, 2), 4)
def residual_fn(inputs, outputs):
return inputs * 3 + outputs
wrapper = wrapper_cls(cell, residual_fn=residual_fn)
config = wrapper.get_config()
config_copy = config.copy()
config_copy["cell"] = rnn_cell_impl.LSTMCell.from_config(
config_copy["cell"]["config"])
reconstructed_wrapper = wrapper_cls.from_config(config_copy)
# Assert the reconstructed function will perform the math correctly.
self.assertEqual(reconstructed_wrapper._residual_fn(1, 2), 5)
def testDropoutWrapperSerialization(self):
wrapper_cls = rnn_cell_impl.DropoutWrapper
cell = rnn_cell_impl.LSTMCell(10)
wrapper = wrapper_cls(cell)
config = wrapper.get_config()
config_copy = config.copy()
config_copy["cell"] = rnn_cell_impl.LSTMCell.from_config(
config_copy["cell"]["config"])
reconstructed_wrapper = wrapper_cls.from_config(config_copy)
self.assertDictEqual(config, reconstructed_wrapper.get_config())
self.assertIsInstance(reconstructed_wrapper, wrapper_cls)
wrapper = wrapper_cls(cell, dropout_state_filter_visitor=lambda s: True)
config = wrapper.get_config()
config_copy = config.copy()
config_copy["cell"] = rnn_cell_impl.LSTMCell.from_config(
config_copy["cell"]["config"])
reconstructed_wrapper = wrapper_cls.from_config(config_copy)
self.assertTrue(reconstructed_wrapper._dropout_state_filter(None))
def dropout_state_filter_visitor(unused_state):
return False
wrapper = wrapper_cls(
cell, dropout_state_filter_visitor=dropout_state_filter_visitor)
config = wrapper.get_config()
config_copy = config.copy()
config_copy["cell"] = rnn_cell_impl.LSTMCell.from_config(
config_copy["cell"]["config"])
reconstructed_wrapper = wrapper_cls.from_config(config_copy)
self.assertFalse(reconstructed_wrapper._dropout_state_filter(None))
@test_util.run_all_in_graph_and_eager_modes
class DropoutWrapperTest(test.TestCase, parameterized.TestCase):
def _testDropoutWrapper(self,
batch_size=None,
time_steps=None,
parallel_iterations=None,
wrapper_type=None,
scope="root",
**kwargs):
if batch_size is None and time_steps is None:
# 2 time steps, batch size 1, depth 3
batch_size = 1
time_steps = 2
x = constant_op.constant(
[[[2., 2., 2.]], [[1., 1., 1.]]], dtype=dtypes.float32)
m = rnn_cell_impl.LSTMStateTuple(
*[constant_op.constant([[0.1, 0.1, 0.1]], dtype=dtypes.float32)] * 2)
else:
x = constant_op.constant(
np.random.randn(time_steps, batch_size, 3).astype(np.float32))
      m = rnn_cell_impl.LSTMStateTuple(*[
          constant_op.constant(
              [[0.1, 0.1, 0.1]] * batch_size, dtype=dtypes.float32)] * 2)
outputs, final_state = rnn.dynamic_rnn(
cell=wrapper_type(
rnn_cell_impl.LSTMCell(
3, initializer=init_ops.constant_initializer(0.5)),
dtype=x.dtype, **kwargs),
time_major=True,
parallel_iterations=parallel_iterations,
inputs=x,
initial_state=m,
scope=scope)
self.evaluate([variables_lib.global_variables_initializer()])
res = self.evaluate([outputs, final_state])
self.assertEqual(res[0].shape, (time_steps, batch_size, 3))
self.assertEqual(res[1].c.shape, (batch_size, 3))
self.assertEqual(res[1].h.shape, (batch_size, 3))
return res
def testDropoutWrapperProperties(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
cell = rnn_cell_impl.BasicRNNCell(10)
wrapper = wrapper_type(cell)
# Github issue 15810
self.assertEqual(wrapper.wrapped_cell, cell)
self.assertEqual(wrapper.state_size, 10)
self.assertEqual(wrapper.output_size, 10)
def testDropoutWrapperZeroState(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
class _Cell(rnn_cell_impl.BasicRNNCell):
def zero_state(self, batch_size=None, dtype=None):
return "wrapped_cell_zero_state"
wrapper = wrapper_type(_Cell(10))
self.assertEqual(wrapper.zero_state(10, dtypes.float32),
"wrapped_cell_zero_state")
def testDropoutWrapperKeepAllConstantInput(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep = array_ops.ones([])
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep,
wrapper_type=wrapper_type)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepAll(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep,
wrapper_type=wrapper_type)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperWithSeed(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep_some = 0.5
random_seed.set_random_seed(2)
## Use parallel_iterations = 1 in both calls to
## _testDropoutWrapper to ensure the (per-time step) dropout is
## consistent across both calls. Otherwise the seed may not end
## up being munged consistently across both graphs.
res_standard_1 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
seed=10,
parallel_iterations=1,
wrapper_type=wrapper_type,
scope="root_1")
random_seed.set_random_seed(2)
res_standard_2 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
seed=10,
parallel_iterations=1,
wrapper_type=wrapper_type,
scope="root_2")
self.assertAllClose(res_standard_1[0], res_standard_2[0])
self.assertAllClose(res_standard_1[1].c, res_standard_2[1].c)
self.assertAllClose(res_standard_1[1].h, res_standard_2[1].h)
def testDropoutWrapperKeepNoOutput(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-6)
res = self._testDropoutWrapper(
input_keep_prob=keep_all,
output_keep_prob=keep_none,
state_keep_prob=keep_all,
wrapper_type=wrapper_type)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(np.zeros(res[0].shape), res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepNoStateExceptLSTMCellMemory(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-6)
    # Even though we drop out the state, by default DropoutWrapper never
    # drops out the memory ("c") term of an LSTMStateTuple.
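    # (To also drop "c", one could supply a custom dropout_state_filter_visitor
    # that returns True for every substate, as exercised in
    # testDropoutWrapperSerialization.)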
res = self._testDropoutWrapper(
input_keep_prob=keep_all,
output_keep_prob=keep_all,
state_keep_prob=keep_none,
wrapper_type=wrapper_type)
true_c_state = np.array([[1.713925, 1.713925, 1.713925]], dtype=np.float32)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
self.assertAllClose(true_full_output[0], res[0][0])
# Second output is modified by zero input state
self.assertGreater(np.linalg.norm(true_full_output[1] - res[0][1]), 1e-4)
# h state has been set to zero
self.assertAllClose(np.zeros(res[1].h.shape), res[1].h)
# c state of an LSTMStateTuple is NEVER modified.
self.assertAllClose(true_c_state, res[1].c)
def testDropoutWrapperKeepNoInput(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-6)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
# All outputs are different because inputs are zeroed out
res = self._testDropoutWrapper(
input_keep_prob=keep_none,
output_keep_prob=keep_all,
state_keep_prob=keep_all,
wrapper_type=wrapper_type)
self.assertGreater(np.linalg.norm(res[0] - true_full_output), 1e-4)
self.assertGreater(np.linalg.norm(res[1].h - true_full_output[1]), 1e-4)
self.assertGreater(np.linalg.norm(res[1].c - true_full_final_c), 1e-4)
def testDropoutWrapperRecurrentOutput(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep_some = 0.8
keep_all = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep_all,
output_keep_prob=keep_some,
state_keep_prob=keep_all,
variational_recurrent=True,
wrapper_type=wrapper_type,
input_size=3,
batch_size=5,
time_steps=7)
# Ensure the same dropout pattern for all time steps
output_mask = np.abs(res[0]) > 1e-6
for m in output_mask[1:]:
self.assertAllClose(output_mask[0], m)
def testDropoutWrapperRecurrentStateInputAndOutput(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep_some = 0.9
res = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
variational_recurrent=True,
wrapper_type=wrapper_type,
input_size=3,
batch_size=5,
time_steps=7)
# Smoke test for the state/input masks.
output_mask = np.abs(res[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res[1].c) > 1e-6
state_h_mask = np.abs(res[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
def testDropoutWrapperRecurrentStateInputAndOutputWithSeed(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep_some = 0.9
random_seed.set_random_seed(2347)
np.random.seed(23487)
res0 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
variational_recurrent=True,
wrapper_type=wrapper_type,
input_size=3,
batch_size=5,
time_steps=7,
seed=-234987,
scope="root_0")
random_seed.set_random_seed(2347)
np.random.seed(23487)
res1 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
variational_recurrent=True,
wrapper_type=wrapper_type,
input_size=3,
batch_size=5,
time_steps=7,
seed=-234987,
scope="root_1")
output_mask = np.abs(res0[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res0[1].c) > 1e-6
state_h_mask = np.abs(res0[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
# Ensure seeded calculation is identical.
self.assertAllClose(res0[0], res1[0])
self.assertAllClose(res0[1].c, res1[1].c)
self.assertAllClose(res0[1].h, res1[1].h)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/rnn_cell_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for slice op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class SliceTest(test.TestCase):
def testEmpty(self):
inp = np.random.rand(4, 4).astype("f")
for k in xrange(4):
with self.cached_session(use_gpu=True):
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
slice_t = a[2, k:k]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[2, k:k])
def testInt32(self):
inp = np.random.rand(4, 4).astype("i")
for k in xrange(4):
with self.cached_session(use_gpu=True):
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32)
slice_t = a[2, k:k]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[2, k:k])
def testSlicingWithInt64Index(self):
with self.cached_session(force_gpu=test.is_gpu_available()):
a = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
# Slice using int64 Tensor.
i = constant_op.constant(1, dtype=dtypes.int64)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i+1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
# Slice using int64 integer.
i = np.asarray(1).astype(np.int64)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i+1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
a_int32 = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
slice_t = array_ops.slice(a_int32,
np.asarray([1]).astype(np.int64),
np.asarray([2]).astype(np.int64))
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1, 2], slice_val)
a_float32 = constant_op.constant([0, 1, 2], dtype=dtypes.float32)
slice_t = array_ops.slice(a_float32,
np.asarray([1]).astype(np.int64),
np.asarray([2]).astype(np.int64))
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1, 2], slice_val)
def testSlicingInt64Tensor(self):
with self.cached_session(force_gpu=test.is_gpu_available()):
a = constant_op.constant([0, 1, 2], dtype=dtypes.int64)
# Slice using int32 Tensor.
i = constant_op.constant(1, dtype=dtypes.int32)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i + 1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
# Slice using int32 integer.
i = np.asarray(1).astype(np.int32)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i + 1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
slice_t = array_ops.slice(a, [1], [2])
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1, 2], slice_val)
def testSelectAll(self):
for _ in range(10):
with self.cached_session(use_gpu=True):
inp = np.random.rand(4, 4, 4, 4).astype("f")
a = constant_op.constant(inp, shape=[4, 4, 4, 4], dtype=dtypes.float32)
slice_explicit_t = array_ops.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])
slice_implicit_t = a[:, :, :, :]
self.assertAllEqual(inp, self.evaluate(slice_explicit_t))
self.assertAllEqual(inp, self.evaluate(slice_implicit_t))
self.assertEqual(inp.shape, slice_explicit_t.get_shape())
self.assertEqual(inp.shape, slice_implicit_t.get_shape())
def testSingleDimension(self):
for _ in range(10):
with self.cached_session(use_gpu=True):
inp = np.random.rand(10).astype("f")
a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32)
hi = np.random.randint(0, 9)
scalar_t = a[hi]
scalar_val = self.evaluate(scalar_t)
self.assertAllEqual(scalar_val, inp[hi])
if hi > 0:
lo = np.random.randint(0, hi)
else:
lo = 0
slice_t = a[lo:hi]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[lo:hi])
@test_util.run_deprecated_v1
def testScalarInput(self):
input_val = 0
with self.cached_session() as sess:
# Test with constant input; shape inference fails.
with self.assertRaisesWithPredicateMatch(ValueError, "out of range"):
constant_op.constant(input_val)[:].get_shape()
# Test evaluating with non-constant input; kernel execution fails.
input_t = array_ops.placeholder(dtypes.int32)
slice_t = input_t[:]
with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,
"out of range"):
sess.run([slice_t], feed_dict={input_t: input_val})
@test_util.run_deprecated_v1
def testInvalidIndex(self):
input_val = [1, 2]
with self.cached_session() as sess:
# Test with constant input; shape inference fails.
with self.assertRaisesWithPredicateMatch(ValueError, "out of range"):
constant_op.constant(input_val)[1:, 1:].get_shape()
# Test evaluating with non-constant input; kernel execution fails.
input_t = array_ops.placeholder(dtypes.int32)
slice_t = input_t[1:, 1:]
with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,
"out of range"):
sess.run([slice_t], feed_dict={input_t: input_val})
def _testSliceMatrixDim0(self, x, begin, size):
with self.cached_session(use_gpu=True):
tf_ans = array_ops.slice(x, [begin, 0], [size, x.shape[1]]).eval()
np_ans = x[begin:begin + size, :]
self.assertAllEqual(tf_ans, np_ans)
@test_util.run_deprecated_v1
def testSliceMatrixDim0(self):
x = np.random.rand(8, 4).astype("f")
self._testSliceMatrixDim0(x, 1, 2)
self._testSliceMatrixDim0(x, 3, 3)
y = np.random.rand(8, 7).astype("f") # 7 * sizeof(float) is not aligned
self._testSliceMatrixDim0(y, 1, 2)
self._testSliceMatrixDim0(y, 3, 3)
def testSingleElementAll(self):
for _ in range(10):
with self.cached_session(use_gpu=True):
inp = np.random.rand(4, 4).astype("f")
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
x, y = np.random.randint(0, 3, size=2).tolist()
slice_t = a[x, 0:y]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[x, 0:y])
def testSimple(self):
with self.session(use_gpu=True) as sess:
inp = np.random.rand(4, 4).astype("f")
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=[4, 4],
dtype=dtypes.float32)
slice_t = array_ops.slice(a, [0, 0], [2, 2])
slice2_t = a[:2, :2]
slice_val, slice2_val = self.evaluate([slice_t, slice2_t])
self.assertAllEqual(slice_val, inp[:2, :2])
self.assertAllEqual(slice2_val, inp[:2, :2])
self.assertEqual(slice_val.shape, slice_t.get_shape())
self.assertEqual(slice2_val.shape, slice2_t.get_shape())
@test_util.run_deprecated_v1
def testComplex(self):
with self.session(use_gpu=True):
inp = np.random.rand(4, 10, 10, 4).astype("f")
a = constant_op.constant(inp, dtype=dtypes.float32)
x = np.random.randint(0, 9)
z = np.random.randint(0, 9)
if z > 0:
y = np.random.randint(0, z)
else:
y = 0
slice_t = a[:, x, y:z, :]
self.assertAllEqual(slice_t.eval(), inp[:, x, y:z, :])
def testRandom(self):
# Random dims of rank 6
input_shape = np.random.randint(0, 20, size=6)
inp = np.random.rand(*input_shape).astype("f")
with self.session(use_gpu=True) as sess:
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=input_shape,
dtype=dtypes.float32)
indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
sizes = [
np.random.randint(0, input_shape[i] - indices[i] + 1)
for i in range(6)
]
slice_t = array_ops.slice(a, indices, sizes)
      slice2_t = a[indices[0]:indices[0] + sizes[0],
                   indices[1]:indices[1] + sizes[1],
                   indices[2]:indices[2] + sizes[2],
                   indices[3]:indices[3] + sizes[3],
                   indices[4]:indices[4] + sizes[4],
                   indices[5]:indices[5] + sizes[5]]
slice_val, slice2_val = self.evaluate([slice_t, slice2_t])
      expected_val = inp[indices[0]:indices[0] + sizes[0],
                         indices[1]:indices[1] + sizes[1],
                         indices[2]:indices[2] + sizes[2],
                         indices[3]:indices[3] + sizes[3],
                         indices[4]:indices[4] + sizes[4],
                         indices[5]:indices[5] + sizes[5]]
self.assertAllEqual(slice_val, expected_val)
self.assertAllEqual(slice2_val, expected_val)
self.assertEqual(expected_val.shape, slice_t.get_shape())
self.assertEqual(expected_val.shape, slice2_t.get_shape())
def testPartialShapeInference(self):
z = array_ops.zeros((1, 2, 3))
self.assertAllEqual(z.get_shape().as_list(), [1, 2, 3])
m1 = array_ops.slice(z, [0, 0, 0], [-1, -1, -1])
self.assertAllEqual(m1.get_shape().as_list(), [1, 2, 3])
m2 = array_ops.slice(z, [0, 0, 0], [constant_op.constant(1) + 0, 2, -1])
self.assertAllEqual(m2.get_shape().as_list(), [1, 2, 3])
def _testGradientSlice(self, input_shape, slice_begin, slice_size):
with self.cached_session(use_gpu=True):
num_inputs = np.prod(input_shape)
num_grads = np.prod(slice_size)
inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=input_shape,
dtype=dtypes.float32)
slice_t = array_ops.slice(a, slice_begin, slice_size)
grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
grad_tensor = constant_op.constant(grads)
grad = gradients_impl.gradients(slice_t, [a], grad_tensor)[0]
result = self.evaluate(grad)
      # Create a zero tensor of the input shape and place
      # the grads into the right location to compare against TensorFlow.
np_ans = np.zeros(input_shape)
slices = []
for i in xrange(len(input_shape)):
slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
      np_ans[tuple(slices)] = grads
self.assertAllClose(np_ans, result)
def _testGradientVariableSize(self):
with self.cached_session(use_gpu=True):
inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
out = array_ops.slice(inp, [1], [-1])
grad_actual = gradients_impl.gradients(out, inp)[0].eval()
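      # With the default unit incoming gradients, the element dropped by the
      # slice receives gradient 0 while each kept element receives 1.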
self.assertAllClose([0., 1., 1.], grad_actual)
def _testGradientVariableSize2D(self):
# Regression test for bug in slice. A low-level bug in Eigen was causing
# incorrect results for negative indices in multi-dimensional tensors.
# See b/114318298.
with self.cached_session(use_gpu=True) as sess:
x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
loss2 = math_ops.reduce_sum(x[:-1][:, :-1])
g1 = gradients_impl.gradients(loss1, x)[0]
g2 = gradients_impl.gradients(loss2, x)[0]
g1_val, g2_val = self.evaluate([g1, g2])
self.assertAllEqual(g1_val, g2_val)
@test_util.run_deprecated_v1
def testGradientsAll(self):
# Slice the middle square out of a 4x4 input
self._testGradientSlice([4, 4], [1, 1], [2, 2])
# Slice the upper left square out of a 4x4 input
self._testGradientSlice([4, 4], [0, 0], [2, 2])
# Slice a non-square input starting from (2,1)
self._testGradientSlice([4, 4], [2, 1], [1, 2])
# Slice a 3D tensor
self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1])
# Use -1 as a slice dimension.
self._testGradientVariableSize()
# Use -1 as a slice dimension on a 2D tensor.
self._testGradientVariableSize2D()
@test_util.run_deprecated_v1
def testNotIterable(self):
# NOTE(mrry): If we register __getitem__ as an overloaded
# operator, Python will valiantly attempt to iterate over the
# Tensor from 0 to infinity. This test ensures that this
# unintended behavior is prevented.
c = constant_op.constant(5.0)
with self.assertRaisesWithPredicateMatch(
TypeError, lambda e: "Tensor objects are only iterable" in str(e)):
for _ in c:
pass
@test_util.run_deprecated_v1
def testComputedShape(self):
# NOTE(mrry): We cannot currently handle partially-known values,
# because `tf.slice()` uses -1 to specify a wildcard size, and
# this can't be handled using the
# `tensor_util.constant_value_as_shape()` trick.
a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
begin = constant_op.constant(0)
size = constant_op.constant(1)
b = array_ops.slice(a, [begin, 0], [size, 2])
self.assertEqual([1, 2], b.get_shape())
begin = array_ops.placeholder(dtypes.int32, shape=())
c = array_ops.slice(a, [begin, 0], [-1, 2])
self.assertEqual([None, 2], c.get_shape().as_list())
def testSliceOfSlice(self):
with self.session(use_gpu=True):
a = constant_op.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
b = a[1:, :]
c = b[:-1, :]
d = c[1, :]
res = 2 * d - c[1, :] + a[2, :] - 2 * b[-2, :]
self.assertAllEqual([0, 0, 0], self.evaluate(res))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/slice_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test as test_lib
def _AddTest(test, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
def _GenerateTestData(matrix_shape, num_rhs):
batch_shape = matrix_shape[:-2]
matrix_shape = matrix_shape[-2:]
m = matrix_shape[-2]
np.random.seed(1)
matrix = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(matrix_shape)).reshape(matrix_shape).astype(np.float32)
rhs = np.ones([m, num_rhs]).astype(np.float32)
matrix = variables.Variable(
np.tile(matrix, batch_shape + (1, 1)), trainable=False)
rhs = variables.Variable(np.tile(rhs, batch_shape + (1, 1)), trainable=False)
return matrix, rhs
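# Note: the same single system is tiled across the batch above, so every batch
# entry in the benchmark solves an identical problem.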
def _SolveWithNumpy(matrix, rhs, l2_regularizer=0):
if l2_regularizer == 0:
np_ans, _, _, _ = np.linalg.lstsq(matrix, rhs)
return np_ans
else:
rows = matrix.shape[-2]
cols = matrix.shape[-1]
if rows >= cols:
preconditioner = l2_regularizer * np.identity(cols)
gramian = np.dot(np.conj(matrix.T), matrix) + preconditioner
rhs = np.dot(np.conj(matrix.T), rhs)
return np.linalg.solve(gramian, rhs)
else:
preconditioner = l2_regularizer * np.identity(rows)
gramian = np.dot(matrix, np.conj(matrix.T)) + preconditioner
z = np.linalg.solve(gramian, rhs)
return np.dot(np.conj(matrix.T), z)
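# For reference, the branches above implement the regularized least-squares
# identities: for rows >= cols, solve the normal equations
# (A^H A + l2 I) x = A^H b; for rows < cols, return the minimum-norm solution
# x = A^H (A A^H + l2 I)^{-1} b. Both reduce to ordinary least squares as
# l2 -> 0.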
class MatrixSolveLsOpTest(test_lib.TestCase):
def _verifySolve(self,
x,
y,
dtype,
use_placeholder,
fast,
l2_regularizer,
batch_shape=()):
if not fast and l2_regularizer != 0:
# The slow path does not support regularization.
return
maxdim = np.max(x.shape)
if dtype == np.float32 or dtype == np.complex64:
tol = maxdim * 5e-4
else:
tol = maxdim * 5e-7
a = x.astype(dtype)
b = y.astype(dtype)
if dtype in [np.complex64, np.complex128]:
a.imag = a.real
b.imag = b.real
    # numpy.linalg.lstsq does not support batching, so we just solve a single
    # system and replicate the solution and residual norm.
np_ans = _SolveWithNumpy(x, y, l2_regularizer=l2_regularizer)
np_r = np.dot(np.conj(a.T), b - np.dot(a, np_ans))
np_r_norm = np.sqrt(np.sum(np.conj(np_r) * np_r))
    if batch_shape != ():
a = np.tile(a, batch_shape + (1, 1))
b = np.tile(b, batch_shape + (1, 1))
np_ans = np.tile(np_ans, batch_shape + (1, 1))
np_r_norm = np.tile(np_r_norm, batch_shape)
with self.cached_session(use_gpu=fast) as sess:
if use_placeholder:
a_ph = array_ops.placeholder(dtypes.as_dtype(dtype))
b_ph = array_ops.placeholder(dtypes.as_dtype(dtype))
feed_dict = {a_ph: a, b_ph: b}
tf_ans = linalg_ops.matrix_solve_ls(
a_ph, b_ph, fast=fast, l2_regularizer=l2_regularizer)
else:
tf_ans = linalg_ops.matrix_solve_ls(
a, b, fast=fast, l2_regularizer=l2_regularizer)
feed_dict = {}
self.assertEqual(np_ans.shape, tf_ans.get_shape())
if l2_regularizer == 0:
# The least squares solution should satisfy A^H * (b - A*x) = 0.
tf_r = b - math_ops.matmul(a, tf_ans)
tf_r = math_ops.matmul(a, tf_r, adjoint_a=True)
tf_r_norm = linalg_ops.norm(tf_r, ord="fro", axis=[-2, -1])
tf_ans_val, tf_r_norm_val = sess.run(
[tf_ans, tf_r_norm], feed_dict=feed_dict)
self.assertAllClose(np_r_norm, tf_r_norm_val, atol=tol, rtol=tol)
else:
tf_ans_val = sess.run(tf_ans, feed_dict=feed_dict)
self.assertEqual(np_ans.shape, tf_ans_val.shape)
self.assertAllClose(np_ans, tf_ans_val, atol=2 * tol, rtol=2 * tol)
@test_util.run_v1_only("b/120545219")
def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows.
with self.session(use_gpu=True):
matrix = constant_op.constant([[1., 0.], [0., 1.]])
rhs = constant_op.constant([[1., 0.]])
with self.assertRaises(ValueError):
linalg_ops.matrix_solve_ls(matrix, rhs)
def testEmpty(self):
full = np.array([[1., 2.], [3., 4.], [5., 6.]])
empty0 = np.empty([3, 0])
empty1 = np.empty([0, 2])
for fast in [True, False]:
with self.cached_session(use_gpu=True):
tf_ans = self.evaluate(
linalg_ops.matrix_solve_ls(empty0, empty0, fast=fast))
self.assertEqual(tf_ans.shape, (0, 0))
tf_ans = self.evaluate(
linalg_ops.matrix_solve_ls(empty0, full, fast=fast))
self.assertEqual(tf_ans.shape, (0, 2))
tf_ans = self.evaluate(
linalg_ops.matrix_solve_ls(full, empty0, fast=fast))
self.assertEqual(tf_ans.shape, (2, 0))
tf_ans = self.evaluate(
linalg_ops.matrix_solve_ls(empty1, empty1, fast=fast))
self.assertEqual(tf_ans.shape, (2, 2))
@test_util.run_v1_only("b/120545219")
def testBatchResultSize(self):
# 3x3x3 matrices, 3x3x1 right-hand sides.
matrix = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9.] * 3).reshape(3, 3, 3)
rhs = np.array([1., 2., 3.] * 3).reshape(3, 3, 1)
answer = linalg_ops.matrix_solve(matrix, rhs)
ls_answer = linalg_ops.matrix_solve_ls(matrix, rhs)
self.assertEqual(ls_answer.get_shape(), [3, 3, 1])
self.assertEqual(answer.get_shape(), [3, 3, 1])
def _GetSmallMatrixSolveLsOpTests(dtype, use_placeholder, fast, l2_regularizer):
def Square(self):
# 2x2 matrices, 2x3 right-hand sides.
matrix = np.array([[1., 2.], [3., 4.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.]])
for batch_shape in (), (2, 3):
self._verifySolve(
matrix,
rhs,
dtype,
use_placeholder,
fast,
l2_regularizer,
batch_shape=batch_shape)
def Overdetermined(self):
    # 3x2 matrices, 3x3 right-hand sides.
matrix = np.array([[1., 2.], [3., 4.], [5., 6.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.], [1., 1., 0.]])
for batch_shape in (), (2, 3):
self._verifySolve(
matrix,
rhs,
dtype,
use_placeholder,
fast,
l2_regularizer,
batch_shape=batch_shape)
def Underdetermined(self):
    # 2x3 matrices, 2x3 right-hand sides.
matrix = np.array([[1., 2., 3], [4., 5., 6.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.]])
for batch_shape in (), (2, 3):
self._verifySolve(
matrix,
rhs,
dtype,
use_placeholder,
fast,
l2_regularizer,
batch_shape=batch_shape)
return (Square, Overdetermined, Underdetermined)
def _GetLargeMatrixSolveLsOpTests(dtype, use_placeholder, fast, l2_regularizer):
def LargeBatchSquare(self):
np.random.seed(1)
num_rhs = 1
matrix_shape = (127, 127)
matrix = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(matrix_shape)).reshape(matrix_shape).astype(np.float32)
rhs = np.ones([matrix_shape[0], num_rhs]).astype(np.float32)
self._verifySolve(
matrix,
rhs,
dtype,
use_placeholder,
fast,
l2_regularizer,
batch_shape=(16, 8))
def LargeBatchOverdetermined(self):
np.random.seed(1)
num_rhs = 1
matrix_shape = (127, 64)
matrix = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(matrix_shape)).reshape(matrix_shape).astype(np.float32)
rhs = np.ones([matrix_shape[0], num_rhs]).astype(np.float32)
self._verifySolve(
matrix,
rhs,
dtype,
use_placeholder,
fast,
l2_regularizer,
batch_shape=(16, 8))
def LargeBatchUnderdetermined(self):
np.random.seed(1)
num_rhs = 1
matrix_shape = (64, 127)
matrix = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(matrix_shape)).reshape(matrix_shape).astype(np.float32)
rhs = np.ones([matrix_shape[0], num_rhs]).astype(np.float32)
self._verifySolve(
matrix,
rhs,
dtype,
use_placeholder,
fast,
l2_regularizer,
batch_shape=(16, 8))
return (LargeBatchSquare, LargeBatchOverdetermined, LargeBatchUnderdetermined)
class MatrixSolveLsBenchmark(test_lib.Benchmark):
matrix_shapes = [
(4, 4),
(8, 4),
(4, 8),
(10, 10),
(10, 8),
(8, 10),
(16, 16),
(16, 10),
(10, 16),
(101, 101),
(101, 31),
(31, 101),
(256, 256),
(256, 200),
(200, 256),
(1001, 1001),
(1001, 501),
(501, 1001),
(1024, 1024),
(1024, 128),
(128, 1024),
(2048, 2048),
(2048, 64),
(64, 2048),
(513, 4, 4),
(513, 4, 2),
(513, 2, 4),
(513, 16, 16),
(513, 16, 10),
(513, 10, 16),
(513, 256, 256),
(513, 256, 128),
(513, 128, 256),
]
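  # Shapes with three entries (e.g. (513, 4, 4)) exercise the batched kernels;
  # the leading entry is the batch size.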
def benchmarkMatrixSolveLsOp(self):
run_gpu_test = test_lib.is_gpu_available(True)
regularizer = 1.0
for matrix_shape in self.matrix_shapes:
for num_rhs in 1, 2, matrix_shape[-1]:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix, rhs = _GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve_ls(matrix, rhs, regularizer)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_ls_cpu_shape_{matrix_shape}_num_rhs_{num_rhs}"
).format(matrix_shape=matrix_shape, num_rhs=num_rhs))
if run_gpu_test and (len(matrix_shape) < 3 or matrix_shape[0] < 513):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/gpu:0"):
matrix, rhs = _GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve_ls(matrix, rhs, regularizer)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_ls_gpu_shape_{matrix_shape}_num_rhs_"
"{num_rhs}").format(
matrix_shape=matrix_shape, num_rhs=num_rhs))
if __name__ == "__main__":
for dtype_ in [np.float32, np.float64, np.complex64, np.complex128]:
    # TF2 does not support placeholders under eager execution, so skip them.
for use_placeholder_ in set([False, not tf2.enabled()]):
for fast_ in [True, False]:
l2_regularizers = [0] if dtype_ == np.complex128 else [0, 0.1]
for l2_regularizer_ in l2_regularizers:
for test_case in _GetSmallMatrixSolveLsOpTests(
dtype_, use_placeholder_, fast_, l2_regularizer_):
name = "%s_%s_placeholder_%s_fast_%s_regu_%s" % (test_case.__name__,
dtype_.__name__,
use_placeholder_,
fast_,
l2_regularizer_)
_AddTest(MatrixSolveLsOpTest, "MatrixSolveLsOpTest", name,
test_case)
for dtype_ in [np.float32, np.float64, np.complex64, np.complex128]:
for test_case in _GetLargeMatrixSolveLsOpTests(dtype_, False, True, 0.0):
name = "%s_%s" % (test_case.__name__, dtype_.__name__)
_AddTest(MatrixSolveLsOpTest, "MatrixSolveLsOpTest", name, test_case)
test_lib.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Concat Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ConcatOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testHStack(self):
with self.session(use_gpu=True):
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
c = array_ops.concat([p1, p2], 0)
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
}
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:4, :], params[p1])
self.assertAllEqual(result[4:, :], params[p2])
@test_util.run_deprecated_v1
def testVStack(self):
with self.session(use_gpu=True):
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
c = array_ops.concat([p1, p2], 1)
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
}
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:, :4], params[p1])
self.assertAllEqual(result[:, 4:], params[p2])
def testInt32GPU(self):
with test_util.use_gpu():
p1 = np.random.rand(2, 3).astype("i")
p2 = np.random.rand(2, 3).astype("i")
x1 = constant_op.constant(p1)
x2 = constant_op.constant(p2)
c = array_ops.concat([x1, x2], 0)
result = self.evaluate(c)
self.assertAllEqual(result[:2, :], p1)
self.assertAllEqual(result[2:, :], p2)
def testRefType(self):
with test_util.use_gpu():
p1 = np.random.rand(4, 4).astype("f")
p2 = np.random.rand(4, 4).astype("f")
v1 = variables.Variable(p1)
v2 = variables.Variable(p2)
c = array_ops.concat([v1, v2], 0)
self.evaluate(variables.global_variables_initializer())
result = self.evaluate(c)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:4, :], p1)
self.assertAllEqual(result[4:, :], p2)
def _testRandom(self, dtype):
# Random dims of rank 5
shape = np.random.randint(1, 5, size=5)
# Random number of tensors, but always > 1.
num_tensors = np.random.randint(2, 10)
# Random dim to concat on
concat_dim = np.random.randint(5)
params = {}
if dtype == dtypes.bfloat16:
dtype_feed = dtypes.float32
else:
dtype_feed = dtype
with self.session(use_gpu=True):
p = []
for i in np.arange(num_tensors):
input_shape = shape
input_shape[concat_dim] = np.random.randint(1, 5)
placeholder = array_ops.placeholder(dtype_feed, shape=input_shape)
p.append(placeholder)
t = dtype_feed.as_numpy_dtype
params[placeholder] = np.random.rand(*input_shape).astype(t)
if dtype != dtype_feed:
concat_inputs = [math_ops.cast(p_i, dtype) for p_i in p]
else:
concat_inputs = p
c = array_ops.concat(concat_inputs, concat_dim)
if dtype != dtype_feed:
c = math_ops.cast(c, dtype_feed)
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
cur_offset = 0
for i in np.arange(num_tensors):
# The index into the result is the ':' along all dimensions
# except the concat_dim. slice(0, size) is used for ':', and
# a list of slices is used to index into result.
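        # For example, with concat_dim=1 and a piece of shape (2, 3, 4, 4, 4),
        # ind is [slice(0, 2), slice(cur_offset, cur_offset + 3), slice(0, 4),
        # slice(0, 4), slice(0, 4)], i.e. result[:2, off:off + 3, :4, :4, :4].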
ind = [slice(0, params[p[i]].shape[j]) for j in np.arange(5)]
ind[concat_dim] = slice(cur_offset,
cur_offset + params[p[i]].shape[concat_dim])
cur_offset += params[p[i]].shape[concat_dim]
if dtype == dtype_feed:
self.assertAllEqual(result[ind], params[p[i]])
else:
self.assertAllClose(result[ind], params[p[i]], 0.01)
@test_util.run_deprecated_v1
def testRandom(self):
self._testRandom(dtypes.bool)
self._testRandom(dtypes.float32)
self._testRandom(dtypes.int16)
self._testRandom(dtypes.int32)
self._testRandom(dtypes.int64)
self._testRandom(dtypes.bfloat16)
self._testRandom(dtypes.complex64)
self._testRandom(dtypes.complex128)
@test_util.run_deprecated_v1
def testInvalidConcatDimTypeAndShape(self):
a = variables.Variable(constant_op.constant(1.0, shape=[1]))
b = variables.Variable(constant_op.constant(2.0, shape=[1]))
with self.assertRaises(ValueError):
array_ops.concat(b, a)
with self.assertRaises(TypeError):
array_ops.concat(1, 4.2)
with self.assertRaises(ValueError):
array_ops.concat(1, a)
with self.assertRaises(TypeError):
array_ops.concat([a, b], a)
with self.assertRaises(ValueError):
array_ops.concat([a, b], [3])
with self.assertRaises(ValueError):
array_ops.concat([], 0)
    # An integer scalar tensor for the concat axis should throw no error.
array_ops.concat(1, constant_op.constant(0, shape=[]))
    # A non-scalar tensor for the concat axis should throw ValueError.
with self.assertRaises(ValueError):
array_ops.concat(1, constant_op.constant(0, shape=[1]))
def _testGradientsSimple(self, dtype):
# Test both positive and negative concat axis.
# -2 and 1 correspond to the same axis for 3-dimensional tensors.
for axis in [-2, 1]:
with test_util.use_gpu():
inp = []
inp_tensors = []
for x in [1, 2, 6]:
shape = [10, x, 2]
t = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
t += -1j * t
inp.append(t)
inp_tensors.append(
constant_op.constant(
t.flatten(),
shape=shape,
dtype=dtype))
c = array_ops.concat(inp_tensors, axis)
output_shape = [10, 9, 2]
grad_inp = np.random.rand(*output_shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
grad_inp += -1j * grad_inp
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, axis)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
@test_util.run_deprecated_v1
def testGradientsSimple(self):
self._testGradientsSimple(dtypes.float32)
self._testGradientsSimple(dtypes.complex64)
@test_util.run_deprecated_v1
def testGradientsFirstDim(self):
with test_util.use_gpu():
inp = []
inp_tensors = []
for x in [1, 2, 6]:
shape = [x, 10, 2]
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(
t.flatten(),
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, 0)
output_shape = [9, 10, 2]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, 0)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
@test_util.run_deprecated_v1
def testGradientsLastDim(self):
# Test both positive and negative concat axis.
# -1 and 2 correspond to the same axis for 3-dimensional tensors.
for axis in [-1, 2]:
with test_util.use_gpu():
inp = []
inp_tensors = []
for x in [1, 2, 6]:
shape = [10, 2, x]
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(
t.flatten(),
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, 2)
output_shape = [10, 2, 9]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, axis)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
def _RunAndVerifyGradientsRandom(self):
# Random dims of rank 5
input_shape = np.random.randint(1, 5, size=5)
# Random number of tensors
num_tensors = np.random.randint(12, 20)
# Random dim to concat on
concat_dim = np.random.randint(5)
concat_dim_sizes = np.random.randint(1, 5, size=num_tensors)
with test_util.use_gpu():
inp = []
inp_tensors = []
for x in concat_dim_sizes:
shape = input_shape
shape[concat_dim] = x
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(t.flatten(), shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, concat_dim)
output_shape = input_shape
output_shape[concat_dim] = concat_dim_sizes.sum()
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, concat_dim)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
@test_util.run_deprecated_v1
def testGradientsRandom(self):
for _ in range(5):
self._RunAndVerifyGradientsRandom()
@test_util.run_deprecated_v1
def testGradientWithUnknownInputDim(self):
with self.session(use_gpu=True):
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
c = array_ops.concat([x, y], 2)
output_shape = [10, 2, 9]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], [x, y], [grad_tensor])
concated_grad = array_ops.concat(grad, 2)
params = {
x: np.random.rand(10, 2, 3).astype("f"),
y: np.random.rand(10, 2, 6).astype("f")
}
result = concated_grad.eval(feed_dict=params)
self.assertAllEqual(result, grad_inp)
@test_util.run_deprecated_v1
def testShapeError(self):
# Rank doesn't match.
with self.assertRaises(ValueError):
array_ops.concat(
[constant_op.constant(10.0, shape=[4, 4, 4, 4]),
constant_op.constant(20.0, shape=[4, 4, 4])
], 1)
# Dimensions don't match in a non-concat dim.
with self.assertRaises(ValueError):
array_ops.concat(
[constant_op.constant(10.0, shape=[1, 2, 1]),
constant_op.constant(20.0, shape=[3, 2, 1])
], 1)
# concat_dim out of range.
with self.assertRaises(ValueError):
array_ops.concat(
[constant_op.constant(10.0, shape=[4, 4, 4]),
constant_op.constant(20.0, shape=[4, 4, 4])
], 3)
# concat_dim out of range
with self.assertRaises(ValueError):
array_ops.concat(
[constant_op.constant(10.0, shape=[4, 4, 4]),
constant_op.constant(20.0, shape=[4, 4, 4])
], -4)
@test_util.run_deprecated_v1
def testShapeWithUnknownConcatDim(self):
p1 = array_ops.placeholder(dtypes.float32)
c1 = constant_op.constant(10.0, shape=[4, 4, 4, 4])
p2 = array_ops.placeholder(dtypes.float32)
c2 = constant_op.constant(20.0, shape=[4, 4, 4, 4])
dim = array_ops.placeholder(dtypes.int32)
concat = array_ops.concat([p1, c1, p2, c2], dim)
self.assertEqual(4, concat.get_shape().ndims)
# All dimensions unknown.
concat2 = array_ops.concat([p1, p2], dim)
self.assertEqual(None, concat2.get_shape())
# Rank doesn't match.
c3 = constant_op.constant(30.0, shape=[4, 4, 4])
with self.assertRaises(ValueError):
array_ops.concat([p1, c1, p2, c3], dim)
@test_util.run_deprecated_v1
def testZeroSize(self):
# Verify that concat doesn't crash and burn for zero size inputs
np.random.seed(7)
with test_util.use_gpu():
for shape0 in (), (2,):
axis = len(shape0)
for shape1 in (), (3,):
for n0 in 0, 1, 2:
for n1 in 0, 1, 2:
x0 = np.random.randn(*(shape0 + (n0,) + shape1))
x1 = np.random.randn(*(shape0 + (n1,) + shape1))
correct = np.concatenate([x0, x1], axis=axis)
# TODO(irving): Make tf.concat handle map, then drop list().
xs = list(map(constant_op.constant, [x0, x1]))
c = array_ops.concat(xs, axis)
self.assertAllEqual(self.evaluate(c), correct)
# Check gradients
dc = np.random.randn(*c.get_shape().as_list())
dxs = self.evaluate(gradients_impl.gradients(c, xs, dc))
self.assertAllEqual(dc, np.concatenate(dxs, axis=axis))
@test_util.run_deprecated_v1
def testTensorConcatDim0Grad(self):
x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]
output_shape = [44, 7, 3]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.cached_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
output = array_ops.concat(xs, 0)
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
@test_util.run_deprecated_v1
def testTensorConcatDim1Grad(self):
x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
output_shape = [20, 11, 3]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.cached_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
output = array_ops.concat(xs, 1)
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
@test_util.run_deprecated_v1
def testIndexedSlicesConcatDim0Grad(self):
x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]
output_shape = [4, 7, 3]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.cached_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
x_concat = array_ops.concat(xs, 0)
output = array_ops.gather(x_concat, [1, 2, 0, 5])
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
@test_util.run_deprecated_v1
def testIndexedSlicesConcatDim1Grad(self):
x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
output_shape = [4, 11, 3]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.cached_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
x_concat = array_ops.concat(xs, 1)
output = array_ops.gather(x_concat, [1, 2, 0, 5])
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
@test_util.run_deprecated_v1
def testIndexedSlicesConcatDim2Grad(self):
x_shapes = [[20, 7, 3], [20, 7, 1], [20, 7, 2]]
output_shape = [4, 7, 6]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.cached_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
x_concat = array_ops.concat(xs, 2)
output = array_ops.gather(x_concat, [1, 2, 0, 5])
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
@test_util.run_deprecated_v1
def testIndexedSlicesConcatDim1Grad_UnknownInputDim(self):
x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
output_shape = [4, 11, 3]
with self.cached_session():
x_1 = array_ops.placeholder(dtypes.float64)
x_2 = array_ops.placeholder(dtypes.float64)
x_3 = array_ops.placeholder(dtypes.float64)
xs = [x_1, x_2, x_3]
x_concat = array_ops.concat(xs, 1)
output = array_ops.gather(x_concat, [1, 2, 0, 5])
params = {
x_1: np.random.random_sample(x_shapes[0]).astype(np.float64),
x_2: np.random.random_sample(x_shapes[1]).astype(np.float64),
x_3: np.random.random_sample(x_shapes[2]).astype(np.float64)
}
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape,
extra_feed_dict=params)
self.assertLess(err, 1e-11)
def testConcatTuple(self):
c1 = np.random.rand(4, 4)
c2 = np.random.rand(4, 4)
concat_list_t = array_ops.concat([c1, c2], 0)
concat_tuple_t = array_ops.concat((c1, c2), 0)
self.assertAllEqual(
self.evaluate(concat_list_t), self.evaluate(concat_tuple_t))
@test_util.run_deprecated_v1
def testConcatNoScalars(self):
scalar = constant_op.constant(7)
dim = array_ops.placeholder(dtypes.int32)
with self.assertRaisesRegexp(
ValueError, r"Can't concatenate scalars \(use tf\.stack instead\)"):
array_ops.concat([scalar, scalar, scalar], dim)
  # This test matters because the GPU implementation could fail if shared
  # memory is not large enough for all the inputs.
@test_util.run_deprecated_v1
def testConcatLargeNumberOfTensors(self):
with self.session(use_gpu=True):
for concat_dim in range(2):
params = {}
p = []
shape = np.array([7, 13])
if test.is_gpu_available():
num_tensors = 5000
else:
num_tensors = 500
for i in np.arange(num_tensors):
input_shape = shape
placeholder = array_ops.placeholder(dtypes.float32, shape=input_shape)
p.append(placeholder)
params[placeholder] = np.random.rand(*input_shape).astype(np.float32)
concat_inputs = p
c = array_ops.concat(concat_inputs, concat_dim)
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
cur_offset = 0
for i in np.arange(num_tensors):
# The index into the result is the ':' along all dimensions
# except the concat_dim. slice(0, size) is used for ':', and
# a list of slices is used to index into result.
index = [slice(0, params[p[i]].shape[j]) for j in np.arange(2)]
index[concat_dim] = slice(cur_offset,
cur_offset + params[p[i]].shape[concat_dim])
cur_offset += params[p[i]].shape[concat_dim]
self.assertAllEqual(result[index], params[p[i]])
def testConcatEmpty(self):
with test_util.use_gpu():
t1 = []
t2 = []
output = gen_array_ops.concat_v2([t1, t2], 0)
self.assertFalse(self.evaluate(output)) # Checks that output is empty
@test_util.run_deprecated_v1
def testConcatInvalidAxis(self):
with self.assertRaises(ValueError):
with test_util.use_gpu():
t1 = [1]
t2 = [2]
gen_array_ops.concat_v2([t1, t2], 1).eval()
def testConcatNegativeAxis(self):
with test_util.use_gpu():
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
c = gen_array_ops.concat_v2([t1, t2], -2)
self.assertEqual([4, 3], c.get_shape().as_list())
output = self.evaluate(c)
self.assertAllEqual([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
output)
c = gen_array_ops.concat_v2([t1, t2], -1)
self.assertEqual([2, 6], c.get_shape().as_list())
output = self.evaluate(c)
self.assertAllEqual([[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], output)
def _testGradientsForAxis(
self, inp_tensors, axis, output_shape, feed_dict=None):
with self.cached_session():
c = array_ops.concat(inp_tensors, axis)
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, axis)
result = concated_grad.eval(feed_dict=feed_dict)
self.assertAllEqual(result, grad_inp)
def _testIndexedSlicesGradientsForAxis(
self, inp_tensors, axis, output_shape, gather_indexes, feed_dict=None):
with self.cached_session():
c = array_ops.gather(
array_ops.concat(inp_tensors, axis), gather_indexes)
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.gather(
array_ops.concat(grad, axis), gather_indexes)
result = concated_grad.eval(feed_dict=feed_dict)
self.assertAllEqual(result, grad_inp)
@test_util.run_deprecated_v1
def testGradientsNegativeAxis(self):
x1 = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
x2 = [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]
inp_tensors = [constant_op.constant(x1, shape=(2, 3), dtype=dtypes.float32),
constant_op.constant(x2, shape=(2, 3), dtype=dtypes.float32)]
# Test concat gradient with axis == -2
self._testGradientsForAxis(inp_tensors, -2, output_shape=[4, 3])
# Test concat gradient with unknown-shape tensors.
x1_placeholder = array_ops.placeholder(dtypes.float32)
x2_placeholder = array_ops.placeholder(dtypes.float32)
inp_tensors_placeholders = [x1_placeholder, x2_placeholder]
feed_dict = {x1_placeholder: x1, x2_placeholder: x2}
self._testGradientsForAxis(
inp_tensors_placeholders, -1, output_shape=[2, 6], feed_dict=feed_dict)
# Test IndexedSlices concat gradient.
self._testIndexedSlicesGradientsForAxis(
inp_tensors, -2, output_shape=[2, 3], gather_indexes=[2, 0])
# We don't support calculating IndexedSlices concat gradient for
# negative indexes when rank is not known.
with self.assertRaises(ValueError):
self._testIndexedSlicesGradientsForAxis(
inp_tensors_placeholders, -2, output_shape=[2, 3],
gather_indexes=[2, 0], feed_dict=feed_dict)
def testConcatAxisType(self):
for dtype in [dtypes.int32, dtypes.int64]:
with test_util.use_gpu():
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
c = gen_array_ops.concat_v2([t1, t2],
constant_op.constant(1, dtype=dtype))
self.assertEqual([2, 6], c.get_shape().as_list())
output = self.evaluate(c)
self.assertAllEqual([[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], output)
class ConcatOffsetTest(test.TestCase):
def testBasic(self):
with test_util.use_gpu():
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
s2 = constant_op.constant([2, 20, 5], dtypes.int32)
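      # concat_offset returns, for each input, the starting index of that
      # input in the concatenated output along cdim: the sizes along axis 1
      # are 3, 7, and 20, so the offsets are 0, 3, and 10.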
off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
ans = self.evaluate(off)
self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
@test_util.run_deprecated_v1
def testNotVector(self):
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([[2, 3, 5]], dtypes.int32)
s1 = constant_op.constant([[2, 7, 5]], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"should be a vector"):
self.evaluate(off)
@test_util.run_deprecated_v1
def testConcatDimOutOfRange(self):
cdim = constant_op.constant(4, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Concat dim is out of range: 4 vs. 3"):
self.evaluate(off)
@test_util.run_deprecated_v1
def testDimMismatch(self):
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5, 10], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"should contain 3 elem"):
self.evaluate(off)
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123337890") # Error messages differ
def testSizeMismatch(self):
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 10], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
r"All dimensions except 1 must match. Input 1 has shape \[2 7 10\] "
r"and doesn't match input 0 with shape \[2 3 5\]."):
self.evaluate(off)
def testNegativeDim(self):
with test_util.use_gpu():
cdim = constant_op.constant(-2, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
s2 = constant_op.constant([2, 20, 5], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
ans = self.evaluate(off)
self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
cdim = constant_op.constant(-3, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([1, 3, 5], dtypes.int32)
s2 = constant_op.constant([3, 3, 5], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
ans = self.evaluate(off)
self.assertAllEqual(ans, [[0, 0, 0], [2, 0, 0], [3, 0, 0]])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/concat_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for manip_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import manip_ops
from tensorflow.python.platform import test as test_lib
# pylint: disable=g-import-not-at-top
try:
from distutils.version import StrictVersion as Version
# numpy.roll for multiple shifts was introduced in numpy version 1.12.0
NP_ROLL_CAN_MULTISHIFT = Version(np.version.version) >= Version("1.12.0")
except ImportError:
NP_ROLL_CAN_MULTISHIFT = False
# pylint: enable=g-import-not-at-top
class RollTest(test_util.TensorFlowTestCase):
def _testRoll(self, np_input, shift, axis):
expected_roll = np.roll(np_input, shift, axis)
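    # E.g. np.roll([1, 2, 3, 4, 5], shift=2, axis=0) -> [4, 5, 1, 2, 3]:
    # elements move toward higher indices and wrap around.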
with self.cached_session():
roll = manip_ops.roll(np_input, shift, axis)
self.assertAllEqual(roll.eval(), expected_roll)
def _testGradient(self, np_input, shift, axis):
with self.cached_session():
inx = constant_op.constant(np_input.tolist())
xs = list(np_input.shape)
y = manip_ops.roll(inx, shift, axis)
      # y is expected to have the same shape as x.
ys = xs
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, y, ys, x_init_value=np_input)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testAll(self, np_input, shift, axis):
self._testRoll(np_input, shift, axis)
if np_input.dtype == np.float32:
self._testGradient(np_input, shift, axis)
@test_util.run_deprecated_v1
def testIntTypes(self):
for t in [np.int32, np.int64]:
self._testAll(np.random.randint(-100, 100, (5)).astype(t), 3, 0)
if NP_ROLL_CAN_MULTISHIFT:
self._testAll(
np.random.randint(-100, 100, (4, 4, 3)).astype(t), [1, -2, 3],
[0, 1, 2])
self._testAll(
np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t), [0, 1, -2],
[1, 2, 3])
@test_util.run_deprecated_v1
def testFloatTypes(self):
for t in [np.float32, np.float64]:
self._testAll(np.random.rand(5).astype(t), 2, 0)
if NP_ROLL_CAN_MULTISHIFT:
self._testAll(np.random.rand(3, 4).astype(t), [1, 2], [1, 0])
self._testAll(np.random.rand(1, 3, 4).astype(t), [1, 0, -3], [0, 1, 2])
@test_util.run_deprecated_v1
def testComplexTypes(self):
for t in [np.complex64, np.complex128]:
x = np.random.rand(4, 4).astype(t)
self._testAll(x + 1j * x, 2, 0)
if NP_ROLL_CAN_MULTISHIFT:
x = np.random.rand(2, 5).astype(t)
self._testAll(x + 1j * x, [1, 2], [1, 0])
x = np.random.rand(3, 2, 1, 1).astype(t)
self._testAll(x + 1j * x, [2, 1, 1, 0], [0, 3, 1, 2])
@test_util.run_deprecated_v1
def testNegativeAxis(self):
self._testAll(np.random.randint(-100, 100, (5)).astype(np.int32), 3, -1)
self._testAll(np.random.randint(-100, 100, (4, 4)).astype(np.int32), 3, -2)
    # A negative axis must satisfy 0 <= axis + dims < dims.
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of range"):
manip_ops.roll(np.random.randint(-100, 100, (4, 4)).astype(np.int32),
3, -10).eval()
@test_util.run_deprecated_v1
def testInvalidInputShape(self):
# The input should be 1-D or higher, checked in shape function.
with self.assertRaisesRegexp(
ValueError, "Shape must be at least rank 1 but is rank 0"):
manip_ops.roll(7, 1, 0)
@test_util.run_deprecated_v1
def testRollInputMustVectorHigherRaises(self):
# The input should be 1-D or higher, checked in kernel.
tensor = array_ops.placeholder(dtype=dtypes.int32)
shift = 1
axis = 0
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"input must be 1-D or higher"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={tensor: 7})
@test_util.run_deprecated_v1
def testInvalidAxisShape(self):
# The axis should be a scalar or 1-D, checked in shape function.
with self.assertRaisesRegexp(
ValueError, "Shape must be at most rank 1 but is rank 2"):
manip_ops.roll([[1, 2], [3, 4]], 1, [[0, 1]])
@test_util.run_deprecated_v1
def testRollAxisMustBeScalarOrVectorRaises(self):
# The axis should be a scalar or 1-D, checked in kernel.
tensor = [[1, 2], [3, 4]]
shift = 1
axis = array_ops.placeholder(dtype=dtypes.int32)
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"axis must be a scalar or a 1-D vector"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={axis: [[0, 1]]})
@test_util.run_deprecated_v1
def testInvalidShiftShape(self):
# The shift should be a scalar or 1-D, checked in shape function.
with self.assertRaisesRegexp(
ValueError, "Shape must be at most rank 1 but is rank 2"):
manip_ops.roll([[1, 2], [3, 4]], [[0, 1]], 1)
@test_util.run_deprecated_v1
def testRollShiftMustBeScalarOrVectorRaises(self):
# The shift should be a scalar or 1-D, checked in kernel.
tensor = [[1, 2], [3, 4]]
shift = array_ops.placeholder(dtype=dtypes.int32)
axis = 1
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"shift must be a scalar or a 1-D vector"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [[0, 1]]})
@test_util.run_deprecated_v1
def testInvalidShiftAndAxisNotEqualShape(self):
# The shift and axis must be same size, checked in shape function.
with self.assertRaisesRegexp(ValueError, "both shapes must be equal"):
manip_ops.roll([[1, 2], [3, 4]], [1], [0, 1])
@test_util.run_deprecated_v1
def testRollShiftAndAxisMustBeSameSizeRaises(self):
# The shift and axis must be same size, checked in kernel.
tensor = [[1, 2], [3, 4]]
shift = array_ops.placeholder(dtype=dtypes.int32)
axis = [0, 1]
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"shift and axis must have the same size"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [1]})
def testRollAxisOutOfRangeRaises(self):
tensor = [1, 2]
shift = 1
axis = 1
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of range"):
manip_ops.roll(tensor, shift, axis).eval()
if __name__ == "__main__":
test_lib.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/manip_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeCSV op from parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class DecodeCSVOpTest(test.TestCase):
def _test(self, args, expected_out=None, expected_err_re=None):
if expected_err_re is None:
decode = parsing_ops.decode_csv(**args)
out = self.evaluate(decode)
for i, field in enumerate(out):
if field.dtype == np.float32 or field.dtype == np.float64:
self.assertAllClose(field, expected_out[i])
else:
self.assertAllEqual(field, expected_out[i])
else:
with self.assertRaisesOpError(expected_err_re):
decode = parsing_ops.decode_csv(**args)
self.evaluate(decode)
def testSimple(self):
args = {
"records": ["1", "2", '"3"'],
"record_defaults": [[1]],
}
expected_out = [[1, 2, 3]]
self._test(args, expected_out)
def testSimpleWithScalarDefaults(self):
args = {
"records": ["1,4", "2,5", "3,6"],
"record_defaults": [1, 2],
}
expected_out = [[1, 2, 3], [4, 5, 6]]
self._test(args, expected_out)
def testSimpleWith2DDefaults(self):
args = {
"records": ["1", "2", "3"],
"record_defaults": [[[0]]],
}
if context.executing_eagerly():
err_spec = errors.InvalidArgumentError, (
"Each record default should be at "
"most rank 1.")
else:
err_spec = ValueError, "Shape must be at most rank 1 but is rank 2"
with self.assertRaisesWithPredicateMatch(*err_spec):
self._test(args)
def testSimpleNoQuoteDelimiter(self):
args = {
"records": ["1", "2", '"3"'],
"record_defaults": [[""]],
"use_quote_delim": False,
}
expected_out = [[b"1", b"2", b'"3"']]
self._test(args, expected_out)
def testScalar(self):
args = {"records": '1,""', "record_defaults": [[3], [4]]}
expected_out = [1, 4]
self._test(args, expected_out)
def test2D(self):
args = {"records": [["1", "2"], ['""', "4"]], "record_defaults": [[5]]}
expected_out = [[[1, 2], [5, 4]]]
self._test(args, expected_out)
def test2DNoQuoteDelimiter(self):
args = {
"records": [["1", "2"], ['""', '"']],
"record_defaults": [[""]],
"use_quote_delim": False
}
expected_out = [[[b"1", b"2"], [b'""', b'"']]]
self._test(args, expected_out)
def testDouble(self):
args = {
"records": ["1.0", "-1.79e+308", '"1.79e+308"'],
"record_defaults": [np.array([], dtype=np.double)],
}
expected_out = [[1.0, -1.79e+308, 1.79e+308]]
self._test(args, expected_out)
def testInt64(self):
args = {
"records": ["1", "2", '"2147483648"'],
"record_defaults": [np.array([], dtype=np.int64)],
}
expected_out = [[1, 2, 2147483648]]
self._test(args, expected_out)
def testComplexString(self):
args = {
"records": ['"1.0"', '"ab , c"', '"a\nbc"', '"ab""c"', " abc "],
"record_defaults": [["1"]]
}
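    # RFC 4180-style quoting: a doubled quote inside a quoted field
    # ('"ab""c"') decodes to a single quote (ab"c), and quoted fields may
    # contain delimiters and embedded newlines.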
expected_out = [[b"1.0", b"ab , c", b"a\nbc", b'ab"c', b" abc "]]
self._test(args, expected_out)
def testMultiRecords(self):
args = {
"records": ["1.0,4,aa", "0.2,5,bb", "3,6,cc"],
"record_defaults": [[1.0], [1], ["aa"]]
}
expected_out = [[1.0, 0.2, 3], [4, 5, 6], [b"aa", b"bb", b"cc"]]
self._test(args, expected_out)
def testNA(self):
args = {
"records": ["2.0,NA,aa", "NA,5,bb", "3,6,NA"],
"record_defaults": [[0.0], [0], [""]],
"na_value": "NA"
}
expected_out = [[2.0, 0.0, 3], [0, 5, 6], [b"aa", b"bb", b""]]
self._test(args, expected_out)
def testWithDefaults(self):
args = {
"records": [",1,", "0.2,3,bcd", "3.0,,"],
"record_defaults": [[1.0], [0], ["a"]]
}
expected_out = [[1.0, 0.2, 3.0], [1, 3, 0], [b"a", b"bcd", b"a"]]
self._test(args, expected_out)
def testWithDefaultsAndNoQuoteDelimiter(self):
args = {
"records": [",1,", "0.2,3,bcd", '3.0,,"'],
"record_defaults": [[1.0], [0], ["a"]],
"use_quote_delim": False,
}
expected_out = [[1.0, 0.2, 3.0], [1, 3, 0], [b"a", b"bcd", b"\""]]
self._test(args, expected_out)
def testWithTabDelim(self):
args = {
"records": ["1\t1", "0.2\t3", "3.0\t"],
"record_defaults": [[1.0], [0]],
"field_delim": "\t"
}
expected_out = [[1.0, 0.2, 3.0], [1, 3, 0]]
self._test(args, expected_out)
def testWithoutDefaultsError(self):
args = {
"records": [",1", "0.2,3", "3.0,"],
"record_defaults": [[1.0], np.array([], dtype=np.int32)]
}
self._test(
args, expected_err_re="Field 1 is required but missing in record 2!")
def testWrongFieldIntError(self):
args = {
"records": [",1", "0.2,234a", "3.0,2"],
"record_defaults": [[1.0], np.array([], dtype=np.int32)]
}
self._test(
args, expected_err_re="Field 1 in record 1 is not a valid int32: 234a")
def testOutOfRangeError(self):
args = {
"records": ["1", "9999999999999999999999999", "3"],
"record_defaults": [[1]]
}
self._test(
args, expected_err_re="Field 0 in record 1 is not a valid int32: ")
def testWrongFieldFloatError(self):
args = {
"records": [",1", "0.2,2", "3.0adf,3"],
"record_defaults": [[1.0], np.array([], dtype=np.int32)]
}
self._test(
args, expected_err_re="Field 0 in record 2 is not a valid float: ")
def testWrongFieldStringError(self):
args = {"records": ['"1,a,"', "0.22", 'a"bc'], "record_defaults": [["a"]]}
self._test(
args, expected_err_re="Unquoted fields cannot have quotes/CRLFs inside")
def testWrongDefaults(self):
args = {"records": [",1", "0.2,2", "3.0adf,3"], "record_defaults": [[1.0]]}
self._test(args, expected_err_re="Expect 1 fields but have 2 in record 0")
def testShortQuotedString(self):
args = {
"records": ["\""],
"record_defaults": [["default"]],
}
self._test(
args, expected_err_re="Quoted field has to end with quote followed.*")
def testSelectCols(self):
args = {
"records": [",,", "4,5,6"],
"record_defaults": [[1], [2]],
"select_cols": [0, 1]
}
expected_out = [[1, 4], [2, 5]]
self._test(args, expected_out)
def testSelectColsInclLast(self):
    # The last column is an edge case; test it explicitly.
args = {
"records": [",,", "4,5,6"],
"record_defaults": [[0], [1], [2]],
"select_cols": [0, 1, 2]
}
expected_out = [[0, 4], [1, 5], [2, 6]]
self._test(args, expected_out)
def testWrongSelectColsInclLast(self):
    # The last column is an edge case; test it explicitly.
args = {
"records": [",,", "4,5,6"],
"record_defaults": [[0], [1], [2]],
"select_cols": [0, 1, 3]
}
self._test(args, expected_err_re="Expect 3 fields but have 2 in record 0")
def testWrongSelectColsLen(self):
args = {
"records": ["1,2,3", "4,5,6"],
"record_defaults": [[0], [0], [0]],
"select_cols": [0]
}
with self.assertRaisesWithPredicateMatch(
ValueError, "Length of select_cols and record_defaults do not match."):
self._test(args)
def testWrongSelectColsSorting(self):
args = {
"records": ["1,2,3"],
"record_defaults": [[0], [1]],
"select_cols": [1, 0]
}
with self.assertRaisesWithPredicateMatch(
ValueError, "select_cols is not strictly increasing."):
self._test(args)
def testWrongSelectColsIndicesNegative(self):
args = {
"records": ["1,2,3"],
"record_defaults": [[0], [1]],
"select_cols": [-1, 0] # -1 is not a valid index
}
with self.assertRaisesWithPredicateMatch(
ValueError, "select_cols contains negative values."):
self._test(args)
def testWrongSelectColsIndicesTooHigh(self):
args = {
"records": ["1,2,3"],
"record_defaults": [[0], [1]],
"select_cols": [0, 3] # 3 is not a valid index
}
# Only successfully parses one of the columns
self._test(args, expected_err_re="Expect 2 fields but have 1 in record 0")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/decode_csv_op_test.py
|
tensorflow-master
|
tensorflow/python/kernel_tests/__init__.py
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for scan ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def numpy_reverse(x, axis):
length = len(x.shape)
if axis < 0:
axis = length + axis
ix = [
slice(None, None, -1) if i == axis else slice(None) for i in range(length)
]
return x[ix]
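# For example, numpy_reverse(x, 1) on a 2-D array builds
# [slice(None), slice(None, None, -1)], which is equivalent to x[:, ::-1].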
def handle_options(func, x, axis, exclusive, reverse):
"""Adds tf options to numpy scan ops."""
length = len(x.shape)
if axis < 0:
axis = length + axis
if reverse:
x = numpy_reverse(x, axis)
if exclusive:
ix_head = [slice(0, 1) if i == axis else slice(None) for i in range(length)]
ix_init = [
slice(0, -1) if i == axis else slice(None) for i in range(length)
]
if func == np.cumsum:
init = np.zeros_like(x[ix_head])
elif func == np.cumprod:
init = np.ones_like(x[ix_head])
else:
raise ValueError("Unknown scan function.")
x = np.concatenate([init, func(x[ix_init], axis)], axis=axis)
else:
x = func(x, axis=axis)
if reverse:
x = numpy_reverse(x, axis)
return x
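# Illustrative sketch (hypothetical helper, not invoked by the tests): shows
# how handle_options composes the exclusive and reverse options for a sum.
def _illustrate_handle_options():
  x = np.array([1., 2., 3.])
  # Inclusive forward scan.
  assert np.array_equal(handle_options(np.cumsum, x, 0, False, False),
                        [1., 3., 6.])
  # Exclusive scan: prepend the identity (0 for sums), drop the last value.
  assert np.array_equal(handle_options(np.cumsum, x, 0, True, False),
                        [0., 1., 3.])
  # Reverse scan: accumulate from the end toward the start.
  assert np.array_equal(handle_options(np.cumsum, x, 0, False, True),
                        [6., 5., 3.])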
class CumsumTest(test.TestCase):
valid_dtypes = [
np.int32, np.int64, np.float16, np.float32, np.float64, np.complex64,
np.complex128
]
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
with self.cached_session(use_gpu=True):
tf_out = math_ops.cumsum(x, axis, exclusive, reverse).eval()
self.assertAllClose(np_out, tf_out)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
@test_util.run_deprecated_v1
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def testAxisType(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis_dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True):
axis = constant_op.constant(0, axis_dtype)
          math_ops.cumsum(x, axis).eval()  # Should not raise.
@test_util.run_deprecated_v1
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 10).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 20).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123860949") # The computation is constant folded
def testLarge(self):
for dtype in self.valid_dtypes:
x = np.ones([1000000], dtype=dtype) / 1024
self._compareAll(x, 0)
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = ops.convert_to_tensor(x)
with self.session(use_gpu=True):
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumsum(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumsum(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
math_ops.cumsum(input_tensor, [0]).eval()
def _compareGradient(self, shape, axis, exclusive, reverse):
x = np.arange(0, 50).reshape(shape).astype(np.float64)
with self.cached_session(use_gpu=True):
t = ops.convert_to_tensor(x)
result = math_ops.cumsum(t, axis, exclusive, reverse)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, shape, result, shape, x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient(self):
for axis in (-1, 0):
self._compareGradient([50], axis, False, False)
@test_util.run_deprecated_v1
def testGradientReverse(self):
for axis in (-1, 0):
self._compareGradient([50], axis, False, True)
@test_util.run_deprecated_v1
def testGradientExclusive(self):
for axis in (-1, 0):
self._compareGradient([50], axis, True, False)
@test_util.run_deprecated_v1
def testGradientExclusiveReverse(self):
for axis in (-1, 0):
self._compareGradient([50], axis, True, True)
@test_util.run_deprecated_v1
def testGradient2D(self):
for axis in (-1, 0, 1):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compareGradient([5, 10], axis, exclusive, reverse)
class CumprodTest(test.TestCase):
valid_dtypes = [
np.int32, np.int64, np.float16, np.float32, np.float64, np.complex64,
np.complex128
]
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
with self.cached_session(use_gpu=True):
tf_out = math_ops.cumprod(x, axis, exclusive, reverse).eval()
self.assertAllClose(np_out, tf_out)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
@test_util.run_deprecated_v1
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def testAxisType(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis_dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True):
axis = constant_op.constant(0, axis_dtype)
          math_ops.cumprod(x, axis).eval()  # Should not raise.
@test_util.run_deprecated_v1
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 11).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 21).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = ops.convert_to_tensor(x)
with self.session(use_gpu=True):
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumprod(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumprod(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
math_ops.cumprod(input_tensor, [0]).eval()
def _compareGradient(self, shape, axis, exclusive, reverse):
x = np.arange(1, 9).reshape(shape).astype(np.float64)
with self.cached_session(use_gpu=True):
t = ops.convert_to_tensor(x)
result = math_ops.cumprod(t, axis, exclusive, reverse)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, shape, result, shape, x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient(self):
for axis in (-1, 0):
self._compareGradient([8], axis, False, False)
@test_util.run_deprecated_v1
def testGradientReverse(self):
for axis in (-1, 0):
self._compareGradient([8], axis, False, True)
@test_util.run_deprecated_v1
def testGradientExclusive(self):
for axis in (-1, 0):
self._compareGradient([8], axis, True, False)
@test_util.run_deprecated_v1
def testGradientExclusiveReverse(self):
for axis in (-1, 0):
self._compareGradient([8], axis, True, True)
@test_util.run_deprecated_v1
def testGradient2D(self):
for axis in (-2, -1, 0, 1):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compareGradient([2, 4], axis, exclusive, reverse)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/scan_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ExtractImagePatches gradient."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class ExtractImagePatchesGradTest(test.TestCase):
"""Gradient-checking for ExtractImagePatches op."""
_TEST_CASES = [
{
'in_shape': [2, 5, 5, 3],
'ksizes': [1, 1, 1, 1],
'strides': [1, 2, 3, 1],
'rates': [1, 1, 1, 1],
},
{
'in_shape': [2, 7, 7, 3],
'ksizes': [1, 3, 3, 1],
'strides': [1, 1, 1, 1],
'rates': [1, 1, 1, 1],
},
{
'in_shape': [2, 8, 7, 3],
'ksizes': [1, 2, 2, 1],
'strides': [1, 1, 1, 1],
'rates': [1, 1, 1, 1],
},
{
'in_shape': [2, 7, 8, 3],
'ksizes': [1, 3, 2, 1],
'strides': [1, 4, 3, 1],
'rates': [1, 1, 1, 1],
},
{
'in_shape': [1, 15, 20, 3],
'ksizes': [1, 4, 3, 1],
'strides': [1, 1, 1, 1],
'rates': [1, 2, 4, 1],
},
{
'in_shape': [2, 7, 8, 1],
'ksizes': [1, 3, 2, 1],
'strides': [1, 3, 2, 1],
'rates': [1, 2, 2, 1],
},
{
'in_shape': [2, 8, 9, 4],
'ksizes': [1, 2, 2, 1],
'strides': [1, 4, 2, 1],
'rates': [1, 3, 2, 1],
},
]
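  # Roughly, as with dilated convolutions, the effective patch extent is
  # ksize + (ksize - 1) * (rate - 1); 'VALID' output dims are then
  # ceil((in - effective + 1) / stride) and 'SAME' dims are ceil(in / stride).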
@test_util.run_deprecated_v1
def testGradient(self):
# Set graph seed for determinism.
random_seed = 42
random_seed_lib.set_random_seed(random_seed)
with self.cached_session():
for test_case in self._TEST_CASES:
np.random.seed(random_seed)
in_shape = test_case['in_shape']
in_val = constant_op.constant(
np.random.random(in_shape), dtype=dtypes.float32)
for padding in ['VALID', 'SAME']:
out_val = array_ops.extract_image_patches(in_val, test_case['ksizes'],
test_case['strides'],
test_case['rates'], padding)
out_shape = out_val.get_shape().as_list()
err = gradient_checker.compute_gradient_error(in_val, in_shape,
out_val, out_shape)
print('extract_image_patches gradient err: %.4e' % err)
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testConstructGradientWithLargeImages(self):
batch_size = 4
height = 1024
width = 1024
ksize = 5
images = variable_scope.get_variable('inputs',
(batch_size, height, width, 1))
patches = array_ops.extract_image_patches(images,
ksizes=[1, ksize, ksize, 1],
strides=[1, 1, 1, 1],
rates=[1, 1, 1, 1],
padding='SAME')
    # GitHub issue #20146: the tf.image.extract_image_patches() gradient was
    # very slow at graph construction time.
gradients = gradients_impl.gradients(patches, images)
# Won't time out.
self.assertIsNotNone(gradients)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/extract_image_patches_grad_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReorder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SparseSliceOpTest(test.TestCase):
def _SparseTensor_4x6(self, val_dtype=np.int64):
# [0 | |2 | |4 |5 ]
# [ |11| |13|14| ]
# [20| | |23| |25]
# [30| |32|33| |35]
    ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
                    [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
                    [3, 5]]).astype(np.int64)
val = np.array([0, 2, 4, 5, 11, 13, 14, 20, 23, 25, 30, 32, 33, 35]).astype(
val_dtype)
shape = np.array([4, 6]).astype(np.int64)
return sparse_tensor.SparseTensor(ind, val, shape)
def _SparseTensor_5x7(self):
# [0 | |2 | |4 |5 | ]
# [ |11| |13|14| |16]
# [20| | |23| |25| ]
# [30| |32|33| |35| ]
# [ |41| | |44| |46]
ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
[1, 6], [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
[3, 5], [4, 1], [4, 4], [4, 6]]).astype(np.int64)
val = np.array(
[0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25, 30, 32, 33, 35, 41, 44,
46]).astype(np.int64)
shape = np.array([5, 7]).astype(np.int64)
return sparse_tensor.SparseTensor(ind, val, shape)
def _SparseTensorValue_3x4x2(self):
# slice(:,:, 0)
# ['a0'| |'b0'| ]
# [ |'c0'| |'d0']
# [ | |'e0'| ]
# slice(:,:, 1)
# ['a1'| |'b1'| ]
# [ |'c1'| |'d1']
# [ | |'e1'| ]
    ind = np.array([[0, 0, 0], [0, 0, 1], [0, 2, 0], [0, 2, 1], [1, 1, 0],
                    [1, 1, 1], [1, 3, 0], [1, 3, 1], [2, 2, 0],
                    [2, 2, 1]]).astype(np.int64)
val = np.array(['a0', 'a1', 'b0', 'b1', 'c0', 'c1', 'd0', 'd1', 'e0', 'e1'])
shape = np.array([3, 4, 2]).astype(np.int64)
return sparse_tensor.SparseTensorValue(ind, val, shape)
def _SparseTensor_3x4x2(self):
return sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_3x4x2())
@test_util.run_deprecated_v1
def testSliceMatrixRows(self):
with self.session(use_gpu=False):
sp_input = self._SparseTensor_4x6()
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [2, 6])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [2, 0], [3, 7])
self.assertAllEqual(
sp_tensor0.indices.eval(),
[[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4]])
self.assertAllEqual(sp_tensor0.values.eval(), [0, 2, 4, 5, 11, 13, 14])
self.assertAllEqual(sp_tensor0.dense_shape.eval(), [2, 6])
self.assertAllEqual(
sp_tensor1.indices.eval(),
[[0, 0], [0, 3], [0, 5], [1, 0], [1, 2], [1, 3], [1, 5]])
self.assertAllEqual(sp_tensor1.values.eval(),
[20, 23, 25, 30, 32, 33, 35])
self.assertAllEqual(sp_tensor1.dense_shape.eval(), [2, 6])
@test_util.run_deprecated_v1
def testSliceMatrixUnevenCols(self):
with self.session(use_gpu=False):
sp_input = self._SparseTensor_5x7()
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [5, 3])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 3], [5, 2])
sp_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 5], [5, 2])
self.assertAllEqual(
sp_tensor0.indices.eval(),
[[0, 0], [0, 2], [1, 1], [2, 0], [3, 0], [3, 2], [4, 1]])
self.assertAllEqual(sp_tensor0.values.eval(), [0, 2, 11, 20, 30, 32, 41])
self.assertAllEqual(sp_tensor0.dense_shape.eval(), [5, 3])
self.assertAllEqual(sp_tensor1.indices.eval(),
[[0, 1], [1, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensor1.values.eval(), [4, 13, 14, 23, 33, 44])
self.assertAllEqual(sp_tensor1.dense_shape.eval(), [5, 2])
self.assertAllEqual(sp_tensor2.indices.eval(),
[[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensor2.values.eval(), [5, 16, 25, 35, 46])
self.assertAllEqual(sp_tensor2.dense_shape.eval(), [5, 2])
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [5, 2])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 2], [5, 2])
sp_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 2])
sp_tensor3 = sparse_ops.sparse_slice(sp_input, [0, 6], [5, 2])
self.assertAllEqual(sp_tensor0.indices.eval(),
[[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensor0.values.eval(), [0, 11, 20, 30, 41])
self.assertAllEqual(sp_tensor0.dense_shape.eval(), [5, 2])
self.assertAllEqual(sp_tensor1.indices.eval(),
[[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
self.assertAllEqual(sp_tensor1.values.eval(), [2, 13, 23, 32, 33])
self.assertAllEqual(sp_tensor1.dense_shape.eval(), [5, 2])
self.assertAllEqual(sp_tensor2.indices.eval(),
[[0, 0], [0, 1], [1, 0], [2, 1], [3, 1], [4, 0]])
self.assertAllEqual(sp_tensor2.values.eval(), [4, 5, 14, 25, 35, 44])
self.assertAllEqual(sp_tensor2.dense_shape.eval(), [5, 2])
self.assertAllEqual(sp_tensor3.indices.eval(), [[1, 0], [4, 0]])
self.assertAllEqual(sp_tensor3.values.eval(), [16, 46])
self.assertAllEqual(sp_tensor3.dense_shape.eval(), [5, 1])
@test_util.run_deprecated_v1
def testSliceMatrixUnevenRows(self):
with self.session(use_gpu=False):
sp_input = self._SparseTensor_5x7()
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [3, 7])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [3, 0], [3, 7])
self.assertAllEqual(sp_tensor0.indices.eval(),
[[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
[1, 4], [1, 6], [2, 0], [2, 3], [2, 5]])
self.assertAllEqual(sp_tensor0.values.eval(),
[0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25])
self.assertAllEqual(sp_tensor0.dense_shape.eval(), [3, 7])
self.assertAllEqual(
sp_tensor1.indices.eval(),
[[0, 0], [0, 2], [0, 3], [0, 5], [1, 1], [1, 4], [1, 6]])
self.assertAllEqual(sp_tensor1.values.eval(),
[30, 32, 33, 35, 41, 44, 46])
self.assertAllEqual(sp_tensor1.dense_shape.eval(), [2, 7])
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [2, 7])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [2, 0], [2, 7])
sp_tensor2 = sparse_ops.sparse_slice(sp_input, [4, 0], [2, 7])
self.assertAllEqual(
sp_tensor0.indices.eval(),
[[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4], [1, 6]])
self.assertAllEqual(sp_tensor0.values.eval(),
[0, 2, 4, 5, 11, 13, 14, 16])
self.assertAllEqual(sp_tensor0.dense_shape.eval(), [2, 7])
self.assertAllEqual(sp_tensor1.values.eval(),
[20, 23, 25, 30, 32, 33, 35])
self.assertAllEqual(sp_tensor1.dense_shape.eval(), [2, 7])
self.assertAllEqual(sp_tensor2.indices.eval(), [[0, 1], [0, 4], [0, 6]])
self.assertAllEqual(sp_tensor2.values.eval(), [41, 44, 46])
self.assertAllEqual(sp_tensor2.dense_shape.eval(), [1, 7])
@test_util.run_deprecated_v1
def testSliceAllRows(self):
with self.session(use_gpu=False):
sp_input = self._SparseTensor_4x6()
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [1, 6])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [1, 0], [1, 6])
sp_tensor2 = sparse_ops.sparse_slice(sp_input, [2, 0], [1, 7])
sp_tensor3 = sparse_ops.sparse_slice(sp_input, [3, 0], [2, 7])
self.assertAllEqual(sp_tensor0.indices.eval(),
[[0, 0], [0, 2], [0, 4], [0, 5]])
self.assertAllEqual(sp_tensor0.values.eval(), [0, 2, 4, 5])
self.assertAllEqual(sp_tensor0.dense_shape.eval(), [1, 6])
self.assertAllEqual(sp_tensor1.indices.eval(), [[0, 1], [0, 3], [0, 4]])
self.assertAllEqual(sp_tensor1.values.eval(), [11, 13, 14])
self.assertAllEqual(sp_tensor1.dense_shape.eval(), [1, 6])
self.assertAllEqual(sp_tensor2.indices.eval(), [[0, 0], [0, 3], [0, 5]])
self.assertAllEqual(sp_tensor2.values.eval(), [20, 23, 25])
self.assertAllEqual(sp_tensor2.dense_shape.eval(), [1, 6])
self.assertAllEqual(sp_tensor3.indices.eval(),
[[0, 0], [0, 2], [0, 3], [0, 5]])
self.assertAllEqual(sp_tensor3.values.eval(), [30, 32, 33, 35])
self.assertAllEqual(sp_tensor3.dense_shape.eval(), [1, 6])
@test_util.run_deprecated_v1
def testSliceColumns(self):
with self.session(use_gpu=False):
sp_input = self._SparseTensor_4x6()
sparse_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [4, 2])
sparse_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 2], [5, 2])
sparse_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 3])
self.assertAllEqual(sparse_tensor0.indices.eval(),
[[0, 0], [1, 1], [2, 0], [3, 0]])
self.assertAllEqual(sparse_tensor0.values.eval(), [0, 11, 20, 30])
self.assertAllEqual(sparse_tensor0.dense_shape.eval(), [4, 2])
self.assertAllEqual(sparse_tensor1.indices.eval(),
[[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
self.assertAllEqual(sparse_tensor1.values.eval(), [2, 13, 23, 32, 33])
self.assertAllEqual(sparse_tensor1.dense_shape.eval(), [4, 2])
self.assertAllEqual(sparse_tensor2.indices.eval(),
[[0, 0], [0, 1], [1, 0], [2, 1], [3, 1]])
self.assertAllEqual(sparse_tensor2.values.eval(), [4, 5, 14, 25, 35])
self.assertAllEqual(sparse_tensor2.dense_shape.eval(), [4, 2])
@test_util.run_deprecated_v1
def testSliceAllColumns(self):
with self.session(use_gpu=False):
sp_input = self._SparseTensor_4x6()
sparse_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [4, 1])
sparse_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 1], [4, 1])
sparse_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 2], [4, 1])
sparse_tensor3 = sparse_ops.sparse_slice(sp_input, [0, 3], [4, 1])
sparse_tensor4 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 1])
sparse_tensor5 = sparse_ops.sparse_slice(sp_input, [0, 5], [6, 3])
self.assertAllEqual(sparse_tensor0.indices.eval(),
[[0, 0], [2, 0], [3, 0]])
self.assertAllEqual(sparse_tensor0.values.eval(), [0, 20, 30])
self.assertAllEqual(sparse_tensor0.dense_shape.eval(), [4, 1])
self.assertAllEqual(sparse_tensor1.indices.eval(), [[1, 0]])
self.assertAllEqual(sparse_tensor1.values.eval(), [11])
self.assertAllEqual(sparse_tensor1.dense_shape.eval(), [4, 1])
self.assertAllEqual(sparse_tensor2.indices.eval(), [[0, 0], [3, 0]])
self.assertAllEqual(sparse_tensor2.values.eval(), [2, 32])
self.assertAllEqual(sparse_tensor2.dense_shape.eval(), [4, 1])
self.assertAllEqual(sparse_tensor3.indices.eval(),
[[1, 0], [2, 0], [3, 0]])
self.assertAllEqual(sparse_tensor3.dense_shape.eval(), [4, 1])
self.assertAllEqual(sparse_tensor3.values.eval(), [13, 23, 33])
self.assertAllEqual(sparse_tensor4.indices.eval(), [[0, 0], [1, 0]])
self.assertAllEqual(sparse_tensor4.values.eval(), [4, 14])
self.assertAllEqual(sparse_tensor4.dense_shape.eval(), [4, 1])
self.assertAllEqual(sparse_tensor5.indices.eval(),
[[0, 0], [2, 0], [3, 0]])
self.assertAllEqual(sparse_tensor5.values.eval(), [5, 25, 35])
self.assertAllEqual(sparse_tensor5.dense_shape.eval(), [4, 1])
@test_util.run_deprecated_v1
def testGradients(self):
sp_input = self._SparseTensor_4x6(val_dtype=np.float32)
start_and_size = [([0, 0], [4, 2]),
([0, 2], [5, 2]),
([0, 4], [5, 3])]
with self.session(use_gpu=False):
for start, size in start_and_size:
sp_output = sparse_ops.sparse_slice(sp_input, start, size)
nnz_in = len(sp_input.values.eval())
nnz_out = len(sp_output.values.eval())
err = gradient_checker.compute_gradient_error(
[sp_input.values], [(nnz_in,)], sp_output.values, (nnz_out,))
self.assertLess(err, 1e-3)
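# Hedged sketch (added for exposition; not used by the tests above): the
# value-gradient of sparse_slice conceptually scatters each output-value
# gradient back to the input position it was sliced from, leaving zeros for
# input values that fell outside the slice. All arguments are numpy arrays.
def _sparse_slice_value_grad(input_indices, output_indices, output_grad,
                             start):
  grad = np.zeros(len(input_indices), dtype=output_grad.dtype)
  # Output indices are relative to `start`; shift them back to input space.
  shifted = output_indices + np.asarray(start, dtype=np.int64)
  for out_pos, idx in enumerate(shifted):
    in_pos = np.where((input_indices == idx).all(axis=1))[0][0]
    grad[in_pos] = output_grad[out_pos]
  return grad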
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_slice_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for record_input_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.framework import test_util
from tensorflow.python.framework.errors_impl import NotFoundError
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RecordInputOpTest(test.TestCase):
def generateTestData(self,
prefix,
n,
m,
compression_type=tf_record.TFRecordCompressionType.NONE):
options = tf_record.TFRecordOptions(compression_type)
for i in range(n):
f = os.path.join(self.get_temp_dir(), prefix + "." + str(i))
w = tf_record.TFRecordWriter(f, options=options)
for j in range(m):
w.write("{0:0{width}}".format(i * m + j, width=10).encode("utf-8"))
w.close()
def testRecordInputSimple(self):
with self.cached_session() as sess:
self.generateTestData("basic", 1, 1)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input").get_yield_op()
self.assertEqual(self.evaluate(yield_op), b"0000000000")
def testRecordInputSimpleGzip(self):
with self.cached_session() as sess:
self.generateTestData(
"basic",
1,
1,
compression_type=tf_record.TFRecordCompressionType.GZIP)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input",
compression_type=tf_record.TFRecordCompressionType.GZIP).get_yield_op(
)
self.assertEqual(self.evaluate(yield_op), b"0000000000")
def testRecordInputSimpleZlib(self):
with self.cached_session() as sess:
self.generateTestData(
"basic",
1,
1,
compression_type=tf_record.TFRecordCompressionType.ZLIB)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input",
compression_type=tf_record.TFRecordCompressionType.ZLIB).get_yield_op(
)
self.assertEqual(self.evaluate(yield_op), b"0000000000")
@test_util.run_deprecated_v1
def testRecordInputEpochs(self):
files = 100
records_per_file = 100
batches = 2
with self.cached_session() as sess:
self.generateTestData("basic", files, records_per_file)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=2,
buffer_size=2000,
batch_size=1,
shift_ratio=0.33,
seed=10,
name="record_input",
batches=batches)
yield_op = records.get_yield_op()
      # Cycle over 3 epochs and make sure we never see a duplicate record.
for _ in range(3):
epoch_set = set()
for _ in range(int(files * records_per_file / batches)):
op_list = self.evaluate(yield_op)
          self.assertEqual(len(op_list), batches)
          for r in op_list:
            self.assertNotIn(r[0], epoch_set)
epoch_set.add(r[0])
@test_util.run_deprecated_v1
def testDoesNotDeadlock(self):
    # Iterate many times so that any potential deadlock has a chance to occur.
for _ in range(30):
with self.cached_session() as sess:
self.generateTestData("basic", 1, 1)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=100,
batch_size=1,
name="record_input")
yield_op = records.get_yield_op()
for _ in range(50):
self.evaluate(yield_op)
@test_util.run_deprecated_v1
def testEmptyGlob(self):
with self.cached_session() as sess:
record_input = data_flow_ops.RecordInput(file_pattern="foo")
yield_op = record_input.get_yield_op()
self.evaluate(variables.global_variables_initializer())
with self.assertRaises(NotFoundError):
self.evaluate(yield_op)
@test_util.run_deprecated_v1
def testBufferTooSmall(self):
files = 10
records_per_file = 10
batches = 2
with self.cached_session() as sess:
self.generateTestData("basic", files, records_per_file)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=2,
buffer_size=2000,
batch_size=1,
shift_ratio=0.33,
seed=10,
name="record_input",
batches=batches)
yield_op = records.get_yield_op()
      # Cycle over 3 epochs and make sure we never see a duplicate record.
for _ in range(3):
epoch_set = set()
for _ in range(int(files * records_per_file / batches)):
op_list = self.evaluate(yield_op)
          self.assertEqual(len(op_list), batches)
          for r in op_list:
            self.assertNotIn(r[0], epoch_set)
epoch_set.add(r[0])
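# Hedged debugging aid (added for exposition; `_read_all_records` is
# hypothetical and unused by the tests): the fixed-width records written by
# generateTestData can be read back with tf_record_iterator. Compressed files
# would additionally need a matching TFRecordOptions argument.
def _read_all_records(directory, prefix="basic"):
  records = []
  for name in sorted(os.listdir(directory)):
    if name.startswith(prefix + "."):
      records.extend(
          tf_record.tf_record_iterator(os.path.join(directory, name)))
  return records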
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/record_input_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the binary ops priority mechanism."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.platform import test as test_lib
class TensorPriorityTest(test_lib.TestCase):
def testSupportedRhsWithoutDelegation(self):
class NumpyArraySubclass(np.ndarray):
pass
    supported_rhs_without_delegation = (
        3, 3.0, [1.0, 2.0], np.array([1.0, 2.0]),
        NumpyArraySubclass(shape=(1, 2), buffer=np.array([1.0, 2.0])),
        ops.convert_to_tensor([[1.0, 2.0]]))
for rhs in supported_rhs_without_delegation:
tensor = ops.convert_to_tensor([[10.0, 20.0]])
res = tensor + rhs
self.assertIsInstance(res, ops.Tensor)
def testUnsupportedRhsWithoutDelegation(self):
class WithoutReverseAdd(object):
pass
tensor = ops.convert_to_tensor([[10.0, 20.0]])
rhs = WithoutReverseAdd()
with self.assertRaisesWithPredicateMatch(
TypeError, lambda e: "Expected float" in str(e)):
# pylint: disable=pointless-statement
tensor + rhs
def testUnsupportedRhsWithDelegation(self):
class WithReverseAdd(object):
def __radd__(self, lhs):
return "Works!"
tensor = ops.convert_to_tensor([[10.0, 20.0]])
rhs = WithReverseAdd()
res = tensor + rhs
self.assertEqual(res, "Works!")
def testFullDelegationControlUsingRegistry(self):
class NumpyArraySubclass(np.ndarray):
def __radd__(self, lhs):
return "Works!"
def raise_to_delegate(value, dtype=None, name=None, as_ref=False):
del value, dtype, name, as_ref # Unused.
raise TypeError
ops.register_tensor_conversion_function(
NumpyArraySubclass, raise_to_delegate, priority=0)
tensor = ops.convert_to_tensor([[10.0, 20.0]])
rhs = NumpyArraySubclass(shape=(1, 2), buffer=np.array([1.0, 2.0]))
res = tensor + rhs
self.assertEqual(res, "Works!")
if __name__ == "__main__":
test_lib.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/tensor_priority_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Transpose op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class TransposeTest(test.TestCase):
def _np_transpose(self, x, perm):
ret = np.copy(x)
ret = ret.transpose(perm)
return ret
def _compareCpu(self, x, p, conjugate=False):
if p is None:
rank = x.ndim
perm = (rank - 1) - np.arange(rank)
else:
perm = p
np_ans = self._np_transpose(x, perm)
if conjugate:
np_ans = np.conj(np_ans)
with self.cached_session(use_gpu=False):
inx = ops.convert_to_tensor(x)
y = array_ops.transpose(inx, p, conjugate=conjugate)
tf_ans = self.evaluate(y)
self.assertShapeEqual(np_ans, y)
self.assertAllEqual(np_ans, tf_ans)
jacob_t = None
# Gradient check on CPU.
xs = list(np.shape(x))
ys = list(np.shape(tf_ans))
if x.dtype in [np.float32, np.complex64]:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
elif x.dtype in [np.float64, np.complex128]:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)
return tf_ans, jacob_t
def _compareGpu(self, x, p, conjugate=False):
if p is None:
rank = x.ndim
perm = (rank - 1) - np.arange(rank)
else:
perm = p
np_ans = self._np_transpose(x, perm)
if conjugate:
np_ans = np.conj(np_ans)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
y = array_ops.transpose(inx, p, conjugate=conjugate)
tf_ans = self.evaluate(y)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
jacob_t = None
# Gradient check on GPU.
xs = list(np.shape(x))
ys = list(np.shape(tf_ans))
if x.dtype == np.float32:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
elif x.dtype == np.float64:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)
return tf_ans, jacob_t
def _compare(self, x, use_gpu=False):
n = np.ndim(x)
    # Generate all permutations of [0, 1, ..., n-1] in random order and test
    # the first two.
all_perm = np.random.permutation(
[p for p in itertools.permutations(range(n))]).astype(np.int32)
cs = [False, True] if x.dtype in [np.complex64, np.complex128] else [False]
for c in cs:
for p in all_perm[:2]:
self._compareCpu(x, p, conjugate=c)
if use_gpu:
self._compareGpu(x, p, conjugate=c)
    # Test with the default permutation (p=None, i.e. reversed axes).
for c in cs:
self._compareCpu(x, None, conjugate=c)
if use_gpu:
self._compareGpu(x, None, conjugate=c)
def _compare_cpu_gpu(self, x):
n = np.ndim(x)
    # Test two random permutations of [0, 1, ..., n-1].
perms = itertools.permutations(range(n))
for _ in range(2):
p = np.random.permutation(next(perms)).astype(np.int32)
tf_a_cpu, tf_g_cpu = self._compareCpu(x, p)
tf_a_gpu, tf_g_gpu = self._compareGpu(x, p)
assert tf_g_cpu is not None
assert tf_g_gpu is not None
if x.dtype == np.float32:
self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-3, 1e-3)
self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-3, 1e-3)
elif x.dtype == np.float64:
self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-6, 1e-6)
self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-6, 1e-6)
def _testBoth(self, x):
self._compare(x, use_gpu=False)
self._compare(x, use_gpu=True)
@test_util.run_v1_only("b/120545219")
def testRank1(self):
self._compareCpu(np.arange(0., 2), [0])
def test1D(self):
vector = np.arange(0, 2).reshape((1, 1, 1, 2, 1))
self._compare(vector, use_gpu=False)
self._compare(vector, use_gpu=True)
def test5DGPU(self):
# If no GPU available, skip the test
if not test.is_gpu_available(cuda_only=True):
return
    large_shapes = [[4, 10, 10, 10, 3], [4, 10, 10, 10, 8],
                    [4, 10, 10, 10, 13], [4, 3, 10, 10, 10],
                    [4, 8, 10, 10, 10], [4, 13, 10, 10, 10]] * 3
    perms = ([[0, 4, 1, 2, 3]] * 3 + [[0, 2, 3, 4, 1]] * 3 +
             [[4, 1, 2, 3, 0]] * 6 + [[1, 2, 3, 4, 0]] * 6)
datatypes = [np.int8, np.float16, np.float32, np.float64, np.complex128]
for datatype in datatypes:
for input_shape, perm in zip(large_shapes, perms):
total_size = np.prod(input_shape)
inp = np.arange(1, total_size + 1, dtype=datatype).reshape(input_shape)
np_ans = self._np_transpose(inp, perm)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(inp)
y = array_ops.transpose(inx, perm)
tf_ans = self.evaluate(y)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
def test4DGPU(self):
# If no GPU available, skip the test
if not test.is_gpu_available(cuda_only=True):
return
large_shapes = [[4, 10, 10, 3], [4, 10, 10, 8], [4, 10, 10, 13],
[4, 3, 10, 10], [4, 8, 10, 10], [4, 13, 10, 10]] * 3
    perms = ([[0, 3, 1, 2]] * 3 + [[0, 2, 3, 1]] * 3 + [[3, 1, 2, 0]] * 6 +
             [[1, 2, 3, 0]] * 3 + [[2, 3, 0, 1]] * 3)
for input_shape, perm in zip(large_shapes, perms):
total_size = np.prod(input_shape)
inp = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_shape)
np_ans = self._np_transpose(inp, perm)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(inp)
y = array_ops.transpose(inx, perm)
tf_ans = self.evaluate(y)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
# shapes related to Inception (taken from conv_ops_test.py)
    inception_shapes = [[4, 5, 5, 124], [4, 8, 8, 38], [4, 8, 8, 38],
                        [4, 8, 8, 204], [4, 8, 8, 44], [4, 8, 8, 204],
                        [4, 8, 8, 204], [4, 8, 8, 204], [4, 8, 8, 176],
                        [4, 8, 8, 176], [4, 8, 8, 176], [4, 8, 8, 176],
                        [4, 17, 17, 19], [4, 17, 17, 19], [4, 17, 17, 124],
                        [4, 17, 17, 12], [4, 17, 17, 124], [4, 17, 17, 22],
                        [4, 17, 17, 19], [4, 17, 17, 19], [4, 17, 17, 121],
                        [4, 17, 17, 121], [4, 17, 17, 22], [4, 17, 17, 19],
                        [4, 17, 17, 19], [4, 17, 17, 115], [4, 17, 17, 115],
                        [4, 17, 17, 19], [4, 17, 17, 16], [4, 17, 17, 115],
                        [4, 17, 17, 102], [4, 17, 17, 12], [4, 17, 17, 102],
                        [4, 17, 17, 12], [4, 17, 17, 102], [4, 17, 17, 12],
                        [4, 17, 17, 76], [4, 17, 17, 12], [4, 17, 17, 12],
                        [4, 17, 17, 76], [4, 17, 17, 76], [4, 35, 35, 9],
                        [4, 35, 35, 28], [4, 35, 35, 6], [4, 35, 35, 28],
                        [4, 35, 35, 25], [4, 35, 35, 4], [4, 35, 35, 25],
                        [4, 35, 35, 9], [4, 35, 35, 19], [4, 35, 35, 19],
                        [4, 35, 35, 19], [4, 73, 73, 6], [4, 73, 73, 6],
                        [4, 147, 147, 2]]
for input_shape in inception_shapes:
perm = [0, 3, 1, 2]
total_size = np.prod(input_shape)
inp = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_shape)
np_ans = self._np_transpose(inp, perm)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(inp)
y = array_ops.transpose(inx, perm)
tf_ans = self.evaluate(y)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
def test3DGPU(self):
# If no GPU available, skip the test
if not test.is_gpu_available(cuda_only=True):
return
datatypes = [np.int8, np.float16, np.float32, np.float64, np.complex128]
large_shapes = [[4, 1000, 3], [4, 1000, 8], [4, 1000, 13], [4, 3, 1000],
[4, 8, 1000], [4, 13, 1000]] * 3
    perms = ([[0, 2, 1]] * 6 + [[2, 1, 0]] * 6 + [[1, 2, 0]] * 3 +
             [[2, 0, 1]] * 3)
for datatype in datatypes:
for input_shape, perm in zip(large_shapes, perms):
total_size = np.prod(input_shape)
inp = np.arange(1, total_size + 1, dtype=datatype).reshape(input_shape)
np_ans = self._np_transpose(inp, perm)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(inp)
y = array_ops.transpose(inx, perm)
tf_ans = self.evaluate(y)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
def testLargeSizeGPU(self):
# If no GPU available, skip the test
if not test.is_gpu_available(cuda_only=True):
return
large_shapes = [[1000000, 31, 3], [3, 1000000, 31], [3, 31, 1000000],
[10000, 310, 3], [3, 10000, 310], [3, 310, 10000],
[2, 1000, 1000], [1000, 2, 1000], [1000, 1000, 2]]
perms = [[0, 2, 1]] * 9
for input_shape, perm in zip(large_shapes, perms):
total_size = np.prod(input_shape)
inp = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_shape)
np_ans = self._np_transpose(inp, perm)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(inp)
y = array_ops.transpose(inx, perm)
tf_ans = self.evaluate(y)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
def testRandomizedSmallDimLargeSizeGPU(self):
# If no GPU available, skip the test
if not test.is_gpu_available(cuda_only=True):
return
# Draw 10 random shapes with large dimension sizes.
# 40% prob to generate dim[0] size within [1, 2047]
# 40% prob to generate dim[0] size within [2048, 4095]
# 20% prob to generate dim[0] size within [4096, 100000]
# 50% prob to use dim[1] as the small dim (<16)
num_samples = 10
total_size = 500000
small_size_limit = 2048
large_size_limit = 95905
small_size_percentage = 0.4
medium_size_percentage = 0.4
large_size_percentage = 0.2
perms = [[0, 2, 1]] * num_samples
dim_zero_sizes = []
dim_zero_sizes += list(
np.random.randint(
small_size_limit, size=int(small_size_percentage * num_samples)) +
1)
dim_zero_sizes += list(
np.random.randint(
small_size_limit, size=int(medium_size_percentage * num_samples)) +
small_size_limit)
dim_zero_sizes += list(
np.random.randint(
large_size_limit, size=int(large_size_percentage * num_samples)) +
small_size_limit * 2)
input_shapes = []
small_dim_limit = 16
for dim_zero_size in dim_zero_sizes:
small_dim_size = np.random.randint(small_dim_limit - 1) + 1
large_dim_size = int(
total_size / dim_zero_size / small_dim_size) + small_dim_limit
input_shapes += ([[dim_zero_size, small_dim_size, large_dim_size]]
if np.random.randint(2) else
[[dim_zero_size, large_dim_size, small_dim_size]])
for input_shape, perm in zip(input_shapes, perms):
# generate input data with random ints from 0 to 9.
inp = np.random.randint(10, size=input_shape)
np_ans = self._np_transpose(inp, perm)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(inp)
y = array_ops.transpose(inx, perm)
tf_ans = self.evaluate(y)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
self._ClearCachedSession()
@test_util.run_v1_only("b/120545219")
def testNop(self):
self._compareCpu(np.arange(0, 6).reshape([3, 2]).astype(np.float32), [0, 1])
@test_util.run_v1_only("b/120545219")
def testSimple(self):
self._compareCpu(
np.arange(0, 8).reshape([2, 4]).astype(np.float32),
np.array([1, 0]).astype(np.int32))
def testPermType(self):
for perm_dtype in [np.int64, np.int32]:
x = np.arange(0, 8).reshape([2, 4]).astype(np.float32)
p = np.array([1, 0]).astype(perm_dtype)
np_ans = np.copy(x).transpose(p)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
inp = constant_op.constant(p)
y = array_ops.transpose(inx, inp)
tf_ans = self.evaluate(y)
self.assertShapeEqual(np_ans, y)
self.assertAllEqual(np_ans, tf_ans)
def testHalf(self):
self._compare(np.arange(0, 21).reshape([3, 7]).astype(np.float16))
self._compare(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float16))
self._compare(
np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float16))
@test_util.run_v1_only("b/120545219")
def testFloat(self):
self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float32))
self._compare_cpu_gpu(
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float32))
self._compare_cpu_gpu(
np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float32))
@test_util.run_v1_only("b/120545219")
def testDouble(self):
self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float64))
self._compare_cpu_gpu(
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float64))
self._compare_cpu_gpu(
np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float64))
@test_util.run_v1_only("b/120545219")
def testComplex64(self):
self._testBoth(
np.complex(1, 2) *
np.arange(0, 21).reshape([3, 7]).astype(np.complex64))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex64))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.complex64))
@test_util.run_v1_only("b/120545219")
def testComplex128(self):
self._testBoth(
np.complex(1, 2) *
np.arange(0, 21).reshape([3, 7]).astype(np.complex128))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex128))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.complex128))
def testInt8(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int8))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int8))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int8))
def testInt16(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int16))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int16))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int16))
def testInt32(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int32))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int32))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int32))
def testInt64(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int64))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int64))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int64))
@test_util.run_v1_only("b/120545219")
def testTranspose2DAuto(self):
x_np = [[1, 2, 3], [4, 5, 6]]
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
x_tf = array_ops.transpose(x_np).eval()
self.assertAllEqual(x_tf, [[1, 4], [2, 5], [3, 6]])
@test_util.run_v1_only("b/120545219")
def testSingletonDims(self):
# A singleton dimension is a dimension i with shape[i] == 1. Such dimensions
# can be collapsed and expanded using reshape without changing the
# underlying data storage. If all non-singleton dimensions remain in
# ascending order, the shuffled singletons will be transposed by a reshape,
# saving a memory allocation & copy. Since this gets a special code-path in
# transpose_op.cc, we test that the codepath is exercised and the results
# are as expected; we do not test that we save the memory allocation and
# copy here.
for shape in [[2, 1, 2], [2, 1, 2, 1, 1, 2], [1, 2, 2, 1, 1, 1],
[1, 1, 1, 2, 2, 2], [2, 2, 1, 1, 1]]:
self._compare_cpu_gpu(
np.arange(np.prod(shape)).reshape(shape).astype(np.float32))
@test_util.run_v1_only("b/120545219")
def testTransposeShapes(self):
self.assertEqual(
[],
array_ops.transpose(array_ops.placeholder(
dtypes.int32, shape=[])).get_shape().dims)
self.assertEqual(
[100],
array_ops.transpose(array_ops.placeholder(
dtypes.int32, shape=[100])).get_shape().dims)
self.assertEqual(
[37, 100],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37])).get_shape().dims)
self.assertEqual(
[100, 37],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37]), [0, 1]).get_shape().dims)
self.assertEqual(
[15, 37, 100],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37, 15])).get_shape().dims)
self.assertEqual(
[15, 100, 37],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37, 15]), [2, 0, 1]).get_shape().dims)
self.assertEqual(
tensor_shape.TensorShape(None),
array_ops.transpose(array_ops.placeholder(dtypes.int32)).get_shape())
self.assertEqual(
tensor_shape.TensorShape(None),
array_ops.transpose(array_ops.placeholder(dtypes.int32),
[0]).get_shape())
@test_util.run_v1_only("b/120545219")
def testNullTensor(self):
with self.cached_session():
x = constant_op.constant([], dtype=dtypes.float32, shape=[1, 4, 0])
xt = array_ops.transpose(x, [0, 2, 1]).eval()
self.assertAllEqual(xt.shape, (1, 0, 4))
@test_util.run_v1_only("b/120545219")
def testScalar(self):
with self.cached_session():
x = constant_op.constant(42, dtype=dtypes.float32, shape=[])
xt = array_ops.transpose(x).eval()
self.assertAllEqual(xt, x)
def _testError(self, x, p, err):
with self.cached_session():
with self.assertRaisesOpError(err):
array_ops.transpose(x, p).eval()
@test_util.run_v1_only("b/120545219")
def testError(self):
with self.assertRaises(ValueError):
array_ops.transpose(
np.arange(0., 30).reshape([2, 3, 5]), [[0, 1], [2, 3]])
with self.assertRaises(ValueError):
array_ops.transpose(np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 3])
self._testError(
np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 1], "2 is missing")
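# Hedged numpy demonstration (added for exposition; not used by the tests) of
# the singleton-dimension fast path described in testSingletonDims: shuffling
# only size-1 dimensions leaves the flat data order intact, so the transpose
# is equivalent to a reshape.
def _singleton_transpose_is_reshape():
  x = np.arange(4).reshape([2, 1, 2])
  return np.array_equal(x.transpose([1, 0, 2]), x.reshape([1, 2, 2]))  # True.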
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/transpose_op_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.linalg.linalg_impl.tridiagonal_matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class TridiagonalMulOpTest(test.TestCase):
def _testAllFormats(self,
superdiag,
maindiag,
subdiag,
rhs,
expected,
dtype=dtypes.float64):
superdiag_extended = np.pad(superdiag, [0, 1], 'constant')
subdiag_extended = np.pad(subdiag, [1, 0], 'constant')
diags_compact = np.stack([superdiag_extended, maindiag, subdiag_extended])
diags_matrix = np.diag(superdiag, 1) + np.diag(maindiag, 0) + np.diag(
subdiag, -1)
diags_sequence = (constant_op.constant(superdiag_extended, dtype),
constant_op.constant(maindiag, dtype),
constant_op.constant(subdiag_extended, dtype))
diags_compact = constant_op.constant(diags_compact, dtype)
diags_matrix = constant_op.constant(diags_matrix, dtype)
rhs = constant_op.constant(rhs, dtype)
rhs_batch = array_ops.stack([rhs, 2 * rhs])
diags_compact_batch = array_ops.stack([diags_compact, 2 * diags_compact])
diags_matrix_batch = array_ops.stack([diags_matrix, 2 * diags_matrix])
diags_sequence_batch = [array_ops.stack([x, 2 * x]) for x in diags_sequence]
results = [
linalg_impl.tridiagonal_matmul(
diags_sequence, rhs, diagonals_format='sequence'),
linalg_impl.tridiagonal_matmul(
diags_compact, rhs, diagonals_format='compact'),
linalg_impl.tridiagonal_matmul(
diags_matrix, rhs, diagonals_format='matrix')
]
results_batch = [
linalg_impl.tridiagonal_matmul(
diags_sequence_batch, rhs_batch, diagonals_format='sequence'),
linalg_impl.tridiagonal_matmul(
diags_compact_batch, rhs_batch, diagonals_format='compact'),
linalg_impl.tridiagonal_matmul(
diags_matrix_batch, rhs_batch, diagonals_format='matrix')
]
with self.cached_session(use_gpu=True):
results = self.evaluate(results)
results_batch = self.evaluate(results_batch)
expected = np.array(expected)
expected_batch = np.stack([expected, 4 * expected])
for result in results:
self.assertAllClose(result, expected)
for result in results_batch:
self.assertAllClose(result, expected_batch)
def _makeTridiagonalMatrix(self, superdiag, maindiag, subdiag):
super_pad = [[0, 0], [0, 1], [1, 0]]
sub_pad = [[0, 0], [1, 0], [0, 1]]
super_part = array_ops.pad(array_ops.matrix_diag(superdiag), super_pad)
main_part = array_ops.matrix_diag(maindiag)
sub_part = array_ops.pad(array_ops.matrix_diag(subdiag), sub_pad)
return super_part + main_part + sub_part
def _randomComplexArray(self, shape):
np.random.seed(43)
return (np.random.uniform(-10, 10, shape) +
np.random.uniform(-10, 10, shape) * 1j)
def _gradientTest(self, diags, rhs, dtype=dtypes.float64):
def reference_matmul(diags, rhs):
matrix = self._makeTridiagonalMatrix(diags[..., 0, :-1], diags[..., 1, :],
diags[..., 2, 1:])
return math_ops.matmul(matrix, rhs)
diags = constant_op.constant(diags, dtype=dtype)
rhs = constant_op.constant(rhs, dtype=dtype)
with self.cached_session(use_gpu=True):
grad_reference, _ = gradient_checker_v2.compute_gradient(
reference_matmul, [diags, rhs])
grad_theoretical, grad_numerical = gradient_checker_v2.compute_gradient(
linalg_impl.tridiagonal_matmul, [diags, rhs])
self.assertAllClose(grad_theoretical, grad_numerical)
self.assertAllClose(grad_theoretical, grad_reference)
def test1x1(self):
self._testAllFormats([], [2], [], [[1, 4]], [[2, 8]])
def test2x2(self):
self._testAllFormats([1], [2, 3], [4], [[2, 1], [4, 3]], [[8, 5], [20, 13]])
def test3x3(self):
for dtype in [dtypes.float32, dtypes.float64]:
self._testAllFormats([1, 2], [1, 2, 1], [2, 1], [[1, 1], [2, 2], [3, 3]],
[[3, 3], [12, 12], [5, 5]],
dtype=dtype)
def testComplex(self):
for dtype in [dtypes.complex64, dtypes.complex128]:
self._testAllFormats([1j, 1j], [1, -1, 0], [1j, 1j],
np.array([[1, 1j], [1, 1j], [1, 1j]]),
[[1 + 1j, -1 + 1j], [-1 + 2j, -2 - 1j], [1j, -1]],
dtype=dtype)
def testBatch(self):
b = 20
m = 10
n = 15
superdiag = self._randomComplexArray((b, m - 1))
maindiag = self._randomComplexArray((b, m))
subdiag = self._randomComplexArray((b, m - 1))
rhs = self._randomComplexArray((b, m, n))
    matrix = np.stack([np.diag(superdiag[i], 1) +
                       np.diag(maindiag[i], 0) +
                       np.diag(subdiag[i], -1) for i in range(b)])
expected_result = np.matmul(matrix, rhs)
result = linalg_impl.tridiagonal_matmul(
constant_op.constant(matrix, dtype=dtypes.complex128),
constant_op.constant(rhs, dtype=dtypes.complex128),
diagonals_format='matrix')
with self.cached_session(use_gpu=True):
result = self.evaluate(result)
self.assertAllClose(result, expected_result)
def testGradientSmall(self):
self._gradientTest([[[1, 2, 0], [1, 2, 3], [0, 1, 2]]],
[[[1, 2], [3, 4], [5, 6]]],
dtype=dtypes.float64)
def testGradientComplexSmall(self):
self._gradientTest(
np.array([[[1 + 1j, 2j, 0], [1 + 2j, 2j, 3 + 0j], [0, 1j, 2 + 0j]]]),
np.array([[[1j, 2 + 0j], [3 + 1j, 4j], [5j, 6 + 3j]]]),
dtype=dtypes.complex128)
def testGradientComplexWithBatches(self):
b = 5
m = 10
n = 15
diags = self._randomComplexArray((b, 3, m))
rhs = self._randomComplexArray((b, m, n))
self._gradientTest(diags, rhs, dtype=dtypes.complex128)
# Benchmark
class TridiagonalMatMulBenchmark(test.Benchmark):
sizes = [(100000, 1, 1), (1000000, 1, 1), (10000000, 1, 1), (100000, 10, 1),
(100000, 100, 1), (10000, 1, 100), (10000, 1, 1000),
(10000, 1, 10000)]
def baseline(self, upper, diag, lower, vec):
diag_part = array_ops.expand_dims(diag, -1) * vec
lower_part = array_ops.pad(
array_ops.expand_dims(lower[:, 1:], -1) * vec[:, :-1, :],
[[0, 0], [1, 0], [0, 0]])
upper_part = array_ops.pad(
array_ops.expand_dims(upper[:, :-1], -1) * vec[:, 1:, :],
[[0, 0], [0, 1], [0, 0]])
return lower_part + diag_part + upper_part
def _generateData(self, batch_size, m, n, seed=42):
np.random.seed(seed)
data = np.random.normal(size=(batch_size, m, 3 + n))
return (variables.Variable(data[:, :, 0], dtype=dtypes.float64),
variables.Variable(data[:, :, 1], dtype=dtypes.float64),
variables.Variable(data[:, :, 2], dtype=dtypes.float64),
variables.Variable(data[:, :, 3:], dtype=dtypes.float64))
def benchmarkTridiagonalMulOp(self):
devices = [('/cpu:0', 'cpu')]
if test.is_gpu_available(cuda_only=True):
devices += [('/gpu:0', 'gpu')]
for device_option, size_option in itertools.product(devices, self.sizes):
device_id, device_name = device_option
m, batch_size, n = size_option
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device(device_id):
upper, diag, lower, vec = self._generateData(batch_size, m, n)
x1 = self.baseline(upper, diag, lower, vec)
x2 = linalg_impl.tridiagonal_matmul((upper, diag, lower),
vec,
diagonals_format='sequence')
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(x1),
min_iters=10,
store_memory_usage=False,
name=('tridiagonal_matmul_baseline_%s'
'_batch_size_%d_m_%d_n_%d' %
(device_name, batch_size, m, n)))
self.run_op_benchmark(
sess,
control_flow_ops.group(x2),
min_iters=10,
store_memory_usage=False,
name=('tridiagonal_matmul_%s_batch_size_%d_m_%d_n_%d' %
(device_name, batch_size, m, n)))
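# Hedged numpy reference (added for exposition; unused by the tests) for the
# 'compact' diagonals format used above: row 0 is the superdiagonal (last
# entry is padding), row 1 the main diagonal, row 2 the subdiagonal (first
# entry is padding).
def _np_tridiagonal_matmul_compact(diags, rhs):
  superdiag, maindiag, subdiag = diags[0], diags[1], diags[2]
  matrix = (np.diag(superdiag[:-1], 1) + np.diag(maindiag, 0) +
            np.diag(subdiag[1:], -1))
  return np.matmul(matrix, rhs)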
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/tridiagonal_matmul_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for EncodeBase64 and DecodeBase64."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
@test_util.run_deprecated_v1
class Base64OpsTest(test_util.TensorFlowTestCase):
def setUp(self):
self._msg = array_ops.placeholder(dtype=dtypes.string)
self._encoded_f = string_ops.encode_base64(self._msg, pad=False)
self._decoded_f = string_ops.decode_base64(self._encoded_f)
self._encoded_t = string_ops.encode_base64(self._msg, pad=True)
self._decoded_t = string_ops.decode_base64(self._encoded_t)
def _RemovePad(self, msg, base64_msg):
if len(msg) % 3 == 1:
return base64_msg[:-2]
if len(msg) % 3 == 2:
return base64_msg[:-1]
return base64_msg
def _RunTest(self, msg, pad):
with self.cached_session() as sess:
if pad:
encoded, decoded = sess.run([self._encoded_t, self._decoded_t],
feed_dict={self._msg: msg})
else:
encoded, decoded = sess.run([self._encoded_f, self._decoded_f],
feed_dict={self._msg: msg})
if not isinstance(msg, (list, tuple)):
msg = [msg]
encoded = [encoded]
decoded = [decoded]
base64_msg = [base64.urlsafe_b64encode(m) for m in msg]
if not pad:
base64_msg = [self._RemovePad(m, b) for m, b in zip(msg, base64_msg)]
for i in range(len(msg)):
self.assertEqual(base64_msg[i], encoded[i])
self.assertEqual(msg[i], decoded[i])
def testWithPythonBase64(self):
for pad in (False, True):
self._RunTest(b"", pad=pad)
for _ in range(100):
length = np.random.randint(1024 * 1024)
msg = np.random.bytes(length)
self._RunTest(msg, pad=pad)
def testShape(self):
for pad in (False, True):
for _ in range(10):
msg = [np.random.bytes(np.random.randint(20))
for _ in range(np.random.randint(10))]
self._RunTest(msg, pad=pad)
# Zero-element, non-trivial shapes.
for _ in range(10):
k = np.random.randint(10)
msg = np.empty((0, k), dtype=bytes)
encoded = string_ops.encode_base64(msg, pad=pad)
decoded = string_ops.decode_base64(encoded)
with self.cached_session() as sess:
encoded_value, decoded_value = self.evaluate([encoded, decoded])
self.assertEqual(encoded_value.shape, msg.shape)
self.assertEqual(decoded_value.shape, msg.shape)
def testInvalidInput(self):
def try_decode(enc):
self._decoded_f.eval(feed_dict={self._encoded_f: enc})
with self.cached_session():
# Invalid length.
msg = np.random.bytes(99)
enc = base64.urlsafe_b64encode(msg)
with self.assertRaisesRegexp(errors.InvalidArgumentError, "1 modulo 4"):
try_decode(enc + b"a")
# Invalid char used in encoding.
msg = np.random.bytes(34)
enc = base64.urlsafe_b64encode(msg)
for i in range(len(msg)):
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"?" + enc[(i + 1):])
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"\x80" + enc[(i + 1):]) # outside ascii range.
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"+" + enc[(i + 1):]) # not url-safe.
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"/" + enc[(i + 1):]) # not url-safe.
# Partial padding.
msg = np.random.bytes(34)
enc = base64.urlsafe_b64encode(msg)
with self.assertRaises(errors.InvalidArgumentError):
# enc contains == at the end. Partial padding is not allowed.
try_decode(enc[:-1])
# Unnecessary padding.
msg = np.random.bytes(33)
enc = base64.urlsafe_b64encode(msg)
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc + b"==")
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc + b"===")
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc + b"====")
      # Padding in the middle. (The previous implementation accepted this as
      # long as the padding char location was 2 or 3 (mod 4).)
msg = np.random.bytes(33)
enc = base64.urlsafe_b64encode(msg)
for i in range(len(msg) - 1):
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"=" + enc[(i + 1):])
for i in range(len(msg) - 2):
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"==" + enc[(i + 2):])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/base64_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
NAN = float('nan')
def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(
values, dtype=dtype, shape=shape)))
def _binary_2d_label_to_2d_sparse_value(labels):
"""Convert dense 2D binary indicator to sparse ID.
  Only `1` values in `labels` are included in the result.
Args:
labels: Dense 2D binary indicator, shape [batch_size, num_classes].
Returns:
`SparseTensorValue` of shape [batch_size, num_classes], where num_classes
is the number of `1` values in each row of `labels`. Values are indices
of `1` values along the last dimension of `labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
def _binary_2d_label_to_1d_sparse_value(labels):
"""Convert dense 2D binary indicator to sparse ID.
  Only `1` values in `labels` are included in the result.
Args:
labels: Dense 2D binary indicator, shape [batch_size, num_classes]. Each
row must contain exactly 1 `1` value.
Returns:
`SparseTensorValue` of shape [batch_size]. Values are indices of `1` values
along the last dimension of `labels`.
Raises:
ValueError: if there is not exactly 1 `1` value per row of `labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
if indices != [[i] for i in range(len(labels))]:
raise ValueError('Expected 1 label/example, got %s.' % indices)
shape = [len(labels)]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
def _binary_3d_label_to_sparse_value(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
  Only `1` values in `labels` are included in the result.
  Args:
    labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
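# Hedged numpy equivalent (added for exposition; `_np_indicator_to_class_ids`
# is hypothetical and unused): in all three converters above, each `1` at
# (row, col) contributes col as a sparse value; the converters additionally
# track the within-row position for the sparse index's trailing dimension.
def _np_indicator_to_class_ids(labels):
  batch_idx, class_ids = np.nonzero(np.asarray(labels))
  return batch_idx.astype(np.int64), class_ids.astype(np.int64)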
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_metric_variables(test_case, expected):
  test_case.assertEqual(
      set(expected), set(v.name for v in variables.local_variables()))
  test_case.assertEqual(
      set(expected),
      set(v.name for v in ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
def _test_values(shape):
return np.reshape(np.cumsum(np.ones(shape)), newshape=shape)
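# Hedged reference (added for exposition; `_np_streaming_mean` is hypothetical
# and unused): metrics.mean keeps `total` and `count` local variables; each
# update_op adds the weighted sum and the weight mass, and the metric value is
# total / count over all batches seen so far.
def _np_streaming_mean(batches, weights=None):
  total, count = 0.0, 0.0
  for i, batch in enumerate(batches):
    batch = np.asarray(batch, dtype=np.float64)
    w = (np.ones_like(batch) if weights is None else
         np.broadcast_to(np.asarray(weights[i], np.float64), batch.shape))
    total += np.sum(w * batch)
    count += np.sum(w)
    yield total / count  # What update_op returns after this batch.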
class MeanTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.mean(array_ops.ones([4, 3]))
_assert_metric_variables(self, ('mean/count:0', 'mean/total:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testBasic(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean(values)
self.evaluate(variables.local_variables_initializer())
for _ in range(4):
self.evaluate(update_op)
self.assertAlmostEqual(1.65, self.evaluate(mean), 5)
@test_util.run_deprecated_v1
def testUpdateOpsReturnsCurrentValue(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean(values)
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, self.evaluate(update_op), 5)
self.assertAlmostEqual(1.475, self.evaluate(update_op), 5)
self.assertAlmostEqual(12.4 / 6.0, self.evaluate(update_op), 5)
self.assertAlmostEqual(1.65, self.evaluate(update_op), 5)
self.assertAlmostEqual(1.65, self.evaluate(mean), 5)
@test_util.run_deprecated_v1
def testUnweighted(self):
values = _test_values((3, 2, 4, 1))
mean_results = (
metrics.mean(values),
metrics.mean(values, weights=1.0),
metrics.mean(values, weights=np.ones((1, 1, 1))),
metrics.mean(values, weights=np.ones((1, 1, 1, 1))),
metrics.mean(values, weights=np.ones((1, 1, 1, 1, 1))),
metrics.mean(values, weights=np.ones((1, 1, 4))),
metrics.mean(values, weights=np.ones((1, 1, 4, 1))),
metrics.mean(values, weights=np.ones((1, 2, 1))),
metrics.mean(values, weights=np.ones((1, 2, 1, 1))),
metrics.mean(values, weights=np.ones((1, 2, 4))),
metrics.mean(values, weights=np.ones((1, 2, 4, 1))),
metrics.mean(values, weights=np.ones((3, 1, 1))),
metrics.mean(values, weights=np.ones((3, 1, 1, 1))),
metrics.mean(values, weights=np.ones((3, 1, 4))),
metrics.mean(values, weights=np.ones((3, 1, 4, 1))),
metrics.mean(values, weights=np.ones((3, 2, 1))),
metrics.mean(values, weights=np.ones((3, 2, 1, 1))),
metrics.mean(values, weights=np.ones((3, 2, 4))),
metrics.mean(values, weights=np.ones((3, 2, 4, 1))),
metrics.mean(values, weights=np.ones((3, 2, 4, 1, 1))),)
expected = np.mean(values)
with self.cached_session():
variables.local_variables_initializer().run()
for mean_result in mean_results:
mean, update_op = mean_result
self.assertAlmostEqual(expected, update_op.eval())
self.assertAlmostEqual(expected, mean.eval())
def _test_3d_weighted(self, values, weights):
expected = (
np.sum(np.multiply(weights, values)) /
np.sum(np.multiply(weights, np.ones_like(values)))
)
mean, update_op = metrics.mean(values, weights=weights)
with self.cached_session():
variables.local_variables_initializer().run()
self.assertAlmostEqual(expected, update_op.eval(), places=5)
self.assertAlmostEqual(expected, mean.eval(), places=5)
@test_util.run_deprecated_v1
def test1x1x1Weighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5,)).reshape((1, 1, 1)))
@test_util.run_deprecated_v1
def test1x1xNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 7, 11, 3)).reshape((1, 1, 4)))
@test_util.run_deprecated_v1
def test1xNx1Weighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 11)).reshape((1, 2, 1)))
@test_util.run_deprecated_v1
def test1xNxNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4)))
@test_util.run_deprecated_v1
def testNx1x1Weighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 7, 11)).reshape((3, 1, 1)))
@test_util.run_deprecated_v1
def testNx1xNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3)).reshape((3, 1, 4)))
@test_util.run_deprecated_v1
def testNxNxNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidWeights(self):
values_placeholder = array_ops.placeholder(dtype=dtypes_lib.float32)
values = _test_values((3, 2, 4, 1))
invalid_weights = (
(1,),
(1, 1),
(3, 2),
(2, 4, 1),
(4, 2, 4, 1),
(3, 3, 4, 1),
(3, 2, 5, 1),
(3, 2, 4, 2),
(1, 1, 1, 1, 1))
expected_error_msg = 'weights can not be broadcast to values'
for invalid_weight in invalid_weights:
# Static shapes.
with self.assertRaisesRegexp(ValueError, expected_error_msg):
metrics.mean(values, invalid_weight)
# Dynamic shapes.
with self.assertRaisesRegexp(errors_impl.OpError, expected_error_msg):
with self.cached_session():
_, update_op = metrics.mean(values_placeholder, invalid_weight)
variables.local_variables_initializer().run()
update_op.eval(feed_dict={values_placeholder: values})


class MeanTensorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.mean_tensor(array_ops.ones([4, 3]))
_assert_metric_variables(self,
('mean/total_tensor:0', 'mean/count_tensor:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_tensor(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_tensor(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testBasic(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean_tensor(values)
self.evaluate(variables.local_variables_initializer())
for _ in range(4):
self.evaluate(update_op)
self.assertAllClose([[-0.9 / 4., 3.525]], self.evaluate(mean))
@test_util.run_deprecated_v1
def testMultiDimensional(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
shape=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.mean_tensor(values)
self.evaluate(variables.local_variables_initializer())
for _ in range(2):
self.evaluate(update_op)
self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]],
self.evaluate(mean))
@test_util.run_deprecated_v1
def testUpdateOpsReturnsCurrentValue(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean_tensor(values)
self.evaluate(variables.local_variables_initializer())
self.assertAllClose([[0, 1]], self.evaluate(update_op), 5)
self.assertAllClose([[-2.1, 5.05]], self.evaluate(update_op), 5)
self.assertAllClose([[2.3 / 3., 10.1 / 3.]], self.evaluate(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], self.evaluate(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], self.evaluate(mean), 5)
@test_util.run_deprecated_v1
def testBinaryWeighted1d(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
weights = weights_queue.dequeue()
mean, update_op = metrics.mean_tensor(values, weights)
self.evaluate(variables.local_variables_initializer())
for _ in range(4):
self.evaluate(update_op)
self.assertAllClose([[3.25, 0.5]], self.evaluate(mean), 5)
@test_util.run_deprecated_v1
def testWeighted1d(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[0.0025]])
_enqueue_vector(sess, weights_queue, [[0.005]])
_enqueue_vector(sess, weights_queue, [[0.01]])
_enqueue_vector(sess, weights_queue, [[0.0075]])
weights = weights_queue.dequeue()
mean, update_op = metrics.mean_tensor(values, weights)
self.evaluate(variables.local_variables_initializer())
for _ in range(4):
self.evaluate(update_op)
      # Per-element weighted means: 0.02 / 0.025 = 0.8 and
      # 0.078 / 0.025 = 3.12.
      self.assertAllClose([[0.8, 3.12]], self.evaluate(mean), 5)
@test_util.run_deprecated_v1
def testWeighted2d_1(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.mean_tensor(values, weights)
self.evaluate(variables.local_variables_initializer())
for _ in range(4):
self.evaluate(update_op)
self.assertAllClose([[-2.1, 0.5]], self.evaluate(mean), 5)
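  # Worked arithmetic for [[-2.1, 0.5]] above (per-element weighted means):
  # column 1 keeps rows 1 and 2, (0 * 1 + -4.2 * 1) / (1 + 1) = -2.1;
  # column 2 keeps rows 1 and 3, (1 * 1 + 0 * 1) / (1 + 1) = 0.5.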
@test_util.run_deprecated_v1
def testWeighted2d_2(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.mean_tensor(values, weights)
self.evaluate(variables.local_variables_initializer())
for _ in range(4):
self.evaluate(update_op)
self.assertAllClose([[0, 0.5]], self.evaluate(mean), 5)


class AccuracyTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
name='my_accuracy')
_assert_metric_variables(self,
('my_accuracy/count:0', 'my_accuracy/total:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 4))
with self.assertRaises(ValueError):
metrics.accuracy(labels, predictions)
@test_util.run_deprecated_v1
def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 3))
weights = array_ops.ones((9, 3))
with self.assertRaises(ValueError):
metrics.accuracy(labels, predictions, weights)
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
accuracy, update_op = metrics.accuracy(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_accuracy = accuracy.eval()
for _ in range(10):
self.assertEqual(initial_accuracy, accuracy.eval())
@test_util.run_deprecated_v1
def testMultipleUpdates(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
accuracy, update_op = metrics.accuracy(labels, predictions)
self.evaluate(variables.local_variables_initializer())
for _ in xrange(3):
self.evaluate(update_op)
self.assertEqual(0.5, self.evaluate(update_op))
self.assertEqual(0.5, accuracy.eval())
@test_util.run_deprecated_v1
def testEffectivelyEquivalentSizes(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.cached_session() as sess:
accuracy, update_op = metrics.accuracy(labels, predictions)
self.evaluate(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
@test_util.run_deprecated_v1
def testEffectivelyEquivalentSizesWithScalarWeight(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.cached_session() as sess:
accuracy, update_op = metrics.accuracy(labels, predictions, weights=2.0)
self.evaluate(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
@test_util.run_deprecated_v1
def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
    predictions = ops.convert_to_tensor([1, 1, 1])  # shape (3,)
    labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
                                   1)  # shape (3, 1)
    weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
                                    1)  # shape (3, 1)
with self.cached_session() as sess:
accuracy, update_op = metrics.accuracy(labels, predictions, weights)
self.evaluate(variables.local_variables_initializer())
      # If the accuracy metric did not flatten the weights, the (3, 1)
      # weights would broadcast against the (3,) predictions and the
      # accuracy would be 0.33333334. Because the weights are flattened,
      # the accuracy is higher than .95.
self.assertGreater(update_op.eval(), .95)
self.assertGreater(accuracy.eval(), .95)
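  # A minimal NumPy sketch (the helper name below is illustrative) of the
  # broadcast the comment above refers to: a (3, 1) weight against a (3,)
  # match vector broadcasts to (3, 3), so without flattening each weight
  # would be applied to every prediction.
  def _broadcast_blowup_sketch(self):
    weights = np.asarray([[100], [1], [1]])  # shape (3, 1)
    matches = np.asarray([1, 0, 0])  # shape (3,)
    return (weights * matches).shape  # (3, 3), not (3,)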
@test_util.run_deprecated_v1
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
    predictions = ops.convert_to_tensor([1, 1, 1])  # shape (3,)
    labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
                                   1)  # shape (3, 1)
    weights = [[100], [1], [1]]  # shape (3, 1)
weights_placeholder = array_ops.placeholder(
dtype=dtypes_lib.int32, name='weights')
feed_dict = {weights_placeholder: weights}
with self.cached_session() as sess:
accuracy, update_op = metrics.accuracy(labels, predictions,
weights_placeholder)
self.evaluate(variables.local_variables_initializer())
      # If the accuracy metric did not flatten the weights, the (3, 1)
      # weights would broadcast against the (3,) predictions and the
      # accuracy would be 0.33333334. Because the weights are flattened,
      # the accuracy is higher than .95.
self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
@test_util.run_deprecated_v1
def testMultipleUpdatesWithWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.int64, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
weights = weights_queue.dequeue()
accuracy, update_op = metrics.accuracy(labels, predictions, weights)
self.evaluate(variables.local_variables_initializer())
for _ in xrange(3):
self.evaluate(update_op)
self.assertEqual(1.0, self.evaluate(update_op))
self.assertEqual(1.0, accuracy.eval())
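  # Worked arithmetic for the weighted accuracy above: only the first two
  # (prediction, label) pairs carry nonzero weight, and both match, so the
  # weighted accuracy is (1 + 1) / (1 + 1) = 1.0.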


class PrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.precision(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self, ('precision/false_positives/count:0',
'precision/true_positives/count:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
precision, update_op = metrics.precision(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertEqual(initial_precision, precision.eval())
@test_util.run_deprecated_v1
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
precision, update_op = metrics.precision(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(1, self.evaluate(update_op))
self.assertAlmostEqual(1, precision.eval())
@test_util.run_deprecated_v1
def testSomeCorrect_multipleInputDtypes(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant([1, 0, 1, 0], shape=(1, 4)), dtype=dtype)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=dtype)
precision, update_op = metrics.precision(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, precision.eval())
@test_util.run_deprecated_v1
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.precision(
labels, predictions, weights=constant_op.constant([[2], [5]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
@test_util.run_deprecated_v1
def testWeightedScalar_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.precision(labels, predictions, weights=2)
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 2.0
weighted_positives = (2.0 + 2.0) + (2.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testWeighted1d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.precision(
labels, predictions, weights=constant_op.constant([[2], [5]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.precision(
labels,
predictions,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
@test_util.run_deprecated_v1
def testWeighted2d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.precision(
labels,
predictions,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(1 - inputs)
precision, update_op = metrics.precision(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.evaluate(update_op)
self.assertAlmostEqual(0, precision.eval())
@test_util.run_deprecated_v1
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
predictions = constant_op.constant([0, 0, 0, 0])
labels = constant_op.constant([0, 0, 0, 0])
precision, update_op = metrics.precision(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.evaluate(update_op)
self.assertEqual(0.0, precision.eval())


class RecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.recall(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('recall/false_negatives/count:0', 'recall/true_positives/count:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
recall, update_op = metrics.recall(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertEqual(initial_recall, recall.eval())
@test_util.run_deprecated_v1
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
recall, update_op = metrics.recall(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.evaluate(update_op)
self.assertEqual(1, recall.eval())
@test_util.run_deprecated_v1
def testSomeCorrect_multipleInputDtypes(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant([1, 0, 1, 0], shape=(1, 4)), dtype=dtype)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=dtype)
recall, update_op = metrics.recall(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, recall.eval())
@test_util.run_deprecated_v1
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
recall, update_op = metrics.recall(labels, predictions, weights=weights)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
      weighted_tp = 2.0 + 5.0
      weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
@test_util.run_deprecated_v1
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
recall, update_op = metrics.recall(labels, predictions, weights=weights)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
      weighted_tp = 3.0 + 1.0
      weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
@test_util.run_deprecated_v1
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.recall(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.evaluate(update_op)
self.assertEqual(0, recall.eval())
@test_util.run_deprecated_v1
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
recall, update_op = metrics.recall(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.evaluate(update_op)
self.assertEqual(0, recall.eval())


class AUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.auc(predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('auc/true_positives:0', 'auc/false_negatives:0',
'auc/false_positives:0', 'auc/true_negatives:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.auc(predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.auc(predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
auc, update_op = metrics.auc(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in range(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
@test_util.run_deprecated_v1
def testAllCorrect(self):
self.allCorrectAsExpected('ROC')
def allCorrectAsExpected(self, curve):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
auc, update_op = metrics.auc(labels, predictions, curve=curve)
self.evaluate(variables.local_variables_initializer())
self.assertEqual(1, self.evaluate(update_op))
self.assertEqual(1, auc.eval())
@test_util.run_deprecated_v1
def testSomeCorrect_multipleLabelDtypes(self):
with self.cached_session() as sess:
for label_dtype in (
dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=label_dtype)
auc, update_op = metrics.auc(labels, predictions)
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, self.evaluate(update_op))
self.assertAlmostEqual(0.5, auc.eval())
@test_util.run_deprecated_v1
def testWeighted1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([2], shape=(1, 1))
auc, update_op = metrics.auc(labels, predictions, weights=weights)
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, self.evaluate(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
@test_util.run_deprecated_v1
def testWeighted2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
auc, update_op = metrics.auc(labels, predictions, weights=weights)
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(0.7, self.evaluate(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
@test_util.run_deprecated_v1
def testManualThresholds(self):
with self.cached_session():
# Verifies that thresholds passed in to the `thresholds` parameter are
# used correctly.
      # The default thresholds do not split the second and third predictions.
      # Thus, when we provide manual thresholds that correctly split them, we
      # get an accurate AUC value.
predictions = constant_op.constant(
[0.12, 0.3001, 0.3003, 0.72], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
weights = constant_op.constant([1, 1, 1, 1], shape=(1, 4))
thresholds = [0.0, 0.2, 0.3002, 0.6, 1.0]
default_auc, default_update_op = metrics.auc(labels,
predictions,
weights=weights)
manual_auc, manual_update_op = metrics.auc(labels,
predictions,
weights=weights,
thresholds=thresholds)
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(0.875, self.evaluate(default_update_op), 3)
self.assertAlmostEqual(0.875, default_auc.eval(), 3)
self.assertAlmostEqual(0.75, self.evaluate(manual_update_op), 3)
self.assertAlmostEqual(0.75, manual_auc.eval(), 3)
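  # A minimal sketch of the bucketing described above, written with NumPy
  # (`np.digitize` stands in for the metric's internal histogramming):
  #
  #   predictions = np.asarray([0.12, 0.3001, 0.3003, 0.72])
  #   np.digitize(predictions, [0.0, 0.2, 0.3002, 0.6, 1.0])
  #   # => array([1, 2, 3, 4]): the manual threshold at 0.3002 places each
  #   # prediction in its own bucket, separating 0.3001 from 0.3003.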
# Regarding the AUC-PR tests: note that the preferred method when
# calculating AUC-PR is summation_method='careful_interpolation'.
@test_util.run_deprecated_v1
def testCorrectAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.auc(labels, predictions, curve='PR',
summation_method='careful_interpolation')
self.evaluate(variables.local_variables_initializer())
# expected ~= 0.79726744594
expected = 1 - math.log(1.5) / 2
self.assertAlmostEqual(expected, self.evaluate(update_op), delta=1e-3)
self.assertAlmostEqual(expected, auc.eval(), delta=1e-3)
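  # Sketch of the closed form above, assuming careful interpolation varies
  # TP and FP linearly between operating points: from (TP=1, FP=1,
  # recall=0.5) to (TP=2, FP=1, recall=1), precision is
  # p(t) = (1 + t) / (2 + t), so
  #   AUC-PR = 0.5 + integral_0^1 (1 + t) / (2 + t) * dt / 2
  #          = 0.5 + (1 - log(1.5)) / 2 = 1 - log(1.5) / 2.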
@test_util.run_deprecated_v1
def testCorrectAnotherAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.auc(labels, predictions, curve='PR',
summation_method='careful_interpolation')
self.evaluate(variables.local_variables_initializer())
# expected ~= 0.61350593198
expected = (2.5 - 2 * math.log(4./3) - 0.25 * math.log(7./5)) / 3
self.assertAlmostEqual(expected, self.evaluate(update_op), delta=1e-3)
self.assertAlmostEqual(expected, auc.eval(), delta=1e-3)
@test_util.run_deprecated_v1
def testThirdCorrectAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.auc(labels, predictions, curve='PR',
summation_method='careful_interpolation')
self.evaluate(variables.local_variables_initializer())
# expected ~= 0.90410597584
expected = 1 - math.log(4./3) / 3
self.assertAlmostEqual(expected, self.evaluate(update_op), delta=1e-3)
self.assertAlmostEqual(expected, auc.eval(), delta=1e-3)
@test_util.run_deprecated_v1
def testIncorrectAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.auc(labels, predictions, curve='PR',
summation_method='trapezoidal')
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, self.evaluate(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
@test_util.run_deprecated_v1
def testAnotherIncorrectAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.auc(labels, predictions, curve='PR',
summation_method='trapezoidal')
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, self.evaluate(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
@test_util.run_deprecated_v1
def testThirdIncorrectAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.auc(labels, predictions, curve='PR',
summation_method='trapezoidal')
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, self.evaluate(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
@test_util.run_deprecated_v1
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.auc(labels, predictions)
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(0, self.evaluate(update_op))
self.assertAlmostEqual(0, auc.eval())
@test_util.run_deprecated_v1
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
auc, update_op = metrics.auc(labels, predictions)
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(1, self.evaluate(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
@test_util.run_deprecated_v1
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.cached_session() as sess:
predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
labels = array_ops.ones([4])
auc, update_op = metrics.auc(labels, predictions, curve='PR')
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(1, self.evaluate(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def np_auc(self, predictions, labels, weights):
"""Computes the AUC explicitly using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
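  # Hedged sanity sketch for `np_auc` (the method name below is illustrative
  # and not exercised by the test runner): perfectly separating scores rank
  # every positive above every negative, so the AUC is exactly 1.
  def _np_auc_perfect_separation_sketch(self):
    predictions = np.asarray([0.9, 0.8, 0.2, 0.1])
    labels = np.asarray([1, 1, 0, 0])
    return self.np_auc(predictions, labels, weights=None)  # == 1.0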
@test_util.run_deprecated_v1
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=num_samples)
noise = np.random.normal(0.0, scale=0.2, size=num_samples)
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
def _enqueue_as_batches(x, enqueue_ops):
x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
x_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(num_batches):
enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
return x_queue.dequeue()
for weights in (None, np.ones(num_samples), np.random.exponential(
scale=1.0, size=num_samples)):
expected_auc = self.np_auc(predictions, labels, weights)
with self.cached_session() as sess:
enqueue_ops = [[] for i in range(num_batches)]
tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
tf_labels = _enqueue_as_batches(labels, enqueue_ops)
tf_weights = (_enqueue_as_batches(weights, enqueue_ops) if
weights is not None else None)
for i in range(num_batches):
sess.run(enqueue_ops[i])
auc, update_op = metrics.auc(tf_labels,
tf_predictions,
curve='ROC',
num_thresholds=500,
weights=tf_weights)
self.evaluate(variables.local_variables_initializer())
for i in range(num_batches):
self.evaluate(update_op)
        # Since this is only approximate, we can't expect a 6-digit match,
        # although with a higher number of samples/thresholds the accuracy
        # should improve.
self.assertAlmostEqual(expected_auc, auc.eval(), 2)


class SpecificityAtSensitivityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7)
_assert_metric_variables(self,
('specificity_at_sensitivity/true_positives:0',
'specificity_at_sensitivity/false_negatives:0',
'specificity_at_sensitivity/false_positives:0',
'specificity_at_sensitivity/true_negatives:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.7)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_specificity = specificity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
@test_util.run_deprecated_v1
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.7)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertEqual(1, self.evaluate(update_op))
self.assertEqual(1, specificity.eval())
@test_util.run_deprecated_v1
def testSomeCorrectHighSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.8)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, self.evaluate(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
@test_util.run_deprecated_v1
def testSomeCorrectLowSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.4)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, self.evaluate(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
@test_util.run_deprecated_v1
def testWeighted1d_multipleLabelDtypes(self):
for label_dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = math_ops.cast(labels_values, dtype=label_dtype)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, weights=weights, sensitivity=0.4)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, self.evaluate(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
@test_util.run_deprecated_v1
def testWeighted2d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, weights=weights, sensitivity=0.4)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, self.evaluate(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
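  # Worked sketch for 8/15 above (assuming the metric picks the threshold
  # whose sensitivity best satisfies the 0.4 target): at a threshold of
  # ~0.25, weighted TP = 9 + 10 = 19 of the 40 units of positive weight
  # (sensitivity 0.475), and weighted TN = 1 + 2 + 5 = 8 of the 15 units of
  # negative weight, so specificity = 8 / 15.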


class SensitivityAtSpecificityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7)
_assert_metric_variables(self,
('sensitivity_at_specificity/true_positives:0',
'sensitivity_at_specificity/false_negatives:0',
'sensitivity_at_specificity/false_positives:0',
'sensitivity_at_specificity/true_negatives:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
sensitivity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, specificity=0.7)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_sensitivity = sensitivity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
@test_util.run_deprecated_v1
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
    sensitivity, update_op = metrics.sensitivity_at_specificity(
        labels, predictions, specificity=0.7)
    with self.cached_session() as sess:
      self.evaluate(variables.local_variables_initializer())
      self.assertEqual(1, self.evaluate(update_op))
      self.assertEqual(1, sensitivity.eval())
@test_util.run_deprecated_v1
def testSomeCorrectHighSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
    sensitivity, update_op = metrics.sensitivity_at_specificity(
        labels, predictions, specificity=0.8)
    with self.cached_session() as sess:
      self.evaluate(variables.local_variables_initializer())
      self.assertAlmostEqual(0.8, self.evaluate(update_op))
      self.assertAlmostEqual(0.8, sensitivity.eval())
@test_util.run_deprecated_v1
def testSomeCorrectLowSpecificity(self):
    predictions_values = [
        0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
    sensitivity, update_op = metrics.sensitivity_at_specificity(
        labels, predictions, specificity=0.4)
    with self.cached_session() as sess:
      self.evaluate(variables.local_variables_initializer())
      self.assertAlmostEqual(0.6, self.evaluate(update_op))
      self.assertAlmostEqual(0.6, sensitivity.eval())
@test_util.run_deprecated_v1
def testWeighted_multipleLabelDtypes(self):
for label_dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions_values = [
0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = math_ops.cast(labels_values, dtype=label_dtype)
weights = constant_op.constant(weights_values)
      sensitivity, update_op = metrics.sensitivity_at_specificity(
          labels, predictions, weights=weights, specificity=0.4)
      with self.cached_session() as sess:
        self.evaluate(variables.local_variables_initializer())
        self.assertAlmostEqual(0.675, self.evaluate(update_op))
        self.assertAlmostEqual(0.675, sensitivity.eval())
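  # Worked sketch for 0.675 above (assuming the threshold is chosen to meet
  # the 0.4 specificity target): a threshold of ~0.25 keeps weighted
  # TN = 1 + 2 + 3 = 6 of the 15 units of negative weight (specificity 0.4)
  # and weighted TP = 8 + 9 + 10 = 27 of the 40 units of positive weight,
  # so sensitivity = 27 / 40 = 0.675.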


# TODO(nsilberman): Break this up into two sets of tests.
class PrecisionRecallThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'precision_at_thresholds/true_positives:0',
'precision_at_thresholds/false_positives:0',
))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [precision_op, recall_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
thresholds = [0, 0.5, 1.0]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions, thresholds)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
# Run several updates, then verify idempotency.
self.evaluate([prec_op, rec_op])
initial_prec = prec.eval()
initial_rec = rec.eval()
for _ in range(10):
self.evaluate([prec_op, rec_op])
self.assertAllClose(initial_prec, prec.eval())
self.assertAllClose(initial_rec, rec.eval())
# TODO(nsilberman): fix tests (passing but incorrect).
@test_util.run_deprecated_v1
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
self.evaluate(variables.local_variables_initializer())
self.evaluate([prec_op, rec_op])
self.assertEqual(1, prec.eval())
self.assertEqual(1, rec.eval())
@test_util.run_deprecated_v1
def testSomeCorrect_multipleLabelDtypes(self):
with self.cached_session() as sess:
for label_dtype in (
dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=label_dtype)
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
self.evaluate(variables.local_variables_initializer())
self.evaluate([prec_op, rec_op])
self.assertAlmostEqual(0.5, prec.eval())
self.assertAlmostEqual(0.5, rec.eval())
@test_util.run_deprecated_v1
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
self.evaluate(variables.local_variables_initializer())
self.evaluate([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval())
self.assertAlmostEqual(0, rec.eval())
@test_util.run_deprecated_v1
def testWeights1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.precision_at_thresholds(
labels, predictions, thresholds, weights=weights)
rec, rec_op = metrics.recall_at_thresholds(
labels, predictions, thresholds, weights=weights)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
prec_low = array_ops.reshape(prec_low, shape=())
prec_high = array_ops.reshape(prec_high, shape=())
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
rec_low = array_ops.reshape(rec_low, shape=())
rec_high = array_ops.reshape(rec_high, shape=())
self.evaluate(variables.local_variables_initializer())
self.evaluate([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
@test_util.run_deprecated_v1
def testWeights2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.precision_at_thresholds(
labels, predictions, thresholds, weights=weights)
rec, rec_op = metrics.recall_at_thresholds(
labels, predictions, thresholds, weights=weights)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
prec_low = array_ops.reshape(prec_low, shape=())
prec_high = array_ops.reshape(prec_high, shape=())
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
rec_low = array_ops.reshape(rec_low, shape=())
rec_high = array_ops.reshape(rec_high, shape=())
self.evaluate(variables.local_variables_initializer())
self.evaluate([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
@test_util.run_deprecated_v1
def testExtremeThresholds(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
self.evaluate(variables.local_variables_initializer())
self.evaluate([prec_op, rec_op])
self.assertAlmostEqual(0.75, prec_low.eval())
self.assertAlmostEqual(0.0, prec_high.eval())
self.assertAlmostEqual(1.0, rec_low.eval())
self.assertAlmostEqual(0.0, rec_high.eval())
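  # Worked arithmetic for the extreme thresholds above: at -1.0 every
  # prediction counts as positive, so precision = 3 / 4 = 0.75 and
  # recall = 3 / 3 = 1.0; at 2.0 nothing is predicted positive, so both
  # fall back to 0.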
@test_util.run_deprecated_v1
def testZeroLabelsPredictions(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
self.evaluate(variables.local_variables_initializer())
self.evaluate([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval(), 6)
self.assertAlmostEqual(0, rec.eval(), 6)
@test_util.run_deprecated_v1
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
epsilon = 1e-7
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.cached_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
prec, prec_op = metrics.precision_at_thresholds(tf_labels, tf_predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(tf_labels, tf_predictions,
thresholds)
self.evaluate(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
self.evaluate([prec_op, rec_op])
# Since this is only approximate, we can't expect a 6-digit match,
# although with a higher number of samples/thresholds we should see
# the accuracy improving.
self.assertAlmostEqual(expected_prec, prec.eval(), 2)
self.assertAlmostEqual(expected_rec, rec.eval(), 2)
def _test_precision_at_k(predictions,
labels,
k,
expected,
class_id=None,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(test_case, update.eval())
_assert_nan(test_case, metric.eval())
else:
test_case.assertEqual(expected, update.eval())
test_case.assertEqual(expected, metric.eval())
def _test_precision_at_top_k(
predictions_idx,
labels,
expected,
k=None,
class_id=None,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.precision_at_top_k(
predictions_idx=constant_op.constant(predictions_idx, dtypes_lib.int32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
test_case.assertTrue(math.isnan(update.eval()))
test_case.assertTrue(math.isnan(metric.eval()))
else:
test_case.assertEqual(expected, update.eval())
test_case.assertEqual(expected, metric.eval())
def _test_average_precision_at_k(predictions,
labels,
k,
expected,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.average_precision_at_k(
labels, predictions, k, weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(test_case, update.eval())
_assert_nan(test_case, metric.eval())
else:
test_case.assertAlmostEqual(expected, update.eval())
test_case.assertAlmostEqual(expected, metric.eval())
class SingleLabelPrecisionAtKTest(test.TestCase):
def setUp(self):
self._predictions = ((0.1, 0.3, 0.2, 0.4), (0.1, 0.2, 0.3, 0.4))
self._predictions_idx = [[3], [3]]
indicator_labels = ((0, 0, 0, 1), (0, 0, 1, 0))
class_labels = (3, 2)
# Sparse vs dense, and 1d vs 2d labels should all be handled the same.
self._labels = (
_binary_2d_label_to_1d_sparse_value(indicator_labels),
_binary_2d_label_to_2d_sparse_value(indicator_labels), np.array(
class_labels, dtype=np.int64), np.array(
[[class_id] for class_id in class_labels], dtype=np.int64))
self._test_precision_at_k = functools.partial(
_test_precision_at_k, test_case=self)
self._test_precision_at_top_k = functools.partial(
_test_precision_at_top_k, test_case=self)
self._test_average_precision_at_k = functools.partial(
_test_average_precision_at_k, test_case=self)
@test_util.run_deprecated_v1
def test_at_k1_nan(self):
for labels in self._labels:
# Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
for class_id in (-1, 0, 1, 2, 4):
self._test_precision_at_k(
self._predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_precision_at_top_k(
self._predictions_idx, labels, k=1, expected=NAN, class_id=class_id)
@test_util.run_deprecated_v1
def test_at_k1(self):
for labels in self._labels:
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_precision_at_k(
self._predictions, labels, k=1, expected=1.0 / 2, class_id=3)
self._test_precision_at_top_k(
self._predictions_idx, labels, k=1, expected=1.0 / 2, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_precision_at_k(
self._predictions, labels, k=1, expected=1.0 / 2)
self._test_precision_at_top_k(
self._predictions_idx, labels, k=1, expected=1.0 / 2)
self._test_average_precision_at_k(
self._predictions, labels, k=1, expected=1.0 / 2)
class MultiLabelPrecisionAtKTest(test.TestCase):
def setUp(self):
self._test_precision_at_k = functools.partial(
_test_precision_at_k, test_case=self)
self._test_precision_at_top_k = functools.partial(
_test_precision_at_top_k, test_case=self)
self._test_average_precision_at_k = functools.partial(
_test_average_precision_at_k, test_case=self)
@test_util.run_deprecated_v1
def test_average_precision(self):
# Example 1.
# Matches example here:
# fastml.com/what-you-wanted-to-know-about-mean-average-precision
labels_ex1 = (0, 1, 2, 3, 4)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_idx_ex1 = (5, 3, 6, 0, 1)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
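# The hits in the top 4 are at ranks 2 and 4, so e.g. average precision at
# k=4 is (P@2 + P@4) / 4 = (1/2 + 2/4) / 4.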
for i in xrange(4):
k = i + 1
self._test_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_precision_at_top_k(
(predictions_idx_ex1[:k],), labels, k=k, expected=precision_ex1[i])
self._test_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
# Example 2.
labels_ex2 = (0, 2, 4, 5, 6)
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
predictions_idx_ex2 = (1, 3, 0, 6, 5)
precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
(precision_ex2[2] + precision_ex2[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_precision_at_k(
predictions, labels, k, expected=precision_ex2[i])
self._test_precision_at_top_k(
(predictions_idx_ex2[:k],), labels, k=k, expected=precision_ex2[i])
self._test_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex2[i])
# For both examples together, we expect both precision and average
# precision to be the average of the 2 examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
streaming_precision = [(ex1 + ex2) / 2
for ex1, ex2 in zip(precision_ex1, precision_ex2)]
streaming_average_precision = [
(ex1 + ex2) / 2
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
predictions_idx = (predictions_idx_ex1[:k], predictions_idx_ex2[:k])
self._test_precision_at_k(
predictions, labels, k, expected=streaming_precision[i])
self._test_precision_at_top_k(
predictions_idx, labels, k=k, expected=streaming_precision[i])
self._test_average_precision_at_k(
predictions, labels, k, expected=streaming_average_precision[i])
# For weighted examples, we expect the streaming average precision to be
# the weighted average of the 2 examples.
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_average_precision_at_k(
predictions,
labels,
k,
expected=streaming_average_precision[i],
weights=weights)
@test_util.run_deprecated_v1
def test_average_precision_some_labels_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_idx_ex1 = (5, 3, 6, 0, 1)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_precision_at_top_k(
(predictions_idx_ex1[:k],), labels, k=k, expected=precision_ex1[i])
self._test_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
@test_util.run_deprecated_v1
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
predictions_idx = [[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]]
sparse_labels = _binary_2d_label_to_2d_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=NAN, class_id=class_id)
@test_util.run_deprecated_v1
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
predictions_idx = [[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]]
sparse_labels = _binary_2d_label_to_2d_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=0.0, class_id=class_id)
@test_util.run_deprecated_v1
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
predictions_idx = [[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]]
sparse_labels = _binary_2d_label_to_2d_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=3.0 / 10)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=3.0 / 10)
@test_util.run_deprecated_v1
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
predictions_idx = [[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, 2 correct predictions.
self._test_precision_at_k(
predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
self._test_precision_at_top_k(
predictions_idx, sp_labels, k=5, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_precision_at_k(
predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
self._test_precision_at_top_k(
predictions_idx, sp_labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_precision_at_k(
predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
self._test_precision_at_top_k(
predictions_idx, sp_labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_precision_at_k(
predictions, sp_labels, k=5, expected=3.0 / 10)
self._test_precision_at_top_k(
predictions_idx, sp_labels, k=5, expected=3.0 / 10)
@test_util.run_deprecated_v1
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
predictions_idx = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
[[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=NAN, class_id=class_id)
@test_util.run_deprecated_v1
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
predictions_idx = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
[[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=0.0, class_id=class_id)
@test_util.run_deprecated_v1
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
predictions_idx = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
[[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=7.0 / 20)
@test_util.run_deprecated_v1
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
predictions_idx = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
[[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2.0, class_id=2,
weights=[[1], [0]])
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=2.0 / 2.0, class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2.0, class_id=2,
weights=[[0], [1]])
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=2.0 / 2.0, class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1.0, class_id=7,
weights=[[1], [0]])
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=0.0 / 1.0, class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1.0, class_id=7,
weights=[[0], [1]])
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=1.0 / 1.0, class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=7,
weights=[[1, 0], [0, 1]])
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=NAN, class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2.0, class_id=7,
weights=[[0, 1], [1, 0]])
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=1.0 / 2.0, class_id=7,
weights=[[0, 1], [1, 0]])
def _test_recall_at_k(predictions,
labels,
k,
expected,
class_id=None,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(test_case, update.eval())
_assert_nan(test_case, metric.eval())
else:
test_case.assertEqual(expected, update.eval())
test_case.assertEqual(expected, metric.eval())
def _test_recall_at_top_k(
predictions_idx,
labels,
expected,
k=None,
class_id=None,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.recall_at_top_k(
predictions_idx=constant_op.constant(predictions_idx, dtypes_lib.int32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(test_case, update.eval())
_assert_nan(test_case, metric.eval())
else:
test_case.assertEqual(expected, update.eval())
test_case.assertEqual(expected, metric.eval())
class SingleLabelRecallAtKTest(test.TestCase):
def setUp(self):
self._predictions = ((0.1, 0.3, 0.2, 0.4), (0.1, 0.2, 0.3, 0.4))
self._predictions_idx = [[3], [3]]
indicator_labels = ((0, 0, 0, 1), (0, 0, 1, 0))
class_labels = (3, 2)
# Sparse vs dense, and 1d vs 2d labels should all be handled the same.
self._labels = (
_binary_2d_label_to_1d_sparse_value(indicator_labels),
_binary_2d_label_to_2d_sparse_value(indicator_labels), np.array(
class_labels, dtype=np.int64), np.array(
[[class_id] for class_id in class_labels], dtype=np.int64))
self._test_recall_at_k = functools.partial(
_test_recall_at_k, test_case=self)
self._test_recall_at_top_k = functools.partial(
_test_recall_at_top_k, test_case=self)
@test_util.run_deprecated_v1
def test_at_k1_nan(self):
# Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
# range.
for labels in self._labels:
for class_id in (-1, 0, 1, 4):
self._test_recall_at_k(
self._predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=1, expected=NAN, class_id=class_id)
@test_util.run_deprecated_v1
def test_at_k1_no_predictions(self):
for labels in self._labels:
# Class 2: 0 predictions.
self._test_recall_at_k(
self._predictions, labels, k=1, expected=0.0, class_id=2)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=1, expected=0.0, class_id=2)
@test_util.run_deprecated_v1
def test_one_label_at_k1(self):
for labels in self._labels:
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_recall_at_k(
self._predictions, labels, k=1, expected=1.0 / 1, class_id=3)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=1, expected=1.0 / 1, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_recall_at_k(self._predictions, labels, k=1, expected=1.0 / 2)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=1, expected=1.0 / 2)
@test_util.run_deprecated_v1
def test_one_label_at_k1_weighted_class_id3(self):
predictions = self._predictions
predictions_idx = self._predictions_idx
for labels in self._labels:
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
self._test_recall_at_top_k(
predictions_idx, labels, k=1, expected=NAN, class_id=3,
weights=(0.0,))
self._test_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, class_id=3,
weights=(1.0,))
self._test_recall_at_top_k(
predictions_idx, labels, k=1, expected=1.0 / 1, class_id=3,
weights=(1.0,))
self._test_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, class_id=3,
weights=(2.0,))
self._test_recall_at_top_k(
predictions_idx, labels, k=1, expected=1.0 / 1, class_id=3,
weights=(2.0,))
self._test_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3,
weights=(0.0, 1.0))
self._test_recall_at_top_k(
predictions_idx, labels, k=1, expected=NAN, class_id=3,
weights=(0.0, 1.0))
self._test_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, class_id=3,
weights=(1.0, 0.0))
self._test_recall_at_top_k(
predictions_idx, labels, k=1, expected=1.0 / 1, class_id=3,
weights=(1.0, 0.0))
self._test_recall_at_k(
predictions, labels, k=1, expected=2.0 / 2, class_id=3,
weights=(2.0, 3.0))
self._test_recall_at_top_k(
predictions_idx, labels, k=1, expected=2.0 / 2, class_id=3,
weights=(2.0, 3.0))
@test_util.run_deprecated_v1
def test_one_label_at_k1_weighted(self):
predictions = self._predictions
predictions_idx = self._predictions_idx
for labels in self._labels:
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_recall_at_k(
predictions, labels, k=1, expected=NAN, weights=(0.0,))
self._test_recall_at_top_k(
predictions_idx, labels, k=1, expected=NAN, weights=(0.0,))
self._test_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
self._test_recall_at_top_k(
predictions_idx, labels, k=1, expected=1.0 / 2, weights=(1.0,))
self._test_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
self._test_recall_at_top_k(
predictions_idx, labels, k=1, expected=1.0 / 2, weights=(2.0,))
self._test_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_recall_at_top_k(
predictions_idx, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_recall_at_k(
predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_recall_at_top_k(
predictions_idx, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_recall_at_k(
predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_recall_at_top_k(
predictions_idx, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
class MultiLabel2dRecallAtKTest(test.TestCase):
def setUp(self):
self._predictions = ((0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9),
(0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6))
self._predictions_idx = ((9, 4, 6, 2, 0), (5, 7, 2, 9, 6))
indicator_labels = ((0, 0, 1, 0, 0, 0, 0, 1, 1, 0),
(0, 1, 1, 0, 0, 1, 0, 0, 0, 0))
class_labels = ((2, 7, 8), (1, 2, 5))
# Sparse vs dense labels should be handled the same.
self._labels = (_binary_2d_label_to_2d_sparse_value(indicator_labels),
np.array(
class_labels, dtype=np.int64))
self._test_recall_at_k = functools.partial(
_test_recall_at_k, test_case=self)
self._test_recall_at_top_k = functools.partial(
_test_recall_at_top_k, test_case=self)
@test_util.run_deprecated_v1
def test_at_k5_nan(self):
for labels in self._labels:
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_recall_at_k(
self._predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=NAN, class_id=class_id)
@test_util.run_deprecated_v1
def test_at_k5_no_predictions(self):
for labels in self._labels:
# Class 8: 1 label, no predictions.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=0.0 / 1, class_id=8)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=0.0 / 1, class_id=8)
@test_util.run_deprecated_v1
def test_at_k5(self):
for labels in self._labels:
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, correct.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 6 labels, 3 correct.
self._test_recall_at_k(self._predictions, labels, k=5, expected=3.0 / 6)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=3.0 / 6)
@test_util.run_deprecated_v1
def test_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, correct.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 8 labels, 3 correct.
self._test_recall_at_k(self._predictions, labels, k=5, expected=3.0 / 8)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=3.0 / 8)
class MultiLabel3dRecallAtKTest(test.TestCase):
def setUp(self):
self._predictions = (((0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9),
(0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6)),
((0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6),
(0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9)))
self._predictions_idx = (((9, 4, 6, 2, 0), (5, 7, 2, 9, 6)),
((5, 7, 2, 9, 6), (9, 4, 6, 2, 0)))
# Note: We don't test dense labels here, since examples have different
# numbers of labels.
self._labels = _binary_3d_label_to_sparse_value(((
(0, 0, 1, 0, 0, 0, 0, 1, 1, 0), (0, 1, 1, 0, 0, 1, 0, 0, 0, 0)), (
(0, 1, 1, 0, 0, 1, 0, 1, 0, 0), (0, 0, 1, 0, 0, 0, 0, 0, 1, 0))))
self._test_recall_at_k = functools.partial(
_test_recall_at_k, test_case=self)
self._test_recall_at_top_k = functools.partial(
_test_recall_at_top_k, test_case=self)
@test_util.run_deprecated_v1
def test_3d_nan(self):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN, class_id=class_id)
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=NAN,
class_id=class_id)
@test_util.run_deprecated_v1
def test_3d_no_predictions(self):
# Classes 1,8 have 0 predictions, >=1 label.
for class_id in (1, 8):
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=0.0, class_id=class_id)
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=0.0,
class_id=class_id)
@test_util.run_deprecated_v1
def test_3d(self):
# Class 2: 4 labels, all correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=4.0 / 4, class_id=2)
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=4.0 / 4,
class_id=2)
# Class 5: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=2.0 / 2, class_id=5)
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=2.0 / 2,
class_id=5)
# Class 7: 2 labels, 1 incorrect.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=1.0 / 2, class_id=7)
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=1.0 / 2,
class_id=7)
# All classes: 12 labels, 7 correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=7.0 / 12)
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=7.0 / 12)
@test_util.run_deprecated_v1
def test_3d_ignore_all(self):
for class_id in xrange(10):
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN, class_id=class_id,
weights=[[0], [0]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=NAN,
class_id=class_id, weights=[[0], [0]])
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN, class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=NAN,
class_id=class_id, weights=[[0, 0], [0, 0]])
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=NAN,
weights=[[0], [0]])
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN,
weights=[[0, 0], [0, 0]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=NAN,
weights=[[0, 0], [0, 0]])
@test_util.run_deprecated_v1
def test_3d_ignore_some(self):
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=2.0 / 2.0, class_id=2,
weights=[[1], [0]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=2.0 / 2.0,
class_id=2, weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=2.0 / 2.0, class_id=2,
weights=[[0], [1]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=2.0 / 2.0,
class_id=2, weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=1.0 / 1.0, class_id=7,
weights=[[0], [1]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=1.0 / 1.0,
class_id=7, weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=0.0 / 1.0, class_id=7,
weights=[[1], [0]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=0.0 / 1.0,
class_id=7, weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=1.0 / 2.0, class_id=7,
weights=[[1, 0], [1, 0]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=1.0 / 2.0,
class_id=7, weights=[[1, 0], [1, 0]])
# Class 7: No labels.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN, class_id=7,
weights=[[0, 1], [0, 1]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=NAN, class_id=7,
weights=[[0, 1], [0, 1]])
class MeanAbsoluteErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.mean_absolute_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_absolute_error/count:0', 'mean_absolute_error/total:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.mean_absolute_error(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
@test_util.run_deprecated_v1
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
error, update_op = metrics.mean_absolute_error(labels, predictions, weights)
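# Worked expectation: weights (0, 1, 0, 1) keep |4-3| = 1 and |8-3| = 5,
# so the weighted mean absolute error is (1 + 5) / 2 = 3.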
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertEqual(3, self.evaluate(update_op))
self.assertEqual(3, error.eval())
class MeanRelativeErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_relative_error/count:0', 'mean_relative_error/total:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
normalizer = random_ops.random_normal((10, 3), seed=3)
error, update_op = metrics.mean_relative_error(labels, predictions,
normalizer)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
@test_util.run_deprecated_v1
def testSingleUpdateNormalizedByLabels(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_predictions - np_labels), np_labels))
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(np_labels, shape=(1, 4))
error, update_op = metrics.mean_relative_error(
labels, predictions, normalizer=labels)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertEqual(expected_error, self.evaluate(update_op))
self.assertEqual(expected_error, error.eval())
@test_util.run_deprecated_v1
def testSingleUpdateNormalizedByZeros(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_relative_error(
labels, predictions, normalizer=array_ops.zeros_like(labels))
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertEqual(0.0, self.evaluate(update_op))
self.assertEqual(0.0, error.eval())
class MeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_squared_error/count:0', 'mean_squared_error/total:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.mean_squared_error(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
@test_util.run_deprecated_v1
def testSingleUpdateZeroError(self):
predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_squared_error(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertEqual(0, self.evaluate(update_op))
self.assertEqual(0, error.eval())
@test_util.run_deprecated_v1
def testSingleUpdateWithError(self):
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
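# Squared errors are 1, 1 and 16, so the mean is 18 / 3 = 6.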
error, update_op = metrics.mean_squared_error(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertEqual(6, self.evaluate(update_op))
self.assertEqual(6, error.eval())
@test_util.run_deprecated_v1
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
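# Weights (0, 1, 0, 1) keep (4-3)^2 = 1 and (8-3)^2 = 25, so the weighted
# mean is 26 / 2 = 13.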
error, update_op = metrics.mean_squared_error(labels, predictions, weights)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertEqual(13, self.evaluate(update_op))
self.assertEqual(13, error.eval())
@test_util.run_deprecated_v1
def testMultipleBatchesOfSizeOne(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
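# Batch 1 contributes squared errors 81 + 25 + 16 = 122 and batch 2
# contributes 36 + 1 + 49 = 86, so the cumulative mean is 208 / 6.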
error, update_op = metrics.mean_squared_error(labels, predictions)
self.evaluate(variables.local_variables_initializer())
self.evaluate(update_op)
self.assertAlmostEqual(208.0 / 6, self.evaluate(update_op), 5)
self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
@test_util.run_deprecated_v1
def testMetricsComputedConcurrently(self):
with self.cached_session() as sess:
# Create the queue that populates one set of predictions.
preds_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
# Create the queue that populates another set of predictions.
preds_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
labels_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
labels_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
mse0, update_op0 = metrics.mean_squared_error(
labels0, predictions0, name='msd0')
mse1, update_op1 = metrics.mean_squared_error(
labels1, predictions1, name='msd1')
self.evaluate(variables.local_variables_initializer())
self.evaluate([update_op0, update_op1])
self.evaluate([update_op0, update_op1])
mse0, mse1 = self.evaluate([mse0, mse1])
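# mse0 accumulates (122 + 86) / 6 = 208 / 6; mse1 accumulates
# (45 + 34) / 6 = 79 / 6.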
self.assertAlmostEqual(208.0 / 6, mse0, 5)
self.assertAlmostEqual(79.0 / 6, mse1, 5)
@test_util.run_deprecated_v1
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
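# Absolute errors sum to (9 + 5 + 4) + (6 + 1 + 7) = 32 and squared errors
# to 208, each averaged over the 6 elements seen.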
mae, ma_update_op = metrics.mean_absolute_error(labels, predictions)
mse, ms_update_op = metrics.mean_squared_error(labels, predictions)
self.evaluate(variables.local_variables_initializer())
self.evaluate([ma_update_op, ms_update_op])
self.evaluate([ma_update_op, ms_update_op])
self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
class RootMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.root_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('root_mean_squared_error/count:0', 'root_mean_squared_error/total:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.root_mean_squared_error(labels, predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
@test_util.run_deprecated_v1
def testSingleUpdateZeroError(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
0.0, shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.root_mean_squared_error(labels, predictions)
self.evaluate(variables.local_variables_initializer())
self.assertEqual(0, self.evaluate(update_op))
self.assertEqual(0, rmse.eval())
@test_util.run_deprecated_v1
def testSingleUpdateWithError(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
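# The mean squared error is 18 / 3 = 6, so the expected root is sqrt(6).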
rmse, update_op = metrics.root_mean_squared_error(labels, predictions)
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
@test_util.run_deprecated_v1
def testSingleUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
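# The weighted mean squared error is (1 + 25) / 2 = 13, hence sqrt(13).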
rmse, update_op = metrics.root_mean_squared_error(labels, predictions,
weights)
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(13), self.evaluate(update_op))
self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
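# Helper: repeats each prediction/label int(w) times, so a weighted metric
# can be cross-checked against its unweighted counterpart on expanded data.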
def _reweight(predictions, labels, weights):
return (np.concatenate([[p] * int(w) for p, w in zip(predictions, weights)]),
np.concatenate([[l] * int(w) for l, w in zip(labels, weights)]))
class MeanCosineDistanceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1)
_assert_metric_variables(self, (
'mean_cosine_distance/count:0',
'mean_cosine_distance/total:0',
))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=1)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
@test_util.run_deprecated_v1
def testSingleUpdateZeroError(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=2)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertEqual(0, self.evaluate(update_op))
self.assertEqual(0, error.eval())
@test_util.run_deprecated_v1
def testSingleUpdateWithError1(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
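# Row-wise cosine distances (1 - cos): identical rows give 0, opposite rows
# give 2, orthogonal rows give 1, so the mean is (0 + 2 + 1) / 3 = 1.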
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=2)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(1, self.evaluate(update_op), 5)
self.assertAlmostEqual(1, error.eval(), 5)
@test_util.run_deprecated_v1
def testSingleUpdateWithError2(self):
np_predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
np_labels = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=2)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, self.evaluate(update_op), 5)
self.assertAlmostEqual(1.0, error.eval(), 5)
@test_util.run_deprecated_v1
def testSingleUpdateWithErrorAndWeights1(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
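# Only the first row is weighted, and its vectors are identical, so the
# expected distance is 0.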
error, update_op = metrics.mean_cosine_distance(
labels, predictions, dim=2, weights=weights)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertEqual(0, self.evaluate(update_op))
self.assertEqual(0, error.eval())
@test_util.run_deprecated_v1
def testSingleUpdateWithErrorAndWeights2(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
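# Weights keep rows 2 and 3: distances 2 (opposite) and 1 (orthogonal), so
# the weighted mean is (2 + 1) / 2 = 1.5.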
error, update_op = metrics.mean_cosine_distance(
labels, predictions, dim=2, weights=weights)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertEqual(1.5, update_op.eval())
self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.percentage_below(values=array_ops.ones((10,)), threshold=2)
_assert_metric_variables(self, (
'percentage_below_threshold/count:0',
'percentage_below_threshold/total:0',
))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.percentage_below(
values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.percentage_below(
values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testOneUpdate(self):
with self.cached_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.percentage_below(values, 100, name='high')
pcnt1, update_op1 = metrics.percentage_below(values, 7, name='medium')
pcnt2, update_op2 = metrics.percentage_below(values, 1, name='low')
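# Of (2, 4, 6, 8): all four values are below 100, three are below 7 and
# none are below 1, giving 1.0, 0.75 and 0.0.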
self.evaluate(variables.local_variables_initializer())
self.evaluate([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = self.evaluate([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.75, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
@test_util.run_deprecated_v1
def testSomePresentOneUpdate(self):
with self.cached_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.percentage_below(
values, 100, weights=weights, name='high')
pcnt1, update_op1 = metrics.percentage_below(
values, 7, weights=weights, name='medium')
pcnt2, update_op2 = metrics.percentage_below(
values, 1, weights=weights, name='low')
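# Only the weighted values 2 and 8 count: both are below 100 (1.0), only 2
# is below 7 (0.5) and neither is below 1 (0.0).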
self.evaluate(variables.local_variables_initializer())
self.assertListEqual([1.0, 0.5, 0.0],
self.evaluate([update_op0, update_op1, update_op2]))
pcnt0, pcnt1, pcnt2 = self.evaluate([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.5, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
class MeanIOUTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_metric_variables(self, ('mean_iou/total_confusion_matrix:0',))
@test_util.run_deprecated_v1
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_iou, _ = metrics.mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.mean_iou(labels, predictions, num_classes=2)
@test_util.run_deprecated_v1
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.mean_iou(labels, predictions, num_classes=2, weights=weights)
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
mean_iou, update_op = metrics.mean_iou(
labels, predictions, num_classes=num_classes)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_mean_iou = mean_iou.eval()
for _ in range(10):
self.assertEqual(initial_mean_iou, mean_iou.eval())
@test_util.run_deprecated_v1
def testMultipleUpdates(self):
num_classes = 3
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
for _ in range(5):
self.evaluate(update_op)
desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
self.assertEqual(desired_output, miou.eval())
@test_util.run_deprecated_v1
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
mean_iou, update_op = metrics.mean_iou(
labels, predictions, num_classes, weights=weights)
variables.local_variables_initializer().run()
for _ in range(6):
self.evaluate(update_op)
desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, mean_iou.eval())
@test_util.run_deprecated_v1
def testMultipleUpdatesWithMissingClass(self):
    # Test the case where there are no predictions and labels for
# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
num_classes = 3
with self.cached_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
      # There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
for _ in range(5):
self.evaluate(update_op)
desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0])
self.assertAlmostEqual(desired_output, miou.eval())
@test_util.run_deprecated_v1
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
with self.cached_session() as sess:
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
desired_miou = np.mean([3. / 5., 5. / 7.])
self.assertAlmostEqual(desired_miou, miou.eval())
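  # Illustrative sketch, an assumed NumPy reference rather than the op's
  # implementation: given the accumulated confusion matrix, per-class IOU is
  # diag / (row_sum + col_sum - diag), averaged over classes whose
  # denominator is nonzero. For [[3, 0], [2, 5]] this gives
  # mean(3 / 5, 5 / 7), matching desired_miou above.
  @staticmethod
  def _reference_mean_iou(confusion_matrix):
    cm = np.asarray(confusion_matrix, dtype=np.float64)
    diag = np.diag(cm)
    denominator = cm.sum(axis=0) + cm.sum(axis=1) - diag
    valid = denominator > 0
    return np.mean(diag[valid] / denominator[valid])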
@test_util.run_deprecated_v1
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.cached_session() as sess:
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, miou.eval())
@test_util.run_deprecated_v1
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.cached_session() as sess:
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., miou.eval())
@test_util.run_deprecated_v1
def testResultsWithSomeMissing(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
weights = array_ops.concat(
[
constant_op.constant(
0, shape=[1]), constant_op.constant(
1, shape=[8]), constant_op.constant(
0, shape=[1])
],
0)
with self.cached_session() as sess:
miou, update_op = metrics.mean_iou(
labels, predictions, num_classes, weights=weights)
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
@test_util.run_deprecated_v1
def testMissingClassInLabels(self):
labels = constant_op.constant([
[[0, 0, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant([
[[0, 0, 2, 1, 1, 0],
[0, 1, 2, 2, 0, 1]],
[[0, 0, 2, 1, 1, 1],
[1, 1, 2, 0, 0, 0]]])
num_classes = 3
with self.cached_session() as sess:
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual([[7, 4, 3], [3, 5, 2], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(
1 / 3 * (7 / (7 + 3 + 7) + 5 / (5 + 4 + 5) + 0 / (0 + 5 + 0)),
miou.eval())
@test_util.run_deprecated_v1
def testMissingClassOverallSmall(self):
labels = constant_op.constant([0])
predictions = constant_op.constant([0])
num_classes = 2
with self.cached_session() as sess:
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual([[1, 0], [0, 0]], update_op.eval())
self.assertAlmostEqual(1, miou.eval())
@test_util.run_deprecated_v1
def testMissingClassOverallLarge(self):
labels = constant_op.constant([
[[0, 0, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant([
[[0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1]],
[[0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0]]])
num_classes = 3
with self.cached_session() as sess:
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual([[9, 5, 0], [3, 7, 0], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(
1 / 2 * (9 / (9 + 3 + 5) + 7 / (7 + 5 + 3)), miou.eval())
class MeanPerClassAccuracyTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.mean_per_class_accuracy(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_metric_variables(self, ('mean_accuracy/count:0',
'mean_accuracy/total:0'))
@test_util.run_deprecated_v1
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_accuracy, _ = metrics.mean_per_class_accuracy(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [mean_accuracy])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_per_class_accuracy(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.mean_per_class_accuracy(labels, predictions, num_classes=2)
@test_util.run_deprecated_v1
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.mean_per_class_accuracy(
labels, predictions, num_classes=2, weights=weights)
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes=num_classes)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_mean_accuracy = mean_accuracy.eval()
for _ in range(10):
self.assertEqual(initial_mean_accuracy, mean_accuracy.eval())
  @test_util.run_deprecated_v1
  def testMultipleUpdates(self):
    num_classes = 3
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
for _ in range(5):
self.evaluate(update_op)
desired_output = np.mean([1.0, 1.0 / 3.0, 0.0])
self.assertAlmostEqual(desired_output, mean_accuracy.eval())
@test_util.run_deprecated_v1
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.5])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes, weights=weights)
variables.local_variables_initializer().run()
for _ in range(6):
self.evaluate(update_op)
desired_output = np.mean([2.0 / 2.0, 0.5 / 1.5])
self.assertAlmostEqual(desired_output, mean_accuracy.eval())
@test_util.run_deprecated_v1
def testMultipleUpdatesWithMissingClass(self):
    # Test the case where there are no predictions and labels for
# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
num_classes = 3
with self.cached_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
      # There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
for _ in range(5):
self.evaluate(update_op)
desired_output = np.mean([1.0 / 2.0, 2.0 / 3.0, 0.])
self.assertAlmostEqual(desired_output, mean_accuracy.eval())
@test_util.run_deprecated_v1
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.cached_session() as sess:
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval()[0])
self.assertEqual(1.0, mean_accuracy.eval())
@test_util.run_deprecated_v1
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.cached_session() as sess:
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual([0.0, 0.0], update_op.eval())
self.assertEqual(0., mean_accuracy.eval())
@test_util.run_deprecated_v1
def testResultsWithSomeMissing(self):
predictions = array_ops.concat([
constant_op.constant(0, shape=[5]), constant_op.constant(1, shape=[5])
], 0)
labels = array_ops.concat([
constant_op.constant(0, shape=[3]), constant_op.constant(1, shape=[7])
], 0)
num_classes = 2
weights = array_ops.concat([
constant_op.constant(0, shape=[1]), constant_op.constant(1, shape=[8]),
constant_op.constant(0, shape=[1])
], 0)
with self.cached_session() as sess:
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes, weights=weights)
self.evaluate(variables.local_variables_initializer())
desired_accuracy = np.array([2. / 2., 4. / 6.], dtype=np.float32)
self.assertAllEqual(desired_accuracy, update_op.eval())
desired_mean_accuracy = np.mean(desired_accuracy)
self.assertAlmostEqual(desired_mean_accuracy, mean_accuracy.eval())
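  # Illustrative sketch, an assumed NumPy reference: per-class accuracy is
  # the confusion-matrix diagonal divided by each class's (weighted) label
  # count, with classes that never appear in the labels contributing 0, as
  # in testMultipleUpdatesWithMissingClass above; the metric is the mean of
  # those ratios.
  @staticmethod
  def _reference_mean_per_class_accuracy(confusion_matrix):
    cm = np.asarray(confusion_matrix, dtype=np.float64)
    totals = cm.sum(axis=1)
    per_class = np.divide(
        np.diag(cm), totals, out=np.zeros_like(totals), where=totals > 0)
    return np.mean(per_class)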
class FalseNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.false_negatives(
labels=(0, 1, 0, 1),
predictions=(0, 0, 1, 1))
_assert_metric_variables(self, ('false_negatives/count:0',))
@test_util.run_deprecated_v1
def testUnweighted(self):
labels = constant_op.constant(((0, 1, 0, 1, 0),
(0, 0, 1, 1, 1),
(1, 1, 1, 1, 0),
(0, 0, 0, 0, 1)))
predictions = constant_op.constant(((0, 0, 1, 1, 0),
(1, 1, 1, 1, 1),
(0, 1, 0, 1, 0),
(1, 1, 1, 1, 1)))
    fn, fn_update_op = metrics.false_negatives(
        labels=labels, predictions=predictions)
    with self.cached_session() as sess:
      self.evaluate(variables.local_variables_initializer())
      self.assertAllClose(0., fn.eval())
      self.assertAllClose(3., fn_update_op.eval())
      self.assertAllClose(3., fn.eval())
@test_util.run_deprecated_v1
def testWeighted(self):
labels = constant_op.constant(((0, 1, 0, 1, 0),
(0, 0, 1, 1, 1),
(1, 1, 1, 1, 0),
(0, 0, 0, 0, 1)))
predictions = constant_op.constant(((0, 0, 1, 1, 0),
(1, 1, 1, 1, 1),
(0, 1, 0, 1, 0),
(1, 1, 1, 1, 1)))
weights = constant_op.constant((1., 1.5, 2., 2.5))
    fn, fn_update_op = metrics.false_negatives(
        labels=labels, predictions=predictions, weights=weights)
    with self.cached_session() as sess:
      self.evaluate(variables.local_variables_initializer())
      self.assertAllClose(0., fn.eval())
      self.assertAllClose(5., fn_update_op.eval())
      self.assertAllClose(5., fn.eval())
class FalseNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.false_negatives_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0.15, 0.5, 0.85])
_assert_metric_variables(self, ('false_negatives/false_negatives:0',))
@test_util.run_deprecated_v1
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.false_negatives_at_thresholds(
predictions=predictions, labels=labels, thresholds=[0.15, 0.5, 0.85])
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fn.eval())
self.assertAllEqual((0, 2, 3), fn_update_op.eval())
self.assertAllEqual((0, 2, 3), fn.eval())
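  # Hedged NumPy sketch (an assumed reference, not the op's implementation):
  # a prediction counts as positive when it exceeds the threshold, so a
  # false negative is a positive label with score <= threshold. This
  # reproduces (0, 2, 3) above and (0., 8., 11.) in testWeighted below.
  @staticmethod
  def _reference_fn_at_thresholds(predictions, labels, thresholds,
                                  weights=None):
    predictions = np.asarray(predictions, dtype=np.float64)
    labels = np.asarray(labels, dtype=bool)
    if weights is None:
      weights = np.ones_like(predictions)
    weights = np.broadcast_to(
        np.asarray(weights, dtype=np.float64), predictions.shape)
    return [np.sum(weights[labels & (predictions <= t)]) for t in thresholds]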
@test_util.run_deprecated_v1
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.false_negatives_at_thresholds(
predictions=predictions,
labels=labels,
weights=((3.0,), (5.0,), (7.0,)),
thresholds=[0.15, 0.5, 0.85])
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fn.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn.eval())
class FalsePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.false_positives(
labels=(0, 1, 0, 1),
predictions=(0, 0, 1, 1))
_assert_metric_variables(self, ('false_positives/count:0',))
@test_util.run_deprecated_v1
def testUnweighted(self):
labels = constant_op.constant(((0, 1, 0, 1, 0),
(0, 0, 1, 1, 1),
(1, 1, 1, 1, 0),
(0, 0, 0, 0, 1)))
predictions = constant_op.constant(((0, 0, 1, 1, 0),
(1, 1, 1, 1, 1),
(0, 1, 0, 1, 0),
(1, 1, 1, 1, 1)))
    fp, fp_update_op = metrics.false_positives(
        labels=labels, predictions=predictions)
    with self.cached_session() as sess:
      self.evaluate(variables.local_variables_initializer())
      self.assertAllClose(0., fp.eval())
      self.assertAllClose(7., fp_update_op.eval())
      self.assertAllClose(7., fp.eval())
@test_util.run_deprecated_v1
def testWeighted(self):
labels = constant_op.constant(((0, 1, 0, 1, 0),
(0, 0, 1, 1, 1),
(1, 1, 1, 1, 0),
(0, 0, 0, 0, 1)))
predictions = constant_op.constant(((0, 0, 1, 1, 0),
(1, 1, 1, 1, 1),
(0, 1, 0, 1, 0),
(1, 1, 1, 1, 1)))
weights = constant_op.constant((1., 1.5, 2., 2.5))
    fp, fp_update_op = metrics.false_positives(
        labels=labels, predictions=predictions, weights=weights)
    with self.cached_session() as sess:
      self.evaluate(variables.local_variables_initializer())
      self.assertAllClose(0., fp.eval())
      self.assertAllClose(14., fp_update_op.eval())
      self.assertAllClose(14., fp.eval())
class FalsePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.false_positives_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0.15, 0.5, 0.85])
_assert_metric_variables(self, ('false_positives/false_positives:0',))
@test_util.run_deprecated_v1
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.false_positives_at_thresholds(
predictions=predictions, labels=labels, thresholds=[0.15, 0.5, 0.85])
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fp.eval())
self.assertAllEqual((7, 4, 2), fp_update_op.eval())
self.assertAllEqual((7, 4, 2), fp.eval())
@test_util.run_deprecated_v1
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.false_positives_at_thresholds(
predictions=predictions,
labels=labels,
weights=((1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0)),
thresholds=[0.15, 0.5, 0.85])
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fp.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp.eval())
class TrueNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.true_negatives(
labels=(0, 1, 0, 1),
predictions=(0, 0, 1, 1))
_assert_metric_variables(self, ('true_negatives/count:0',))
@test_util.run_deprecated_v1
def testUnweighted(self):
labels = constant_op.constant(((0, 1, 0, 1, 0),
(0, 0, 1, 1, 1),
(1, 1, 1, 1, 0),
(0, 0, 0, 0, 1)))
predictions = constant_op.constant(((0, 0, 1, 1, 0),
(1, 1, 1, 1, 1),
(0, 1, 0, 1, 0),
(1, 1, 1, 1, 1)))
tn, tn_update_op = metrics.true_negatives(
labels=labels, predictions=predictions)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAllClose(0., tn.eval())
self.assertAllClose(3., tn_update_op.eval())
self.assertAllClose(3., tn.eval())
@test_util.run_deprecated_v1
def testWeighted(self):
labels = constant_op.constant(((0, 1, 0, 1, 0),
(0, 0, 1, 1, 1),
(1, 1, 1, 1, 0),
(0, 0, 0, 0, 1)))
predictions = constant_op.constant(((0, 0, 1, 1, 0),
(1, 1, 1, 1, 1),
(0, 1, 0, 1, 0),
(1, 1, 1, 1, 1)))
weights = constant_op.constant((1., 1.5, 2., 2.5))
tn, tn_update_op = metrics.true_negatives(
labels=labels, predictions=predictions, weights=weights)
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAllClose(0., tn.eval())
self.assertAllClose(4., tn_update_op.eval())
self.assertAllClose(4., tn.eval())
class TrueNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.true_negatives_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0.15, 0.5, 0.85])
_assert_metric_variables(self, ('true_negatives/true_negatives:0',))
@test_util.run_deprecated_v1
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.true_negatives_at_thresholds(
predictions=predictions, labels=labels, thresholds=[0.15, 0.5, 0.85])
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tn.eval())
self.assertAllEqual((2, 5, 7), tn_update_op.eval())
self.assertAllEqual((2, 5, 7), tn.eval())
@test_util.run_deprecated_v1
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.true_negatives_at_thresholds(
predictions=predictions,
labels=labels,
weights=((0.0, 2.0, 3.0, 5.0),),
thresholds=[0.15, 0.5, 0.85])
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tn.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn.eval())
class TruePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.true_positives(
labels=(0, 1, 0, 1),
predictions=(0, 0, 1, 1))
_assert_metric_variables(self, ('true_positives/count:0',))
@test_util.run_deprecated_v1
def testUnweighted(self):
labels = constant_op.constant(((0, 1, 0, 1, 0),
(0, 0, 1, 1, 1),
(1, 1, 1, 1, 0),
(0, 0, 0, 0, 1)))
predictions = constant_op.constant(((0, 0, 1, 1, 0),
(1, 1, 1, 1, 1),
(0, 1, 0, 1, 0),
(1, 1, 1, 1, 1)))
    tp, tp_update_op = metrics.true_positives(
        labels=labels, predictions=predictions)
    with self.cached_session() as sess:
      self.evaluate(variables.local_variables_initializer())
      self.assertAllClose(0., tp.eval())
      self.assertAllClose(7., tp_update_op.eval())
      self.assertAllClose(7., tp.eval())
@test_util.run_deprecated_v1
def testWeighted(self):
labels = constant_op.constant(((0, 1, 0, 1, 0),
(0, 0, 1, 1, 1),
(1, 1, 1, 1, 0),
(0, 0, 0, 0, 1)))
predictions = constant_op.constant(((0, 0, 1, 1, 0),
(1, 1, 1, 1, 1),
(0, 1, 0, 1, 0),
(1, 1, 1, 1, 1)))
weights = constant_op.constant((1., 1.5, 2., 2.5))
    tp, tp_update_op = metrics.true_positives(
        labels=labels, predictions=predictions, weights=weights)
    with self.cached_session() as sess:
      self.evaluate(variables.local_variables_initializer())
      self.assertAllClose(0., tp.eval())
      self.assertAllClose(12., tp_update_op.eval())
      self.assertAllClose(12., tp.eval())
class TruePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.true_positives_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0.15, 0.5, 0.85])
_assert_metric_variables(self, ('true_positives/true_positives:0',))
@test_util.run_deprecated_v1
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.true_positives_at_thresholds(
predictions=predictions, labels=labels, thresholds=[0.15, 0.5, 0.85])
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tp.eval())
self.assertAllEqual((3, 1, 0), tp_update_op.eval())
self.assertAllEqual((3, 1, 0), tp.eval())
@test_util.run_deprecated_v1
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.true_positives_at_thresholds(
predictions=predictions, labels=labels, weights=37.0,
thresholds=[0.15, 0.5, 0.85])
with self.cached_session() as sess:
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tp.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp.eval())
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/metrics_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for SpacetoDepth op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class SpaceToDepthTest(test.TestCase):
def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):
input_nhwc = math_ops.cast(inputs, dtype)
with test_util.force_cpu():
# test NHWC (default) on CPU
x_tf = array_ops.space_to_depth(input_nhwc, block_size)
self.assertAllEqual(self.evaluate(x_tf), outputs)
if test_util.is_gpu_available():
with test_util.force_gpu():
# test NHWC (default) on GPU
x_tf = array_ops.space_to_depth(input_nhwc, block_size)
self.assertAllEqual(self.evaluate(x_tf), outputs)
# test NCHW on GPU
input_nchw = test_util.NHWCToNCHW(input_nhwc)
output_nchw = array_ops.space_to_depth(
input_nchw, block_size, data_format="NCHW")
output_nhwc = test_util.NCHWToNHWC(output_nchw)
self.assertAllEqual(self.evaluate(output_nhwc), outputs)
def testBasic(self):
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 2
x_out = [[[[1, 2, 3, 4]]]]
for dtype in [dtypes.float32, dtypes.float16, dtypes.uint8]:
self._testOne(x_np, block_size, x_out, dtype=dtype)
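  # Hedged reference sketch (an assumption mirroring the op's documented
  # NHWC behavior, not its kernel): space_to_depth splits each spatial
  # dimension into blocks of block_size and folds each block into depth via
  # reshape/transpose/reshape. For testBasic's input it yields
  # [[[[1, 2, 3, 4]]]].
  @staticmethod
  def _numpy_space_to_depth(x, block_size):
    x = np.asarray(x)
    b, h, w, c = x.shape
    x = x.reshape(b, h // block_size, block_size,
                  w // block_size, block_size, c)
    x = x.transpose(0, 1, 3, 2, 4, 5)
    return x.reshape(b, h // block_size, w // block_size,
                     block_size * block_size * c)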
# Tests for larger input dimensions. To make sure elements are
# correctly ordered spatially.
def testLargerInput2x2(self):
x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
block_size = 2
x_out = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
[13, 14, 15, 16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input dimensions. To make sure elements are
# correctly ordered in depth. Here, larger block size.
def testLargerInput4x4(self):
x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
block_size = 4
x_out = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths.
# To make sure elements are properly interleaved in depth.
def testDepthInterleaved(self):
x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]]]]
block_size = 2
x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths. Here an odd depth.
# To make sure elements are properly interleaved in depth.
def testDepthInterleavedDepth3(self):
x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
block_size = 2
x_out = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input dimensions AND for larger input depths.
# To make sure elements are properly interleaved in depth and ordered
# spatially.
def testDepthInterleavedLarge(self):
x_np = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
[[3, 30], [4, 40], [7, 70], [8, 80]],
[[9, 90], [10, 100], [13, 130], [14, 140]],
[[11, 110], [12, 120], [15, 150], [16, 160]]]]
block_size = 2
x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40], [5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120],
[13, 130, 14, 140, 15, 150, 16, 160]]]]
self._testOne(x_np, block_size, x_out)
def testBlockSize2Batch10(self):
block_size = 2
def batch_input_elt(i):
return [[[1 * i], [2 * i], [5 * i], [6 * i]],
[[3 * i], [4 * i], [7 * i], [8 * i]],
[[9 * i], [10 * i], [13 * i], [14 * i]],
[[11 * i], [12 * i], [15 * i], [16 * i]]]
def batch_output_elt(i):
return [[[1 * i, 2 * i, 3 * i, 4 * i], [5 * i, 6 * i, 7 * i, 8 * i]],
[[9 * i, 10 * i, 11 * i, 12 * i],
[13 * i, 14 * i, 15 * i, 16 * i]]]
batch_size = 10
x_np = [batch_input_elt(i) for i in range(batch_size)]
x_out = [batch_output_elt(i) for i in range(batch_size)]
self._testOne(x_np, block_size, x_out)
def testBatchSize0(self):
block_size = 2
batch_size = 0
input_nhwc = array_ops.ones([batch_size, 4, 6, 3])
x_out = array_ops.ones([batch_size, 2, 3, 12])
with test_util.force_cpu():
# test NHWC (default) on CPU
x_tf = array_ops.space_to_depth(input_nhwc, block_size)
self.assertAllEqual(x_tf.shape, x_out.shape)
self.evaluate(x_tf)
if test.is_gpu_available():
with test_util.use_gpu():
# test NHWC (default) on GPU
x_tf = array_ops.space_to_depth(input_nhwc, block_size)
self.assertAllEqual(x_tf.shape, x_out.shape)
self.evaluate(x_tf)
# Tests for different width and height.
def testNonSquare(self):
x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]], [[5, 50], [6, 60]],
[[7, 70], [8, 80]], [[9, 90], [10, 100]], [[11, 110], [12, 120]]]]
block_size = 2
x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]], [[5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120]]]]
self._testOne(x_np, block_size, x_out)
# Error handling:
@test_util.run_deprecated_v1
def testInputWrongDimMissingDepth(self):
# The input is missing the last dimension ("depth")
x_np = [[[1, 2], [3, 4]]]
block_size = 2
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
self.evaluate(out_tf)
@test_util.run_deprecated_v1
def testInputWrongDimMissingBatch(self):
# The input is missing the first dimension ("batch")
x_np = [[[1], [2]], [[3], [4]]]
block_size = 2
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
@test_util.run_deprecated_v1
def testBlockSize0(self):
# The block size is 0.
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 0
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
self.evaluate(out_tf)
@test_util.run_deprecated_v1
def testBlockSizeOne(self):
# The block size is 1. The block size needs to be > 1.
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 1
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
self.evaluate(out_tf)
@test_util.run_deprecated_v1
def testBlockSizeLarger(self):
# The block size is too large for this input.
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 10
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
self.evaluate(out_tf)
@test_util.run_deprecated_v1
def testBlockSizeNotDivisibleWidth(self):
# The block size divides width but not height.
x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
block_size = 3
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
@test_util.run_deprecated_v1
def testBlockSizeNotDivisibleHeight(self):
# The block size divides height but not width.
x_np = [[[[1], [2]], [[3], [4]], [[5], [6]]]]
block_size = 3
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
@test_util.run_deprecated_v1
def testBlockSizeNotDivisibleBoth(self):
    # The block size divides neither width nor height.
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 3
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
@test_util.run_deprecated_v1
def testUnknownShape(self):
t = array_ops.space_to_depth(
array_ops.placeholder(dtypes.float32), block_size=4)
self.assertEqual(4, t.get_shape().ndims)
def spaceToDepthUsingTranspose(self, tensor, block_size, data_format):
block_size_sq = block_size * block_size
dtype = tensor.dtype
if dtype == dtypes.qint8:
tensor = array_ops.bitcast(tensor, dtypes.int8)
if data_format == "NHWC":
b, ih, iw, ic = tensor.shape.as_list()
assert ih % block_size == 0, (ih, block_size)
assert iw % block_size == 0, (iw, block_size)
ow, oh, oc = iw // block_size, ih // block_size, ic * block_size_sq
tensor = array_ops.reshape(tensor,
[b, oh, block_size, ow, block_size, ic])
tensor = array_ops.transpose(tensor, [0, 1, 3, 2, 4, 5])
tensor = array_ops.reshape(tensor, [b, oh, ow, oc])
elif data_format == "NCHW":
b, ic, ih, iw = tensor.shape.as_list()
assert ih % block_size == 0, (ih, block_size)
assert iw % block_size == 0, (iw, block_size)
ow, oh, oc = iw // block_size, ih // block_size, ic * block_size_sq
tensor = array_ops.reshape(tensor,
[b, ic, oh, block_size, ow, block_size])
tensor = array_ops.transpose(tensor, [0, 3, 5, 1, 2, 4])
tensor = array_ops.reshape(tensor, [b, oc, oh, ow])
if dtype == dtypes.qint8:
tensor = array_ops.bitcast(tensor, dtype)
return tensor
def compareToTranspose(self, batch_size, out_height, out_width, in_channels,
block_size, data_format, data_type, use_gpu):
in_height = out_height * block_size
in_width = out_width * block_size
nhwc_input_shape = [batch_size, in_height, in_width, in_channels]
nchw_input_shape = [batch_size, in_channels, in_height, in_width]
total_size = np.prod(nhwc_input_shape)
# Construct the input tensor in data_type and NHWC.
    # force_cpu is needed because quantize_v2 runs only on the CPU.
with test_util.force_cpu():
if data_type == dtypes.qint8:
# Initialize the input tensor with qint8 values that circle -127..127.
x = [((f + 128) % 255) - 127 for f in range(total_size)]
t = constant_op.constant(
x, shape=nhwc_input_shape, dtype=dtypes.float32)
t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
else:
assert data_type == dtypes.float32
# Initialize the input tensor with ascending whole numbers as floats.
x = [f * 1.0 for f in range(total_size)]
shape = nchw_input_shape if data_format == "NCHW" else nhwc_input_shape
t = constant_op.constant(x, shape=shape, dtype=dtypes.float32)
with test_util.device(use_gpu):
if data_format == "NCHW_VECT_C":
assert data_type == dtypes.qint8
# Convert to int8, then NHWCToNCHW_VECT_C, and then back to qint8.
actual = array_ops.bitcast(t, dtypes.int8)
actual = test_util.NHWCToNCHW_VECT_C(actual)
actual = array_ops.bitcast(actual, dtypes.qint8)
actual = array_ops.space_to_depth(
actual, block_size, data_format=data_format)
actual = array_ops.bitcast(actual, dtypes.int8)
actual = test_util.NCHW_VECT_CToNHWC(actual)
actual = array_ops.bitcast(actual, dtypes.qint8)
expected = array_ops.bitcast(t, dtypes.int8)
expected = math_ops.cast(expected, dtypes.float32)
expected = self.spaceToDepthUsingTranspose(expected, block_size, "NHWC")
expected = math_ops.cast(expected, dtypes.int8)
expected = array_ops.bitcast(expected, dtypes.qint8)
else:
        # Compute the op directly and via the transpose-based reference.
actual = array_ops.space_to_depth(
t, block_size, data_format=data_format)
expected = self.spaceToDepthUsingTranspose(t, block_size, data_format)
actual_vals, expected_vals = self.evaluate([actual, expected])
self.assertTrue(np.array_equal(actual_vals, expected_vals))
# TODO(jingyue): figure out why this test failed in eager mode.
@test_util.run_deprecated_v1
def testAgainstTranspose(self):
self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", dtypes.float32, False)
self.compareToTranspose(1, 2, 3, 2, 2, "NHWC", dtypes.float32, False)
self.compareToTranspose(1, 2, 3, 2, 3, "NHWC", dtypes.float32, False)
self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", dtypes.qint8, False)
self.compareToTranspose(1, 2, 3, 2, 2, "NHWC", dtypes.qint8, False)
self.compareToTranspose(1, 2, 3, 2, 3, "NHWC", dtypes.qint8, False)
if not test.is_gpu_available():
tf_logging.info("skipping gpu tests since gpu not available")
return
self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", dtypes.float32, True)
self.compareToTranspose(3, 2, 3, 2, 2, "NHWC", dtypes.float32, True)
self.compareToTranspose(3, 2, 3, 1, 2, "NCHW", dtypes.float32, True)
self.compareToTranspose(3, 2, 3, 2, 3, "NCHW", dtypes.float32, True)
self.compareToTranspose(5, 7, 11, 3, 2, "NCHW", dtypes.float32, True)
self.compareToTranspose(3, 2, 3, 4, 2, "NCHW_VECT_C", dtypes.qint8, True)
self.compareToTranspose(3, 2, 3, 8, 3, "NCHW_VECT_C", dtypes.qint8, True)
self.compareToTranspose(5, 7, 11, 12, 2, "NCHW_VECT_C", dtypes.qint8, True)
class SpaceToDepthGradientTest(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_size, data_format):
    # NCHW is implemented only for GPU.
if data_format == "NCHW" and not test.is_gpu_available():
return
assert 4 == x.ndim
with self.cached_session(use_gpu=True):
tf_x = ops.convert_to_tensor(x)
tf_y = array_ops.space_to_depth(tf_x, block_size, data_format=data_format)
epsilon = 1e-2
      x_jacob_t, x_jacob_n = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
# Tests a gradient for space_to_depth of x which is a four dimensional
# tensor of shape [b, h * block_size, w * block_size, d].
def _compare(self, b, h, w, d, block_size, data_format):
block_size_sq = block_size * block_size
data = np.random.normal(0, 1, b * h * w * d * block_size_sq).astype(
np.float32)
if data_format == "NHWC":
x = data.reshape([b, h * block_size, w * block_size, d])
else:
x = data.reshape([b, d, h * block_size, w * block_size])
self._checkGrad(x, block_size, data_format)
  # Don't use very large numbers as dimensions here, as the result is a
  # tensor whose size is the cartesian product of the dimensions.
@test_util.run_deprecated_v1
def testSmall(self):
block_size = 2
self._compare(1, 2, 3, 5, block_size, "NHWC")
self._compare(1, 2, 3, 5, block_size, "NCHW")
@test_util.run_deprecated_v1
def testSmall2(self):
block_size = 2
self._compare(2, 4, 3, 2, block_size, "NHWC")
self._compare(2, 4, 3, 2, block_size, "NCHW")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/spacetodepth_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.listdiff_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
_TYPES = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string
]
class ListDiffTest(test.TestCase):
def _testListDiff(self, x, y, out, idx):
for dtype in _TYPES:
if dtype == dtypes.string:
x = [compat.as_bytes(str(a)) for a in x]
y = [compat.as_bytes(str(a)) for a in y]
out = [compat.as_bytes(str(a)) for a in out]
for diff_func in [array_ops.setdiff1d]:
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session() as sess:
x_tensor = ops.convert_to_tensor(x, dtype=dtype)
y_tensor = ops.convert_to_tensor(y, dtype=dtype)
out_tensor, idx_tensor = diff_func(x_tensor, y_tensor,
index_dtype=index_dtype)
tf_out, tf_idx = self.evaluate([out_tensor, idx_tensor])
self.assertAllEqual(tf_out, out)
self.assertAllEqual(tf_idx, idx)
self.assertEqual(1, out_tensor.get_shape().ndims)
self.assertEqual(1, idx_tensor.get_shape().ndims)
def testBasic1(self):
x = [1, 2, 3, 4]
y = [1, 2]
out = [3, 4]
idx = [2, 3]
self._testListDiff(x, y, out, idx)
def testBasic2(self):
x = [1, 2, 3, 4]
y = [2]
out = [1, 3, 4]
idx = [0, 2, 3]
self._testListDiff(x, y, out, idx)
def testBasic3(self):
x = [1, 4, 3, 2]
y = [4, 2]
out = [1, 3]
idx = [0, 2]
self._testListDiff(x, y, out, idx)
def testDuplicates(self):
x = [1, 2, 4, 3, 2, 3, 3, 1]
y = [4, 2]
out = [1, 3, 3, 3, 1]
idx = [0, 3, 5, 6, 7]
self._testListDiff(x, y, out, idx)
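  # Illustrative sketch (assumed semantics, consistent with the checks above
  # and with the reference used in testRandom below): setdiff1d keeps
  # duplicates and the original order of x, unlike np.setdiff1d, and also
  # returns the positions of the retained entries.
  @staticmethod
  def _reference_listdiff(x, y):
    kept = [(entry, pos) for pos, entry in enumerate(x) if entry not in y]
    out = [entry for entry, _ in kept]
    idx = [pos for _, pos in kept]
    return out, idx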
def testRandom(self):
num_random_tests = 10
int_low = -7
int_high = 8
max_size = 50
for _ in xrange(num_random_tests):
x_size = np.random.randint(max_size + 1)
x = np.random.randint(int_low, int_high, size=x_size)
y_size = np.random.randint(max_size + 1)
y = np.random.randint(int_low, int_high, size=y_size)
out_idx = [(entry, pos) for pos, entry in enumerate(x) if entry not in y]
if out_idx:
out, idx = map(list, zip(*out_idx))
else:
out = []
idx = []
self._testListDiff(list(x), list(y), out, idx)
def testFullyOverlapping(self):
x = [1, 2, 3, 4]
y = [1, 2, 3, 4]
out = []
idx = []
self._testListDiff(x, y, out, idx)
def testNonOverlapping(self):
x = [1, 2, 3, 4]
y = [5, 6]
out = x
idx = np.arange(len(x))
self._testListDiff(x, y, out, idx)
def testEmptyX(self):
x = []
y = [1, 2]
out = []
idx = []
self._testListDiff(x, y, out, idx)
def testEmptyY(self):
x = [1, 2, 3, 4]
y = []
out = x
idx = np.arange(len(x))
self._testListDiff(x, y, out, idx)
def testEmptyXY(self):
x = []
y = []
out = []
idx = []
self._testListDiff(x, y, out, idx)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/listdiff_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.linalg.linalg_impl.tridiagonal_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
_sample_diags = np.array([[2, 1, 4, 0], [1, 3, 2, 2], [0, 1, -1, 1]])
_sample_rhs = np.array([1, 2, 3, 4])
_sample_result = np.array([-9, 5, -4, 4])
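# Illustrative note, an assumption consistent with testMatrixFormat below:
# the rows of the compact format are the padded super-, main- and
# sub-diagonal, so _sample_diags represents the dense matrix below, and
# _sample_result solves it for _sample_rhs. (_sample_dense is a hypothetical
# name introduced here for illustration.)
_sample_dense = np.array([[1, 2, 0, 0],
                          [1, 3, 1, 0],
                          [0, -1, 2, 4],
                          [0, 0, 1, 2]])
assert np.allclose(np.dot(_sample_dense, _sample_result), _sample_rhs)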
# Flag indicating that the test should be run only with partial_pivoting=True.
FLAG_REQUIRES_PIVOTING = "FLAG_REQUIRES_PIVOT"
# Flag indicating that the test shouldn't be parameterized by different
# values of partial_pivoting, etc.
FLAG_NO_PARAMETERIZATION = "FLAG_NO_PARAMETERIZATION"
def flags(*args):
def decorator(f):
for flag in args:
setattr(f, flag, True)
return f
return decorator
def _tfconst(array):
return constant_op.constant(array, dtypes.float64)
def _tf_ones(shape):
return array_ops.ones(shape, dtype=dtypes.float64)
class TridiagonalSolveOpTest(test.TestCase):
def _test(self,
diags,
rhs,
expected,
diags_format="compact",
transpose_rhs=False,
conjugate_rhs=False):
with self.cached_session(use_gpu=True):
result = linalg_impl.tridiagonal_solve(diags, rhs, diags_format,
transpose_rhs, conjugate_rhs)
self.assertAllClose(self.evaluate(result), expected)
def _testWithLists(self,
diags,
rhs,
expected,
diags_format="compact",
transpose_rhs=False,
conjugate_rhs=False):
self._test(
_tfconst(diags), _tfconst(rhs), _tfconst(expected), diags_format,
transpose_rhs, conjugate_rhs)
def _assertRaises(self, diags, rhs, diags_format="compact"):
with self.assertRaises(ValueError):
linalg_impl.tridiagonal_solve(diags, rhs, diags_format)
# Tests with various dtypes
def testReal(self):
for dtype in dtypes.float32, dtypes.float64:
self._test(
diags=constant_op.constant(_sample_diags, dtype),
rhs=constant_op.constant(_sample_rhs, dtype),
expected=constant_op.constant(_sample_result, dtype))
def testComplex(self):
for dtype in dtypes.complex64, dtypes.complex128:
self._test(
diags=constant_op.constant(_sample_diags, dtype) * (1 + 1j),
rhs=constant_op.constant(_sample_rhs, dtype) * (1 - 1j),
expected=constant_op.constant(_sample_result, dtype) * (1 - 1j) /
(1 + 1j))
# Tests with small matrix sizes
def test3x3(self):
self._testWithLists(
diags=[[2, -1, 0], [1, 3, 1], [0, -1, -2]],
rhs=[1, 2, 3],
expected=[-3, 2, 7])
def test2x2(self):
self._testWithLists(
diags=[[2, 0], [1, 3], [0, 1]], rhs=[1, 4], expected=[-5, 3])
def test2x2Complex(self):
for dtype in dtypes.complex64, dtypes.complex128:
self._test(
diags=constant_op.constant([[2j, 0j], [1j, 3j], [0j, 1j]], dtype),
rhs=constant_op.constant([1 - 1j, 4 - 4j], dtype),
expected=constant_op.constant([5 + 5j, -3 - 3j], dtype))
def test1x1(self):
self._testWithLists(diags=[[0], [3], [0]], rhs=[6], expected=[2])
def test0x0(self):
self._test(
diags=constant_op.constant(0, shape=(3, 0), dtype=dtypes.float32),
rhs=constant_op.constant(0, shape=(0, 1), dtype=dtypes.float32),
expected=constant_op.constant(0, shape=(0, 1), dtype=dtypes.float32))
def test2x2WithMultipleRhs(self):
self._testWithLists(
diags=[[2, 0], [1, 3], [0, 1]],
rhs=[[1, 2, 3], [4, 8, 12]],
expected=[[-5, -10, -15], [3, 6, 9]])
def test1x1WithMultipleRhs(self):
self._testWithLists(
diags=[[0], [3], [0]], rhs=[[6, 9, 12]], expected=[[2, 3, 4]])
def test1x1NotInvertible(self):
with self.assertRaises(errors_impl.InvalidArgumentError):
self._testWithLists(diags=[[0], [0], [0]], rhs=[[6, 9, 12]], expected=[])
def test2x2NotInvertible(self):
with self.assertRaises(errors_impl.InvalidArgumentError):
self._testWithLists(
diags=[[3, 0], [1, 3], [0, 1]], rhs=[1, 4], expected=[])
# Other edge cases
@flags(FLAG_REQUIRES_PIVOTING)
def testCaseRequiringPivoting(self):
# Without partial pivoting (e.g. Thomas algorithm) this would fail.
self._testWithLists(
diags=[[2, -1, 1, 0], [1, 4, 1, -1], [0, 2, -2, 3]],
rhs=[1, 2, 3, 4],
expected=[8, -3.5, 0, -4])
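  # Hedged sketch of the pivot-free Thomas algorithm (an assumed reference,
  # not the op's actual kernel), with diagonals in the compact format used
  # throughout. On the system above the forward sweep hits a zero pivot
  # (4 - 2 * 2 == 0), which is why the case requires partial pivoting.
  @staticmethod
  def _thomas_solve_no_pivoting(superdiag, maindiag, subdiag, rhs):
    n = len(rhs)
    c = np.zeros(n)
    d = np.zeros(n)
    c[0] = superdiag[0] / maindiag[0]
    d[0] = rhs[0] / maindiag[0]
    for i in range(1, n):
      denom = maindiag[i] - subdiag[i] * c[i - 1]  # zero pivot => breakdown
      if i < n - 1:
        c[i] = superdiag[i] / denom
      d[i] = (rhs[i] - subdiag[i] * d[i - 1]) / denom
    x = np.zeros(n)
    x[-1] = d[-1]
    for i in range(n - 2, -1, -1):
      x[i] = d[i] - c[i] * x[i + 1]
    return x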
@flags(FLAG_REQUIRES_PIVOTING)
def testCaseRequiringPivotingLastRows(self):
self._testWithLists(
diags=[[2, 1, -1, 0], [1, -1, 2, 1], [0, 1, -6, 1]],
rhs=[1, 2, -1, -2],
expected=[5, -2, -5, 3])
def testNotInvertible(self):
if test.is_gpu_available(cuda_only=True):
# CuSparse gtsv routines don't raise errors for non-invertible
# matrices.
return
with self.assertRaises(errors_impl.InvalidArgumentError):
self._testWithLists(
diags=[[2, -1, 1, 0], [1, 4, 1, -1], [0, 2, 0, 3]],
rhs=[1, 2, 3, 4],
expected=[8, -3.5, 0, -4])
def testDiagonal(self):
self._testWithLists(
diags=[[0, 0, 0, 0], [1, 2, -1, -2], [0, 0, 0, 0]],
rhs=[1, 2, 3, 4],
expected=[1, 1, -3, -2])
def testUpperTriangular(self):
self._testWithLists(
diags=[[2, 4, -1, 0], [1, 3, 1, 2], [0, 0, 0, 0]],
rhs=[1, 6, 4, 4],
expected=[13, -6, 6, 2])
def testLowerTriangular(self):
self._testWithLists(
diags=[[0, 0, 0, 0], [2, -1, 3, 1], [0, 1, 4, 2]],
rhs=[4, 5, 6, 1],
expected=[2, -3, 6, -11])
# Multiple right-hand sides and batching
def testWithTwoRightHandSides(self):
self._testWithLists(
diags=_sample_diags,
rhs=np.transpose([_sample_rhs, 2 * _sample_rhs]),
expected=np.transpose([_sample_result, 2 * _sample_result]))
def testBatching(self):
self._testWithLists(
diags=np.array([_sample_diags, -_sample_diags]),
rhs=np.array([_sample_rhs, 2 * _sample_rhs]),
expected=np.array([_sample_result, -2 * _sample_result]))
def testWithTwoBatchingDimensions(self):
self._testWithLists(
diags=np.array([[_sample_diags, -_sample_diags, _sample_diags],
[-_sample_diags, _sample_diags, -_sample_diags]]),
rhs=np.array([[_sample_rhs, 2 * _sample_rhs, 3 * _sample_rhs],
[4 * _sample_rhs, 5 * _sample_rhs, 6 * _sample_rhs]]),
expected=np.array(
[[_sample_result, -2 * _sample_result, 3 * _sample_result],
[-4 * _sample_result, 5 * _sample_result, -6 * _sample_result]]))
def testBatchingAndTwoRightHandSides(self):
rhs = np.transpose([_sample_rhs, 2 * _sample_rhs])
expected_result = np.transpose([_sample_result, 2 * _sample_result])
self._testWithLists(
diags=np.array([_sample_diags, -_sample_diags]),
rhs=np.array([rhs, 2 * rhs]),
expected=np.array([expected_result, -2 * expected_result]))
# Various input formats
def testSequenceFormat(self):
self._test(
diags=(_tfconst([2, 1, 4]), _tfconst([1, 3, 2, 2]), _tfconst([1, -1,
1])),
rhs=_tfconst([1, 2, 3, 4]),
expected=_tfconst([-9, 5, -4, 4]),
diags_format="sequence")
def testSequenceFormatWithDummyElements(self):
dummy = 20
self._test(
diags=(_tfconst([2, 1, 4, dummy]), _tfconst([1, 3, 2, 2]),
_tfconst([dummy, 1, -1, 1])),
rhs=_tfconst([1, 2, 3, 4]),
expected=_tfconst([-9, 5, -4, 4]),
diags_format="sequence")
def testSequenceFormatWithBatching(self):
self._test(
diags=(_tfconst([[2, 1, 4], [-2, -1, -4]]),
_tfconst([[1, 3, 2, 2], [-1, -3, -2, -2]]),
_tfconst([[1, -1, 1], [-1, 1, -1]])),
rhs=_tfconst([[1, 2, 3, 4], [1, 2, 3, 4]]),
expected=_tfconst([[-9, 5, -4, 4], [9, -5, 4, -4]]),
diags_format="sequence")
def testMatrixFormat(self):
self._testWithLists(
diags=[[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]],
rhs=[1, 2, 3, 4],
expected=[-9, 5, -4, 4],
diags_format="matrix")
def testMatrixFormatWithMultipleRightHandSides(self):
self._testWithLists(
diags=[[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]],
rhs=[[1, -1], [2, -2], [3, -3], [4, -4]],
expected=[[-9, 9], [5, -5], [-4, 4], [4, -4]],
diags_format="matrix")
def testMatrixFormatWithBatching(self):
self._testWithLists(
diags=[[[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]],
[[-1, -2, 0, 0], [-1, -3, -1, 0], [0, 1, -2, -4], [0, 0, -1,
-2]]],
rhs=[[1, 2, 3, 4], [1, 2, 3, 4]],
expected=[[-9, 5, -4, 4], [9, -5, 4, -4]],
diags_format="matrix")
def testRightHandSideAsColumn(self):
self._testWithLists(
diags=_sample_diags,
rhs=np.transpose([_sample_rhs]),
expected=np.transpose([_sample_result]),
diags_format="compact")
# Tests with transpose and adjoint
def testTransposeRhs(self):
self._testWithLists(
diags=_sample_diags,
rhs=np.array([_sample_rhs, 2 * _sample_rhs]),
expected=np.array([_sample_result, 2 * _sample_result]),
transpose_rhs=True)
def testConjugateRhs(self):
self._testWithLists(
diags=_sample_diags,
rhs=np.transpose([_sample_rhs * (1 + 1j), _sample_rhs * (1 - 2j)]),
expected=np.transpose(
[_sample_result * (1 - 1j), _sample_result * (1 + 2j)]),
conjugate_rhs=True)
def testAdjointRhs(self):
self._testWithLists(
diags=_sample_diags,
rhs=np.array([_sample_rhs * (1 + 1j), _sample_rhs * (1 - 2j)]),
expected=np.array(
[_sample_result * (1 - 1j), _sample_result * (1 + 2j)]),
transpose_rhs=True,
conjugate_rhs=True)
def testTransposeRhsWithBatching(self):
self._testWithLists(
diags=np.array([_sample_diags, -_sample_diags]),
rhs=np.array([[_sample_rhs, 2 * _sample_rhs],
[3 * _sample_rhs, 4 * _sample_rhs]]),
expected=np.array([[_sample_result, 2 * _sample_result],
[-3 * _sample_result, -4 * _sample_result]]),
transpose_rhs=True)
def testTransposeRhsWithRhsAsVector(self):
self._testWithLists(
diags=_sample_diags,
rhs=_sample_rhs,
expected=_sample_result,
transpose_rhs=True)
def testConjugateRhsWithRhsAsVector(self):
self._testWithLists(
diags=_sample_diags,
rhs=_sample_rhs * (1 + 1j),
expected=_sample_result * (1 - 1j),
conjugate_rhs=True)
def testTransposeRhsWithRhsAsVectorAndBatching(self):
self._testWithLists(
diags=np.array([_sample_diags, -_sample_diags]),
rhs=np.array([_sample_rhs, 2 * _sample_rhs]),
expected=np.array([_sample_result, -2 * _sample_result]),
transpose_rhs=True)
# Gradient tests
def _gradientTest(
self,
diags,
rhs,
y, # output = reduce_sum(y * tridiag_solve(diags, rhs))
expected_grad_diags, # expected gradient of output w.r.t. diags
expected_grad_rhs, # expected gradient of output w.r.t. rhs
diags_format="compact",
transpose_rhs=False,
conjugate_rhs=False,
feed_dict=None):
expected_grad_diags = _tfconst(expected_grad_diags)
expected_grad_rhs = _tfconst(expected_grad_rhs)
with backprop.GradientTape() as tape_diags:
with backprop.GradientTape() as tape_rhs:
tape_diags.watch(diags)
tape_rhs.watch(rhs)
x = linalg_impl.tridiagonal_solve(
diags,
rhs,
diagonals_format=diags_format,
transpose_rhs=transpose_rhs,
conjugate_rhs=conjugate_rhs)
res = math_ops.reduce_sum(x * y)
with self.cached_session(use_gpu=True) as sess:
actual_grad_diags = sess.run(
tape_diags.gradient(res, diags), feed_dict=feed_dict)
      actual_grad_rhs = sess.run(
          tape_rhs.gradient(res, rhs), feed_dict=feed_dict)
    self.assertAllClose(expected_grad_diags, actual_grad_diags)
    self.assertAllClose(expected_grad_rhs, actual_grad_rhs)
def _gradientTestWithLists(self,
diags,
rhs,
y,
expected_grad_diags,
expected_grad_rhs,
diags_format="compact",
transpose_rhs=False,
conjugate_rhs=False):
self._gradientTest(
_tfconst(diags), _tfconst(rhs), _tfconst(y), expected_grad_diags,
expected_grad_rhs, diags_format, transpose_rhs, conjugate_rhs)
def testGradientSimple(self):
self._gradientTestWithLists(
diags=_sample_diags,
rhs=_sample_rhs,
y=[1, 3, 2, 4],
expected_grad_diags=[[-5, 0, 4, 0], [9, 0, -4, -16], [0, 0, 5, 16]],
expected_grad_rhs=[1, 0, -1, 4])
def testGradientWithMultipleRhs(self):
self._gradientTestWithLists(
diags=_sample_diags,
rhs=[[1, 2], [2, 4], [3, 6], [4, 8]],
y=[[1, 5], [2, 6], [3, 7], [4, 8]],
expected_grad_diags=([[-20, 28, -60, 0], [36, -35, 60, 80],
[0, 63, -75, -80]]),
expected_grad_rhs=[[0, 2], [1, 3], [1, 7], [0, -10]])
def _makeDataForGradientWithBatching(self):
y = np.array([1, 3, 2, 4])
grad_diags = np.array([[-5, 0, 4, 0], [9, 0, -4, -16], [0, 0, 5, 16]])
grad_rhs = np.array([1, 0, -1, 4])
diags_batched = np.array(
[[_sample_diags, 2 * _sample_diags, 3 * _sample_diags],
[4 * _sample_diags, 5 * _sample_diags, 6 * _sample_diags]])
rhs_batched = np.array([[_sample_rhs, -_sample_rhs, _sample_rhs],
[-_sample_rhs, _sample_rhs, -_sample_rhs]])
y_batched = np.array([[y, y, y], [y, y, y]])
expected_grad_diags_batched = np.array(
[[grad_diags, -grad_diags / 4, grad_diags / 9],
[-grad_diags / 16, grad_diags / 25, -grad_diags / 36]])
expected_grad_rhs_batched = np.array(
[[grad_rhs, grad_rhs / 2, grad_rhs / 3],
[grad_rhs / 4, grad_rhs / 5, grad_rhs / 6]])
return (y_batched, diags_batched, rhs_batched, expected_grad_diags_batched,
expected_grad_rhs_batched)
def testGradientWithBatchDims(self):
y, diags, rhs, expected_grad_diags, expected_grad_rhs = \
self._makeDataForGradientWithBatching()
self._gradientTestWithLists(
diags=diags,
rhs=rhs,
y=y,
expected_grad_diags=expected_grad_diags,
expected_grad_rhs=expected_grad_rhs)
@test_util.run_deprecated_v1
def testGradientWithUnknownShapes(self):
def placeholder(rank):
return array_ops.placeholder(
          dtypes.float64, shape=[None] * rank)
y, diags, rhs, expected_grad_diags, expected_grad_rhs = \
self._makeDataForGradientWithBatching()
diags_placeholder = placeholder(rank=4)
rhs_placeholder = placeholder(rank=3)
y_placeholder = placeholder(rank=3)
self._gradientTest(
diags=diags_placeholder,
rhs=rhs_placeholder,
y=y_placeholder,
expected_grad_diags=expected_grad_diags,
expected_grad_rhs=expected_grad_rhs,
feed_dict={
diags_placeholder: diags,
rhs_placeholder: rhs,
y_placeholder: y
})
# Invalid input shapes
@flags(FLAG_NO_PARAMETERIZATION)
def testInvalidShapesCompactFormat(self):
def test_raises(diags_shape, rhs_shape):
self._assertRaises(_tf_ones(diags_shape), _tf_ones(rhs_shape), "compact")
test_raises((5, 4, 4), (5, 4))
test_raises((5, 3, 4), (4, 5))
    test_raises((5, 3, 4), (5,))
    test_raises((5,), (5, 4))
@flags(FLAG_NO_PARAMETERIZATION)
def testInvalidShapesSequenceFormat(self):
def test_raises(diags_tuple_shapes, rhs_shape):
diagonals = tuple(_tf_ones(shape) for shape in diags_tuple_shapes)
self._assertRaises(diagonals, _tf_ones(rhs_shape), "sequence")
test_raises(((5, 4), (5, 4)), (5, 4))
test_raises(((5, 4), (5, 4), (5, 6)), (5, 4))
test_raises(((5, 3), (5, 4), (5, 6)), (5, 4))
test_raises(((5, 6), (5, 4), (5, 3)), (5, 4))
test_raises(((5, 4), (7, 4), (5, 4)), (5, 4))
test_raises(((5, 4), (7, 4), (5, 4)), (3, 4))
@flags(FLAG_NO_PARAMETERIZATION)
def testInvalidShapesMatrixFormat(self):
def test_raises(diags_shape, rhs_shape):
self._assertRaises(_tf_ones(diags_shape), _tf_ones(rhs_shape), "matrix")
test_raises((5, 4, 7), (5, 4))
test_raises((5, 4, 4), (3, 4))
test_raises((5, 4, 4), (5, 3))
# Tests with placeholders
def _testWithPlaceholders(self,
diags_shape,
rhs_shape,
diags_feed,
rhs_feed,
expected,
diags_format="compact"):
if context.executing_eagerly():
return
diags = array_ops.placeholder(dtypes.float64, shape=diags_shape)
rhs = array_ops.placeholder(dtypes.float64, shape=rhs_shape)
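    # self.pivoting is injected per test method by the parameterization in
    # the __main__ block below.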
x = linalg_impl.tridiagonal_solve(
diags, rhs, diags_format, partial_pivoting=self.pivoting)
with self.cached_session(use_gpu=True) as sess:
result = sess.run(x, feed_dict={diags: diags_feed, rhs: rhs_feed})
self.assertAllClose(result, expected)
@test_util.run_deprecated_v1
def testCompactFormatAllDimsUnknown(self):
self._testWithPlaceholders(
diags_shape=[None, None],
rhs_shape=[None],
diags_feed=_sample_diags,
rhs_feed=_sample_rhs,
expected=_sample_result)
@test_util.run_deprecated_v1
def testCompactFormatUnknownMatrixSize(self):
self._testWithPlaceholders(
diags_shape=[3, None],
rhs_shape=[4],
diags_feed=_sample_diags,
rhs_feed=_sample_rhs,
expected=_sample_result)
@test_util.run_deprecated_v1
def testCompactFormatUnknownRhsCount(self):
self._testWithPlaceholders(
diags_shape=[3, 4],
rhs_shape=[4, None],
diags_feed=_sample_diags,
rhs_feed=np.transpose([_sample_rhs, 2 * _sample_rhs]),
expected=np.transpose([_sample_result, 2 * _sample_result]))
@test_util.run_deprecated_v1
def testCompactFormatUnknownBatchSize(self):
self._testWithPlaceholders(
diags_shape=[None, 3, 4],
rhs_shape=[None, 4],
diags_feed=np.array([_sample_diags, -_sample_diags]),
rhs_feed=np.array([_sample_rhs, 2 * _sample_rhs]),
expected=np.array([_sample_result, -2 * _sample_result]))
@test_util.run_deprecated_v1
def testMatrixFormatWithUnknownDims(self):
if context.executing_eagerly():
return
def test_with_matrix_shapes(matrix_shape):
matrix = np.array([[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4],
[0, 0, 1, 2]])
rhs = np.array([1, 2, 3, 4])
x = np.array([-9, 5, -4, 4])
self._testWithPlaceholders(
diags_shape=matrix_shape,
rhs_shape=[None, None],
diags_feed=matrix,
rhs_feed=np.transpose([rhs, 2 * rhs]),
expected=np.transpose([x, 2 * x]),
diags_format="matrix")
test_with_matrix_shapes(matrix_shape=[4, 4])
test_with_matrix_shapes(matrix_shape=[None, 4])
test_with_matrix_shapes(matrix_shape=[4, None])
with self.assertRaises(ValueError):
test_with_matrix_shapes(matrix_shape=[None, None])
@test_util.run_deprecated_v1
def testSequenceFormatWithUnknownDims(self):
if context.executing_eagerly():
return
superdiag = array_ops.placeholder(dtypes.float64, shape=[None])
diag = array_ops.placeholder(dtypes.float64, shape=[None])
subdiag = array_ops.placeholder(dtypes.float64, shape=[None])
rhs = array_ops.placeholder(dtypes.float64, shape=[None])
x = linalg_impl.tridiagonal_solve((superdiag, diag, subdiag),
rhs,
diagonals_format="sequence",
partial_pivoting=self.pivoting)
with self.cached_session(use_gpu=True) as sess:
result = sess.run(
x,
feed_dict={
subdiag: [20, 1, -1, 1],
diag: [1, 3, 2, 2],
superdiag: [2, 1, 4, 20],
rhs: [1, 2, 3, 4]
})
self.assertAllClose(result, [-9, 5, -4, 4])
# Benchmark
class TridiagonalSolveBenchmark(test.Benchmark):
sizes = [(100000, 1, 1), (1000000, 1, 1), (10000000, 1, 1), (100000, 10, 1),
(100000, 100, 1), (10000, 1, 100), (10000, 1, 1000),
(10000, 1, 10000)]
pivoting_options = [(True, "pivoting"), (False, "no_pivoting")]
def _generateData(self, matrix_size, batch_size, num_rhs, seed=42):
np.random.seed(seed)
data = np.random.normal(size=(batch_size, matrix_size, 3 + num_rhs))
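    # The first three columns of the innermost axis become the compact-format
    # diagonals; the remaining num_rhs columns are the right-hand sides.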
diags = np.stack([data[:, :, 0], data[:, :, 1], data[:, :, 2]], axis=-2)
rhs = data[:, :, 3:]
return (ops.convert_to_tensor(diags, dtype=dtypes.float64),
ops.convert_to_tensor(rhs, dtype=dtypes.float64))
def benchmarkTridiagonalSolveOp(self):
devices = [("/cpu:0", "cpu")]
if test.is_gpu_available(cuda_only=True):
devices += [("/gpu:0", "gpu")]
for device_option, pivoting_option, size_option in \
itertools.product(devices, self.pivoting_options, self.sizes):
device_id, device_name = device_option
pivoting, pivoting_name = pivoting_option
matrix_size, batch_size, num_rhs = size_option
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device(device_id):
diags, rhs = self._generateData(matrix_size, batch_size, num_rhs)
x = linalg_impl.tridiagonal_solve(
diags, rhs, partial_pivoting=pivoting)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=10,
store_memory_usage=False,
name=("tridiagonal_solve_{}_matrix_size_{}_batch_size_{}_"
"num_rhs_{}_{}").format(device_name, matrix_size,
batch_size, num_rhs, pivoting_name))
if __name__ == "__main__":
for name, fun in dict(TridiagonalSolveOpTest.__dict__).items():
if not name.startswith("test"):
continue
if hasattr(fun, FLAG_NO_PARAMETERIZATION):
continue
# Replace testFoo with testFoo_pivoting and testFoo_noPivoting, setting
    # self.pivoting to the corresponding value.
delattr(TridiagonalSolveOpTest, name)
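    # decor captures test_fun and pivoting as arguments, avoiding the usual
    # late-binding pitfall of closures defined inside a loop.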
def decor(test_fun, pivoting):
def wrapped(instance):
instance.pivoting = pivoting
test_fun(instance)
return wrapped
setattr(TridiagonalSolveOpTest, name + "_pivoting",
decor(fun, pivoting=True))
if not hasattr(fun, FLAG_REQUIRES_PIVOTING):
setattr(TridiagonalSolveOpTest, name + "_noPivoting",
decor(fun, pivoting=False))
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/tridiagonal_solve_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseAdd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import timeit
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
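  """Zero out entries of `x` below `thresh` (in place) and sparsify the rest.
  Returns a (SparseTensor, nnz) pair. A small worked example:
  _sparsify(np.array([[0.2, 0.7], [0.9, 0.1]])) keeps only the entries
  >= 0.5, giving indices [[0, 1], [1, 0]], values [0.7, 0.9],
  dense_shape [2, 2] and nnz == 2.
  """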
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
class SparseAddTest(test.TestCase):
def _randomTensor(self, size, np_dtype, sparse=True):
n, m = size
x = np.random.randn(n, m).astype(np_dtype)
return _sparsify(x) if sparse else x
def _SparseTensorValue_3x3(self, negate=False):
# [ 1]
# [2 ]
# [3 4]
# ...or its cwise negation, if `negate`
ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]])
val = np.array([1, 2, 3, 4])
if negate:
val = -np.array([1, 2, 3, 4])
shape = np.array([3, 3])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.float32), np.array(shape, np.int64))
def _SparseTensor_3x3(self, negate=False):
return sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_3x3(negate))
def _SparseTensor_3x3_v2(self):
# [ 1]
# [-1.9 ]
# [ 3 -4.2]
ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]])
val = np.array([1, -1.9, 3, -4.2])
shape = np.array([3, 3])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
def testAddSelf(self):
with test_util.force_cpu():
for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
for sp_b in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
sp_sum = sparse_ops.sparse_add(sp_a, sp_b)
self.assertAllEqual((3, 3), sp_sum.get_shape())
sum_out = self.evaluate(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, [[0, 1], [1, 0], [2, 0], [2, 1]])
self.assertAllEqual(sum_out.values, [2, 4, 6, 8])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
def testAddSelfAndNegation(self):
with test_util.force_cpu():
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x3(negate=True)
      sp_sum = sparse_ops.sparse_add(sp_a, sp_b, thresh=0.1)
sum_out = self.evaluate(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, np.empty([0, 2]))
self.assertAllEqual(sum_out.values, [])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
def testSmallValuesShouldVanish(self):
with test_util.force_cpu():
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x3_v2()
# sum:
# [ 2]
# [.1 ]
# [ 6 -.2]
# two values should vanish: |.1| < .21, and |-.2| < .21
sp_sum = sparse_ops.sparse_add(sp_a, sp_b, thresh=0.21)
sum_out = self.evaluate(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0]])
self.assertAllEqual(sum_out.values, [2, 6])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
# only .1 vanishes
sp_sum = sparse_ops.sparse_add(sp_a, sp_b, thresh=0.11)
sum_out = self.evaluate(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0], [2, 1]])
self.assertAllClose(sum_out.values, [2, 6, -.2])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
@test_util.run_deprecated_v1
def testGradients(self):
np.random.seed(1618) # Make it reproducible.
with self.session(use_gpu=False):
for n in [10, 31]:
for m in [4, 17]:
sp_a, nnz_a = self._randomTensor([n, m], np.float32)
sp_b, nnz_b = self._randomTensor([n, m], np.float32)
sp_sum = sparse_ops.sparse_add(sp_a, sp_b)
nnz_sum = len(self.evaluate(sp_sum.values))
err = gradient_checker.compute_gradient_error(
[sp_a.values, sp_b.values], [(nnz_a,), (nnz_b,)], sp_sum.values,
(nnz_sum,))
self.assertLess(err, 1e-3)
def testAddSparseDense(self):
np.random.seed(1618) # Make it reproducible.
n, m = np.random.randint(30, size=2)
for dtype in [np.float32, np.float64, np.int64, np.complex64]:
for index_dtype in [np.int32, np.int64]:
rand_vals_np = np.random.randn(n, m).astype(dtype)
dense_np = np.random.randn(n, m).astype(dtype)
with test_util.force_cpu():
sparse, unused_nnz = _sparsify(rand_vals_np, index_dtype=index_dtype)
s = self.evaluate(
sparse_ops.sparse_add(sparse, constant_op.constant(dense_np)))
self.assertAllEqual(dense_np + rand_vals_np, s)
          self.assertEqual(s.dtype, dtype)
# check commutativity
s = self.evaluate(
sparse_ops.sparse_add(constant_op.constant(dense_np), sparse))
self.assertAllEqual(dense_np + rand_vals_np, s)
          self.assertEqual(s.dtype, dtype)
@test_util.run_deprecated_v1
def testSparseTensorDenseAddGradients(self):
np.random.seed(1618) # Make it reproducible.
n, m = np.random.randint(30, size=2)
rand_vals_np = np.random.randn(n, m).astype(np.float32)
dense_np = np.random.randn(n, m).astype(np.float32)
with self.session(use_gpu=False):
sparse, nnz = _sparsify(rand_vals_np)
dense = constant_op.constant(dense_np, dtype=dtypes.float32)
s = sparse_ops.sparse_add(sparse, dense)
err = gradient_checker.compute_gradient_error([sparse.values, dense],
[(nnz,), (n, m)], s, (n, m))
self.assertLess(err, 1e-3)
@test_util.run_deprecated_v1
def testInvalidSparseTensor(self):
with test_util.force_cpu():
shape = [2, 2]
val = [0]
dense = constant_op.constant(np.zeros(shape, dtype=np.int32))
for bad_idx in [
[[-1, 0]], # -1 is invalid.
[[1, 3]], # ...so is 3.
]:
sparse = sparse_tensor.SparseTensorValue(bad_idx, val, shape)
s = sparse_ops.sparse_add(sparse, dense)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"invalid index"):
self.evaluate(s)
######################## Benchmarking code
def _s2d_add_vs_sparse_add(sparsity, n, m, num_iters=50):
np.random.seed(1618)
with session.Session(graph=ops.Graph()) as sess:
sp_vals = np.random.rand(n, m).astype(np.float32)
sp_t, unused_nnz = _sparsify(sp_vals, thresh=sparsity, index_dtype=np.int32)
vals = np.random.rand(n, m).astype(np.float32)
s2d = math_ops.add(
sparse_ops.sparse_tensor_to_dense(sp_t), constant_op.constant(vals))
sa = sparse_ops.sparse_add(sp_t, constant_op.constant(vals))
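    # Two warm-up runs so one-time graph and session setup costs don't skew
    # the timings below.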
timeit.timeit(lambda: sess.run(s2d), number=3)
timeit.timeit(lambda: sess.run(sa), number=3)
s2d_total = timeit.timeit(lambda: sess.run(s2d), number=num_iters)
sa_total = timeit.timeit(lambda: sess.run(sa), number=num_iters)
# per-iter latency; secs to millis
return s2d_total * 1e3 / num_iters, sa_total * 1e3 / num_iters
class SparseAddBenchmark(test.Benchmark):
def benchmarkSparseAddDense(self):
print("SparseAddDense: add with sparse_to_dense vs. sparse_add")
print("%nnz \t n \t m \t millis(s2d) \t millis(sparse_add) \t speedup")
for sparsity in [0.99, 0.5, 0.01]:
for n in [1, 256, 50000]:
for m in [100, 1000]:
s2d_dt, sa_dt = _s2d_add_vs_sparse_add(sparsity, n, m)
print("%.2f \t %d \t %d \t %.4f \t %.4f \t %.2f" % (sparsity, n, m,
s2d_dt, sa_dt,
s2d_dt / sa_dt))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_add_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for neon kernel for depthwise convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def ConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
convolution parameters.
"""
input_sizes = [[4, 5, 5, 48], [4, 8, 8, 84], [4, 17, 17, 48], [4, 35, 35, 2],
[4, 147, 147, 2], [3, 299, 299, 3], [5, 183, 183, 1]]
filter_sizes = [[1, 1, 48, 2], [1, 3, 84, 1], [3, 1, 48, 4], [5, 5, 2, 1],
[3, 3, 2, 8], [2, 2, 3, 8], [5, 5, 1, 2]]
out_sizes = [[4, 5, 5, 96], [4, 8, 8, 84], [4, 17, 17, 192], [4, 35, 35, 2],
[4, 49, 49, 16], [3, 150, 150, 24], [5, 92, 92, 2]]
strides = [1, 1, 1, 1, 3, 2, 2]
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
  paddings = [SAME, SAME, SAME, SAME, VALID, SAME, SAME]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
def CheckGradConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
  compute_gradient_error() is very expensive, so the configs should be
  relatively small.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
convolution parameters.
"""
input_sizes = [[2, 5, 8, 1], [4, 5, 5, 1], [2, 4, 4, 2], [1, 15, 15, 2],
[2, 15, 16, 1]]
filter_sizes = [[4, 4, 1, 2], [2, 2, 1, 2], [3, 1, 2, 2], [1, 3, 2, 1],
[3, 3, 1, 2]]
out_sizes = [[2, 5, 8, 2], [4, 2, 2, 2], [2, 4, 4, 4], [1, 15, 15, 2],
[2, 5, 5, 2]]
strides = [1, 2, 1, 1, 3]
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [SAME, VALID, SAME, SAME, VALID]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
class DepthwiseConv2DTest(test.TestCase):
  # This tests that depthwise_conv2d and depthwise_conv2d_native produce
  # the same results. It also tests that the NCHW and NHWC formats agree:
  # depthwise_conv2d_native with the 'NCHW' format (plus transposition)
  # must match the 'NHWC' format used via the higher-level interface.
def _VerifyValues(self,
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
use_gpu,
data_format="NHWC"):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
use_gpu: Whether to use GPU.
data_format: The data_format of the input. "NHWC" or "NCHW".
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
    # Initialize the input and filter tensors with numbers incrementing
    # from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.cached_session(use_gpu=use_gpu) as sess:
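      # Run DepthwiseConv2dNative ops created in this scope with the kernel
      # registered under the "neon" label.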
with sess.graph._kernel_label_map({"DepthwiseConv2dNative": "neon"}):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
native_t1 = t1
strides = [1, stride, stride, 1]
if data_format == "NCHW":
# Transpose from NHWC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
conv_native = nn_ops.depthwise_conv2d_native(
native_t1,
t2,
strides=strides,
data_format=data_format,
padding=padding)
if data_format == "NCHW":
# Transpose back from NCHW to NHWC
conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
conv_interface = nn_impl.depthwise_conv2d(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
native_result = self.evaluate(conv_native)
interface_result = self.evaluate(conv_interface)
print("depthwise conv_2d: ", tensor_in_sizes, "*", filter_in_sizes,
", stride:", stride, ", padding: ", padding, ", max diff: ",
np.amax(np.absolute(native_result - interface_result)))
self.assertAllClose(
np.ravel(native_result), np.ravel(interface_result), 1e-5)
self.assertShapeEqual(native_result, conv_native)
self.assertShapeEqual(native_result, conv_interface)
@test_util.run_deprecated_v1
def testDepthwiseConv2D(self):
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
print("Processing ", index, "th config.")
if index == 2:
self._VerifyValues(
input_size, filter_size, stride, padding, use_gpu=True)
self._VerifyValues(
input_size, filter_size, stride, padding, use_gpu=False)
@test_util.run_deprecated_v1
def testDepthwiseConv2DFormat(self):
if not test.is_gpu_available():
return
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
print("Processing ", index, "th config.")
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
use_gpu=True,
data_format="NCHW")
# This is testing against hand calculated results.
def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected, use_gpu):
"""Verifies the output values of the depthwise convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
use_gpu: Whether to use GPU.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
    # Initialize the input tensor with an array of numbers incrementing
    # from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.cached_session(use_gpu=use_gpu) as sess:
with sess.graph._kernel_label_map({"DepthwiseConv2dNative": "neon"}):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
conv = nn_ops.depthwise_conv2d_native(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = self.evaluate(conv)
print("value = ", value)
self.assertAllClose(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
@test_util.run_deprecated_v1
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output,
use_gpu=False)
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output,
use_gpu=True)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/neon_depthwise_conv_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fractional average pool operation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class FractionalAvgTest(test.TestCase):
  # Random number generator with a fixed seed.
_PRNG = np.random.RandomState(341261000)
_SEED = 341261001
def _AvgPoolAlongRows(self, input_matrix, row_seq, overlapping):
"""Perform average pool along row of a 2-D matrix based on row_seq.
Args:
input_matrix: A 2-D matrix.
row_seq: Cumulative pooling sequence along row.
      overlapping: Whether or not to use overlapping when pooling.
Returns:
A 2-D matrix, with
* num_rows = len(row_seq)-1
* num_cols = input_matrix.num_cols.
"""
output_image = np.zeros(input_matrix.shape[1])
row_max = row_seq[-1]
for i in range(row_seq.shape[0] - 1):
row_start = row_seq[i]
row_end = row_seq[i + 1] + 1 if overlapping else row_seq[i + 1]
row_end = min(row_end, row_max)
output_image = np.vstack((output_image, np.mean(
input_matrix[row_start:row_end, :], axis=0))) # axis 0 is along row
# remove the sentinel row
return output_image[1:, :]
def _AvgPoolAlongCols(self, input_matrix, col_seq, overlapping):
"""Perform average pool along column of a 2-D matrix based on col_seq.
Args:
input_matrix: A 2-D matrix.
col_seq: Cumulative pooling sequence along column.
      overlapping: Whether or not to use overlapping when pooling.
Returns:
A 2-D matrix, with
* num_rows = input_matrix.num_rows
* num_cols = len(col_seq)-1.
"""
input_matrix = input_matrix.transpose()
output_matrix = self._AvgPoolAlongRows(input_matrix, col_seq, overlapping)
return output_matrix.transpose()
def _GetExpectedFractionalAvgPoolResult(self, input_tensor, row_seq, col_seq,
overlapping):
"""Get expected fractional average pooling result.
    row_seq and col_seq together define the fractional pooling regions.
Args:
input_tensor: Original input tensor, assuming it is a 4-D tensor, with
dimension as [batch, height/row, width/column, channels/depth].
row_seq: Cumulative pooling sequence along row.
col_seq: Cumulative pooling sequence along column.
overlapping: Use overlapping when doing pooling.
Returns:
A 4-D tensor that is the result of average pooling on input_tensor based
on pooling region defined by row_seq and col_seq, conditioned on whether
or not overlapping is used.
"""
input_shape = input_tensor.shape
output_shape = (input_shape[0], len(row_seq) - 1, len(col_seq) - 1,
input_shape[3])
output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)
for batch in range(input_shape[0]):
for channel in range(input_shape[3]):
two_dim_slice = input_tensor[batch, :, :, channel]
tmp = self._AvgPoolAlongRows(two_dim_slice, row_seq, overlapping)
output_tensor[batch, :, :, channel] = self._AvgPoolAlongCols(
tmp, col_seq, overlapping)
return output_tensor
def _ValidateFractionalAvgPoolResult(self, input_tensor, pooling_ratio,
pseudo_random, overlapping):
"""Validate FractionalAvgPool's result against expected.
Expected result is computed given input_tensor, and pooling region defined
by row_seq and col_seq.
Args:
input_tensor: A tensor or numpy ndarray.
      pooling_ratio: A list or tuple of length 4; the first and last
        elements must be 1.
pseudo_random: Use pseudo random method to generate pooling sequence.
overlapping: Use overlapping when pooling.
Returns:
None
"""
with self.cached_session() as sess:
p, r, c = nn_ops.fractional_avg_pool_v2(
input_tensor,
pooling_ratio,
pseudo_random,
overlapping,
seed=self._SEED)
actual, row_seq, col_seq = self.evaluate([p, r, c])
expected = self._GetExpectedFractionalAvgPoolResult(input_tensor, row_seq,
col_seq, overlapping)
self.assertShapeEqual(expected, p)
self.assertAllClose(expected, actual)
def _testVisually(self):
"""Manual test by printing out intermediate result of a small random tensor.
Since _GetExpectedFractionalAvgPoolResult is 'automated', it feels safer to
    have a test case where you can see what's happening.
This test will generate a small, random, int 2D matrix, and feed it to
FractionalAvgPool and _GetExpectedFractionalAvgPoolResult.
"""
num_rows = 6
num_cols = 6
tensor_shape = (1, num_rows, num_cols, 1)
pseudo_random = False
for overlapping in True, False:
print("-" * 70)
print("Testing FractionalAvgPool with overlapping = {}".format(
overlapping))
rand_mat = self._PRNG.randint(10, size=tensor_shape)
pooling_ratio = [1, math.sqrt(2), math.sqrt(2), 1]
with self.cached_session() as sess:
p, r, c = nn_ops.fractional_avg_pool_v2(
rand_mat.astype(np.float32),
pooling_ratio,
pseudo_random,
overlapping,
seed=self._SEED)
tensor_output, row_seq, col_seq = self.evaluate([p, r, c])
expected_result = self._GetExpectedFractionalAvgPoolResult(
rand_mat.astype(np.float32), row_seq, col_seq, overlapping)
print("row sequence:")
print(row_seq)
print("column sequence:")
print(col_seq)
print("Input:")
# Print input with pooling region marked.
for i in range(num_rows):
row_to_print = []
for j in range(num_cols):
if j in col_seq:
row_to_print.append("|")
row_to_print.append(str(rand_mat[0, i, j, 0]))
row_to_print.append("|")
if i in row_seq:
print("-" * 2 * len(row_to_print))
print(" ".join(row_to_print))
print("-" * 2 * len(row_to_print))
print("Output from FractionalAvgPool:")
print(tensor_output[0, :, :, 0])
print("Expected result:")
print(expected_result[0, :, :, 0])
def testAllInputOptions(self):
"""Try all possible input options for fractional_avg_pool.
"""
num_batches = 5
num_channels = 3
num_rows = 20
num_cols = 30
for pseudo_random in True, False:
for overlapping in True, False:
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(
rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,
overlapping)
def testIntegerTensorInput(self):
"""Test FractionalAvgPool works fine when input tensor is integer type.
"""
pseudo_random = True
overlapping = True
tensor_shape = (1, 6, 6, 1)
# pyformat: disable
mat = np.array([
[2, 6, 4, 1, 3, 6],
[8, 9, 1, 6, 6, 8],
[3, 9, 8, 2, 5, 6],
[2, 7, 9, 5, 4, 5],
[8, 5, 0, 5, 7, 4],
[4, 4, 5, 9, 7, 2]
])
# pyformat: enable
self._ValidateFractionalAvgPoolResult(mat.reshape(tensor_shape),
[1, math.sqrt(3), math.sqrt(2), 1],
pseudo_random, overlapping)
def testDifferentTensorShapes(self):
"""Test different shapes of input tensor.
Mainly test different combinations of num_rows and num_cols.
"""
pseudo_random = True
overlapping = True
for num_batches in [1, 3]:
for num_channels in [1, 3]:
for num_rows in [10, 20, 50]:
for num_cols in [10, 20, 50]:
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(
rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,
overlapping)
def testLargePoolingRatio(self):
"""Test when pooling ratio is not within [1, 2).
"""
pseudo_random = True
overlapping = True
num_batches = 3
num_channels = 3
num_rows = 30
num_cols = 50
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
for row_ratio in [math.sqrt(11), math.sqrt(37)]:
for col_ratio in [math.sqrt(11), math.sqrt(27)]:
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(rand_mat,
[1, row_ratio, col_ratio, 1],
pseudo_random, overlapping)
def testDivisiblePoolingRatio(self):
"""Test when num of rows/cols can evenly divide pooling ratio.
This is a case regular average pooling can handle. Should be handled by
fractional pooling as well.
"""
pseudo_random = True
overlapping = True
num_batches = 3
num_channels = 3
num_rows = 30
num_cols = 50
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(rand_mat, [1, 2, 2, 1], pseudo_random,
overlapping)
@test_util.run_deprecated_v1
def testDifferentInputTensorShape(self):
"""Runs the operation in one session with different input tensor shapes."""
with self.cached_session() as sess:
input_holder = array_ops.placeholder(dtypes.float32,
[None, None, None, 3])
pooling_ratio = [1, 1.5, 1.5, 1]
pseudo_random = False
overlapping = False
p, r, c = nn_ops.fractional_avg_pool_v2(
input_holder,
pooling_ratio,
pseudo_random,
overlapping,
seed=self._SEED)
# First run.
input_a = np.zeros([3, 32, 32, 3])
actual, row_seq, col_seq = sess.run([p, r, c], {input_holder: input_a})
expected = self._GetExpectedFractionalAvgPoolResult(
input_a, row_seq, col_seq, overlapping)
self.assertSequenceEqual(expected.shape, actual.shape)
# Second run.
input_b = np.zeros([4, 60, 60, 3])
actual, row_seq, col_seq = sess.run([p, r, c], {input_holder: input_b})
expected = self._GetExpectedFractionalAvgPoolResult(
input_b, row_seq, col_seq, overlapping)
self.assertSequenceEqual(expected.shape, actual.shape)
class FractionalAvgPoolGradTest(test.TestCase):
"""Tests for FractionalAvgPoolGrad.
Two types of tests for FractionalAvgPoolGrad.
1) Test fractional_avg_pool_grad() directly.
  This type of test relies on gen_nn_ops.avg_pool_grad() returning the
correct result. For example:
* input_tensor_shape = (1, 10, 10, 1)
* window_size = (1, 2, 2, 1)
* stride_size = (1, 2, 2, 1)
* padding: not really important, since 10/2 is divisible
avg pooling should generate the same result as fractional avg pooling with:
* row_sequence = [0, 2, 4, 6, 8, 10]
* col_sequence = [0, 2, 4, 6, 8, 10]
* overlapping = False
This also means their gradients in such case will be the same.
Similarly, when
* input_tensor_shape = (1, 7, 7, 1)
* window_size = (1, 3, 3, 1)
* stride_size = (1, 2, 2, 1)
* padding: not important
avg pooling should generate the same result as fractional avg pooling with:
* row_sequence = [0, 2, 4, 7]
* col_sequence = [0, 2, 4, 7]
* overlapping = True
2) Test through compute_gradient_error()
"""
_PRNG = np.random.RandomState(341261004)
_SEED = 341261005
def _GenerateRandomInputTensor(self, shape):
num_elements = 1
for dim_size in shape:
num_elements *= dim_size
x = self._PRNG.rand(num_elements) * 1000
return x.reshape(shape)
def testDirectNotUseOverlapping(self):
for num_batches in [1, 3]:
for row_window_size in [2, 5]:
for col_window_size in [2, 4]:
num_rows = row_window_size * 5
num_cols = col_window_size * 7
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.cached_session() as _:
input_tensor = constant_op.constant(
self._GenerateRandomInputTensor(input_shape).astype(
np.float32))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size, col_window_size, 1]
padding = "VALID"
output_tensor = nn_ops.avg_pool(input_tensor, window_size,
stride_size, padding)
output_data = self.evaluate(output_tensor)
num_elements = 1
for dim_size in output_data.shape:
num_elements *= dim_size
output_backprop = (self._PRNG.rand(num_elements) *
1000).reshape(output_data.shape)
input_backprop_tensor = gen_nn_ops.avg_pool_grad(
input_tensor.get_shape(), output_backprop, window_size,
stride_size, padding)
input_backprop = self.evaluate(input_backprop_tensor)
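              # Non-overlapping windows tile the input exactly, so the
              # cumulative pooling sequences are plain multiples of the
              # window size.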
row_seq = list(range(0, num_rows + 1, row_window_size))
col_seq = list(range(0, num_cols + 1, col_window_size))
fap_input_backprop_tensor = gen_nn_ops.fractional_avg_pool_grad(
input_tensor.get_shape(),
output_backprop,
row_seq,
col_seq,
overlapping=False)
fap_input_backprop = self.evaluate(fap_input_backprop_tensor)
self.assertShapeEqual(input_backprop, fap_input_backprop_tensor)
self.assertAllClose(input_backprop, fap_input_backprop)
def testDirectUseOverlapping(self):
for num_batches in [1, 3]:
for row_window_size in [2, 5]:
for col_window_size in [2, 4]:
num_rows = (row_window_size - 1) * 5 + 1
num_cols = (col_window_size - 1) * 7 + 1
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.cached_session() as _:
input_tensor = constant_op.constant(
self._GenerateRandomInputTensor(input_shape).astype(
np.float32))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size - 1, col_window_size - 1, 1]
padding = "VALID"
output_tensor = nn_ops.avg_pool(input_tensor, window_size,
stride_size, padding)
output_data = self.evaluate(output_tensor)
num_elements = 1
for dim_size in output_data.shape:
num_elements *= dim_size
output_backprop = (self._PRNG.rand(num_elements) *
1000).reshape(output_data.shape)
input_backprop_tensor = gen_nn_ops.avg_pool_grad(
input_tensor.get_shape(), output_backprop, window_size,
stride_size, padding)
input_backprop = self.evaluate(input_backprop_tensor)
row_seq = list(range(0, num_rows, row_window_size - 1))
col_seq = list(range(0, num_cols, col_window_size - 1))
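              # range() stops one step short of the edge; bump the final
              # boundary so the last overlapping region reaches the last
              # row/column, matching the avg_pool windows above.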
row_seq[-1] += 1
col_seq[-1] += 1
fap_input_backprop_tensor = gen_nn_ops.fractional_avg_pool_grad(
input_tensor.get_shape(),
output_backprop,
row_seq,
col_seq,
overlapping=True)
fap_input_backprop = self.evaluate(fap_input_backprop_tensor)
self.assertShapeEqual(input_backprop, fap_input_backprop_tensor)
self.assertAllClose(input_backprop, fap_input_backprop)
@test_util.run_deprecated_v1
def testAllInputOptionsThroughGradientError(self):
input_shape = (1, 7, 13, 1)
input_data = self._GenerateRandomInputTensor(input_shape)
pooling_ratio = [1, math.sqrt(2), math.sqrt(3), 1]
for pseudo_random in True, False:
for overlapping in True, False:
with self.cached_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool_v2(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
seed=self._SEED)
output_data = self.evaluate(output_tensor)
output_shape = output_data.shape
          # error_margin and delta settings are similar to avg_pool_grad's.
error_margin = 1e-4
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
@test_util.run_deprecated_v1
def testDifferentTensorShapesThroughGradientError(self):
pseudo_random = True
overlapping = True
pooling_ratio = [1, math.sqrt(3), math.sqrt(2), 1]
for num_batches in [1, 2]:
for num_rows in [5, 13]:
for num_cols in [5, 11]:
for num_channels in [1, 3]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
input_data = self._GenerateRandomInputTensor(input_shape)
with self.cached_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool_v2(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
seed=self._SEED)
output_data = self.evaluate(output_tensor)
output_shape = output_data.shape
              # error_margin and delta settings are similar to avg_pool_grad's.
error_margin = 1e-4
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
@test_util.run_deprecated_v1
def testLargePoolingRatioThroughGradientError(self):
input_shape = (1, 17, 23, 1)
input_data = self._GenerateRandomInputTensor(input_shape)
pooling_ratio = (1, math.sqrt(13), math.sqrt(7), 1)
output_shape = [int(a / b) for a, b in zip(input_shape, pooling_ratio)]
overlapping = True
pseudo_random = False
with self.cached_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool_v2(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
seed=self._SEED)
      # error_margin and delta settings are similar to avg_pool_grad's.
error_margin = 1e-4
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.clip_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ClipTest(test.TestCase):
def DISABLED_testClipByValueGradient(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
outputs_1 = clip_ops.clip_by_value(inputs, 0.5, 3.5)
min_val = constant_op.constant([0.5, 0.5, 0.5, 0.5], dtype=dtypes.float32)
max_val = constant_op.constant([3.5, 3.5, 3.5, 3.5], dtype=dtypes.float32)
outputs_2 = clip_ops.clip_by_value(inputs, min_val, max_val)
with self.cached_session():
error_1 = gradient_checker.compute_gradient_error(inputs, [4], outputs_1,
[4])
self.assertLess(error_1, 1e-4)
error_2 = gradient_checker.compute_gradient_error(inputs, [4], outputs_2,
[4])
self.assertLess(error_2, 1e-4)
# ClipByValue test
def testClipByValue(self):
with self.session(use_gpu=True):
x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
np_ans = [[-4.4, 2.0, 3.0], [4.0, 4.4, 4.4]]
clip_value = 4.4
ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# [Tensor, Scalar, Scalar]
def DISABLED_testClipByValue0Type(self):
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int8,
dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.uint16
]:
with self.cached_session(use_gpu=True):
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[2, 2, 3], [4, 4, 4]]
clip_value_min = 2
clip_value_max = 4
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# [Tensor, Tensor, Scalar]
def DISABLED_testClipByValue1Type(self):
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int8,
dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.uint16
]:
with self.cached_session(use_gpu=True):
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[2, 2, 3], [4, 4, 4]]
clip_value_min = constant_op.constant(
[2, 2, 2, 3, 3, 3], shape=[2, 3], dtype=dtype)
clip_value_max = 4
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# [Tensor, Scalar, Tensor]
def DISABLED_testClipByValue2Type(self):
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int8,
dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.uint16
]:
with self.cached_session(use_gpu=True):
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[4, 4, 4], [4, 5, 6]]
clip_value_min = 4
clip_value_max = constant_op.constant(
[6, 6, 6, 6, 6, 6], shape=[2, 3], dtype=dtype)
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# [Tensor, Tensor, Tensor]
def DISABLED_testClipByValue3Type(self):
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int8,
dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.uint16
]:
with self.cached_session(use_gpu=True):
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[2, 2, 3], [5, 5, 6]]
clip_value_min = constant_op.constant(
[2, 2, 2, 5, 5, 5], shape=[2, 3], dtype=dtype)
clip_value_max = constant_op.constant(
[5, 5, 5, 7, 7, 7], shape=[2, 3], dtype=dtype)
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByValueBadShape(self):
with self.session(use_gpu=True):
x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3, 1])
# Use a nonsensical shape.
clip = constant_op.constant([1.0, 2.0])
with self.assertRaises(ValueError):
_ = clip_ops.clip_by_value(x, -clip, clip)
with self.assertRaises(ValueError):
_ = clip_ops.clip_by_value(x, 1.0, clip)
def testClipByValueNonFinite(self):
# TODO(b/78016351): Enable test on GPU once the bug is fixed.
with self.cached_session():
x = constant_op.constant([float('NaN'), float('Inf'), -float('Inf')])
np_ans = [float('NaN'), 4.0, -4.0]
clip_value = 4.0
ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def _testClipIndexedSlicesByValue(self, values, indices, shape,
clip_value_min, clip_value_max, expected):
with self.session(use_gpu=True) as sess:
values = constant_op.constant(values)
indices = constant_op.constant(indices)
shape = constant_op.constant(shape)
# IndexedSlices mode
      indexed_slices = ops.IndexedSlices(values, indices, shape)
      clipped = clip_ops.clip_by_value(indexed_slices, clip_value_min,
clip_value_max)
# clipped should be IndexedSlices
self.assertIsInstance(clipped, ops.IndexedSlices)
self.assertAllClose(clipped.values, expected)
def testClipByValueWithIndexedSlicesClipped(self):
values = [[[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]],
[[0.0, 2.0, 0.0], [0.0, 0.0, -1.0]]]
indices = [2, 6]
shape = [10, 2, 3]
# [-2.0, 2.0]
self._testClipIndexedSlicesByValue(values, indices, shape, -2.0, 2.0,
[[[-2.0, 0.0, 0.0], [2.0, 0.0, 0.0]],
[[0.0, 2.0, 0.0], [0.0, 0.0, -1.0]]])
# [1.0, 2.0]
self._testClipIndexedSlicesByValue(values, indices, shape, 1.0, 2.0,
[[[1.0, 1.0, 1.0], [2.0, 1.0, 1.0]],
[[1.0, 2.0, 1.0], [1.0, 1.0, 1.0]]])
# [-2.0, -1.0]
self._testClipIndexedSlicesByValue(
values, indices, shape, -2.0, -1.0,
[[[-2.0, -1.0, -1.0], [-1.0, -1.0, -1.0]],
[[-1.0, -1.0, -1.0], [-1.0, -1.0, -1.0]]])
# ClipByNorm tests
def testClipByNormClipped(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 0.0]]
clip_norm = 4.0
      ans = clip_ops.clip_by_norm(x, clip_norm)
      tf_ans = self.evaluate(ans)
      ans = clip_ops.clip_by_norm(x, constant_op.constant(clip_norm))
      tf_ans_tensor = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
self.assertAllClose(np_ans, tf_ans_tensor)
@test_util.run_deprecated_v1
def testClipByNormGradientZeros(self):
with self.session(use_gpu=True):
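      # Below the threshold clip_by_norm acts as the identity, so the
      # gradient at x = 0 should be all ones (in particular, not NaN from
      # differentiating the norm at zero).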
x = array_ops.zeros([3])
b = clip_ops.clip_by_norm(x, 1.)
grad, = gradients_impl.gradients(b, x)
self.assertAllEqual(grad.eval(), [1., 1., 1.])
def testClipByNormBadShape(self):
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3, 1])
# Use a nonsensical shape.
clip = constant_op.constant([1.0, 2.0])
with self.assertRaises(ValueError):
_ = clip_ops.clip_by_norm(x, clip)
def testClipByNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
clip_norm = 6.0
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByNormZero(self):
# No norm clipping when norm = 0
with self.session(use_gpu=True):
x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
clip_norm = 6.0
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByNormClippedWithDim0(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[:, 0] = sqrt(3^2 + 4^2) = 5, x[:, 2] = 3
np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 3.0]]
clip_norm = 4.0
ans = clip_ops.clip_by_norm(x, clip_norm, [0])
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByNormClippedWithDim1(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0], [3.2, 0.0, 2.4]]
clip_norm = 4.0
ans = clip_ops.clip_by_norm(x, clip_norm, [1])
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByNormNotClippedWithAxes(self):
# No norm clipping when clip_norm >= 5
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 3.0]]
clip_norm = 6.0
ans = clip_ops.clip_by_norm(x, clip_norm, [1])
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# ClipByGlobalNorm tests
@test_util.run_deprecated_v1
def testClipByGlobalNormClipped(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = clip_ops.clip_by_global_norm((x0, x1), clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
@test_util.run_deprecated_v1
def testClipByGlobalNormClippedTensor(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = constant_op.constant(4.0)
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = clip_ops.clip_by_global_norm((x0, x1), clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
@test_util.run_deprecated_v1
def testClipByGlobalNormSupportsNone(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = clip_ops.clip_by_global_norm((x0, None, x1, None), clip_norm)
self.assertTrue(ans[1] is None)
self.assertTrue(ans[3] is None)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[2].eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
@test_util.run_deprecated_v1
def testClipByGlobalNormWithIndexedSlicesClipped(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = ops.IndexedSlices(
constant_op.constant([1.0, -2.0]), constant_op.constant([3, 4]))
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].values.eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormPreservesDenseShape(self):
dense_shape = (1,)
slices = ops.IndexedSlices(
constant_op.constant([1.0]),
constant_op.constant([0]),
dense_shape=dense_shape)
ans, _ = clip_ops.clip_by_global_norm([slices], 1.0)
modified_slices = ans[0]
self.assertEqual(dense_shape, slices.dense_shape)
self.assertEqual(dense_shape, modified_slices.dense_shape)
@test_util.run_deprecated_v1
def testClipByGlobalNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.session(use_gpu=True):
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
np_ans_0 = [[-2.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
np_ans_1 = [1.0, -2.0]
clip_norm = 6.0
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
@test_util.run_deprecated_v1
def testClipByGlobalNormZero(self):
# No norm clipping when norm = 0
with self.session(use_gpu=True):
x0 = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([0.0, 0.0])
# Norm = 0, no changes
np_ans_0 = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
np_ans_1 = [0.0, 0.0]
clip_norm = 6.0
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 0.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
@test_util.run_deprecated_v1
def testClipByGlobalNormInf(self):
# Expect all NaNs when global norm is inf.
with self.session(use_gpu=True):
x0 = constant_op.constant([-2.0, 0.0, np.inf, 4.0, 0.0, 0.0],
shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
clip_norm = 6.0
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = self.evaluate(norm)
self.assertAllEqual(tf_norm, float('inf'))
self.assertAllEqual(tf_ans_1, np.full([2, 3], float('nan')))
self.assertAllEqual(tf_ans_2, np.full([2], float('nan')))
def testClipByAverageNormClipped(self):
    # Norm clipping when clip_norm < average norm (0.83333333)
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
clip_norm = 0.8
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormClippedTensor(self):
    # Norm clipping when clip_norm < average norm (0.83333333)
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
clip_norm = constant_op.constant(0.8)
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormNotClipped(self):
    # No norm clipping when clip_norm >= average norm (0.83333333)
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
clip_norm = 0.9
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormZero(self):
    # No norm clipping when the average norm = 0
with self.session(use_gpu=True):
x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Average norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
clip_norm = 0.9
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormReplacedWithClipByNorm(self):
# Check clip_by_average_norm(t) is the same as
# clip_by_norm(t, clip_norm * tf.compat.v1.to_float(tf.size(t)))
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
# expected answer [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
clip_norm = constant_op.constant(0.8)
with_norm = clip_ops.clip_by_average_norm(x, clip_norm)
without_norm = clip_ops.clip_by_norm(
x, clip_norm * math_ops.cast(array_ops.size(x), dtypes.float32))
clip_by_average_norm_ans = self.evaluate(with_norm)
clip_by_norm_ans = self.evaluate(without_norm)
self.assertAllClose(clip_by_average_norm_ans, clip_by_norm_ans)
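  # Why the equivalence above holds (a sketch, from the definition
  # average_norm(t) = l2norm(t) / size(t)): clipping the average norm to c
  # uses scale = c / max(norm / size, c), which is algebraically the same
  # as clip_by_norm's scale (c * size) / max(norm, c * size).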
@test_util.run_deprecated_v1
def testClipByValueEmptyTensor(self):
# Test case for GitHub issue 19337
zero = array_ops.placeholder(dtype=dtypes.float32, shape=None)
x = clip_ops.clip_by_value(zero, zero, zero)
y = clip_ops.clip_by_value(zero, 1.0, 1.0)
z = clip_ops.clip_by_value(zero, zero, 1.0)
w = clip_ops.clip_by_value(zero, 1.0, zero)
with self.session(use_gpu=True) as sess:
sess.run([x, y, z, w], feed_dict={zero: np.zeros((7, 0))})
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/clip_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for the ops to generate and execute vocab remapping."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_checkpoint_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import saver
FLAGS = flags.FLAGS
class GenerateVocabRemappingTest(test.TestCase):
"""Tests for the generate_vocab_remapping() method."""
def setUp(self):
self.new_vocab_file = os.path.join(self.get_temp_dir(),
'keyword_shifted.txt')
with open(self.new_vocab_file, 'w') as f:
f.write('\n'.join(['MISSING', 'knitting', 'eminem']) + '\n')
self.old_vocab_file = os.path.join(self.get_temp_dir(),
'keyword.txt')
with open(self.old_vocab_file, 'w') as f:
f.write('\n'.join(['knitting', 'eminem', 'MISSING']) + '\n')
@test_util.run_deprecated_v1
def test_generate_remapping_with_no_vocab_changes(self):
"""Tests where vocab does not change at all."""
remapping, num_present = gen_checkpoint_ops.generate_vocab_remapping(
new_vocab_file=self.old_vocab_file,
old_vocab_file=self.old_vocab_file,
num_new_vocab=3,
new_vocab_offset=0)
expected_remapping = range(0, 3)
expected_num_present = 3
with self.cached_session():
self.assertAllEqual(expected_remapping, self.evaluate(remapping))
self.assertAllEqual(expected_num_present, self.evaluate(num_present))
def test_generate_remapping_with_shifted_vocab(self):
"""Tests where vocab is the same, but shifted / ordered differently."""
remapping, num_present = gen_checkpoint_ops.generate_vocab_remapping(
new_vocab_file=self.new_vocab_file,
old_vocab_file=self.old_vocab_file,
num_new_vocab=3,
new_vocab_offset=0)
expected_remapping = [2, 0, 1]
expected_num_present = 3
with self.cached_session():
self.assertAllEqual(expected_remapping, self.evaluate(remapping))
self.assertAllEqual(expected_num_present, self.evaluate(num_present))
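  # Rough reading of the result, assuming generate_vocab_remapping returns,
  # for each new-vocab id, the matching old-vocab id (or -1 when absent):
  #   new[0] = 'MISSING'  -> old id 2
  #   new[1] = 'knitting' -> old id 0
  #   new[2] = 'eminem'   -> old id 1
  # which is exactly expected_remapping = [2, 0, 1].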
def test_generate_remapping_with_offset(self):
"""Tests offset and num_new_vocab logic."""
remapping, num_present = gen_checkpoint_ops.generate_vocab_remapping(
new_vocab_file=self.new_vocab_file,
old_vocab_file=self.old_vocab_file,
num_new_vocab=1,
new_vocab_offset=1)
expected_remapping = [0]
expected_num_present = 1
with self.cached_session():
self.assertAllEqual(expected_remapping, self.evaluate(remapping))
self.assertAllEqual(expected_num_present, self.evaluate(num_present))
def test_generate_remapping_with_old_vocab_size(self):
"""Tests where old_vocab_size is specified."""
remapping, num_present = gen_checkpoint_ops.generate_vocab_remapping(
new_vocab_file=self.new_vocab_file,
old_vocab_file=self.old_vocab_file,
num_new_vocab=3,
new_vocab_offset=0,
# Old vocabulary becomes ['knitting', 'eminem'].
old_vocab_size=2)
expected_remapping = [-1, 0, 1]
expected_num_present = 2
with self.cached_session():
self.assertAllEqual(expected_remapping, self.evaluate(remapping))
self.assertAllEqual(expected_num_present, self.evaluate(num_present))
class LoadAndRemapMatrixTest(test.TestCase):
"""Tests for the load_and_remap_matrix() op."""
def setUp(self):
ops.reset_default_graph()
self.old_num_rows = 5
self.old_num_cols = 16
self.matrix_value = np.reshape(
range(0, self.old_num_rows * self.old_num_cols), (self.old_num_rows,
self.old_num_cols))
with variable_scope.variable_scope('some_scope'):
matrix = variable_scope.get_variable(
'matrix',
dtype=dtypes.float32,
initializer=constant_op.constant(
self.matrix_value, dtype=dtypes.float32))
self.old_tensor_name = 'some_scope/matrix'
save = saver.Saver([matrix])
with self.cached_session() as sess:
self.evaluate(variables.global_variables_initializer())
self.bundle_file = os.path.join(test.get_temp_dir(), 'bundle_checkpoint')
save.save(sess, self.bundle_file)
def test_load_and_remap_no_missing(self):
"""Tests the op's load and remap where there are no missing entries."""
# No column remapping, new weight matrix has second row, then first row.
row_remapping = [1, 0]
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=row_remapping,
col_remapping=[],
initializing_values=[],
num_rows=2,
num_cols=self.old_num_cols)
with self.cached_session():
self.assertAllClose(self.matrix_value[row_remapping],
self.evaluate(remapped_matrix))
# No row remapping, new weight matrix has third col, then first col.
row_remapping = list(range(self.old_num_rows))
col_remapping = [2, 0]
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=row_remapping,
col_remapping=col_remapping,
initializing_values=[],
num_rows=len(row_remapping),
num_cols=len(col_remapping))
with self.cached_session():
self.assertAllClose(self.matrix_value[row_remapping][:, col_remapping],
self.evaluate(remapped_matrix))
# Both row and column remappings.
row_remapping = [1, 0, 4]
col_remapping = [1, 15]
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=row_remapping,
col_remapping=col_remapping,
initializing_values=[],
num_rows=len(row_remapping),
num_cols=len(col_remapping))
with self.cached_session():
self.assertAllClose(self.matrix_value[row_remapping][:, col_remapping],
self.evaluate(remapped_matrix))
def test_load_and_remap_with_init(self):
"""Tests the op's load and remap where there are missing entries."""
init_val = 42
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=[2, -1, 0],
col_remapping=[1, -1],
initializing_values=[init_val] * 4,
num_rows=3,
num_cols=2)
expected_remapped_matrix = np.reshape(
[33, init_val, init_val, init_val, 1, init_val], [3, 2])
with self.cached_session():
self.assertAllClose(expected_remapped_matrix,
self.evaluate(remapped_matrix))
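  # Worked example for the expected matrix above (assuming -1 entries are
  # filled from initializing_values): the old matrix is
  # np.reshape(range(80), (5, 16)), so
  #   out[0, 0] <- old[2, 1] = 2 * 16 + 1 = 33
  #   out[2, 0] <- old[0, 1] = 1
  # and the remaining four positions take init_val = 42.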
def test_load_and_remap_all_missing_rows(self):
"""Tests when all the rows are missing and need to be initialized."""
num_rows = 7
initializing_values = [42] * num_rows * self.old_num_cols
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=[-1] * num_rows,
col_remapping=[],
initializing_values=initializing_values,
num_rows=num_rows,
num_cols=self.old_num_cols)
with self.cached_session():
self.assertAllClose(
np.reshape(initializing_values, (num_rows, self.old_num_cols)),
self.evaluate(remapped_matrix))
def test_load_and_remap_all_missing_rows_and_cols(self):
"""Tests when all the rows & cols are missing and need to be initialized."""
num_rows = 7
num_cols = 4
initializing_values = [42] * num_rows * num_cols
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=[-1] * num_rows,
col_remapping=[-1] * num_cols,
initializing_values=initializing_values,
num_rows=num_rows,
num_cols=num_cols)
with self.cached_session():
self.assertAllClose(
np.reshape(initializing_values, (num_rows, num_cols)),
self.evaluate(remapped_matrix))
@test_util.run_deprecated_v1
def test_load_and_remap_invalid_remapping(self):
"""Tests that errors are raised when an ID maps to multiple new IDs.
(This should usually not happen when using public APIs).
"""
invalid_remapping = [1, 0, 0, 0, 1, 2]
# Invalid row remapping.
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=invalid_remapping,
col_remapping=[],
initializing_values=[],
num_rows=len(invalid_remapping),
num_cols=self.old_num_cols)
with self.cached_session(), self.assertRaises(errors.UnimplementedError):
self.evaluate(remapped_matrix)
# Invalid column remapping.
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=list(range(self.old_num_rows)),
col_remapping=invalid_remapping,
initializing_values=[],
num_rows=self.old_num_rows,
num_cols=len(invalid_remapping))
with self.cached_session(), self.assertRaises(errors.UnimplementedError):
self.evaluate(remapped_matrix)
@test_util.run_deprecated_v1
def test_load_and_remap_incorrect_initializing_values(self):
"""Tests that errors are raised with incorrect number of init values."""
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=[2, -1, 0],
col_remapping=[1, -1],
# Too few initializing values - there should be 4. For some reason,
# initializing_values must contain no element (instead of 3 or fewer) to
# ensure that a seg fault would reliably occur if the check raising the
# InvalidArgumentError were not present.
initializing_values=[],
num_rows=3,
num_cols=2)
with self.cached_session(), self.assertRaises(errors.InvalidArgumentError):
self.evaluate(remapped_matrix)
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=[2, -1, 0],
col_remapping=[1, -1],
# Too many initializing values - there should be 4.
initializing_values=[0] * 5,
num_rows=3,
num_cols=2)
with self.cached_session(), self.assertRaises(errors.InvalidArgumentError):
self.evaluate(remapped_matrix)
class LoadAndRemapMatrixWithMaxRowsTest(test.TestCase):
"""Tests for the load_and_remap_matrix() op.
(Specifically focused on the max_rows_in_memory arg and its effects on
TensorBundle's BundleReader and TensorSlice logic).
"""
def _test_loading_variable_with_max_rows(self, np_value, partitioner,
max_rows_in_memory):
"""Helper function for various tests using max_rows_in_memory."""
ops.reset_default_graph()
old_tensor_name = 'matrix_to_load_and_remap'
matrix = variable_scope.get_variable(
old_tensor_name,
dtype=dtypes.float32,
initializer=constant_op.constant(np_value, dtype=dtypes.float32),
partitioner=partitioner)
with self.cached_session() as sess:
ckpt_path = os.path.join(test.get_temp_dir(), 'temp_ckpt')
save = saver.Saver([matrix])
self.evaluate(variables.global_variables_initializer())
save.save(sess, ckpt_path)
num_rows, num_cols = np_value.shape
# Tests loading the entire tensor (except reversed).
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
# Simply reverses the rows of the matrix.
row_remapping=list(range(num_rows - 1, -1, -1)),
col_remapping=[],
initializing_values=[],
num_rows=num_rows,
num_cols=num_cols,
max_rows_in_memory=max_rows_in_memory)
self.assertAllClose(np_value[::-1], self.evaluate(remapped_matrix))
# Tests loading the tensor (except for the first and last rows), with
# uninitialized values. Requires num_rows to be at least 3 since we're
# skipping the first and last rows.
self.assertGreater(num_rows, 2)
prefix_rows = 2
suffix_rows = 3
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
# Reverses the rows of the matrix, then prepends and appends
# uninitialized rows.
row_remapping=([-1] * prefix_rows + list(range(1, num_rows - 1)) +
[-1] * suffix_rows),
col_remapping=[],
initializing_values=[42] * (prefix_rows + suffix_rows) * num_cols,
num_rows=num_rows - 2 + prefix_rows + suffix_rows,
num_cols=num_cols,
max_rows_in_memory=max_rows_in_memory)
self.assertAllClose(
np.vstack([
np.tile(42, [prefix_rows, num_cols]), np_value[1:-1],
np.tile(42, [suffix_rows, num_cols])
]), self.evaluate(remapped_matrix))
# Tests when everything is taken from initializing_values.
new_rows = 7
initializing_values = [42] * new_rows * num_cols
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
# Nothing is loaded from the old tensor.
row_remapping=[-1] * new_rows,
col_remapping=[],
initializing_values=initializing_values,
num_rows=new_rows,
num_cols=num_cols,
max_rows_in_memory=max_rows_in_memory)
self.assertAllClose(
np.reshape(initializing_values, (new_rows, num_cols)),
self.evaluate(remapped_matrix))
@test_util.run_deprecated_v1
def test_loading_rows_divisible_by_max_rows(self):
"""Tests loading normal var when rows are evenly divisible by max_rows."""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 36)), (9, 4)),
partitioner=None,
# 9 is evenly divisible by 3.
max_rows_in_memory=3)
@test_util.run_deprecated_v1
def test_loading_rows_not_divisible_by_max_rows(self):
"""Tests loading normal var when rows aren't divisible by max_rows."""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 36)), (9, 4)),
partitioner=None,
# 9 is not evenly divisible by 4.
max_rows_in_memory=4)
@test_util.run_deprecated_v1
def test_loading_rows_less_than_max_rows(self):
"""Tests loading normal var as a single slice.
(When the specified max_rows_in_memory is larger than the number of rows)
"""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 36)), (9, 4)),
partitioner=None,
# 10 > 9.
max_rows_in_memory=10)
@test_util.run_deprecated_v1
def test_loading_no_max_rows(self):
"""Tests loading normal var as a single slice with no valid max_rows."""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 18)), (6, 3)),
partitioner=None,
max_rows_in_memory=-1)
@test_util.run_deprecated_v1
def test_loading_partitions_equals_max_rows(self):
"""Tests loading partitioned var sliced on partition boundary."""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 36)), (9, 4)),
partitioner=partitioned_variables.fixed_size_partitioner(3),
        # With a tensor of shape [9, 4] and 3 partitions, each partition has
        # exactly 3 rows.
max_rows_in_memory=3)
@test_util.run_deprecated_v1
def test_loading_partitions_greater_than_max_rows(self):
"""Tests loading partitioned var with more slices than partitions."""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 36)), (9, 4)),
partitioner=partitioned_variables.fixed_size_partitioner(3),
# Even though each partition has 3 rows, we'll only load the tensor one
# row at a time.
max_rows_in_memory=1)
@test_util.run_deprecated_v1
def test_loading_partitions_less_than_max_rows(self):
"""Tests loading partitioned var as a single slice.
(When the specified max_rows_in_memory is larger than the number of rows)
"""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 36)), (9, 4)),
partitioner=partitioned_variables.fixed_size_partitioner(3),
max_rows_in_memory=10)
@test_util.run_deprecated_v1
def test_loading_partitions_no_max_rows(self):
"""Tests loading partitioned var as single slice with no valid max_rows."""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 36)), (9, 4)),
partitioner=partitioned_variables.fixed_size_partitioner(3),
max_rows_in_memory=-1)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/checkpoint_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TopK op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class TopKTest(test.TestCase):
def _validateTopK(self,
inputs,
k,
expected_values,
expected_indices,
sorted=True): # pylint: disable=redefined-builtin
np_expected_values = np.array(expected_values)
np_expected_indices = np.array(expected_indices)
with self.cached_session(use_gpu=True) as sess:
values_op, indices_op = nn_ops.top_k(inputs, k, sorted=sorted)
values, indices = self.evaluate([values_op, indices_op])
self.assertShapeEqual(np_expected_values, values_op)
self.assertShapeEqual(np_expected_indices, indices_op)
if sorted:
self.assertAllClose(np_expected_values, values)
# Do some special casing of equality of indices: if indices
# are not the same, but values are floating type, ensure that
# the values are within epsilon of each other.
if not np.issubdtype(np_expected_values.dtype, np.floating):
# Values are not floating point type; check indices exactly
self.assertAllEqual(np_expected_indices, indices)
else:
# Values are floating point; indices may be swapped for
# values near each other.
indices_not_equal = np_expected_indices != indices
if np.any(indices_not_equal):
values_unsure = values[indices_not_equal]
            expected_values_unsure = np_expected_values[indices_not_equal]
self.assertAllClose(expected_values_unsure, values_unsure)
else:
np_inputs = np.array(inputs)
# Check that the indices are valid.
for result_index, src_index in np.ndenumerate(indices):
value = values[result_index]
expected_value = np_inputs[result_index[0], src_index]
np.testing.assert_almost_equal(value, expected_value)
# Check that if two elements are equal, the lower-index element appears
# first.
shape = values.shape
for batch_index in range(shape[0]):
for index in range(shape[1] - 1):
if np.isclose(values[batch_index, index],
values[batch_index, index + 1]):
self.assertLess(indices[batch_index, index],
indices[batch_index, index + 1])
# Now check the results, ignoring order.
self.assertAllEqual(np.sort(np_expected_indices), np.sort(indices))
self.assertAllClose(np.sort(np_expected_values), np.sort(values))
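  # Reminder of the contract being validated (as exercised by these tests):
  # nn_ops.top_k returns the k largest entries along the last dimension and
  # their indices; with sorted=True values are descending and ties keep the
  # lower index first, e.g.
  #   top_k([0.1, 0.3, 0.3, 0.2], k=2) -> values [0.3, 0.3], indices [1, 2]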
def testTop1(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
self._validateTopK(inputs, 1, [[0.4], [0.3]], [[3], [1]])
def testTop2(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.4, 0.2]]
self._validateTopK(inputs, 2, [[0.4, 0.3], [0.4, 0.3]], [[3, 1], [2, 1]])
def testTop3(self):
k = 5
inputs = np.random.permutation(np.linspace(0, 100, 6140, dtype=np.float64))
indices = np.argsort(-inputs)[:k]
values = -np.sort(-inputs)[:k]
self._validateTopK(inputs, k, values, indices)
def _testLargeSort(self, dtype):
b = 10
n = 5000
inputs = np.random.permutation(
np.linspace(0, 100, b * n, dtype=dtype)).reshape(b, n)
indices = np.argsort(-inputs, axis=1)
values = -np.sort(-inputs, axis=1)
self._validateTopK(inputs, n, values, indices)
def testLargeSort(self):
self._testLargeSort(np.float32)
self._testLargeSort(np.float16)
def _testLargeTopK(self, dtype):
b = 10
n = 5000
k = n - 1
inputs = np.random.permutation(
np.linspace(0, 100, b * n, dtype=dtype)).reshape(b, n)
indices = np.argsort(-inputs, axis=1)[:, :k]
values = -np.sort(-inputs, axis=1)[:, :k]
self._validateTopK(inputs, k, values, indices)
def testLargeTopK(self):
self._testLargeTopK(np.float32)
self._testLargeTopK(np.float16)
def _testMediumTopK(self, dtype):
b = 5
n = 500
k = 50
inputs = np.random.permutation(
np.linspace(0, 100, b * n, dtype=dtype)).reshape(b, n)
indices = np.argsort(-inputs, axis=1)[:, :k]
values = -np.sort(-inputs, axis=1)[:, :k]
self._validateTopK(inputs, k, values, indices)
def testMediumTopK(self):
self._testMediumTopK(np.float32)
self._testMediumTopK(np.float16)
def testStableSort(self):
b = 5
n = 500
for k in [1, 5, 50, 500]:
# Lots of repeated integers taking values in [0, 3]
inputs = np.random.permutation(
np.linspace(0, 3, b * n, dtype=np.int32)).reshape(b, n)
# Use mergesort, a stable sort, to get the indices.
indices = np.argsort(-inputs, axis=1, kind="mergesort")[:, :k]
values = -np.sort(-inputs, axis=1)[:, :k]
self._validateTopK(inputs, k, values, indices)
def testTopAll(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
self._validateTopK(inputs, 4, [[0.4, 0.3, 0.2, 0.1], [0.3, 0.3, 0.2, 0.1]],
[[3, 1, 2, 0], [1, 2, 3, 0]])
def testTop3Unsorted(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.4, 0.3, 0.2]]
self._validateTopK(
inputs,
3, [[0.2, 0.3, 0.4], [0.2, 0.4, 0.3]], [[2, 1, 3], [3, 1, 2]],
sorted=False)
def testTop3Vector(self):
inputs = [3, 6, 15, 18, 6, 12, 1, 17, 3, 0, 4, 19, 1, 6]
self._validateTopK(inputs, 3, [19, 18, 17], [11, 3, 7])
def testTensorK(self):
inputs = [3, 6, 15, 18, 6, 12, 1, 17, 3, 0, 4, 19, 1, 6]
k = constant_op.constant(3)
self._validateTopK(inputs, k, [19, 18, 17], [11, 3, 7])
def testTop3ZeroRows(self):
inputs = np.zeros([0, 10], dtype=np.float32)
self._validateTopK(inputs, 3, np.zeros([0, 3], dtype=np.float32),
np.zeros([0, 3], dtype=np.int32))
@test_util.run_deprecated_v1
def testKNegative(self):
inputs = [[0.1, 0.2], [0.3, 0.4]]
with self.session(use_gpu=True):
k = array_ops.placeholder(dtypes.int32)
values, _ = nn_ops.top_k(inputs, k)
with self.assertRaisesOpError("Need k >= 0, got -7"):
values.eval(feed_dict={k: -7})
@test_util.run_deprecated_v1
def testKTooLarge(self):
inputs = [[0.1, 0.2], [0.3, 0.4]]
with self.assertRaisesRegexp(ValueError,
r"must have last dimension >= k = 4"):
nn_ops.top_k(inputs, 4)
@test_util.run_deprecated_v1
def testTopKGradients(self):
with self.session(use_gpu=True) as sess:
inputs = array_ops.placeholder(dtypes.float32, shape=[2, 5])
values, _ = nn_ops.top_k(inputs, 3)
grad = sess.run(
gradients_impl.gradients(
values, inputs, grad_ys=[[[1., 2., 3.], [4., 5., 6.]]]),
feed_dict={inputs: [[2., -1., 1000., 3., 4.],
[1., 5., 2., 4., 3.]]})[0]
self.assertEqual(
grad.tolist(), [[0., 0., 1., 3., 2.], [0., 4., 0., 5., 6.]])
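  # Sketch of why that gradient is expected: top_k's gradient scatters
  # grad_ys back to the positions of the selected inputs and leaves zeros
  # elsewhere. In row 0 the top-3 inputs sit at columns 2, 4, 3, so the
  # incoming [1., 2., 3.] lands there, giving [0., 0., 1., 3., 2.].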
class TopKBenchmark(test.Benchmark):
def benchmarkTopK(self):
for (m, n, p, use_gpu) in itertools.product(
[128],
[10, 100, 1000, 10000, 100000],
[0.001, 0.01, 0.5, 0.99, 1.0],
[False, True]):
k = int(p * n)
if k == 0:
continue
name = "m_%d_n_%d_k_%g_use_gpu_%s" % (m, n, k, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
x = random_ops.random_uniform((m, n))
v = resource_variable_ops.ResourceVariable(x)
op = nn_ops.top_k(v, k)
with session.Session() as sess:
v.initializer.run()
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
gb_processed_input = m * n / 1.0e9
throughput = gb_processed_input / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/topk_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Cholesky."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Different gradient implementations for benchmark purposes
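# All of the variants below appear to implement the same symmetrized
# expression, differing only in how l^{-1} is applied (explicit inverse vs.
# triangular solves), which is what the benchmark compares:
#   grad_A = 0.5 * (P + P^H),
#   P = l^{-H} @ ((l^{H} @ grad) * (tril(ones) - 0.5 * eye)) @ l^{-1}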
def _GradWithInverseL(l, l_inverse, grad):
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
grad_a = math_ops.matmul(
math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a))
return grad_a * 0.5
def TriAngSolveCompositeGrad(l, grad):
# Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}
# Compute ((l^{H} @ grad) * (tril(ones)-1/2*eye)) = middle
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
# Compute l^{-H} @ middle = z
l_inverse_middle = linalg_ops.matrix_triangular_solve(l, middle, adjoint=True)
# We need to compute z @ l^{-1}. With matrix_triangular_solve we
# actually compute l^{-H} @ z^{H} = grad. Since we later add grad^{H}
  # we can omit the conjugate transpose here.
z_h = math_ops.conj(array_ops.matrix_transpose(l_inverse_middle))
grad_a = linalg_ops.matrix_triangular_solve(l, z_h, adjoint=True)
grad_a += linalg.adjoint(grad_a)
return grad_a * 0.5
def MatrixInverseCompositeGrad(l, grad):
l_inverse = linalg_ops.matrix_inverse(l)
return _GradWithInverseL(l, l_inverse, grad)
def TriAngInvCompositeGrad(l, grad):
num_rows = array_ops.shape(l)[-1]
batch_shape = array_ops.shape(l)[:-2]
l_inverse = linalg_ops.matrix_triangular_solve(l,
linalg_ops.eye(
num_rows,
batch_shape=batch_shape,
dtype=l.dtype))
return _GradWithInverseL(l, l_inverse, grad)
class CholeskyOpTest(test.TestCase):
def _verifyCholeskyBase(self, sess, x, chol, verification):
chol_np, verification_np = self.evaluate([chol, verification])
self.assertAllClose(x, verification_np)
self.assertShapeEqual(x, chol)
# Check that the cholesky is lower triangular, and has positive diagonal
# elements.
if chol_np.shape[-1] > 0:
chol_reshaped = np.reshape(chol_np, (-1, chol_np.shape[-2],
chol_np.shape[-1]))
for chol_matrix in chol_reshaped:
self.assertAllClose(chol_matrix, np.tril(chol_matrix))
self.assertTrue((np.diag(chol_matrix) > 0.0).all())
def _verifyCholesky(self, x):
# Verify that LL^T == x.
with self.cached_session(use_gpu=True) as sess:
chol = linalg_ops.cholesky(x)
verification = math_ops.matmul(chol, chol, adjoint_b=True)
self._verifyCholeskyBase(sess, x, chol, verification)
def testBasic(self):
data = np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]])
for dtype in (np.float32, np.float64):
self._verifyCholesky(data.astype(dtype))
for dtype in (np.complex64, np.complex128):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyCholesky(complex_data)
def testBatch(self):
simple_array = np.array([[[1., 0.], [0., 5.]]]) # shape (1, 2, 2)
self._verifyCholesky(simple_array)
self._verifyCholesky(np.vstack((simple_array, simple_array)))
odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
self._verifyCholesky(np.vstack((odd_sized_array, odd_sized_array)))
# Generate random positive-definite matrices.
matrices = np.random.rand(10, 5, 5)
for i in xrange(10):
matrices[i] = np.dot(matrices[i].T, matrices[i])
self._verifyCholesky(matrices)
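    # (M.T @ M is symmetric positive semi-definite for any M, and almost
    # surely positive definite for a random dense M, so these are valid
    # Cholesky inputs; the complex case below uses M^H @ M for the same
    # reason.)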
# Generate random complex valued positive-definite matrices.
matrices = np.random.rand(10, 5, 5) + 1j * np.random.rand(10, 5, 5)
for i in xrange(10):
matrices[i] = np.dot(matrices[i].T.conj(), matrices[i])
self._verifyCholesky(matrices)
@test_util.run_deprecated_v1
def testNonSquareMatrix(self):
with self.assertRaises(ValueError):
linalg_ops.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
with self.assertRaises(ValueError):
linalg_ops.cholesky(
np.array([[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]]
]))
@test_util.run_v1_only("b/120545219")
def testWrongDimensions(self):
tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
# The below invalid Cholesky call returns an error with TF Classic and just
# returns NaNs with XLA.
@test_util.disable_xla("b/123337890")
def testNotInvertibleCPU(self):
    # Cholesky requires a symmetric (Hermitian) positive-definite input.
with self.session(use_gpu=True):
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Cholesky decomposition was not successful. The"
" input might not be valid."):
        # The matrix below is symmetric but not positive definite.
self._verifyCholesky(
np.array([[1., -1., 0.], [-1., 1., -1.], [0., -1., 1.]]))
def testEmpty(self):
self._verifyCholesky(np.empty([0, 2, 2]))
self._verifyCholesky(np.empty([2, 0, 0]))
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
matrix1 = math_ops.matmul(matrix1, matrix1, adjoint_a=True)
matrix2 = math_ops.matmul(matrix2, matrix2, adjoint_a=True)
c1 = linalg_ops.cholesky(matrix1)
c2 = linalg_ops.cholesky(matrix2)
c1_val, c2_val = self.evaluate([c1, c2])
self.assertAllClose(c1_val, c2_val)
class CholeskyGradTest(test.TestCase):
_backprop_block_size = 32
def getShapes(self, shapeList):
return ((elem, int(np.floor(1.2 * elem))) for elem in shapeList)
@test_util.run_deprecated_v1
def testSmallMatrices(self):
np.random.seed(0)
shapes = self.getShapes([1, 2, 10])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float32, dtypes_lib.float64))
@test_util.run_deprecated_v1
def testSmallMatricesComplex(self):
np.random.seed(0)
shapes = self.getShapes([1, 2, 10])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex64, dtypes_lib.complex128))
@test_util.run_deprecated_v1
def testOneBlockMatrices(self):
np.random.seed(0)
shapes = self.getShapes([self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes,
dtypes=(dtypes_lib.float32, dtypes_lib.float64),
scalarTest=True)
@test_util.run_deprecated_v1
def testTwoBlockMatrixFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float32,), scalarTest=True)
@test_util.run_deprecated_v1
def testTwoBlockMatrixDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float64,), scalarTest=True)
@test_util.run_v1_only("b/120545219")
def testTwoBlockMatrixComplexFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex64,), scalarTest=True)
@test_util.run_deprecated_v1
def testTwoBlockMatrixComplexDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex128,), scalarTest=True)
def runFiniteDifferences(self,
shapes,
dtypes=(dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.complex64, dtypes_lib.complex128),
scalarTest=False):
with self.session(use_gpu=True):
for shape in shapes:
for batch in False, True:
for dtype in dtypes:
if not scalarTest:
data = np.random.randn(shape[0], shape[1])
if dtype.is_complex:
data = data.astype(np.complex64)
data += 1j * np.random.randn(shape[0], shape[1])
x = constant_op.constant(data, dtype)
tensor = math_ops.matmul(
x, math_ops.conj(array_ops.transpose(x))) / shape[0]
else:
# This is designed to be a faster test for larger matrices.
data = np.random.randn()
if dtype.is_complex:
data = np.complex64(data)
data += 1j * np.random.randn()
x = constant_op.constant(data, dtype)
R = constant_op.constant(
np.random.randn(shape[0], shape[1]), dtype)
e = math_ops.multiply(R, x)
tensor = math_ops.matmul(
e, math_ops.conj(array_ops.transpose(e))) / shape[0]
# Inner-most matrices in tensor are positive definite.
if batch:
tensor = array_ops.tile(
array_ops.expand_dims(tensor, 0), [4, 1, 1])
y = linalg_ops.cholesky(tensor)
if scalarTest:
y = math_ops.reduce_mean(y)
error = gradient_checker.compute_gradient_error(
x, x._shape_as_list(), y, y._shape_as_list())
tf_logging.info("error = %f", error)
if dtype == dtypes_lib.float64:
self.assertLess(error, 1e-5)
elif dtype == dtypes_lib.complex128:
self.assertLess(error, 5e-5)
else:
self.assertLess(error, 5e-3)
class CholeskyBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(513, 2, 2),
(513, 8, 8),
(513, 256, 256),
(4, 513, 2, 2),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (
2.0 * n) + np.diag(np.ones(n).astype(np.float32))
return np.tile(matrix, batch_shape + (1, 1))
def benchmarkCholeskyOp(self):
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
l = linalg_ops.cholesky(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(
l,),
min_iters=25,
name="cholesky_cpu_{shape}".format(shape=shape))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/device:GPU:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
l = linalg_ops.cholesky(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(
l,),
min_iters=25,
name="cholesky_gpu_{shape}".format(shape=shape))
def benchmarkGradVariants(self):
def _BenchmarkGrad(grad_fn, name, device):
for shape in self.shapes:
matrix = self._GenerateMatrix(shape)
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device(device):
l = variables.Variable(np.linalg.cholesky(matrix))
grad_matrix = variables.Variable(
np.random.randn(*matrix.shape).astype(np.float32))
grad = grad_fn(l, grad_matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(
grad,),
min_iters=25,
name="{name}_{dev}_{shape}".format(
name=name, dev=grad.device, shape=shape))
if test.is_gpu_available(True):
_BenchmarkGrad(MatrixInverseCompositeGrad, "composite_matrix_inverse",
"/device:GPU:0")
_BenchmarkGrad(TriAngInvCompositeGrad, "composite_tri_ang_inverse",
"/device:GPU:0")
_BenchmarkGrad(TriAngSolveCompositeGrad, "composite_triangular_solve",
"/device:GPU:0")
_BenchmarkGrad(MatrixInverseCompositeGrad, "composite_matrix_inverse",
"/cpu:0")
_BenchmarkGrad(TriAngInvCompositeGrad, "composite_tri_ang_inverse",
"/cpu:0")
_BenchmarkGrad(TriAngSolveCompositeGrad, "composite_triangular_solve",
"/cpu:0")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/cholesky_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseConcat."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseConcatTest(test.TestCase):
def _SparseTensor_UnknownShape(self,
ind_shape=None,
val_shape=None,
shape_shape=None):
return sparse_tensor.SparseTensor(
array_ops.placeholder(
dtypes.int64, shape=ind_shape),
array_ops.placeholder(
dtypes.float32, shape=val_shape),
array_ops.placeholder(
dtypes.int64, shape=shape_shape))
def _SparseTensorValue_3x3(self):
# [ 1]
# [2 ]
# [3 4]
ind = np.array([[0, 2], [1, 0], [2, 0], [2, 2]])
val = np.array([1, 2, 3, 4])
shape = np.array([3, 3])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.float32), np.array(shape, np.int64))
def _SparseTensor_3x3(self):
return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_3x3())
def _SparseTensorValue_3x5(self):
# [ ]
# [ 1 ]
# [2 1 0]
ind = np.array([[1, 1], [2, 0], [2, 3], [2, 4]])
val = np.array([1, 2, 1, 0])
shape = np.array([3, 5])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.float32), np.array(shape, np.int64))
def _SparseTensor_3x5(self):
return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_3x5())
def _SparseTensor_3x2(self):
# [ ]
# [1 ]
# [2 ]
ind = np.array([[1, 0], [2, 0]])
val = np.array([1, 2])
shape = np.array([3, 2])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x3(self):
# [ 1 ]
# [1 2]
ind = np.array([[0, 1], [1, 0], [1, 2]])
val = np.array([1, 1, 2])
shape = np.array([2, 3])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x3x4(self):
ind = np.array([
[0, 0, 1],
[0, 1, 0], [0, 1, 2],
[1, 0, 3],
[1, 1, 1], [1, 1, 3],
[1, 2, 2]])
val = np.array([1, 10, 12, 103, 111, 113, 122])
shape = np.array([2, 3, 4])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_String3x3(self):
# [ a]
# [b ]
# [c d]
ind = np.array([[0, 2], [1, 0], [2, 0], [2, 2]])
val = np.array(["a", "b", "c", "d"])
shape = np.array([3, 3])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.string),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_String3x5(self):
# [ ]
# [ e ]
# [f g h]
ind = np.array([[1, 1], [2, 0], [2, 3], [2, 4]])
val = np.array(["e", "f", "g", "h"])
shape = np.array([3, 5])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.string),
constant_op.constant(shape, dtypes.int64))
def testConcat1(self):
with self.session(use_gpu=False) as sess:
# concat(A):
# [ 1]
# [2 ]
# [3 4]
for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
# Note that we ignore concat_dim in this case since we short-circuit the
# single-input case in python.
for concat_dim in (-2000, 1, 2000):
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a])
self.assertEqual(sp_concat.indices.get_shape(), [4, 2])
self.assertEqual(sp_concat.values.get_shape(), [4])
self.assertEqual(sp_concat.dense_shape.get_shape(), [2])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(concat_out.indices,
[[0, 2], [1, 0], [2, 0], [2, 2]])
self.assertAllEqual(concat_out.values, [1, 2, 3, 4])
self.assertAllEqual(concat_out.dense_shape, [3, 3])
def testConcat2(self):
with self.session(use_gpu=False) as sess:
# concat(A, B):
# [ 1 ]
# [2 1 ]
# [3 4 2 1 0]
for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
for sp_b in (self._SparseTensorValue_3x5(), self._SparseTensor_3x5()):
for concat_dim in (-1, 1):
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b])
self.assertEqual(sp_concat.indices.get_shape(), [8, 2])
self.assertEqual(sp_concat.values.get_shape(), [8])
self.assertEqual(sp_concat.dense_shape.get_shape(), [2])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(concat_out.indices, [[0, 2], [1, 0], [1, 4],
[2, 0], [2, 2], [2, 3],
[2, 6], [2, 7]])
self.assertAllEqual(concat_out.values, [1, 2, 1, 3, 4, 2, 1, 0])
self.assertAllEqual(concat_out.dense_shape, [3, 8])
def testConcatDim0(self):
with self.session(use_gpu=False) as sess:
# concat(A, D):
# [ 1]
# [2 ]
# [3 4]
# [ 1 ]
# [1 2]
sp_a = self._SparseTensor_3x3()
sp_d = self._SparseTensor_2x3()
for concat_dim in (-2, 0):
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_d])
self.assertEqual(sp_concat.indices.get_shape(), [7, 2])
self.assertEqual(sp_concat.values.get_shape(), [7])
self.assertEqual(sp_concat.dense_shape.get_shape(), [2])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(
concat_out.indices,
[[0, 2], [1, 0], [2, 0], [2, 2], [3, 1], [4, 0], [4, 2]])
self.assertAllEqual(concat_out.values, np.array([1, 2, 3, 4, 1, 1, 2]))
self.assertAllEqual(concat_out.dense_shape, np.array([5, 3]))
def testConcat3(self):
with self.session(use_gpu=False) as sess:
# concat(A, B, C):
# [ 1 ]
# [2 1 1 ]
# [3 4 2 1 0 2 ]
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x5()
sp_c = self._SparseTensor_3x2()
for concat_dim in (-1, 1):
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b, sp_c])
self.assertEqual(sp_concat.indices.get_shape(), [10, 2])
self.assertEqual(sp_concat.values.get_shape(), [10])
self.assertEqual(sp_concat.dense_shape.get_shape(), [2])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(concat_out.indices, [[0, 2], [1, 0], [1, 4], [1, 8],
[2, 0], [2, 2], [2, 3], [2, 6],
[2, 7], [2, 8]])
self.assertAllEqual(concat_out.values, [1, 2, 1, 1, 3, 4, 2, 1, 0, 2])
self.assertAllEqual(concat_out.dense_shape, [3, 10])
def testConcatNonNumeric(self):
with self.session(use_gpu=False) as sess:
# concat(A, B):
# [ a ]
# [b e ]
# [c d f g h]
sp_a = self._SparseTensor_String3x3()
sp_b = self._SparseTensor_String3x5()
for concat_dim in (-1, 1):
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b])
self.assertEqual(sp_concat.indices.get_shape(), [8, 2])
self.assertEqual(sp_concat.values.get_shape(), [8])
self.assertEqual(sp_concat.dense_shape.get_shape(), [2])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(
concat_out.indices,
[[0, 2], [1, 0], [1, 4], [2, 0], [2, 2], [2, 3], [2, 6], [2, 7]])
self.assertAllEqual(concat_out.values,
[b"a", b"b", b"e", b"c", b"d", b"f", b"g", b"h"])
self.assertAllEqual(concat_out.dense_shape, [3, 8])
@test_util.run_deprecated_v1
def testMismatchedRank(self):
with self.session(use_gpu=False):
sp_a = self._SparseTensor_3x3()
sp_e = self._SparseTensor_2x3x4()
# Rank mismatches can be caught at shape-inference time
for concat_dim in (-1, 1):
with self.assertRaises(ValueError):
sparse_ops.sparse_concat(concat_dim, [sp_a, sp_e])
@test_util.run_deprecated_v1
def testMismatchedRankExpandNonconcatDim(self):
with self.session(use_gpu=False):
sp_a = self._SparseTensor_3x3()
sp_e = self._SparseTensor_2x3x4()
# Rank mismatches should be caught at shape-inference time, even for
# expand_nonconcat_dim=True.
for concat_dim in (-1, 1):
with self.assertRaises(ValueError):
sparse_ops.sparse_concat(
concat_dim, [sp_a, sp_e], expand_nonconcat_dim=True)
@test_util.run_deprecated_v1
def testMismatchedShapes(self):
with self.session(use_gpu=False) as sess:
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x5()
sp_c = self._SparseTensor_3x2()
sp_d = self._SparseTensor_2x3()
for concat_dim in (-1, 1):
sp_concat = sparse_ops.sparse_concat(concat_dim,
[sp_a, sp_b, sp_c, sp_d])
# Shape mismatches can only be caught when the op is run
with self.assertRaisesOpError("Input shapes must match"):
self.evaluate(sp_concat)
def testMismatchedShapesExpandNonconcatDim(self):
with self.session(use_gpu=False) as sess:
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x5()
sp_c = self._SparseTensor_3x2()
sp_d = self._SparseTensor_2x3()
for concat_dim0 in (-2, 0):
for concat_dim1 in (-1, 1):
sp_concat_dim0 = sparse_ops.sparse_concat(
concat_dim0, [sp_a, sp_b, sp_c, sp_d], expand_nonconcat_dim=True)
sp_concat_dim1 = sparse_ops.sparse_concat(
concat_dim1, [sp_a, sp_b, sp_c, sp_d], expand_nonconcat_dim=True)
sp_concat_dim0_out = self.evaluate(sp_concat_dim0)
sp_concat_dim1_out = self.evaluate(sp_concat_dim1)
self.assertAllEqual(sp_concat_dim0_out.indices,
[[0, 2], [1, 0], [2, 0], [2, 2], [4, 1], [5, 0],
[5, 3], [5, 4], [7, 0], [8, 0], [9, 1], [10, 0],
[10, 2]])
self.assertAllEqual(sp_concat_dim0_out.values,
[1, 2, 3, 4, 1, 2, 1, 0, 1, 2, 1, 1, 2])
self.assertAllEqual(sp_concat_dim0_out.dense_shape, [11, 5])
self.assertAllEqual(sp_concat_dim1_out.indices,
[[0, 2], [0, 11], [1, 0], [1, 4], [1, 8], [1, 10],
[1, 12], [2, 0], [2, 2], [2, 3], [2, 6], [2, 7],
[2, 8]])
self.assertAllEqual(sp_concat_dim1_out.values,
[1, 1, 2, 1, 1, 1, 2, 3, 4, 2, 1, 0, 2])
self.assertAllEqual(sp_concat_dim1_out.dense_shape, [3, 13])
@test_util.run_deprecated_v1
def testShapeInferenceUnknownShapes(self):
with self.session(use_gpu=False):
sp_inputs = [
self._SparseTensor_UnknownShape(),
self._SparseTensor_UnknownShape(val_shape=[3]),
self._SparseTensor_UnknownShape(ind_shape=[1, 3]),
self._SparseTensor_UnknownShape(shape_shape=[3])
]
for concat_dim in (-2, 0):
sp_concat = sparse_ops.sparse_concat(concat_dim, sp_inputs)
self.assertEqual(sp_concat.indices.get_shape().as_list(), [None, 3])
self.assertEqual(sp_concat.values.get_shape().as_list(), [None])
self.assertEqual(sp_concat.dense_shape.get_shape(), [3])
def testConcatShape(self):
# Test case for GitHub 21964.
x = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=[1, 2], dense_shape=[2, 2])
y = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=[1, 2], dense_shape=[2, 2])
z = sparse_ops.sparse_concat(-1, [x, y])
self.assertEqual(z.get_shape().as_list(), [2, 4])
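  # Sketch of the index bookkeeping these tests rely on: along the concat
  # dimension, sparse_concat keeps each input's indices and offsets later
  # inputs by the accumulated size of the preceding ones. In testConcat2,
  # the 3x5 tensor's column indices are shifted by 3 (the 3x3 width):
  #   [1, 1] -> [1, 4], [2, 0] -> [2, 3], ..., dense_shape becomes [3, 8].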
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_concat_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
def _AddTest(test_class, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test_class, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test_class, test_name, fn)
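# A hypothetical usage sketch mirroring the __main__ block below: tests are
# generated per parameter combination and attached by name, e.g.
#   _AddTest(QrOpTest, "Qr", "float32_4_4_full_False_static_True",
#            _GetQrOpTest(np.float32, (4, 4), False, True))
# so each combination becomes an individually selectable test method.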
class QrOpTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testWrongDimensions(self):
# The input to qr should be a tensor of at least rank 2.
scalar = constant_op.constant(1.)
with self.assertRaisesRegexp(ValueError,
"Shape must be at least rank 2 but is rank 0"):
linalg_ops.qr(scalar)
vector = constant_op.constant([1., 2.])
with self.assertRaisesRegexp(ValueError,
"Shape must be at least rank 2 but is rank 1"):
linalg_ops.qr(vector)
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
all_ops = []
for full_matrices_ in True, False:
for rows_ in 4, 5:
for cols_ in 4, 5:
matrix1 = random_ops.random_normal([rows_, cols_], seed=42)
matrix2 = random_ops.random_normal([rows_, cols_], seed=42)
q1, r1 = linalg_ops.qr(matrix1, full_matrices=full_matrices_)
q2, r2 = linalg_ops.qr(matrix2, full_matrices=full_matrices_)
all_ops += [q1, r1, q2, r2]
val = self.evaluate(all_ops)
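      # all_ops holds eight groups (2 full_matrices x 2 rows x 2 cols) of
      # [q1, r1, q2, r2], so val has 32 entries, walked four at a time below.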
for i in range(8):
q = 4 * i
self.assertAllClose(val[q], val[q + 2]) # q1 == q2
self.assertAllClose(val[q + 1], val[q + 3]) # r1 == r2
def _GetQrOpTest(dtype_, shape_, full_matrices_, use_static_shape_):
is_complex = dtype_ in (np.complex64, np.complex128)
is_single = dtype_ in (np.float32, np.complex64)
def CompareOrthogonal(self, x, y, rank):
if is_single:
atol = 5e-4
else:
atol = 5e-14
# We only compare the first 'rank' orthogonal vectors since the
# remainder form an arbitrary orthonormal basis for the
# (row- or column-) null space, whose exact value depends on
    # implementation details. Notice that since we check elsewhere that
    # the Q factors are unitary, we do implicitly test that the trailing
    # vectors of x and y span the same space.
x = x[..., 0:rank]
y = y[..., 0:rank]
# Q is only unique up to sign (complex phase factor for complex matrices),
# so we normalize the sign first.
sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
x *= phases
self.assertAllClose(x, y, atol=atol)
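  # A tiny numeric sketch of the phase trick above (values hypothetical, not
  # executed here): for x = [[1.], [2.]] and y = -x, y / x is -1 everywhere,
  # so phases == -1 and x * phases reproduces y exactly.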
def CheckApproximation(self, a, q, r):
if is_single:
tol = 1e-5
else:
tol = 1e-14
# Tests that a ~= q*r.
a_recon = math_ops.matmul(q, r)
self.assertAllClose(a_recon, a, rtol=tol, atol=tol)
def CheckUnitary(self, x):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
xx = math_ops.matmul(x, x, adjoint_a=True)
identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
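    # band_part(ones_like(xx), 0, 0) keeps only the main diagonal, i.e. an
    # identity matrix with the same batch shape and dtype as xx.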
if is_single:
tol = 1e-5
else:
tol = 1e-14
self.assertAllClose(identity, xx, atol=tol)
@test_util.run_v1_only("b/120545219")
def Test(self):
np.random.seed(1)
x_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
if is_complex:
x_np += 1j * np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
with self.session(use_gpu=True) as sess:
if use_static_shape_:
x_tf = constant_op.constant(x_np)
else:
x_tf = array_ops.placeholder(dtype_)
q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices_)
if use_static_shape_:
q_tf_val, r_tf_val = self.evaluate([q_tf, r_tf])
else:
q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np})
q_dims = q_tf_val.shape
np_q = np.ndarray(q_dims, dtype_)
np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))
new_first_dim = np_q_reshape.shape[0]
x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))
for i in range(new_first_dim):
if full_matrices_:
np_q_reshape[i, :, :], _ = np.linalg.qr(
x_reshape[i, :, :], mode="complete")
else:
np_q_reshape[i, :, :], _ = np.linalg.qr(
x_reshape[i, :, :], mode="reduced")
np_q = np.reshape(np_q_reshape, q_dims)
CompareOrthogonal(self, np_q, q_tf_val, min(shape_[-2:]))
CheckApproximation(self, x_np, q_tf_val, r_tf_val)
CheckUnitary(self, q_tf_val)
return Test
class QrGradOpTest(test.TestCase):
pass
def _GetQrGradOpTest(dtype_, shape_, full_matrices_):
@test_util.run_v1_only("b/120545219")
def Test(self):
np.random.seed(42)
a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
if dtype_ in [np.complex64, np.complex128]:
a += 1j * np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
epsilon = np.finfo(dtype_).eps
delta = 0.1 * epsilon**(1.0 / 3.0)
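    # Roughly: single precision (float32/complex64) has eps ~= 1.19e-7 and
    # eps**(1/3) ~= 4.9e-3, so delta ~= 4.9e-4; double precision has
    # eps ~= 2.22e-16, giving delta ~= 6.1e-7.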
if dtype_ in [np.float32, np.complex64]:
tol = 3e-2
else:
tol = 1e-6
with self.session(use_gpu=True):
tf_a = constant_op.constant(a)
tf_b = linalg_ops.qr(tf_a, full_matrices=full_matrices_)
for b in tf_b:
x_init = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
if dtype_ in [np.complex64, np.complex128]:
x_init += 1j * np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
theoretical, numerical = gradient_checker.compute_gradient(
tf_a,
tf_a.get_shape().as_list(),
b,
b.get_shape().as_list(),
x_init_value=x_init,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
class QRBenchmark(test.Benchmark):
shapes = [
(4, 4),
(8, 8),
(16, 16),
(101, 101),
(256, 256),
(1024, 1024),
(2048, 2048),
(1024, 2),
(1024, 32),
(1024, 128),
(1024, 512),
(1, 8, 8),
(10, 8, 8),
(100, 8, 8),
(1, 256, 256),
(10, 256, 256),
(100, 256, 256),
]
def benchmarkQROp(self):
for shape_ in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix_value = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(np.float32)
matrix = variables.Variable(matrix_value)
q, r = linalg_ops.qr(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(q, r),
min_iters=25,
name="QR_cpu_{shape}".format(shape=shape_))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/device:GPU:0"):
matrix_value = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(np.float32)
matrix = variables.Variable(matrix_value)
q, r = linalg_ops.qr(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(q, r),
min_iters=25,
name="QR_gpu_{shape}".format(shape=shape_))
if __name__ == "__main__":
for dtype in np.float32, np.float64, np.complex64, np.complex128:
for rows in 1, 2, 5, 10, 32, 100:
for cols in 1, 2, 5, 10, 32, 100:
for full_matrices in False, True:
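          # Multiplying the list by a bool appends the (3, 2) batch shape
          # only when both rows and cols are < 10, bounding test cost.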
for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
# TF2 does not support placeholders under eager so we skip it
for use_static_shape in set([True, tf2.enabled()]):
shape = batch_dims + (rows, cols)
name = "%s_%s_full_%s_static_%s" % (dtype.__name__,
"_".join(map(str, shape)),
full_matrices,
use_static_shape)
_AddTest(QrOpTest, "Qr", name,
_GetQrOpTest(dtype, shape, full_matrices,
use_static_shape))
# TODO(pfau): Get working with complex types.
# TODO(pfau): Get working with full_matrices when rows != cols
# TODO(pfau): Get working when rows < cols
  # TODO(pfau): Get working with placeholders (dynamic shapes)
for full_matrices in False, True:
for dtype in np.float32, np.float64:
for rows in 1, 2, 5, 10:
for cols in 1, 2, 5, 10:
if rows == cols or (not full_matrices and rows > cols):
for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
shape = batch_dims + (rows, cols)
name = "%s_%s_full_%s" % (dtype.__name__,
"_".join(map(str, shape)),
full_matrices)
_AddTest(QrGradOpTest, "QrGrad", name,
_GetQrGradOpTest(dtype, shape, full_matrices))
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/qr_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the gradient of `tf.sparse.sparse_dense_matmul()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SparseTensorDenseMatMulGradientTest(test.TestCase):
def _sparsify(self, x, indices_dtype=np.int64):
x[x < 0.5] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(indices_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
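  # Sketch with hypothetical values: for x = [[0.2, 0.7], [0.9, 0.1]] the
  # thresholding zeroes entries below 0.5, leaving indices [[0, 1], [1, 0]],
  # values [0.7, 0.9], dense_shape (2, 2), and a count of 2.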
def _randomTensor(self,
size,
values_dtype,
adjoint=False,
sparse=False,
indices_dtype=np.int64):
n, m = size
x = np.random.randn(n, m).astype(values_dtype)
if adjoint:
x = x.transpose()
if sparse:
return self._sparsify(x, indices_dtype=indices_dtype)
else:
return constant_op.constant(x, dtype=values_dtype)
def _testGradients(self, adjoint_a, adjoint_b, name, values_dtype,
indices_dtype):
n, k, m = np.random.randint(1, 10, size=3)
sp_t, nnz = self._randomTensor(
[n, k],
values_dtype,
adjoint=adjoint_a,
sparse=True,
indices_dtype=indices_dtype)
dense_t = self._randomTensor([k, m], values_dtype, adjoint=adjoint_b)
matmul = sparse_ops.sparse_tensor_dense_matmul(
sp_t, dense_t, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name=name)
with self.cached_session(use_gpu=True):
dense_t_shape = [m, k] if adjoint_b else [k, m]
sp_t_val_shape = [nnz]
err = gradient_checker.compute_gradient_error(
[dense_t, sp_t.values], [dense_t_shape, sp_t_val_shape], matmul,
[n, m])
print("%s gradient err = %s" % (name, err))
self.assertLess(err, 1e-3)
def _testGradientsType(self, values_dtype, indices_dtype):
for adjoint_a in [True, False]:
for adjoint_b in [True, False]:
name = "sparse_tensor_dense_matmul_%s_%s_%s_%s" % (
adjoint_a, adjoint_b, values_dtype.__name__, indices_dtype.__name__)
self._testGradients(adjoint_a, adjoint_b, name, values_dtype,
indices_dtype)
@test_util.run_deprecated_v1
def testGradients(self):
np.random.seed(5) # Fix seed to avoid flakiness
self._testGradientsType(np.float32, np.int64)
self._testGradientsType(np.float64, np.int64)
self._testGradientsType(np.float32, np.int32)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_grad_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for make_template used with MirroredStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.framework import test_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class TemplateMirroredStrategyTest(test.TestCase):
@test_util.run_deprecated_v1
def test_merge_call(self):
if not test.is_gpu_available():
self.skipTest("No GPU available")
def fn():
var1 = variable_scope.get_variable(
"var1", shape=[], initializer=init_ops.constant_initializer(21.))
ds_context.get_replica_context().merge_call(lambda _: ())
var2 = variable_scope.get_variable(
"var2", shape=[], initializer=init_ops.constant_initializer(2.))
return var1 * var2
temp = template.make_template("my_template", fn)
strategy = mirrored_strategy.MirroredStrategy(["/cpu:0", "/gpu:0"])
out = strategy.experimental_local_results(
strategy.experimental_run_v2(temp))
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([42., 42.], self.evaluate(out))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/template_mirrored_strategy_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""critical section tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import prefetching_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import critical_section_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
# TODO(ebrevdo): Re-enable once CriticalSection is in core.
# from tensorflow.python.training import saver as saver_lib
@test_util.with_control_flow_v2
class CriticalSectionTest(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def testCreateCriticalSection(self):
cs = critical_section_ops.CriticalSection(shared_name="cs")
v = resource_variable_ops.ResourceVariable(0.0, name="v")
def fn(a, b):
c = v.value()
with ops.control_dependencies([c]):
nv = v.assign_add(a * b)
with ops.control_dependencies([nv]):
return array_ops.identity(c)
num_concurrent = 100
r = [cs.execute(lambda: fn(1.0, 2.0)) for _ in range(num_concurrent)]
self.evaluate(v.initializer)
r_value = self.evaluate(r)
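    # Executions are serialized by the critical section: each reads v and
    # then adds a * b == 2.0, so the returned reads are some permutation of
    # 0.0, 2.0, ..., 198.0.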
self.assertAllClose([2.0 * i for i in range(num_concurrent)],
sorted(r_value))
@parameterized.named_parameters(
("Inner%sOuter%s" % (inner, outer), inner, outer)
for (inner, outer) in itertools.product(*([(False, True)] * 2)))
@test_util.disable_control_flow_v2("b/135070612")
@test_util.run_in_graph_and_eager_modes
@test_util.xla_allow_fallback("b/128495870")
def testCriticalSectionWithControlFlow(self, outer_cond, inner_cond):
cs = critical_section_ops.CriticalSection(shared_name="cs")
v = resource_variable_ops.ResourceVariable(0.0, name="v")
num_concurrent = 100
# pylint: disable=cell-var-from-loop
def fn(a, b):
c = v.read_value()
def true_fn():
with ops.control_dependencies([c]):
nv = v.assign_add(a * b)
with ops.control_dependencies([nv]):
return array_ops.identity(c)
return control_flow_ops.cond(
array_ops.identity(inner_cond), true_fn, lambda: c)
def execute():
return cs.execute(lambda: fn(1.0, 2.0))
r = [
control_flow_ops.cond(array_ops.identity(outer_cond),
execute,
v.read_value)
for _ in range(num_concurrent)
]
# pylint: enable=cell-var-from-loop
self.evaluate(v.initializer)
r_value = self.evaluate(r)
if inner_cond and outer_cond:
self.assertAllClose([2.0 * i for i in range(num_concurrent)],
sorted(r_value))
else:
self.assertAllClose([0] * num_concurrent, r_value)
@test_util.run_v1_only("b/123990562 Sees CancelledError on some calls")
def testCriticalSectionInParallelDoesntDeadlockOnError(self):
# No eager mode execution of this test because eager does not
# run fn() in parallel, which is where the deadlock could
# potentially occur (in graph mode).
cs = critical_section_ops.CriticalSection(shared_name="cs")
v = resource_variable_ops.ResourceVariable(0.0, name="v")
def fn(i):
error = control_flow_ops.Assert((i % 2) == 1, ["Error"])
with ops.control_dependencies([error]):
return v.read_value()
num_concurrent = 2
@def_function.function(autograph=False)
def run_concurrently():
return [cs.execute(lambda: fn(i)) for i in range(num_concurrent)]
if not context.executing_eagerly():
run_concurrently = run_concurrently()
self.evaluate(v.initializer)
for _ in range(100):
with self.assertRaisesOpError("Error"):
if context.executing_eagerly():
run_concurrently()
else:
self.evaluate(run_concurrently)
@test_util.run_in_graph_and_eager_modes
def testCreateCriticalSectionFnReturnsOp(self):
cs = critical_section_ops.CriticalSection(shared_name="cs")
v = resource_variable_ops.ResourceVariable(0.0, name="v")
def fn_return_op(a, b):
c = v.read_value()
with ops.control_dependencies([c]):
nv = v.assign_add(a * b)
with ops.control_dependencies([nv]):
return control_flow_ops.no_op()
num_concurrent = 100
r = [cs.execute(lambda: fn_return_op(1.0, 2.0))
for _ in range(num_concurrent)]
self.evaluate(v.initializer)
self.evaluate(r)
final_v = self.evaluate(v)
self.assertAllClose(2.0 * num_concurrent, final_v)
@test_util.run_v1_only("Collections don't exist in TF2")
def testCollection(self):
cs = critical_section_ops.CriticalSection(shared_name="cs")
self.assertIn(
cs, ops.get_collection(critical_section_ops.CRITICAL_SECTIONS))
add = lambda x: x + 1
execute = cs.execute(lambda: add(1.0), name="my_execute")
execute_op = [
x for x in execute.graph.get_operations()
if "my_execute" in x.name and "MutexLock" in x.type
][0]
self.assertIn(
execute_op,
[signature.op for signature in
ops.get_collection(critical_section_ops.CRITICAL_SECTION_EXECUTIONS)])
@test_util.run_v1_only("b/123955885 Can't identify deadlocks in eager mode")
def testRecursiveCriticalSectionAccessIsIllegal(self):
# This does not work properly in eager mode. Eager users will
# just hit a deadlock if they do this. But at least it'll be easier
# to debug.
cs = critical_section_ops.CriticalSection()
add = lambda y: y + 1
def fn(x):
return cs.execute(lambda: add(x))
with self.assertRaisesRegexp(
ValueError,
r"attempts to directly access the CriticalSection in which it "
r"would be running"):
cs.execute(lambda: fn(1.0))
def testRecursiveCriticalSectionAccessViaCapturedTensorIsProtected(self):
# This one is subtle; and we're being overly cautious here. The
# deadlock we are ensuring we catch is:
#
# to_capture = CS[lambda x: x + 1](1.0)
# deadlocked = CS[lambda x: x + to_capture](1.0)
#
# This would have caused a deadlock because executing `deadlocked` will
# lock the mutex on CS; but then due to dependencies, will attempt
# to compute `to_capture`. This computation requires locking CS,
# but that is not possible now because CS is already locked by
# `deadlocked`.
#
# We check that CriticalSection.execute properly inserts new
# control dependencies to its lock to ensure all captured
# operations are finished before anything runs within the critical section.
cs = critical_section_ops.CriticalSection(shared_name="cs")
fn = array_ops.identity
to_capture = cs.execute(lambda: fn(1.0))
fn_captures = lambda x: x + to_capture
to_capture_too = array_ops.identity(to_capture)
ex_0 = cs.execute(lambda: fn_captures(1.0))
with ops.control_dependencies([to_capture]):
# This is OK because to_capture will execute before this next call
ex_1 = cs.execute(lambda: fn_captures(1.0))
dependency = array_ops.identity(to_capture)
fn_captures_dependency = lambda x: x + dependency
ex_2 = cs.execute(lambda: fn_captures_dependency(1.0))
with ops.control_dependencies([to_capture_too]):
ex_3 = cs.execute(lambda: fn_captures_dependency(1.0))
    # Ensure there's no actual deadlock when evaluating these executions.
    self.assertEqual(2.0, self.evaluate(ex_0))
    self.assertEqual(2.0, self.evaluate(ex_1))
    self.assertEqual(2.0, self.evaluate(ex_2))
    self.assertEqual(2.0, self.evaluate(ex_3))
def testRecursiveCriticalSectionAccessWithinLoopIsProtected(self):
cs = critical_section_ops.CriticalSection(shared_name="cs")
def body_implicit_capture(i, j):
# This would have caused a deadlock if not for logic in execute
# that inserts additional control dependencies onto the lock op:
# * Loop body argument j is captured by fn()
# * i is running in parallel to move forward the execution
# * j is not being checked by the predicate function
# * output of cs.execute() is returned as next j.
fn = lambda: j + 1
return (i + 1, cs.execute(fn))
(i_n, j_n) = control_flow_ops.while_loop(
lambda i, _: i < 1000,
body_implicit_capture,
[0, 0],
parallel_iterations=25)
# For consistency between eager and graph mode.
i_n = array_ops.identity(i_n)
logging.warn(
"\n==============\nRunning "
"'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
"body_implicit_capture'\n"
"==============\n")
    self.assertEqual((1000, 1000), self.evaluate((i_n, j_n)))
logging.warn(
"\n==============\nSuccessfully finished running "
"'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
"body_implicit_capture'\n"
"==============\n")
def body_implicit_capture_protected(i, j):
# This version is ok because we manually add a control
# dependency on j, which is an argument to the while_loop body
# and captured by fn.
fn = lambda: j + 1
with ops.control_dependencies([j]):
return (i + 1, cs.execute(fn))
(i_n, j_n) = control_flow_ops.while_loop(
lambda i, _: i < 1000,
body_implicit_capture_protected,
[0, 0],
parallel_iterations=25)
# For consistency between eager and graph mode.
i_n = array_ops.identity(i_n)
logging.warn(
"\n==============\nRunning "
"'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
"body_implicit_capture_protected'\n"
"==============\n")
    self.assertEqual((1000, 1000), self.evaluate((i_n, j_n)))
logging.warn(
"\n==============\nSuccessfully finished running "
"'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
"body_implicit_capture_protected'\n"
"==============\n")
def body_args_capture(i, j):
# This version is ok because j is an argument to fn and we can
# ensure there's a control dependency on j.
fn = lambda x: x + 1
return (i + 1, cs.execute(lambda: fn(j)))
(i_n, j_n) = control_flow_ops.while_loop(
lambda i, _: i < 1000,
body_args_capture,
[0, 0],
parallel_iterations=25)
# For consistency between eager and graph mode.
i_n = array_ops.identity(i_n)
logging.warn(
"\n==============\nRunning "
"'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
"body_args_capture'\n"
"==============\n")
    self.assertEqual((1000, 1000), self.evaluate((i_n, j_n)))
logging.warn(
"\n==============\nSuccessfully finished running "
"'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
"body_args_capture'\n"
"==============\n")
@test_util.run_v1_only("b/123955885 Can't identify deadlocks in eager mode")
def testRecursiveCriticalSectionAccessIsIllegalSameSharedName(self):
# This does not work properly in eager mode. Eager users will
# just hit a deadlock if they do this. But at least it'll be easier
# to debug.
cs = critical_section_ops.CriticalSection(shared_name="cs")
cs_same = critical_section_ops.CriticalSection(shared_name="cs")
add = lambda x: x + 1
def fn(x):
return cs_same.execute(lambda: add(x))
with self.assertRaisesRegexp(
ValueError,
r"attempts to directly access the CriticalSection in which it "
r"would be running"):
cs.execute(lambda: fn(1.0))
@test_util.run_v1_only("b/123955885 Can't identify deadlocks in eager mode")
def testMultipleCSExecutionsRequestSameResource(self):
cs0 = critical_section_ops.CriticalSection()
cs1 = critical_section_ops.CriticalSection()
v = resource_variable_ops.ResourceVariable(0.0, name="v")
cs0.execute(lambda: v + 1)
# It's OK for the same CriticalSection to access this resource.
cs0.execute(lambda: v - 1)
# It's *not* OK for a different CriticalSection to access it by
# default.
with self.assertRaisesRegexp(
ValueError, "requested exclusive resource access"):
cs1.execute(lambda: v + 1)
# It's not even OK if the second call doesn't request exclusive access.
with self.assertRaisesRegexp(
ValueError, "requested exclusive resource access"):
cs1.execute(lambda: v + 1, exclusive_resource_access=False)
v2 = resource_variable_ops.ResourceVariable(0.0, name="v2")
cs0.execute(lambda: v2 + 1, exclusive_resource_access=False)
# It's OK if neither requests exclusive resource access.
cs1.execute(lambda: v2 + 1, exclusive_resource_access=False)
    # It's not OK if the second request requires exclusive resource
    # access.
with self.assertRaisesRegexp(
ValueError, "requested exclusive resource access"):
cs1.execute(lambda: v2 + 1)
def testControlDependencyFromOutsideWhileLoopMixedWithInsideLoop(self):
cs = critical_section_ops.CriticalSection()
v = resource_variable_ops.ResourceVariable(0, name="v")
# Make sure that the control dependencies on v do not cause issues
# in the lock_op's automatic control dependency adder.
#
# Note, here v must be a resource variable (or something similar),
# otherwise it gets hoisted into the while_loop by the time we add
# control dependencies to the lock_op.
def body(i):
add_j = lambda j: v + j + 1
return cs.execute(lambda: add_j(i))
out = control_flow_ops.while_loop(
lambda i: i < 10, body, [0])
self.evaluate(v.initializer)
self.assertEqual(10, self.evaluate(out))
@test_util.run_in_graph_and_eager_modes
def testInsideFunction(self):
if test_util.is_gpu_available():
self.skipTest(
"b/123899495: Colocation errors for critical sections in map on GPU")
cs = critical_section_ops.CriticalSection()
with ops.device("/gpu:0" if test_util.is_gpu_available() else "/cpu:0"):
v = resource_variable_ops.ResourceVariable(1)
def fn():
return v.read_value()
# map() creates a TensorFlow function.
ds = dataset_ops.Dataset.range(1)
if test_util.is_gpu_available():
ds = (ds.apply(prefetching_ops.copy_to_device("/gpu:0"))
.apply(prefetching_ops.map_on_gpu(lambda _: cs.execute(fn))))
else:
ds = ds.map(lambda _: cs.execute(fn))
def get_first():
if context.executing_eagerly():
return self.evaluate(ds.make_one_shot_iterator().get_next())
itr = ds.make_initializable_iterator()
self.evaluate([v.initializer, itr.initializer])
return self.evaluate(itr.get_next())
self.assertEqual(1, get_first())
# TODO(ebrevdo): Re-enable once CriticalSection is in core.
#
# def testCriticalSectionAndExecuteOpSaverRoundTrip(self):
# cs = critical_section_ops.CriticalSection()
# r = cs.execute(lambda x: x + 1, 1.0)
# graph = ops.get_default_graph()
# meta_graph = saver_lib.export_meta_graph(
# graph=graph, collection_list=graph.get_all_collection_keys())
# graph_copy = ops.Graph()
# with graph_copy.as_default():
# _ = saver_lib.import_meta_graph(meta_graph, import_scope="imported")
# restored_cs = ops.get_collection(critical_section_ops.CRITICAL_SECTIONS)
# restored_exec = ops.get_collection(
# critical_section_ops.CRITICAL_SECTION_EXECUTIONS)
# self.assertEqual(1, len(restored_cs))
# self.assertEqual(1, len(restored_exec))
# self.assertEqual(restored_cs[0].name, "imported/%s" % cs.name)
# self.assertEqual(restored_exec[0].op.name, "imported/%s" % r.op.name)
# def testToProto(self):
# cs = critical_section_ops.CriticalSection(shared_name="cs")
# proto = cs.to_proto()
# self.assertEqual(proto.critical_section_name, cs._handle.name)
# cs_copy = critical_section_ops.CriticalSection.from_proto(proto)
# self.assertEqual(cs_copy._handle, cs._handle)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/critical_section_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for unicode_transcode op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
# Note: for now only tests for algorithmic converters since no file-based
# converters can be loaded. TODO(gbillock): add ability to include at least
# the ucmcore converters from the conversion data sets.
class UnicodeTranscodeOpTest(test.TestCase, parameterized.TestCase):
def test_transcode_utf8_simple(self):
strings = [[b"a", b"abc"], [b"ABC", b"DEF"]]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, strings)
outputs = string_ops.unicode_transcode(
strings,
input_encoding="ISO-8859-1",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, strings)
outputs = string_ops.unicode_transcode(
strings,
input_encoding="US-ASCII",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, strings)
def test_transcode_utf16_to_utf8(self):
strings = [b"\x00a\x00b\x20\xAC", b"\xD8\x01\xDC\x37"] # U+10437
expected = [s.decode("UTF-16-BE").encode("UTF-8") for s in strings]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="UTF-16",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, expected)
def test_transcode_bad_utf8(self):
bad_string = b"\x00\xff"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=True)
values = self.evaluate(outputs)
self.assertAllEqual(values, b" ")
outputs = string_ops.unicode_transcode(
bad_string,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, b"\x00 ")
def test_transcode_bad_utf8_with_some_good(self):
bad_string = b"abc\xffabcdefg"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, b"abc abcdefg")
def test_transcode_bad_utf8_with_defaults(self):
bad_string = b"\x00\xff"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string, input_encoding="UTF-8", output_encoding="UTF-8")
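      # The default policy is errors="replace" with U+FFFD, whose UTF-8
      # encoding is b"\xef\xbf\xbd"; only the bad byte 0xff is replaced.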
values = self.evaluate(outputs)
self.assertAllEqual(values, b"\x00\xef\xbf\xbd")
def test_transcode_bad_utf8_with_space_replacement(self):
bad_string = b"\x00\xff"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string, input_encoding="UTF-8", output_encoding="UTF-8",
replacement_char=ord(" "))
values = self.evaluate(outputs)
self.assertAllEqual(values, b"\x00 ")
@test_util.run_deprecated_v1
def test_transcode_bad_utf8_with_strict_errors(self):
bad_string = b"\x00\xff"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="strict")
with self.assertRaisesOpError(
"Invalid formatting on input string"):
self.evaluate(outputs)
@test_util.run_deprecated_v1
def test_transcode_bad_utf8_start_with_strict_errors(self):
bad_string = b"\xffabcd"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="strict")
with self.assertRaisesOpError(
"Invalid formatting on input string"):
self.evaluate(outputs)
def test_transcode_bad_utf8_with_elision_of_malformatting(self):
bad_string = b"\x00\xff"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="ignore")
values = self.evaluate(outputs)
self.assertAllEqual(values, b"\x00")
def test_transcode_bad_utf8_with_elision_including_control_chars(self):
bad_string = b"\x00\xff"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="ignore",
replace_control_characters=True)
values = self.evaluate(outputs)
self.assertAllEqual(values, b"")
def test_transcode_bad_utf8_termination_with_defaults(self):
bad_string = b"a\xf0"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string, input_encoding="UTF-8", output_encoding="UTF-8")
values = self.evaluate(outputs)
self.assertAllEqual(values, b"a\xef\xbf\xbd") # 0xFFFD
def test_transcode_utf8_with_replacement_char(self):
strings = [b"a\xef\xbf\xbd"]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings, input_encoding="UTF-8", output_encoding="UTF-8",
errors="strict")
values = self.evaluate(outputs)
self.assertAllEqual(values, [b"a\xef\xbf\xbd"])
outputs = string_ops.unicode_transcode(
strings, input_encoding="UTF-8", output_encoding="UTF-8",
errors="replace", replacement_char=ord("?"))
values = self.evaluate(outputs)
self.assertAllEqual(values, [b"a\xef\xbf\xbd"])
def test_transcode_utf8_to_utf16(self):
strings = [b"ab\xe2\x82\xac", b"\xf0\x90\x90\xb7"] # U+10437
expected = [s.decode("UTF-8").encode("UTF-16-BE") for s in strings]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="UTF-8",
output_encoding="UTF-16-BE",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
print("values=", values)
self.assertAllEqual(values, expected)
def test_transcode_utf32_to_utf8(self):
strings = [
b"\x00\x00\x00a\x00\x00\x00b\x00\x00\x20\xAC", b"\x00\x01\x04\x37"
] # U+10437
expected = [s.decode("UTF-32-BE").encode("UTF-8") for s in strings]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="UTF-32",
output_encoding="UTF-8",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, expected)
def test_transcode_utf8_to_utf32(self):
strings = [b"ab\xe2\x82\xac", b"\xf0\x90\x90\xb7"]
expected = [s.decode("UTF-8").encode("UTF-32-BE") for s in strings]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="UTF-8",
output_encoding="UTF-32-BE",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, expected)
# Documentation in ICU suggests that getNextUChar may produce a different
# error code if the input sequence contains particular non-coding sequences.
# This test checks that condition.
def test_transcode_ascii_with_shift_chars(self):
strings = [b"\x0e\x0e", b"\x0f\x0f"]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="US-ASCII",
output_encoding="UTF-8",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, strings)
def test_transcode_utf8_with_bom(self):
bom_string = b"\xef\xbb\xbfabcdefg"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bom_string, input_encoding="UTF-8", output_encoding="UTF-8")
values = self.evaluate(outputs)
self.assertAllEqual(values, b"\xef\xbb\xbfabcdefg") # BOM preserved
outputs = string_ops.unicode_transcode(
bom_string, input_encoding="UTF-8", output_encoding="UTF-16-BE")
values = self.evaluate(outputs)
utf16expected = bom_string.decode("UTF-8").encode("UTF-16-BE")
self.assertAllEqual(values, utf16expected)
def test_transcode_utf16_le_be_with_bom(self):
bom_string = b"\xfe\xff\x00\x61" # Big-endian BOM with 'a' encoded
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bom_string, input_encoding="UTF-16-BE", output_encoding="UTF-8")
values = self.evaluate(outputs)
# BOM is preserved in output
self.assertAllEqual(values, b"\xef\xbb\xbfa")
outputs = string_ops.unicode_transcode(
bom_string, input_encoding="UTF-16-LE", output_encoding="UTF-8")
values = self.evaluate(outputs)
# mangled BOM and value from (incorrect) LE encoding
self.assertAllEqual(values, b"\xef\xbf\xbe\xe6\x84\x80")
bom_string = b"\xff\xfe\x61\x00" # Little-endian BOM with 'a' encoded
outputs = string_ops.unicode_transcode(
bom_string, input_encoding="UTF-16-LE", output_encoding="UTF-8")
values = self.evaluate(outputs)
self.assertAllEqual(values, b"\xef\xbb\xbfa")
@parameterized.parameters(
# BOM is stripped if it is used to decide the byte order of the input.
(b"\xfe\xff\x00*", "UTF-16", b"*"),
(b"\xff\xfe*\x00", "UTF-16", b"*"),
# BOM is *not* stripped if it is not used to decide the byte order of
# the input.
(b"\xef\xbb\xbf*", "UTF-8", b"\xef\xbb\xbf*"),
(b"\xfe\xff\x00*", "UTF-16-BE", b"\xef\xbb\xbf*"),
(b"\xff\xfe*\x00", "UTF-16-LE", b"\xef\xbb\xbf*"),
# If the encoding is UTF-16, and no BOM is present, then UTF-16-BE
# is assumed.
(b"\x00*", "UTF-16", b"*"),
# BOM is never stripped from any position other than the beginning of
# the string, for any encoding.
(b"<\xef\xbb\xbf>", "UTF-8", b"<\xef\xbb\xbf>"),
(b"\x00<\xfe\xff\x00>", "UTF-16", b"<\xef\xbb\xbf>"),
(b"\x00<\xfe\xff\x00>", "UTF-16-BE", b"<\xef\xbb\xbf>"),
(b"<\x00\xff\xfe>\x00", "UTF-16-LE", b"<\xef\xbb\xbf>"),
(b"\xfe\xff\x00<\xfe\xff\x00>", "UTF-16", b"<\xef\xbb\xbf>"),
(b"\xff\xfe<\x00\xff\xfe>\x00", "UTF-16", b"<\xef\xbb\xbf>"),
)
@test_util.run_deprecated_v1
def test_bom_handling(self, string, input_encoding, expected):
with self.test_session():
output = string_ops.unicode_transcode(
string, input_encoding=input_encoding, output_encoding="UTF-8")
self.assertAllEqual(output.eval(), expected)
@test_util.run_deprecated_v1
def test_invalid_encoding_causes_errors(self):
strings = [[b"a", b"abc"], [b"ABC", b"DEF"]]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="invalid",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
with self.assertRaisesOpError(
"Could not create converter for input encoding: invalid"):
self.evaluate(outputs)
with self.assertRaisesRegexp(ValueError, "Op passed string 'invalid'"):
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="UTF-8",
output_encoding="invalid",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
self.evaluate(outputs)
@test_util.run_deprecated_v1
def test_invalid_error_policy_causes_errors(self):
strings = [[b"a", b"abc"], [b"ABC", b"DEF"]]
with self.assertRaisesRegexp(
ValueError, "'invalid' not in: \"strict\", \"replace\", \"ignore\"."):
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="invalid",
replacement_char=ord(" "),
replace_control_characters=False)
self.evaluate(outputs)
def test_forwarding(self):
with self.cached_session():
# Generate an input that is uniquely consumed by the transcode op.
# This exercises code paths which are optimized for this case
# (e.g., using forwarding).
inp = string_ops.substr(
constant_op.constant([b"AbCdEfG", b"HiJkLmN"], dtypes.string),
pos=0,
len=5)
transcoded = string_ops.unicode_transcode(
inp, input_encoding="UTF-8", output_encoding="UTF-8")
self.assertAllEqual([b"AbCdE", b"HiJkL"], transcoded)
@test_util.run_deprecated_v1
def test_cjk_encodings(self):
strings_ja = [
b"\x5c\x5c", # Yen sign
b"\x8f\x70", # kanji character "waza"
b"\x83\x4f"
] # katakana character "gu"
strings_zh_cn = [b"\xca\xf5"] # simplified "shu4"
strings_zh_tw = [b"\xb3\x4e"] # traditional "shu4"
strings_ko = [b"\xc7\xd1\xb9\xce"] # hangul "hanmin"
expected_ja = [s.decode("shift_jis").encode("UTF-8") for s in strings_ja]
expected_zh_cn = [
s.decode("gb18030").encode("UTF-8") for s in strings_zh_cn
]
expected_zh_tw = [s.decode("big5").encode("UTF-8") for s in strings_zh_tw]
expected_ko = [s.decode("euc_kr").encode("UTF-8") for s in strings_ko]
with self.cached_session() as sess:
outputs_ja = string_ops.unicode_transcode(
strings_ja,
input_encoding="shift_jis",
output_encoding="UTF-8",
replacement_char=ord(" "),
replace_control_characters=False)
outputs_zh_cn = string_ops.unicode_transcode(
strings_zh_cn,
input_encoding="gb18030",
output_encoding="UTF-8",
replacement_char=ord(" "),
replace_control_characters=False)
outputs_zh_tw = string_ops.unicode_transcode(
strings_zh_tw,
input_encoding="big5",
output_encoding="UTF-8",
replacement_char=ord(" "),
replace_control_characters=False)
outputs_ko = string_ops.unicode_transcode(
strings_ko,
input_encoding="euc_kr",
output_encoding="UTF-8",
replacement_char=ord(" "),
replace_control_characters=False)
result_ja, result_zh_cn, result_zh_tw, result_ko = sess.run(
[outputs_ja, outputs_zh_cn, outputs_zh_tw, outputs_ko])
self.assertAllEqual(result_ja, expected_ja)
self.assertAllEqual(result_zh_cn, expected_zh_cn)
self.assertAllEqual(result_zh_tw, expected_zh_tw)
self.assertAllEqual(result_ko, expected_ko)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/unicode_transcode_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.functional_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import def_function as eager_def_function
from tensorflow.python.eager import function as eager_function
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.util import compat
# pylint: disable=invalid-name
def simple_scoped_fn(a, x):
"""Simple function: (a, x) -> 2(x+a), but with "2" as a variable in scope."""
with variable_scope.variable_scope("body"):
# Dummy variable, just to check that scoping works as intended.
two = variable_scope.get_variable(
"two", [],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(2))
return math_ops.multiply(math_ops.add(a, x), two)
@test_util.with_control_flow_v2
class FunctionalOpsTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testFoldl_Simple(self):
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldl(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems)
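    # foldl without an initializer seeds the accumulator with elems[0]:
    # 1 -> (1+2)*2=6 -> 18 -> 44 -> 98 -> 208. With initializer=10 below,
    # the chain is 10 -> 22 -> 48 -> 102 -> 212 -> 434 -> 880.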
self.assertAllEqual(208, self.evaluate(r))
r = functional_ops.foldl(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems,
initializer=10)
self.assertAllEqual(880, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldl_SingleInputMultiOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array([1, -1.0])
r = functional_ops.foldl(lambda a, x: a + x, elems, initializer)
r_value = self.evaluate(r)
self.assertAllEqual(22, r_value[0])
self.assertAllEqual(20, r_value[1])
@test_util.run_in_graph_and_eager_modes
def testFoldl_MultiInputSingleOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
r = functional_ops.foldl(lambda a, x: a + x[0] + x[1], (elems, -elems),
initializer)
self.assertAllEqual(1, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldl_MultiInputDifferentDimsSingleOutput(self):
elems = np.array([[1.0, 1.0, 1.0], [2.0, 3.0, 4.0]])
other_elems = np.array([-1.0, 1.0])
initializer = np.array([0.0, 0.0, 0.0])
r = functional_ops.foldl(lambda a, x: a + x[0] * x[1],
(elems, other_elems), initializer)
self.assertAllEqual([1.0, 2.0, 3.0], self.evaluate(r))
@test_util.run_deprecated_v1
def testFoldl_Scoped(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldl(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
self.assertAllEqual(208, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.foldl(simple_scoped_fn, elems, initializer=10)
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(880, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldr_Simple(self):
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldr(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems)
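    # foldr runs right-to-left, seeding with elems[-1]:
    # 6 -> (6+5)*2=22 -> 52 -> 110 -> 224 -> 450; with initializer=10 below,
    # 10 -> 32 -> 74 -> 156 -> 318 -> 640 -> 1282.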
self.assertAllEqual(450, self.evaluate(r))
r = functional_ops.foldr(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems,
initializer=10)
self.assertAllEqual(1282, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldr_SingleInputMultiOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array([1, -1.0])
r = functional_ops.foldr(lambda a, x: a + x, elems, initializer)
r_value = self.evaluate(r)
self.assertAllEqual(22, r_value[0])
self.assertAllEqual(20, r_value[1])
@test_util.run_in_graph_and_eager_modes
def testFoldr_MultiInputSingleOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
r = functional_ops.foldr(lambda a, x: a + x[0] + x[1], (elems, -elems),
initializer)
self.assertAllEqual(1, self.evaluate(r))
@test_util.run_deprecated_v1
def testFoldr_Scoped(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldr(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
self.assertAllEqual(450, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.foldr(simple_scoped_fn, elems, initializer=10)
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(1282, self.evaluate(r))
# pylint: disable=unnecessary-lambda
@test_util.run_deprecated_v1
def testFold_Grad(self):
with self.cached_session():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
r = functional_ops.foldl(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
r = gradients_impl.gradients(r, v)[0]
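      # foldl gives r == v * (1*2*3*4*5*6), so dr/dv == 720; the foldr
      # product below is the same, hence the identical gradient.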
self.assertAllEqual(720.0, self.evaluate(r))
r = functional_ops.foldr(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(720.0, self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes
def testScan_Simple(self):
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
# pylint: disable=unnecessary-lambda
r = functional_ops.scan(lambda a, x: math_ops.multiply(a, x), elems)
self.assertAllEqual([1., 2., 6., 24., 120., 720.], self.evaluate(r))
r = functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
self.assertAllEqual([2., 4., 12., 48., 240., 1440.], self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes
def testScan_Reverse(self):
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
# pylint: disable=unnecessary-lambda
r = functional_ops.scan(lambda a, x: math_ops.multiply(a, x), elems,
reverse=True)
self.assertAllEqual([720., 720., 360., 120., 30., 6.], self.evaluate(r))
r = functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v,
reverse=True)
self.assertAllEqual([1440., 1440., 720., 240., 60., 12.],
self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes
def testScan_SingleInputMultiOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = (np.array(1.0), np.array(-1.0))
r = functional_ops.scan(lambda a, x: (a[0] * x, -a[1] * x), elems,
initializer)
r_value = self.evaluate(r)
self.assertAllEqual([1.0, 2.0, 6.0, 24.0, 120.0, 720.0], r_value[0])
self.assertAllEqual([1.0, -2.0, 6.0, -24.0, 120.0, -720.0], r_value[1])
@test_util.run_in_graph_and_eager_modes
def testScan_MultiInputSingleOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
# Multiply a * 1 each time
r = functional_ops.scan(lambda a, x: a * (x[0] + x[1]),
(elems + 1, -elems), initializer)
self.assertAllEqual([1.0, 1.0, 1.0, 1.0, 1.0, 1.0], self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testScan_MultiInputSameTypeOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
r = functional_ops.scan(lambda a, x: (a[0] + x[0], a[1] + x[1]),
(elems, -elems))
r_value = self.evaluate(r)
self.assertAllEqual(np.cumsum(elems), r_value[0])
self.assertAllEqual(np.cumsum(-elems), r_value[1])
@test_util.run_in_graph_and_eager_modes
def testScan_MultiOutputMismatchedInitializer(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
# Multiply a * 1 each time
with self.assertRaisesRegexp(
ValueError, "two structures don't have the same nested structure"):
functional_ops.scan(lambda a, x: (a, -a), elems, initializer)
@test_util.run_deprecated_v1
def testScan_Scoped(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.scan(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
results = np.array([1, 6, 18, 44, 98, 208])
self.assertAllEqual(results, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.scan(simple_scoped_fn, elems, initializer=2)
self.assertEqual(len(variables.trainable_variables()), 1)
results = np.array([6, 16, 38, 84, 178, 368])
self.assertAllEqual(results, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testScanFoldl_Nested(self):
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0], name="data")
inner_elems = constant_op.constant([0.5, 0.5], name="data")
def r_inner(a, x):
return functional_ops.foldl(
lambda b, y: b * y * x, inner_elems, initializer=a)
r = functional_ops.scan(r_inner, elems)
# t == 0 (returns 1)
# t == 1, a == 1, x == 2 (returns 1)
# t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1
# t_1 == 1, b == 1, y == 0.5, returns b * y * x = 1
# t == 2, a == 1, x == 3 (returns 1.5*1.5 == 2.25)
# t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1.5
# t_1 == 1, b == 1.5, y == 0.5, returns b * y * x = 1.5*1.5
# t == 3, a == 2.25, x == 4 (returns 9)
# t_0 == 0, b == a == 2.25, y == 0.5, returns b * y * x = 4.5
# t_1 == 1, b == 4.5, y == 0.5, returns b * y * x = 9
self.assertAllClose([1., 1., 2.25, 9.], self.evaluate(r))
@test_util.run_deprecated_v1
def testScan_Control(self):
with self.cached_session() as sess:
s = array_ops.placeholder(dtypes.float32, shape=[None])
b = array_ops.placeholder(dtypes.bool)
with ops.control_dependencies([b]):
c = functional_ops.scan(lambda a, x: x * a, s)
self.assertAllClose(
np.array([1.0, 3.0, 9.0]), sess.run(c, {s: [1, 3, 3],
b: True}))
@test_util.run_deprecated_v1
def testScan_Grad(self):
with self.cached_session():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
# pylint: disable=unnecessary-lambda
r = functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
# pylint: enable=unnecessary-lambda
r = gradients_impl.gradients(r, v)[0]
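      # Each scan output r_k == v * prod(elems[:k+1]); summing dr_k/dv over
      # all outputs gives 1 + 2 + 6 + 24 + 120 + 720 == 873.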
self.assertAllEqual(873.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testScanGradientWithPartStopGradient(self):
a = variables.Variable(0.0, name="a")
b = variables.Variable(0.0, name="b")
elems = array_ops.zeros(5)
l0, l1 = functional_ops.scan(
lambda elem_, input_: (a, b), elems, initializer=(0., 0.))
loss = l0 + array_ops.stop_gradient(l1)
grad = gradients_impl.gradients(ys=[loss], xs=[a, b])
with self.test_session(use_gpu=True) as sess:
self.evaluate(variables.global_variables_initializer())
self.evaluate(grad)
@test_util.run_in_graph_and_eager_modes
def testFoldShape(self):
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
def fn(_, current_input):
return current_input
initializer = constant_op.constant([0, 0, 0])
y = functional_ops.foldl(fn, x, initializer=initializer)
self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
@test_util.run_in_graph_and_eager_modes
def testScanShape(self):
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
def fn(_, current_input):
return current_input
initializer = constant_op.constant([0, 0, 0])
y = functional_ops.scan(fn, x, initializer=initializer)
self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
  # TODO(akshayka): this test fails in eager: the iterable is of length 0,
  # so the body of the while loop never executes
@test_util.run_deprecated_v1
def testScanEmptyTensor(self):
with self.cached_session():
x = functional_ops.scan(
lambda x, _: x, math_ops.range(0), initializer=array_ops.ones([2, 4]))
self.assertAllEqual([0, 2, 4], x.get_shape())
self.assertAllEqual(x.get_shape(), self.evaluate(x).shape)
@test_util.run_deprecated_v1
def testScanUnknownShape(self):
x = array_ops.placeholder(dtypes.float32)
initializer = array_ops.placeholder(dtypes.float32)
def fn(_, current_input):
return current_input
y = functional_ops.scan(fn, x, initializer=initializer)
self.assertIs(None, y.get_shape().dims)
@test_util.run_deprecated_v1
def testScanVaryingShape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 2])
x_t = array_ops.transpose(x)
# scan over dimension 0 (with shape None)
result = functional_ops.scan(lambda a, x: a + x, x)
# scanned over transposed dimension 0 (with shape 2)
result_t = functional_ops.scan(lambda a, x: a + x, x_t, infer_shape=False)
# ensure gradients can be calculated
result_grad = gradients_impl.gradients(result, [x])[0]
result_t_grad = gradients_impl.gradients(result_t, [x_t])[0]
# smoke test to ensure they all evaluate
sess.run([result, result_t, result_grad, result_t_grad],
feed_dict={x: [[1.0, 2.0]]})
@test_util.run_deprecated_v1
def testRemoteFunction(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 2
worker, _ = test_util.create_local_cluster(
1, 1, worker_config=worker_config)
@function.Defun(dtypes.int32, dtypes.int32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:ps/task:0"):
a = variables.Variable(2, dtype=dtypes.int32)
b = variables.Variable(3, dtype=dtypes.int32)
with ops.device("/job:worker/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.int32],
f=_remote_fn,
target="/job:worker/replica:0/task:0/cpu:1")
with session.Session(worker[0].target) as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, [6])
@test_util.run_deprecated_v1
def testRemoteFunctionDirectSession(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 2
@function.Defun(dtypes.int32, dtypes.int32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
a = variables.Variable(2, dtype=dtypes.int32)
b = variables.Variable(3, dtype=dtypes.int32)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.int32],
f=_remote_fn,
target="/job:localhost/replica:0/task:0/cpu:1")
with self.test_session(config=worker_config) as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, [6])
@test_util.run_deprecated_v1
def testRemoteFunctionSameDeviceDirectSession(self):
@function.Defun(dtypes.int32, dtypes.int32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/cpu:0"):
a = variables.Variable(2, dtype=dtypes.int32)
b = variables.Variable(3, dtype=dtypes.int32)
with ops.device("/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b], Tout=[dtypes.int32], f=_remote_fn, target="/cpu:0")
with self.cached_session() as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, [6])
@test_util.run_deprecated_v1
def testRemoteFunctionCPUGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@function.Defun(dtypes.float32, dtypes.float32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
a = variables.Variable(2, dtype=dtypes.float32)
b = variables.Variable(3, dtype=dtypes.float32)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.float32],
f=_remote_fn,
target="/job:localhost/replica:0/task:0/device:GPU:0")[0] + 3.0
with self.cached_session() as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, 9.0)
@test_util.run_deprecated_v1
def testRemoteFunctionGPUCPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@function.Defun(dtypes.float32, dtypes.float32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
a = variables.Variable(2, dtype=dtypes.float32)
b = variables.Variable(3, dtype=dtypes.float32)
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.float32],
f=_remote_fn,
target="/job:localhost/replica:0/task:0/cpu:0")[0] + 3.0
with self.cached_session() as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, 9.0)
@test_util.run_deprecated_v1
def testRemoteFunctionGPUCPUStrings(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@function.Defun(dtypes.string)
def _remote_fn(inp):
return array_ops.identity(inp)
a = array_ops.constant("a")
with ops.device("/gpu:0"):
remote_op = functional_ops.remote_call(
args=[a], Tout=[dtypes.string], f=_remote_fn, target="/cpu:0")
with self.cached_session() as sess:
ret = self.evaluate(remote_op)
self.assertAllEqual(ret, [b"a"])
@test_util.run_deprecated_v1
def testRemoteFunctionCrossProcess(self):
workers, _ = test_util.create_local_cluster(2, 1)
@function.Defun(dtypes.float32, dtypes.float32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:ps/task:0"):
a = variables.Variable(2, dtype=dtypes.float32)
b = variables.Variable(3, dtype=dtypes.float32)
with ops.device("/job:worker/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.float32],
f=_remote_fn,
target="/job:worker/replica:0/task:1/cpu:0")[0] + 3.0
with session.Session(workers[0].target) as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, 9)
@test_util.run_deprecated_v1
def testIf(self):
@function.Defun(dtypes.float32)
def Twice(x):
return x * 2
@function.Defun(dtypes.float32)
def Thrice(x):
return x * 3 + 1
with self.test_session(use_gpu=False) as sess:
x = array_ops.placeholder(dtypes.float32)
ret = functional_ops.If(math_ops.greater(x, 0), [x], Twice, Thrice)[0]
self.assertAllEqual(sess.run(ret, feed_dict={x: 9.}), 18.)
self.assertAllEqual(sess.run(ret, feed_dict={x: -8.}), -23.)
self.assertAllEqual(sess.run(ret, feed_dict={x: 0.}), 1.)
def testWhile(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
def Run(sess, n):
return sess.run(functional_ops.While([n, 0.], Cond, Body))[1]
with self.session(graph=g, use_gpu=use_gpu) as sess:
self.assertAllEqual(Run(sess, 20.), 210.)
self.assertAllEqual(Run(sess, 100.), 5050.)
  # Like the test above, but uses int32 to ensure that int32 tensors don't get
  # copied to the GPU while the While op runs.
def testWhileInt32(self):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.int32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.int32] * 2)
def Body(n, x):
return n - 1, x + n
def Run(sess, n):
return sess.run(functional_ops.While([n, 0], Cond, Body))[1]
with self.session(graph=g, use_gpu=True) as sess:
self.assertAllEqual(Run(sess, 20), 210)
self.assertAllEqual(Run(sess, 100), 5050)
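  # The Cond/Body pair above is the functional encoding of this plain loop
  # (sketch): while n > 0: x += n; n -= 1, which closes to n * (n + 1) // 2.
  def _py_while_reference(self, n):
    x = 0
    while n > 0:
      n, x = n - 1, x + n
    return x  # 210 for n == 20, 5050 for n == 100.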
@test_util.run_deprecated_v1
def testWhileLowering(self):
def Run(n, fetch_by_name):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
# outputs: [0, n*(n+1)/2]
outputs = functional_ops.While([n, 0.], Cond, Body, name="my_while")
# `outputs` is the list of output tensors of the While op. We
# arbitrarily choose the 0th tensor to get the While op and set the
# lowering attribute on it.
outputs[0].op._set_attr("_lower_using_switch_merge",
attr_value_pb2.AttrValue(b=True))
if not fetch_by_name:
fetch = outputs[1]
else:
fetch = "my_while:1"
with self.session(graph=g, use_gpu=use_gpu) as sess:
return self.evaluate(fetch)
self.assertAllEqual(Run(20., False), 210.)
self.assertAllEqual(Run(20., True), 210.)
self.assertAllEqual(Run(100., False), 5050.)
self.assertAllEqual(Run(100., True), 5050.)
@test_util.run_v1_only("b/120545219")
@test_util.disable_xla("b/123337890") # Different error message
def testWhileError(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def CondReturnsTooManyArgs(n, x):
return n > 0, x
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
@function.Defun(*[dtypes.float32] * 2)
def BodyReturnsTooManyArgs(n, x):
return n - 1, x + n, x
with self.session(graph=g, use_gpu=use_gpu):
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Expected a single scalar.*got 2 tensors."):
functional_ops.While([5., 0.], CondReturnsTooManyArgs,
Body)[0].eval()
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"While loop body returned 3 arguments. Expected: 2"):
functional_ops.While([5., 0.], Cond,
BodyReturnsTooManyArgs)[0].eval()
def testWhileInMultipleSubgraphs(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, x): # pylint: disable=unused-argument
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
with self.session(graph=g, use_gpu=use_gpu) as sess:
n = array_ops.placeholder(dtypes.float32)
_, result = functional_ops.While([n, 0.], Cond, Body)
c = constant_op.constant(37.)
self.assertAllEqual(210., sess.run(result, feed_dict={n: 20.}))
self.assertAllEqual(5050., sess.run(result, feed_dict={n: 100.}))
# Test that the result is the same when we run a different subgraph.
self.assertAllEqual(5050.,
sess.run([result, c], feed_dict={n: 100.})[0])
# pylint: disable=cell-var-from-loop
def testWhileCapturedInputs(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
v = variables.Variable(1.0)
def TestCond(n, *args):
del args
return n < 10
@function.Defun(*[dtypes.float32] * 2)
def TestUnary(n, x):
return math_ops.add(n, 1), x + n + v
@function.Defun(*[dtypes.float32] * 3)
def TestBinary(n, x, x2):
return math_ops.add(n, 1), x + n + v, x2 + v
with self.session(graph=g, use_gpu=use_gpu) as sess:
result_unary = functional_ops.While(
[1.0, 0.],
function.Defun(*[dtypes.float32] * 2)(TestCond), TestUnary)
result_binary = functional_ops.While(
[1.0, 0., 0.],
function.Defun(*[dtypes.float32] * 3)(TestCond), TestBinary)
self.evaluate(variables.global_variables_initializer())
assert len(result_unary) == 2
self.assertEqual([10.0, 54.0], self.evaluate(result_unary))
assert len(result_binary) == 3
self.assertEqual([10.0, 54.0, 9.0], self.evaluate(result_binary))
def TestCondCapture(n, *args):
del args
return math_ops.cast(n, dtypes.float32) + v < 10
with self.assertRaises(ValueError):
_ = functional_ops.While(
[1],
function.Defun(dtypes.int32)(TestCondCapture),
function.Defun(dtypes.int32, dtypes.float32)(TestUnary))
# pylint: enable=cell-var-from-loop
def _tfSum(self, use_gpu, rewrite_with_while):
with ops.Graph().as_default() as g:
with self.session(graph=g, use_gpu=use_gpu) as sess:
@function.Defun(dtypes.int32, dtypes.float32)
def Body(n, x):
return x + math_ops.cast(n, dtypes.float32)
xs = [
# 1 + 2 + ... + 20
functional_ops.For(
1, 21, 1, [0.], Body, rewrite_with_while=rewrite_with_while)[0],
# 100 + 99 + ... + 1
functional_ops.For(
100, 0, -1, [0.], Body, rewrite_with_while=rewrite_with_while)
[0],
]
xvals = self.evaluate(xs)
self.assertAllEqual(210, xvals[0])
self.assertAllEqual(5050, xvals[1])
def testFor(self):
for use_gpu in (True, False):
self._tfSum(use_gpu, False)
def testForWithWhile(self):
for use_gpu in (True, False):
self._tfSum(use_gpu, True)
def testForWithWhileNaming(self):
g = ops.Graph()
with g.as_default():
@function.Defun(dtypes.int32, dtypes.float32, func_name="TestBody")
def TestBody(n, x):
return x + math_ops.cast(n, dtypes.float32)
_ = functional_ops.For(
1, 21, 1, [0.], TestBody, rewrite_with_while=True)[0]
names = []
for func in g.as_graph_def().library.function:
names.append(func.signature.name)
self.assertTrue("TestBody" in names)
self.assertTrue("TestBody_Cond" in names)
self.assertTrue("TestBody_Body" in names)
@test_util.run_deprecated_v1
def testForCapturedInputs(self):
v = variables.Variable(1.0)
@function.Defun(dtypes.int32)
def TestNullary(n):
v + math_ops.cast(n, dtypes.float32) # pylint: disable=expression-not-assigned
@function.Defun(dtypes.int32, dtypes.float32)
def TestUnary(n, x):
return x + math_ops.cast(n, dtypes.float32) + v
@function.Defun(dtypes.int32, dtypes.float32, dtypes.float32)
def TestBinary(n, x, x2):
return x + math_ops.cast(n, dtypes.float32) + v, x2 + v
for rewrite_with_while in (True, False):
use_gpu = not rewrite_with_while
with self.test_session(use_gpu=use_gpu) as sess:
result_nullary = functional_ops.For(
1, 10, 1, [], TestNullary,
rewrite_with_while=rewrite_with_while)
result_unary = functional_ops.For(
1, 10, 1, [0.], TestUnary,
rewrite_with_while=rewrite_with_while)
result_binary = functional_ops.For(
1, 10, 1, [0., 0.], TestBinary,
rewrite_with_while=rewrite_with_while)
self.evaluate(variables.global_variables_initializer())
assert not result_nullary
# The nullary variant doesn't return anything so we can't easily run it.
# As a total hack, fetch the operation by name and run it.
sess.run(ops.get_default_graph().get_operation_by_name(
"While" if rewrite_with_while else "For"))
assert len(result_unary) == 1
self.assertEqual([54.0], self.evaluate(result_unary))
assert len(result_binary) == 2
self.assertEqual([54.0, 9.0], self.evaluate(result_binary))
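  # A hand-check (sketch; assumes v keeps its initial value 1.0) of the 54.0
  # above: the body runs for n in 1..9, so x = sum(range(1, 10)) + 9 * 1.0.
  def _py_for_captured_reference(self):
    v, x = 1.0, 0.0
    for n in range(1, 10):
      x += n + v
    return x  # 54.0, matching result_unary.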
def _tfMLP(self, xval, wsval, bsval, rewrite_with_while):
# On GPU, don't rewrite using a while loop.
use_gpu = not rewrite_with_while
with self.test_session(use_gpu=use_gpu):
@function.Defun(dtypes.int32, *[dtypes.float64] * 3)
def MLP(i, a, ws, bs):
a = math_ops.tanh(math_ops.matmul(a, ws[i, :]) + bs[i, :])
return a, ws, bs
ret = functional_ops.For(
0,
wsval.shape[0],
1, [xval, wsval, bsval],
MLP,
rewrite_with_while=rewrite_with_while)[0]
return self.evaluate(ret)
def _npMLP(self, xval, wsval, bsval):
for i in range(wsval.shape[0]):
xval = np.tanh(np.dot(xval, wsval[i, :]) + bsval[i, :])
return xval
def _testForMLP(self, rewrite_with_while):
# We construct a 5-layer Multi-Layer Perceptron network here.
    # Each layer has the same number of hidden units (3), and the
# activation function is tanh(). We feed the input (xval) with
# batch size 2.
xval = np.random.normal(size=(2, 3))
wsval = np.random.normal(size=(5, 3, 3))
bsval = np.random.normal(size=(5, 3))
np_ans = self._npMLP(xval, wsval, bsval)
tf_for_ans = self._tfMLP(xval, wsval, bsval, rewrite_with_while)
self.assertAllClose(np_ans, tf_for_ans)
@test_util.run_deprecated_v1
def testForMLP(self):
self._testForMLP(False)
@test_util.run_deprecated_v1
def testForMLPWhile(self):
self._testForMLP(True)
@test_util.run_v1_only("b/120545219")
def testForError(self):
@function.Defun(dtypes.int32, dtypes.float32)
def Foo(i, v):
return math_ops.cast(i, dtypes.float32) + v
@function.Defun(dtypes.int32, dtypes.float32)
def ReturnsTooManyArgs(unused_i, v):
return v, v
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"must be a scalar"):
functional_ops.For([0], 10, 1, [0.0], Foo)[0].eval()
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Invalid start/limit/delta"):
functional_ops.For(0, 10, -1, [0.0], Foo)[0].eval()
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"For loop body returned 2 arguments. Expected: 1"):
functional_ops.For(0, 10, 1, [0.0], ReturnsTooManyArgs)[0].eval()
@test_util.run_deprecated_v1
def testGradient(self):
@function.Defun(dtypes.float32)
def Poly(x):
# y = 2x^3+3x^2+4x+8
return 2 * x * x * x + 3 * x * x + 4 * x + 8
@function.Defun(dtypes.float32)
def Grad(x):
# dy/dx = dy/dy * dy/dx = 1.0 * (6x^2+6x+4)
return functional_ops.Gradient([x, 1.0], Poly)[0]
with self.test_session(use_gpu=False) as sess:
a = constant_op.constant(0.)
avals = [Poly(a), Grad(a)]
b = constant_op.constant(1.)
bvals = [Poly(b), Grad(b)]
self.assertAllEqual(self.evaluate(avals), [8., 4.])
self.assertAllEqual(self.evaluate(bvals), [17., 16.])
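  # A direct cross-check (sketch) of the symbolic gradient exercised above:
  # Poly(x) = 2x^3 + 3x^2 + 4x + 8, hence Poly'(x) = 6x^2 + 6x + 4.
  def _py_poly_reference(self, x):
    poly = 2 * x**3 + 3 * x**2 + 4 * x + 8
    grad = 6 * x**2 + 6 * x + 4
    return poly, grad  # (8, 4) at x == 0 and (17, 16) at x == 1.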
# TODO(akshayka): Replace `function.Defun` with `tf.contrib.eager.defun` in
# the test cases below.
class PartitionedCallTest(test.TestCase):
@test_util.run_deprecated_v1
def testRemoteDeviceInPartitionedCallOp(self):
workers, _ = test_util.create_local_cluster(2, 0)
worker0_device = "/job:worker/replica:0/task:0/cpu:0"
worker1_device = "/job:worker/replica:0/task:1/cpu:0"
@eager_def_function.function
def f(a, b):
return a + b
with session.Session(workers[0].target) as sess:
with ops.device(worker0_device):
a = variable_scope.get_variable(
"a", initializer=constant_op.constant(1.), use_resource=True)
with ops.device(worker1_device):
b = variable_scope.get_variable(
"b", initializer=constant_op.constant(1.), use_resource=True)
sess.run(variables.global_variables_initializer())
config = config_pb2.ConfigProto()
config.experimental.share_cluster_devices_in_session = True
with session.Session(workers[0].target, config=config) as sess:
res = sess.run(f(a, b))
self.assertEqual(res, 2)
@test_util.run_deprecated_v1
def testBasicSingleDevice(self):
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
with ops.device("/cpu:0"):
a = x + x
b = y + y
return a + b
output, = self.evaluate(
functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body))
self.assertEqual(output, 6.)
@test_util.run_deprecated_v1
def testBasicMultiDevice(self):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
# if x = 1, y = 2, ...
with ops.device("/cpu:0"):
# a:= 1 + 1 = 2
a = x + x
with ops.device("/cpu:1"):
# b:= 2 + 2 = 4
b = a + y
with ops.device("/cpu:2"):
# c:= 2 + 4 = 6
c = a + b
# a + b + c = 2 + 4 + 6 = 12
return a + b + c
with self.test_session(config=config):
output, = functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body)
self.assertEqual(output.eval(), 12.)
@test_util.run_deprecated_v1
def testBasicMultiDeviceGPU(self):
if not test_util.is_gpu_available():
return
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
with ops.device("/gpu:0"):
a = x + x
b = y + y
with ops.device("/cpu:0"):
c = a + b
return c
output, = self.evaluate(
functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body))
self.assertEqual(output, 6.)
@test_util.run_deprecated_v1
def testBasicNoDeviceAnnotations(self):
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
a = x + x
b = y + y
return a + b
output, = self.evaluate(
functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body))
self.assertEqual(output, 6.)
@test_util.run_deprecated_v1
def testShardsRunOnRequestedDevices(self):
config = config_pb2.ConfigProto(device_count={"CPU": 4})
@function.Defun()
def Body():
# Serialize DT_RESOURCE handles as DT_STRINGs, which encode the device on
# which the resource was created, so that we can verify that ops were
# actually run on the requested devices.
#
# TODO(akshayka): Provide a cleaner, more idiomatic API for obtaining the
# name of the device on which a resource lives / for determining the
# device on which an op ran.
with ops.device("/cpu:0"):
s1 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
with ops.device("/cpu:1"):
s2 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
with ops.device("/cpu:2"):
s3 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
return s1, s2, s3
with self.test_session(config=config, use_gpu=True) as sess:
outputs = sess.run(functional_ops.partitioned_call(args=[], f=Body))
self.assertIn(compat.as_bytes("CPU:0"), outputs[0])
self.assertIn(compat.as_bytes("CPU:1"), outputs[1])
self.assertIn(compat.as_bytes("CPU:2"), outputs[2])
@test_util.run_deprecated_v1
def testAssignAddResourceVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@function.Defun()
def AssignAdd():
v.assign_add(1.0)
op = functional_ops.partitioned_call(
args=AssignAdd.captured_inputs, f=AssignAdd)
_ = self.evaluate(variables.global_variables_initializer())
_ = self.evaluate(op)
value = self.evaluate(v.read_value())
self.assertEqual(value, 2.0)
@test_util.run_deprecated_v1
def testFunctionWithResourcesOnDifferentDevices(self):
if not test_util.is_gpu_available():
self.skipTest("No GPUs available.")
with ops.device("/cpu:0"):
v_cpu_zero = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name="v_cpu_zero")
with ops.device("/cpu:1"):
v_cpu_one = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name="v_cpu_one")
with ops.device("/gpu:0"):
v_gpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name="v_gpu")
def sum_gather():
cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu_zero, [1, 2]))
also_cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu_one, [1, 2]))
gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2]))
return cpu_result, also_cpu_result, gpu_result
defined = function.Defun()(sum_gather)
with self.test_session(
config=config_pb2.ConfigProto(
allow_soft_placement=False,
log_device_placement=True,
device_count={"CPU": 2})) as sess:
self.evaluate(variables.global_variables_initializer())
expected = self.evaluate(sum_gather())
result = sess.run(
functional_ops.partitioned_call(
args=defined.captured_inputs, f=defined))
self.assertAllEqual(expected, result)
# Use an invalid executor name to test the plumbing of the executor_type attr.
@test_util.run_v1_only("b/120545219")
def testExecutorTypeAttrExecutorNotFound(self):
@function.Defun(dtypes.int32)
def AddFive(x):
return x + 5
op = functional_ops.partitioned_call(
args=[constant_op.constant([1, 2, 3], dtype=dtypes.int32)],
f=AddFive,
executor_type="NON_EXISTENT_EXECUTOR")
with self.assertRaisesRegexp(errors.NotFoundError,
"NON_EXISTENT_EXECUTOR"):
self.evaluate(op)
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class FunctionalOpsCaseTest(test.TestCase):
def testCase(self):
@eager_function.defun
def two(x):
return x * 2
@eager_function.defun
def three(x):
return x * 3
@eager_function.defun
def four(x):
return x * 4
def f(branch, x):
tmpl = array_ops.zeros_like(x)
return array_ops.identity(gen_functional_ops.case(
branch, input=[x], Tout=[dtypes.float32],
branches=[f.get_concrete_function(tmpl)
for f in (two, three, four)])[0])
one = array_ops.ones([])
self.assertAllEqual(np.float32(2), self.evaluate(f(0, one)))
self.assertAllEqual(np.float32(3), self.evaluate(f(1, one)))
self.assertAllEqual(np.float32(4), self.evaluate(f(2, one)))
self.assertAllEqual(np.float32(4), self.evaluate(f(-1, one))) # <0 default
self.assertAllEqual(np.float32(4), self.evaluate(f(6, one))) # >=N default
@test_util.run_deprecated_v1
@test_util.disable_xla("Don't lower for XLA")
def testSkipEagerCaseLoweringPreservesNameForFetch(self):
for use_gpu in (True, False):
def Run(branch, x, fetch_by_name, use_gpu=use_gpu):
with ops.Graph().as_default() as g:
@function.Defun(dtypes.float32)
def two(x):
return -1, x * 2
@function.Defun(dtypes.float32)
def three(x):
return 0, x * 3
@function.Defun(dtypes.float32)
def four(x):
return 1, x * 4
outputs = gen_functional_ops.case(branch, input=[x],
Tout=[dtypes.int32, dtypes.float32],
branches=[two, three, four],
name="my_case")
# `outputs` is the list of output tensors of the Case op. We
# arbitrarily choose the 0th tensor to get the Case op and set the
# lowering attribute on it.
outputs[0].op._set_attr("_lower_using_switch_merge",
attr_value_pb2.AttrValue(b=True))
outputs = array_ops.identity_n(outputs)
with self.session(graph=g, use_gpu=use_gpu) as sess:
return sess.run("my_case:1" if fetch_by_name else outputs[1])
self.assertAllEqual(2 * 1., Run(0, 1., False))
self.assertAllEqual(2 * 1., Run(0, 1., True))
self.assertAllEqual(3 * 7., Run(1, 7., False))
self.assertAllEqual(3 * 7., Run(1, 7., True))
self.assertAllEqual(4 * -3., Run(2, -3., False))
self.assertAllEqual(4 * -3., Run(2, -3., True))
self.assertAllEqual(4 * -4., Run(7, -4., False)) # >= N default
self.assertAllEqual(4 * -4., Run(7, -4., True)) # >= N default
self.assertAllEqual(4 * -5., Run(-1, -5., False)) # <0 default
self.assertAllEqual(4 * -5., Run(-1, -5., True)) # <0 default
@test_util.disable_xla("Don't lower for XLA")
def testCaseLowering(self):
for use_gpu in (True, False):
@eager_function.defun
def Run(branch, x):
@function.Defun(dtypes.float32)
def two(x):
return -1, x * 2
@function.Defun(dtypes.float32)
def three(x):
return 0, x * 3
@function.Defun(dtypes.float32)
def four(x):
return 1, x * 4
outputs = gen_functional_ops.case(branch, input=[x],
Tout=[dtypes.int32, dtypes.float32],
branches=[two, three, four])
# `outputs` is the list of output tensors of the Case op. We
# arbitrarily choose the 0th tensor to get the Case op and set the
# lowering attribute on it.
outputs[0].op._set_attr("_lower_using_switch_merge",
attr_value_pb2.AttrValue(b=True))
outputs = array_ops.identity_n(outputs)
return outputs[1]
with ops.device(test.gpu_device_name() if use_gpu else "CPU:0"):
self.assertAllEqual(2 * 1., self.evaluate(Run(0, 1.)))
self.assertAllEqual(3 * 7., self.evaluate(Run(1, 7.)))
self.assertAllEqual(4 * -3., self.evaluate(Run(2, -3.)))
self.assertAllEqual(4 * -4., self.evaluate(Run(7, -4.))) # >=N default
self.assertAllEqual(4 * -5., self.evaluate(Run(-1, -5.))) # <0 default
if __name__ == "__main__":
test.main()
# pylint: enable=invalid-name
|
tensorflow-master
|
tensorflow/python/kernel_tests/functional_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in sparse_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
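# A minimal usage sketch for _sparsify (illustrative only; not used by the
# tests below): entries under `thresh` are zeroed in place and the survivors
# become the SparseTensor's values, with `nnz` counting the retained elements.
def _sparsify_example():
  dense = np.array([[0.2, 0.9], [0.7, 0.1]])
  sp, nnz = _sparsify(dense, thresh=0.5)
  return sp, nnz  # indices [[0, 1], [1, 0]], values [0.9, 0.7], nnz == 2.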
class SparseToIndicatorTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self, dtype):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x3x4(self, dtype):
# Includes two entries with the form [1, 1, x] : 150.
ind = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 0],
[1, 1, 1], [1, 1, 2], [1, 2, 2]])
val = np.array([1, 10, 12, 103, 150, 149, 150, 122])
shape = np.array([2, 3, 4])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, dtypes.int64))
def testInt32(self):
with test_util.force_cpu():
sp_input = self._SparseTensor_5x6(dtypes.int32)
output = sparse_ops.sparse_to_indicator(sp_input, 50)
expected_output = np.zeros((5, 50), dtype=np.bool)
expected_trues = ((0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33))
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
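  # A numpy sketch (rank-2 case) of what sparse_to_indicator computes: each
  # value v found in row r turns on indicator[r, v]; the column index within
  # the row is ignored, matching the expected_trues construction above.
  def _np_to_indicator(self, ind, val, num_rows, vocab_size):
    indicator = np.zeros((num_rows, vocab_size), dtype=bool)
    for (row, _), v in zip(ind, val):
      indicator[row, v] = True
    return indicator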
def testInt64(self):
with test_util.force_cpu():
sp_input = self._SparseTensor_5x6(dtypes.int64)
output = sparse_ops.sparse_to_indicator(sp_input, 50)
expected_output = np.zeros((5, 50), dtype=np.bool)
expected_trues = [(0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33)]
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
def testHigherRank(self):
with test_util.force_cpu():
sp_input = self._SparseTensor_2x3x4(dtypes.int64)
output = sparse_ops.sparse_to_indicator(sp_input, 200)
expected_output = np.zeros((2, 3, 200), dtype=np.bool)
expected_trues = [(0, 0, 1), (0, 1, 10), (0, 1, 12), (1, 0, 103),
(1, 1, 149), (1, 1, 150), (1, 2, 122)]
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
class SparseMergeTest(test_util.TensorFlowTestCase):
def _SparseTensorValue_3x50(self, indices_dtype, values_dtype):
# NOTE: This input is intentionally not sorted to validate the
# already_sorted flag below.
ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
# NB: these are not sorted
indices = np.array([0, 13, 10, 33, 32, 14])
values = np.array([-3, 4, 1, 9, 5, 1])
shape = np.array([3, 3])
indices = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(indices, indices_dtype), np.array(shape, np.int64))
values = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(values, values_dtype), np.array(shape, np.int64))
return indices, values
def _SparseTensor_3x50(self, indices_dtype, values_dtype):
indices, values = self._SparseTensorValue_3x50(indices_dtype, values_dtype)
return (sparse_tensor.SparseTensor.from_value(indices),
sparse_tensor.SparseTensor.from_value(values))
def _AssertResultsSorted(self, output, vocab_size):
self.assertAllEqual(output.indices,
[[0, 0], [1, 10], [1, 13], [1, 14], [2, 32], [2, 33]])
self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
self.assertAllEqual(output.dense_shape, [3, vocab_size])
def _AssertResultsNotSorted(self, output, vocab_size):
self.assertAllEqual(output.indices,
[[0, 0], [1, 13], [1, 10], [2, 33], [2, 32], [1, 14]])
self.assertAllEqual(output.values, [-3, 4, 1, 9, 5, 1])
self.assertAllEqual(output.dense_shape, [3, vocab_size])
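  # A reference sketch (rank-2 case, single ids tensor) of what sparse_merge
  # builds: the row comes from the sparse position, the column from the ids
  # value, and the payload from sp_values at the same position.
  def _py_merge_reference(self, ind, ids, vals, num_rows, vocab_size):
    indices = [[row, col] for (row, _), col in zip(ind, ids)]
    return indices, list(vals), [num_rows, vocab_size]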
def testInt32AndFloat32(self):
vocab_size = 50
indices_v, values_v = self._SparseTensorValue_3x50(np.int32, np.float32)
with test_util.force_cpu():
for indices in (indices_v,
sparse_tensor.SparseTensor.from_value(indices_v)):
for values in (values_v,
sparse_tensor.SparseTensor.from_value(values_v)):
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = self.evaluate(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt64AndFloat32(self):
vocab_size = 50
with test_util.force_cpu():
indices, values = self._SparseTensor_3x50(np.int64, np.float32)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = self.evaluate(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt64AndFloat64(self):
vocab_size = 50
with test_util.force_cpu():
indices, values = self._SparseTensor_3x50(np.int64, np.float64)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = self.evaluate(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt32AndFloat32NonCanonicalOrder(self):
vocab_size = 50
with test_util.force_cpu():
indices, values = self._SparseTensor_3x50(np.int32, np.float32)
sp_output = sparse_ops.sparse_merge(
indices, values, vocab_size, already_sorted=True)
output = self.evaluate(sp_output)
self._AssertResultsNotSorted(output, vocab_size)
def testInt64AndFloat32NonCanonicalOrder(self):
vocab_size = 50
with test_util.force_cpu():
indices, values = self._SparseTensor_3x50(np.int64, np.float32)
sp_output = sparse_ops.sparse_merge(
indices, values, vocab_size, already_sorted=True)
output = self.evaluate(sp_output)
self._AssertResultsNotSorted(output, vocab_size)
def testInt64AndFloat64NonCanonicalOrder(self):
vocab_size = 50
vocab_size_tensor = constant_op.constant(vocab_size, dtypes.int64)
with test_util.force_cpu():
indices, values = self._SparseTensor_3x50(np.int64, np.float64)
sp_output = sparse_ops.sparse_merge(
indices, values, vocab_size_tensor, already_sorted=True)
output = self.evaluate(sp_output)
self._AssertResultsNotSorted(output, vocab_size)
def testShouldSetLastDimensionInDynamicShape(self):
with ops.Graph().as_default():
shape = constant_op.constant([2, 2], dtype=dtypes.int64)
dynamic_shape = array_ops.placeholder_with_default(shape, shape=[2])
ids = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]],
values=[1, 3],
dense_shape=dynamic_shape)
values = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]],
values=[0.4, 0.7],
dense_shape=dynamic_shape)
merged = sparse_ops.sparse_merge(
sp_ids=ids, sp_values=values, vocab_size=5)
self.assertEqual(5, merged.get_shape()[1])
class SparseMergeHighDimTest(test_util.TensorFlowTestCase):
def _SparseTensor_3x50(self, indices_dtype, values_dtype):
# NOTE: This input is intentionally not sorted to validate the
# already_sorted flag below.
ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
# NB: these are not sorted
indices0 = np.array([0, 13, 10, 33, 32, 14])
indices1 = np.array([12, 4, 0, 0, 1, 30])
values = np.array([-3, 4, 1, 9, 5, 1])
shape = np.array([3, 3])
indices0 = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(indices0, indices_dtype), np.array(shape, np.int64))
indices1 = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(indices1, indices_dtype), np.array(shape, np.int64))
values = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(values, values_dtype), np.array(shape, np.int64))
return ([sparse_tensor.SparseTensor.from_value(indices0),
sparse_tensor.SparseTensor.from_value(indices1)],
sparse_tensor.SparseTensor.from_value(values))
def _AssertResultsSorted(self, output, vocab_size):
self.assertAllEqual(
output.indices,
[[0, 0, 12], [1, 10, 0], [1, 13, 4], [1, 14, 30], [2, 32, 1],
[2, 33, 0]])
self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
self.assertAllEqual(output.dense_shape, [3] + vocab_size)
def testInt64AndFloat32(self):
vocab_size = [50, 31]
with test_util.force_cpu():
indices, values = self._SparseTensor_3x50(np.int64, np.float32)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = self.evaluate(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt64AndFloat64(self):
vocab_size = [50, 31]
with test_util.force_cpu():
indices, values = self._SparseTensor_3x50(np.int64, np.float64)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = self.evaluate(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt64AndFloat64Shape(self):
vocab_size = [50, 30]
with test_util.force_cpu():
indices, values = self._SparseTensor_3x50(np.int64, np.float64)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = self.evaluate(sp_output)
self._AssertResultsSorted(output, vocab_size)
class SparseRetainTest(test_util.TensorFlowTestCase):
def _SparseTensorValue_5x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.int32), np.array(shape, np.int64))
def _SparseTensor_5x6(self):
return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())
def testBasic(self):
with test_util.force_cpu():
for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
to_retain = np.array([1, 0, 0, 1, 1, 0], dtype=np.bool)
sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
output = self.evaluate(sp_output)
self.assertAllEqual(output.indices, [[0, 0], [1, 4], [3, 2]])
self.assertAllEqual(output.values, [0, 14, 32])
self.assertAllEqual(output.dense_shape, [5, 6])
def testRetainNone(self):
with test_util.force_cpu():
sp_input = self._SparseTensor_5x6()
to_retain = np.zeros((6,), dtype=np.bool)
sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
output = self.evaluate(sp_output)
self.assertAllEqual(output.indices, np.array([]).reshape((0, 2)))
self.assertAllEqual(output.values, [])
self.assertAllEqual(output.dense_shape, [5, 6])
def testMismatchedRetainShape(self):
with test_util.force_cpu():
sp_input = self._SparseTensor_5x6()
to_retain = np.array([1, 0, 0, 1, 0], dtype=np.bool)
with self.assertRaises(ValueError):
sparse_ops.sparse_retain(sp_input, to_retain)
class SparseResetShapeTest(test_util.TensorFlowTestCase):
_IND_2_5_6 = np.array(
[[0, 0, 0], [0, 1, 0], [0, 1, 3], [1, 1, 4], [1, 3, 2], [1, 3, 3]],
dtype=np.int64)
_VAL_2_5_6 = np.array([0, 10, 13, 14, 32, 33], dtype=np.int32)
_SHP_2_5_6 = np.array([2, 5, 6], dtype=np.int64)
def _SparseTensor_2x5x6(self):
return sparse_tensor.SparseTensor(
constant_op.constant(self._IND_2_5_6, dtypes.int64),
constant_op.constant(self._VAL_2_5_6, dtypes.int32),
constant_op.constant(self._SHP_2_5_6, dtypes.int64))
def _SparseTensor_2x5x6_Empty(self):
return sparse_tensor.SparseTensor(
constant_op.constant(
np.empty(shape=[0, 3], dtype=np.int64), dtypes.int64),
constant_op.constant(np.empty(shape=[0], dtype=np.int32), dtypes.int32),
constant_op.constant(self._SHP_2_5_6, dtypes.int64))
def _SparseTensorValue_2x5x6(self):
return sparse_tensor.SparseTensorValue(self._IND_2_5_6, self._VAL_2_5_6,
self._SHP_2_5_6)
def testStaticShapeInfoPreservedWhenNewShapeIsProvidedAndStatic(self):
sp_input = self._SparseTensor_2x5x6()
new_shape = np.array([3, 6, 7], dtype=np.int64)
sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
self.assertAllEqual([3, 6, 7], sp_output.get_shape())
def testBasic(self):
with test_util.force_cpu():
sp_input = self._SparseTensor_2x5x6()
new_shape = np.array([3, 6, 7], dtype=np.int64)
sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
output = self.evaluate(sp_output)
self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
[1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [3, 6, 7])
def testInputUnavailableInGraphConstructionOk(self):
with test_util.force_cpu():
sp_input = self._SparseTensorValue_2x5x6()
new_shape = np.array([3, 6, 7], dtype=np.int64)
sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
output = self.evaluate(sp_output)
self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
[1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [3, 6, 7])
@test_util.run_deprecated_v1
def testFeedInputUnavailableInGraphConstructionOk(self):
with self.session(use_gpu=False) as sess:
sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
new_shape = np.array([3, 6, 7], dtype=np.int64)
sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
output = sess.run(sp_output,
feed_dict={sp_input: self._SparseTensorValue_2x5x6()})
self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
[1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [3, 6, 7])
def testTightBoundingBox(self):
with test_util.force_cpu():
sp_input = self._SparseTensor_2x5x6()
sp_output = sparse_ops.sparse_reset_shape(sp_input)
output = self.evaluate(sp_output)
self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
[1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [2, 4, 5])
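  # The "tight bounding box" rule checked above, as a one-line numpy sketch:
  # without new_shape, each output dimension is max(index) + 1 on that axis.
  def _np_tight_bounding_box(self, indices):
    return (np.max(indices, axis=0) + 1).tolist()  # [2, 4, 5] for _IND_2_5_6.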
def testTightBoundingBoxEmpty(self):
with test_util.force_cpu():
sp_input = self._SparseTensor_2x5x6_Empty()
sp_output = sparse_ops.sparse_reset_shape(sp_input)
output = self.evaluate(sp_output)
self.assertAllEqual(output.indices.shape, [0, 3])
self.assertAllEqual(output.values.shape, [0])
self.assertAllEqual(output.dense_shape, [0, 0, 0])
def testInvalidRank(self):
with test_util.force_cpu():
sp_input = self._SparseTensor_2x5x6()
new_shape = np.array([3, 7], dtype=np.int64)
with self.assertRaises(ValueError):
sparse_ops.sparse_reset_shape(sp_input, new_shape)
@test_util.run_deprecated_v1
def testInvalidRankNewShapeUnavailableInGraphConstruction(self):
with self.session(use_gpu=False) as sess:
new_shape = array_ops.placeholder(dtype=dtypes.int64)
sp_input = self._SparseTensor_2x5x6()
out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
with self.assertRaisesOpError("x == y did not hold element-wise"):
sess.run(out, feed_dict={new_shape: np.array([3, 7], dtype=np.int64)})
def testInvalidDimensionSizeStatic(self):
sp_input = self._SparseTensor_2x5x6()
new_shape = np.array([3, 7, 5], dtype=np.int64)
with self.assertRaisesRegexp(ValueError, "should have dimension sizes"):
sparse_ops.sparse_reset_shape(sp_input, new_shape)
@test_util.run_deprecated_v1
def testInvalidDimensionSizeDynamic(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensor_2x5x6()
new_shape = array_ops.placeholder(dtype=dtypes.int32)
out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
with self.assertRaisesOpError("x <= y did not hold element-wise"):
sess.run(out, feed_dict={new_shape: [3, 7, 5]})
@test_util.run_deprecated_v1
def testInvalidDimensionSizeInputUnavailableInGraphConstruction(self):
sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
with self.session(use_gpu=False) as sess:
new_shape = np.array([3, 7, 5], dtype=np.int64)
out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
with self.assertRaisesOpError("x <= y did not hold element-wise"):
sess.run(out, feed_dict={sp_input: self._SparseTensorValue_2x5x6()})
class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
def _SparseTensorValue_5x6(self, dtype=np.int32):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64), np.array(val, dtype), np.array(
shape, np.int64))
def _SparseTensor_5x6(self):
return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())
def _SparseTensor_String5x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array(["a", "b", "c", "d", "e", "f"])
shape = np.array([5, 6])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.string),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4]])
val = np.array([0, 10, 13, 14])
shape = np.array([2, 6])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
def testFillNumber(self):
with test_util.force_cpu():
for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, -1))
output, empty_row_indicator_out = self.evaluate(
[sp_output, empty_row_indicator])
self.assertAllEqual(
output.indices,
[[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllEqual(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
self.assertAllEqual(output.dense_shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
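  # A compact sketch of the fill rule verified above: every row with no
  # entries gains a single [row, 0] entry holding the default value, and the
  # empty-row indicator marks exactly those rows.
  def _py_fill_empty_rows_reference(self, ind, num_rows, default):
    occupied = {row for row, _ in ind}
    extra = [([row, 0], default) for row in range(num_rows)
             if row not in occupied]
    indicator = [row not in occupied for row in range(num_rows)]
    return extra, indicator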
@test_util.run_deprecated_v1
def testFillFloat(self):
with self.session(use_gpu=False):
values = constant_op.constant(
[0.0, 10.0, 13.0, 14.0, 32.0, 33.0], dtype=dtypes.float64)
default_value = constant_op.constant(-1.0, dtype=dtypes.float64)
sp_input = sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]),
values=values,
dense_shape=np.array([5, 6]))
sp_output, empty_row_indicator = (sparse_ops.sparse_fill_empty_rows(
sp_input, default_value))
output, empty_row_indicator_out = self.evaluate(
[sp_output, empty_row_indicator])
self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4],
[2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllClose(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
self.assertAllEqual(output.dense_shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
values_grad_err = gradient_checker.compute_gradient_error(
values, values.shape.as_list(), sp_output.values, [8], delta=1e-8)
self.assertGreater(values_grad_err, 0)
self.assertLess(values_grad_err, 1e-8)
default_value_grad_err = gradient_checker.compute_gradient_error(
default_value,
default_value.shape.as_list(),
sp_output.values, [8],
delta=1e-8)
self.assertGreater(default_value_grad_err, 0)
self.assertLess(default_value_grad_err, 1e-8)
def testFillString(self):
with test_util.force_cpu():
sp_input = self._SparseTensor_String5x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, ""))
output, empty_row_indicator_out = self.evaluate(
[sp_output, empty_row_indicator])
self.assertAllEqual(
output.indices,
[[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllEqual(output.values,
[b"a", b"b", b"c", b"d", b"", b"e", b"f", b""])
self.assertAllEqual(output.dense_shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
def testNoEmptyRows(self):
with test_util.force_cpu():
sp_input = self._SparseTensor_2x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, -1))
output, empty_row_indicator_out = self.evaluate(
[sp_output, empty_row_indicator])
self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4]])
self.assertAllEqual(output.values, [0, 10, 13, 14])
self.assertAllEqual(output.dense_shape, [2, 6])
self.assertAllEqual(empty_row_indicator_out, np.zeros(2).astype(np.bool))
class SparseAddTest(test_util.TensorFlowTestCase):
def testValuesInVariable(self):
indices = constant_op.constant([[1]], dtype=dtypes.int64)
values = variables.Variable([1], trainable=False, dtype=dtypes.float32)
shape = constant_op.constant([1], dtype=dtypes.int64)
sp_input = sparse_tensor.SparseTensor(indices, values, shape)
sp_output = sparse_ops.sparse_add(sp_input, sp_input)
with test_util.force_cpu():
self.evaluate(variables.global_variables_initializer())
output = self.evaluate(sp_output)
self.assertAllEqual(output.values, [2])
class SparseReduceTest(test_util.TensorFlowTestCase):
  # [[1, ?, 1]
  #  [?, 1, ?]]
  # where ? is implicitly-zero.
ind = np.array([[0, 0], [0, 2], [1, 1]]).astype(np.int64)
vals = np.array([1, 1, 1]).astype(np.int32)
dense_shape = np.array([2, 3]).astype(np.int64)
def _compare(self, sp_t, reduction_axes, ndims, keep_dims, do_sum):
densified = self.evaluate(sparse_ops.sparse_tensor_to_dense(sp_t))
np_ans = densified
if reduction_axes is None:
if do_sum:
np_ans = np.sum(np_ans, keepdims=keep_dims)
else:
np_ans = np.max(np_ans, keepdims=keep_dims)
else:
if not isinstance(reduction_axes, list): # Single scalar.
reduction_axes = [reduction_axes]
reduction_axes = np.array(reduction_axes).astype(np.int32)
# Handles negative axes.
reduction_axes = (reduction_axes + ndims) % ndims
# Loop below depends on sorted.
reduction_axes.sort()
for ra in reduction_axes.ravel()[::-1]:
if do_sum:
np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
else:
np_ans = np.max(np_ans, axis=ra, keepdims=keep_dims)
with self.cached_session():
if do_sum:
tf_dense_ans = sparse_ops.sparse_reduce_sum(sp_t, reduction_axes,
keep_dims)
else:
tf_dense_ans = sparse_ops.sparse_reduce_max(sp_t, reduction_axes,
keep_dims)
out_dense = self.evaluate(tf_dense_ans)
if do_sum:
tf_sparse_ans = sparse_ops.sparse_reduce_sum_sparse(sp_t,
reduction_axes,
keep_dims)
else:
tf_sparse_ans = sparse_ops.sparse_reduce_max_sparse(sp_t,
reduction_axes,
keep_dims)
# Convert to dense for comparison purposes.
out_sparse = sparse_ops.sparse_tensor_to_dense(tf_sparse_ans)
self.assertAllClose(np_ans, out_dense)
self.assertAllClose(np_ans, out_sparse)
def _compare_all(self, sp_t, reduction_axes, ndims):
self._compare(sp_t, reduction_axes, ndims, False, False)
self._compare(sp_t, reduction_axes, ndims, False, True)
self._compare(sp_t, reduction_axes, ndims, True, False)
self._compare(sp_t, reduction_axes, ndims, True, True)
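  # The numpy model _compare relies on, restated as a standalone sketch for
  # already-normalized axes: reduce from the innermost axis outward so the
  # remaining axis numbers stay valid when keep_dims is False.
  def _np_reduce_reference(self, dense, axes, keep_dims, do_sum):
    op = np.sum if do_sum else np.max
    for ra in sorted(axes)[::-1]:
      dense = op(dense, axis=ra, keepdims=keep_dims)
    return dense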
  # TODO(b/133851381): Re-enable this test.
def disabledtestSimpleAndRandomInputs(self):
if np.__version__ == "1.13.0":
self.skipTest("numpy 1.13.0 bug")
sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
with test_util.force_cpu():
self._compare_all(sp_t, None, ndims=2)
self._compare_all(sp_t, 0, ndims=2)
self._compare_all(sp_t, [1], ndims=2)
self._compare_all(sp_t, [0, 1], ndims=2)
self._compare_all(sp_t, [1, 0], ndims=2)
self._compare_all(sp_t, [-1], ndims=2)
self._compare_all(sp_t, [1, -2], ndims=2)
np.random.seed(1618)
test_dims = [(1618, 1, 11, 7, 1), (1,), (1, 1, 1)]
with test_util.force_cpu():
for dims in test_dims:
sp_t, unused_nnz = _sparsify(np.random.randn(*dims))
# reduce all using None
self._compare_all(sp_t, None, ndims=len(dims))
# reduce random axes from 1D to N-D
for d in range(1, len(dims) + 1):
axes = np.random.choice(len(dims), size=d, replace=False).tolist()
self._compare_all(sp_t, axes, ndims=len(dims))
def testInvalidAxes(self):
sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
with test_util.force_cpu():
with self.assertRaisesOpError("Invalid reduction dimension -3"):
self.evaluate(sparse_ops.sparse_reduce_sum(sp_t, -3))
with self.assertRaisesOpError("Invalid reduction dimension 2"):
self.evaluate(sparse_ops.sparse_reduce_sum(sp_t, 2))
with self.assertRaisesOpError("Invalid reduction dimension -3"):
self.evaluate(sparse_ops.sparse_reduce_max(sp_t, -3))
with self.assertRaisesOpError("Invalid reduction dimension 2"):
self.evaluate(sparse_ops.sparse_reduce_max(sp_t, 2))
@test_util.run_deprecated_v1
def testGradient(self):
if np.__version__ == "1.13.0":
self.skipTest("numpy 1.13.0 bug")
np.random.seed(8161)
test_dims = [(11, 1, 5, 7, 1), (2, 2)]
with self.session(use_gpu=False):
for dims in test_dims:
sp_t, nnz = _sparsify(np.random.randn(*dims))
# reduce random axes from 1D to N-D
for d in range(1, len(dims) + 1):
axes = np.random.choice(len(dims), size=d, replace=False).tolist()
reduced = sparse_ops.sparse_reduce_sum(sp_t, axes)
err = gradient_checker.compute_gradient_error(
sp_t.values, (nnz,), reduced,
self.evaluate(reduced).shape)
self.assertLess(err, 1e-3)
# Tests for negative axes.
reduced = sparse_ops.sparse_reduce_sum(sp_t, -1)
err = gradient_checker.compute_gradient_error(
sp_t.values, (nnz,), reduced,
self.evaluate(reduced).shape)
self.assertLess(err, 1e-3)
def _testSparseReduceShape(self, sp_t, reduction_axes, ndims, keep_dims,
do_sum):
densified = self.evaluate(sparse_ops.sparse_tensor_to_dense(sp_t))
np_op = np.sum
tf_op = sparse_ops.sparse_reduce_sum
if not do_sum:
np_op = np.max
tf_op = sparse_ops.sparse_reduce_max
np_ans = densified
if reduction_axes is None:
np_ans = np_op(np_ans, keepdims=keep_dims)
else:
if not isinstance(reduction_axes, list): # Single scalar.
reduction_axes = [reduction_axes]
reduction_axes = np.array(reduction_axes).astype(np.int32)
# Handles negative axes.
reduction_axes = (reduction_axes + ndims) % ndims
# Loop below depends on sorted.
reduction_axes.sort()
for ra in reduction_axes.ravel()[::-1]:
np_ans = np_op(np_ans, axis=ra, keepdims=keep_dims)
tf_ans = tf_op(sp_t, reduction_axes, keep_dims)
self.assertAllEqual(np_ans.shape, tf_ans.get_shape().as_list())
  # TODO(b/133851381): Re-enable this test.
def disabledtestSparseReduceSumOrMaxShape(self):
sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
with test_util.force_cpu():
for do_sum in [True, False]:
for keep_dims in [True, False]:
self._testSparseReduceShape(sp_t, None, 2, keep_dims, do_sum)
self._testSparseReduceShape(sp_t, 0, 2, keep_dims, do_sum)
self._testSparseReduceShape(sp_t, [1], 2, keep_dims, do_sum)
self._testSparseReduceShape(sp_t, [0, 1], 2, keep_dims, do_sum)
self._testSparseReduceShape(sp_t, [1, 0], 2, keep_dims, do_sum)
self._testSparseReduceShape(sp_t, [-1], 2, keep_dims, do_sum)
self._testSparseReduceShape(sp_t, [1, -2], 2, keep_dims, do_sum)
class SparseMathOpsTest(test_util.TensorFlowTestCase):
def _check(self, result_tensor, result_np, input_sp_t):
self.assertTrue(isinstance(result_tensor, sparse_tensor.SparseTensor))
self.assertTrue(isinstance(input_sp_t, sparse_tensor.SparseTensor))
self.assertAllEqual(input_sp_t.indices, result_tensor.indices)
self.assertAllEqual(input_sp_t.dense_shape, result_tensor.dense_shape)
res_densified = sparse_ops.sparse_to_dense(
result_tensor.indices, result_tensor.dense_shape, result_tensor.values)
self.assertAllEqual(result_np, res_densified)
@test_util.run_deprecated_v1
def testCwiseShapeValidation(self):
    # Test case for GitHub issue 24072.
with test_util.force_cpu():
a = array_ops.ones([3, 4, 1], dtype=dtypes.int32)
b = sparse_tensor.SparseTensor([[0, 0, 1, 0], [0, 0, 3, 0]], [10, 20],
[1, 1, 4, 2])
c = a * b
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"broadcasts dense to sparse only; got incompatible shapes"):
self.evaluate(c)
def testCwiseDivAndMul(self):
np.random.seed(1618)
sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
with test_util.force_cpu():
for dtype in [np.float32, np.float64, np.int32, np.int64]:
for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
sp_t, unused_nnz = _sparsify(sp_vals_np, thresh=1.5)
sp_t_densified = sparse_ops.sparse_tensor_to_dense(sp_t)
dense_t = constant_op.constant(dense_vals_np)
self._check(sp_t / dense_t, sp_t_densified / dense_vals_np, sp_t)
# Check commutative.
self._check(sp_t * dense_t, sp_t_densified * dense_vals_np, sp_t)
self._check(dense_t * sp_t, sp_t_densified * dense_vals_np, sp_t)
if dtype in [np.int32, np.int64]:
res = sp_t / dense_t # should invoke "__truediv__"
self.assertEqual(res.values.dtype, np.float64)
def testCwiseAdd(self):
with test_util.force_cpu():
# Identity(2) + AllOnes(2,2). Should be equal to 2 * Identity(2).
indices = [[0, 0], [1, 1]]
vals = [1, 1]
shape = (2, 2)
sp_t = sparse_tensor.SparseTensor(indices, vals, shape)
dense_t = array_ops.ones(shape, dtype=dtypes.int32)
self._check(
sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
np.identity(2) * 2, sp_t)
# Variant of above, but broadcasts the dense side.
dense_t = array_ops.ones([1], dtype=dtypes.int32)
self._check(
sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
np.identity(2) * 2, sp_t)
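  # A sketch of the semantics exercised above (ignoring the broadcasting
  # variant): the dense operand is only read at the sparse positions, so
  # implicit zeros in the sparse input remain implicit zeros in the output.
  def _np_cwise_add_reference(self, indices, vals, dense):
    return [v + dense[tuple(i)] for i, v in zip(indices, vals)]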
@test_util.run_deprecated_v1
def testGradients(self):
np.random.seed(1618)
sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
with self.session(use_gpu=False):
for dtype in [np.float32, np.float64]:
for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
sp_t, nnz = _sparsify(sp_vals_np, thresh=1.5)
dense_t = constant_op.constant(dense_vals_np)
cmul = sp_t * dense_t
err = gradient_checker.compute_gradient_error([sp_t.values, dense_t],
[(nnz,), dense_shape],
cmul.values, (nnz,))
self.assertLess(err, 1e-4)
cdiv = sp_t / dense_t
err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
cdiv.values, (nnz,))
self.assertLess(err, 1e-4)
err = gradient_checker.compute_gradient_error(
dense_t,
dense_shape,
cdiv.values, (nnz,),
x_init_value=dense_vals_np)
self.assertLess(err, 2e-4)
class SparseSoftmaxTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testEquivalentToDensified(self):
np.random.seed(1618)
n, m = np.random.choice(20, size=2)
for dtype in [np.float32, np.float64]:
sp_vals_np = np.random.rand(n, m).astype(dtype)
batched_sp_t, unused_nnz1 = _sparsify(
sp_vals_np.reshape((1, n, m)), thresh=0.) # No masking.
with test_util.force_cpu():
densified = constant_op.constant(sp_vals_np)
sp_result = self.evaluate(
sparse_ops.sparse_softmax(batched_sp_t)).values.reshape((n, m))
dense_result = nn_ops.softmax(densified)
self.assertAllClose(dense_result, sp_result)
def testHigherRanks(self):
# For the first shape:
# First batch:
# [? e.]
# [1. ? ]
# Second batch:
# [e ? ]
# [e e ]
#
# The softmax results should be:
# [? 1.] [1 ?]
# [1. ? ] and [.5 .5]
# where ? means implicitly zero.
#
# The second shape: same input data, but with a higher-rank shape.
shapes = [[2, 2, 2], [2, 1, 2, 2]]
for shape in shapes:
values = np.asarray(
[0., np.e, 1., 0., np.e, 0., np.e, np.e]).reshape(shape)
sp_t, unused_nnz = _sparsify(values, thresh=1e-2)
expected_values = [1., 1., 1., .5, .5]
with test_util.force_cpu():
result = sparse_ops.sparse_softmax(sp_t)
self.assertAllEqual(expected_values, result.values)
self.assertAllEqual(sp_t.indices, result.indices)
self.assertAllEqual(shape, result.dense_shape)
@test_util.run_deprecated_v1
def testGradient(self):
x_shape = [2, 5, 10]
with self.cached_session(use_gpu=False):
for dtype in [np.float32, np.float64]:
x_np = np.random.randn(*x_shape).astype(dtype)
x_tf, nnz = _sparsify(x_np)
y_tf = sparse_ops.sparse_softmax(x_tf)
err = gradient_checker.compute_gradient_error(x_tf.values, (nnz,),
y_tf.values, (nnz,))
self.assertLess(err, 1e-4)
class SparseMinimumMaximumTest(test_util.TensorFlowTestCase):
def _assertSparseTensorValueEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def testBasic(self):
with test_util.force_cpu():
# 1-D, values at index 0.
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[0]], [1], [7])
max_tf = sparse_ops.sparse_maximum(sp_zero, sp_one)
min_tf = sparse_ops.sparse_minimum(sp_zero, sp_one)
self._assertSparseTensorValueEqual(sp_one, max_tf)
self._assertSparseTensorValueEqual(sp_zero, min_tf)
# Values at different indices.
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_zero_2 = sparse_tensor.SparseTensor([[1]], [0], [7])
expected = sparse_tensor.SparseTensor([[0], [1]], [0, 0], [7])
max_tf = sparse_ops.sparse_maximum(sp_zero, sp_zero_2)
min_tf = sparse_ops.sparse_minimum(sp_zero, sp_zero_2)
self._assertSparseTensorValueEqual(expected, max_tf)
self._assertSparseTensorValueEqual(expected, min_tf)
@test_util.run_deprecated_v1
def testRandom(self):
np.random.seed(1618)
shapes = [(13,), (6, 8), (1, 7, 1)]
for shape in shapes:
for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:
a_np = np.random.randn(*shape).astype(dtype)
b_np = np.random.randn(*shape).astype(dtype)
sp_a, unused_a_nnz = _sparsify(a_np, thresh=-.5)
sp_b, unused_b_nnz = _sparsify(b_np, thresh=-.5)
with self.cached_session(use_gpu=False):
maximum_tf = sparse_ops.sparse_maximum(sp_a, sp_b)
maximum_tf_densified = sparse_ops.sparse_tensor_to_dense(
maximum_tf).eval()
minimum_tf = sparse_ops.sparse_minimum(sp_a, sp_b)
minimum_tf_densified = sparse_ops.sparse_tensor_to_dense(
minimum_tf).eval()
a_densified = sparse_ops.sparse_tensor_to_dense(sp_a).eval()
b_densified = sparse_ops.sparse_tensor_to_dense(sp_b).eval()
self.assertAllEqual(
np.maximum(a_densified, b_densified), maximum_tf_densified)
self.assertAllEqual(
np.minimum(a_densified, b_densified), minimum_tf_densified)
def testMismatchedShapes(self):
with test_util.force_cpu():
sp_zero = sparse_tensor.SparseTensor([[0, 0]], [0], [1, 1])
sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
with self.assertRaisesOpError("Operands do not have the same ranks"):
self.evaluate(sparse_ops.sparse_maximum(sp_zero, sp_one))
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [1])
sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
with self.assertRaisesOpError("Operands' shapes do not match"):
self.evaluate(sparse_ops.sparse_maximum(sp_zero, sp_one))
class SparseTransposeTest(test.TestCase):
def testTranspose(self):
if np.__version__ == "1.13.0":
self.skipTest("numpy 1.13.0 bug")
with test_util.force_cpu():
np.random.seed(1618)
shapes = [np.random.randint(1, 10, size=rank) for rank in range(1, 6)]
for shape in shapes:
for dtype in [np.int32, np.int64, np.float32, np.float64]:
dn_input = np.random.randn(*shape).astype(dtype)
rank = self.evaluate(array_ops.rank(dn_input))
perm = np.random.choice(rank, rank, False)
sp_input, unused_a_nnz = _sparsify(dn_input)
sp_trans = sparse_ops.sparse_transpose(sp_input, perm=perm)
dn_trans = sparse_ops.sparse_tensor_to_dense(sp_trans)
expected_trans = array_ops.transpose(dn_input, perm=perm)
self.assertAllEqual(expected_trans.shape, sp_trans.get_shape())
self.assertAllEqual(dn_trans, expected_trans)
class SparsePlaceholderTest(test.TestCase):
@test_util.run_deprecated_v1
def testPlaceholder(self):
foo = array_ops.sparse_placeholder(dtypes.float32, shape=(10, 47))
self.assertAllEqual([10, 47], foo.get_shape())
self.assertAllEqual([None, 2], foo.indices.get_shape().as_list())
@test_util.run_deprecated_v1
def testPartialShapePlaceholder(self):
foo = array_ops.sparse_placeholder(dtypes.float32, shape=(None, 47))
self.assertAllEqual([None, None], foo.get_shape().as_list())
self.assertAllEqual([None, 2], foo.indices.get_shape().as_list())
@test_util.run_deprecated_v1
def testNoShapePlaceholder(self):
foo = array_ops.sparse_placeholder(dtypes.float32, shape=None)
self.assertAllEqual(None, foo.get_shape())
self.assertAllEqual([None, None], foo.indices.get_shape().as_list())
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for division with division imported from __future__.
This file should be exactly the same as division_past_test.py except
for the __future__ division line.
"""
from __future__ import absolute_import
# from __future__ import division # Intentionally skip this import
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
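# Sketch of the semantics the checks below assert (values illustrative):
# without `from __future__ import division`, Python 2 evaluates int / int
# with flooring semantics, so 7 / 2 == 3, while 7 // 2 == 3 under both
# conventions. TensorFlow overloads both __div__ and __truediv__, so
# `tensor / tensor` is expected to track the calling module's division
# semantics, which is why this file deliberately omits the import.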
class DivisionTestCase(test.TestCase):
def testDivision(self):
"""Test all the different ways to divide."""
values = [1, 2, 7, 11]
functions = (lambda x: x), constant_op.constant
dtypes = np.int8, np.int16, np.int32, np.int64, np.float32, np.float64
tensors = []
checks = []
def check(x, y):
x = ops.convert_to_tensor(x)
y = ops.convert_to_tensor(y)
tensors.append((x, y))
def f(x, y):
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x, y)
checks.append(f)
with self.cached_session() as sess:
for dtype in dtypes:
for x in map(dtype, values):
for y in map(dtype, values):
for fx in functions:
for fy in functions:
tf_x = fx(x)
tf_y = fy(y)
div = x / y
tf_div = tf_x / tf_y
check(div, tf_div)
floordiv = x // y
tf_floordiv = tf_x // tf_y
check(floordiv, tf_floordiv)
# Do only one sess.run for speed
for f, (x, y) in zip(checks, self.evaluate(tensors)):
f(x, y)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/division_past_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the actual serialized proto output of the V1 tf.summary ops.
The tensor, audio, and image ops have dedicated tests in adjacent files. The
overall tf.summary API surface also has its own tests in summary_test.py that
check calling the API methods but not the exact serialized proto output.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import logging_ops
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
class SummaryV1OpsTest(test.TestCase):
def _AsSummary(self, s):
summ = summary_pb2.Summary()
summ.ParseFromString(s)
return summ
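  # For reference, a Summary round-trip looks like this (a sketch, not part
  # of the tests below):
  #   s = summary_pb2.Summary()
  #   s.value.add(tag="c1", simple_value=10.0)
  #   self._AsSummary(s.SerializeToString())  # parses back to an equal proto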
def testScalarSummary(self):
with self.cached_session() as sess:
const = constant_op.constant([10.0, 20.0])
summ = logging_ops.scalar_summary(["c1", "c2"], const, name="mysumm")
value = self.evaluate(summ)
self.assertEqual([], summ.get_shape())
self.assertProtoEquals("""
value { tag: "c1" simple_value: 10.0 }
value { tag: "c2" simple_value: 20.0 }
""", self._AsSummary(value))
def testScalarSummaryDefaultName(self):
with self.cached_session() as sess:
const = constant_op.constant([10.0, 20.0])
summ = logging_ops.scalar_summary(["c1", "c2"], const)
value = self.evaluate(summ)
self.assertEqual([], summ.get_shape())
self.assertProtoEquals("""
value { tag: "c1" simple_value: 10.0 }
value { tag: "c2" simple_value: 20.0 }
""", self._AsSummary(value))
@test_util.run_deprecated_v1
def testMergeSummary(self):
with self.cached_session() as sess:
const = constant_op.constant(10.0)
summ1 = summary.histogram("h", const)
summ2 = logging_ops.scalar_summary("c", const)
merge = summary.merge([summ1, summ2])
value = self.evaluate(merge)
self.assertEqual([], merge.get_shape())
self.assertProtoEquals("""
value {
tag: "h"
histo {
min: 10.0
max: 10.0
num: 1.0
sum: 10.0
sum_squares: 100.0
bucket_limit: 9.93809490288
bucket_limit: 10.9319043932
bucket_limit: 1.7976931348623157e+308
bucket: 0.0
bucket: 1.0
bucket: 0.0
}
}
value { tag: "c" simple_value: 10.0 }
""", self._AsSummary(value))
def testMergeAllSummaries(self):
with ops.Graph().as_default():
const = constant_op.constant(10.0)
summ1 = summary.histogram("h", const)
summ2 = summary.scalar("o", const, collections=["foo_key"])
summ3 = summary.scalar("c", const)
merge = summary.merge_all()
self.assertEqual("MergeSummary", merge.op.type)
self.assertEqual(2, len(merge.op.inputs))
self.assertEqual(summ1, merge.op.inputs[0])
self.assertEqual(summ3, merge.op.inputs[1])
merge = summary.merge_all("foo_key")
self.assertEqual("MergeSummary", merge.op.type)
self.assertEqual(1, len(merge.op.inputs))
self.assertEqual(summ2, merge.op.inputs[0])
self.assertTrue(summary.merge_all("bar_key") is None)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/summary_v1_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_triangular_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test
class MatrixTriangularSolveOpTest(test.TestCase):
def _verifySolveAllWays(self, x, y, dtypes, batch_dims=None):
for lower in True, False:
for adjoint in True, False:
for use_placeholder in True, False:
self._verifySolve(
x,
y,
lower=lower,
adjoint=adjoint,
batch_dims=batch_dims,
use_placeholder=use_placeholder,
dtypes=dtypes)
def _verifySolveAllWaysReal(self, x, y, batch_dims=None):
self._verifySolveAllWays(x, y, (np.float32, np.float64), batch_dims)
def _verifySolveAllWaysComplex(self, x, y, batch_dims=None):
self._verifySolveAllWays(x, y, (np.complex64, np.complex128), batch_dims)
def _verifySolve(self,
x,
y,
lower=True,
adjoint=False,
batch_dims=None,
use_placeholder=False,
dtypes=(np.float32, np.float64)):
for np_type in dtypes:
a = x.astype(np_type)
b = y.astype(np_type)
# For numpy.solve we have to explicitly zero out the strictly
# upper or lower triangle.
if lower and a.size > 0:
a_np = np.tril(a)
elif a.size > 0:
a_np = np.triu(a)
else:
a_np = a
if adjoint:
a_np = np.conj(np.transpose(a_np))
if batch_dims is not None:
a = np.tile(a, batch_dims + [1, 1])
a_np = np.tile(a_np, batch_dims + [1, 1])
b = np.tile(b, batch_dims + [1, 1])
with self.cached_session(use_gpu=True) as sess:
if use_placeholder:
a_tf = array_ops.placeholder(a.dtype)
b_tf = array_ops.placeholder(b.dtype)
tf_ans = linalg_ops.matrix_triangular_solve(
a_tf, b_tf, lower=lower, adjoint=adjoint)
tf_val = sess.run(tf_ans, feed_dict={a_tf: a, b_tf: b})
np_ans = np.linalg.solve(a_np, b)
else:
a_tf = constant_op.constant(a)
b_tf = constant_op.constant(b)
tf_ans = linalg_ops.matrix_triangular_solve(
a_tf, b_tf, lower=lower, adjoint=adjoint)
tf_val = self.evaluate(tf_ans)
np_ans = np.linalg.solve(a_np, b)
self.assertEqual(np_ans.shape, tf_ans.get_shape())
self.assertEqual(np_ans.shape, tf_val.shape)
self.assertAllClose(np_ans, tf_val)
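  # Relationship exercised above (an illustrative sketch): for a
  # lower-triangular `a`, linalg_ops.matrix_triangular_solve(a, b, lower=True)
  # should agree with np.linalg.solve(np.tril(a), b). numpy has no dedicated
  # triangular solver, hence the explicit np.tril / np.triu masking in this
  # helper.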
@test_util.run_deprecated_v1
def testSolve(self):
# 1x1 matrix, single rhs.
matrix = np.array([[0.1]])
rhs0 = np.array([[1.]])
self._verifySolveAllWaysReal(matrix, rhs0)
# 2x2 matrices, single right-hand side.
matrix = np.array([[1., 2.], [3., 4.]])
rhs0 = np.array([[1.], [1.]])
self._verifySolveAllWaysReal(matrix, rhs0)
# 2x2 matrices, 3 right-hand sides.
rhs1 = np.array([[1., 0., 1.], [0., 1., 1.]])
self._verifySolveAllWaysReal(matrix, rhs1)
@test_util.run_deprecated_v1
def testSolveComplex(self):
# 1x1 matrix, single rhs.
matrix = np.array([[0.1 + 1j * 0.1]])
rhs0 = np.array([[1. + 1j]])
self._verifySolveAllWaysComplex(matrix, rhs0)
# 2x2 matrices, single right-hand side.
matrix = np.array([[1., 2.], [3., 4.]]).astype(np.complex64)
matrix += 1j * matrix
rhs0 = np.array([[1.], [1.]]).astype(np.complex64)
rhs0 += 1j * rhs0
self._verifySolveAllWaysComplex(matrix, rhs0)
# 2x2 matrices, 3 right-hand sides.
rhs1 = np.array([[1., 0., 1.], [0., 1., 1.]]).astype(np.complex64)
rhs1 += 1j * rhs1
self._verifySolveAllWaysComplex(matrix, rhs1)
@test_util.run_deprecated_v1
def testSolveBatch(self):
matrix = np.array([[1., 2.], [3., 4.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.]])
# Batch of 2x3x2x2 matrices, 2x3x2x3 right-hand sides.
self._verifySolveAllWaysReal(matrix, rhs, batch_dims=[2, 3])
# Batch of 3x2x2x2 matrices, 3x2x2x3 right-hand sides.
self._verifySolveAllWaysReal(matrix, rhs, batch_dims=[3, 2])
@test_util.run_deprecated_v1
def testSolveBatchComplex(self):
matrix = np.array([[1., 2.], [3., 4.]]).astype(np.complex64)
matrix += 1j * matrix
rhs = np.array([[1., 0., 1.], [0., 1., 1.]]).astype(np.complex64)
rhs += 1j * rhs
# Batch of 2x3x2x2 matrices, 2x3x2x3 right-hand sides.
self._verifySolveAllWaysComplex(matrix, rhs, batch_dims=[2, 3])
# Batch of 3x2x2x2 matrices, 3x2x2x3 right-hand sides.
self._verifySolveAllWaysComplex(matrix, rhs, batch_dims=[3, 2])
@test_util.run_deprecated_v1
def testNonSquareMatrix(self):
# A non-square matrix should cause an error.
matrix = np.array([[1., 2., 3.], [3., 4., 5.]])
with self.cached_session():
with self.assertRaises(ValueError):
self._verifySolve(matrix, matrix)
with self.assertRaises(ValueError):
self._verifySolve(matrix, matrix, batch_dims=[2, 3])
@test_util.run_deprecated_v1
def testWrongDimensions(self):
# The matrix should have the same number of rows as the
# right-hand sides.
matrix = np.array([[1., 0.], [0., 1.]])
rhs = np.array([[1., 0.]])
with self.cached_session():
with self.assertRaises(ValueError):
self._verifySolve(matrix, rhs)
with self.assertRaises(ValueError):
self._verifySolve(matrix, rhs, batch_dims=[2, 3])
@test_util.run_deprecated_v1
def testNotInvertible(self):
# The input should be invertible.
# The matrix is singular because it has a zero on the diagonal.
singular_matrix = np.array([[1., 0., -1.], [-1., 0., 1.], [0., -1., 1.]])
with self.cached_session():
with self.assertRaisesOpError("Input matrix is not invertible."):
self._verifySolve(singular_matrix, singular_matrix)
with self.assertRaisesOpError("Input matrix is not invertible."):
self._verifySolve(singular_matrix, singular_matrix, batch_dims=[2, 3])
def testEmpty(self):
self._verifySolve(np.empty([0, 2, 2]), np.empty([0, 2, 2]), lower=True)
self._verifySolve(np.empty([2, 0, 0]), np.empty([2, 0, 0]), lower=True)
self._verifySolve(np.empty([2, 0, 0]), np.empty([2, 0, 0]), lower=False)
self._verifySolve(
np.empty([2, 0, 0]), np.empty([2, 0, 0]), lower=True, batch_dims=[3, 2])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class WhereOpTest(test.TestCase):
def _testWhere(self, x, truth, expected_err_re=None, fn=array_ops.where):
with self.cached_session(use_gpu=True):
ans = fn(x)
self.assertTrue(ans.get_shape().is_compatible_with([None, x.ndim]))
if expected_err_re is None:
tf_ans = self.evaluate(ans)
self.assertAllClose(tf_ans, truth, atol=1e-10)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def _testWrongNumbers(self, fn=array_ops.where):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
fn([False, True], [1, 2], None)
with self.assertRaises(ValueError):
fn([False, True], None, [1, 2])
def _testBasicVec(self, fn=array_ops.where):
x = np.asarray([True, False])
truth = np.asarray([[0]], dtype=np.int64)
self._testWhere(x, truth, None, fn)
x = np.asarray([False, True, False])
truth = np.asarray([[1]], dtype=np.int64)
self._testWhere(x, truth, None, fn)
x = np.asarray([False, False, True, False, True])
truth = np.asarray([[2], [4]], dtype=np.int64)
self._testWhere(x, truth, None, fn)
def _testRandomVec(self, fn=array_ops.where):
x = np.random.rand(1000000) > 0.5
truth = np.vstack([np.where(x)[0].astype(np.int64)]).T
self._testWhere(x, truth, None, fn)
def _testBasicMat(self, fn=array_ops.where):
x = np.asarray([[True, False], [True, False]])
    # Expected indices are listed in row-major (C) order.
truth = np.asarray([[0, 0], [1, 0]], dtype=np.int64)
self._testWhere(x, truth, None, fn)
def _testBasic3Tensor(self, fn=array_ops.where):
x = np.asarray([[[True, False], [True, False]],
[[False, True], [False, True]],
[[False, False], [False, True]]])
    # Expected indices are listed in row-major (C) order.
truth = np.asarray(
[[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]], dtype=np.int64)
self._testWhere(x, truth, None, fn)
def _testRandom(self, dtype, expected_err_re=None, fn=array_ops.where):
shape = [127, 33, 53]
    # Cast a boolean mask to `dtype` (including complex dtypes), so roughly
    # half of the entries are nonzero.
    x = (np.random.randn(*shape) > 0).astype(dtype)
truth = np.where(np.abs(x) > 0) # Tuples of indices by axis.
truth = np.vstack(truth).T # Convert to [num_true, indices].
self._testWhere(x, truth, expected_err_re, fn)
def _testThreeArgument(self, fn=array_ops.where):
x = np.array([[-2, 3, -1], [1, -3, -3]])
np_val = np.where(x > 0, x * x, -x)
with self.test_session(use_gpu=True):
tf_val = self.evaluate(fn(constant_op.constant(x) > 0, x * x, -x))
self.assertAllEqual(tf_val, np_val)
def testWrongNumbers(self):
self._testWrongNumbers()
@test_util.run_deprecated_v1
def testBasicVec(self):
self._testBasicVec()
@test_util.run_deprecated_v1
def testRandomVec(self):
self._testRandomVec()
@test_util.run_deprecated_v1
def testBasicMat(self):
self._testBasicMat()
@test_util.run_deprecated_v1
def testBasic3Tensor(self):
self._testBasic3Tensor()
@test_util.run_deprecated_v1
def testRandomBool(self):
self._testRandom(np.bool)
@test_util.run_deprecated_v1
def testRandomInt32(self):
self._testRandom(np.int32)
@test_util.run_deprecated_v1
def testRandomInt64(self):
self._testRandom(np.int64)
@test_util.run_deprecated_v1
def testRandomFloat(self):
self._testRandom(np.float32)
@test_util.run_deprecated_v1
def testRandomDouble(self):
self._testRandom(np.float64)
@test_util.run_deprecated_v1
def testRandomComplex64(self):
self._testRandom(np.complex64)
@test_util.run_deprecated_v1
def testRandomComplex128(self):
self._testRandom(np.complex128)
@test_util.run_deprecated_v1
def testRandomUint8(self):
self._testRandom(np.uint8)
@test_util.run_deprecated_v1
def testRandomInt8(self):
self._testRandom(np.int8)
@test_util.run_deprecated_v1
def testRandomInt16(self):
self._testRandom(np.int16)
@test_util.run_deprecated_v1
def testThreeArgument(self):
self._testThreeArgument()
def testV2WrongNumbers(self):
self._testWrongNumbers(array_ops.where_v2)
def testV2BasicVec(self):
self._testBasicVec(array_ops.where_v2)
def testV2RandomVec(self):
self._testRandomVec(array_ops.where_v2)
def testV2BasicMat(self):
self._testBasicMat(array_ops.where_v2)
def testV2Basic3Tensor(self):
self._testBasic3Tensor(array_ops.where_v2)
def testV2RandomBool(self):
self._testRandom(np.bool, None, array_ops.where_v2)
def testV2RandomInt32(self):
self._testRandom(np.int32, None, array_ops.where_v2)
def testV2RandomInt64(self):
self._testRandom(np.int64, None, array_ops.where_v2)
def testV2RandomFloat(self):
self._testRandom(np.float32, None, array_ops.where_v2)
def testV2RandomDouble(self):
self._testRandom(np.float64, None, array_ops.where_v2)
def testV2RandomComplex64(self):
self._testRandom(np.complex64, None, array_ops.where_v2)
def testV2RandomComplex128(self):
self._testRandom(np.complex128, None, array_ops.where_v2)
def testV2RandomUint8(self):
self._testRandom(np.uint8, None, array_ops.where_v2)
def testV2RandomInt8(self):
self._testRandom(np.int8, None, array_ops.where_v2)
def testV2RandomInt16(self):
self._testRandom(np.int16, None, array_ops.where_v2)
def testV2ThreeArgument(self):
self._testThreeArgument(array_ops.where_v2)
def testV2Broadcasting(self):
f = np.random.normal(0, 1, (3, 5, 1, 1))
x = np.zeros((7, 11))
y = np.ones((7, 11))
np_val = np.where(f < 0, x, y)
with self.test_session(use_gpu=True):
tf_val = self.evaluate(
array_ops.where_v2(constant_op.constant(f) < 0, x, y))
self.assertAllEqual(tf_val, np_val)
def testV2ScalarBroadcasting(self):
x = np.zeros((7, 11))
y = np.ones((7, 11))
np_val = np.where(True, x, y)
with self.test_session(use_gpu=True):
tf_val = self.evaluate(
array_ops.where_v2(
constant_op.constant(True, dtype=dtypes.bool), x, y))
self.assertAllEqual(tf_val, np_val)
def testV2VectorBroadcasting(self):
x = np.zeros(7)
y = np.ones(7)
np_val = np.where([True], x, y)
with self.test_session(use_gpu=True):
tf_val = self.evaluate(
array_ops.where_v2(
constant_op.constant([True], dtype=dtypes.bool), x, y))
self.assertAllEqual(tf_val, np_val)
def testV2PredBroadcasting(self):
pred = np.array([1, 0, 0]).reshape((3, 1))
x = np.random.randn(3, 4)
y = np.random.randn(3, 4)
np_val = np.where(pred, x, y)
with self.test_session(use_gpu=True):
tf_val = self.evaluate(array_ops.where_v2(pred, x, y))
self.assertAllClose(tf_val, np_val)
@test_util.run_deprecated_v1
def testBatchSelect(self):
x = np.array([[-2, 3, -1] * 64, [1, -3, -3] * 64] * 8192) # [16384, 192]
c_mat = np.array([[False] * 192, [True] * 192] * 8192) # [16384, 192]
c_vec = np.array([False, True] * 8192) # [16384]
np_val = np.where(c_mat, x * x, -x)
with self.session(use_gpu=True):
tf_val = array_ops.where(c_vec, x * x, -x).eval()
self.assertAllEqual(tf_val, np_val)
class WhereBenchmark(test.Benchmark):
def benchmarkWhere(self):
for (m, n, p, use_gpu) in itertools.product(
[10],
[10, 100, 1000, 10000, 100000, 1000000],
[0.01, 0.5, 0.99],
[False, True]):
name = "m_%d_n_%d_p_%g_use_gpu_%s" % (m, n, p, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
x = random_ops.random_uniform((m, n), dtype=dtypes.float32) <= p
v = resource_variable_ops.ResourceVariable(x)
op = array_ops.where(v)
with session.Session(config=benchmark.benchmark_config()) as sess:
v.initializer.run()
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
gb_processed_input = m * n / 1.0e9
          # Approximate size of output: m*n*p int64 indices for each of the 2 axes.
gb_processed_output = 2 * 8 * m * n * p / 1.0e9
gb_processed = gb_processed_input + gb_processed_output
throughput = gb_processed / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
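  # Worked example of the throughput estimate above (illustrative):
  # m=10, n=1000000, p=0.5 gives ~0.01 GB of boolean input plus
  # 2 axes * 8 bytes * 5e6 true elements ~= 0.08 GB of int64 output,
  # so ~0.09 GB is divided by the measured wall_time.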
def benchmarkBatchSelect(self):
for (m, n, use_gpu) in itertools.product([1000, 10000, 100000],
[10, 100, 1000], [False, True]):
name = "m_%d_n_%d_use_gpu_%s" % (m, n, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
x_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
y_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
c_gen = random_ops.random_uniform([m], dtype=dtypes.float32) <= 0.5
x = resource_variable_ops.ResourceVariable(x_gen)
y = resource_variable_ops.ResourceVariable(y_gen)
c = resource_variable_ops.ResourceVariable(c_gen)
op = array_ops.where(c, x, y)
with session.Session(config=benchmark.benchmark_config()) as sess:
x.initializer.run()
y.initializer.run()
c.initializer.run()
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
          # Approximate size of data processed: 2 float32s (8 bytes) per output element.
gb_processed = m * n * 8 / 1.0e9
throughput = gb_processed / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/where_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class BetaincTest(test.TestCase):
def _testBetaInc(self, a_s, b_s, x_s, dtype):
try:
from scipy import special # pylint: disable=g-import-not-at-top
np_dt = dtype.as_numpy_dtype
# Test random values
a_s = a_s.astype(np_dt) # in (0, infty)
b_s = b_s.astype(np_dt) # in (0, infty)
x_s = x_s.astype(np_dt) # in (0, 1)
tf_a_s = constant_op.constant(a_s, dtype=dtype)
tf_b_s = constant_op.constant(b_s, dtype=dtype)
tf_x_s = constant_op.constant(x_s, dtype=dtype)
tf_out_t = math_ops.betainc(tf_a_s, tf_b_s, tf_x_s)
with self.cached_session():
tf_out = self.evaluate(tf_out_t)
scipy_out = special.betainc(a_s, b_s, x_s).astype(np_dt)
      # The scipy version of betainc uses a double-only implementation.
      # TODO(ebrevdo): identify reasons for (sometimes) precision loss
      # with doubles
tol = 1e-4 if dtype == dtypes.float32 else 5e-5
self.assertAllCloseAccordingToType(scipy_out, tf_out, rtol=tol, atol=0)
# Test out-of-range values (most should return nan output)
combinations = list(itertools.product([-1, 0, 0.5, 1.0, 1.5], repeat=3))
a_comb, b_comb, x_comb = np.asarray(list(zip(*combinations)), dtype=np_dt)
with self.cached_session():
tf_comb = math_ops.betainc(a_comb, b_comb, x_comb).eval()
scipy_comb = special.betainc(a_comb, b_comb, x_comb).astype(np_dt)
self.assertAllCloseAccordingToType(scipy_comb, tf_comb)
# Test broadcasting between scalars and other shapes
with self.cached_session():
self.assertAllCloseAccordingToType(
special.betainc(0.1, b_s, x_s).astype(np_dt),
math_ops.betainc(0.1, b_s, x_s).eval(),
rtol=tol,
atol=0)
self.assertAllCloseAccordingToType(
special.betainc(a_s, 0.1, x_s).astype(np_dt),
math_ops.betainc(a_s, 0.1, x_s).eval(),
rtol=tol,
atol=0)
self.assertAllCloseAccordingToType(
special.betainc(a_s, b_s, 0.1).astype(np_dt),
math_ops.betainc(a_s, b_s, 0.1).eval(),
rtol=tol,
atol=0)
self.assertAllCloseAccordingToType(
special.betainc(0.1, b_s, 0.1).astype(np_dt),
math_ops.betainc(0.1, b_s, 0.1).eval(),
rtol=tol,
atol=0)
self.assertAllCloseAccordingToType(
special.betainc(0.1, 0.1, 0.1).astype(np_dt),
math_ops.betainc(0.1, 0.1, 0.1).eval(),
rtol=tol,
atol=0)
with self.assertRaisesRegexp(ValueError, "must be equal"):
math_ops.betainc(0.5, [0.5], [[0.5]])
with self.cached_session():
with self.assertRaisesOpError("Shapes of .* are inconsistent"):
a_p = array_ops.placeholder(dtype)
b_p = array_ops.placeholder(dtype)
x_p = array_ops.placeholder(dtype)
math_ops.betainc(a_p, b_p, x_p).eval(
feed_dict={a_p: 0.5,
b_p: [0.5],
x_p: [[0.5]]})
except ImportError as e:
tf_logging.warn("Cannot test special functions: %s" % str(e))
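  # For reference (illustrative, not part of the test): betainc computes the
  # regularized incomplete beta integral I_x(a, b), and for a = b = 1 it
  # reduces to x itself, e.g. special.betainc(1.0, 1.0, 0.3) == 0.3;
  # math_ops.betainc is checked against scipy elementwise above.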
@test_util.run_deprecated_v1
def testBetaIncFloat(self):
a_s = np.abs(np.random.randn(10, 10) * 30) # in (0, infty)
b_s = np.abs(np.random.randn(10, 10) * 30) # in (0, infty)
x_s = np.random.rand(10, 10) # in (0, 1)
self._testBetaInc(a_s, b_s, x_s, dtypes.float32)
@test_util.run_deprecated_v1
def testBetaIncDouble(self):
a_s = np.abs(np.random.randn(10, 10) * 30) # in (0, infty)
b_s = np.abs(np.random.randn(10, 10) * 30) # in (0, infty)
x_s = np.random.rand(10, 10) # in (0, 1)
self._testBetaInc(a_s, b_s, x_s, dtypes.float64)
@test_util.run_deprecated_v1
def testBetaIncDoubleVeryLargeValues(self):
a_s = np.abs(np.random.randn(10, 10) * 1e15) # in (0, infty)
b_s = np.abs(np.random.randn(10, 10) * 1e15) # in (0, infty)
x_s = np.random.rand(10, 10) # in (0, 1)
self._testBetaInc(a_s, b_s, x_s, dtypes.float64)
@test_util.run_deprecated_v1
def testBetaIncDoubleVerySmallValues(self):
a_s = np.abs(np.random.randn(10, 10) * 1e-16) # in (0, infty)
b_s = np.abs(np.random.randn(10, 10) * 1e-16) # in (0, infty)
x_s = np.random.rand(10, 10) # in (0, 1)
self._testBetaInc(a_s, b_s, x_s, dtypes.float64)
@test_util.run_deprecated_v1
def testBetaIncFloatVerySmallValues(self):
a_s = np.abs(np.random.randn(10, 10) * 1e-8) # in (0, infty)
b_s = np.abs(np.random.randn(10, 10) * 1e-8) # in (0, infty)
x_s = np.random.rand(10, 10) # in (0, 1)
self._testBetaInc(a_s, b_s, x_s, dtypes.float32)
@test_util.run_deprecated_v1
def testBetaIncFpropAndBpropAreNeverNAN(self):
with self.cached_session() as sess:
space = np.logspace(-8, 5).tolist()
space_x = np.linspace(1e-16, 1 - 1e-16).tolist()
ga_s, gb_s, gx_s = zip(*list(itertools.product(space, space, space_x)))
# Test grads are never nan
ga_s_t = constant_op.constant(ga_s, dtype=dtypes.float32)
gb_s_t = constant_op.constant(gb_s, dtype=dtypes.float32)
gx_s_t = constant_op.constant(gx_s, dtype=dtypes.float32)
tf_gout_t = math_ops.betainc(ga_s_t, gb_s_t, gx_s_t)
tf_gout, grads_x = sess.run(
[tf_gout_t,
gradients_impl.gradients(tf_gout_t, [ga_s_t, gb_s_t, gx_s_t])[2]])
# Equivalent to `assertAllFalse` (if it existed).
self.assertAllEqual(np.zeros_like(grads_x).astype(np.bool),
np.isnan(tf_gout))
self.assertAllEqual(np.zeros_like(grads_x).astype(np.bool),
np.isnan(grads_x))
@test_util.run_deprecated_v1
def testBetaIncGrads(self):
err_tolerance = 1e-3
with self.cached_session():
# Test gradient
ga_s = np.abs(np.random.randn(2, 2) * 30) # in (0, infty)
gb_s = np.abs(np.random.randn(2, 2) * 30) # in (0, infty)
gx_s = np.random.rand(2, 2) # in (0, 1)
tf_ga_s = constant_op.constant(ga_s, dtype=dtypes.float64)
tf_gb_s = constant_op.constant(gb_s, dtype=dtypes.float64)
tf_gx_s = constant_op.constant(gx_s, dtype=dtypes.float64)
tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)
err = gradient_checker.compute_gradient_error(
[tf_gx_s], [gx_s.shape], tf_gout_t, gx_s.shape)
tf_logging.info("betainc gradient err = %g " % err)
self.assertLess(err, err_tolerance)
# Test broadcast gradient
gx_s = np.random.rand() # in (0, 1)
tf_gx_s = constant_op.constant(gx_s, dtype=dtypes.float64)
tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)
err = gradient_checker.compute_gradient_error(
[tf_gx_s], [()], tf_gout_t, ga_s.shape)
tf_logging.info("betainc gradient err = %g " % err)
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/betainc_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ops used with embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import compat
def _AsLong(array):
"""Casts arrays elements to long type. Used to convert from numpy tf."""
return [int(x) for x in array]
class ScatterAddSubTest(test.TestCase):
def _TestCase(self, shape, indices, scatter_op=state_ops.scatter_add):
"""Run a random test case with the given shape and indices.
Args:
shape: Shape of the parameters array.
      indices: One-dimensional array of ints, the indices into the first
        dimension of the parameters to update.
scatter_op: ScatterAdd or ScatterSub.
"""
super(ScatterAddSubTest, self).setUp()
with self.cached_session(use_gpu=False):
# Create a random parameter array of given shape
p_init = np.random.rand(*shape).astype("f")
# Create the shape of the update array. All dimensions except the last
# match the parameter array, the last dimension equals the # of indices.
vals_shape = [len(indices)] + shape[1:]
vals_init = np.random.rand(*vals_shape).astype("f")
v_i = [float(x) for x in vals_init.ravel()]
p = variables.Variable(p_init)
vals = constant_op.constant(v_i, shape=vals_shape, name="vals")
ind = constant_op.constant(indices, dtype=dtypes.int32)
p2 = scatter_op(p, ind, vals, name="updated_p")
# p = init
variables.global_variables_initializer().run()
# p += vals
result = self.evaluate(p2)
# Compute the expected 'p' using numpy operations.
for i, ind in enumerate(indices):
if scatter_op == state_ops.scatter_add:
p_init.reshape(shape[0], -1)[ind, :] += (vals_init.reshape(
vals_shape[0], -1)[i, :])
else:
p_init.reshape(shape[0], -1)[ind, :] -= (vals_init.reshape(
vals_shape[0], -1)[i, :])
self.assertTrue(all((p_init == result).ravel()))
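  # scatter_add in miniature (a sketch, values illustrative):
  #   p = variables.Variable([[1., 1.], [2., 2.]])
  #   state_ops.scatter_add(p, [1], [[3., 4.]])  # row 1 becomes [5., 6.]
  # With repeated indices the updates accumulate, which is why the numpy
  # reference loop above applies them one index at a time.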
@test_util.run_deprecated_v1
def testNoRepetitions(self):
self._TestCase([2, 2], [1])
self._TestCase([4, 4, 4], [2, 0])
self._TestCase([43, 20, 10, 10], [42, 5, 6, 1, 3, 5, 7, 9])
@test_util.run_deprecated_v1
def testWithRepetitions(self):
self._TestCase([2, 2], [1, 1])
self._TestCase([5, 3, 9, 5], [2, 0, 4, 1, 3, 1, 4, 0, 4, 3])
self._TestCase([32, 4, 4], [31] * 8)
@test_util.run_deprecated_v1
def testRandom(self):
# Random shapes of rank 4, random indices
for _ in range(5):
shape = np.random.randint(1, 20, size=4)
indices = np.random.randint(shape[0], size=2 * shape[0])
self._TestCase(_AsLong(list(shape)), list(indices))
@test_util.run_deprecated_v1
def testSubRandom(self):
# Random shapes of rank 4, random indices
for _ in range(5):
shape = np.random.randint(1, 20, size=4)
indices = np.random.randint(shape[0], size=2 * shape[0])
self._TestCase(_AsLong(list(shape)), list(indices), state_ops.scatter_sub)
@test_util.run_deprecated_v1
def testWrongShape(self):
# Indices and values mismatch.
var = variables.Variable(
array_ops.zeros(shape=[1024, 64, 64], dtype=dtypes.float32))
indices = array_ops.placeholder(dtypes.int32, shape=[32])
values = array_ops.placeholder(dtypes.float32, shape=[33, 64, 64])
with self.assertRaises(ValueError):
state_ops.scatter_add(var, indices, values)
# Var and values mismatch.
values = array_ops.placeholder(dtypes.float32, shape=[32, 64, 63])
with self.assertRaises(ValueError):
state_ops.scatter_add(var, indices, values)
def _PName(param_id):
return "p" + str(param_id)
def _EmbeddingParams(num_shards,
vocab_size,
dtype=dtypes.float32,
shape=None,
use_shapeless_placeholder=False):
p = []
params = {}
feed_dict = {}
if not shape:
shape = [10]
for i in range(num_shards):
shard_shape = [vocab_size // num_shards] + shape
if i < vocab_size % num_shards: # Excess goes evenly on the first shards
shard_shape[0] += 1
param_name = _PName(i)
if use_shapeless_placeholder:
param = array_ops.placeholder(dtype, shape=None, name=param_name)
else:
param = constant_op.constant(
1.0, shape=shard_shape, dtype=dtype, name=param_name)
p.append(param)
np_type = "f" if dtype == dtypes.float32 else "d"
val = (np.random.rand(*shard_shape).astype(np_type)) + 1
params[param_name + ":0"] = val
feed_dict[param.name] = val
return p, params, feed_dict
def _EmbeddingParamsAsPartitionedVariable(num_shards,
vocab_size,
dtype=dtypes.float32,
shape=None,
use_resource=False):
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, dtype=dtype, shape=shape)
shape = shape or [10]
partitioned_variable = variable_scope.get_variable(
"p",
shape=[vocab_size] + shape,
initializer=array_ops.concat([params[p_i.name] for p_i in p], 0),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=num_shards, min_slice_size=1),
use_resource=use_resource)
return p, partitioned_variable, params, feed_dict
def _EmbeddingResult(params,
id_vals,
num_shards,
vocab_size,
partition_strategy="mod",
weight_vals=None):
if weight_vals is None:
weight_vals = np.copy(id_vals)
weight_vals.fill(1)
values = []
weights = []
weights_squared = []
for ids, wts in zip(id_vals, weight_vals):
value_aggregation = None
weight_aggregation = None
squared_weight_aggregation = None
if isinstance(ids, compat.integral_types):
ids = [ids]
wts = [wts]
for i, weight_value in zip(ids, wts):
if partition_strategy == "mod":
val = np.copy(params[_PName(i % num_shards) + ":0"][
i // num_shards, :]) * weight_value
elif partition_strategy == "div":
ids_per_partition, extras = divmod(vocab_size, num_shards)
threshold = extras * (ids_per_partition + 1)
if i < threshold:
partition = i // (ids_per_partition + 1)
offset = i % (ids_per_partition + 1)
else:
partition = extras + (i - threshold) // ids_per_partition
offset = (i - threshold) % ids_per_partition
val = np.copy(
params[_PName(partition) + ":0"][offset, :]) * weight_value
else:
assert False
if value_aggregation is None:
assert weight_aggregation is None
assert squared_weight_aggregation is None
value_aggregation = val
weight_aggregation = weight_value
squared_weight_aggregation = weight_value * weight_value
else:
assert weight_aggregation is not None
assert squared_weight_aggregation is not None
value_aggregation += val
weight_aggregation += weight_value
squared_weight_aggregation += weight_value * weight_value
values.append(value_aggregation)
weights.append(weight_aggregation)
weights_squared.append(squared_weight_aggregation)
values = np.array(values).astype(np.float32)
weights = np.array(weights).astype(np.float32)
weights_squared = np.array(weights_squared).astype(np.float32)
return values, weights, weights_squared
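# Worked "div" partitioning example (illustrative): vocab_size=13,
# num_shards=5 gives ids_per_partition=2, extras=3, threshold=9. Then
# id 7 (< 9) lands in partition 7 // 3 = 2 at offset 7 % 3 = 1, while
# id 11 (>= 9) lands in partition 3 + (11 - 9) // 2 = 4 at offset
# (11 - 9) % 2 = 0. The first 3 shards therefore hold 3 ids each and the
# last 2 hold 2, matching the shard shapes built by _EmbeddingParams.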
class EmbeddingLookupTest(test.TestCase):
  # This test looks up [0, 0] in a parameter matrix sharded 2 ways. Since
  # both ids are in the first shard, one of the resulting lookup
  # vectors is going to be empty. The subsequent DivOp fails because of that.
  # TODO(keveman): Disabling the test until the underlying problem is fixed.
@test_util.run_deprecated_v1
def testSimpleSharded(self):
with self.cached_session():
num_shards = 2
vocab_size = 4
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
id_vals = np.array([0, 0])
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testMaxNorm(self):
with self.cached_session():
embeddings = constant_op.constant([[2.0]])
ids = constant_op.constant([0], dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(
[embeddings], ids, max_norm=1.0)
self.assertAllEqual(embedding.eval(), [[1.0]])
@test_util.run_deprecated_v1
def testMaxNormNontrivial(self):
with self.cached_session():
embeddings = constant_op.constant([[2.0, 4.0], [3.0, 1.0]])
ids = constant_op.constant([0, 1], dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(
[embeddings], ids, max_norm=2.0)
norms = math_ops.sqrt(
math_ops.reduce_sum(embeddings * embeddings, axis=1))
normalized = embeddings / array_ops.stack([norms, norms], axis=1)
self.assertAllEqual(embedding.eval(), 2 * self.evaluate(normalized))
@test_util.run_deprecated_v1
def testSimpleShardedPartitionedVariable(self):
with self.cached_session() as sess:
num_shards = 2
vocab_size = 4
p, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
num_shards, vocab_size)
id_vals = np.array([0, 0])
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p_variable, ids)
variables.global_variables_initializer().run()
params_values = [params[p_i.name] for p_i in p]
# Test that the PartitionedVariable components equal the list in p
p_var_val = self.evaluate(list(p_variable))
# Actual test
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(params_values, p_var_val)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testSimpleShardedPartitionedResourceVariable(self):
with self.cached_session() as sess:
num_shards = 2
vocab_size = 4
p, p_variable, params, _ = _EmbeddingParamsAsPartitionedVariable(
num_shards, vocab_size, use_resource=True)
id_vals = np.array([0, 0])
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p_variable, ids)
variables.global_variables_initializer().run()
params_values = [params[p_i.name] for p_i in p]
# Test that the PartitionedVariable components equal the list in p
p_var_val = self.evaluate(list(p_variable))
# Actual test
print(ops.get_default_graph().as_graph_def())
tf_result = self.evaluate(embedding)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(params_values, p_var_val)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedModPartitioningInt32Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
      # Embedding dimension is 10. The vocab_size x 10 embedding
      # parameters are spread across num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so it will
      # also test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedModPartitioningInt64Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
      # Embedding dimension is 10. The vocab_size x 10 embedding
      # parameters are spread across num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so it will
      # also test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningInt32Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
      # Embedding dimension is 10. The vocab_size x 10 embedding
      # parameters are spread across num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so it will
      # also test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(
p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningInt32IdsPartitionedVariable(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
      # Embedding dimension is 10. The vocab_size x 10 embedding
      # parameters are spread across num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
_, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
num_shards, vocab_size)
num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so it will
      # also test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
variables.global_variables_initializer().run()
embedding = embedding_ops.embedding_lookup(
p_variable, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningInt64Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
      # Embedding dimension is 10. The vocab_size x 10 embedding
      # parameters are spread across num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so it will
      # also test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
embedding = embedding_ops.embedding_lookup(
p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningUnknownParamShape(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
      # Embedding dimension is 10. The vocab_size x 10 embedding
      # parameters are spread across num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
      # We clear the parameter shapes to test the case where shapes are not
      # statically known.
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, use_shapeless_placeholder=True)
num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so it will
      # also test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
embedding = embedding_ops.embedding_lookup(
p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
@test_util.run_deprecated_v1
def testGradientsEmbeddingLookup(self):
vocab_size = 9
num_ids = 10
id_vals = list(np.random.randint(vocab_size, size=num_ids))
tf_logging.vlog(1, id_vals)
for ids_shape in [(10,), (2, 5)]:
for num_shards in [1, 3]:
with self.cached_session():
ids = constant_op.constant(
id_vals, shape=ids_shape, dtype=dtypes.int32)
x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
y = embedding_ops.embedding_lookup(x, ids)
y_shape = ids_shape + tuple(params[_PName(0) + ":0"].shape[1:])
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testGradientsEmbeddingLookupWithComputedParams(self):
vocab_size = 9
num_ids = 5
id_vals = list(np.random.randint(vocab_size, size=num_ids))
tf_logging.vlog(1, id_vals)
for num_shards in [1, 3]:
with self.cached_session():
ids = constant_op.constant(id_vals, dtype=dtypes.int32)
x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
# This will force a conversion from IndexedSlices to Tensor.
x_squared = [math_ops.square(elem) for elem in x]
y = embedding_ops.embedding_lookup(x_squared, ids)
y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-3)
def testConstructionNonSharded(self):
with ops.Graph().as_default():
p = variables.Variable(
array_ops.zeros(shape=[100, 100], dtype=dtypes.float32))
ids = constant_op.constant([0, 1, 1, 7], dtype=dtypes.int32)
embedding_ops.embedding_lookup([p], ids)
def testConstructionSharded(self):
with ops.Graph().as_default():
p = []
for _ in range(2):
p += [
variables.Variable(
array_ops.zeros(shape=[100, 100], dtype=dtypes.float32))
]
ids = constant_op.constant([0, 1, 1, 17], dtype=dtypes.int32)
embedding_ops.embedding_lookup(p, ids)
@test_util.run_deprecated_v1
def testHigherRank(self):
np.random.seed(8)
with self.cached_session():
for params_shape in (12,), (6, 3):
params = np.random.randn(*params_shape)
for ids_shape in (3, 2), (4, 3):
ids = np.random.randint(
params.shape[0], size=np.prod(ids_shape)).reshape(ids_shape)
# Compare nonsharded to gather
simple = embedding_ops.embedding_lookup(params, ids).eval()
self.assertAllEqual(simple, array_ops.gather(params, ids).eval())
# Run a few random sharded versions
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
split_params = [
array_ops.gather(params, stride + p) for p in xrange(procs)
]
sharded = embedding_ops.embedding_lookup(split_params, ids).eval()
self.assertAllEqual(simple, sharded)
@test_util.run_deprecated_v1
def testHigherRankMaxNorm(self):
np.random.seed(8)
with self.cached_session():
for params_shape in (12,), (6, 3), (6, 2, 3):
# Test embedding rank 0, 1, 2.
        # Note: the first dimension must be divisible by each `procs` value used below.
params = 2 * np.ones(params_shape)
params_norm = params / np.sqrt(
np.sum(
params * params, tuple(range(params.ndim)[1:]), keepdims=True))
for ids_shape in (), (3), (4, 3), (2, 3, 4):
ids = np.random.randint(
params.shape[0], size=np.prod(ids_shape,
dtype=np.int64)).reshape(ids_shape)
# Compare nonsharded to gather
simple = embedding_ops.embedding_lookup(
params, ids, max_norm=1.0).eval()
# assertAllClose is used here as different implementations of sqrt may
# be used to compute each of the values being compared. For example,
# on AVX512 builds the embedding operation makes use of Eigen's fast
# vectorized square root algorithm for doubles. These different
# implementations of sqrt are not guaranteed to produce exactly the
# same results. Therefore, an exact comparison cannot be made.
self.assertAllClose(simple, array_ops.gather(params_norm, ids).eval())
# Run a few different sharded versions.
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
split_params = [
array_ops.gather(params, stride + p) for p in xrange(procs)
]
sharded = embedding_ops.embedding_lookup(
split_params, ids, max_norm=1.0).eval()
self.assertAllEqual(simple, sharded)
@test_util.run_deprecated_v1
def testTransform(self):
# This tests all combinations of:
# - ids rank 0, 1, >1
# - params sharded/unsharded
# It always applies max_norm.
np.random.seed(8)
l2_norm = 2.
with self.cached_session():
# Param values are in [l2_norm, l2_norm+1) so it will always clip.
params = np.random.rand(6, 3) + l2_norm
params_norm = l2_norm * params / np.sqrt(
np.sum(params * params, axis=1, keepdims=True))
# Compute the norm of each embedding. This will change the embedding
# rank to 0.
params_norm = np.linalg.norm(params_norm, axis=1)
transform = lambda x: linalg_ops.norm(x, axis=1)
      for ids_shape in (), (3,), (4, 3), (2, 3, 4):
# Test ids rank 0, 1, 2, 3.
ids = np.random.randint(
params.shape[0], size=np.prod(ids_shape,
dtype=np.int64)).reshape(ids_shape)
# Compare nonsharded to gather.
simple = embedding_ops._embedding_lookup_and_transform(
params, ids, max_norm=l2_norm, transform_fn=transform).eval()
self.assertAllClose(simple, array_ops.gather(params_norm, ids).eval())
# Run a few different sharded versions.
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
split_params = [
array_ops.gather(params, stride + p) for p in xrange(procs)
]
sharded = embedding_ops._embedding_lookup_and_transform(
split_params, ids, max_norm=l2_norm,
transform_fn=transform).eval()
# assertAllClose is used here as different implementations of sqrt may
# be used to compute each of the values being compared. For example,
# on AVX512 builds the embedding operation makes use of Eigen's fast
# vectorized square root algorithm for doubles. These different
# implementations of sqrt are not guaranteed to produce exactly the
# same results. Therefore, an exact comparison cannot be made.
self.assertAllClose(simple, sharded)
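# For reference, the "mod" partition strategy exercised by the sharded tests
# above can be stated in a few lines of numpy: id `i` lives in shard
# `i % num_shards` at local row `i // num_shards`. This is a hedged sketch
# only; the helper name is illustrative and not part of the tested API.
def _np_mod_sharded_lookup(params, ids, num_shards):
  """Numpy equivalent of a mod-sharded embedding lookup (sketch only)."""
  shards = [params[shard::num_shards] for shard in range(num_shards)]
  return np.stack([shards[i % num_shards][i // num_shards] for i in ids])
# e.g. _np_mod_sharded_lookup(params, ids, 3) matches params[ids] row for row.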
class EmbeddingLookupSparseTest(test.TestCase):
def _RandomIdsAndWeights(self, batch_size, vocab_size):
max_val_per_entry = 6
vals_per_batch_entry = np.random.randint(
1, max_val_per_entry, size=batch_size)
num_vals = np.sum(vals_per_batch_entry)
ids = np.random.randint(vocab_size, size=num_vals)
weights = 1 + np.random.rand(num_vals)
indices = []
for batch_entry, num_val in enumerate(vals_per_batch_entry):
for val_index in range(num_val):
indices.append([batch_entry, val_index])
shape = [batch_size, max_val_per_entry]
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sp_ids, sp_weights, ids, weights, vals_per_batch_entry
def _GroupByBatchEntry(self, vals, vals_per_batch_entry):
grouped_vals = []
index = 0
for num_val in vals_per_batch_entry:
grouped_vals.append(list(vals[index:(index + num_val)]))
index += num_val
return grouped_vals
@test_util.run_deprecated_v1
def testEmbeddingLookupSparse(self):
vocab_size = 13
batch_size = 10
param_shape = [2, 5]
expected_lookup_result_shape = [None] + param_shape
sp_ids, sp_weights, ids, weights, vals_per_batch_entry = (
self._RandomIdsAndWeights(batch_size, vocab_size))
grouped_ids = self._GroupByBatchEntry(ids, vals_per_batch_entry)
grouped_weights = self._GroupByBatchEntry(weights, vals_per_batch_entry)
grouped_ignored_weights = self._GroupByBatchEntry(
np.ones(np.sum(vals_per_batch_entry)), vals_per_batch_entry)
for num_shards, combiner, dtype, ignore_weights in itertools.product(
[1, 5], ["sum", "mean", "sqrtn"],
[dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64],
[True, False]):
with self.cached_session():
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
embedding_sum = embedding_ops.embedding_lookup_sparse(
p,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner)
self.assertEqual(embedding_sum.get_shape().as_list(),
expected_lookup_result_shape)
if dtype in (dtypes.float16, dtypes.bfloat16):
self.assertEqual(embedding_sum.dtype, dtypes.float32)
else:
self.assertEqual(embedding_sum.dtype, dtype)
tf_embedding_sum = embedding_sum.eval(feed_dict=feed_dict)
np_embedding_sum, np_weight_sum, np_weight_sq_sum = _EmbeddingResult(
params,
grouped_ids,
num_shards,
vocab_size,
weight_vals=grouped_ignored_weights
if ignore_weights else grouped_weights)
if combiner == "mean":
np_embedding_sum /= np.reshape(np_weight_sum, (batch_size, 1, 1))
if combiner == "sqrtn":
np_embedding_sum /= np.reshape(
np.sqrt(np_weight_sq_sum), (batch_size, 1, 1))
rtol = 1e-6
if dtype == dtypes.bfloat16:
rtol = 1e-2
elif dtype == dtypes.float16:
rtol = 1e-3
atol = rtol
self.assertAllClose(np_embedding_sum, tf_embedding_sum, rtol, atol)
@test_util.run_deprecated_v1
def testGradientsEmbeddingLookupSparse(self):
vocab_size = 12
batch_size = 4
param_shape = [2, 3]
sp_ids, sp_weights, _, _, _ = (self._RandomIdsAndWeights(
batch_size, vocab_size))
for num_shards, combiner, dtype, ignore_weights in itertools.product(
[1, 3], ["sum", "mean", "sqrtn"], [dtypes.float32,
dtypes.float64], [True, False]):
with self.cached_session():
x, params, _ = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
y = embedding_ops.embedding_lookup_sparse(
x,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner)
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
y_shape = [batch_size] + list(params[_PName(0) + ":0"].shape[1:])
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-5 if dtype == dtypes.float64 else 2e-3)
@test_util.run_deprecated_v1
def testIncompatibleShapes(self):
with self.cached_session():
x, _, _ = _EmbeddingParams(1, 10, dtype=dtypes.float32)
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant([[0, 0], [0, 1], [1, 0]], dtypes.int64),
constant_op.constant([0, 1, 2], dtypes.int32),
constant_op.constant([2, 2], dtypes.int64))
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant([[0, 0], [0, 1]], dtypes.int64),
constant_op.constant([12.0, 5.0], dtypes.float32),
constant_op.constant([1, 2], dtypes.int64))
with self.assertRaises(ValueError):
embedding_ops.embedding_lookup_sparse(
x, sp_ids, sp_weights, combiner="mean")
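# The three combiners verified above reduce the gathered rows of one sparse
# row as follows (numpy sketch; `emb` is a [num_ids, ...] block of gathered
# embeddings and `w` the matching weights -- names are illustrative):
def _np_combine(emb, w, combiner):
  w = np.reshape(w, (-1,) + (1,) * (emb.ndim - 1))
  weighted = np.sum(emb * w, axis=0)
  if combiner == "sum":
    return weighted
  if combiner == "mean":
    return weighted / np.sum(w)
  return weighted / np.sqrt(np.sum(w * w))  # combiner == "sqrtn"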
class SafeEmbeddingLookupSparseTest(test.TestCase):
def _random_weights(self, vocab_size=4, embed_dim=4, num_shards=1):
assert vocab_size > 0
assert embed_dim > 0
assert num_shards > 0
assert num_shards <= vocab_size
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1.0 / math.sqrt(vocab_size), dtype=dtypes.float32)
embedding_weights = list(variable_scope.get_variable(
name="embedding_weights",
shape=[vocab_size, embed_dim],
partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
initializer=initializer))
for w in embedding_weights:
w.initializer.run()
embedding_weights = [w.eval() for w in embedding_weights]
return embedding_weights
def _ids_and_weights_2d(self):
# Each row demonstrates a test case:
# Row 0: multiple valid ids, 1 invalid id, weighted mean
# Row 1: all ids are invalid (leaving no valid ids after pruning)
# Row 2: no ids to begin with
# Row 3: single id
# Row 4: all ids have <=0 weight
indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [4, 0], [4, 1]]
ids = [0, 1, -1, -1, 2, 0, 1]
weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
shape = [5, 4]
sparse_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
sparse_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sparse_ids, sparse_weights
def _ids_and_weights_3d(self):
# Each (2-D) index demonstrates a test case:
# Index 0, 0: multiple valid ids, 1 invalid id, weighted mean
# Index 0, 1: all ids are invalid (leaving no valid ids after pruning)
# Index 0, 2: no ids to begin with
# Index 1, 0: single id
# Index 1, 1: all ids have <=0 weight
# Index 1, 2: no ids to begin with
indices = [[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 1, 0], [1, 0, 0], [1, 1, 0],
[1, 1, 1]]
ids = [0, 1, -1, -1, 2, 0, 1]
weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
shape = [2, 3, 4]
sparse_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
sparse_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sparse_ids, sparse_weights
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_return_zero_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(
embedding_weights, sparse_ids, sparse_weights).eval())
self.assertAllClose(
embedding_lookup_result,
[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, [0] * 4, [0] * 4, embedding_weights[0][2], [0] * 4])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_return_special_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(
embedding_weights, sparse_ids, sparse_weights,
default_id=3).eval())
self.assertAllClose(
embedding_lookup_result,
[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, embedding_weights[0][3], embedding_weights[0][3],
embedding_weights[0][2], embedding_weights[0][3]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_no_weights(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, _ = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(
embedding_weights, sparse_ids, None).eval())
self.assertAllClose(
embedding_lookup_result,
[(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4,
[0] * 4, embedding_weights[0][2], (
embedding_weights[0][0] + embedding_weights[0][1]) / 2.0])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_partitioned(self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, _ = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(
embedding_weights, sparse_ids, None).eval())
embedding_weights = list(itertools.chain(*embedding_weights))
self.assertAllClose(embedding_lookup_result,
[(embedding_weights[0] + embedding_weights[1]) / 2.0,
[0] * 4, [0] * 4, embedding_weights[2],
(embedding_weights[0] + embedding_weights[1]) / 2.0])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_partitioned_inconsistent_weights(self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_weights[1] = embedding_weights[1].astype(np.float64)
self.assertRaises(TypeError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids)
embedding_weights = [
constant_op.constant(w, dtype=dtypes.float64)
for w in embedding_weights
]
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids, sparse_weights)
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_return_zero_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(
embedding_weights, sparse_ids, sparse_weights).eval())
self.assertAllClose(embedding_lookup_result, [[
(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) / 3.0,
[0] * 4, [0] * 4
], [embedding_weights[0][2], [0] * 4, [0] * 4]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_return_special_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(
embedding_weights, sparse_ids, sparse_weights,
default_id=3).eval())
self.assertAllClose(
embedding_lookup_result,
[[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, embedding_weights[0][3], embedding_weights[0][3]], [
embedding_weights[0][2], embedding_weights[0][3],
embedding_weights[0][3]
]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_no_weights(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, _ = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(
embedding_weights, sparse_ids, None).eval())
self.assertAllClose(embedding_lookup_result, [[(
embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4, [
0
] * 4], [
embedding_weights[0][2],
(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4
]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_partitioned(self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, _ = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(
embedding_weights, sparse_ids, None).eval())
embedding_weights = list(itertools.chain(*embedding_weights))
self.assertAllClose(embedding_lookup_result, [[
(embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4, [0] * 4
], [
embedding_weights[2],
(embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4
]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_partitioned_inconsistent_weights(
self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_weights[1] = embedding_weights[1].astype(np.float64)
self.assertRaises(TypeError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids)
embedding_weights = [
constant_op.constant(w, dtype=dtypes.float64)
for w in embedding_weights
]
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids, sparse_weights)
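# The "safe" variants tested above prune entries whose id is negative or
# whose weight is <= 0, take the weighted mean of what survives, and fall
# back to a default row (zeros, or the default_id row) when nothing does.
# A hedged per-row sketch in plain python (names are illustrative):
def _np_safe_row(embedding_rows, ids, weights, default_row):
  kept = [(i, w) for i, w in zip(ids, weights) if i >= 0 and w > 0]
  if not kept:
    return default_row
  total = sum(w for _, w in kept)
  return sum(w * embedding_rows[i] for i, w in kept) / total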
class DynamicStitchOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testCint32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testCint32Gpu(self):
with self.session(use_gpu=True):
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testInt32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testInt32Gpu(self):
with self.session(use_gpu=True):
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testSumGradArgs(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 2, 3]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([2, 3, 5, 7]),
ops.convert_to_tensor([1, 1])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values).eval(), [2, 3, 1, 1])
# We expect that the values are merged in order.
@test_util.run_deprecated_v1
def testStitchOrder(self):
with self.cached_session():
indices = []
np_values = []
values = []
for _ in range(10):
        indices.append(ops.convert_to_tensor(np.arange(100).astype(np.int32)))
        np_values.append(np.random.uniform(size=100))
        values.append(ops.convert_to_tensor(np_values[-1]))
stitched = data_flow_ops.dynamic_stitch(indices, values).eval()
self.assertAllEqual(np_values[-1], stitched)
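# dynamic_stitch merges (indices, values) pairs with last-writer-wins
# semantics, which is what testSumGradArgs and testStitchOrder above rely
# on. A minimal list-based sketch of the same rule (illustrative only):
def _np_dynamic_stitch(indices, values):
  merged = [None] * (max(max(idx) for idx in indices) + 1)
  for idx, val in zip(indices, values):
    for i, v in zip(idx, val):
      merged[i] = v
  return merged
# e.g. _np_dynamic_stitch([[0, 1, 2], [2, 3]], [[12, 23, 34], [1, 2]])
# yields [12, 23, 1, 2], matching testCint32Cpu above.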
class ParallelDynamicStitchOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testCint32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 4, 6]),
ops.convert_to_tensor([2, 3, 5])
]
values = [
ops.convert_to_tensor([12, 23, 34, 45]),
ops.convert_to_tensor([1, 2, 3])
]
self.assertAllEqual(
data_flow_ops.parallel_dynamic_stitch(indices, values).eval(),
[12, 23, 1, 2, 34, 3, 45])
@test_util.run_deprecated_v1
def testInt32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 5, 6, 7]),
ops.convert_to_tensor([2, 4, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34, 45, 56]),
ops.convert_to_tensor([1, 3, 2])
]
self.assertAllEqual(
data_flow_ops.parallel_dynamic_stitch(indices, values).eval(),
[12, 23, 1, 2, 3, 34, 45, 56])
@test_util.run_deprecated_v1
def testSimple(self):
with self.session(use_gpu=False):
indices = [ops.convert_to_tensor([0, 1]), ops.convert_to_tensor([2, 3])]
values = [ops.convert_to_tensor([2, 3]), ops.convert_to_tensor([1, 1])]
self.assertAllEqual(
data_flow_ops.parallel_dynamic_stitch(indices, values).eval(),
[2, 3, 1, 1])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/embedding_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for IdentityOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class IdentityOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testInt32_6(self):
with self.cached_session():
value = array_ops.identity([1, 2, 3, 4, 5, 6]).eval()
self.assertAllEqual(np.array([1, 2, 3, 4, 5, 6]), value)
@test_util.run_deprecated_v1
def testInt32_2_3(self):
with self.cached_session():
inp = constant_op.constant([10, 20, 30, 40, 50, 60], shape=[2, 3])
value = array_ops.identity(inp).eval()
self.assertAllEqual(np.array([[10, 20, 30], [40, 50, 60]]), value)
@test_util.run_deprecated_v1
def testString(self):
source = [b"A", b"b", b"C", b"d", b"E", b"f"]
with self.cached_session():
value = array_ops.identity(source).eval()
self.assertAllEqual(source, value)
def testIdentityShape(self):
with self.cached_session():
shape = [2, 3]
array_2x3 = [[1, 2, 3], [6, 5, 4]]
tensor = constant_op.constant(array_2x3)
      self.assertEqual(shape, tensor.get_shape())
      self.assertEqual(shape, array_ops.identity(tensor).get_shape())
      self.assertEqual(shape, array_ops.identity(array_2x3).get_shape())
      self.assertEqual(shape,
                       array_ops.identity(np.array(array_2x3)).get_shape())
@test_util.run_v1_only("b/120545219")
def testRefIdentityShape(self):
with self.cached_session():
shape = [2, 3]
tensor = variables.VariableV1(
constant_op.constant(
[[1, 2, 3], [6, 5, 4]], dtype=dtypes.int32))
      self.assertEqual(shape, tensor.get_shape())
      self.assertEqual(shape, gen_array_ops.ref_identity(tensor).get_shape())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/identity_op_py_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NDHWC", False), ("NDHWC", True)]
if test.is_gpu_available(cuda_only=True):
# "NCHW" format is currently supported exclusively on CUDA GPUs.
test_configs += [("NCDHW", True)]
return test_configs
# TODO(mjanusz): Add microbenchmarks for 3d pooling.
class PoolingTest(test.TestCase):
def _VerifyOneTest(self, pool_func, input_sizes, window, strides, padding,
data_format, expected, use_gpu):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Pooling function to call, e.g. nn_ops.max_pool3d or
        nn_ops.avg_pool3d.
input_sizes: Input tensor dimensions.
window: Tuple of kernel dims: planes, rows, cols.
strides: Tuple of strides for dims: planes, rows, cols.
padding: Padding type.
data_format: The data format we use to run the pooling operation.
expected: An array containing the expected operation outputs.
use_gpu: Whether to run ops on GPU.
"""
total_size = 1
for s in input_sizes:
total_size *= s
    # Initializes the input tensor with an array of incrementing numbers
    # starting from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.cached_session(use_gpu=use_gpu) as sess:
t = constant_op.constant(x, shape=input_sizes)
window = [1] + list(window) + [1]
strides = [1] + list(strides) + [1]
if data_format == "NCDHW":
t = test_util.NHWCToNCHW(t)
window = test_util.NHWCToNCHW(window)
strides = test_util.NHWCToNCHW(strides)
t = pool_func(
t,
ksize=window,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCDHW":
t = test_util.NCHWToNHWC(t)
vals = self.evaluate(t)
# Verifies values.
actual = vals.flatten()
self.assertAllClose(expected, actual)
def _VerifyValues(self, pool_func, input_sizes, window, strides,
padding, expected):
for data_format, use_gpu in GetTestConfigs():
self._VerifyOneTest(pool_func, input_sizes, window, strides, padding,
data_format, expected, use_gpu)
def testAvgPool3dValidPadding(self):
expected_output = [20.5, 21.5, 22.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="VALID",
expected=expected_output)
def testAvgPool3dSamePadding(self):
expected_output = [20.5, 21.5, 22.5, 26.5, 27.5, 28.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 2, 2, 4, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="SAME",
expected=expected_output)
def testAvgPool3dSamePaddingDifferentStrides(self):
expected_output = [1.5, 4.5, 7.5, 17.5, 20.5, 23.5, 33.5, 36.5, 39.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 5, 8, 1, 1],
window=(1, 2, 3),
strides=(2, 3, 1),
padding="SAME",
expected=expected_output)
def testMaxPool3dValidPadding(self):
expected_output = [40.0, 41.0, 42.0]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="VALID",
expected=expected_output)
def testMaxPool3dSamePadding(self):
expected_output = [31., 32., 33., 34., 35., 36.]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 2, 2, 3, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="SAME",
expected=expected_output)
def testMaxPool3dSamePaddingDifferentStrides(self):
expected_output = [2., 5., 8., 18., 21., 24., 34., 37., 40.]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 5, 8, 1, 1],
window=(1, 2, 3),
strides=(2, 3, 1),
padding="SAME",
expected=expected_output)
# Test pooling on a larger input, with different stride and kernel
# size for the 'z' dimension.
# Simulate max pooling in numpy to get the expected output.
input_data = np.arange(1, 5 * 27 * 27 * 64 + 1).reshape((5, 27, 27, 64))
input_data = np.pad(input_data, [[0, 0], [0, 1], [0, 1], [0, 0]],
mode="constant")
expected_output = input_data[:, 1::2, 1::2, :]
expected_output[:, -1, :, :] = input_data[:, -2, 1::2, :]
expected_output[:, :, -1, :] = input_data[:, 1::2, -2, :]
expected_output[:, -1, -1, :] = input_data[:, -2, -2, :]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 5, 27, 27, 64],
window=(1, 2, 2),
strides=(1, 2, 2),
padding="SAME",
expected=expected_output.flatten())
def testKernelSmallerThanStride(self):
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 1],
window=[1, 1, 1],
strides=[2, 2, 2],
padding="SAME",
expected=[1, 3, 7, 9, 19, 21, 25, 27])
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 7, 7, 7, 1],
window=[2, 2, 2],
strides=[3, 3, 3],
padding="VALID",
expected=[58, 61, 79, 82, 205, 208, 226, 229])
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 1],
window=[1, 1, 1],
strides=[2, 2, 2],
padding="SAME",
expected=[1, 3, 7, 9, 19, 21, 25, 27])
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 7, 7, 7, 1],
window=[2, 2, 2],
strides=[3, 3, 3],
padding="VALID",
expected=[29.5, 32.5, 50.5, 53.5, 176.5, 179.5, 197.5, 200.5])
def _ConstructAndTestGradientForConfig(self,
pool_func,
input_sizes,
output_sizes,
window,
strides,
padding,
data_format,
use_gpu):
"""Verifies the gradients of a pooling function.
Args:
      pool_func: Pooling function to call, e.g. nn_ops.max_pool3d or
        nn_ops.avg_pool3d.
input_sizes: Input tensor dimensions.
output_sizes: Output tensor dimensions.
window: Tuple of kernel dims: planes, rows, cols.
strides: Tuple of strides for dims: planes, rows, cols.
padding: Padding type.
data_format: Data format string.
use_gpu: Whether to run on GPU.
"""
total_size = 1
for s in input_sizes:
total_size *= s
    # Initializes the input tensor with an array of incrementing numbers
    # starting from 1.
x = np.arange(1, total_size + 1, dtype=np.float32)
with self.cached_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(x, shape=input_sizes, name="input")
err_g_margin = 1e-3
err_gg_margin = 1.5e-2
if pool_func == nn_ops.avg_pool3d:
func_name = "avg_pool3d"
x_init_value = None
else:
x_init_value = np.asfarray(np.arange(1, total_size + 1),
dtype=np.float32).reshape(input_sizes)
func_name = "max_pool3d"
ksize = [1, window[0], window[1], window[2], 1]
strides = [1, strides[0], strides[1], strides[2], 1]
t = input_tensor
if data_format == "NCDHW":
ksize = test_util.NHWCToNCHW(ksize)
strides = test_util.NHWCToNCHW(strides)
t = test_util.NHWCToNCHW(t)
output_sizes = test_util.NHWCToNCHW(output_sizes)
t = pool_func(
t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=func_name)
t_g = gradients_impl.gradients(t**2, input_tensor)[0]
err_g = gradient_checker.compute_gradient_error(
input_tensor,
input_sizes,
t,
output_sizes,
x_init_value=x_init_value,
delta=1e-2)
err_gg = gradient_checker.compute_gradient_error(
input_tensor,
input_sizes,
t_g,
input_sizes,
x_init_value=x_init_value,
delta=1e-2)
print("%s gradient error = " % func_name, err_g)
self.assertLess(err_g, err_g_margin)
print("%s second-order gradient error = " % func_name, err_gg)
self.assertLess(err_gg, err_gg_margin)
def _ConstructAndTestGradient(self,
pool_func,
**kwargs):
"""Runs _ConstructAndTestGradientForConfig for all tests configurations."""
for data_format, use_gpu in GetTestConfigs():
self._ConstructAndTestGradientForConfig(pool_func,
data_format=data_format,
use_gpu=use_gpu,
**kwargs)
@test_util.run_deprecated_v1
def testMaxPoolGradValidPadding1_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 1],
output_sizes=[1, 3, 3, 3, 1],
window=(1, 1, 1),
strides=(1, 1, 1),
padding="VALID")
@test_util.run_deprecated_v1
def testMaxPoolGradValidPadding2_1_6_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 2, 3, 4, 2],
output_sizes=[1, 1, 2, 3, 2],
window=(2, 2, 2),
strides=(1, 1, 1),
padding="VALID")
@test_util.run_deprecated_v1
def testMaxPoolGradValidPadding2_1_7_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 2, 7, 1],
output_sizes=[1, 2, 1, 6, 1],
window=(2, 2, 2),
strides=(1, 1, 1),
padding="VALID")
@test_util.run_deprecated_v1
def testMaxPoolGradValidPadding1_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 1],
output_sizes=[1, 2, 2, 2, 1],
window=(1, 1, 1),
strides=(2, 2, 2),
padding="VALID")
@test_util.run_deprecated_v1
def testMaxPoolGradValidPadding2_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[2, 2, 2, 2, 1],
output_sizes=[2, 1, 1, 1, 1],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="VALID")
@test_util.run_deprecated_v1
def testMaxPoolGradSamePadding1_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 2, 4, 1],
output_sizes=[1, 3, 2, 4, 1],
window=(1, 1, 1),
strides=(1, 1, 1),
padding="SAME")
@test_util.run_deprecated_v1
def testMaxPoolGradSamePadding1_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 2, 4, 1],
output_sizes=[1, 2, 1, 2, 1],
window=(1, 1, 1),
strides=(2, 2, 2),
padding="SAME")
@test_util.run_deprecated_v1
def testMaxPoolGradSamePadding2_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 2, 4, 1],
output_sizes=[1, 3, 2, 4, 1],
window=(2, 2, 2),
strides=(1, 1, 1),
padding="SAME")
@test_util.run_deprecated_v1
def testMaxPoolGradSamePadding2_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 5, 2, 4, 2],
output_sizes=[1, 3, 1, 2, 2],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="SAME")
@test_util.run_deprecated_v1
def testMaxPoolGradSamePadding3_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 4, 2, 1],
output_sizes=[1, 3, 4, 2, 1],
window=(3, 3, 3),
strides=(1, 1, 1),
padding="SAME")
@test_util.run_deprecated_v1
def testAvgPoolGradValidPadding1_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 1],
output_sizes=[1, 3, 3, 3, 1],
window=(1, 1, 1),
strides=(1, 1, 1),
padding="VALID")
@test_util.run_deprecated_v1
def testAvgPoolGradValidPadding1_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 1],
output_sizes=[1, 2, 2, 2, 1],
window=(1, 1, 1),
strides=(2, 2, 2),
padding="VALID")
@test_util.run_deprecated_v1
def testAvgPoolGradValidPadding2_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 2],
output_sizes=[1, 2, 2, 2, 2],
window=(2, 2, 2),
strides=(1, 1, 1),
padding="VALID")
@test_util.run_deprecated_v1
def testAvgPoolGradValidPadding2_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[2, 2, 2, 2, 2],
output_sizes=[2, 1, 1, 1, 2],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="VALID")
@test_util.run_deprecated_v1
def testAvgPoolGradSamePadding1_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 2, 4, 2],
output_sizes=[1, 3, 2, 4, 2],
window=(1, 1, 1),
strides=(1, 1, 1),
padding="SAME")
@test_util.run_deprecated_v1
def testAvgPoolGradSamePadding1_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 2, 4, 2],
output_sizes=[1, 2, 1, 2, 2],
window=(1, 1, 1),
strides=(2, 2, 2),
padding="SAME")
@test_util.run_deprecated_v1
def testAvgPoolGradSamePadding2_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 2, 2, 2, 1],
output_sizes=[1, 2, 2, 2, 1],
window=(2, 2, 2),
strides=(1, 1, 1),
padding="SAME")
@test_util.run_deprecated_v1
def testAvgPoolGradSamePadding2_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 5, 2, 4, 1],
output_sizes=[1, 3, 1, 2, 1],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="SAME")
@test_util.run_deprecated_v1
def testAvgPoolGradSamePadding3_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 6, 2, 1],
output_sizes=[1, 3, 6, 2, 1],
window=(3, 3, 3),
strides=(1, 1, 1),
padding="SAME")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/pooling_ops_3d_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple benchmarks for reductions and their gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ReduceBenchmarks(test.Benchmark):
"""Benchmarks for reductions."""
def _run(self, func, num_iters):
# call func to maybe warm up the GPU
func()
start = time.time()
for _ in range(num_iters):
func()
end = time.time()
mean_us = (end - start) * 1e6 / num_iters
self.report_benchmark(
iters=num_iters,
wall_time=mean_us,
extras={"examples_per_sec": num_iters / (end - start)})
def benchmark_reduce_sum_grad_eager(self):
with context.eager_mode():
tensor = array_ops.zeros([100, 1000])
def fn():
backprop.gradients_function(math_ops.reduce_sum, [0])(tensor)
self._run(fn, 10000)
def benchmark_reduce_sum_grad_eager_cpu(self):
with context.eager_mode(), ops.device("/cpu:0"):
tensor = array_ops.zeros([100, 1000])
def fn():
backprop.gradients_function(math_ops.reduce_sum, [0])(tensor)
self._run(fn, 10000)
def benchmark_reduce_sum_grad_graph(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0)))
with ops.Graph().as_default(), session.Session(config=config) as sess:
tensor = constant_op.constant(np.zeros([100, 1000], dtype=np.float32))
reduction = math_ops.reduce_sum(tensor)
grad, = gradients_impl.gradients(reduction, tensor)
def fn():
self.evaluate(grad.op)
self._run(fn, 10000)
def benchmark_reduce_sum_grad_graph_cpu(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0)))
with ops.Graph().as_default(), session.Session(config=config) as sess:
with ops.device("/cpu:0"):
tensor = constant_op.constant(np.zeros([100, 1000], dtype=np.float32))
reduction = math_ops.reduce_sum(tensor)
grad, = gradients_impl.gradients(reduction, tensor)
def fn():
self.evaluate(grad.op)
self._run(fn, 10000)
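# The timing recipe in _run above is the standard one: one warm-up call,
# then wall-clock time over num_iters calls, reported as mean microseconds
# per call. A self-contained sketch of the same pattern (name illustrative):
def _mean_us(func, num_iters):
  func()  # warm up, e.g. so GPU kernels compile/autotune before timing
  start = time.time()
  for _ in range(num_iters):
    func()
  return (time.time() - start) * 1e6 / num_iters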
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/reduce_benchmark_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class MatrixSolveOpTest(test.TestCase):
def _verifySolve(self, x, y, batch_dims=None):
for np_type in [np.float32, np.float64, np.complex64, np.complex128]:
if np_type == np.float32 or np_type == np.complex64:
tol = 1e-5
else:
tol = 1e-12
for adjoint in False, True:
        if np_type in (np.float32, np.float64):
          a = x.real.astype(np_type)
          b = y.real.astype(np_type)
else:
a = x.astype(np_type)
b = y.astype(np_type)
a_np = np.conj(np.transpose(a)) if adjoint else a
if batch_dims is not None:
a = np.tile(a, batch_dims + [1, 1])
a_np = np.tile(a_np, batch_dims + [1, 1])
b = np.tile(b, batch_dims + [1, 1])
np_ans = np.linalg.solve(a_np, b)
for use_placeholder in False, True:
with self.cached_session(use_gpu=True) as sess:
if use_placeholder:
a_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
b_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
tf_ans = linalg_ops.matrix_solve(a_ph, b_ph, adjoint=adjoint)
out = sess.run(tf_ans, {a_ph: a, b_ph: b})
else:
tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
out = self.evaluate(tf_ans)
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertEqual(np_ans.shape, out.shape)
self.assertAllClose(np_ans, out, atol=tol, rtol=tol)
def _generateMatrix(self, m, n):
matrix = (np.random.normal(-5, 5,
m * n).astype(np.complex128).reshape([m, n]))
matrix.imag = (np.random.normal(-5, 5, m * n).astype(np.complex128).reshape(
[m, n]))
return matrix
@test_util.run_deprecated_v1
def testSolve(self):
for n in 1, 2, 4, 9:
matrix = self._generateMatrix(n, n)
for nrhs in 1, 2, n:
rhs = self._generateMatrix(n, nrhs)
self._verifySolve(matrix, rhs)
@test_util.run_deprecated_v1
def testSolveBatch(self):
for n in 2, 5:
matrix = self._generateMatrix(n, n)
for nrhs in 1, n:
rhs = self._generateMatrix(n, nrhs)
for batch_dims in [[2], [2, 2], [7, 4]]:
self._verifySolve(matrix, rhs, batch_dims=batch_dims)
@test_util.run_deprecated_v1
def testNonSquareMatrix(self):
    # Attempting to solve with a non-square matrix should raise an error.
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
matrix = constant_op.constant([[1., 2., 3.], [3., 4., 5.]])
linalg_ops.matrix_solve(matrix, matrix)
@test_util.run_deprecated_v1
def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows.
with self.session(use_gpu=True):
matrix = constant_op.constant([[1., 0.], [0., 1.]])
rhs = constant_op.constant([[1., 0.]])
with self.assertRaises(ValueError):
linalg_ops.matrix_solve(matrix, rhs)
def testNotInvertible(self):
# The input should be invertible.
with self.session(use_gpu=True):
with self.assertRaisesOpError("Input matrix is not invertible."):
# All rows of the matrix below add to zero
matrix = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],
[0., -1., 1.]])
linalg_ops.matrix_solve(matrix, matrix).eval()
@test_util.run_deprecated_v1
def testConcurrent(self):
with self.session(use_gpu=True) as sess:
all_ops = []
for adjoint_ in False, True:
lhs1 = random_ops.random_normal([3, 3], seed=42)
lhs2 = random_ops.random_normal([3, 3], seed=42)
rhs1 = random_ops.random_normal([3, 3], seed=42)
rhs2 = random_ops.random_normal([3, 3], seed=42)
s1 = linalg_ops.matrix_solve(lhs1, rhs1, adjoint=adjoint_)
s2 = linalg_ops.matrix_solve(lhs2, rhs2, adjoint=adjoint_)
all_ops += [s1, s2]
val = self.evaluate(all_ops)
self.assertAllEqual(val[0], val[1])
self.assertAllEqual(val[2], val[3])
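# In numpy terms, the adjoint branch of _verifySolve checks that
# matrix_solve(a, b, adjoint=True) solves conj(a)^T x = b. A hedged 2-D
# reference sketch (batched inputs would need per-matrix transposes):
def _np_adjoint_solve(a, b):
  return np.linalg.solve(np.conj(a).T, b)  # conjugate-transpose, then solve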
class MatrixSolveBenchmark(test.Benchmark):
matrix_shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1001, 1001),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
def _GenerateTestData(self, matrix_shape, num_rhs):
batch_shape = matrix_shape[:-2]
matrix_shape = matrix_shape[-2:]
assert matrix_shape[0] == matrix_shape[1]
n = matrix_shape[0]
matrix = (np.ones(matrix_shape).astype(np.float32) /
(2.0 * n) + np.diag(np.ones(n).astype(np.float32)))
rhs = np.ones([n, num_rhs]).astype(np.float32)
matrix = variables.Variable(
np.tile(matrix, batch_shape + (1, 1)), trainable=False)
rhs = variables.Variable(
np.tile(rhs, batch_shape + (1, 1)), trainable=False)
return matrix, rhs
def benchmarkMatrixSolveOp(self):
run_gpu_test = test.is_gpu_available(True)
for adjoint in False, True:
for matrix_shape in self.matrix_shapes:
for num_rhs in 1, 2, matrix_shape[-1]:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_cpu_shape_{matrix_shape}_num_rhs_{num_rhs}_"
"adjoint_{adjoint}").format(
matrix_shape=matrix_shape,
num_rhs=num_rhs,
adjoint=adjoint))
if run_gpu_test:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/gpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_gpu_shape_{matrix_shape}_num_rhs_"
"{num_rhs}_adjoint_{adjoint}").format(
matrix_shape=matrix_shape, num_rhs=num_rhs,
adjoint=adjoint))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/matrix_solve_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cond_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.util import compat
_OPTIONAL_OPS = frozenset([
"OptionalFromValue", "OptionalNone", "OptionalHasValue", "OptionalGetValue"
])
class CondV2Test(test.TestCase):
def _testCond(self, true_fn, false_fn, train_vals, feed_dict=None):
if not feed_dict:
feed_dict = {}
with self.session(graph=ops.get_default_graph()) as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
expected = control_flow_ops.cond(pred, true_fn, false_fn, name="expected")
actual = cond_v2.cond_v2(pred, true_fn, false_fn, name="actual")
expected_grad = gradients_impl.gradients(expected, train_vals)
actual_grad = gradients_impl.gradients(actual, train_vals)
sess_run_args = {pred: True}
sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
(expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
sess_run_args = {pred: False}
sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
(expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
@test_util.run_deprecated_v1
def testBasic(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return x * 2.0
def false_fn():
return y * 3.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testExternalControlDependencies(self):
with ops.Graph().as_default(), self.test_session():
v = variables.Variable(1.0)
v.initializer.run()
op = v.assign_add(1.0)
def true_branch():
with ops.control_dependencies([op]):
return 1.0
cond_v2.cond_v2(array_ops.placeholder_with_default(False, None),
true_branch,
lambda: 2.0).eval()
self.assertAllEqual(self.evaluate(v), 2.0)
@test_util.run_deprecated_v1
def testMultipleOutputs(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return x * y, y
def false_fn():
return x, y * 3.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testBasic2(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return x * y * 2.0
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testNoInputs(self):
with self.cached_session() as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
def true_fn():
return constant_op.constant(1.0)
def false_fn():
return constant_op.constant(2.0)
out = cond_v2.cond_v2(pred, true_fn, false_fn)
self.assertEqual(sess.run(out, {pred: True}), (1.0,))
self.assertEqual(sess.run(out, {pred: False}), (2.0,))
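  # Mechanically, cond_v2 lowers to one functional "If" op whose then/else
  # branches are graph functions; the helpers below recover that op from
  # the output tensor and assert its type.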
def _createCond(self, name):
"""Creates a cond_v2 call and returns the output tensor and the cond op."""
pred = constant_op.constant(True, name="pred")
x = constant_op.constant(1.0, name="x")
def true_fn():
return x
def false_fn():
return x + 1
output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
cond_op = output.op.inputs[0].op
self.assertEqual(cond_op.type, "If")
return output, cond_op
def _createNestedCond(self, name):
"""Like _createCond but creates a nested cond_v2 call as well."""
pred = constant_op.constant(True, name="pred")
x = constant_op.constant(1.0, name="x")
def true_fn():
return cond_v2.cond_v2(pred, lambda: x, lambda: x + 1)
def false_fn():
return x + 2
output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
cond_op = output.op.inputs[0].op
self.assertEqual(cond_op.type, "If")
return output, cond_op
def testDefaultName(self):
with ops.Graph().as_default():
_, cond_op = self._createCond(None)
self.assertEqual(cond_op.name, "cond")
self.assertRegexpMatches(
cond_op.get_attr("then_branch").name, r"cond_true_\d*")
self.assertRegexpMatches(
cond_op.get_attr("else_branch").name, r"cond_false_\d*")
with ops.Graph().as_default():
with ops.name_scope("foo"):
_, cond1_op = self._createCond("")
self.assertEqual(cond1_op.name, "foo/cond")
self.assertRegexpMatches(
cond1_op.get_attr("then_branch").name, r"foo_cond_true_\d*")
self.assertRegexpMatches(
cond1_op.get_attr("else_branch").name, r"foo_cond_false_\d*")
_, cond2_op = self._createCond(None)
self.assertEqual(cond2_op.name, "foo/cond_1")
self.assertRegexpMatches(
cond2_op.get_attr("then_branch").name, r"foo_cond_1_true_\d*")
self.assertRegexpMatches(
cond2_op.get_attr("else_branch").name, r"foo_cond_1_false_\d*")
@test_util.run_v1_only("b/120545219")
def testDefunInCond(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
@function.defun
def fn():
return x * y * 2.0
return fn()
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testNestedDefunInCond(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
@function.defun
def fn():
@function.defun
def nested_fn():
return x * y * 2.0
return nested_fn()
return fn()
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testDoubleNestedDefunInCond(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
@function.defun
def fn():
@function.defun
def nested_fn():
@function.defun
def nested_nested_fn():
return x * y * 2.0
return nested_nested_fn()
return nested_fn()
return fn()
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testNestedCond(self):
def run_test(pred_value):
def build_graph():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def false_true_fn():
return x * y * 2.0
def false_false_fn():
return x * 5.0
return _cond(pred, false_true_fn, false_false_fn, "inside_false_fn")
return x, y, pred, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
self._testCond(true_fn, false_fn, [x], {pred: pred_value})
self._testCond(true_fn, false_fn, [y], {pred: pred_value})
run_test(True)
run_test(False)
def testNestedCondBothBranches(self):
def run_test(pred_value):
def build_graph():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return _cond(pred, lambda: x + y, lambda: x * x, name=None)
def false_fn():
return _cond(pred, lambda: x - y, lambda: y * y, name=None)
return x, y, pred, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
self._testCond(true_fn, false_fn, [x], {pred: pred_value})
self._testCond(true_fn, false_fn, [y], {pred: pred_value})
run_test(True)
run_test(False)
def testDoubleNestedCond(self):
def run_test(pred1_value, pred2_value):
def build_graph():
pred1 = array_ops.placeholder(dtypes.bool, name="pred1")
pred2 = array_ops.placeholder(dtypes.bool, name="pred2")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def false_true_fn():
def false_true_true_fn():
return x * y * 2.0
def false_true_false_fn():
return x * 10.0
return _cond(
pred1,
false_true_true_fn,
false_true_false_fn,
name="inside_false_true_fn")
def false_false_fn():
return x * 5.0
return _cond(
pred2, false_true_fn, false_false_fn, name="inside_false_fn")
return x, y, pred1, pred2, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {
pred1: pred1_value,
pred2: pred2_value
})
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x], {
pred1: pred1_value,
pred2: pred2_value
})
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [y], {
pred1: pred1_value,
pred2: pred2_value
})
run_test(True, True)
run_test(True, False)
run_test(False, False)
run_test(False, True)
def testGradientFromInsideDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
# Compute grads inside a Defun.
@function.defun
def nesting_fn():
return gradients_impl.gradients(cond_outer, [x, y])
grads = nesting_fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default():
grads, pred_outer, pred_inner = build_graph()
with self.session(graph=ops.get_default_graph()) as sess:
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
def testGradientFromInsideNestedDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
# Compute grads inside a Defun.
@function.defun
def nesting_fn():
@function.defun
def inner_nesting_fn():
return gradients_impl.gradients(cond_outer, [x, y])
return inner_nesting_fn()
grads = nesting_fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default():
grads, pred_outer, pred_inner = build_graph()
with self.session(graph=ops.get_default_graph()) as sess:
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
def testBuildCondAndGradientInsideDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
# Build cond and its gradient inside a Defun.
@function.defun
def fn():
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
return gradients_impl.gradients(cond_outer, [x, y])
grads = fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default(), self.session(
graph=ops.get_default_graph()) as sess:
grads, pred_outer, pred_inner = build_graph()
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
@test_util.run_deprecated_v1
def testSecondDerivative(self):
with self.cached_session() as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(3.0, name="x")
def true_fn():
return math_ops.pow(x, 3)
def false_fn():
return x
cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
cond_grad = gradients_impl.gradients(cond, [x])
cond_grad_grad = gradients_impl.gradients(cond_grad, [x])
# d[x^3]/dx = 3x^2
true_val = sess.run(cond_grad, {pred: True})
self.assertEqual(true_val, [27.0])
# d[x]/dx = 1
false_val = sess.run(cond_grad, {pred: False})
self.assertEqual(false_val, [1.0])
true_val = sess.run(cond_grad_grad, {pred: True})
# d2[x^3]/dx2 = 6x
self.assertEqual(true_val, [18.0])
false_val = sess.run(cond_grad_grad, {pred: False})
# d2[x]/dx2 = 0
self.assertEqual(false_val, [0.0])
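# A hand-check of the values above at x = 3: 3*x**2 = 27. and 6*x = 18.,
# while the identity false branch has first derivative 1. and second
# derivative 0.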
def testGradientOfDeserializedCond(self):
with ops.Graph().as_default():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(3.0, name="x")
ops.add_to_collection("x", x)
def true_fn():
return math_ops.pow(x, 3)
def false_fn():
return x
ops.add_to_collection("pred", pred)
cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
ops.add_to_collection("cond", cond)
meta_graph = saver.export_meta_graph()
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
saver.import_meta_graph(meta_graph)
x = ops.get_collection("x")[0]
pred = ops.get_collection("pred")[0]
cond = ops.get_collection("cond")
cond_grad = gradients_impl.gradients(cond, [x], name="cond_grad")
cond_grad_grad = gradients_impl.gradients(
cond_grad, [x], name="cond_grad_grad")
# d[x^3]/dx = 3x^2
true_val = sess.run(cond_grad, {pred: True})
self.assertEqual(true_val, [27.0])
# d[x]/dx = 1
false_val = sess.run(cond_grad, {pred: False})
self.assertEqual(false_val, [1.0])
true_val = sess.run(cond_grad_grad, {pred: True})
# d2[x^3]/dx2 = 6x
self.assertEqual(true_val, [18.0])
false_val = sess.run(cond_grad_grad, {pred: False})
# d2[x]/dx2 = 0
self.assertEqual(false_val, [0.0])
def testGradientTapeOfCondWithResourceVariableInFunction(self):
with context.eager_mode():
v = variables.Variable(2.)
@def_function.function
def fnWithCond(): # pylint: disable=invalid-name
with backprop.GradientTape() as tape:
pred = constant_op.constant(True, dtype=dtypes.bool)
def true_fn():
return math_ops.pow(v, 3)
def false_fn():
return v
cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
return tape.gradient(cond, v)
self.assertAllEqual(fnWithCond(), 12.0)
def testLowering(self):
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cond_output, _ = self._createCond("cond")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(cond_output, options=run_options, run_metadata=run_metadata)
# If lowering was enabled, there should be a `Switch` node
switch_found = any(
any(node.op == "Switch" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertTrue(switch_found,
"A `Switch` op should exist if the graph was lowered.")
# If lowering was enabled, there should be no `If` node
if_found = any(
any(node.op == "If" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertFalse(if_found,
"An `If` op was found, but it should be lowered.")
@test_util.run_deprecated_v1
def testLoweringDisabledInXLA(self):
with self.session(graph=ops.Graph()) as sess:
# Build the cond_v2 in an XLA context
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
cond_output, cond_op = self._createCond("cond")
xla_context.Exit()
# Check lowering attr is not set.
with self.assertRaises(ValueError):
cond_op.get_attr("_lower_using_switch_merge")
# Check the actual graph that is run.
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(cond_output, options=run_options, run_metadata=run_metadata)
# Lowering disabled in XLA, there should be no `Switch` node
switch_found = any(
any(node.op == "Switch" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertFalse(
switch_found,
"A `Switch` op exists, but the graph should not be lowered.")
# Lowering disabled in XLA, there should still be an `If` node
if_found = any(
any(node.op == "If" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertTrue(
if_found,
"An `If` op was not found, but the graph should not be lowered.")
@test_util.run_deprecated_v1
def testNestedLoweringDisabledInXLA(self):
# Build the cond_v2 in an XLA context
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
_, cond_op = self._createNestedCond("cond")
xla_context.Exit()
# Check lowering attr is not set for either If node.
with self.assertRaises(ValueError):
cond_op.get_attr("_lower_using_switch_merge")
nested_if_ops = []
for func in ops.get_default_graph()._functions.values():
nested_if_ops.extend(op for op in func.graph.get_operations()
if op.type == "If")
self.assertEqual(len(nested_if_ops), 1)
with self.assertRaises(ValueError):
nested_if_ops[0].get_attr("_lower_using_switch_merge")
# TODO(skyewm): check the actual graphs that are run once we have a way to
# programmatically access those graphs.
# b/131355614
@test_util.run_deprecated_v1
def testNoOptionalsInXla(self):
@def_function.function
def func_with_cond():
pred = constant_op.constant(True, name="pred")
x = constant_op.constant(1.0, name="x")
def true_fn():
intermediate = x + 1
return intermediate * x
def false_fn():
return x + 1
output = cond_v2.cond_v2(pred, true_fn, false_fn)
grad = gradients_impl.gradients(output, x)[0]
forward_if_op = output.op.inputs[0].op
gradient_if_op = grad.op.inputs[0].op
def verify_no_optional_ops(op, branch_name):
branch_function = ops.get_default_graph()._get_function(
op.get_attr(branch_name).name)
function_def = branch_function.definition
for node_def in function_def.node_def:
self.assertNotIn(node_def.op, _OPTIONAL_OPS)
verify_no_optional_ops(forward_if_op, "then_branch")
verify_no_optional_ops(forward_if_op, "else_branch")
verify_no_optional_ops(gradient_if_op, "then_branch")
verify_no_optional_ops(gradient_if_op, "else_branch")
return grad
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
func_with_cond()
xla_context.Exit()
@test_util.run_deprecated_v1
def testLoweringDisabledWithSingleThreadedExecutorContext(self):
with self.session(graph=ops.Graph()) as sess:
@function.defun
def _add_cond(x):
return cond_v2.cond_v2(
constant_op.constant(True, name="pred"),
lambda: x,
lambda: x + 1)
x = array_ops.placeholder(shape=None, dtype=dtypes.float32)
with context.function_executor_type("SINGLE_THREADED_EXECUTOR"):
out_cond = _add_cond(x)
# The fact that sess.run() succeeds means lowering is disabled, because
# the single threaded executor does not support cond v1 ops.
sess.run(out_cond, feed_dict={x: 1.0})
@test_util.enable_control_flow_v2
def testStructuredOutputs(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return ((x * y,), y)
def false_fn():
return ((x,), y * 3.0)
output = control_flow_ops.cond(
constant_op.constant(False), true_fn, false_fn)
self.assertEqual(self.evaluate(output[0][0]), 1.)
self.assertEqual(self.evaluate(output[1]), 9.)
@test_util.enable_control_flow_v2
@test_util.run_deprecated_v1
def testRaisesOutputStructuresMismatch(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return x * y, y
def false_fn():
return ((x,), y * 3.0)
with self.assertRaisesRegexp(
TypeError, "true_fn and false_fn arguments to tf.cond must have the "
"same number, type, and overall structure of return values."):
control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
@test_util.enable_control_flow_v2
def testCondAndTensorArray(self):
x = math_ops.range(-5, 5)
output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])
def loop_body(i, output):
def if_true():
return output.write(i, x[i]**2)
def if_false():
return output.write(i, x[i])
output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
return i + 1, output
_, output = control_flow_ops.while_loop(
lambda i, arr: i < x.shape[0],
loop_body,
loop_vars=(constant_op.constant(0), output))
output_t = output.stack()
self.assertAllEqual(
self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
@test_util.enable_control_flow_v2
def testCondAndTensorArrayInDefun(self):
@function.defun
def f():
x = math_ops.range(-5, 5)
output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])
def loop_body(i, output):
def if_true():
return output.write(i, x[i]**2)
def if_false():
return output.write(i, x[i])
output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
return i + 1, output
_, output = control_flow_ops.while_loop(
lambda i, arr: i < x.shape[0],
loop_body,
loop_vars=(constant_op.constant(0), output))
return output.stack()
output_t = f()
self.assertAllEqual(
self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
@test_util.run_deprecated_v1
def testForwardPassRewrite(self):
x = constant_op.constant(1.0, name="x")
output = cond_v2.cond_v2(constant_op.constant(True),
lambda: x * 2.0,
lambda: x)
if_op = output.op.inputs[0].op
self.assertEqual(if_op.type, "If")
# pylint: disable=g-deprecated-assert
self.assertEqual(len(if_op.outputs), 1)
gradients_impl.gradients(output, x)
# if_op should have been rewritten to output 2.0 intermediate.
self.assertEqual(len(if_op.outputs), 2)
gradients_impl.gradients(output, x)
# Computing the gradient again shouldn't rewrite if_op again.
self.assertEqual(len(if_op.outputs), 2)
# pylint: enable=g-deprecated-assert
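# Recap of the rewrite exercised above: the forward "If" op is built with a
# single output, and the first gradients() call rewrites it in place to also
# expose the intermediate tensor (x * 2.0) consumed by the gradient branch
# functions, so len(if_op.outputs) grows from 1 to 2 exactly once.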
class CondV2CollectionTest(test.TestCase):
def testCollectionIntValueAccessInCond(self):
"""Read values from graph collections inside of cond_v2."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = 2
y = 5
ops.add_to_collection("x", x)
ops.add_to_collection("y", y)
def fn():
x_const = constant_op.constant(ops.get_collection("x")[0])
y_const = constant_op.constant(ops.get_collection("y")[0])
return math_ops.add(x_const, y_const)
cnd = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
self.assertEqual(cnd.eval(), 7)
def testCollectionTensorValueAccessInCond(self):
"""Read tensors from collections inside of cond_v2 & use them."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = constant_op.constant(2)
y = constant_op.constant(5)
ops.add_to_collection("x", x)
ops.add_to_collection("y", y)
def fn():
x_read = ops.get_collection("x")[0]
y_read = ops.get_collection("y")[0]
return math_ops.add(x_read, y_read)
cnd = cond_v2.cond_v2(math_ops.less(x, y), fn, fn)
self.assertEqual(cnd.eval(), 7)
def testCollectionIntValueWriteInCond(self):
"""Make sure Int writes to collections work inside of cond_v2."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = constant_op.constant(2)
y = constant_op.constant(5)
def true_fn():
z = math_ops.add(x, y)
ops.add_to_collection("z", 7)
return math_ops.multiply(x, z)
def false_fn():
z = math_ops.add(x, y)
return math_ops.multiply(x, z)
cnd = cond_v2.cond_v2(constant_op.constant(True), true_fn, false_fn)
self.assertEqual(cnd.eval(), 14)
read_z_collection = ops.get_collection("z")
self.assertEqual(read_z_collection, [7])
class CondV2ContainerTest(test.TestCase):
def testContainer(self):
"""Set containers outside & inside of cond_v2.
Make sure the containers are set correctly for both variable creation
(tested by variables.Variable) and for stateful ops (tested by FIFOQueue)
"""
self.skipTest("b/113048653")
with ops.Graph().as_default() as g:
with self.session(graph=g):
v0 = variables.Variable([0])
q0 = data_flow_ops.FIFOQueue(1, dtypes.float32)
def container(node):
return node.op.get_attr("container")
self.assertEqual(compat.as_bytes(""), container(v0))
self.assertEqual(compat.as_bytes(""), container(q0.queue_ref))
def true_fn():
# When this branch is created in cond below,
# the container should begin with 'l1'
v1 = variables.Variable([1])
q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)
with ops.container("l2t"):
v2 = variables.Variable([2])
q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)
v3 = variables.Variable([1])
q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)
self.assertEqual(compat.as_bytes("l1"), container(v1))
self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
self.assertEqual(compat.as_bytes("l2t"), container(v2))
self.assertEqual(compat.as_bytes("l2t"), container(q2.queue_ref))
self.assertEqual(compat.as_bytes("l1"), container(v3))
self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))
return constant_op.constant(2.0)
def false_fn():
# When this branch is created in cond below,
# the container should begin with 'l1'
v1 = variables.Variable([1])
q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)
with ops.container("l2f"):
v2 = variables.Variable([2])
q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)
v3 = variables.Variable([1])
q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)
self.assertEqual(compat.as_bytes("l1"), container(v1))
self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
self.assertEqual(compat.as_bytes("l2f"), container(v2))
self.assertEqual(compat.as_bytes("l2f"), container(q2.queue_ref))
self.assertEqual(compat.as_bytes("l1"), container(v3))
self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))
return constant_op.constant(6.0)
with ops.container("l1"):
cnd_true = cond_v2.cond_v2(
constant_op.constant(True), true_fn, false_fn)
self.assertEqual(cnd_true.eval(), 2)
cnd_false = cond_v2.cond_v2(
constant_op.constant(False), true_fn, false_fn)
self.assertEqual(cnd_false.eval(), 6)
v4 = variables.Variable([3])
q4 = data_flow_ops.FIFOQueue(1, dtypes.float32)
v5 = variables.Variable([4])
q5 = data_flow_ops.FIFOQueue(1, dtypes.float32)
self.assertEqual(compat.as_bytes("l1"), container(v4))
self.assertEqual(compat.as_bytes("l1"), container(q4.queue_ref))
self.assertEqual(compat.as_bytes(""), container(v5))
self.assertEqual(compat.as_bytes(""), container(q5.queue_ref))
class CondV2ColocationGroupAndDeviceTest(test.TestCase):
def testColocateWithBeforeCond(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant([2.0], name="b")
def fn():
c = constant_op.constant(3.0)
self.assertEqual([b"loc:@a"], c.op.colocation_groups())
return c
with ops.colocate_with(a.op):
self.assertEqual(
cond_v2.cond_v2(constant_op.constant(True), fn, fn).eval(), 3)
def fn2():
c = constant_op.constant(3.0)
self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
return c
with ops.colocate_with(a.op):
with ops.colocate_with(b.op):
self.assertEqual(
cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
def testColocateWithInAndOutOfCond(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant([2.0], name="b")
def fn2():
with ops.colocate_with(b.op):
c = constant_op.constant(3.0)
self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
return c
with ops.colocate_with(a.op):
self.assertEqual(
cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
d = constant_op.constant([2.0], name="d")
self.assertEqual([b"loc:@a"], d.op.colocation_groups())
def testColocateWithInCondGraphPartitioning(self):
with ops.Graph().as_default() as g:
with self.session(
graph=g,
config=config_pb2.ConfigProto(device_count={"CPU": 2})
) as sess:
with ops.device("/device:CPU:0"):
a = constant_op.constant([2.0], name="a")
with ops.device("/device:CPU:1"):
b = constant_op.constant([2.0], name="b")
def fn():
with ops.colocate_with(b.op):
c = math_ops.add(a, a, name="c")
return c
out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)
# We expect there to be two partitions because of the
# colocate_with. We are only running the cond, which has a data
# dependency on `a` but not on `b`. So, without the colocate_with
# we would expect execution on just one device.
self.assertGreaterEqual(len(run_metadata.partition_graphs), 2)
def testDeviceBeforeCond(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
def fn():
self.assertEqual("", constant_op.constant(3.0).op.device)
return test_ops.device_placement_op()
with ops.device("/device:CPU:0"):
self.assertIn(
compat.as_bytes("CPU:0"),
self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
fn, fn)))
def fn2():
self.assertEqual("", constant_op.constant(3.0).op.device)
return test_ops.device_placement_op()
if test_util.is_gpu_available():
with ops.device("/device:GPU:0"):
self.assertIn(
compat.as_bytes("GPU:0"),
self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
fn2, fn2)))
else:
self.skipTest("Test requires a GPU to check GPU device placement.")
def testDeviceInAndOutOfCond(self):
with ops.Graph().as_default() as g:
with self.session(
graph=g, config=config_pb2.ConfigProto(device_count={"CPU": 2})):
def fn2():
with ops.device("/device:CPU:1"):
c = constant_op.constant(3.0)
self.assertEqual("/device:CPU:1", c.op.device)
return c
with ops.device("/device:CPU:0"):
self.assertEqual(
cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
d = constant_op.constant(4.0)
self.assertEqual("/device:CPU:0", d.op.device)
def testDeviceInCondGraphPartitioning(self):
with ops.Graph().as_default() as g:
with self.session(
graph=g,
config=config_pb2.ConfigProto(device_count={"CPU": 2})
) as sess:
def fn():
with ops.device("/device:CPU:1"):
c = math_ops.add(a, a, name="c")
return c
with ops.device("/device:CPU:0"):
a = constant_op.constant([2.0], name="a")
out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)
self.assertGreaterEqual(len(run_metadata.partition_graphs), 2)
def _cond(pred, true_fn, false_fn, name):
if _is_old_cond():
return control_flow_ops.cond(pred, true_fn, false_fn, name=name)
else:
return cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
def _is_old_cond():
return isinstance(ops.get_default_graph()._get_control_flow_context(),
control_flow_ops.CondContext)
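# A minimal usage sketch of the dispatch helpers above (values hypothetical):
#
#   out = _cond(pred, lambda: x * 2.0, lambda: x, name="c")
#
# _cond forwards to control_flow_ops.cond when the default graph is already
# inside a v1 CondContext, and to cond_v2.cond_v2 otherwise.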
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/cond_v2_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class InverseOpTest(test.TestCase):
def _verifyInverse(self, x, np_type):
for adjoint in False, True:
y = x.astype(np_type)
with self.cached_session(use_gpu=True):
# Verify that x^{-1} * x == Identity matrix.
inv = linalg_ops.matrix_inverse(y, adjoint=adjoint)
tf_ans = math_ops.matmul(inv, y, adjoint_b=adjoint)
np_ans = np.identity(y.shape[-1])
if x.ndim > 2:
tiling = list(y.shape)
tiling[-2:] = [1, 1]
np_ans = np.tile(np_ans, tiling)
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-3)
self.assertShapeEqual(y, tf_ans)
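# Note on the adjoint=True case above: matrix_inverse returns
# inv(conj(transpose(y))) and matmul(..., adjoint_b=True) multiplies by
# conj(transpose(y)), so the product reduces to the identity in both
# settings.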
def _verifyInverseReal(self, x):
for np_type in [np.float32, np.float64]:
self._verifyInverse(x, np_type)
def _verifyInverseComplex(self, x):
for np_type in [np.complex64, np.complex128]:
self._verifyInverse(x, np_type)
def _makeBatch(self, matrix1, matrix2):
matrix_batch = np.concatenate(
[np.expand_dims(matrix1, 0),
np.expand_dims(matrix2, 0)])
matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
return matrix_batch
def testNonsymmetric(self):
# 2x2 matrices
matrix1 = np.array([[1., 2.], [3., 4.]])
matrix2 = np.array([[1., 3.], [3., 5.]])
self._verifyInverseReal(matrix1)
self._verifyInverseReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyInverseReal(self._makeBatch(matrix1, matrix2))
# Complex
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyInverseComplex(matrix1)
self._verifyInverseComplex(matrix2)
# Complex batch
self._verifyInverseComplex(self._makeBatch(matrix1, matrix2))
def testSymmetricPositiveDefinite(self):
# 2x2 matrices
matrix1 = np.array([[2., 1.], [1., 2.]])
matrix2 = np.array([[3., -1.], [-1., 3.]])
self._verifyInverseReal(matrix1)
self._verifyInverseReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyInverseReal(self._makeBatch(matrix1, matrix2))
# Complex
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyInverseComplex(matrix1)
self._verifyInverseComplex(matrix2)
# Complex batch
self._verifyInverseComplex(self._makeBatch(matrix1, matrix2))
@test_util.deprecated_graph_mode_only
def testNonSquareMatrix(self):
# Attempting to invert a non-square matrix should raise an error.
with self.assertRaises(ValueError):
linalg_ops.matrix_inverse(np.array([[1., 2., 3.], [3., 4., 5.]]))
@test_util.deprecated_graph_mode_only
def testWrongDimensions(self):
# The input to the inverse should be at least a 2-dimensional tensor.
tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_ops.matrix_inverse(tensor3)
def testNotInvertible(self):
# The input should be invertible.
with self.cached_session():
with self.assertRaisesOpError("Input is not invertible."):
# All rows of the matrix below add to zero.
tensor3 = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],
[0., -1., 1.]])
linalg_ops.matrix_inverse(tensor3).eval()
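# The matrix above is singular because every row sums to zero, i.e. the
# all-ones vector lies in its null space: M @ [1., 1., 1.] == 0.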
def testEmpty(self):
self._verifyInverseReal(np.empty([0, 2, 2]))
self._verifyInverseReal(np.empty([2, 0, 0]))
def testRandomSmallAndLarge(self):
np.random.seed(42)
for dtype in np.float32, np.float64, np.complex64, np.complex128:
for batch_dims in [(), (1,), (3,), (2, 2)]:
for size in 8, 31, 32:
shape = batch_dims + (size, size)
matrix = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape)).reshape(shape).astype(dtype)
self._verifyInverseReal(matrix)
@test_util.deprecated_graph_mode_only
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
all_ops = []
for adjoint_ in True, False:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
inv1 = linalg_ops.matrix_inverse(matrix1, adjoint=adjoint_)
inv2 = linalg_ops.matrix_inverse(matrix2, adjoint=adjoint_)
all_ops += [inv1, inv2]
inv = self.evaluate(all_ops)
self.assertAllEqual(inv[0], inv[1])
self.assertAllEqual(inv[2], inv[3])
class MatrixInverseBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (
2.0 * n) + np.diag(np.ones(n).astype(np.float32))
return variables.Variable(np.tile(matrix, batch_shape + (1, 1)))
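# The generated matrix is strictly diagonally dominant: diagonal entries are
# 1 + 1/(2n) while each row's off-diagonal entries sum to (n - 1)/(2n), so
# the matrix is guaranteed invertible and reasonably conditioned for the
# benchmark.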
def benchmarkMatrixInverseOp(self):
for adjoint in False, True:
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = self._GenerateMatrix(shape)
inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(inv),
min_iters=25,
name="matrix_inverse_cpu_{shape}_adjoint_{adjoint}".format(
shape=shape, adjoint=adjoint))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/gpu:0"):
matrix = self._GenerateMatrix(shape)
inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(inv),
min_iters=25,
name="matrix_inverse_gpu_{shape}_adjoint_{adjoint}".format(
shape=shape, adjoint=adjoint))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/matrix_inverse_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for IdentityNOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class IdentityNOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testInt32String_6(self):
with self.cached_session() as sess:
[value0, value1] = sess.run(
array_ops.identity_n([[1, 2, 3, 4, 5, 6],
[b"a", b"b", b"C", b"d", b"E", b"f", b"g"]]))
self.assertAllEqual(np.array([1, 2, 3, 4, 5, 6]), value0)
self.assertAllEqual(
np.array([b"a", b"b", b"C", b"d", b"E", b"f", b"g"]), value1)
@test_util.run_deprecated_v1
def testInt32_shapes(self):
with self.cached_session() as sess:
inp0 = constant_op.constant([10, 20, 30, 40, 50, 60], shape=[2, 3])
inp1 = constant_op.constant([11, 21, 31, 41, 51, 61], shape=[3, 2])
inp2 = constant_op.constant(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], shape=[5, 3])
[value0, value1,
value2] = sess.run(array_ops.identity_n([inp0, inp1, inp2]))
self.assertAllEqual(np.array([[10, 20, 30], [40, 50, 60]]), value0)
self.assertAllEqual(np.array([[11, 21], [31, 41], [51, 61]]), value1)
self.assertAllEqual(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]),
value2)
@test_util.run_deprecated_v1
def testString(self):
source = [b"A", b"b", b"C", b"d", b"E", b"f"]
with self.cached_session() as sess:
[value] = sess.run(array_ops.identity_n([source]))
self.assertAllEqual(source, value)
def testIdentityShape(self):
with self.cached_session():
shape = [2, 3]
array_2x3 = [[1, 2, 3], [6, 5, 4]]
tensor = constant_op.constant(array_2x3)
self.assertEqual(shape, tensor.get_shape())
self.assertEqual(shape, array_ops.identity_n([tensor])[0].get_shape())
self.assertEqual(shape, array_ops.identity_n([array_2x3])[0].get_shape())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/identity_n_op_py_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for unary coefficient-wise operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
_NEG = lambda x: -x
_ABS = abs
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), x_values
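# A small worked example of _sparsify (input values hypothetical): for
#   x = np.array([[0.2, 0.7], [0.8, 0.1]])
# the thresholding zeroes entries below 0.5, leaving non-zeros at indices
# [[0, 1], [1, 0]] with values [0.7, 0.8] and dense_shape (2, 2).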
def _default_tolerance(dtype):
"""Returns a sensible default tolerance for comparing results of a given type.
Args:
dtype: A datatype.
"""
if dtype == np.float16:
return 5e-3
elif dtype in (np.float32, np.complex64):
return 1e-3
elif dtype in (np.float64, np.complex128):
return 1e-5
else:
return None # Fail fast for unexpected types
class UnaryOpTest(test.TestCase):
def _compareCpu(self, x, np_func, tf_func, grad_rtol=None, grad_atol=None):
if grad_rtol is None:
grad_rtol = _default_tolerance(x.dtype)
if grad_atol is None:
grad_atol = _default_tolerance(x.dtype)
np_ans = np_func(x)
with self.cached_session(use_gpu=False):
inx = ops.convert_to_tensor(x)
if x.dtype in (np.float32, np.float64,
dtypes_lib.bfloat16.as_numpy_dtype):
y = 1.1 * tf_func(inx)
np_ans *= 1.1
else:
y = tf_func(inx)
tf_cpu = self.evaluate(y)
self.assertShapeEqual(np_ans, y)
if x.dtype == np.float16:
self.assertAllClose(np_ans, tf_cpu, rtol=1e-3, atol=1e-3)
elif x.dtype == dtypes_lib.bfloat16.as_numpy_dtype:
self.assertAllClose(np_ans, tf_cpu, rtol=1e-2, atol=1e-2)
else:
self.assertAllClose(np_ans, tf_cpu)
if x.dtype in (np.complex64, np.complex128) and tf_func == math_ops.sign:
return # Return early
if x.dtype == np.float16:
s = list(np.shape(x))
jacob_t, _ = gradient_checker.compute_gradient(
inx, s, y, s, x_init_value=x)
xf = x.astype(np.float64)
inxf = ops.convert_to_tensor(xf)
yf = tf_func(inxf)
_, jacob_n = gradient_checker.compute_gradient(
inxf, s, yf, s, x_init_value=xf, delta=1e-2)
jacob_n = jacob_n.astype(np.float16)
self.assertAllClose(jacob_t, jacob_n, rtol=grad_rtol, atol=grad_atol)
elif x.dtype in (np.float32, np.complex64):
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, y, s, x_init_value=x, delta=1e-3)
self.assertAllClose(jacob_t, jacob_n, rtol=grad_rtol, atol=grad_atol)
elif x.dtype in (np.float64, np.complex128):
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, y, s, x_init_value=x, delta=1e-5)
self.assertAllClose(jacob_t, jacob_n, rtol=grad_rtol, atol=grad_atol)
def _check(self, result_tensor, result_np, input_sp_t, tol):
self.assertIsInstance(result_tensor, sparse_tensor.SparseTensor)
self.assertIsInstance(input_sp_t, sparse_tensor.SparseTensor)
self.assertAllEqual(input_sp_t.indices, result_tensor.indices)
self.assertAllEqual(input_sp_t.dense_shape, result_tensor.dense_shape)
if tol is None:
self.assertAllClose(result_np, result_tensor.values)
else:
self.assertAllClose(result_np, result_tensor.values, rtol=tol, atol=tol)
def _compareSparseCpu(self, x, np_func, tf_func, tol):
x_sp, x_sp_vals = _sparsify(x)
res_np = np_func(x_sp_vals)
with test_util.force_cpu():
self._check(tf_func(x_sp), res_np, x_sp, tol)
def _compareGpu(self, x, np_func, tf_func):
np_ans = np_func(x)
with test_util.use_gpu():
result = tf_func(ops.convert_to_tensor(x))
tf_gpu = self.evaluate(result)
if x.dtype == np.float16:
self.assertAllClose(np_ans, tf_gpu, rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(np_ans, tf_gpu)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def _compareSparseGpu(self, x, np_func, tf_func, tol):
x_sp, x_sp_vals = _sparsify(x)
res_np = np_func(x_sp_vals)
with test_util.use_gpu():
self._check(tf_func(x_sp), res_np, x_sp, tol)
def _compareBoth(self, x, np_func, tf_func):
self._compareCpu(x, np_func, tf_func)
self._compareGpu(x, np_func, tf_func)
def _compareBothSparse(self, x, np_func, tf_func, tol=None):
self._compareSparseCpu(x, np_func, tf_func, tol)
self._compareSparseGpu(x, np_func, tf_func, tol)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _sigmoid(self, x):
return 1.0 / (1.0 + np.exp(-x))
def _log_sigmoid(self, x):
return np.log(self._sigmoid(x))
def _replace_domain_error_with_inf(self, fn):
def func(x):
try:
return fn(x)
except ValueError as e:
if "domain error" in str(e):
return np.inf * np.ones_like(x)
else:
raise e
return func
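# For example, math.lgamma(0.0) raises ValueError("math domain error") at
# the pole; the wrapper above maps that to +inf, which matches the
# elementwise math_ops.lgamma behavior the tests compare against.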
@test_util.run_deprecated_v1
def testFloatBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
w = x - x.min() + 1.02 # all greater than 1
y = (x + .5).astype(np.float32) # no zero
z = (x + 15.5).astype(np.float32) # all positive
k = np.arange(-0.90, 0.90, 0.25).astype(np.float32) # between -1 and 1
self._compareBoth(x, np.abs, math_ops.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, math_ops.negative)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(y, self._inv, math_ops.reciprocal)
self._compareBoth(x, np.square, math_ops.square)
self._compareBoth(z, np.sqrt, math_ops.sqrt)
self._compareBoth(z, self._rsqrt, math_ops.rsqrt)
self._compareBoth(x, np.exp, math_ops.exp)
self._compareBoth(x, np.expm1, math_ops.expm1)
self._compareBoth(z, np.log, math_ops.log)
self._compareBoth(z, np.log1p, math_ops.log1p)
self._compareBoth(x, np.sinh, math_ops.sinh)
self._compareBoth(x, np.cosh, math_ops.cosh)
self._compareBoth(x, np.tanh, math_ops.tanh)
self._compareBoth(x, np.arcsinh, math_ops.asinh)
self._compareBoth(w, np.arccosh, math_ops.acosh)
self._compareBoth(k, np.arctanh, math_ops.atanh)
self._compareBoth(x, self._sigmoid, math_ops.sigmoid)
self._compareBoth(x, self._log_sigmoid, math_ops.log_sigmoid)
self._compareBoth(y, np.sign, math_ops.sign)
self._compareBoth(x, np.sin, math_ops.sin)
self._compareBoth(x, np.cos, math_ops.cos)
self._compareBoth(k, np.arcsin, math_ops.asin)
self._compareBoth(k, np.arccos, math_ops.acos)
self._compareBoth(x, np.arctan, math_ops.atan)
self._compareBoth(x, np.tan, math_ops.tan)
self._compareBoth(
y, np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
math_ops.lgamma)
self._compareBoth(x, np.vectorize(math.erf), math_ops.erf)
self._compareBoth(x, np.vectorize(math.erfc), math_ops.erfc)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self._compareBoth(x, special.i0e, math_ops.bessel_i0e)
self._compareBoth(x, special.i1e, math_ops.bessel_i1e)
except ImportError as e:
tf_logging.warn("Cannot test special functions: %s" % str(e))
self._compareBothSparse(x, np.abs, math_ops.abs)
self._compareBothSparse(x, np.negative, math_ops.negative)
self._compareBothSparse(x, np.square, math_ops.square)
self._compareBothSparse(z, np.sqrt, math_ops.sqrt, tol=1e-3)
self._compareBothSparse(x, np.tanh, math_ops.tanh)
self._compareBothSparse(y, np.sign, math_ops.sign)
self._compareBothSparse(x, np.vectorize(math.erf), math_ops.erf)
@test_util.run_deprecated_v1
def testFloatTanhEdge(self):
x = np.arange(40, 40 + 6).reshape(6).astype(np.float32)
self._compareBoth(x, np.tanh, math_ops.tanh)
x = np.arange(-40, -40 + 6).reshape(6).astype(np.float32)
self._compareBoth(x, np.tanh, math_ops.tanh)
@test_util.run_deprecated_v1
def testFloatEmpty(self):
x = np.empty((2, 0, 5), dtype=np.float32)
self._compareBoth(x, np.abs, math_ops.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, math_ops.negative)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(x, self._inv, math_ops.reciprocal)
self._compareBoth(x, np.square, math_ops.square)
self._compareBoth(x, np.sqrt, math_ops.sqrt)
self._compareBoth(x, self._rsqrt, math_ops.rsqrt)
self._compareBoth(x, np.exp, math_ops.exp)
self._compareBoth(x, np.expm1, math_ops.expm1)
self._compareBoth(x, np.log, math_ops.log)
self._compareBoth(x, np.log1p, math_ops.log1p)
self._compareBoth(x, np.sinh, math_ops.sinh)
self._compareBoth(x, np.arcsinh, math_ops.asinh)
self._compareBoth(x, np.cosh, math_ops.cosh)
self._compareBoth(x, np.tanh, math_ops.tanh)
self._compareBoth(x, self._sigmoid, math_ops.sigmoid)
self._compareBoth(x, np.sign, math_ops.sign)
self._compareBoth(x, np.sin, math_ops.sin)
self._compareBoth(x, np.cos, math_ops.cos)
# Can't use vectorize below, so just use some arbitrary function
self._compareBoth(x, np.sign, math_ops.lgamma)
self._compareBoth(x, np.sign, math_ops.erf)
self._compareBoth(x, np.sign, math_ops.erfc)
self._compareBoth(x, np.tan, math_ops.tan)
self._compareBoth(x, np.arcsin, math_ops.asin)
self._compareBoth(x, np.arccos, math_ops.acos)
self._compareBoth(x, np.arctan, math_ops.atan)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self._compareBoth(x, special.i0e, math_ops.bessel_i0e)
self._compareBoth(x, special.i1e, math_ops.bessel_i1e)
except ImportError as e:
tf_logging.warn("Cannot test special functions: %s" % str(e))
self._compareBothSparse(x, np.abs, math_ops.abs)
self._compareBothSparse(x, np.negative, math_ops.negative)
self._compareBothSparse(x, np.square, math_ops.square)
self._compareBothSparse(x, np.sqrt, math_ops.sqrt, tol=1e-3)
self._compareBothSparse(x, np.tanh, math_ops.tanh)
self._compareBothSparse(x, np.sign, math_ops.sign)
self._compareBothSparse(x, np.sign, math_ops.erf)
@test_util.run_deprecated_v1
def testDoubleBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)
w = x - x.min() + 1.02 # all greater than 1
y = (x + .5).astype(np.float64) # no zero
z = (x + 15.5).astype(np.float64) # all positive
k = np.arange(-0.90, 0.90,
0.35).reshape(1, 3, 2).astype(np.float64) # between -1 and 1
self._compareBoth(x, np.abs, math_ops.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, math_ops.negative)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(y, self._inv, math_ops.reciprocal)
self._compareBoth(x, np.square, math_ops.square)
self._compareBoth(z, np.sqrt, math_ops.sqrt)
self._compareBoth(z, self._rsqrt, math_ops.rsqrt)
self._compareBoth(x, np.exp, math_ops.exp)
self._compareBoth(x, np.expm1, math_ops.expm1)
self._compareBoth(z, np.log, math_ops.log)
self._compareBoth(z, np.log1p, math_ops.log1p)
self._compareBoth(x, np.sinh, math_ops.sinh)
self._compareBoth(x, np.cosh, math_ops.cosh)
self._compareBoth(x, np.tanh, math_ops.tanh)
self._compareBoth(x, np.arcsinh, math_ops.asinh)
self._compareBoth(w, np.arccosh, math_ops.acosh)
self._compareBoth(k, np.arctanh, math_ops.atanh)
self._compareBoth(x, self._sigmoid, math_ops.sigmoid)
self._compareBoth(y, np.sign, math_ops.sign)
self._compareBoth(x, np.sin, math_ops.sin)
self._compareBoth(x, np.cos, math_ops.cos)
self._compareBoth(
y, np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
math_ops.lgamma)
self._compareBoth(x, np.vectorize(math.erf), math_ops.erf)
self._compareBoth(x, np.vectorize(math.erfc), math_ops.erfc)
self._compareBoth(x, np.arctan, math_ops.atan)
self._compareBoth(k, np.arcsin, math_ops.asin)
self._compareBoth(k, np.arccos, math_ops.acos)
self._compareBoth(k, np.tan, math_ops.tan)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self._compareBoth(x, special.i0e, math_ops.bessel_i0e)
self._compareBoth(x, special.i1e, math_ops.bessel_i1e)
except ImportError as e:
tf_logging.warn("Cannot test special functions: %s" % str(e))
self._compareBothSparse(x, np.abs, math_ops.abs)
self._compareBothSparse(x, np.negative, math_ops.negative)
self._compareBothSparse(x, np.square, math_ops.square)
self._compareBothSparse(z, np.sqrt, math_ops.sqrt, tol=1e-3)
self._compareBothSparse(x, np.tanh, math_ops.tanh)
self._compareBothSparse(y, np.sign, math_ops.sign)
self._compareBothSparse(x, np.vectorize(math.erf), math_ops.erf)
@test_util.run_deprecated_v1
def testHalfBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float16)
y = (x + .5).astype(np.float16) # no zero
z = (x + 15.5).astype(np.float16) # all positive
self._compareBoth(x, np.abs, math_ops.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, math_ops.negative)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(y, self._inv, math_ops.reciprocal)
self._compareBoth(x, np.square, math_ops.square)
self._compareBoth(z, np.sqrt, math_ops.sqrt)
self._compareBoth(z, self._rsqrt, math_ops.rsqrt)
self._compareBoth(x, np.exp, math_ops.exp)
self._compareBoth(x, np.expm1, math_ops.expm1)
self._compareBoth(z, np.log, math_ops.log)
self._compareBoth(z, np.log1p, math_ops.log1p)
self._compareBoth(x, np.tanh, math_ops.tanh)
self._compareBoth(x, self._sigmoid, math_ops.sigmoid)
self._compareBoth(y, np.sign, math_ops.sign)
self._compareBoth(x, np.sin, math_ops.sin)
self._compareBoth(x, np.cos, math_ops.cos)
self._compareBoth(
y, np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
math_ops.lgamma)
self._compareBoth(x, np.vectorize(math.erf), math_ops.erf)
self._compareBoth(x, np.vectorize(math.erfc), math_ops.erfc)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self._compareBoth(x, special.i0e, math_ops.bessel_i0e)
self._compareBoth(x, special.i1e, math_ops.bessel_i1e)
except ImportError as e:
tf_logging.warn("Cannot test special functions: %s" % str(e))
self._compareBothSparse(x, np.abs, math_ops.abs)
self._compareBothSparse(x, np.negative, math_ops.negative)
self._compareBothSparse(x, np.square, math_ops.square)
self._compareBothSparse(z, np.sqrt, math_ops.sqrt, tol=1e-3)
self._compareBothSparse(x, np.tanh, math_ops.tanh)
self._compareBothSparse(y, np.sign, math_ops.sign)
self._compareBothSparse(x, np.vectorize(math.erf), math_ops.erf, tol=1e-3)
def testInt32Basic(self):
x = np.arange(-6, 6, 2).reshape(1, 3, 2).astype(np.int32)
self._compareCpu(x, np.abs, math_ops.abs)
self._compareCpu(x, np.abs, _ABS)
self._compareBoth(x, np.negative, math_ops.negative)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(x, np.square, math_ops.square)
self._compareCpu(x, np.sign, math_ops.sign)
self._compareBothSparse(x, np.abs, math_ops.abs)
self._compareBothSparse(x, np.negative, math_ops.negative)
self._compareBothSparse(x, np.square, math_ops.square)
self._compareBothSparse(x, np.sign, math_ops.sign)
def testInt64Basic(self):
x = np.arange(-6 << 40, 6 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
self._compareCpu(x, np.abs, math_ops.abs)
self._compareCpu(x, np.abs, _ABS)
self._compareCpu(x, np.negative, math_ops.negative)
self._compareCpu(x, np.negative, _NEG)
self._compareCpu(x, np.sign, math_ops.sign)
self._compareBothSparse(x, np.abs, math_ops.abs)
self._compareBothSparse(x, np.negative, math_ops.negative)
self._compareBothSparse(x, np.sign, math_ops.sign)
def testInt64Square(self):
x = np.arange(-6 << 20, 6 << 20, 2 << 20).reshape(1, 3, 2).astype(np.int64)
self._compareCpu(x, np.square, math_ops.square)
self._compareBothSparse(x, np.square, math_ops.square)
@test_util.run_deprecated_v1
def testComplex64Basic(self):
x = complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(
np.complex64)
y = x + complex(0.5, 0.5) # no zeros
self._compareBoth(x, np.abs, math_ops.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, math_ops.negative)
self._compareBoth(x, np.negative, _NEG)
self._compareCpu(y, self._inv, math_ops.reciprocal)
self._compareCpu(x, np.square, math_ops.square)
self._compareCpu(y, np.sqrt, math_ops.sqrt)
self._compareCpu(y, self._rsqrt, math_ops.rsqrt)
self._compareBoth(x, np.exp, math_ops.exp)
self._compareCpu(x, np.expm1, math_ops.expm1)
self._compareCpu(y, np.log, math_ops.log)
self._compareCpu(y, np.log1p, math_ops.log1p)
self._compareCpu(x, np.sinh, math_ops.sinh)
self._compareCpu(x, np.cosh, math_ops.cosh)
self._compareCpu(x, np.tanh, math_ops.tanh)
# Complex64 versions of asinh() and acosh() in libstdc++ only have 6 digits
# of precision.
# Small gradient values + low precision --> High relative error
self._compareCpu(y, np.arcsinh, math_ops.asinh, grad_rtol=1e-2)
self._compareCpu(y, np.arccosh, math_ops.acosh, grad_rtol=1e-2)
self._compareCpu(y, np.arctanh, math_ops.atanh)
self._compareCpu(x, self._sigmoid, math_ops.sigmoid)
self._compareCpu(x, np.sin, math_ops.sin)
self._compareCpu(x, np.cos, math_ops.cos)
self._compareBothSparse(x, np.abs, math_ops.abs)
self._compareBothSparse(x, np.negative, math_ops.negative)
self._compareBothSparse(x, np.square, math_ops.square)
self._compareBothSparse(x, np.sqrt, math_ops.sqrt, 1e-3)
self._compareBothSparse(x, np.tanh, math_ops.tanh)
# Numpy uses an incorrect definition of sign; use the right one instead.
def complex_sign(x):
return x / np.abs(x)
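# For instance, older NumPy releases evaluate np.sign(1 + 1j) as
# sign(real) + 0j == (1 + 0j), whereas the definition used here (and by
# math_ops.sign) is z / |z| ~= 0.7071 + 0.7071j.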
self._compareBoth(y, complex_sign, math_ops.sign)
self._compareBothSparse(y, complex_sign, math_ops.sign)
@test_util.run_deprecated_v1
def testComplex128Basic(self):
x = complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(
np.complex128)
y = x + complex(0.5, 0.5) # no zeros
self._compareBoth(x, np.abs, math_ops.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, math_ops.negative)
self._compareBoth(x, np.negative, _NEG)
self._compareCpu(y, self._inv, math_ops.reciprocal)
self._compareCpu(x, np.square, math_ops.square)
self._compareCpu(y, np.sqrt, math_ops.sqrt)
self._compareCpu(y, self._rsqrt, math_ops.rsqrt)
self._compareBoth(x, np.exp, math_ops.exp)
self._compareCpu(x, np.expm1, math_ops.expm1)
self._compareCpu(y, np.log, math_ops.log)
self._compareCpu(y, np.log1p, math_ops.log1p)
self._compareCpu(x, np.sinh, math_ops.sinh)
self._compareCpu(x, np.cosh, math_ops.cosh)
self._compareCpu(x, np.tanh, math_ops.tanh)
self._compareCpu(y, np.arcsinh, math_ops.asinh)
self._compareCpu(y, np.arccosh, math_ops.acosh)
self._compareCpu(y, np.arctanh, math_ops.atanh)
self._compareCpu(x, self._sigmoid, math_ops.sigmoid)
self._compareCpu(x, np.sin, math_ops.sin)
self._compareCpu(x, np.cos, math_ops.cos)
self._compareBothSparse(x, np.abs, math_ops.abs)
self._compareBothSparse(x, np.negative, math_ops.negative)
self._compareBothSparse(x, np.square, math_ops.square)
self._compareBothSparse(x, np.sqrt, math_ops.sqrt, 1e-3)
self._compareBothSparse(x, np.tanh, math_ops.tanh)
# Numpy uses an incorrect definition of sign; use the right one instead.
def complex_sign(x):
return x / np.abs(x)
self._compareBoth(y, complex_sign, math_ops.sign)
self._compareBothSparse(y, complex_sign, math_ops.sign)
@test_util.run_deprecated_v1
def testGradGrad(self):
np.random.seed(7)
shape = (5,)
dtype_tols = [(np.float32, 5e-4), (np.float64, 1e-6), (np.complex64, 5e-4),
(np.complex128, 1e-6)]
op_range = [
(gen_math_ops.reciprocal_grad, [-2, 2]),
(gen_math_ops.rsqrt_grad, [0.1, 3]),
(gen_math_ops.sigmoid_grad, [-2, 2]),
(gen_math_ops.sqrt_grad, [0.1, 3]),
(gen_math_ops.tanh_grad, [-2, 2]),
]
def rand(dtype, real_range):
x = np.random.uniform(
real_range[0], real_range[1], size=shape[0]).astype(dtype)
if dtype in (np.complex64, np.complex128):
x += 1j * np.random.uniform(-2, 2, size=shape[0]).astype(dtype)
return x
for op, real_range in op_range:
with self.cached_session():
for dtype, tol in dtype_tols:
x = constant_op.constant(rand(dtype, real_range))
y = constant_op.constant(rand(dtype, real_range))
z = op(x, y)
grads = gradient_checker.compute_gradient(
[x, y], [shape, shape],
z,
shape,
x_init_value=[rand(dtype, real_range),
rand(dtype, real_range)])
if isinstance(grads, tuple):
grads = [grads]
for analytical, numerical in grads:
self.assertAllClose(analytical, numerical, rtol=tol, atol=tol)
@test_util.run_in_graph_and_eager_modes
def testComplexAbsGradGrad(self):
def f(x):
real = math_ops.cos(x)
imag = ops.convert_to_tensor(1.)
return math_ops.abs(math_ops.complex(real, imag))
def g(x):
with backprop.GradientTape() as t:
t.watch(x)
y = f(x)
return t.gradient(y, x)
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(g, [ops.convert_to_tensor(2.0)]))
self.assertLess(err, 1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/cwise_ops_unary_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StringToHashBucket op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringToHashBucketOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testStringToOneHashBucketFast(self):
with self.cached_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket_fast(input_string, 1)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
self.assertAllEqual([0, 0, 0], result)
@test_util.run_deprecated_v1
def testStringToHashBucketsFast(self):
with self.cached_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket_fast(input_string, 10)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c', 'd']})
# Fingerprint64('a') -> 12917804110809363939 -> mod 10 -> 9
# Fingerprint64('b') -> 11795596070477164822 -> mod 10 -> 2
# Fingerprint64('c') -> 11430444447143000872 -> mod 10 -> 2
# Fingerprint64('d') -> 4470636696479570465 -> mod 10 -> 5
self.assertAllEqual([9, 2, 2, 5], result)
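# The bucket is simply the fingerprint modulo the bucket count, e.g.
# 12917804110809363939 % 10 == 9 for 'a'.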
@test_util.run_deprecated_v1
def testStringToOneHashBucketLegacyHash(self):
with self.cached_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket(input_string, 1)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
self.assertAllEqual([0, 0, 0], result)
@test_util.run_deprecated_v1
def testStringToHashBucketsLegacyHash(self):
with self.cached_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket(input_string, 10)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
# Hash64('a') -> 2996632905371535868 -> mod 10 -> 8
# Hash64('b') -> 5795986006276551370 -> mod 10 -> 0
# Hash64('c') -> 14899841994519054197 -> mod 10 -> 7
self.assertAllEqual([8, 0, 7], result)
def testStringToOneHashBucketStrongOneHashBucket(self):
with self.cached_session():
input_string = constant_op.constant(['a', 'b', 'c'])
output = string_ops.string_to_hash_bucket_strong(
input_string, 1, key=[123, 345])
self.assertAllEqual([0, 0, 0], self.evaluate(output))
def testStringToHashBucketsStrong(self):
with self.cached_session():
input_string = constant_op.constant(['a', 'b', 'c'])
output = string_ops.string_to_hash_bucket_strong(
input_string, 10, key=[98765, 132])
# key = [98765, 132]
# StrongKeyedHash(key, 'a') -> 7157389809176466784 -> mod 10 -> 4
# StrongKeyedHash(key, 'b') -> 15805638358933211562 -> mod 10 -> 2
# StrongKeyedHash(key, 'c') -> 18100027895074076528 -> mod 10 -> 8
self.assertAllEqual([4, 2, 8], self.evaluate(output))
def testStringToHashBucketsStrongInvalidKey(self):
with self.cached_session():
input_string = constant_op.constant(['a', 'b', 'c'])
with self.assertRaisesOpError('Key must have 2 elements'):
string_ops.string_to_hash_bucket_strong(
input_string, 10, key=[98765]).eval()
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/string_to_hash_bucket_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for while_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops import while_v2
from tensorflow.python.ops.control_flow_ops import while_loop as while_loop_v1
from tensorflow.python.ops.while_v2 import while_loop as while_loop_v2
from tensorflow.python.platform import test
class WhileV2Test(test.TestCase, parameterized.TestCase):
@test_util.run_deprecated_v1
def testSingleLoopVar(self):
x = constant_op.constant(2.)
ret = while_loop_v2(
lambda v: v < 8., lambda v: v * v, [x], return_same_structure=False)
grad = gradients_impl.gradients(ret, [x])
with self.cached_session() as sess:
self.assertEqual(self.evaluate(ret), 16.)
self.assertSequenceEqual(self.evaluate(grad), [32.])
@test_util.run_v1_only("b/120545219")
def testReturnSameStructureTrue(self):
x = constant_op.constant(2.)
ret = while_loop_v2(
lambda v: v < 8., lambda v: v * v, [x], return_same_structure=True)
grad = gradients_impl.gradients(ret, [x])
with self.cached_session() as sess:
eval_result = sess.run(ret)
self.assertIsInstance(eval_result, list)
self.assertLen(eval_result, 1)
self.assertEqual(16., eval_result[0])
self.assertSequenceEqual(sess.run(grad), [32.])
def testVerifyInputOutputTypesMatch(self):
@def_function.function
def BuildWhile():
x = constant_op.constant(1., dtypes.float32)
def Body(x):
return math_ops.cast(x, dtypes.float16) + 1
while_loop_v2(lambda x: x < 10, Body, [x])
with self.assertRaisesRegexp(
TypeError,
r"Loop var Const:0 enters the loop with type <dtype: 'float32'> "
r"but has type <dtype: 'float16'> after 1 iteration."):
BuildWhile()
def testGradientTapeResourceVariable(self):
with context.eager_mode():
v = variables.Variable(1.)
@def_function.function
def fnWithLoop(): # pylint: disable=invalid-name
with backprop.GradientTape() as tape:
_, x = while_loop_v2(
lambda i, _: i < 2,
lambda i, x: (i + 1, x * v),
[0, 2.])
return tape.gradient(x, v)
self.assertAllEqual(fnWithLoop(), 4.0)
def testExternalControlDependencies(self):
with ops.Graph().as_default(), self.test_session():
v = variables.Variable(1.)
v.initializer.run()
op = v.assign_add(1.)
def body_fn(i): # pylint: disable=invalid-name
with ops.control_dependencies([op]):
return i + 1
loop = while_loop_v2(lambda i: i < 1, body_fn, [0])
loop[0].op.run()
self.assertAllEqual(self.evaluate(v), 2.0)
@test_util.run_deprecated_v1
def testMultipleLoopVarsBasic(self):
x = constant_op.constant(5.)
y = constant_op.constant(3.)
# x = 5.
# y = 3.
# while x < 45.:
# x = x * y
ret = while_loop_v2(
lambda v, _: v < 45.,
lambda v, w: (v * w, w), [x, y],
return_same_structure=False)
    # ret = [x*y**2, y]
# Note: This is simply d_ret[0]/d_x since d_ret[1]/d_x is 0.
    grad = gradients_impl.gradients(ret, [x])  # [y**2]
with self.cached_session() as sess:
self.assertSequenceEqual(self.evaluate(ret), [45., 3.])
self.assertSequenceEqual(self.evaluate(grad), [9.])
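  # Worked check of the comments above (added for clarity, plain Python; not
  # collected by the test runner): with x = 5 and y = 3 the body runs twice
  # (5 -> 15 -> 45), so ret[0] = x * y**2 and d(ret[0])/dx = y**2 = 9.
  def _exampleMultipleLoopVarsBasicMath(self):
    x, y, iterations = 5., 3., 0
    while x < 45.:
      x, iterations = x * y, iterations + 1
    self.assertEqual((x, iterations), (45., 2))
    self.assertEqual(y ** 2, 9.)  # the asserted gradient value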
@test_util.run_deprecated_v1
def testMultipleLoopVars(self):
x = constant_op.constant(5.)
y = constant_op.constant(3.)
# x = 5.
# y = 3.
# while x < 45.:
# x = x * y
# y = x + y
ret = while_loop_v2(
lambda v, _: v < 45.,
lambda v, w: (v * w, v + w), [x, y],
return_same_structure=False)
# ret = [y*x**2 + x*y**2, x*y + x + y]
gradx_0 = gradients_impl.gradients(ret[0], [x]) # [2*x*y + y**2]
gradx_1 = gradients_impl.gradients(ret[1], [x]) # [y + 1]
gradx_2 = gradients_impl.gradients(ret, [x]) # [2*x*y + y**2 + 2*y + 1]
grady_0 = gradients_impl.gradients(ret[0], [y]) # [2*x*y + x**2]
grady_1 = gradients_impl.gradients(ret[1], [y]) # [x + 1]
grady_2 = gradients_impl.gradients(ret, [y]) # [2*x*y + x**2 + x + 1]
with self.cached_session() as sess:
self.assertSequenceEqual(self.evaluate(ret), [120., 23.])
self.assertSequenceEqual(self.evaluate(gradx_0), [39.])
self.assertSequenceEqual(self.evaluate(gradx_1), [4.])
self.assertSequenceEqual(self.evaluate(gradx_2), [43.])
self.assertSequenceEqual(self.evaluate(grady_0), [55.])
self.assertSequenceEqual(self.evaluate(grady_1), [6.])
self.assertSequenceEqual(self.evaluate(grady_2), [61.])
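  # Numeric spot-check of the symbolic gradient comments above (an added
  # illustration): evaluate the closed forms at x = 5, y = 3.
  def _exampleMultipleLoopVarsGradients(self):
    x, y = 5., 3.
    self.assertEqual(2 * x * y + y ** 2, 39.)  # d(ret[0])/dx
    self.assertEqual(y + 1, 4.)                # d(ret[1])/dx
    self.assertEqual(2 * x * y + x ** 2, 55.)  # d(ret[0])/dy
    self.assertEqual(x + 1, 6.)                # d(ret[1])/dy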
@test_util.run_deprecated_v1
def testGradientTape(self):
with backprop.GradientTape() as t:
x = constant_op.constant(2.)
t.watch(x)
ret = while_loop_v2(
lambda v: v < 4., lambda v: v * v, [x],
return_same_structure=False) # x**2
grad = t.gradient(ret, x)
with self.cached_session() as sess:
self.assertAllEqual(sess.run(grad), 4.0)
@test_util.run_deprecated_v1
def testMultipleWhileLoops(self):
x = constant_op.constant(2.)
ret1 = while_loop_v2(
lambda v: v < 4., lambda v: v * v, [x],
return_same_structure=False) # x**2
ret2 = while_loop_v2(
lambda v: v < 16., lambda v: v * v, [ret1],
return_same_structure=False) # x**4
grad = gradients_impl.gradients(ret2, [x]) # 4x**3
grad_grad = gradients_impl.gradients(grad, [x]) # 12x**2
with self.cached_session() as sess:
self.assertSequenceEqual(self.evaluate(grad), [32.])
self.assertSequenceEqual(self.evaluate(grad_grad), [48.])
@test_util.run_deprecated_v1
def testDoubleDerivative(self):
x = constant_op.constant(2.)
ret = while_loop_v2(
lambda v: v < 8., lambda v: v**2, [x],
return_same_structure=False) # x**4
grad = gradients_impl.gradients(ret, [x]) # 4x**3
grad_grad = gradients_impl.gradients(grad, [x]) # 12x**2
with self.cached_session() as sess:
self.assertEqual(self.evaluate(ret), 16.)
self.assertSequenceEqual(self.evaluate(grad), [32.])
self.assertSequenceEqual(self.evaluate(grad_grad), [48.])
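  # Spot-check of the derivative comments in the two tests above (an added
  # illustration): for ret = x**4 at x = 2, the first derivative 4*x**3 is 32
  # and the second derivative 12*x**2 is 48.
  def _exampleFourthPowerDerivatives(self):
    x = 2.
    self.assertEqual(4 * x ** 3, 32.)
    self.assertEqual(12 * x ** 2, 48.)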
@test_util.run_v1_only("b/120545219")
def testPruning(self):
x = constant_op.constant(1)
tensor_list = list_ops.empty_tensor_list(
element_dtype=x.dtype, element_shape=x.shape)
def Cond(x, tl):
del tl # Unused for Cond.
return x < 5
def Body(x, tl):
return x + 1, list_ops.tensor_list_push_back(tl, x)
outputs = while_loop_v1(Cond, Body, [x, tensor_list])
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(outputs[0])
def GetOptimizedGraph():
mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.CopyFrom(
rewriter_config_pb2.RewriterConfig(
constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL))
return tf_optimizer.OptimizeGraph(config, mg)
g = GetOptimizedGraph()
self.assertEqual(len([n for n in g.node if n.op == "Enter"]), 1)
stack = list_ops.tensor_list_stack(outputs[1], element_dtype=x.dtype)
train_op.append(stack)
g = GetOptimizedGraph()
self.assertEqual(len([n for n in g.node if n.op == "Enter"]), 2)
@test_util.run_deprecated_v1
def testCaptureExternalTensorInCond(self):
x = constant_op.constant(2.)
y = constant_op.constant(1.)
ret = while_loop_v2(
lambda v: v + y < 9.,
lambda v: v * 3., [x],
return_same_structure=False)
grad = gradients_impl.gradients(ret, [x])
with self.cached_session() as sess:
self.assertEqual(self.evaluate(ret), 18.)
self.assertSequenceEqual(self.evaluate(grad), [9.])
@test_util.run_deprecated_v1
def testCaptureExternalTensorInBody(self):
x = constant_op.constant(2.)
y = constant_op.constant(3.)
ret = while_loop_v2(
lambda v: v < 8., lambda v: v * y, [x], return_same_structure=False)
grad = gradients_impl.gradients(ret, [x])
with self.cached_session() as sess:
self.assertEqual(self.evaluate(ret), 18.)
self.assertSequenceEqual(self.evaluate(grad), [9.])
@test_util.run_deprecated_v1
def testLoopWithTensorListPushBack(self):
x = constant_op.constant(2.)
tensor_list = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=ScalarShape())
def Cond(x, tl):
del tl # Unused for Cond.
return x < 5.
def Body(x, tl):
tl = list_ops.tensor_list_push_back(tl, x)
tl = list_ops.tensor_list_push_back(tl, constant_op.constant(100.))
return x**2., tl
ret = while_loop_v2(
Cond, Body, [x, tensor_list], return_same_structure=False)
grad = gradients_impl.gradients(ret[0], x)
with self.cached_session() as sess:
self.assertEqual(sess.run(ret[0]), 16.)
self.assertSequenceEqual(self.evaluate(grad), [32.])
@test_util.run_deprecated_v1
def testDuplicateAccumulator(self):
x = constant_op.constant(2.)
tensor_list = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=ScalarShape())
def Cond(x, tl):
del tl # Unused for Cond.
return x < 5.
def Body(x, tl):
# There is an accumulator in the loop already so we should not add
# another.
tl = list_ops.tensor_list_push_back(tl, x)
return x**2., tl
ret = while_loop_v2(
Cond, Body, [x, tensor_list], return_same_structure=False)
for op in ops.get_default_graph().get_operations():
if op.type == "While":
while_op = op
body_graph = while_v2._get_graph(while_op, "body")
x_input_index = [i for i, inp in enumerate(while_op.inputs) if inp == x][0]
x_input_t = body_graph.inputs[x_input_index]
accumulator_count = len(
[c for c in x_input_t.consumers() if c.type == "TensorListPushBack"])
self.assertEqual(accumulator_count, 1)
grad = gradients_impl.gradients(ret[0], x)
with self.cached_session() as sess:
self.assertEqual(sess.run(ret[0]), 16.)
self.assertSequenceEqual(self.evaluate(grad), [32.])
@parameterized.named_parameters(
("UnknownShape", None),
("PartiallyDefinedShape", [None, 2]),
("FullyDefinedShape", [1, 2]),
)
@test_util.run_deprecated_v1
def testAccumulatorElementShape(self, shape):
def MatchShape(actual_tensor_shape):
# Compare the shapes, treating None dimensions as equal. We do not
# directly check actual_tensor_shape and tf.TensorShape(shape) for
# equality because tf.Dimension.__eq__ returns None if either dimension is
# None.
if shape is None:
self.assertIsNone(actual_tensor_shape.dims)
else:
self.assertListEqual(actual_tensor_shape.as_list(), shape)
def GetAccumulatorForInputAtIndex(while_op, idx):
body_graph = while_v2._get_graph(while_op, "body")
y_input_t = body_graph.inputs[idx]
push_back_node = [c for c in y_input_t.consumers()
if c.type == "TensorListPushBack"][0]
output_idx = body_graph.outputs.index(push_back_node.outputs[0])
return while_op.outputs[output_idx]
x = array_ops.placeholder(dtype=dtypes.float32, shape=shape)
y = array_ops.placeholder(dtype=dtypes.float32, shape=shape)
# Forward pass.
ret = while_loop_v2(lambda v, u: v < 8.,
lambda v, u: (math_ops.pow(v, u), u),
[x, y],
return_same_structure=True)
while_op = ret[0].op.inputs[0].op
# Gradient pass.
grad = gradients_impl.gradients(ret[0], x)
    # Note: There is an Identity between grad[0] and the While op.
grad_while_op = grad[0].op.inputs[0].op
# Get the TensorList output of While op containing the accumulated values
# of y.
x_input_index = [i for i, inp in enumerate(while_op.inputs) if x == inp][0]
output = GetAccumulatorForInputAtIndex(while_op, x_input_index)
_, val = list_ops.tensor_list_pop_back(output,
element_dtype=dtypes.float32)
MatchShape(val.shape)
# Take second derivative to generate intermediate grad_while_op outputs
gradients_impl.gradients(grad, x)
    # Get the TensorList output of the gradient While op containing the
    # accumulated values of grad_x (note that grad_x is needed by the second
    # derivative).
grad_output_index = grad_while_op.outputs.index(grad[0].op.inputs[0])
grad_output = GetAccumulatorForInputAtIndex(grad_while_op,
grad_output_index)
_, val = list_ops.tensor_list_pop_back(grad_output,
element_dtype=dtypes.float32)
MatchShape(val.shape)
def _createWhile(self, name):
"""Helper function testDefaultName."""
output = while_v2.while_loop(
lambda i: i < 3,
lambda i: i + 1, [constant_op.constant(0)],
return_same_structure=False)
while_op = output.op.inputs[0].op
self.assertEqual(while_op.type, "While")
return while_op
def testDefaultName(self):
with ops.Graph().as_default():
while_op = self._createWhile(None)
self.assertEqual(while_op.name, "while")
self.assertRegexpMatches(
while_op.get_attr("cond").name, r"while_cond_\d*")
self.assertRegexpMatches(
while_op.get_attr("body").name, r"while_body_\d*")
with ops.Graph().as_default():
with ops.name_scope("foo"):
while1_op = self._createWhile("")
self.assertEqual(while1_op.name, "foo/while")
self.assertRegexpMatches(
while1_op.get_attr("cond").name, r"foo_while_cond_\d*")
self.assertRegexpMatches(
while1_op.get_attr("body").name, r"foo_while_body_\d*")
while2_op = self._createWhile(None)
self.assertEqual(while2_op.name, "foo/while_1")
self.assertRegexpMatches(
while2_op.get_attr("cond").name, r"foo_while_1_cond_\d*")
self.assertRegexpMatches(
while2_op.get_attr("body").name, r"foo_while_1_body_\d*")
@test_util.enable_control_flow_v2
@test_util.run_deprecated_v1
def testWhileAndTensorArray(self):
param = constant_op.constant(2.0)
y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
# map_fn uses TensorArray internally.
r = map_fn.map_fn(lambda x: math_ops.multiply(x, param), y0)
grad = gradients_impl.gradients(r, param)[0]
self.assertAllClose([2.0, 4.0, 6.0, 8.0, 10.0, 12.0], self.evaluate(r))
self.assertAllClose(21.0, self.evaluate(grad))
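  # Why the gradient is 21.0 (added note, plain Python): each r_i equals
  # x_i * param, so d(sum of r_i)/d(param) accumulates every element of elems.
  def _exampleTensorArrayGradSum(self):
    self.assertEqual(sum([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]), 21.0)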
@test_util.run_deprecated_v1
def testNestedWhile(self):
# Compute sum of geometric progression: n^0 + n^1 + ... + n^m
# We compute the pow using a while loop.
n = constant_op.constant(3.)
m = constant_op.constant(5.)
sum_of_powers = constant_op.constant(0.)
def Body(i, previous_sum):
prod = constant_op.constant(1.)
return i - 1., previous_sum + while_loop_v2(
lambda c, _: c > 0,
lambda c, v: (c - 1., v * n), [i, prod],
return_same_structure=False)[1]
result = while_loop_v2(
lambda i, _: i >= 0,
Body, [m, sum_of_powers],
return_same_structure=False)[1]
grad = gradients_impl.gradients(result, [n])
with self.cached_session() as sess:
self.assertEqual(self.evaluate(result), 364.)
self.assertSequenceEqual(self.evaluate(grad), [547.])
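  # Worked check of the series above (an added illustration): with n = 3 and
  # m = 5, sum_k n**k = 364, and its derivative with respect to n,
  # sum_k k * n**(k-1), evaluates to 547.
  def _exampleGeometricSeriesMath(self):
    n, m = 3., 5
    self.assertEqual(sum(n ** k for k in range(m + 1)), 364.)
    self.assertEqual(sum(k * n ** (k - 1) for k in range(1, m + 1)), 547.)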
@test_util.run_deprecated_v1
def testIdentityNodeInBody(self):
def Body(v):
v = array_ops.identity(v)
v = array_ops.identity(v)
return v * v
x = constant_op.constant(2.)
ret = while_loop_v2(
lambda v: v < 8., Body, [x], return_same_structure=False)
grad = gradients_impl.gradients(ret, [x])
with self.cached_session() as sess:
self.assertEqual(self.evaluate(ret), 16.)
self.assertSequenceEqual(self.evaluate(grad), [32.])
@test_util.run_deprecated_v1
def testForwardPassRewrite(self):
x = constant_op.constant(1.0, name="x")
output = while_v2.while_loop(lambda x: x < 10.0,
lambda x: x * 2.0,
[x])[0]
while_op = output.op.inputs[0].op
self.assertEqual(while_op.type, "While")
# outputs = [loop_counter, max_iters, x]
self.assertLen(while_op.outputs, 3)
gradients_impl.gradients(output, x)
    # while_op should have been rewritten to output the 2.0 intermediate.
# outputs = [loop_counter, max_iters, x, 2.0_accumulator, x_accumulator]
self.assertLen(while_op.outputs, 5)
gradients_impl.gradients(output, x)
# Computing the gradient again shouldn't rewrite while_op again.
self.assertLen(while_op.outputs, 5)
def ScalarShape():
return ops.convert_to_tensor([], dtype=dtypes.int32)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/while_v2_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Assign*."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AssignOpTest(test.TestCase):
def _initAssignFetch(self, x, y, use_gpu=False):
"""Initialize a param to init and update it with y."""
with self.cached_session(use_gpu=use_gpu):
p = variables.Variable(x)
assign = state_ops.assign(p, y)
p.initializer.run()
new_value = self.evaluate(assign)
return self.evaluate(p), new_value
def _initAssignAddFetch(self, x, y, use_gpu=False):
"""Initialize a param to init, and compute param += y."""
with self.cached_session(use_gpu=use_gpu):
p = variables.Variable(x)
add = state_ops.assign_add(p, y)
p.initializer.run()
new_value = self.evaluate(add)
return self.evaluate(p), new_value
def _initAssignSubFetch(self, x, y, use_gpu=False):
"""Initialize a param to init, and compute param -= y."""
with self.cached_session(use_gpu=use_gpu):
p = variables.Variable(x)
sub = state_ops.assign_sub(p, y)
p.initializer.run()
new_value = self.evaluate(sub)
return self.evaluate(p), new_value
def _testTypes(self, vals):
for dtype in [np.float32, np.float64, np.int32, np.int64]:
x = np.zeros(vals.shape).astype(dtype)
y = vals.astype(dtype)
var_value, op_value = self._initAssignFetch(x, y, use_gpu=False)
self.assertAllEqual(y, var_value)
self.assertAllEqual(y, op_value)
var_value, op_value = self._initAssignAddFetch(x, y, use_gpu=False)
self.assertAllEqual(x + y, var_value)
self.assertAllEqual(x + y, op_value)
var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=False)
self.assertAllEqual(x - y, var_value)
self.assertAllEqual(x - y, op_value)
if test.is_built_with_cuda() and dtype in [np.float32, np.float64]:
var_value, op_value = self._initAssignFetch(x, y, use_gpu=True)
self.assertAllEqual(y, var_value)
self.assertAllEqual(y, op_value)
var_value, op_value = self._initAssignAddFetch(x, y, use_gpu=True)
self.assertAllEqual(x + y, var_value)
self.assertAllEqual(x + y, op_value)
        var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=True)
self.assertAllEqual(x - y, var_value)
self.assertAllEqual(x - y, op_value)
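  # The three update rules checked above, restated with plain numpy (an added
  # illustration): assign overwrites, assign_add accumulates, assign_sub
  # subtracts. Here p starts at zero, as in _testTypes.
  def _exampleUpdateRules(self):
    p = np.zeros(3)
    y = np.array([1., 2., 3.])
    self.assertAllEqual(np.copy(y), y)  # assign: p <- y
    self.assertAllEqual(p + y, y)       # assign_add: p <- p + y
    self.assertAllEqual(p - y, -y)      # assign_sub: p <- p - y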
@test_util.run_deprecated_v1
def testBasic(self):
self._testTypes(np.arange(0, 20).reshape([4, 5]))
@test_util.run_v1_only("b/120545219")
def testAssignNonStrictShapeChecking(self):
with self.cached_session():
data = array_ops.fill([1024, 1024], 0)
p = variables.VariableV1([1])
a = state_ops.assign(p, data, validate_shape=False)
a.op.run()
self.assertAllEqual(p.eval(), self.evaluate(data))
# Assign to yet another shape
data2 = array_ops.fill([10, 10], 1)
a2 = state_ops.assign(p, data2, validate_shape=False)
a2.op.run()
self.assertAllEqual(p.eval(), self.evaluate(data2))
@test_util.run_v1_only("b/120545219")
def testInitRequiredAssignAdd(self):
with self.cached_session():
p = variables.VariableV1(array_ops.fill([1024, 1024], 1), dtypes.int32)
a = state_ops.assign_add(p, array_ops.fill([1024, 1024], 0))
with self.assertRaisesOpError("use uninitialized"):
a.op.run()
@test_util.run_v1_only("b/120545219")
def testInitRequiredAssignSub(self):
with self.cached_session():
p = variables.VariableV1(array_ops.fill([1024, 1024], 1), dtypes.int32)
a = state_ops.assign_sub(p, array_ops.fill([1024, 1024], 0))
with self.assertRaisesOpError("use uninitialized"):
a.op.run()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/dense_update_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.nn_ops.Pad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class PadOpTest(test.TestCase):
def _npPad(self, inp, paddings, mode, constant_values=0):
mode = mode.lower()
if mode == "constant":
return np.pad(inp, paddings, mode=mode, constant_values=constant_values)
else:
return np.pad(inp, paddings, mode=mode)
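  # Quick illustration (added, not part of the original file) of the three
  # numpy padding modes exercised below, on a 1-D array:
  #   constant : fills with constant_values
  #   reflect  : mirrors around the edge, excluding the edge element
  #   symmetric: mirrors around the edge, including the edge element
  def _examplePadModes(self):
    x = np.array([0, 1, 2])
    self.assertAllEqual(
        np.pad(x, [[1, 1]], mode="constant", constant_values=9),
        [9, 0, 1, 2, 9])
    self.assertAllEqual(np.pad(x, [[1, 1]], mode="reflect"), [1, 0, 1, 2, 1])
    self.assertAllEqual(np.pad(x, [[1, 1]], mode="symmetric"),
                        [0, 0, 1, 2, 2])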
def testNpPad(self):
self.assertAllEqual(
np.array([[0, 0, 0, 0, 0, 0],
[0, 3, 3, 0, 0, 0],
[0, 4, 4, 0, 0, 0],
[0, 5, 5, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]),
self._npPad(
np.array([[3, 3], [4, 4], [5, 5]]),
[[1, 2], [1, 3]],
mode="constant"))
self.assertAllEqual(
np.array([[1, 1, 1, 1, 1, 1],
[1, 3, 3, 1, 1, 1],
[1, 4, 4, 1, 1, 1],
[1, 5, 5, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]]),
self._npPad(
np.array([[3, 3], [4, 4], [5, 5]]),
[[1, 2], [1, 3]],
mode="constant", constant_values=1))
self.assertAllEqual(
np.array([[4, 3, 4, 9, 4, 3],
[1, 0, 1, 2, 1, 0],
[4, 3, 4, 9, 4, 3],
[1, 0, 1, 2, 1, 0]]),
self._npPad(
np.array([[0, 1, 2], [3, 4, 9]]),
[[1, 1], [1, 2]],
mode="reflect"))
self.assertAllEqual(
np.array([[0, 0, 1, 2, 2, 1],
[0, 0, 1, 2, 2, 1],
[3, 3, 4, 9, 9, 4],
[3, 3, 4, 9, 9, 4]]),
self._npPad(
np.array([[0, 1, 2], [3, 4, 9]]),
[[1, 1], [1, 2]],
mode="symmetric"))
def _testPad(self, np_inputs, paddings, mode, constant_values):
np_val = self._npPad(np_inputs, paddings, mode=mode,
constant_values=constant_values)
with self.cached_session(use_gpu=True):
tf_val = array_ops.pad(np_inputs, paddings, mode=mode,
constant_values=constant_values)
out = self.evaluate(tf_val)
self.assertAllEqual(np_val, out)
self.assertShapeEqual(np_val, tf_val)
def _testGradient(self, x, a, mode, constant_values):
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
xs = list(x.shape)
ina = ops.convert_to_tensor(a)
y = array_ops.pad(inx, ina, mode=mode, constant_values=constant_values)
# Expected y's shape to be:
ys = list(np.array(x.shape) + np.sum(np.array(a), axis=1))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, y, ys, x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
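  # The shape arithmetic used above, in isolation (an added illustration):
  # each output dimension is the input dimension plus the before/after
  # padding amounts for that axis.
  def _examplePaddedShape(self):
    x_shape, paddings = (2, 3), [[1, 2], [0, 1]]
    ys = list(np.array(x_shape) + np.sum(np.array(paddings), axis=1))
    self.assertEqual(ys, [5, 4])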
def _testAll(self, np_inputs, paddings, constant_values):
for mode in ("CONSTANT", "REFLECT", "SYMMETRIC", "reflect", "symmetric",
"constant"):
# Zero-sized input is not allowed for REFLECT mode, but we still want
# zero-sized input test cases for the other modes.
if np_inputs.size or mode.upper() != "REFLECT":
self._testPad(np_inputs, paddings, mode=mode,
constant_values=constant_values)
if np_inputs.dtype == np.float32:
self._testGradient(np_inputs, paddings, mode=mode,
constant_values=constant_values)
@test_util.run_deprecated_v1
def testInputDims(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.pad(array_ops.reshape(
[1, 2], shape=[1, 2, 1, 1, 1, 1]),
array_ops.reshape(
[1, 2], shape=[1, 2]))
@test_util.run_deprecated_v1
def testPaddingsDim(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.pad(array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1, 2], shape=[2]))
@test_util.run_deprecated_v1
def testPaddingsDim2(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.pad(array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1, 2], shape=[2, 1]))
@test_util.run_deprecated_v1
def testPaddingsDim3(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.pad(array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1, 2], shape=[1, 2]))
@test_util.run_deprecated_v1
def testPaddingsDim4(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.pad(array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1, 2, 3, 4, 5, 6], shape=[3, 2]))
@test_util.run_deprecated_v1
def testPaddingsNonNegative(self):
with self.session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
array_ops.pad(constant_op.constant(
[1], shape=[1]),
constant_op.constant(
[-1, 0], shape=[1, 2]))
@test_util.run_deprecated_v1
def testPaddingsNonNegative2(self):
with self.session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
array_ops.pad(constant_op.constant(
[1], shape=[1]),
constant_op.constant(
[-1, 0], shape=[1, 2]))
def testPaddingsMaximum(self):
with self.session(use_gpu=True):
with self.assertRaises(Exception):
array_ops.pad(constant_op.constant(
[1], shape=[2]),
constant_op.constant(
[2, 0], shape=[1, 2]),
mode="REFLECT").eval()
with self.assertRaises(Exception):
array_ops.pad(constant_op.constant(
[1], shape=[2]),
constant_op.constant(
[0, 3], shape=[1, 2]),
mode="SYMMETRIC").eval()
def testInvalid(self):
with self.cached_session():
x = [[1, 2, 3], [4, 5, 6]]
with self.assertRaisesRegexp(ValueError, "Unknown padding mode"):
array_ops.pad(x, [[1, 0], [2, 1]], mode="weird").eval()
def testPaddingTypes(self):
paddings = [[1, 0], [2, 3], [0, 2]]
inputs = np.random.randint(-100, 100, (4, 4, 3)).astype(np.float32)
for mode in ("CONSTANT", "REFLECT", "SYMMETRIC", "reflect", "symmetric",
"constant"):
for padding_dtype in [dtypes.int32, dtypes.int64]:
np_val = self._npPad(inputs,
paddings,
mode=mode,
constant_values=0)
with self.cached_session(use_gpu=True):
tf_val = array_ops.pad(inputs,
constant_op.constant(paddings, padding_dtype),
mode=mode,
constant_values=0)
out = self.evaluate(tf_val)
self.assertAllEqual(np_val, out)
self.assertShapeEqual(np_val, tf_val)
def testIntTypes(self):
# TODO(touts): Figure out why the padding tests do not work on GPU
# for int types and rank > 2.
for t in [np.int8, np.uint8, np.int32, np.int64]:
self._testAll(
np.random.randint(-100, 100, (4, 4, 3)).astype(t),
[[1, 0], [2, 3], [0, 2]], 0)
self._testAll(
np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t),
[[0, 0], [0, 0], [0, 0], [0, 0]], -123)
@test_util.run_deprecated_v1
def testFloatTypes(self):
for t in [np.float32, np.float64]:
self._testAll(np.random.rand(2, 5).astype(t), [[1, 0], [2, 0]], 0.0)
self._testAll(np.random.rand(2, 3, 4).astype(t),
[[0, 0], [0, 0], [0, 0]], -1234.0)
self._testAll(np.random.rand(0, 3, 4).astype(t),
[[0, 0], [2, 1], [2, 3]], 0.0)
def testComplexTypes(self):
for t in [np.complex64, np.complex128]:
x = np.random.rand(2, 5).astype(t)
self._testAll(x + 1j * x, [[1, 0], [2, 0]], 1234.0 - 1234.0j)
x = np.random.rand(3, 2, 1, 1).astype(t)
self._testAll(x + 1j * x, [[0, 0], [0, 0], [0, 0], [0, 0]], 0 + 0j)
def testString(self):
# Numpy does not support padding strings so we compare padding manually.
x = ops.convert_to_tensor([["Hello", "World"],
["Goodnight", "Moon"]])
constant = array_ops.pad(x, [[1, 0], [0, 1]], mode="CONSTANT",
constant_values="PAD")
reflect = array_ops.pad(x, [[1, 0], [0, 1]], mode="REFLECT",
constant_values="PAD")
symmetric = array_ops.pad(x, [[1, 0], [0, 1]], mode="SYMMETRIC",
constant_values="PAD")
with self.session(use_gpu=True):
self.assertAllEqual(
[[b"PAD", b"PAD", b"PAD"], [b"Hello", b"World", b"PAD"],
[b"Goodnight", b"Moon", b"PAD"]], self.evaluate(constant))
self.assertAllEqual([[b"Goodnight", b"Moon", b"Goodnight"],
[b"Hello", b"World", b"Hello"],
[b"Goodnight", b"Moon", b"Goodnight"]],
self.evaluate(reflect))
self.assertAllEqual(
[[b"Hello", b"World", b"World"], [b"Hello", b"World", b"World"],
[b"Goodnight", b"Moon", b"Moon"]], self.evaluate(symmetric))
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# Unknown paddings shape.
inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
padded = array_ops.pad(inp, array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, None], padded.get_shape().as_list())
# Unknown input shape.
inp = array_ops.placeholder(dtypes.float32)
padded = array_ops.pad(inp, [[2, 2], [2, 2]])
self.assertEqual([None, None], padded.get_shape().as_list())
# Unknown input and paddings shape.
inp = array_ops.placeholder(dtypes.float32)
padded = array_ops.pad(inp, array_ops.placeholder(dtypes.int32))
self.assertAllEqual(None, padded.get_shape().ndims)
@test_util.run_deprecated_v1
def testPartialShapeInformation(self):
unknown = array_ops.placeholder(dtypes.int32)
# Known input shape, partial unknown padding (one dimension).
inp = constant_op.constant(0.0, shape=[4, 4])
padded = array_ops.pad(inp, [[1, 2], unknown])
self.assertEqual([7, None], padded.get_shape().as_list())
# Known input shape, partial unknown padding (begin).
inp = constant_op.constant(0.0, shape=[4, 4])
padded = array_ops.pad(inp, [[unknown, 0], [1, 2]])
self.assertEqual([None, 7], padded.get_shape().as_list())
# Known input shape, partial unknown padding (end).
inp = constant_op.constant(0.0, shape=[4, 4])
padded = array_ops.pad(inp, [[1, 2], [0, unknown]])
self.assertEqual([7, None], padded.get_shape().as_list())
# Unknown input shape, partial unknown padding (one dimension).
padded = array_ops.pad(unknown, [[1, 2], unknown])
self.assertEqual([None, None], padded.get_shape().as_list())
# Unknown input shape (rank known), partial unknown padding (one dimension).
rank_known = array_ops.placeholder(dtypes.int32)
rank_known.set_shape([None, None])
padded = array_ops.pad(rank_known, [[1, 2], unknown])
self.assertEqual([None, None], padded.get_shape().as_list())
# Known input shape, partial unknown padding (begin), with constant begin.
inp = constant_op.constant(0.0, shape=[4, 4])
padded = array_ops.pad(inp, [[constant_op.constant(1, shape=[]), 2],
[0, unknown]])
self.assertEqual([7, None], padded.get_shape().as_list())
# Known input shape, partial unknown padding (begin), with constant dim.
inp = constant_op.constant(0.0, shape=[4, 4])
padded = array_ops.pad(inp,
[constant_op.constant(1, shape=[2]), [0, unknown]])
self.assertEqual([6, None], padded.get_shape().as_list())
# Zero padding on a known dimension.
inp = array_ops.placeholder(dtypes.int32, [None, None, 20])
padded = array_ops.pad(inp, [[0, 0], [0, unknown], [0, 0]])
self.assertEqual([None, None, 20], padded.get_shape().as_list())
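  # Shape-inference rule exercised above, sketched in plain Python (an added
  # illustration): an output dimension is known only when the input dimension
  # and both padding amounts for that axis are known; otherwise it is None.
  def _examplePadShapeRule(self):
    def padded_dim(dim, before, after):
      if None in (dim, before, after):
        return None
      return dim + before + after
    self.assertEqual(padded_dim(4, 1, 2), 7)
    self.assertIsNone(padded_dim(4, None, 2))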
def testScalars(self):
paddings = np.zeros((0, 2), dtype=np.int32)
inp = np.asarray(7)
with self.session(use_gpu=True):
tf_val = array_ops.pad(inp, paddings)
out = self.evaluate(tf_val)
self.assertAllEqual(inp, out)
self.assertShapeEqual(inp, tf_val)
def testPadTypes(self):
for dtype in [dtypes.int32, dtypes.int64]:
paddings = np.zeros((0, 2))
inp = np.asarray(7)
with self.cached_session(use_gpu=True):
tf_val = array_ops.pad(inp, constant_op.constant(paddings, dtype=dtype))
out = self.evaluate(tf_val)
self.assertAllEqual(inp, out)
self.assertShapeEqual(inp, tf_val)
@test_util.run_deprecated_v1
def testCollapseAdjacentNonPaddedDimensions(self):
# pyformat: disable
paddings_values = [[[0, 0], [0, 0], [0, 0], [0, 1]],
[[0, 0], [2, 3], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0], [0, 0]]]
# pyformat: enable
for paddings_value in paddings_values:
for dtype in [dtypes.float32, dtypes.int32]:
inp = constant_op.constant(1, shape=[8, 28, 28, 3], dtype=dtype)
paddings = constant_op.constant(paddings_value, dtype=dtypes.int32)
padded = array_ops.pad(inp, paddings)
middle = array_ops.slice(padded, [row[0] for row in paddings_value],
[dim.value for dim in inp.shape.dims])
left = array_ops.slice(padded, [0, 0, 0, 0],
[row[0] for row in paddings_value])
right = array_ops.slice(
padded,
[paddings_value[i][0] + inp.shape.dims[i].value for i in range(4)],
[-1, -1, -1, -1])
with self.cached_session(use_gpu=True):
self.assertAllEqual(inp.eval(), self.evaluate(middle))
self.assertAllEqual(
np.zeros([row[0] for row in paddings_value]), self.evaluate(left))
self.assertAllEqual(
np.zeros([row[1] for row in paddings_value]),
self.evaluate(right))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/pad_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lookup ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
import six
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import counter
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
from tensorflow.python.training.tracking import util as trackable
class BaseLookupTableTest(test.TestCase):
def getHashTable(self):
if tf2.enabled():
return lookup_ops.StaticHashTable
else:
return lookup_ops.StaticHashTableV1
def getVocabularyTable(self):
if tf2.enabled():
return lookup_ops.StaticVocabularyTable
else:
return lookup_ops.StaticVocabularyTableV1
def initialize_table(self, table):
if not tf2.enabled():
self.evaluate(table.initializer)
class StaticHashTableTest(BaseLookupTableTest):
def testStaticHashTable(self):
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
self.initialize_table(table)
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
exported_keys_tensor, exported_values_tensor = table.export()
self.assertItemsEqual([b"brain", b"salad", b"surgery"],
self.evaluate(exported_keys_tensor))
self.assertItemsEqual([0, 1, 2], self.evaluate(exported_values_tensor))
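  # Lookup semantics in one line (an added analogy, plain Python): a dict
  # lookup with a default value mirrors StaticHashTable.lookup.
  def _exampleDictAnalogy(self):
    table = {"brain": 0, "salad": 1, "surgery": 2}
    result = [table.get(k, -1) for k in ("brain", "salad", "tank")]
    self.assertEqual(result, [0, 1, -1])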
def testStaticHashTableFindHighRank(self):
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
self.initialize_table(table)
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant([["brain", "salad"],
["tank", "tarkus"]])
output = table.lookup(input_string)
result = self.evaluate(output)
self.assertAllEqual([[0, 1], [-1, -1]], result)
def testStaticHashTableInitWithPythonArrays(self):
default_val = -1
keys = ["brain", "salad", "surgery"]
values = [0, 1, 2]
table = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(
keys, values, value_dtype=dtypes.int64), default_val)
self.initialize_table(table)
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
def testStaticHashTableInitWithNumPyArrays(self):
default_val = -1
keys = np.array(["brain", "salad", "surgery"], dtype=np.str)
values = np.array([0, 1, 2], dtype=np.int64)
table = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
self.initialize_table(table)
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
def testMultipleStaticHashTables(self):
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table1 = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
table2 = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
table3 = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
self.initialize_table(table1)
self.initialize_table(table2)
self.initialize_table(table3)
self.assertAllEqual(3, self.evaluate(table1.size()))
self.assertAllEqual(3, self.evaluate(table2.size()))
self.assertAllEqual(3, self.evaluate(table3.size()))
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = self.evaluate([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testStaticHashTableWithTensorDefault(self):
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
self.initialize_table(table)
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
def testStaticHashTableWithSparseTensorInput(self):
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
self.initialize_table(table)
sp_indices = [[0, 0], [0, 1], [1, 0]]
sp_shape = [2, 2]
input_tensor = sparse_tensor.SparseTensor(
constant_op.constant(sp_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "tank"]),
constant_op.constant(sp_shape, dtypes.int64))
output = table.lookup(input_tensor)
out_indices, out_values, out_shape = self.evaluate(output)
self.assertAllEqual([0, 1, -1], out_values)
self.assertAllEqual(sp_indices, out_indices)
self.assertAllEqual(sp_shape, out_shape)
def testSignatureMismatch(self):
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
self.initialize_table(table)
# Ref types do not produce a lookup signature mismatch.
input_string_ref = variables.Variable("brain")
self.evaluate(input_string_ref.initializer)
self.assertEqual(0, self.evaluate(table.lookup(input_string_ref)))
input_string = constant_op.constant([1, 2, 3], dtypes.int64)
with self.assertRaises(TypeError):
table.lookup(input_string)
with self.assertRaises(TypeError):
self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values), "UNK")
def testDTypes(self):
default_val = -1
with self.assertRaises(TypeError):
self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(["a"], [1], [dtypes.string],
dtypes.int64), default_val)
@test_util.run_v1_only("(Cached) Sessions not available in TF2.0")
def testNotInitialized(self):
with self.cached_session():
default_val = -1
table = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(["a"], [1],
value_dtype=dtypes.int64),
default_val)
input_string = constant_op.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
with self.assertRaisesOpError("Table not initialized"):
self.evaluate(output)
@test_util.run_v1_only("(Cached) Sessions not available in TF2.0")
def testInitializeTwice(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
self.initialize_table(table)
# Make sure that initializing twice doesn't throw any errors.
self.initialize_table(table)
def testInitializationWithInvalidDimensions(self):
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2, 3, 4], dtypes.int64)
raised_error = ValueError
if context.executing_eagerly():
raised_error = errors_impl.InvalidArgumentError
with self.assertRaises(raised_error):
self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
@test_util.run_v1_only("Sessions not available in TF2.0")
def testMultipleSessions(self):
# Start a server
server = server_lib.Server({"local0": ["localhost:0"]},
protocol="grpc",
start=True)
# Create two sessions sharing the same state
session1 = session.Session(server.target)
session2 = session.Session(server.target)
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values),
default_val,
name="t1")
# Init the table in the first session.
with session1:
self.initialize_table(table)
self.assertAllEqual(3, self.evaluate(table.size()))
# Init the table in the second session and verify that we do not get a
# "Table already initialized" error.
with session2:
table.initializer.run()
self.assertAllEqual(3, self.evaluate(table.size()))
@test_util.run_v2_only
def testImportedHashTable(self):
g = ops.Graph()
with g.as_default():
t = lookup_ops.StaticHashTable(
lookup_ops.KeyValueTensorInitializer(["a"], [1]),
2)
init_op = t._init_op
op = t.lookup(ops.convert_to_tensor(["a"]))
meta_graph = saver.export_meta_graph()
def f():
saver.import_meta_graph(meta_graph)
return ops.get_default_graph().get_tensor_by_name(op.name)
wrapped = wrap_function.wrap_function(f, [])
pruned_init_fn = wrapped.prune(
(), [wrapped.graph.get_operation_by_name(init_op.name)])
self.evaluate(pruned_init_fn())
self.assertAllEqual([1], wrapped())
def testStaticHashTableInt32String(self):
default_val = "n/a"
keys = constant_op.constant([0, 1, 2], dtypes.int32)
values = constant_op.constant(["brain", "salad", "surgery"])
table = self.getHashTable()(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
self.initialize_table(table)
input_tensor = constant_op.constant([0, 1, -1])
output = table.lookup(input_tensor)
result = self.evaluate(output)
self.assertAllEqual([b"brain", b"salad", b"n/a"], result)
def testTableUseInFunction(self):
if not context.executing_eagerly():
self.skipTest("Only Eager mode test.")
keys = constant_op.constant([0, 1, 2], dtypes.int32)
values = constant_op.constant(["brain", "salad", "surgery"])
table = self.getHashTable()(lookup_ops.KeyValueTensorInitializer(
keys, values), "n/a")
@function.defun()
def lookup_table_func(k):
return table.lookup(k)
result = lookup_table_func(constant_op.constant([0, 1, -1]))
self.assertAllEqual([b"brain", b"salad", b"n/a"], result)
result = lookup_table_func(constant_op.constant([2, -1, 1]))
self.assertAllEqual([b"surgery", b"n/a", b"salad"], result)
def testTableCreatedInFunction(self):
if not context.executing_eagerly():
self.skipTest("Only Eager mode test.")
keys = constant_op.constant([0, 1, 2], dtypes.int32)
values = constant_op.constant(["brain", "salad", "surgery"])
@function.defun()
def lookup_table_func(k):
table = self.getHashTable()(lookup_ops.KeyValueTensorInitializer(
keys, values), "n/a")
return table.lookup(k)
result = lookup_table_func(constant_op.constant([0, 1, -1]))
self.assertAllEqual([b"brain", b"salad", b"n/a"], result)
result = lookup_table_func(constant_op.constant([2, -1, 1]))
self.assertAllEqual([b"surgery", b"n/a", b"salad"], result)
class KeyValueTensorInitializerTest(BaseLookupTableTest):
def test_string(self):
init = lookup_ops.KeyValueTensorInitializer(
("brain", "salad", "surgery"), (0, 1, 2), dtypes.string, dtypes.int64)
table = self.getHashTable()(init, default_value=-1)
self.initialize_table(table)
def test_multiple_tables(self):
with ops.name_scope("table_scope"):
init1 = lookup_ops.KeyValueTensorInitializer(
("brain", "salad", "surgery"), (0, 1, 2), dtypes.string, dtypes.int64)
table1 = self.getHashTable()(init1, default_value=-1)
if not context.executing_eagerly():
self.assertEqual("hash_table", table1.name)
self.assertEqual("table_scope/hash_table",
table1.resource_handle.op.name)
init2 = lookup_ops.KeyValueTensorInitializer(
("brain", "salad", "surgery"), (0, 1, 2), dtypes.string, dtypes.int64)
table2 = self.getHashTable()(init2, default_value=-1)
if not context.executing_eagerly():
self.assertEqual("hash_table_1", table2.name)
self.assertEqual("table_scope/hash_table_1",
table2.resource_handle.op.name)
def test_int64(self):
init = lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),
dtypes.int64, dtypes.int64)
table = self.getHashTable()(init, default_value=-1)
self.initialize_table(table)
def test_int32(self):
init = lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),
dtypes.int32, dtypes.int64)
with self.assertRaises(errors_impl.OpError):
table = self.getHashTable()(init, default_value=-1)
self.initialize_table(table)
class InitializeTableFromFileOpTest(BaseLookupTableTest):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def testInitializeStringTable(self):
vocabulary_file = self._createVocabFile("one_column_1.txt")
default_value = -1
init = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)
self.assertIn("one_column_1.txt_-2_-1", init._shared_name)
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
output = table.lookup(constant_op.constant(["brain", "salad", "tank"]))
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
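  # Shared-name convention observed above (an added sketch reconstructed from
  # the assertions in this file, not the library's actual naming code): the
  # name embeds the file name, an optional vocab size, then the key and value
  # column indices, where -2 denotes WHOLE_LINE and -1 denotes LINE_NUMBER.
  def _exampleSharedNameConvention(self):
    def shared_name(filename, key_index, value_index, vocab_size=None):
      parts = [filename]
      if vocab_size is not None:
        parts.append(str(vocab_size))
      return "_".join(parts + [str(key_index), str(value_index)])
    self.assertEqual(shared_name("one_column_1.txt", -2, -1),
                     "one_column_1.txt_-2_-1")
    self.assertEqual(shared_name("one_column6.txt", -2, -1, vocab_size=3),
                     "one_column6.txt_3_-2_-1")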
def testInitializeInt64Table(self):
vocabulary_file = self._createVocabFile(
"one_column_int64.txt", values=("42", "1", "-1000"))
with self.cached_session():
default_value = -1
init = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.int64, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)
self.assertIn("one_column_int64.txt_-2_-1", init._shared_name)
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
output = table.lookup(
constant_op.constant((42, 1, 11), dtype=dtypes.int64))
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
def testInitializeIndexTable(self):
vocabulary_file = self._createVocabFile("one_column_2.txt")
with self.cached_session():
default_value = "UNK"
key_index = lookup_ops.TextFileIndex.LINE_NUMBER
value_index = lookup_ops.TextFileIndex.WHOLE_LINE
init = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.int64, key_index, dtypes.string, value_index)
self.assertIn("one_column_2.txt_-1_-2", init._shared_name)
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
output = table.lookup(input_values)
result = self.evaluate(output)
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], result)
def testMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.cached_session():
default_value = -1
key_index = 1
value_index = 2
init = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.string, key_index, dtypes.int64, value_index)
self.assertIn("three_columns.txt_1_2", init._shared_name)
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
input_string = constant_op.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
result = self.evaluate(output)
self.assertAllEqual([1, 5, 6], result)
def testInvalidDataTypeInMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.cached_session():
default_value = -1
key_index = 2
value_index = 1
init = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.string, key_index, dtypes.int64, value_index)
self.assertIn("three_columns.txt_2_1", init._shared_name)
with self.assertRaisesOpError("is not a valid"):
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
def testInvalidDataType(self):
vocabulary_file = self._createVocabFile("one_column_3.txt")
with self.cached_session():
default_value = "UNK"
key_index = lookup_ops.TextFileIndex.WHOLE_LINE
value_index = lookup_ops.TextFileIndex.LINE_NUMBER
with self.assertRaises(ValueError):
init = lookup_ops.TextFileInitializer(vocabulary_file, dtypes.int64,
key_index, dtypes.string,
value_index)
self.assertIn("one_column_3.txt_-2_-1", init._shared_name)
self.getHashTable()(init, default_value)
def testInvalidIndex(self):
vocabulary_file = self._createVocabFile("one_column_4.txt")
with self.cached_session():
default_value = -1
key_index = 1 # second column of the line
value_index = lookup_ops.TextFileIndex.LINE_NUMBER
init = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.string, key_index, dtypes.int64, value_index)
self.assertIn("one_column_4.txt_1_-1", init._shared_name)
with self.assertRaisesOpError("Invalid number of columns"):
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
def testInitializeSameTableWithMultipleNodes(self):
vocabulary_file = self._createVocabFile("one_column_5.txt")
with self.cached_session():
default_value = -1
init1 = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)
self.assertIn("one_column_5.txt_-2_-1", init1._shared_name)
table1 = self.getHashTable()(init1, default_value)
init2 = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)
self.assertIn("one_column_5.txt_-2_-1", init2._shared_name)
table2 = self.getHashTable()(init2, default_value)
init3 = lookup_ops.TextFileInitializer(
vocabulary_file, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)
self.assertIn("one_column_5.txt_-2_-1", init3._shared_name)
table3 = self.getHashTable()(init3, default_value)
self.evaluate(lookup_ops.tables_initializer())
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = self.evaluate([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testInitializeTableWithNoFilename(self):
with self.cached_session():
default_value = -1
with self.assertRaises(ValueError):
self.getHashTable()(lookup_ops.TextFileInitializer(
"", dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER), default_value)
def testInitializeWithVocabSize(self):
with self.cached_session():
default_value = -1
vocab_size = 3
vocabulary_file1 = self._createVocabFile("one_column6.txt")
init1 = lookup_ops.TextFileInitializer(
vocabulary_file1,
dtypes.string,
lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup_ops.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size)
self.assertIn("one_column6.txt_3_-2_-1", init1._shared_name)
table1 = self.getHashTable()(init1, default_value)
# Initialize from file.
self.initialize_table(table1)
self.assertEqual(vocab_size, self.evaluate(table1.size()))
vocabulary_file2 = self._createVocabFile("one_column7.txt")
vocab_size = 5
init2 = lookup_ops.TextFileInitializer(
vocabulary_file2,
dtypes.string,
lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup_ops.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size)
self.assertIn("one_column7.txt_5_-2_-1", init2._shared_name)
with self.assertRaisesOpError("Invalid vocab_size"):
table2 = self.getHashTable()(init2, default_value)
self.initialize_table(table2)
vocab_size = 1
vocabulary_file3 = self._createVocabFile("one_column3.txt")
init3 = lookup_ops.TextFileInitializer(
vocabulary_file3,
dtypes.string,
lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup_ops.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size)
self.assertIn("one_column3.txt_1_-2_-1", init3._shared_name)
table3 = self.getHashTable()(init3, default_value)
# Smaller vocab size reads only vocab_size records.
self.initialize_table(table3)
self.assertEqual(vocab_size, self.evaluate(table3.size()))
@test_util.run_v1_only("placeholder usage")
def testFeedVocabularyName(self):
vocabulary_file = self._createVocabFile("feed_vocabulary.txt")
with self.cached_session():
default_value = -1
init = lookup_ops.TextFileInitializer(
"old_file.txt", dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)
self.assertIn("old_file.txt_-2_-1", init._shared_name)
table = self.getHashTable()(init, default_value)
      # Initializing with a nonexistent file (old_file.txt) should fail.
# TODO(yleon): Update message, which might change per FileSystem.
with self.assertRaisesOpError("old_file.txt"):
table.initializer.run()
# Initialize the model feeding the vocabulary file.
filenames = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
table.initializer.run(feed_dict={filenames[0]: vocabulary_file})
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
def testInvalidFilenames(self):
vocabulary_file = self._createVocabFile("filename_shape.txt")
with self.cached_session():
default_value = -1
# Invalid data type
other_type = constant_op.constant(1)
with self.assertRaises(Exception) as cm:
self.getHashTable()(lookup_ops.TextFileInitializer(
other_type, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER), default_value)
self.assertIsInstance(cm.exception, (ValueError, TypeError))
# Non-scalar filename
filenames = constant_op.constant([vocabulary_file, vocabulary_file])
if not context.executing_eagerly():
with self.assertRaises(Exception) as cm:
self.getHashTable()(lookup_ops.TextFileInitializer(
filenames, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),
default_value)
self.assertIsInstance(cm.exception, (ValueError, TypeError))
else:
with self.assertRaises(errors_impl.InvalidArgumentError):
self.getHashTable()(lookup_ops.TextFileInitializer(
filenames, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),
default_value)
def testIdToStringTable(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
with self.cached_session():
default_value = "UNK"
vocab_size = 3
init = lookup_ops.TextFileStringTableInitializer(
vocab_file, vocab_size=vocab_size)
self.assertTrue("feat_to_id_1.txt_3_-1_-2", init._shared_name)
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
out = table.lookup(input_values)
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"],
self.evaluate(out))
self.assertEqual(vocab_size, self.evaluate(table.size()))
def testStringToIdTable(self):
vocab_file = self._createVocabFile("feat_to_id_2.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
init = lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size)
self.assertTrue("feat_to_id_2.txt_3_-1_-2", init._shared_name)
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, -1], self.evaluate(out))
self.assertEqual(vocab_size, self.evaluate(table.size()))
def testInt64ToIdTable(self):
vocab_file = self._createVocabFile(
"feat_to_id_3.txt", values=("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
init = lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64)
self.assertTrue("feat_to_id_3.txt_3_-1_-2", init._shared_name)
table = self.getHashTable()(init, default_value)
self.initialize_table(table)
out = table.lookup(
constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64))
self.assertAllEqual((0, 1, 2, -1), self.evaluate(out))
self.assertEqual(vocab_size, self.evaluate(table.size()))
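

# A minimal usage sketch (not part of the test suite) of the pattern the
# tests above exercise: build a static string-to-id table from a
# vocabulary file and look up a few tokens. `vocab_path` is assumed to
# point at a newline-delimited vocabulary file like the ones written by
# _createVocabFile.
def _string_to_id_table_sketch(vocab_path):
  init = lookup_ops.TextFileIdTableInitializer(vocab_path, vocab_size=3)
  table = lookup_ops.StaticHashTable(init, default_value=-1)
  # In graph mode the table must be initialized first (e.g. by running
  # lookup_ops.tables_initializer()); under eager execution it is
  # initialized on construction. OOV tokens map to default_value.
  return table.lookup(constant_op.constant(["brain", "salad", "tarkus"]))
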
class StaticVocabularyTableTest(BaseLookupTableTest):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def testStringStaticVocabularyTable(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
vocab_size = 3
oov_buckets = 1
table = self.getVocabularyTable()(lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size), oov_buckets)
self.initialize_table(table)
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))
def testInt32StaticVocabularyTable(self):
vocab_file = self._createVocabFile("feat_to_id_2.txt", ("42", "1", "-1000"))
vocab_size = 3
oov_buckets = 1
table = self.getVocabularyTable()(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
oov_buckets,
lookup_key_dtype=dtypes.int32)
self.initialize_table(table)
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))
def testInt64StaticVocabularyTable(self):
vocab_file = self._createVocabFile("feat_to_id_3.txt", ("42", "1", "-1000"))
vocab_size = 3
oov_buckets = 1
table = self.getVocabularyTable()(lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64), oov_buckets)
self.initialize_table(table)
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))
def testStringStaticVocabularyTableNoInitializer(self):
oov_buckets = 5
    # Create a table that only uses hash buckets: for each input value it
    # returns an id calculated as fingerprint("input") mod oov_buckets.
table = self.getVocabularyTable()(None, oov_buckets)
self.initialize_table(table)
values = constant_op.constant(("brain", "salad", "surgery"))
out = table.lookup(values)
self.assertAllEqual(
[
3, # fingerprint("brain") mod 5.
1, # fingerprint("salad") mod 5.
4 # fingerprint("surgery") mod 5
],
self.evaluate(out))
self.assertEqual(oov_buckets, self.evaluate(table.size()))
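
  def _oovBucketSketch(self):
    # Illustrative sketch, not collected as a test (no "test" prefix): the
    # bucket ids assigned by a bucket-only StaticVocabularyTable are
    # expected to match string_to_hash_bucket_fast, the same fingerprinting
    # that the hasher assertions later in this file check for. The local
    # import exists only for this sketch.
    from tensorflow.python.ops import string_ops  # pylint: disable=g-import-not-at-top
    values = constant_op.constant(("brain", "salad", "surgery"))
    # Expected to evaluate to [3, 1, 4], matching the assertions above.
    return string_ops.string_to_hash_bucket_fast(values, 5)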
def testStaticVocabularyTableWithMultipleInitializers(self):
vocab_file = self._createVocabFile("feat_to_id_4.txt")
vocab_size = 3
oov_buckets = 3
init = lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size)
table1 = self.getVocabularyTable()(init, oov_buckets, name="table1")
table2 = self.getVocabularyTable()(init, oov_buckets, name="table2")
self.evaluate(lookup_ops.tables_initializer())
input_string = constant_op.constant(
["fruit", "brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string)
out2 = table2.lookup(input_string)
out1, out2 = self.evaluate([out1, out2])
self.assertAllEqual([5, 0, 1, 2, 5], out1)
self.assertAllEqual([5, 0, 1, 2, 5], out2)
self.assertEqual(vocab_size + oov_buckets, self.evaluate(table1.size()))
self.assertEqual(vocab_size + oov_buckets, self.evaluate(table2.size()))
def testStaticVocabularyTableInitializationAcrossSessions(self):
vocab_file = self._createVocabFile("feat_to_id_5.txt")
with self.cached_session():
vocab_size = 3
oov_buckets = 1
table1 = self.getVocabularyTable()(lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size), oov_buckets)
self.initialize_table(table1)
input_string_1 = constant_op.constant(
["brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string_1)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(out1))
self.assertEqual(vocab_size + oov_buckets, self.evaluate(table1.size()))
with self.cached_session():
vocab_size = 3
oov_buckets = 1
      # The underlying lookup table was already initialized in the previous
      # session, so there is no need to initialize table2.
table2 = self.getVocabularyTable()(lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size), oov_buckets)
input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
out2 = table2.lookup(input_string_2)
self.assertAllEqual([3, 1, 3], self.evaluate(out2))
self.assertEqual(vocab_size + oov_buckets, self.evaluate(table2.size()))
def testSparseTensor(self):
vocab_file = self._createVocabFile("feat_to_id_7.txt")
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "brain", "surgery", "tarkus"],
dtypes.string),
constant_op.constant(input_shape, dtypes.int64))
table = self.getVocabularyTable()(lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=3), 1)
self.initialize_table(table)
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = self.evaluate(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testInt32SparseTensor(self):
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),
constant_op.constant(input_shape, dtypes.int64))
table = self.getVocabularyTable()(
lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),
dtypes.int64, dtypes.int64),
1,
lookup_key_dtype=dtypes.int32)
self.initialize_table(table)
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = self.evaluate(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testInt64SparseTensor(self):
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),
constant_op.constant(input_shape, dtypes.int64))
table = self.getVocabularyTable()(lookup_ops.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), 1)
self.initialize_table(table)
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = self.evaluate(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testStaticVocabularyTableNoInnerTable(self):
table = self.getVocabularyTable()(None, num_oov_buckets=1)
self.assertIsNone(table.resource_handle)
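

# A minimal sketch (not part of the test suite) of the DenseHashTable
# contract exercised below: the table is backed by open addressing, so the
# caller must reserve two sentinel keys. Neither empty_key nor deleted_key
# may ever be inserted or looked up, and the two must differ.
def _dense_hash_table_sketch():
  table = lookup_ops.DenseHashTable(
      dtypes.int64,
      dtypes.int64,
      default_value=-1,
      empty_key=0,
      deleted_key=-1)
  insert_op = table.insert(
      constant_op.constant([11, 12], dtypes.int64),
      constant_op.constant([1, 2], dtypes.int64))
  # Once insert_op has run, this lookup evaluates to [1, -1]: key 13 is
  # absent, so default_value is returned for it.
  lookup = table.lookup(constant_op.constant([11, 13], dtypes.int64))
  return insert_op, lookup
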
class DenseHashTableOpTest(test.TestCase):
def testBasic(self):
with self.cached_session():
keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=0,
deleted_key=-1)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(4, self.evaluate(table.size()))
remove_string = constant_op.constant([12, 15], dtypes.int64)
self.evaluate(table.remove(remove_string))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant([11, 12, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = self.evaluate(output)
self.assertAllEqual([0, -1, -1], result)
def testBasicBool(self):
with self.cached_session():
keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
values = constant_op.constant([True, True, True, True], dtypes.bool)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.bool,
default_value=False,
empty_key=0,
deleted_key=-1)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(4, self.evaluate(table.size()))
remove_string = constant_op.constant([11, 15], dtypes.int64)
self.evaluate(table.remove(remove_string))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant([11, 12, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = self.evaluate(output)
self.assertAllEqual([False, True, False], result)
def testSameEmptyAndDeletedKey(self):
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Empty and deleted keys"):
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=42,
deleted_key=42)
self.assertAllEqual(0, self.evaluate(table.size()))
@test_util.run_v1_only("uses placeholders")
def testLookupUnknownShape(self):
with self.cached_session():
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=0,
deleted_key=-1)
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
placeholder_keys = array_ops.placeholder(dtypes.int64)
output = table.lookup(placeholder_keys)
self.assertAllEqual(None, output.get_shape())
result = output.eval({placeholder_keys: [11, 12, 15]})
self.assertAllEqual([0, 1, -1], result)
def testMapStringToFloat(self):
with self.cached_session():
keys = constant_op.constant(["a", "b", "c", "d"], dtypes.string)
values = constant_op.constant([0.0, 1.1, 2.2, 3.3], dtypes.float32)
default_value = constant_op.constant(-1.5, dtypes.float32)
table = lookup_ops.DenseHashTable(
dtypes.string,
dtypes.float32,
default_value=default_value,
empty_key="",
deleted_key="$")
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(4, self.evaluate(table.size()))
remove_string = constant_op.constant(["b", "e"])
self.evaluate(table.remove(remove_string))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["a", "b", "d", "e"], dtypes.string)
output = table.lookup(input_string)
self.assertAllEqual([4], output.get_shape())
result = self.evaluate(output)
self.assertAllClose([0, -1.5, 3.3, -1.5], result)
def testMapInt64ToFloat(self):
for float_dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
values = constant_op.constant([0.0, 1.1, 2.2, 3.3], float_dtype)
default_value = constant_op.constant(-1.5, float_dtype)
table = lookup_ops.DenseHashTable(
dtypes.int64,
float_dtype,
default_value=default_value,
empty_key=0,
deleted_key=-1)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(4, self.evaluate(table.size()))
remove_string = constant_op.constant([12, 15], dtypes.int64)
self.evaluate(table.remove(remove_string))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant([11, 12, 14, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([4], output.get_shape())
result = self.evaluate(output)
self.assertAllClose([0, -1.5, 3.3, -1.5], result)
def testVectorValues(self):
with self.cached_session():
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([[0, 1, 2, 3], [3, 4, 5, 6], [6, 7, 8, 9]],
dtypes.int64)
default_value = constant_op.constant([-1, -2, -3, -4], dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=0,
deleted_key=-1,
initial_num_buckets=4)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
self.assertAllEqual(4, len(self.evaluate(table.export()[0])))
self.evaluate(
table.insert(
constant_op.constant([14], dtypes.int64),
constant_op.constant([[2, 3, 4, 5]], dtypes.int64)))
self.assertAllEqual(4, self.evaluate(table.size()))
self.assertAllEqual(8, len(self.evaluate(table.export()[0])))
remove_string = constant_op.constant([12, 16], dtypes.int64)
self.evaluate(table.remove(remove_string))
self.assertAllEqual(3, self.evaluate(table.size()))
self.assertAllEqual(8, len(self.evaluate(table.export()[0])))
input_string = constant_op.constant([11, 12, 14, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([4, 4],
output.shape,
msg="Saw shape: %s" % output.shape)
result = self.evaluate(output)
self.assertAllEqual(
[[0, 1, 2, 3], [-1, -2, -3, -4], [2, 3, 4, 5], [-1, -2, -3, -4]],
result)
def testVectorKeys(self):
with self.cached_session():
keys = constant_op.constant([[0, 1], [1, 2], [1, 3]], dtypes.int64)
values = constant_op.constant([10, 11, 12], dtypes.int64)
empty_key = constant_op.constant([0, 3], dtypes.int64)
deleted_key = constant_op.constant([-1, -1], dtypes.int64)
default_value = constant_op.constant(-1, dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
initial_num_buckets=8)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
self.evaluate(
table.insert(
constant_op.constant([[0, 0]], dtypes.int64),
constant_op.constant([13], dtypes.int64)))
self.assertAllEqual(4, self.evaluate(table.size()))
self.assertAllEqual(8, len(self.evaluate(table.export()[0])))
remove_string = constant_op.constant([[1, 2], [7, 8]], dtypes.int64)
self.evaluate(table.remove(remove_string))
self.assertAllEqual(3, self.evaluate(table.size()))
self.assertAllEqual(8, len(self.evaluate(table.export()[0])))
input_string = constant_op.constant([[0, 1], [1, 2], [1, 3], [0, 2]],
dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([4], output.get_shape())
result = self.evaluate(output)
self.assertAllEqual([10, -1, 12, -1], result)
def testResize(self):
with self.cached_session():
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=0,
deleted_key=-1,
initial_num_buckets=4)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
self.assertAllEqual(4, len(self.evaluate(table.export()[0])))
keys2 = constant_op.constant([12, 99], dtypes.int64)
self.evaluate(table.remove(keys2))
self.assertAllEqual(2, self.evaluate(table.size()))
self.assertAllEqual(4, len(self.evaluate(table.export()[0])))
keys3 = constant_op.constant([13, 14, 15, 16, 17], dtypes.int64)
values3 = constant_op.constant([3, 4, 5, 6, 7], dtypes.int64)
self.evaluate(table.insert(keys3, values3))
self.assertAllEqual(6, self.evaluate(table.size()))
self.assertAllEqual(16, len(self.evaluate(table.export()[0])))
keys4 = constant_op.constant([10, 11, 12, 13, 14, 15, 16, 17, 18],
dtypes.int64)
output = table.lookup(keys4)
self.assertAllEqual([-1, 0, -1, 3, 4, 5, 6, 7, -1], self.evaluate(output))
def testExport(self):
with self.cached_session():
keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
values = constant_op.constant([1, 2, 3, 4], dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=100,
deleted_key=200,
initial_num_buckets=8)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(4, self.evaluate(table.size()))
keys2 = constant_op.constant([12, 15], dtypes.int64)
self.evaluate(table.remove(keys2))
self.assertAllEqual(3, self.evaluate(table.size()))
exported_keys, exported_values = table.export()
np_keys = self.evaluate(exported_keys)
np_values = self.evaluate(exported_values)
self.assertAllEqual(8, len(np_keys))
self.assertAllEqual(8, len(np_values))
      # Pair up keys and values, dropping the extra dimension added by dstack
pairs = np.dstack((np_keys.flatten(), np_values.flatten()))[0]
      # Sort the pairs by key
pairs = pairs[pairs[:, 0].argsort()]
self.assertAllEqual([[11, 1], [13, 3], [14, 4], [100, 0], [100, 0],
[100, 0], [100, 0], [200, 2]], pairs)
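
  # Helper sketch (not used by the tests above): the export-pairing idiom
  # from testExport, factored out. Assumes keys and values are exported as
  # numpy arrays; any extra dimensions are flattened away.
  @staticmethod
  def _sorted_key_value_pairs(np_keys, np_values):
    # Pair each exported key with its value, then sort the rows by key.
    pairs = np.dstack((np_keys.flatten(), np_values.flatten()))[0]
    return pairs[pairs[:, 0].argsort()]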
@test_util.run_v1_only("Saver V1 only")
def testSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.session(graph=ops.Graph()) as sess:
default_value = -1
empty_key = 0
deleted_key = -1
keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t1",
checkpoint=True,
initial_num_buckets=32)
save = saver.Saver()
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
keys2 = constant_op.constant([12, 15], dtypes.int64)
table.remove(keys2).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
self.assertIsInstance(val, six.string_types)
self.assertEqual(save_path, val)
with self.session(graph=ops.Graph()) as sess:
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t1",
checkpoint=True,
initial_num_buckets=64)
table.insert(
constant_op.constant([11, 14], dtypes.int64),
constant_op.constant([12, 24], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = constant_op.constant([10, 11, 12, 13, 14], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([-1, 0, -1, 2, 3], output.eval())
@test_util.run_v1_only("Saver V1 only")
def testSaveRestoreOnlyTable(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.session(graph=ops.Graph()) as sess:
default_value = -1
empty_key = 0
deleted_key = -1
keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t1",
checkpoint=True,
initial_num_buckets=32)
save = saver.Saver([table])
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
keys2 = constant_op.constant([12, 15], dtypes.int64)
table.remove(keys2).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
self.assertIsInstance(val, six.string_types)
self.assertEqual(save_path, val)
with self.session(graph=ops.Graph()) as sess:
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t1",
checkpoint=True,
initial_num_buckets=64)
table.insert(
constant_op.constant([11, 14], dtypes.int64),
constant_op.constant([12, 24], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = saver.Saver([table])
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = constant_op.constant([10, 11, 12, 13, 14], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([-1, 0, -1, 2, 3], output.eval())
@test_util.run_in_graph_and_eager_modes
def testObjectSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_prefix = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
default_value = -1
empty_key = 0
deleted_key = -1
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
save_table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t1",
checkpoint=True,
initial_num_buckets=32)
save_checkpoint = trackable.Checkpoint(table=save_table)
self.assertAllEqual(0, self.evaluate(save_table.size()))
self.evaluate(save_table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(save_table.size()))
self.assertAllEqual(32, len(self.evaluate(save_table.export()[0])))
save_path = save_checkpoint.save(save_prefix)
del save_table, save_checkpoint
load_table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t1",
checkpoint=True,
initial_num_buckets=64)
self.evaluate(
load_table.insert(
constant_op.constant([11, 14], dtypes.int64),
constant_op.constant([12, 24], dtypes.int64)))
self.assertAllEqual(2, self.evaluate(load_table.size()))
self.assertAllEqual(64, len(self.evaluate(load_table.export()[0])))
restore_checkpoint = trackable.Checkpoint(table=load_table)
# Restore the saved values in the parameter nodes.
restore_checkpoint.restore(save_path).run_restore_ops()
self.assertAllEqual(3, self.evaluate(load_table.size()))
self.assertAllEqual(32, len(self.evaluate(load_table.export()[0])))
input_string = constant_op.constant([10, 11, 12, 13, 14], dtypes.int64)
output = load_table.lookup(input_string)
self.assertAllEqual([-1, 0, 1, 2, -1], self.evaluate(output))
@test_util.run_v1_only("Saver V1 only")
def testVectorSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "vector_save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.session(graph=ops.Graph()) as sess:
empty_key = constant_op.constant([11, 13], dtypes.int64)
deleted_key = constant_op.constant([-2, -3], dtypes.int64)
default_value = constant_op.constant([-1, -2], dtypes.int64)
keys = constant_op.constant([[11, 12], [11, 14], [12, 13], [13, 14]],
dtypes.int64)
values = constant_op.constant([[0, 1], [2, 3], [2, 4], [4, 5]],
dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t1",
checkpoint=True,
initial_num_buckets=32)
save = saver.Saver()
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
keys2 = constant_op.constant([[12, 13], [16, 17]], dtypes.int64)
table.remove(keys2).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
self.assertIsInstance(val, six.string_types)
self.assertEqual(save_path, val)
with self.session(graph=ops.Graph()) as sess:
empty_key = constant_op.constant([11, 13], dtypes.int64)
deleted_key = constant_op.constant([-2, -3], dtypes.int64)
default_value = constant_op.constant([-1, -2], dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t1",
checkpoint=True,
initial_num_buckets=64)
table.insert(
constant_op.constant([[11, 12], [13, 15]], dtypes.int64),
constant_op.constant([[21, 22], [23, 24]], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = constant_op.constant(
[[11, 12], [11, 14], [11, 15], [13, 14], [13, 15]], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([[0, 1], [2, 3], [-1, -2], [4, 5], [-1, -2]],
output.eval())
@test_util.run_v1_only("Saver V1 only")
def testVectorScalarSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "vector_scalar_save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.session(graph=ops.Graph()) as sess:
empty_key = constant_op.constant([11, 13], dtypes.int64)
deleted_key = constant_op.constant([-1, -1], dtypes.int64)
default_value = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant([[11, 12], [11, 14], [12, 13], [13, 14]],
dtypes.int64)
values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t2",
checkpoint=True,
initial_num_buckets=32)
save = saver.Saver()
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
keys2 = constant_op.constant([[12, 13], [15, 16]], dtypes.int64)
table.remove(keys2).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
self.assertIsInstance(val, six.string_types)
self.assertEqual(save_path, val)
with self.session(graph=ops.Graph()) as sess:
empty_key = constant_op.constant([11, 13], dtypes.int64)
deleted_key = constant_op.constant([-1, -1], dtypes.int64)
default_value = constant_op.constant(-1, dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t2",
checkpoint=True,
initial_num_buckets=64)
table.insert(
constant_op.constant([[11, 12], [13, 15]], dtypes.int64),
constant_op.constant([3, 4], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = constant_op.constant(
[[11, 12], [11, 14], [11, 15], [13, 14], [13, 15]], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([0, 1, -1, 3, -1], output.eval())
def testReprobe(self):
with self.cached_session():
      # Insert 6 keys into a table with 8 buckets.
      # The keys are chosen to force collisions (and hence reprobing) when
      # using the GCC STL hash.
keys = constant_op.constant([11, 12, 13, 19, 20, 21], dtypes.int64)
values = constant_op.constant([51, 52, 53, 54, 55, 56], dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=0,
deleted_key=-1,
initial_num_buckets=8)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(6, self.evaluate(table.size()))
input_string = constant_op.constant([10, 11, 12, 13, 14, 19, 20, 21, 22],
dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([9], output.get_shape())
result = self.evaluate(output)
self.assertAllEqual([-1, 51, 52, 53, -1, 54, 55, 56, -1], result)
def testCustomEmptyKey(self):
with self.cached_session():
keys = constant_op.constant([11, 0, 13], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=12,
deleted_key=-1)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant([11, 0, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
def testErrors(self):
with self.cached_session():
table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=0,
deleted_key=-1)
# Inserting the empty key returns an error
keys1 = constant_op.constant([11, 0], dtypes.int64)
values1 = constant_op.constant([0, 1], dtypes.int64)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"empty_key"):
self.evaluate(table.insert(keys1, values1))
# Looking up the empty key returns an error
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"empty_key"):
self.evaluate(table.lookup(keys1))
# Inserting the deleted key returns an error
keys2 = constant_op.constant([11, -1], dtypes.int64)
values2 = constant_op.constant([0, 1], dtypes.int64)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"deleted_key"):
self.evaluate(table.insert(keys2, values2))
      # Looking up the deleted key returns an error
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"deleted_key"):
self.evaluate(table.lookup(keys2))
# Arbitrary tensors of keys are not supported
keys = constant_op.constant([[11, 0], [12, 1]], dtypes.int64)
values = constant_op.constant([[11, 0], [12, 1]], dtypes.int64)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Expected key shape"):
self.evaluate(table.lookup(keys))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Expected key shape"):
self.evaluate(table.insert(keys, values))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Number of buckets must be"):
table2 = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=17,
deleted_key=-1,
initial_num_buckets=12)
self.assertAllEqual(0, self.evaluate(table2.size()))
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Empty and deleted keys must have same shape"):
table3 = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=42,
deleted_key=[1, 2])
self.assertAllEqual(0, self.evaluate(table3.size()))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Empty and deleted keys cannot be equal"):
table4 = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=42,
deleted_key=42)
self.assertAllEqual(0, self.evaluate(table4.size()))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Empty and deleted keys cannot be equal"):
table5 = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=[1, 2, 3],
deleted_key=[1, 2, 3])
self.assertAllEqual(0, self.evaluate(table5.size()))
class IndexTableFromFile(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def test_string_index_table_from_file(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_string_index_table_from_multicolumn_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab1.txt", values=("brain\t300", "salad\t20", "surgery\t1"))
with self.cached_session():
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file,
num_oov_buckets=1,
key_column_index=0,
value_column_index=lookup_ops.TextFileIndex.LINE_NUMBER)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_string_index_table_from_multicolumn_file_custom_delimiter(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab1.txt", values=("brain 300", "salad 20", "surgery 1"))
with self.cached_session():
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file,
num_oov_buckets=1,
key_column_index=0,
value_column_index=lookup_ops.TextFileIndex.LINE_NUMBER,
delimiter=" ")
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_string_index_table_from_file_tensor_filename(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
vocabulary_file = constant_op.constant(vocabulary_file)
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
if not context.executing_eagerly():
self.assertEqual(1,
len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
@test_util.run_v1_only("placeholder usage")
def test_string_index_table_from_file_placeholder_filename(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
vocabulary_placeholder = array_ops.placeholder(dtypes.string, [])
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_placeholder, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
feed_dict = {vocabulary_placeholder.name: vocabulary_file}
lookup_ops.tables_initializer().run(feed_dict=feed_dict)
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
self.assertEqual(0,
len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
def test_int32_index_table_from_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab2.txt", values=("42", "1", "-1000"))
with self.cached_session():
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file,
num_oov_buckets=1,
key_dtype=dtypes.int32)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_int64_index_table_from_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab3.txt", values=("42", "1", "-1000"))
with self.cached_session():
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file,
num_oov_buckets=1,
key_dtype=dtypes.int64)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_index_table_from_file_with_default_value(self):
default_value = -42
vocabulary_file = self._createVocabFile("f2i_vocab4.txt")
with self.cached_session():
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, default_value), self.evaluate(ids))
def test_index_table_from_file_with_oov_buckets(self):
vocabulary_file = self._createVocabFile("f2i_vocab5.txt")
with self.cached_session():
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1000)
ids = table.lookup(
constant_op.constant(["salad", "surgery", "tarkus", "toccata"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual(
(
1, # From vocabulary file.
2, # From vocabulary file.
              867,  # 3 + fingerprint("tarkus") mod 1000.
              860),  # 3 + fingerprint("toccata") mod 1000.
self.evaluate(ids))
def test_index_table_from_file_fails_with_empty_vocabulary_file_name(self):
self.assertRaises(
ValueError, lookup_ops.index_table_from_file, vocabulary_file="")
def test_index_table_from_file_fails_with_empty_vocabulary(self):
self.assertRaises(
ValueError, lookup_ops.index_table_from_file, vocabulary_file=None)
def test_index_table_from_file_str_fails_with_zero_size_vocabulary(self):
vocabulary_file = self._createVocabFile("zero_vocab_str.txt")
self.assertRaisesRegexp(
ValueError,
"vocab_size must be greater than 0, got 0. "
"vocabulary_file: .*zero_vocab_str.txt",
lookup_ops.index_table_from_file,
vocabulary_file=vocabulary_file,
vocab_size=0)
def test_index_table_from_file_tensor_fails_with_zero_size_vocabulary(self):
vocabulary_file = constant_op.constant(
self._createVocabFile("zero_vocab_tensor.txt"))
self.assertRaisesRegexp(
ValueError,
"vocab_size must be greater than 0, got 0. "
"vocabulary_file: .*zero_vocab_tensor.txt",
lookup_ops.index_table_from_file,
vocabulary_file=vocabulary_file,
vocab_size=0)
def test_index_table_from_file_with_vocab_size_too_small(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
with self.cached_session():
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=2)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, -1, -1), self.evaluate(ids))
self.assertEqual(2, self.evaluate(table.size()))
def test_index_table_from_file_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Invalid vocab_size"):
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
self.evaluate(table.initializer)
def test_index_table_from_file_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab8.txt")
self.assertRaises(
ValueError,
lookup_ops.index_table_from_file,
vocabulary_file=vocabulary_file,
vocab_size=0)
with self.cached_session():
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, -1), self.evaluate(ids))
self.assertEqual(3, self.evaluate(table.size()))
def test_index_table_from_file_with_invalid_hashers(self):
vocabulary_file = self._createVocabFile("invalid_hasher.txt")
with self.cached_session():
with self.assertRaises(TypeError):
lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=1)
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=lookup_ops.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
constant_op.constant(["salad", "surgery", "tarkus"]))
def test_index_table_from_file_table_ref_with_oov_buckets(self):
vocabulary_file = self._createVocabFile("f2i_vocab9.txt")
with self.cached_session():
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
self.assertIsNotNone(table.resource_handle)
def test_index_table_from_file_table_ref_without_oov_buckets(self):
vocabulary_file = self._createVocabFile("f2i_vocab10.txt")
with self.cached_session():
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=0)
self.assertIsNotNone(table.resource_handle)
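

# A minimal end-to-end sketch (not part of the test suite) of
# index_table_from_file as exercised above. With num_oov_buckets=1, every
# in-vocabulary token maps to its line number and every OOV token maps to
# the single bucket id vocab_size + 0. `vocab_path` is an assumed path to
# a newline-delimited vocabulary file.
def _index_table_from_file_sketch(vocab_path):
  table = lookup_ops.index_table_from_file(
      vocabulary_file=vocab_path, num_oov_buckets=1)
  # Requires lookup_ops.tables_initializer() to be run first in graph mode.
  return table.lookup(constant_op.constant(["salad", "tarkus"]))
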
class IndexTableFromTensor(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_index_table_from_tensor_with_tensor_init(self):
table = lookup_ops.index_table_from_tensor(
vocabulary_list=("brain", "salad", "surgery"), num_oov_buckets=1)
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(
table.lookup(constant_op.constant(("salad", "surgery", "tarkus"))))
else:
# Reinitializing a table in eager should work.
table = lookup_ops.index_table_from_tensor(
vocabulary_list=("brain", "salad", "surgery"), num_oov_buckets=1)
self.evaluate(lookup_ops.tables_initializer())
ids = table.lookup(constant_op.constant(("salad", "surgery", "tarkus")))
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_int32_index_table_from_tensor_with_tensor_init(self):
with self.cached_session():
table = lookup_ops.index_table_from_tensor(
vocabulary_list=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int32)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_int64_index_table_from_tensor_with_tensor_init(self):
with self.cached_session():
table = lookup_ops.index_table_from_tensor(
vocabulary_list=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int64)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_index_table_from_tensor_with_default_value(self):
default_value = -42
with self.cached_session():
table = lookup_ops.index_table_from_tensor(
vocabulary_list=["brain", "salad", "surgery"],
default_value=default_value)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, default_value), self.evaluate(ids))
def test_index_table_from_tensor_missing_vocabulary_list(self):
with self.cached_session():
with self.assertRaisesRegexp(ValueError,
"vocabulary_list must be specified"):
lookup_ops.index_table_from_tensor(
vocabulary_list=None, num_oov_buckets=1)
def test_index_table_from_tensor_empty_vocabulary_list(self):
with self.cached_session():
with self.assertRaisesRegexp(
errors_impl.OpError, "keys and values cannot be empty"):
_ = lookup_ops.index_table_from_tensor(
vocabulary_list=np.array([], dtype=np.str_), num_oov_buckets=1)
self.evaluate(lookup_ops.tables_initializer())
def test_index_table_from_tensor_with_invalid_hashers(self):
with self.cached_session():
with self.assertRaises(TypeError):
lookup_ops.index_table_from_tensor(
vocabulary_list=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=1)
table = lookup_ops.index_table_from_tensor(
vocabulary_list=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=lookup_ops.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
constant_op.constant(["salad", "surgery", "tarkus"]))
class IndexToStringTableFromFileTest(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def test_index_to_string_table(self):
vocabulary_path = self._createVocabFile("i2f_vocab1.txt")
    # vocabulary_file accepts both a Python string and a string tensor
type_funcs = [str, constant_op.constant]
for type_func in type_funcs:
vocabulary_file = type_func(vocabulary_path)
with self.cached_session():
table = lookup_ops.index_to_string_table_from_file(
vocabulary_file=vocabulary_file)
features = table.lookup(
constant_op.constant([0, 1, 2, 3], dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(features)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
self.evaluate(features))
def test_index_to_string_table_from_multicolumn_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab1.txt", values=("brain\t300", "salad\t20", "surgery\t1"))
with self.cached_session():
table = lookup_ops.index_to_string_table_from_file(
vocabulary_file=vocabulary_file,
key_column_index=lookup_ops.TextFileIndex.LINE_NUMBER,
value_column_index=0)
features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(features)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
self.evaluate(features))
def test_index_to_string_table_from_multicolumn_file_custom_delimiter(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab1.txt", values=("brain 300", "salad 20", "surgery 1"))
with self.cached_session():
table = lookup_ops.index_to_string_table_from_file(
vocabulary_file=vocabulary_file,
key_column_index=lookup_ops.TextFileIndex.LINE_NUMBER,
value_column_index=0,
delimiter=" ")
features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(features)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
self.evaluate(features))
def test_index_to_string_table_with_default_value(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
with self.cached_session():
table = lookup_ops.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(features)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((b"salad", b"surgery", default_value),
self.evaluate(features))
def test_index_to_string_table_with_vocab_size_too_small(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
with self.cached_session():
table = lookup_ops.index_to_string_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=2,
default_value=default_value)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(features)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((b"salad", default_value, default_value),
self.evaluate(features))
def test_index_to_string_table_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Invalid vocab_size"):
_ = lookup_ops.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
self.evaluate(lookup_ops.tables_initializer())
def test_index_to_string_table_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
with self.cached_session():
table = lookup_ops.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(features)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((b"salad", b"surgery", b"UNK"),
self.evaluate(features))
class IndexToStringTableFromTensorTest(test.TestCase):
def test_index_to_string_table_from_tensor(self):
with self.cached_session():
vocabulary_list = constant_op.constant(["brain", "salad", "surgery"])
table = lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=vocabulary_list)
indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
features = table.lookup(indices)
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(features)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
self.evaluate(features))
def test_duplicate_entries(self):
with self.cached_session():
vocabulary_list = constant_op.constant(["hello", "hello"])
table = lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=vocabulary_list)
indices = constant_op.constant([0, 1, 4], dtypes.int64)
features = table.lookup(indices)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((b"hello", b"hello", b"UNK"), self.evaluate(features))
def test_index_to_string_with_default_value(self):
default_value = b"NONE"
with self.cached_session():
vocabulary_list = constant_op.constant(["brain", "salad", "surgery"])
table = lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=vocabulary_list, default_value=default_value)
indices = constant_op.constant([1, 2, 4], dtypes.int64)
features = table.lookup(indices)
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(features)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((b"salad", b"surgery", default_value),
self.evaluate(features))
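

# A round-trip sketch (not part of the test suite) pairing the two
# tensor-backed tables tested above: ids produced by
# index_table_from_tensor map back to strings through
# index_to_string_table_from_tensor over the same vocabulary list.
def _round_trip_sketch():
  vocab = constant_op.constant(["brain", "salad", "surgery"])
  to_id = lookup_ops.index_table_from_tensor(vocabulary_list=vocab)
  to_string = lookup_ops.index_to_string_table_from_tensor(
      vocabulary_list=vocab)
  ids = to_id.lookup(constant_op.constant(["salad", "brain"]))
  # After lookup_ops.tables_initializer() has run (graph mode), this
  # evaluates to [b"salad", b"brain"].
  return to_string.lookup(ids)
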
class IdTableWithHashBucketsTest(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
@test_util.run_deprecated_v1
def testStringIdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup_ops.IdTableWithHashBuckets(
lookup_ops.StaticHashTable(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size), default_value),
oov_buckets)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
self.assertEqual(vocab_size + oov_buckets, table.size().eval())
@test_util.run_deprecated_v1
def testInt32IdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_2.txt", ("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup_ops.IdTableWithHashBuckets(
lookup_ops.StaticHashTable(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value),
oov_buckets,
key_dtype=dtypes.int32)
table.initializer.run()
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
self.assertEqual(vocab_size + oov_buckets, table.size().eval())
@test_util.run_deprecated_v1
def testInt64IdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_3.txt", ("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup_ops.IdTableWithHashBuckets(
lookup_ops.StaticHashTable(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value), oov_buckets)
table.initializer.run()
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
self.assertEqual(vocab_size + oov_buckets, table.size().eval())
@test_util.run_deprecated_v1
def testStringIdTableWithOnlyHashBucket(self):
with self.cached_session():
oov_buckets = 5
      # Create a table that only uses hash buckets: for each input value it
      # returns an id calculated as fingerprint("input") mod oov_buckets.
table = lookup_ops.IdTableWithHashBuckets(None, oov_buckets)
table.initializer.run()
values = constant_op.constant(("brain", "salad", "surgery"))
out = table.lookup(values)
self.assertAllEqual(
[
3, # fingerprint("brain") mod 5.
1, # fingerprint("salad") mod 5.
4 # fingerprint("surgery") mod 5
],
self.evaluate(out))
self.assertEqual(oov_buckets, table.size().eval())
@test_util.run_deprecated_v1
def testInt32IdTableWithOnlyHashBucket(self):
with self.cached_session():
oov_buckets = 5
      # Create a table that only uses hash buckets: for each input value it
      # returns an id calculated as fingerprint("input") mod oov_buckets.
table = lookup_ops.IdTableWithHashBuckets(
None, oov_buckets, key_dtype=dtypes.int32)
table.initializer.run()
input_string = constant_op.constant([42, 1, -1000], dtype=dtypes.int32)
out = table.lookup(input_string)
self.assertAllEqual(
[
1, # fingerprint("42") mod 5.
4, # fingerprint("1") mod 5.
2 # fingerprint("-1000") mod 5
],
self.evaluate(out))
self.assertEqual(oov_buckets, table.size().eval())
def testFloat64IdTableWithOnlyHashBucket(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
lookup_ops.IdTableWithHashBuckets(
None, num_oov_buckets=5, key_dtype=dtypes.float64)
def testBoolIdTableWithOnlyHashBucket(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
lookup_ops.IdTableWithHashBuckets(
None, num_oov_buckets=5, key_dtype=dtypes.bool)
@test_util.run_deprecated_v1
def testIdTableWithHashBucketsWithMultipleInitializers(self):
vocab_file = self._createVocabFile("feat_to_id_4.txt")
with self.cached_session() as sess:
default_value = -1
vocab_size = 3
oov_buckets = 3
vocab_table = lookup_ops.StaticHashTable(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size), default_value)
table1 = lookup_ops.IdTableWithHashBuckets(
vocab_table,
oov_buckets,
hasher_spec=lookup_ops.FastHashSpec,
name="table1")
table2 = lookup_ops.IdTableWithHashBuckets(
vocab_table,
oov_buckets,
hasher_spec=lookup_ops.StrongHashSpec((1, 2)),
name="table2")
lookup_ops.tables_initializer().run()
input_string = constant_op.constant(
["fruit", "brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string)
out2 = table2.lookup(input_string)
out1, out2 = self.evaluate([out1, out2])
self.assertAllEqual([5, 0, 1, 2, 5], out1)
self.assertAllEqual([5, 0, 1, 2, 3], out2)
self.assertEqual(vocab_size + oov_buckets, table1.size().eval())
self.assertEqual(vocab_size + oov_buckets, table2.size().eval())
test_util.assert_ops_in_graph({
"table1_Lookup/hash_bucket": "StringToHashBucketFast",
"table2_Lookup/hash_bucket": "StringToHashBucketStrong",
}, sess.graph)
@test_util.run_deprecated_v1
def testIdTableWithHashBucketsInitializationAcrossSessions(self):
vocab_file = self._createVocabFile("feat_to_id_5.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table1 = lookup_ops.IdTableWithHashBuckets(
lookup_ops.StaticHashTable(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size), default_value),
oov_buckets)
table1.initializer.run()
input_string_1 = constant_op.constant(
["brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string_1)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(out1))
self.assertEqual(vocab_size + oov_buckets, table1.size().eval())
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
      # The underlying lookup table was already initialized in the previous
      # session, so there is no need to call table2.initializer.run().
table2 = lookup_ops.IdTableWithHashBuckets(
lookup_ops.StaticHashTable(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size), default_value),
oov_buckets)
input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
out2 = table2.lookup(input_string_2)
self.assertAllEqual([3, 1, 3], self.evaluate(out2))
self.assertEqual(vocab_size + oov_buckets, table2.size().eval())
@test_util.run_deprecated_v1
def testIdTableWithHashBucketsWithMultipleInitializersDifferentDefault(self):
vocab_file = self._createVocabFile("feat_to_id_6.txt")
with self.cached_session() as sess:
default_value1 = -1
vocab_size = 3
oov_buckets = 0
table1 = lookup_ops.IdTableWithHashBuckets(
lookup_ops.StaticHashTable(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size), default_value1),
oov_buckets)
default_value2 = -2
table2 = lookup_ops.IdTableWithHashBuckets(
lookup_ops.StaticHashTable(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size), default_value2),
oov_buckets)
lookup_ops.tables_initializer().run()
input_string_1 = constant_op.constant(
["brain", "salad", "surgery", "UNK"])
input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
out1 = table1.lookup(input_string_1)
out2 = table2.lookup(input_string_2)
out1, out2 = self.evaluate([out1, out2])
self.assertAllEqual([0, 1, 2, -1], out1)
self.assertAllEqual([-2, 1, -2], out2)
self.assertEqual(vocab_size + oov_buckets, table1.size().eval())
self.assertEqual(vocab_size + oov_buckets, table2.size().eval())
@test_util.run_deprecated_v1
def testSparseTensor(self):
vocab_file = self._createVocabFile("feat_to_id_7.txt")
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.cached_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "brain", "surgery", "tarkus"],
dtypes.string),
constant_op.constant(input_shape, dtypes.int64))
table = lookup_ops.IdTableWithHashBuckets(
lookup_ops.StaticHashTable(
lookup_ops.TextFileIdTableInitializer(vocab_file, vocab_size=3),
-1), 1)
table.initializer.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
@test_util.run_deprecated_v1
def testInt32SparseTensor(self):
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.cached_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),
constant_op.constant(input_shape, dtypes.int64))
table = lookup_ops.IdTableWithHashBuckets(
lookup_ops.StaticHashTable(
lookup_ops.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1),
1,
key_dtype=dtypes.int32)
table.initializer.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
@test_util.run_deprecated_v1
def testInt64SparseTensor(self):
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.cached_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),
constant_op.constant(input_shape, dtypes.int64))
table = lookup_ops.IdTableWithHashBuckets(
lookup_ops.StaticHashTable(
lookup_ops.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1),
1,
key_dtype=dtypes.int64)
table.initializer.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testIdTableWithHashBucketsWithInvalidHashers(self):
vocab_file = self._createVocabFile("feat_to_id_4.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
lookup_table = lookup_ops.StaticHashTable(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size), default_value)
with self.assertRaises(TypeError):
lookup_ops.IdTableWithHashBuckets(
lookup_table, oov_buckets, hasher_spec=1)
table = lookup_ops.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup_ops.HasherSpec("my-awesome-hash", None))
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
with self.assertRaises(ValueError):
table.lookup(input_string)
with self.assertRaises(ValueError):
table = lookup_ops.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup_ops.StrongHashSpec([]))
with self.assertRaises(ValueError):
table = lookup_ops.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup_ops.StrongHashSpec([1, 2, 3]))
with self.assertRaises(TypeError):
table = lookup_ops.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup_ops.StrongHashSpec([None, 2]))
def testIdTableWithHashBucketsNoInnerTable(self):
with self.cached_session():
table = lookup_ops.IdTableWithHashBuckets(None, num_oov_buckets=1)
self.assertIsNone(table.resource_handle)
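def _id_table_with_hash_buckets_sketch():
  """Illustrative sketch (not exercised by the tests above).

  Wraps a StaticHashTable so in-vocabulary keys map through the table while
  out-of-vocabulary keys hash into `num_oov_buckets` extra ids. The helper
  name and sample keys are hypothetical.
  """
  vocab_table = lookup_ops.StaticHashTable(
      lookup_ops.KeyValueTensorInitializer(
          ("brain", "salad", "surgery"), (0, 1, 2), dtypes.string,
          dtypes.int64), -1)
  table = lookup_ops.IdTableWithHashBuckets(vocab_table, num_oov_buckets=2)
  # After running the table initializer, ids 0..2 come from the vocabulary
  # and OOV keys land in ids 3..4.
  return table.lookup(constant_op.constant(["salad", "UNK"]))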
class MutableHashTableOpTest(test.TestCase):
def testMutableHashTable(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery", "tarkus"])
values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(4, self.evaluate(table.size()))
remove_string = constant_op.constant(["tarkus", "tank"])
self.evaluate(table.remove(remove_string))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
exported_keys, exported_values = table.export()
      # Exported data is in the order of the internal map, i.e. undefined.
sorted_keys = np.sort(self.evaluate(exported_keys))
sorted_values = np.sort(self.evaluate(exported_values))
self.assertAllEqual([b"brain", b"salad", b"surgery"], sorted_keys)
self.assertAllEqual([0, 1, 2], sorted_values)
@test_util.run_v1_only("SaverV1")
def testSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.session(graph=ops.Graph()) as sess:
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
default_val = -1
keys = constant_op.constant(["b", "c", "d"], dtypes.string)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.MutableHashTable(
dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
save = saver.Saver()
self.evaluate(variables.global_variables_initializer())
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
val = save.save(sess, save_path)
self.assertIsInstance(val, six.string_types)
self.assertEqual(save_path, val)
with self.session(graph=ops.Graph()) as sess:
v0 = variables.Variable(-1.0, name="v0")
v1 = variables.Variable(-1.0, name="v1")
default_val = -1
table = lookup_ops.MutableHashTable(
dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
self.evaluate(
table.insert(
constant_op.constant(["a", "c"], dtypes.string),
constant_op.constant([12, 24], dtypes.int64)))
self.assertAllEqual(2, self.evaluate(table.size()))
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["a", "b", "c", "d", "e"],
dtypes.string)
output = table.lookup(input_string)
self.assertAllEqual([-1, 0, 1, 2, -1], self.evaluate(output))
@test_util.run_v1_only("SaverV1")
def testSaveRestoreOnlyTable(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.session(graph=ops.Graph()) as sess:
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
default_val = -1
keys = constant_op.constant(["b", "c", "d"], dtypes.string)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.MutableHashTable(
dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
save = saver.Saver([table])
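      # Passing [table] checkpoints only the table; v0 and v1 are not saved.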
self.evaluate(variables.global_variables_initializer())
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
val = save.save(sess, save_path)
self.assertIsInstance(val, six.string_types)
self.assertEqual(save_path, val)
with self.session(graph=ops.Graph()) as sess:
default_val = -1
table = lookup_ops.MutableHashTable(
dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
self.evaluate(
table.insert(
constant_op.constant(["a", "c"], dtypes.string),
constant_op.constant([12, 24], dtypes.int64)))
self.assertAllEqual(2, self.evaluate(table.size()))
save = saver.Saver([table])
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["a", "b", "c", "d", "e"],
dtypes.string)
output = table.lookup(input_string)
self.assertAllEqual([-1, 0, 1, 2, -1], self.evaluate(output))
@test_util.run_in_graph_and_eager_modes
def testObjectSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_prefix = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
default_val = -1
keys = constant_op.constant(["b", "c", "d"], dtypes.string)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.MutableHashTable(
dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
checkpoint = trackable.Checkpoint(table=table, v0=v0, v1=v1)
self.evaluate([v0.initializer, v1.initializer])
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
save_path = checkpoint.save(save_prefix)
del table, checkpoint, v0, v1
v0 = variables.Variable(-1.0, name="v0")
v1 = variables.Variable(-1.0, name="v1")
default_val = -1
table = lookup_ops.MutableHashTable(
dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
self.evaluate(
table.insert(
constant_op.constant(["a", "c"], dtypes.string),
constant_op.constant([12, 24], dtypes.int64)))
self.assertAllEqual(2, self.evaluate(table.size()))
checkpoint = trackable.Checkpoint(table=table, v0=v0, v1=v1)
# Restore the saved values in the parameter nodes.
checkpoint.restore(save_path).run_restore_ops()
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["a", "b", "c", "d", "e"],
dtypes.string)
output = table.lookup(input_string)
self.assertAllEqual([-1, 0, 1, 2, -1], self.evaluate(output))
@test_util.run_v1_only("Multiple sessions")
def testSharing(self):
# Start a server to store the table state
server = server_lib.Server({"local0": ["localhost:0"]},
protocol="grpc",
start=True)
# Create two sessions sharing the same state
session1 = session.Session(server.target)
session2 = session.Session(server.target)
table = lookup_ops.MutableHashTable(
dtypes.int64, dtypes.string, "-", name="t1")
# Populate the table in the first session
with session1:
self.assertAllEqual(0, table.size().eval())
keys = constant_op.constant([11, 12], dtypes.int64)
values = constant_op.constant(["a", "b"])
table.insert(keys, values).run()
self.assertAllEqual(2, table.size().eval())
output = table.lookup(constant_op.constant([11, 12, 13], dtypes.int64))
self.assertAllEqual([b"a", b"b", b"-"], output.eval())
# Verify that we can access the shared data from the second session
with session2:
self.assertAllEqual(2, table.size().eval())
output = table.lookup(constant_op.constant([10, 11, 12], dtypes.int64))
self.assertAllEqual([b"-", b"a", b"b"], output.eval())
def testMutableHashTableOfTensors(self):
with self.cached_session():
default_val = constant_op.constant([-1, -1], dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery", "tarkus"])
values = constant_op.constant([[0, 1], [2, 3], [4, 5], [6, 7]],
dtypes.int64)
table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(4, self.evaluate(table.size()))
remove_string = constant_op.constant(["tarkus", "tank"])
self.evaluate(table.remove(remove_string))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3, 2], output.get_shape())
result = self.evaluate(output)
self.assertAllEqual([[0, 1], [2, 3], [-1, -1]], result)
exported_keys, exported_values = table.export()
      # Exported data is in the order of the internal map, i.e. undefined.
sorted_keys = np.sort(self.evaluate(exported_keys))
sorted_values = np.sort(self.evaluate(exported_values), axis=0)
self.assertAllEqual([b"brain", b"salad", b"surgery"], sorted_keys)
sorted_expected_values = np.sort([[4, 5], [2, 3], [0, 1]], axis=0)
self.assertAllEqual(sorted_expected_values, sorted_values)
def testMutableHashTableExportInsert(self):
with self.cached_session():
default_val = constant_op.constant([-1, -1], dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64)
table1 = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, self.evaluate(table1.size()))
self.evaluate(table1.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table1.size()))
input_string = constant_op.constant(["brain", "salad", "tank"])
expected_output = [[0, 1], [2, 3], [-1, -1]]
output1 = table1.lookup(input_string)
self.assertAllEqual(expected_output, self.evaluate(output1))
exported_keys, exported_values = table1.export()
self.assertAllEqual(3, self.evaluate(exported_keys).size)
self.assertAllEqual(6, self.evaluate(exported_values).size)
# Populate a second table from the exported data
table2 = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, self.evaluate(table2.size()))
self.evaluate(table2.insert(exported_keys, exported_values))
self.assertAllEqual(3, self.evaluate(table2.size()))
# Verify lookup result is still the same
output2 = table2.lookup(input_string)
self.assertAllEqual(expected_output, self.evaluate(output2))
def testMutableHashTableOfTensorsInvalidShape(self):
with self.cached_session():
default_val = constant_op.constant([-1, -1], dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
# Shape [6] instead of [3, 2]
values = constant_op.constant([0, 1, 2, 3, 4, 5], dtypes.int64)
with self.assertRaisesOpError("Expected shape"):
self.evaluate(table.insert(keys, values))
# Shape [2,3] instead of [3, 2]
values = constant_op.constant([[0, 1, 2], [3, 4, 5]], dtypes.int64)
with self.assertRaisesOpError("Expected shape"):
self.evaluate(table.insert(keys, values))
# Shape [2, 2] instead of [3, 2]
values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64)
with self.assertRaisesOpError("Expected shape"):
self.evaluate(table.insert(keys, values))
# Shape [3, 1] instead of [3, 2]
values = constant_op.constant([[0], [2], [4]], dtypes.int64)
with self.assertRaisesOpError("Expected shape"):
self.evaluate(table.insert(keys, values))
      # Valid insert.
values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64)
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
def testMutableHashTableInvalidDefaultValue(self):
with self.cached_session():
default_val = constant_op.constant([[-1, -1]], dtypes.int64)
with self.assertRaisesOpError("Default value must be a vector"):
table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, self.evaluate(table.size()))
def testMutableHashTableDuplicateInsert(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery", "brain"])
values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = self.evaluate(output)
self.assertAllEqual([3, 1, -1], result)
def testMutableHashTableFindHighRank(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant([["brain", "salad"],
["tank", "tarkus"]])
output = table.lookup(input_string)
self.assertAllEqual([2, 2], output.get_shape())
result = self.evaluate(output)
self.assertAllEqual([[0, 1], [-1, -1]], result)
def testMutableHashTableInsertHighRank(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant([["brain", "salad"], ["surgery", "tank"]])
values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64)
table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.evaluate(table.insert(keys, values))
self.assertAllEqual(4, self.evaluate(table.size()))
input_string = constant_op.constant(["brain", "salad", "tank", "tarkus"])
output = table.lookup(input_string)
result = self.evaluate(output)
self.assertAllEqual([0, 1, 3, -1], result)
def testMutableHashTableRemoveHighRank(self):
    with self.cached_session():
default_val = -1
keys = constant_op.constant([["brain", "salad"], ["surgery", "tank"]])
values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64)
table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.evaluate(table.insert(keys, values))
self.assertAllEqual(4, self.evaluate(table.size()))
remove_string = constant_op.constant(["salad", "tarkus"])
self.evaluate(table.remove(remove_string))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["brain", "salad", "tank", "tarkus"])
output = table.lookup(input_string)
result = self.evaluate(output)
self.assertAllEqual([0, -1, 3, -1], result)
def testMutableHashTableOfTensorsFindHighRank(self):
with self.cached_session():
default_val = constant_op.constant([-1, -1, -1], dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([[0, 1, 2], [2, 3, 4], [4, 5, 6]],
dtypes.int64)
table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant([["brain", "salad"],
["tank", "tarkus"]])
output = table.lookup(input_string)
self.assertAllEqual([2, 2, 3], output.get_shape())
result = self.evaluate(output)
self.assertAllEqual(
[[[0, 1, 2], [2, 3, 4]], [[-1, -1, -1], [-1, -1, -1]]], result)
def testMutableHashTableOfTensorsRemoveHighRank(self):
    with self.cached_session():
default_val = constant_op.constant([-1, -1, -1], dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([[0, 1, 2], [2, 3, 4], [4, 5, 6]],
dtypes.int64)
table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
remove_string = constant_op.constant([["brain", "tank"]])
self.evaluate(table.remove(remove_string))
self.assertAllEqual(2, self.evaluate(table.size()))
input_string = constant_op.constant([["brain", "salad"],
["surgery", "tank"]])
output = table.lookup(input_string)
self.assertAllEqual([2, 2, 3], output.get_shape())
result = self.evaluate(output)
self.assertAllEqual(
[[[-1, -1, -1], [2, 3, 4]], [[4, 5, 6], [-1, -1, -1]]], result)
def testMultipleMutableHashTables(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table1 = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table2 = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table3 = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.evaluate(table1.insert(keys, values))
self.evaluate(table2.insert(keys, values))
self.evaluate(table3.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table1.size()))
self.assertAllEqual(3, self.evaluate(table2.size()))
self.assertAllEqual(3, self.evaluate(table3.size()))
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = self.evaluate([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testMutableHashTableWithTensorDefault(self):
with self.cached_session():
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
def testSignatureMismatch(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
# insert with keys of the wrong type
with self.assertRaises(ValueError):
self.evaluate(table.insert(constant_op.constant([4, 5, 6]), values))
# insert with values of the wrong type
with self.assertRaises(ValueError):
self.evaluate(table.insert(keys, constant_op.constant(["a", "b", "c"])))
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string_ref = variables.Variable("brain")
input_int64_ref = variables.Variable(-1, dtype=dtypes.int64)
self.evaluate(variables.global_variables_initializer())
# Ref types do not produce an insert signature mismatch.
self.evaluate(table.insert(input_string_ref, input_int64_ref))
self.assertAllEqual(3, self.evaluate(table.size()))
# Ref types do not produce a lookup signature mismatch.
self.assertEqual(-1, self.evaluate(table.lookup(input_string_ref)))
# lookup with keys of the wrong type
input_string = constant_op.constant([1, 2, 3], dtypes.int64)
with self.assertRaises(ValueError):
self.evaluate(table.lookup(input_string))
# default value of the wrong type
with self.assertRaises(TypeError):
lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, "UNK")
def testMutableHashTableStringFloat(self):
with self.cached_session():
default_val = -1.5
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1.1, 2.2], dtypes.float32)
table = lookup_ops.MutableHashTable(dtypes.string, dtypes.float32,
default_val)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = self.evaluate(output)
self.assertAllClose([0, 1.1, default_val], result)
def testMutableHashTableIntFloat(self):
with self.cached_session():
default_val = -1.0
keys = constant_op.constant([3, 7, 0], dtypes.int64)
values = constant_op.constant([7.5, -1.2, 9.9], dtypes.float32)
table = lookup_ops.MutableHashTable(dtypes.int64, dtypes.float32,
default_val)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant([7, 0, 11], dtypes.int64)
output = table.lookup(input_string)
result = self.evaluate(output)
self.assertAllClose([-1.2, 9.9, default_val], result)
def testMutableHashTableInt64String(self):
with self.cached_session():
default_val = "n/a"
keys = constant_op.constant([0, 1, 2], dtypes.int64)
values = constant_op.constant(["brain", "salad", "surgery"])
table = lookup_ops.MutableHashTable(dtypes.int64, dtypes.string,
default_val)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant([0, 1, 3], dtypes.int64)
output = table.lookup(input_string)
result = self.evaluate(output)
self.assertAllEqual((b"brain", b"salad", b"n/a"), result)
class MutableHashTableBenchmark(test.Benchmark):
def _create_table(self):
return lookup_ops.MutableHashTable(dtypes.int64, dtypes.float32, 0.0)
def benchmark_single_repeated_scalar_insert_scalar(self):
table = self._create_table()
value = variables.Variable(1.0)
insert = table.insert(0, value)
size = table.size()
with session.Session() as sess:
sess.run(value.initializer)
self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=10000)
assert sess.run(size) == 1
def benchmark_many_repeated_scalar_insert_scalar(self):
table = self._create_table()
c = dataset_ops.make_one_shot_iterator(counter.Counter()).get_next()
value = variables.Variable(1.0)
insert = table.insert(c, value)
size = table.size()
with session.Session() as sess:
sess.run(value.initializer)
self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=10000)
assert sess.run(size) >= 10000
def benchmark_single_repeated_batch_32_insert_scalar(self):
table = self._create_table()
value = variables.Variable([1.0] * 32)
insert = table.insert(list(range(32)), value)
size = table.size()
with session.Session() as sess:
sess.run(value.initializer)
self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=1000)
assert sess.run(size) == 32
def benchmark_many_repeated_batch_32_insert_scalar(self):
table = self._create_table()
c = dataset_ops.make_one_shot_iterator(counter.Counter()).get_next()
value = variables.Variable([1.0] * 32)
insert = table.insert(32 * c + list(range(32)), value)
size = table.size()
with session.Session() as sess:
sess.run(value.initializer)
self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=1000)
assert sess.run(size) >= 1000 * 32
class DenseHashTableBenchmark(MutableHashTableBenchmark):
def _create_table(self):
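    # DenseHashTable reserves two sentinel keys: `empty_key` marks unused
    # buckets and `deleted_key` marks removed entries; neither value may be
    # inserted as a real key.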
return lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.float32,
default_value=0.0,
empty_key=-1,
deleted_key=-2)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/lookup_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.argmax_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class GradientCorrectnessTest(test.TestCase):
@test_util.run_deprecated_v1
def testMultipleOutputChainedGradients(self):
with self.cached_session() as sess:
x = constant_op.constant(1.0, dtype=dtypes.float32)
yexp = math_ops.exp(x)
yexplog = math_ops.log(yexp)
grads = gradients_impl.gradients([yexp, yexplog], [x])
grad_vals = self.evaluate(grads)
exp1_plus_one = (1.0 + np.exp(1.0)).astype(np.float32)
# [dexp(x)/dx + d(log(exp(x)))/dx] @ x=1 == exp(1) + 1
self.assertAllClose(grad_vals[0], exp1_plus_one)
@test_util.run_deprecated_v1
def testIdentityGradient(self):
x = constant_op.constant(3.)
dx_dx, = gradients_impl.gradients(x, x)
with self.cached_session() as sess:
self.assertAllClose(1., self.evaluate(dx_dx))
@test_util.run_deprecated_v1
def testIntegerIdentityGradient(self):
x = constant_op.constant(3)
dx_dx, = gradients_impl.gradients(x, x)
with self.cached_session() as sess:
self.assertAllClose(1, self.evaluate(dx_dx))
@test_util.run_deprecated_v1
def testGradientWithIntegerPath(self):
x = constant_op.constant([3.9, 4.1])
k = math_ops.cast(math_ops.cast(x, dtypes.int32), dtypes.float32)
y = x * k
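    # Casting through int32 severs the gradient path, so k behaves as a
    # constant ([3., 4.]) and dy/dx == k.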
dy_dx, = gradients_impl.gradients(y, x)
with self.cached_session() as sess:
self.assertAllClose([3., 4.], self.evaluate(dy_dx))
@test_util.run_deprecated_v1
def testNoIntegerGradient1(self):
x = constant_op.constant([3.9, 4.1])
k = math_ops.cast(math_ops.cast(x, dtypes.int32), dtypes.float32)
y = k * k
dy_dx, = gradients_impl.gradients(y, x)
self.assertIsNone(dy_dx)
@test_util.run_deprecated_v1
def testNoIntegerGradient2(self):
k = constant_op.constant([3, 4])
x = math_ops.cast(k, dtypes.float32)
y = x * x
dy_dk, = gradients_impl.gradients(y, k)
self.assertIsNone(dy_dk)
@test_util.run_deprecated_v1
def testNoIntegerGradient3(self):
k = constant_op.constant([3, 4])
m = k * k
dm_dk, = gradients_impl.gradients(m, k)
self.assertIsNone(dm_dk)
@test_util.run_deprecated_v1
def testNoIntegerGradient4(self):
k = constant_op.constant([3, 4])
m = k * k * k
dm_dk, = gradients_impl.gradients(m, k)
self.assertIsNone(dm_dk)
@test_util.run_deprecated_v1
def testNoIntegerGradient5(self):
k = constant_op.constant([3, 4])
m = k * k
n = m * m
dn_dk, = gradients_impl.gradients(n, k)
self.assertIsNone(dn_dk)
@test_util.run_deprecated_v1
def testNoIntegerGradient6(self):
k = constant_op.constant(3)
x = math_ops.cast(k, dtypes.float32)
grad_1, = gradients_impl.gradients(k * k, k)
grad_2, = gradients_impl.gradients(x * x, k)
grad_3, = gradients_impl.gradients(math_ops.square(k), k)
grad_4, = gradients_impl.gradients(math_ops.square(x), k)
self.assertIsNone(grad_1)
self.assertIsNone(grad_2)
self.assertIsNone(grad_3)
self.assertIsNone(grad_4)
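def _integer_gradient_sketch():
  """Illustrative sketch (not exercised by the tests above).

  Gradients are only defined along floating-point paths; any edge that
  produces an integer tensor yields a None gradient. The helper name is
  hypothetical.
  """
  k = constant_op.constant([3, 4])             # int32
  x = math_ops.cast(k, dtypes.float32)         # float path starts here
  dy_dx, = gradients_impl.gradients(x * x, x)  # defined: all-float path
  dy_dk, = gradients_impl.gradients(x * x, k)  # None: crosses the int cast
  return dy_dx, dy_dk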
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/gradient_correctness_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for broadcast_to ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test as test_lib
class BroadcastToTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBroadcastToBasic(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64]:
with self.session(use_gpu=True):
x = np.array([1, 2, 3], dtype=dtype)
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf.eval(), v_np)
@test_util.run_deprecated_v1
def testBroadcastToString(self):
with self.session(use_gpu=True):
x = np.array([b"1", b"2", b"3"])
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf.eval(), v_np)
@test_util.run_deprecated_v1
def testBroadcastToBool(self):
with self.session(use_gpu=True):
      x = np.array([True, False, True], dtype=np.bool_)
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf.eval(), v_np)
@test_util.run_deprecated_v1
def testBroadcastToShape(self):
for input_dim in range(1, 6):
for output_dim in range(input_dim, 6):
with self.cached_session(use_gpu=True):
input_shape = [2] * input_dim
output_shape = [2] * output_dim
x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
v_np = np.broadcast_to(x, output_shape)
self.assertAllEqual(v_tf.eval(), v_np)
@test_util.run_deprecated_v1
def testBroadcastToShapeInnerDim(self):
input_shape = [2, 1, 3]
output_shape = [2, 5, 3]
with self.cached_session(use_gpu=True):
x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
v_np = np.broadcast_to(x, output_shape)
self.assertAllEqual(v_tf.eval(), v_np)
@test_util.run_deprecated_v1
def testBroadcastToShapeLargerDim(self):
input_shape = [2, 1, 3, 2, 2, 2]
output_shape = [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 15, 3, 2, 2, 2]
with self.cached_session(use_gpu=True):
x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
v_np = np.broadcast_to(x, output_shape)
self.assertAllEqual(v_tf.eval(), v_np)
@test_util.run_deprecated_v1
def testBroadcastToShapeLargerDim2(self):
input_shape = [2, 1, 3, 2, 2, 2, 1, 1, 1]
output_shape = [1, 1, 1, 2, 5, 3, 2, 2, 2, 3, 3, 3]
with self.cached_session(use_gpu=True):
x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
v_np = np.broadcast_to(x, output_shape)
self.assertAllEqual(v_tf.eval(), v_np)
@test_util.run_deprecated_v1
def testBroadcastToScalar(self):
with self.session(use_gpu=True):
x = np.array(1, dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf.eval(), v_np)
@test_util.run_deprecated_v1
def testBroadcastScalarToNonScalar(self):
with self.session(use_gpu=True):
      x = np.array(1.0, dtype=np.float64)
v_tf = array_ops.broadcast_to(constant_op.constant(1.0), [2, 3, 4,
1, 1, 1])
v_np = np.broadcast_to(x, [2, 3, 4, 1, 1, 1])
self.assertAllEqual(v_tf.eval(), v_np)
@test_util.run_deprecated_v1
def testBroadcastToShapeTypeAndInference(self):
for dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(use_gpu=True):
x = np.array([1, 2, 3])
v_tf = array_ops.broadcast_to(
constant_op.constant(x),
constant_op.constant([3, 3], dtype=dtype))
shape = v_tf.get_shape().as_list()
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf.eval(), v_np)
# check shape inference when shape input is constant
self.assertAllEqual(shape, v_np.shape)
def testBroadcastToBadOutputShape(self):
with context.eager_mode():
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Unable to broadcast tensor of shape"):
self.evaluate(
array_ops.broadcast_to(
constant_op.constant([0, 1]), constant_op.constant([2, 1])))
@test_util.run_deprecated_v1
def testGradientForScalar(self):
x = constant_op.constant(1, dtype=dtypes.float32)
v = array_ops.broadcast_to(x, [2, 4, 3])
out = 2 * v
with self.cached_session():
err = gradient_checker.compute_gradient_error(x, x.get_shape(), out,
out.get_shape())
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testGradientWithSameRank(self):
x = constant_op.constant(np.reshape(np.arange(6), (2, 1, 3)),
dtype=dtypes.float32)
v = array_ops.broadcast_to(x, [2, 5, 3])
out = 2 * v
with self.cached_session():
err = gradient_checker.compute_gradient_error(x, x.get_shape(),
out, out.get_shape())
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testGradientWithIncreasingRank(self):
x = constant_op.constant([[1], [2]],
dtype=dtypes.float32)
v = array_ops.broadcast_to(x, [5, 2, 3])
out = 2 * v
with self.cached_session():
err = gradient_checker.compute_gradient_error(x, x.get_shape(),
out, out.get_shape())
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testGradientWithBroadcastAllDimensions(self):
x = constant_op.constant([1], dtype=dtypes.float32)
v = array_ops.broadcast_to(x, [5, 2, 3])
out = 2 * v
with self.cached_session():
err = gradient_checker.compute_gradient_error(x, x.get_shape(),
out, out.get_shape())
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testGradientWithLargeDim(self):
input_shape = [2, 1, 3, 2, 2, 2, 1, 1, 1]
output_shape = [1, 1, 1, 2, 5, 3, 2, 2, 2, 3, 3, 3]
x = constant_op.constant(np.array(np.random.randn(*input_shape),
dtype=np.float32))
v = array_ops.broadcast_to(x, output_shape)
out = 2 * v
with self.cached_session():
err = gradient_checker.compute_gradient_error(x, x.get_shape(),
out, out.get_shape())
self.assertLess(err, 1e-4)
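def _broadcast_to_gradient_sketch():
  """Illustrative sketch (not exercised by the tests above).

  The gradient of broadcast_to reduce-sums the upstream gradient over the
  broadcast dimensions, recovering the input shape. The helper name is
  hypothetical; gradients_impl is imported locally since this module
  otherwise only uses gradient_checker.
  """
  from tensorflow.python.ops import gradients_impl  # local, sketch-only
  x = constant_op.constant([[1.], [2.]])  # shape [2, 1]
  v = array_ops.broadcast_to(x, [2, 3])   # shape [2, 3]
  # Each input entry receives the sum of the 3 ones broadcast from it,
  # so grad == [[3.], [3.]].
  grad, = gradients_impl.gradients(v, x)
  return grad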
if __name__ == "__main__":
test_lib.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/broadcast_to_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ReduceJoin op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
def _input_array(num_dims):
"""Creates an ndarray where each element is the binary of its linear index.
Args:
num_dims: The number of dimensions to create.
Returns:
An ndarray of shape [2] * num_dims.
"""
formatter = "{:0%db}" % num_dims
strings = [formatter.format(i) for i in xrange(2**num_dims)]
return np.array(strings, dtype="S%d" % num_dims).reshape([2] * num_dims)
def _joined_array(num_dims, reduce_dim):
"""Creates an ndarray with the result from reduce_join on input_array.
Args:
num_dims: The number of dimensions of the original input array.
reduce_dim: The dimension to reduce.
Returns:
An ndarray of shape [2] * (num_dims - 1).
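  For example, num_dims=2 with reduce_dim=0 joins along the first axis of
  [["00", "01"], ["10", "11"]], giving ["0010", "0111"].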
"""
formatter = "{:0%db}" % (num_dims - 1)
result = np.zeros(shape=[2] * (num_dims - 1), dtype="S%d" % (2 * num_dims))
flat = result.ravel()
for i in xrange(2**(num_dims - 1)):
dims = formatter.format(i)
flat[i] = "".join([(dims[:reduce_dim] + "%d" + dims[reduce_dim:]) % j
for j in xrange(2)])
return result
class UnicodeTestCase(test.TestCase):
"""Test case with Python3-compatible string comparator."""
def assertAllEqualUnicode(self, truth, actual):
self.assertAllEqual(
np.array(truth).astype("U"), np.array(actual).astype("U"))
class ReduceJoinTestHelperTest(UnicodeTestCase):
"""Tests for helper functions."""
def testInputArray(self):
num_dims = 3
truth = ["{:03b}".format(i) for i in xrange(2**num_dims)]
output_array = _input_array(num_dims).reshape([-1])
self.assertAllEqualUnicode(truth, output_array)
def testJoinedArray(self):
num_dims = 3
truth_dim_zero = [["000100", "001101"], ["010110", "011111"]]
truth_dim_one = [["000010", "001011"], ["100110", "101111"]]
truth_dim_two = [["000001", "010011"], ["100101", "110111"]]
output_array_dim_zero = _joined_array(num_dims, reduce_dim=0)
output_array_dim_one = _joined_array(num_dims, reduce_dim=1)
output_array_dim_two = _joined_array(num_dims, reduce_dim=2)
self.assertAllEqualUnicode(truth_dim_zero, output_array_dim_zero)
self.assertAllEqualUnicode(truth_dim_one, output_array_dim_one)
self.assertAllEqualUnicode(truth_dim_two, output_array_dim_two)
class ReduceJoinTest(UnicodeTestCase):
def _testReduceJoin(self,
input_array,
truth,
truth_shape,
axis,
keep_dims=False,
separator=""):
"""Compares the output of reduce_join to an expected result.
Args:
input_array: The string input to be joined.
truth: An array or np.array of the expected result.
truth_shape: An array or np.array of the expected shape.
axis: The indices to reduce over.
keep_dims: Whether or not to retain reduced dimensions.
separator: The separator to use for joining.
"""
with self.cached_session():
output = string_ops.reduce_join(
inputs=input_array,
axis=axis,
keep_dims=keep_dims,
separator=separator)
output_array = self.evaluate(output)
self.assertAllEqualUnicode(truth, output_array)
self.assertAllEqual(truth_shape, output.get_shape())
def _testMultipleReduceJoin(self, input_array, axis, separator=" "):
"""Tests reduce_join for one input and multiple axes.
    Does so by comparing the output to that of nested single-axis
    reduce_join calls.
The correctness of single-dimension reduce_join is verified by other
tests below using _testReduceJoin.
Args:
input_array: The input to test.
axis: The indices to reduce.
separator: The separator to use when joining.
"""
with self.cached_session():
output = string_ops.reduce_join(
inputs=input_array, axis=axis, keep_dims=False, separator=separator)
output_keep_dims = string_ops.reduce_join(
inputs=input_array, axis=axis, keep_dims=True, separator=separator)
truth = input_array
for index in axis:
truth = string_ops.reduce_join(
inputs=truth, axis=index, keep_dims=True, separator=separator)
if not axis:
truth = constant_op.constant(truth)
truth_squeezed = array_ops.squeeze(truth, axis=axis)
output_array = self.evaluate(output)
output_keep_dims_array = self.evaluate(output_keep_dims)
truth_array = self.evaluate(truth)
truth_squeezed_array = self.evaluate(truth_squeezed)
self.assertAllEqualUnicode(truth_array, output_keep_dims_array)
self.assertAllEqualUnicode(truth_squeezed_array, output_array)
self.assertAllEqual(truth.get_shape(), output_keep_dims.get_shape())
self.assertAllEqual(truth_squeezed.get_shape(), output.get_shape())
def testRankOne(self):
input_array = ["this", "is", "a", "test"]
truth = "thisisatest"
truth_shape = []
self._testReduceJoin(input_array, truth, truth_shape, axis=0)
def testRankTwo(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = ["thisplease", "isdo", "anot", "testpanic"]
truth_shape_dim_zero = [4]
truth_dim_one = ["thisisatest", "pleasedonotpanic"]
truth_shape_dim_one = [2]
self._testReduceJoin(
input_array, truth_dim_zero, truth_shape_dim_zero, axis=0)
self._testReduceJoin(
input_array, truth_dim_one, truth_shape_dim_one, axis=1)
expected_val = "thisisatestpleasedonotpanic"
expected_shape = []
self._testReduceJoin(input_array, expected_val, expected_shape, axis=None)
# Using axis=[] is a no-op.
expected_val = input_array
expected_shape = [2, 4]
self._testReduceJoin(input_array, expected_val, expected_shape, axis=[])
def testRankFive(self):
input_array = _input_array(num_dims=5)
truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
truth_shape = [2] * 4
for i in xrange(5):
self._testReduceJoin(input_array, truths[i], truth_shape, axis=i)
def testNegative(self):
input_array = _input_array(num_dims=5)
truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
truth_shape = [2] * 4
for i in xrange(5):
self._testReduceJoin(input_array, truths[i], truth_shape, axis=i - 5)
def testSingletonDimension(self):
input_arrays = [
_input_array(num_dims=5).reshape([2] * i + [1] + [2] * (5 - i))
for i in xrange(6)
]
truth = _input_array(num_dims=5)
truth_shape = [2] * 5
for i in xrange(6):
self._testReduceJoin(input_arrays[i], truth, truth_shape, axis=i)
def testSeparator(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = ["this please", "is do", "a not", "test panic"]
truth_shape_dim_zero = [4]
truth_dim_one = ["this is a test", "please do not panic"]
truth_shape_dim_one = [2]
self._testReduceJoin(
input_array,
truth_dim_zero,
truth_shape_dim_zero,
axis=0,
separator=" ")
self._testReduceJoin(
input_array,
truth_dim_one,
truth_shape_dim_one,
axis=1,
separator=" ")
@test_util.run_deprecated_v1
def testUnknownShape(self):
input_array = [["a"], ["b"]]
truth = ["ab"]
truth_shape = None
with self.cached_session():
placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
reduced = string_ops.reduce_join(placeholder, axis=0)
output_array = reduced.eval(feed_dict={placeholder.name: input_array})
self.assertAllEqualUnicode(truth, output_array)
self.assertAllEqual(truth_shape, reduced.get_shape())
@test_util.run_deprecated_v1
def testUnknownIndices(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = ["thisplease", "isdo", "anot", "testpanic"]
truth_dim_one = ["thisisatest", "pleasedonotpanic"]
truth_shape = None
with self.cached_session():
placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
reduced = string_ops.reduce_join(input_array, axis=placeholder)
output_array_dim_zero = reduced.eval(feed_dict={placeholder.name: [0]})
output_array_dim_one = reduced.eval(feed_dict={placeholder.name: [1]})
self.assertAllEqualUnicode(truth_dim_zero, output_array_dim_zero)
self.assertAllEqualUnicode(truth_dim_one, output_array_dim_one)
self.assertAllEqual(truth_shape, reduced.get_shape())
def testKeepDims(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = [["thisplease", "isdo", "anot", "testpanic"]]
truth_shape_dim_zero = [1, 4]
truth_dim_one = [["thisisatest"], ["pleasedonotpanic"]]
truth_shape_dim_one = [2, 1]
self._testReduceJoin(
input_array,
truth_dim_zero,
truth_shape_dim_zero,
axis=0,
keep_dims=True)
self._testReduceJoin(
input_array,
truth_dim_one,
truth_shape_dim_one,
axis=1,
keep_dims=True)
expected_val = [["thisisatestpleasedonotpanic"]]
expected_shape = [1, 1]
self._testReduceJoin(
constant_op.constant(input_array), expected_val, expected_shape,
keep_dims=True, axis=None)
# Using axis=[] is a no-op.
expected_val = input_array
expected_shape = [2, 4]
self._testReduceJoin(
input_array, expected_val, expected_shape, keep_dims=True, axis=[])
def testMultiIndex(self):
num_dims = 3
input_array = _input_array(num_dims=num_dims)
# Also tests [].
for i in xrange(num_dims + 1):
for permutation in itertools.permutations(xrange(num_dims), i):
self._testMultipleReduceJoin(input_array, axis=permutation)
@test_util.run_deprecated_v1
def testInvalidReductionIndices(self):
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "Invalid reduction dim"):
string_ops.reduce_join(inputs="", axis=0)
with self.assertRaisesRegexp(ValueError,
"Invalid reduction dimension -3"):
string_ops.reduce_join(inputs=[[""]], axis=-3)
with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
string_ops.reduce_join(inputs=[[""]], axis=2)
with self.assertRaisesRegexp(ValueError,
"Invalid reduction dimension -3"):
string_ops.reduce_join(inputs=[[""]], axis=[0, -3])
with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
string_ops.reduce_join(inputs=[[""]], axis=[0, 2])
def testZeroDims(self):
with self.cached_session():
inputs = np.zeros([0, 1], dtype=str)
# Reduction that drops the dim of size 0.
output = string_ops.reduce_join(inputs=inputs, axis=0)
self.assertAllEqualUnicode([""], self.evaluate(output))
# Reduction that keeps the dim of size 0.
output = string_ops.reduce_join(inputs=inputs, axis=1)
output_shape = self.evaluate(output).shape
self.assertAllEqual([0], output_shape)
@test_util.run_deprecated_v1
def testInvalidArgsUnknownShape(self):
with self.cached_session():
placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
index_too_high = string_ops.reduce_join(placeholder, axis=1)
duplicate_index = string_ops.reduce_join(placeholder, axis=[-1, 1])
with self.assertRaisesOpError("Invalid reduction dimension 1"):
index_too_high.eval(feed_dict={placeholder.name: [""]})
with self.assertRaisesOpError("Duplicate reduction dimension 1"):
duplicate_index.eval(feed_dict={placeholder.name: [[""]]})
@test_util.run_deprecated_v1
def testInvalidArgsUnknownIndices(self):
with self.cached_session():
placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
reduced = string_ops.reduce_join(["test", "test2"], axis=placeholder)
with self.assertRaisesOpError("reduction dimension -2"):
reduced.eval(feed_dict={placeholder.name: -2})
with self.assertRaisesOpError("reduction dimension 2"):
reduced.eval(feed_dict={placeholder.name: 2})
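def _reduce_join_sketch():
  """Illustrative sketch (not exercised by the tests above).

  reduce_join concatenates strings along the reduced axes, the string
  analogue of a numeric reduction; reducing every axis yields a scalar.
  The helper name is hypothetical.
  """
  matrix = [["a", "b"], ["c", "d"]]
  rows = string_ops.reduce_join(inputs=matrix, axis=1, separator="-")
  # rows == ["a-b", "c-d"]
  flat = string_ops.reduce_join(inputs=matrix, axis=[0, 1])
  # axis=[0, 1] reduces dimension 0 first, then 1: "acbd".
  return rows, flat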
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/reduce_join_op_test.py
|