| python_code (string, lengths 0–1.02M) | repo_name (string, lengths 9–48) | file_path (string, lengths 5–114) |
|---|---|---|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DenseLayer JIT compilation on the CPU and GPU devices."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.compiler.tests import test_utils
from tensorflow.contrib.compiler import jit
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.layers import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
jit_scope = jit.experimental_jit_scope
def GetRunMetadataLabels(run_metadata):
"""Returns all labels in run_metadata."""
labels = []
for dev_stats in run_metadata.step_stats.dev_stats:
for node_stats in dev_stats.node_stats:
labels.append(node_stats.timeline_label)
return labels
def InLabels(labels, substr):
"""Returns true iff one of the labels contains substr."""
return any(substr in x for x in labels)
class DenseLayerTest(test.TestCase):
def countXlaOps(self, labels):
"""Count how many XlaCompile/XlaRun labels are present."""
xla_compile_count = sum("XlaCompile(" in x for x in labels)
xla_run_count = sum("XlaRun(" in x for x in labels)
self.assertEqual(xla_compile_count, xla_run_count)
return xla_run_count
def testDenseLayerAutoJit(self):
"""Tests dense layer compilation in auto-jit mode.
Dense layer should be compiled into a single XlaCompile/XlaRun op pair in
auto-jit mode.
"""
os.environ["TF_XLA_FLAGS"] = (
"--tf_xla_cpu_global_jit " + os.environ.get("TF_XLA_FLAGS", ""))
config = config_pb2.ConfigProto()
config.graph_options.optimizer_options.global_jit_level = (
config_pb2.OptimizerOptions.ON_1)
with self.session(config=config) as sess:
x = array_ops.placeholder(shape=[None, None, 3], dtype=np.float32)
y = layers.dense(x, 3)
self.evaluate(variables.global_variables_initializer())
run_metadata = config_pb2.RunMetadata()
test_utils.RunWithWarmup(
sess,
y, {x: np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
labels = GetRunMetadataLabels(run_metadata)
self.assertEqual(1, self.countXlaOps(labels))
    self.assertFalse(InLabels(labels, "MatMul"))
def testDenseLayerJitScopeDefinedShape(self):
"""Tests that the dense layer node is properly compiled in jit scope.
Dense layer with static shape input tensor should be compiled into a single
XlaCompile/XlaRun op pair by XLA.
"""
with self.session() as sess:
x = array_ops.placeholder(shape=[2, 2, 3], dtype=np.float32)
with jit_scope():
y = layers.dense(x, 3)
self.evaluate(variables.global_variables_initializer())
run_metadata = config_pb2.RunMetadata()
test_utils.RunWithWarmup(
sess,
y, {x: np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
labels = GetRunMetadataLabels(run_metadata)
self.assertEqual(1, self.countXlaOps(labels))
# No need to check whether ListDiff is compiled or not because ListDiff op
# is not used when input tensor shape is fully defined.
def testDenseLayerJitScopeUndefinedShape(self):
"""Tests that the dense layer node is properly compiled in jit scope.
"""
with self.session() as sess:
x = array_ops.placeholder(shape=[None, None, 3], dtype=np.float32)
with jit_scope():
y = layers.dense(x, 3)
self.evaluate(variables.global_variables_initializer())
run_metadata = config_pb2.RunMetadata()
test_utils.RunWithWarmup(
sess,
y, {x: np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
labels = GetRunMetadataLabels(run_metadata)
self.assertEqual(1, self.countXlaOps(labels))
    self.assertFalse(InLabels(labels, "MatMul"))
if __name__ == "__main__":
os.environ["TF_XLA_FLAGS"] = ("--tf_xla_enable_lazy_compilation=true " +
os.environ.get("TF_XLA_FLAGS", ""))
test.main()
| tensorflow-master | tensorflow/compiler/tests/dense_layer_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AdagradDA optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad_da
class AdagradDAOptimizerTest(xla_test.XLATestCase):
def testAdagradDAWithoutRegularizationBasic1(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
global_step = resource_variable_ops.ResourceVariable(
0, dtype=dtypes.int64)
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run a step of AdagradDA
update.run()
        # Let g be the gradient accumulator, gg the gradient squared
        # accumulator, T the global step, lr the learning rate, and k the
        # initial gradient squared accumulator value.
        # w = \dfrac{sign(-g)*lr*(|g| - l1*T)_{+}}{l2*T*lr + \sqrt{k+gg}}
        # For var0[0]: -1.0*3.0*(0.1 - 0)/(0 + sqrt(0.1 + 0.1*0.1)) = -0.904534,
        # and similarly for the others.
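        # As a cross-check derived from the same formula (not part of the
        # original comment): var1[0] = -1.0*3.0*(0.01 - 0)/(0 + sqrt(0.1 +
        # 0.01*0.01)) = -0.094821, matching the assertion below.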
self.assertAllCloseAccordingToType(
np.array([-0.904534, -1.603567]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([-0.094821, -0.189358]), self.evaluate(var1))
def testAdagradDAwithoutRegularizationBasic2(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
global_step = resource_variable_ops.ResourceVariable(
0, dtype=dtypes.int64)
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
# Run a step of AdagradDA
update.run()
self.assertAllCloseAccordingToType(
np.array([-0.904534, -1.603567]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([-0.094821, -0.189358]), self.evaluate(var1))
def testAdagradDAWithL1(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
global_step = resource_variable_ops.ResourceVariable(
0, dtype=dtypes.int64)
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
# Run a step of AdagradDA
update.run()
self.assertAllCloseAccordingToType(
np.array([-0.895489, -1.59555]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([-0.085339, -0.17989]), self.evaluate(var1))
def testAdagradDAWithL1_L2(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
global_step = resource_variable_ops.ResourceVariable(
0, dtype=dtypes.int64)
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
# Run a step of AdagradDA
update.run()
self.assertAllCloseAccordingToType(
np.array([-0.046907, -0.093659]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([-0.004275, -0.009023]), self.evaluate(var1))
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/compiler/tests/adagrad_da_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for Tensorflow functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class FunctionTest(xla_test.XLATestCase):
def testFunction(self):
"""Executes a simple TensorFlow function."""
def APlus2B(a, b):
return a + b * 2
aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
bval = np.array([5, 6, 7, 8]).reshape([2, 2]).astype(np.float32)
expected = APlus2B(aval, bval)
with self.session():
@function.Defun(dtypes.float32, dtypes.float32)
def Foo(a, b):
return APlus2B(a, b)
a = constant_op.constant(aval, name="a")
b = constant_op.constant(bval, name="b")
with self.test_scope():
call_f = Foo(a, b)
result = self.evaluate(call_f)
self.assertAllClose(result, expected, rtol=1e-3)
def testNestedFunctions(self):
"""Executes two nested TensorFlow functions."""
def TimesTwo(x):
return x * 2
def APlus2B(a, b):
return a + TimesTwo(b)
aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
bval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
expected = APlus2B(aval, bval)
with self.session():
@function.Defun(dtypes.float32, dtypes.float32)
def Foo(a, b):
return APlus2B(a, b)
a = constant_op.constant(aval, name="a")
b = constant_op.constant(bval, name="b")
with self.test_scope():
call_g = Foo(a, b)
result = self.evaluate(call_g)
self.assertAllClose(result, expected, rtol=1e-3)
def testFunctionMultipleRetvals(self):
"""Executes a function with multiple return values."""
# This function will run on the XLA device
def Func(a, b):
return a + b, a - b
aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
bval = np.array([5, 6, 7, 8]).reshape([2, 2]).astype(np.float32)
expected = Func(aval, bval)
with self.session():
@function.Defun(dtypes.float32, dtypes.float32)
def Foo(a, b):
return Func(a, b)
a = constant_op.constant(aval, name="a")
b = constant_op.constant(bval, name="b")
with self.test_scope():
call_f = Foo(a, b)
result = self.evaluate(call_f)
self.assertAllClose(result, expected, rtol=1e-3)
def testCompileTimeConstantsInDefun(self):
"""Tests that XLA handles compile-time constants in defuns."""
with self.session() as sess:
@function.Defun(dtypes.float32, dtypes.int32, dtypes.int32)
def Foo(a, c, d):
# c and d must be known at compile time
x = array_ops.slice(a, c, d)
return x
a = array_ops.placeholder(dtypes.float32)
c = array_ops.placeholder(dtypes.int32, shape=[4])
d = array_ops.placeholder(dtypes.int32, shape=[4])
with self.test_scope():
call_f = Foo(a, c, d)
result = sess.run(call_f, feed_dict={
a: np.ones([1, 4, 4, 1]),
c: [0, 0, 0, 0],
d: [1, 2, 2, 1]})
self.assertAllEqual(np.ones([1, 2, 2, 1]), result)
# TODO(b/36139787): Re-enable this test when noinline works again.
def DISABLED_testFunctionsNoInline(self):
@function.Defun(dtypes.float32, noinline=True)
def TimesTwo(x):
return x * 2
@function.Defun(dtypes.float32, dtypes.float32)
def APlus2B(a, b):
return a + TimesTwo(b)
aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
bval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
expected = aval + bval * 2
with self.session() as sess:
with self.test_scope():
a = array_ops.placeholder(dtypes.float32, name="a")
b = array_ops.placeholder(dtypes.float32, name="b")
call = APlus2B(a, b)
result = sess.run(call, {a: aval, b: bval})
self.assertAllClose(result, expected, rtol=1e-3)
if __name__ == "__main__":
googletest.main()
| tensorflow-master | tensorflow/compiler/tests/function_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Definition of XLA test case."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import random
import re
import numpy as np
from tensorflow.python.eager import context
from tensorflow.contrib.compiler import jit
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
FLAGS = flags.FLAGS
flags.DEFINE_string('test_device', None,
'Tensorflow device on which to place operators under test')
flags.DEFINE_string('types', None, 'Types to test. Comma-separated list.')
flags.DEFINE_string('disabled_manifest', None,
'Path to a file with a list of tests that should not run.')
flags.DEFINE_string('tf_xla_flags', None,
'Value to set the TF_XLA_FLAGS environment variable to')
def parse_disabled_manifest(manifest_content):
comments_re = re.compile('#.*$')
disabled_tests = []
disabled_method_types = []
for l in manifest_content.splitlines():
stripped = comments_re.sub('', l).strip()
if not stripped:
continue
entry = stripped.split(' ')
if len(entry) == 1:
disabled_tests.append(entry[0])
elif len(entry) == 2:
disabled_method_types.append((entry[0], entry[1].strip().split(',')))
else:
raise ValueError('Bad entry in manifest file.')
disabled_regex = '|'.join(disabled_tests)
method_types_filter = {}
for method, types in disabled_method_types:
method_types_filter[method] = set([
dtypes.as_dtype(types_pb2.DataType.Value(name)).as_numpy_dtype
for name in types
])
return disabled_regex, method_types_filter
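# Illustrative example (reusing the sample entries from the class comment
# below, not an actual manifest): the two manifest lines
#   CumprodTest.*
#   AdamOptimizerTest.testSharing DT_BFLOAT16
# parse to disabled_regex 'CumprodTest.*' and a method_types_filter mapping
# 'AdamOptimizerTest.testSharing' to {<numpy dtype for DT_BFLOAT16>}.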
class XLATestCase(test.TestCase):
"""XLA test cases are parameterized test cases."""
def __init__(self, method_name='runTest'):
super(XLATestCase, self).__init__(method_name)
self.device = FLAGS.test_device
self.has_custom_call = (self.device == 'XLA_CPU')
self._all_tf_types = set([
dtypes.as_dtype(types_pb2.DataType.Value(name))
for name in FLAGS.types.split(',')
])
self.int_tf_types = set([
dtype for dtype in self._all_tf_types if dtype.is_integer
])
self._float_tf_types = set([
dtype for dtype in self._all_tf_types if dtype.is_floating
])
self.complex_tf_types = set([
dtype for dtype in self._all_tf_types if dtype.is_complex
])
self._numeric_tf_types = set(
self.int_tf_types | self._float_tf_types | self.complex_tf_types)
self.quantized_tf_types = set(
dtype for dtype in self._all_tf_types if dtype.is_quantized)
    # Quantized types don't have a numpy equivalent, so they are included in
    # all_tf_types but not in all_types.
# TODO(b/115960798): Parametrize tests on TF types instead of numpy types
# and remove all_types.
self._all_types = set(dtype.as_numpy_dtype
for dtype in self._all_tf_types
if not dtype.is_quantized)
self._int_types = set([dtype.as_numpy_dtype for dtype in self.int_tf_types])
self.signed_int_types = set(dtype.as_numpy_dtype
for dtype in self.int_tf_types
if not dtype.is_unsigned)
self.unsigned_int_types = set(dtype.as_numpy_dtype
for dtype in self.int_tf_types
if dtype.is_unsigned)
self._float_types = set(
[dtype.as_numpy_dtype for dtype in self._float_tf_types])
self.complex_types = set([
dtype.as_numpy_dtype for dtype in self.complex_tf_types
])
self._numeric_types = set(self._int_types | self._float_types
| self.complex_types)
# Parse the manifest file, if any, into a regex identifying tests to
# disable
# TODO(xpan): Make it text proto if it doesn't scale.
# Each line of the manifest file specifies an entry. The entry can be
# 1) TestNameRegex // E.g. CumprodTest.* Or
# 2) TestName TypeName // E.g. AdamOptimizerTest.testSharing DT_BFLOAT16
    # Form 1) disables the entire test, while form 2) only filters out some
    # numeric types so that they are not used in those tests.
self.disabled_regex = None
self._method_types_filter = {}
if FLAGS.disabled_manifest is not None:
with open(FLAGS.disabled_manifest, 'r') as manifest_file:
disabled_regex, self._method_types_filter = (
parse_disabled_manifest(manifest_file.read()))
if disabled_regex:
self.disabled_regex = re.compile(disabled_regex)
if FLAGS.tf_xla_flags is not None:
os.environ['TF_XLA_FLAGS'] = FLAGS.tf_xla_flags
@property
def all_tf_types(self):
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
tf_types = set([dtypes.as_dtype(t)
for t in self._method_types_filter.get(name, set())])
return self._all_tf_types - tf_types
@property
def float_types(self):
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
return self._float_types - self._method_types_filter.get(name, set())
@property
def float_tf_types(self):
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
return self._float_tf_types - self._method_types_filter.get(name, set())
@property
def int_types(self):
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
return self._int_types - self._method_types_filter.get(name, set())
@property
def numeric_tf_types(self):
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
tf_types = set([dtypes.as_dtype(t)
for t in self._method_types_filter.get(name, set())])
return self._numeric_tf_types - tf_types
@property
def numeric_types(self):
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
return self._numeric_types - self._method_types_filter.get(name, set())
@property
def all_types(self):
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
return self._all_types - self._method_types_filter.get(name, set())
def setUp(self):
super(XLATestCase, self).setUp()
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
if self.disabled_regex is not None and self.disabled_regex.match(name):
logging.info('Disabled test case: %s', name)
self.skipTest('{} is disabled by manifest.'.format(name))
return
logging.info('Start test case: %s', name)
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
def tearDown(self):
super(XLATestCase, self).tearDown()
logging.info('End test case: %s', self._testMethodName)
@contextlib.contextmanager
def session(self):
"""Custom implementation of session() for XLA tests.
We override the standard Tensorflow session() since it is too
specific to CPU and GPU tests. In particular, we want to disable soft
placement and explicitly assign ops to devices under test.
Yields:
A session to use when running a test case.
"""
graph = ops.Graph()
config = context.context().config
# Grappler can constant fold TensorListFromTensor ops into DT_VARIANT
# constants which XLA does not understand. So disable constant folding in
# these tests.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
with session.Session(
graph=graph, config=config) as sess, graph.as_default():
yield sess
def cached_session(self):
raise NotImplementedError(
'cached_session not supported on XLATestCase, please use session')
def test_session(self):
raise NotImplementedError(
'test_session not supported on XLATestCase, please use session')
@contextlib.contextmanager
def test_scope(self):
"""Test scope that runs tests on a Tensorflow/XLA device.
Uses a compilation_scope() to mark operators to compile.
Yields:
A scope to apply to the operators under test.
"""
with ops.device('device:{}:0'.format(self.device)):
yield
def Benchmark(tf_bench,
builder_fn,
use_xla_jit,
device,
separate_compiled_gradients=False):
"""Build a graph and run benchmarks against it, with or without XLA.
Args:
tf_bench: An instance of tf.test.Benchmark, used to run the benchmark.
builder_fn: A function that builds a graph when invoked, and returns
(name, fetches), where name is the name of the test, and fetches
is a list of tensors to fetch as output.
use_xla_jit: If true compile with the XLA JIT, otherwise use regular TF.
device: The tensorflow device to run on, e.g. "cpu", "gpu".
separate_compiled_gradients: If true put each gradient subgraph into a
separate compilation scope. This gives fine-grained control over which
portions of the graph will be compiled as a single unit. Compiling
gradients separately may yield better performance for some graphs.
The scope is named based on the scope of the forward computation as well
as the name of the gradients. As a result, the gradients will be compiled
in a scope that is separate from both the forward computation, and from
other gradients.
"""
with ops.Graph().as_default():
name = None
targets = []
with ops.device(device):
fetches = []
jit_scope = jit.experimental_jit_scope
with jit_scope(
compile_ops=use_xla_jit,
separate_compiled_gradients=separate_compiled_gradients):
name, fetches = builder_fn()
# We only want to benchmark the operations themselves, and not the data
# transfer of the result(s). Non-compiled identity ops ensure XLA
# doesn't know we're dropping the results, otherwise it might compile
# away the entire computation.
for fetch in fetches:
targets.append(array_ops.identity(fetch).op)
# TODO(b/132430685): Should we allow soft placement here?
config = config_pb2.ConfigProto(allow_soft_placement=True)
with session.Session(config=config) as sess:
sess.run(variables.global_variables_initializer())
xla = 'xla_' if use_xla_jit else ''
tf_bench.run_op_benchmark(
sess, targets, name='%s_%s%s' % (name, xla, device))
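# A minimal usage sketch for Benchmark(); the builder function and benchmark
# class below are hypothetical, not part of this module (and math_ops would
# need to be imported where the builder is defined):
#
#   def _MatMulBuilder():
#     x = array_ops.ones([1024, 1024])
#     return 'matmul_1024', [math_ops.matmul(x, x)]
#
#   class _MatMulBenchmark(test.Benchmark):
#     def benchmarkMatMulXla(self):
#       Benchmark(self, _MatMulBuilder, use_xla_jit=True, device='cpu')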
| tensorflow-master | tensorflow/compiler/tests/xla_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the XLATestCase test fixture base class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests import xla_test
from tensorflow.python.platform import test
class XlaTestCaseTestCase(test.TestCase):
def testManifestEmptyLineDoesNotCatchAll(self):
manifest = """
testCaseOne
"""
disabled_regex, _ = xla_test.parse_disabled_manifest(manifest)
self.assertEqual(disabled_regex, "testCaseOne")
def testManifestWholeLineCommentDoesNotCatchAll(self):
manifest = """# I am a comment
testCaseOne
testCaseTwo
"""
disabled_regex, _ = xla_test.parse_disabled_manifest(manifest)
self.assertEqual(disabled_regex, "testCaseOne|testCaseTwo")
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/compiler/tests/xla_test_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for XLA Reverse Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class ReverseOpsTest(xla_test.XLATestCase):
def testReverseOneDim(self):
shape = (7, 5, 9, 11)
for revdim in range(-len(shape), len(shape)):
self._AssertReverseEqual([revdim], shape)
def testReverseMoreThanOneDim(self):
shape = (7, 5, 9, 11)
# The offset is used to test various (but not all) combinations of negative
# and positive axis indices that are guaranteed to not collide at the same
# index.
for revdims in itertools.chain.from_iterable(
itertools.combinations(range(-offset,
len(shape) - offset), k)
for k in range(2,
len(shape) + 1)
for offset in range(0, len(shape))):
self._AssertReverseEqual(revdims, shape)
def _AssertReverseEqual(self, revdims, shape):
np.random.seed(120)
pval = np.random.randint(0, 100, size=shape).astype(float)
with self.session():
with self.test_scope():
p = array_ops.placeholder(dtypes.int32, shape=shape)
axis = constant_op.constant(
np.array(revdims, dtype=np.int32),
shape=(len(revdims),),
dtype=dtypes.int32)
rval = array_ops.reverse(p, axis).eval({p: pval})
slices = [
slice(-1, None, -1)
if d in revdims or d - len(shape) in revdims else slice(None)
for d in range(len(shape))
]
    self.assertEqual(
        pval[tuple(slices)].flatten().tolist(), rval.flatten().tolist())
if __name__ == '__main__':
googletest.main()
| tensorflow-master | tensorflow/compiler/tests/reverse_ops_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AddN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class XlaAddNTest(xla_test.XLATestCase):
def testAddTensorLists(self):
with self.session(), self.test_scope():
l1 = list_ops.tensor_list_reserve(
element_shape=[], element_dtype=dtypes.float32, num_elements=3)
l2 = list_ops.tensor_list_reserve(
element_shape=[], element_dtype=dtypes.float32, num_elements=3)
l1 = list_ops.tensor_list_set_item(l1, 0, 5.)
l2 = list_ops.tensor_list_set_item(l2, 2, 10.)
l = math_ops.add_n([l1, l2])
self.assertAllEqual(
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32),
[5.0, 0.0, 10.0])
def testAddTensorListsFailsIfLeadingDimsMismatch(self):
with self.session(), self.test_scope():
l1 = list_ops.tensor_list_reserve(
element_shape=[], element_dtype=dtypes.float32, num_elements=2)
l2 = list_ops.tensor_list_reserve(
element_shape=[], element_dtype=dtypes.float32, num_elements=3)
l = math_ops.add_n([l1, l2])
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"TensorList arguments to AddN must all have the same shape"):
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32).eval()
def testAddTensorListsFailsIfElementShapesMismatch(self):
with self.session() as session, self.test_scope():
# Use placeholders instead of constant values for shapes to prevent TF's
# shape inference from catching this early.
l1_element_shape = array_ops.placeholder(dtype=dtypes.int32)
l2_element_shape = array_ops.placeholder(dtype=dtypes.int32)
l1 = list_ops.tensor_list_reserve(
element_shape=l1_element_shape,
element_dtype=dtypes.float32,
num_elements=3)
l2 = list_ops.tensor_list_reserve(
element_shape=l2_element_shape,
element_dtype=dtypes.float32,
num_elements=3)
l = math_ops.add_n([l1, l2])
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"TensorList arguments to AddN must all have the same shape"):
session.run(
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32), {
l1_element_shape: [],
l2_element_shape: [2]
})
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/compiler/tests/add_n_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for PowerSign."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.contrib.opt.python.training import powersign
from tensorflow.contrib.opt.python.training import sign_decay
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def py_linear_decay_fn(decay_steps):
def linear_decay(step):
step = min(step, decay_steps)
return float(decay_steps - step) / decay_steps
return linear_decay
def powersign_update_numpy(params,
g_t,
m,
lr,
base=math.e,
beta=0.9,
py_sign_decay_fn=None,
t=None):
m_t = beta * m + (1 - beta) * g_t
if py_sign_decay_fn is None:
sign_decayed = 1.0
else:
sign_decayed = py_sign_decay_fn(t-1)
multiplier = base ** (sign_decayed * np.sign(g_t) * np.sign(m_t))
params_t = params - lr * multiplier * g_t
return params_t, m_t
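# In other words, this numpy reference applies the PowerSign update
#   m_t     = beta * m + (1 - beta) * g_t
#   theta_t = theta - lr * base**(d_t * sign(g_t) * sign(m_t)) * g_t
# where d_t is 1.0 when no sign decay function is given, and
# py_sign_decay_fn(t - 1) otherwise.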
class PowerSignTest(xla_test.XLATestCase):
def _testDense(self,
learning_rate=0.1,
sign_decay_fn=None,
py_sign_decay_fn=None,
base=math.e,
beta=0.9):
for dtype in self.float_types:
with self.session(), self.test_scope():
# Initialize variables for numpy implementation.
m0, m1 = 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
global_step = resource_variable_ops.ResourceVariable(0, trainable=False)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = powersign.PowerSignOptimizer(
learning_rate=learning_rate,
base=base,
beta=beta,
sign_decay_fn=sign_decay_fn,
)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
global_step=global_step)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 7 steps of powersign
# first 4 steps with positive gradient
# last 3 steps with negative gradient (sign(gm) should be -1)
for t in range(1, 8):
if t < 5:
update.run()
else:
neg_update.run()
var0_np, m0 = powersign_update_numpy(
var0_np,
grads0_np if t < 5 else -grads0_np,
m0,
learning_rate,
base=base,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
var1_np, m1 = powersign_update_numpy(
var1_np,
grads1_np if t < 5 else -grads1_np,
m1,
learning_rate,
base=base,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testDense(self):
decay_steps = 10
sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
py_sign_decay_fn = py_linear_decay_fn(decay_steps)
self._testDense()
self._testDense(learning_rate=0.1, base=10.0, beta=0.8)
self._testDense(
sign_decay_fn=sign_decay_fn, py_sign_decay_fn=py_sign_decay_fn)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/compiler/tests/powersign_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
class FIFOQueueTest(xla_test.XLATestCase):
def testEnqueue(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=(3, 2))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testMultipleDequeues(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue([1]))
self.evaluate(q.enqueue([2]))
self.evaluate(q.enqueue([3]))
a, b, c = self.evaluate([q.dequeue(), q.dequeue(), q.dequeue()])
self.assertAllEqual(set([1, 2, 3]), set([a, b, c]))
def testQueuesDontShare(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue(1))
q2 = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q2.enqueue(2))
self.assertAllEqual(self.evaluate(q2.dequeue()), 2)
self.assertAllEqual(self.evaluate(q.dequeue()), 1)
def testEnqueueDictWithoutNames(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue({"a": 12.0})
def testParallelEnqueue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [
self.checkedThread(target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(3, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = sess.run(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, self.evaluate(size))
dequeued_t.op.run()
self.assertEqual(0, self.evaluate(size))
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/compiler/tests/fifo_queue_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for SpaceToBatch and BatchToSpace ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.platform import test
def space_to_batch_direct(input_array, block_shape, paddings):
"""Direct Python implementation of space-to-batch conversion.
This is used for tests only.
Args:
input_array: N-D array
block_shape: 1-D array of shape [num_block_dims].
paddings: 2-D array of shape [num_block_dims, 2].
Returns:
Converted tensor.
"""
input_array = np.array(input_array)
block_shape = np.array(block_shape)
num_block_dims = len(block_shape)
paddings = np.array(paddings).reshape((len(block_shape), 2))
padded = np.pad(input_array,
pad_width=([[0, 0]] + list(paddings) + [[0, 0]] *
(input_array.ndim - 1 - num_block_dims)),
mode="constant")
reshaped_padded_shape = [input_array.shape[0]]
output_shape = [input_array.shape[0] * np.prod(block_shape)]
for block_dim, block_shape_value in enumerate(block_shape):
reduced_size = padded.shape[block_dim + 1] // block_shape_value
reshaped_padded_shape.append(reduced_size)
output_shape.append(reduced_size)
reshaped_padded_shape.append(block_shape_value)
reshaped_padded_shape.extend(input_array.shape[num_block_dims + 1:])
output_shape.extend(input_array.shape[num_block_dims + 1:])
reshaped_padded = padded.reshape(reshaped_padded_shape)
permuted_reshaped_padded = np.transpose(reshaped_padded, (
list(np.arange(num_block_dims) * 2 + 2) + [0] +
list(np.arange(num_block_dims) * 2 + 1) + list(
np.arange(input_array.ndim - num_block_dims - 1) + 1 + num_block_dims
* 2)))
return permuted_reshaped_padded.reshape(output_shape)
class SpaceToBatchTest(xla_test.XLATestCase):
"""Tests input-output pairs for the SpaceToBatch and BatchToSpace ops."""
def _testPad(self, inputs, paddings, block_size, outputs):
with self.session() as sess, self.test_scope():
for dtype in self.float_types:
# outputs = space_to_batch(inputs)
placeholder = array_ops.placeholder(dtype)
x_tf = gen_array_ops.space_to_batch(
placeholder, paddings, block_size=block_size)
self.assertAllEqual(sess.run(x_tf, {placeholder: inputs}), outputs)
# inputs = batch_to_space(outputs)
x_tf = gen_array_ops.batch_to_space(
placeholder, paddings, block_size=block_size)
self.assertAllEqual(sess.run(x_tf, {placeholder: outputs}), inputs)
def _testOne(self, inputs, block_size, outputs):
paddings = np.zeros((2, 2), dtype=np.int32)
self._testPad(inputs, paddings, block_size, outputs)
# [1, 2, 2, 1] <-> [4, 1, 1, 1]
def testSmallInput2x2(self):
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 2
x_out = [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
self._testOne(x_np, block_size, x_out)
# [1, 2, 2, 1] <-> [1, 3, 3, 1] (padding) <-> [9, 1, 1, 1]
def testSmallInput2x2Pad1x0(self):
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.array([[1, 0], [1, 0]], dtype=np.int32)
block_size = 3
x_out = [[[[0]]], [[[0]]], [[[0]]], [[[0]]], [[[1]]], [[[2]]], [[[0]]],
[[[3]]], [[[4]]]]
self._testPad(x_np, paddings, block_size, x_out)
# Test with depth larger than 1.
# [1, 2, 2, 3] <-> [4, 1, 1, 3]
def testDepthInput2x2(self):
x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
block_size = 2
x_out = [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
self._testOne(x_np, block_size, x_out)
# Test for larger input dimensions.
# [1, 4, 4, 1] <-> [4, 2, 2, 1]
def testLargerInput2x2(self):
x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]],
[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
block_size = 2
x_out = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]],
[[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]]
self._testOne(x_np, block_size, x_out)
# Test with batch larger than 1.
# [2, 2, 4, 1] <-> [8, 1, 2, 1]
def testBatchInput2x2(self):
x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]],
[[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
block_size = 2
x_out = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
[[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input spatial dimensions AND batch larger than 1, to ensure
# that elements are correctly laid out spatially and properly interleaved
# along the batch dimension.
# [2, 4, 4, 1] <-> [8, 2, 2, 1]
def testLargerInputBatch2x2(self):
x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]],
[[9], [10], [11], [12]], [[13], [14], [15], [16]]],
[[[17], [18], [19], [20]], [[21], [22], [23], [24]],
[[25], [26], [27], [28]], [[29], [30], [31], [32]]]]
x_out = [[[[1], [3]], [[9], [11]]], [[[17], [19]], [[25], [27]]],
[[[2], [4]], [[10], [12]]], [[[18], [20]], [[26], [28]]],
[[[5], [7]], [[13], [15]]], [[[21], [23]], [[29], [31]]],
[[[6], [8]], [[14], [16]]], [[[22], [24]], [[30], [32]]]]
block_size = 2
self._testOne(x_np, block_size, x_out)
class SpaceToBatchNDTest(xla_test.XLATestCase):
"""Tests input-output pairs for the SpaceToBatchND and BatchToSpaceND ops."""
def _testPad(self, inputs, block_shape, paddings, outputs):
block_shape = np.array(block_shape)
paddings = np.array(paddings).reshape((len(block_shape), 2))
with self.session() as sess, self.test_scope():
for dtype in self.float_types:
# TODO(b/68813416): Skip bfloat16's as the input type for direct is
# float32 and results in a mismatch, while making testDirect provide the
# correctly typed input results in 'no fill-function for data-type'
# error.
if dtype == dtypes.bfloat16.as_numpy_dtype:
continue
if dtype == np.float16:
actual_inputs = np.array(inputs).astype(dtype)
actual_paddings = np.array(paddings).astype(dtype)
expected_outputs = np.array(outputs).astype(dtype)
else:
actual_inputs = inputs
actual_paddings = paddings
expected_outputs = outputs
placeholder = array_ops.placeholder(dtype)
# outputs = space_to_batch(inputs)
x_tf = array_ops.space_to_batch_nd(placeholder, block_shape,
actual_paddings)
self.assertAllEqual(
sess.run(x_tf, {placeholder: actual_inputs}), expected_outputs)
# inputs = batch_to_space(outputs)
placeholder = array_ops.placeholder(dtype)
x_tf = array_ops.batch_to_space_nd(placeholder, block_shape,
actual_paddings)
self.assertAllEqual(
sess.run(x_tf, {placeholder: expected_outputs}), actual_inputs)
def _testDirect(self, input_shape, block_shape, paddings):
inputs = np.arange(np.prod(input_shape), dtype=np.float32)
inputs = inputs.reshape(input_shape)
self._testPad(inputs, block_shape, paddings,
space_to_batch_direct(inputs, block_shape, paddings))
def testZeroBlockDimsZeroRemainingDims(self):
self._testPad(
inputs=[1, 2],
block_shape=[],
paddings=[],
outputs=[1, 2],)
def testZeroBlockDimsOneRemainingDim(self):
self._testPad(
inputs=[[1, 2], [3, 4]],
block_shape=[],
paddings=[],
outputs=[[1, 2], [3, 4]])
# Same thing, but with a no-op block dim.
self._testPad(
inputs=[[1, 2], [3, 4]],
block_shape=[1],
paddings=[[0, 0]],
outputs=[[1, 2], [3, 4]])
def testZeroBlockDimsTwoRemainingDims(self):
self._testPad(
inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
block_shape=[],
paddings=[],
outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
# Same thing, but with a no-op block dim.
self._testPad(
inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
block_shape=[1],
paddings=[[0, 0]],
outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
# Same thing, but with two no-op block dims.
self._testPad(
inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
block_shape=[1, 1],
paddings=[[0, 0], [0, 0]],
outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
def testOneBlockDimZeroRemainingDims(self):
self._testPad(
inputs=[[1, 2, 3], [4, 5, 6]],
block_shape=[2],
paddings=[1, 0],
outputs=[[0, 2], [0, 5], [1, 3], [4, 6]])
def testOneBlockDimOneRemainingDim(self):
self._testPad(
inputs=[[[1, 11], [2, 21], [3, 31]], [[4, 41], [5, 51], [6, 61]]],
block_shape=[2],
paddings=[1, 0],
outputs=[[[0, 0], [2, 21]], [[0, 0], [5, 51]], [[1, 11], [3, 31]],
[[4, 41], [6, 61]]])
def testDirect0(self):
# Test with zero-size remaining dimension.
self._testDirect(
input_shape=[3, 1, 2, 0], block_shape=[3], paddings=[[0, 2]])
def testDirect1(self):
# Test with zero-size blocked dimension.
self._testDirect(
input_shape=[3, 0, 2, 5], block_shape=[3], paddings=[[0, 0]])
def testDirect2(self):
# Test with padding up from zero size.
self._testDirect(
input_shape=[3, 0, 2, 5], block_shape=[3], paddings=[[1, 2]])
def testDirect3(self):
self._testDirect(
input_shape=[3, 3, 4, 5, 2],
block_shape=[3, 4, 2],
paddings=[[1, 2], [0, 0], [3, 0]])
def testDirect4(self):
self._testDirect(
input_shape=[3, 3, 4, 5, 2],
block_shape=[3, 4, 2, 2],
paddings=[[1, 2], [0, 0], [3, 0], [0, 0]])
def testDirect5(self):
self._testDirect(
input_shape=[3, 2, 2, 3, 4, 5, 2, 5],
block_shape=[1, 1, 3, 4, 2, 2],
paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0]])
def testDirect6(self):
self._testDirect(
input_shape=[3, 2, 2, 3, 4, 5, 2, 5],
block_shape=[1, 1, 3, 4, 2, 2, 1],
paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0], [0, 0]])
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/compiler/tests/spacetobatch_op_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateless random-number generation ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.kernel_tests.random import util as random_test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import stateless_random_ops as stateless
from tensorflow.python.platform import test
class StatelessRandomOpsTest(xla_test.XLATestCase):
"""Test cases for stateless random-number generator operators."""
def _random_types(self, include_int=False):
allowed_types = {dtypes.float64, dtypes.float32, dtypes.bfloat16}
if include_int:
allowed_types.update({dtypes.int32, dtypes.int64})
return self.all_tf_types & allowed_types
def testDeterminism(self):
# Stateless values should be equal iff the seeds are equal (roughly)
with self.session(), self.test_scope():
seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
seeds = [(x, y) for x in range(5) for y in range(5)] * 3 # pylint: disable=g-complex-comprehension
for stateless_op in [
stateless.stateless_random_uniform, stateless.stateless_random_normal
]:
for shape in (), (3,), (2, 5):
for dtype in self._random_types():
# Skip bfloat16. The result of bfloat16 is truncated from 32-bit
# result. With different seeds, the 32-bit results are different,
# but the truncated 16-bit results might be the same.
if dtype == dtypes.bfloat16:
continue
pure = stateless_op(shape, seed=seed_t, dtype=dtype)
values = [(seed, pure.eval(feed_dict={
seed_t: seed
})) for seed in seeds]
for s0, v0 in values:
for s1, v1 in values:
self.assertEqual(s0 == s1, np.all(v0 == v1))
def testRandomUniformIsInRange(self):
with self.session() as sess, self.test_scope():
for dtype in self._random_types(include_int=True):
maxval = 1
if dtype.is_integer:
maxval = 100
seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
x = stateless.stateless_random_uniform(
shape=[1000], seed=seed_t, maxval=maxval, dtype=dtype)
y = sess.run(x, {seed_t: [0x12345678, 0xabcdef12]})
self.assertTrue(np.all(y >= 0))
self.assertTrue(np.all(y < maxval))
def testDistributionOfStatelessRandomUniform(self):
"""Use Pearson's Chi-squared test to test for uniformity."""
with self.session() as sess, self.test_scope():
for dtype in self._random_types(include_int=True):
seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
n = 1000
maxval = 1
if dtype.is_integer:
maxval = 100
x = stateless.stateless_random_uniform(
shape=[n], seed=seed_t, maxval=maxval, dtype=dtype)
y = sess.run(x, {seed_t: [565656, 121212]})
if maxval > 1:
# Normalize y to range [0, 1).
y = y.astype(float) / maxval
# Tests that the values are distributed amongst 10 bins with equal
# probability. 16.92 is the Chi^2 value for 9 degrees of freedom with
# p=0.05. This test is probabilistic and would be flaky if the random
# seed were not fixed.
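        # (For reference: 16.92 is the value one would get from
        # scipy.stats.chi2.ppf(0.95, df=9); scipy is not used by this test.)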
self.assertLess(random_test_util.chi_squared(y, 10), 16.92)
def testRandomNormalIsFinite(self):
with self.session() as sess, self.test_scope():
for dtype in self._random_types():
seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
x = stateless.stateless_random_normal(
shape=[10000], seed=seed_t, dtype=dtype)
y = sess.run(x, {seed_t: [0x12345678, 0xabcdef12]})
self.assertTrue(np.all(np.isfinite(y)))
def testDistributionOfStatelessRandomNormal(self):
"""Use Anderson-Darling test to test distribution appears normal."""
with self.session() as sess, self.test_scope():
for dtype in self._random_types():
seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
n = 1000
x = stateless.stateless_random_normal(
shape=[n], seed=seed_t, dtype=dtype)
y = sess.run(x, {seed_t: [25252, 314159]})
# The constant 2.492 is the 5% critical value for the Anderson-Darling
      # test where the mean and variance are known. This test is probabilistic,
      # so the seed is fixed to avoid flakiness.
self.assertLess(
random_test_util.anderson_darling(y.astype(float)), 2.492)
def testTruncatedNormal(self):
for dtype in self._random_types():
with self.session() as sess, self.test_scope():
seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
n = 10000000
x = stateless.stateless_truncated_normal(
shape=[n], seed=seed_t, dtype=dtype)
y = sess.run(x, {seed_t: [0x12345678, 0xabcdef12]})
random_test_util.test_truncated_normal(
self.assertEqual, self.assertAllClose, dtype, n, y)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/stateless_random_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class MatrixBandPartTest(xla_test.XLATestCase, parameterized.TestCase):
@parameterized.parameters(
{
'batch_shape': [],
'rows': 1,
'cols': 1
},
{
'batch_shape': [],
'rows': 1,
'cols': 2
},
{
'batch_shape': [],
'rows': 1,
'cols': 7
},
{
'batch_shape': [],
'rows': 2,
'cols': 1
},
{
'batch_shape': [],
'rows': 2,
'cols': 2
},
{
'batch_shape': [],
'rows': 2,
'cols': 7
},
{
'batch_shape': [],
'rows': 7,
'cols': 1
},
{
'batch_shape': [],
'rows': 7,
'cols': 2
},
{
'batch_shape': [],
'rows': 7,
'cols': 7
},
{
'batch_shape': [2,],
'rows': 1,
'cols': 1
},
{
'batch_shape': [2,],
'rows': 1,
'cols': 2
},
{
'batch_shape': [2,],
'rows': 1,
'cols': 7
},
{
'batch_shape': [2,],
'rows': 2,
'cols': 1
},
{
'batch_shape': [2,],
'rows': 2,
'cols': 2
},
{
'batch_shape': [2,],
'rows': 2,
'cols': 7
},
{
'batch_shape': [2,],
'rows': 7,
'cols': 1
},
{
'batch_shape': [2,],
'rows': 7,
'cols': 2
},
{
'batch_shape': [2,],
'rows': 7,
'cols': 7
},
{
'batch_shape': [1, 3, 2],
'rows': 1,
'cols': 1
},
{
'batch_shape': [1, 3, 2],
'rows': 1,
'cols': 2
},
{
'batch_shape': [1, 3, 2],
'rows': 1,
'cols': 7
},
{
'batch_shape': [1, 3, 2],
'rows': 2,
'cols': 1
},
{
'batch_shape': [1, 3, 2],
'rows': 2,
'cols': 2
},
{
'batch_shape': [1, 3, 2],
'rows': 2,
'cols': 7
},
{
'batch_shape': [1, 3, 2],
'rows': 7,
'cols': 1
},
{
'batch_shape': [1, 3, 2],
'rows': 7,
'cols': 2
},
{
'batch_shape': [1, 3, 2],
'rows': 7,
'cols': 7
},
)
def testMatrixBandPart(self, batch_shape, rows, cols):
# TODO(b/125505881): Disabled due to LLVM backend crash.
if self.device == 'XLA_CPU' and cols == 7 and rows == 1 and batch_shape == [
1, 3, 2
]:
      return
for dtype in self.float_types:
with self.session():
mat = np.ones(batch_shape + [rows, cols]).astype(dtype)
batch_mat = np.tile(mat, batch_shape + [1, 1])
for lower in -1, 0, 1, rows - 1:
for upper in -1, 0, 1, cols - 1:
band_np = mat
if lower >= 0:
band_np = np.triu(band_np, -lower)
if upper >= 0:
band_np = np.tril(band_np, upper)
if batch_shape:
band_np = np.tile(band_np, batch_shape + [1, 1])
placeholder = array_ops.placeholder(dtype)
with self.test_scope():
band = array_ops.matrix_band_part(
placeholder, constant_op.constant(lower, dtype=dtypes.int32),
constant_op.constant(upper, dtype=dtypes.int32))
feed_dict = {placeholder: batch_mat}
self.assertAllEqual(band_np, band.eval(feed_dict=feed_dict))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/matrix_band_part_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.stack_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.compiler.xla import xla
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.platform import test
class StackOpTest(xla_test.XLATestCase):
def testStackPushPop(self):
with self.session(), self.test_scope():
v = array_ops.placeholder(dtypes.float32)
def fn():
h = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push_v2(h, v)
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
return c1
self.assertAllClose([[4.0, 5.0]],
xla.compile(fn)[0].eval({v: [[4.0, 5.0]]}))
def testStackPushPopSwap(self):
with self.session(), self.test_scope():
a = np.arange(2000)
x = array_ops.placeholder(dtypes.float32)
def fn():
h = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push_v2(h, x, swap_memory=True)
with ops.control_dependencies([c]):
return gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
self.assertAllClose(a, xla.compile(fn)[0].eval({x: a}))
def testMultiStack(self):
with self.session(), self.test_scope():
v = array_ops.placeholder(dtypes.float32)
def fn():
h1 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_push_v2(h1, v)
with ops.control_dependencies([c1]):
c1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
h2 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="bar")
c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
with ops.control_dependencies([c2]):
c2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
return c1 + c2
self.assertAllClose(9.0, xla.compile(fn)[0].eval({v: 4.0}))
def testSameNameStacks(self):
"""Different stacks with the same name do not interfere."""
with self.session() as sess, self.test_scope():
v1 = array_ops.placeholder(dtypes.float32)
v2 = array_ops.placeholder(dtypes.float32)
def fn():
h1 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
h2 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_push_v2(h1, v1)
with ops.control_dependencies([c1]):
c2 = gen_data_flow_ops.stack_push_v2(h2, v2)
with ops.control_dependencies([c2]):
pop1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
pop2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
return [pop1, pop2]
[pop1_compiled, pop2_compiled] = xla.compile(fn)
out1, out2 = sess.run([pop1_compiled, pop2_compiled], {v1: 4.0, v2: 5.0})
self.assertAllClose(out1, 4.0)
self.assertAllClose(out2, 5.0)
def testCloseStack(self):
with self.session() as sess, self.test_scope():
def fn():
h = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
gen_data_flow_ops.stack_close_v2(h)
sess.run(xla.compile(fn))
def testPushCloseStack(self):
with self.session() as sess, self.test_scope():
v = array_ops.placeholder(dtypes.float32)
def fn():
h = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push_v2(h, v)
with ops.control_dependencies([c]):
gen_data_flow_ops.stack_close_v2(h)
sess.run(xla.compile(fn), {v: [[4.0, 5.0]]})
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/stack_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adagrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
class AdagradOptimizerTest(xla_test.XLATestCase):
def testBasic(self):
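    # Adagrad update per step: accum += grad**2; var -= lr * grad / sqrt(accum),
    # with accum starting at initial_accumulator_value.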
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0),
float_rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1),
float_rtol=1e-5)
def testTensorLearningRate(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(
constant_op.constant(3.0), initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0),
float_rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1),
float_rtol=1e-5)
def testSharing(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(3.0)
# Apply the optimizer twice. Both applications will use
# the same accums.
ada_update1 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
ada_update2 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = ada_opt.get_slot(var1, "accumulator")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
variables.global_variables_initializer().run()
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Mix the first and the second adagrad for 3 steps.
ada_update1.run()
ada_update2.run()
ada_update1.run()
# Validate updated params (the same as with only 1 Adagrad).
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0),
float_rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1),
float_rtol=1e-5)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/adagrad_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the DataFormatVecPermute operator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class XlaPermuteOpTest(xla_test.XLATestCase):
def _runPermuteAndCompare(self, x, src_format, dst_format, expected):
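    # data_format_vec_permute reorders a per-dimension vector (e.g. a shape or
    # strides vector) from src_format to dst_format; for example, NHWC values
    # [N, H, W, C] become [N, C, H, W] under NCHW.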
with self.session() as session:
with self.test_scope():
placeholder = array_ops.placeholder(dtypes.as_dtype(x.dtype), x.shape)
param = {placeholder: x}
output = nn_ops.data_format_vec_permute(
placeholder, src_format=src_format, dst_format=dst_format)
result = session.run(output, param)
self.assertAllEqual(result, expected)
def testNHWCToNCHW(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "NCHW", [7, 3, 4, 9])
def testNCHWToNHWC(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "NCHW", "NHWC", [7, 9, 3, 4])
def testNHWCToHWNC(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "HWNC", [4, 9, 7, 3])
def testHWNCToNHWC(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "HWNC", "NHWC", [9, 7, 4, 3])
def testNHWCToNCHW2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "NCHW",
[[7, 4], [5, 1], [9, 3], [4, 5]])
def testNHWCToHWNC2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "HWNC",
[[9, 3], [4, 5], [7, 4], [5, 1]])
def testHWNCToNHWC2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "HWNC", "NHWC",
[[4, 5], [7, 4], [9, 3], [5, 1]])
def testNCHWToNHWC2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "NCHW", "NHWC",
[[7, 4], [4, 5], [5, 1], [9, 3]])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/permute_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA dynamic slicing ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class DynamicUpdateSliceOpsTest(xla_test.XLATestCase):
def _assertOpOutputMatchesExpected(self, op, args, expected):
with self.session() as session:
with self.test_scope():
placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
for arg in args
]
feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
output = op(*placeholders)
result = session.run(output, feeds)
self.assertAllClose(result, expected, rtol=1e-3)
def testUpdateSlice(self):
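    # xla.dynamic_update_slice(operand, update, start_indices) overwrites the
    # slice of `operand` starting at `start_indices` with `update`, leaving
    # all other elements unchanged.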
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
xla.dynamic_update_slice, [
np.array([], dtype=dtype),
np.array([], dtype=dtype),
np.array([0], dtype=np.int32)
],
expected=np.array([], dtype=dtype))
self._assertOpOutputMatchesExpected(
xla.dynamic_update_slice, [
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype),
np.array([-1, -2, -3], dtype=dtype),
np.array([6], dtype=np.int32)
],
expected=np.array([1, 2, 3, 4, 5, 6, -1, -2, -3, 10], dtype=dtype))
self._assertOpOutputMatchesExpected(
xla.dynamic_update_slice, [
np.array(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=dtype),
np.array([[42, 43], [44, 45]], dtype=dtype),
np.array([1, 2], dtype=np.int32)
],
expected=np.array(
[[1, 2, 3, 4], [5, 6, 42, 43], [9, 10, 44, 45]], dtype=dtype))
self._assertOpOutputMatchesExpected(
xla.dynamic_update_slice, [
np.array(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=dtype),
np.array([[], []], dtype=dtype),
np.array([1, 2], dtype=np.int32)
],
expected=np.array(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=dtype))
self._assertOpOutputMatchesExpected(
xla.dynamic_update_slice, [
np.array(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=dtype),
np.ones([3, 4], dtype=dtype),
np.array([0, 0], dtype=np.int32)
],
expected=np.ones([3, 4], dtype=dtype))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/dynamic_slice_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AdaMax optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.contrib.opt.python.training import adamax
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adamax_update_numpy(param,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
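  # AdaMax replaces Adam's second-moment estimate with an exponentially
  # weighted infinity norm, so v_t needs no bias correction.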
m_t = beta1 * m + (1 - beta1) * g_t
v_t = np.maximum(beta2 * v, np.abs(g_t))
param_t = param - (alpha / (1 - beta1**t)) * (m_t / (v_t + epsilon))
return param_t, m_t, v_t
class AdaMaxOptimizerTest(xla_test.XLATestCase):
def testBasic(self):
for i, dtype in enumerate(self.float_types):
with self.session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adamax.AdaMaxOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
opt_variables = opt.variables()
beta1_power = opt._get_beta_accumulators()
        self.assertIsNotNone(beta1_power)
self.assertIn(beta1_power, opt_variables)
with ops.Graph().as_default():
# Shouldn't return non-slot variables from other graphs.
self.assertEqual(0, len(opt.variables()))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power = opt._get_beta_accumulators()
# Run 3 steps of AdaMax
for t in range(1, 4):
update.run()
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta1_power))
var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0), rtol=1e-2)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1), rtol=1e-2)
self.assertEqual("var0_%d/AdaMax:0" % (i,),
opt.get_slot(var=var0, name="m").name)
def testTensorLearningRate(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adamax.AdaMaxOptimizer(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power = opt._get_beta_accumulators()
# Run 3 steps of AdaMax
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
update.run()
var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/adamax_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for depthwise convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
# Reference implementation of depthwise_conv2d
def ReferenceDepthwiseConv2D(input_tensor, filter_tensor, strides, padding,
data_format=None):
# Reference implementation of depthwise convolution that uses regular
# convolution.
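  # Each input channel is convolved with its own [H, W, 1, depth_multiplier]
  # filter slice, and the per-channel results are concatenated along the
  # channel dimension.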
convs = []
in_channels = filter_tensor.shape[2]
# Use a custom implementation of depthwise conv2d using slicing.
for channel in xrange(in_channels):
# Slice the input along channel
if data_format == "NCHW":
input_slice = input_tensor[:, channel:channel+1, :, :]
else:
input_slice = input_tensor[:, :, :, channel:channel+1]
# Slice the filters. Filters are H, W, InC, DepthMultiplier
filter_slice = filter_tensor[:, :, channel:channel+1, :]
# Do conv
convs.append(nn_ops.conv2d(input_slice, filter_slice,
strides, padding,
data_format=data_format,
name="depthwise_slice_%d" % channel))
# Concat along dimension.
if data_format == "NCHW":
return array_ops.concat(convs, 1)
else:
return array_ops.concat(convs, 3)
def ConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
convolution parameters.
"""
input_sizes = [[4, 5, 5, 48], [4, 8, 8, 84], [4, 17, 17, 48], [4, 9, 27, 8],
[4, 31, 31, 7], [4, 35, 35, 2], [4, 147, 147, 2],
[3, 299, 299, 3], [5, 183, 183, 1]]
filter_sizes = [[1, 1, 48, 2], [1, 3, 84, 1], [3, 1, 48, 4], [3, 3, 8, 1],
[3, 3, 7, 1], [5, 5, 2, 1], [3, 3, 2, 8], [2, 2, 3,
8], [5, 5, 1, 2]]
out_sizes = [[4, 5, 5, 96], [4, 8, 8, 84], [4, 17, 17, 192], [4, 9, 27, 8],
[4, 31, 31, 7], [4, 35, 35, 2], [4, 49, 49, 16],
[3, 150, 150, 24], [5, 92, 92, 2]]
strides = [1, 1, 1, 1, 1, 1, 3, 2, 2]
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
  paddings = [SAME, SAME, SAME, SAME, SAME, SAME, VALID, SAME, SAME]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
def CheckGradConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
  compute_gradient_error() is very expensive, so the configs should be
  relatively small.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
convolution parameters.
"""
input_sizes = [[2, 5, 8, 1], [4, 5, 5, 1], [2, 4, 4, 2], [1, 15, 15, 2],
[2, 15, 16, 1]]
filter_sizes = [[4, 4, 1, 2], [2, 2, 1, 2], [3, 1, 2, 2], [1, 3, 2, 1],
[3, 3, 1, 2]]
out_sizes = [[2, 5, 8, 2], [4, 2, 2, 2], [2, 4, 4, 4], [1, 15, 15, 2],
[2, 5, 5, 2]]
strides = [1, 2, 1, 1, 3]
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [SAME, VALID, SAME, SAME, VALID]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
class DepthwiseConv2DTest(xla_test.XLATestCase):
  # This is testing that depthwise_conv2d and depthwise_conv2d_native
  # produce the same results. It also tests that the NCHW and NHWC
  # formats agree, by checking that depthwise_conv2d_native with the
  # 'NCHW' format (with transposition) matches the 'NHWC' format using
  # the higher level interface.
def _VerifyValues(self,
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
data_type,
data_format="NHWC"):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
data_type: The data type to use.
data_format: The data_format of the input. "NHWC" or "NCHW".
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input and filter tensor with numbers incrementing from 1.
x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],
dtype=data_type).reshape(tensor_in_sizes)
x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],
dtype=data_type).reshape(filter_in_sizes)
with self.session() as sess:
if data_type == np.float32:
tolerance = 1e-4
else:
self.assertEqual(data_type, np.float64)
tolerance = 1e-8
t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=data_type)
t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=data_type)
native_t1 = t1
strides = [1, stride, stride, 1]
if data_format == "NCHW":
        # Transpose from NHWC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
with self.test_scope():
conv_native = nn_ops.depthwise_conv2d_native(
native_t1,
t2,
strides=strides,
data_format=data_format,
padding=padding)
if data_format == "NCHW":
# Transpose back from NCHW to NHWC
conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
with ops.device("CPU"):
conv_interface = ReferenceDepthwiseConv2D(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
native_result = sess.run(conv_native, {t1: x1, t2: x2})
interface_result = sess.run(conv_interface, {t1: x1, t2: x2})
print("data_type:", data_type, "max diff = ",
np.amax(np.absolute(native_result - interface_result)))
self.assertAllClose(
np.ravel(native_result), np.ravel(interface_result), rtol=tolerance)
def testDepthwiseConv2D(self):
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2D,", index, "th config:", input_size, "*",
filter_size, "stride:", stride, "padding:", padding)
for data_type in self.float_types:
# TODO(phawkins): the reference implementation only supports float32.
if data_type == np.float32:
self._VerifyValues(
input_size, filter_size, stride, padding, data_type)
def testDepthwiseConv2DFormat(self):
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DFormat,", index, "th config:", input_size,
"*", filter_size, "stride:", stride, "padding:", padding)
for data_type in self.float_types:
# TODO(phawkins): the reference implementation only supports float32.
if data_type == np.float32:
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
data_type,
data_format="NCHW")
# This is testing against hand calculated results.
def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
"""Verifies the output values of the depthwise convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],
dtype=np.float32).reshape(tensor_in_sizes)
x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],
dtype=np.float32).reshape(filter_in_sizes)
with self.session() as sess:
t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=np.float32)
t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=np.float32)
with self.test_scope():
conv = nn_ops.depthwise_conv2d_native(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = sess.run(conv, {t1: x1, t2: x2})
print("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-4)
self.assertShapeEqual(value, conv)
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output)
def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
stride, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_xla):
with self.session():
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = array_ops.placeholder(np.float32, shape=filter_sizes)
t2 = array_ops.placeholder(np.float32, shape=output_sizes)
if use_xla:
with self.test_scope():
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
else:
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval({t1: x1, t2: x2})
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_xla=True)
cpu_value = _GetVal(use_xla=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-3, atol=1e-3)
def testDepthwiseConv2DInputGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DInputGradCompare,", index, "th config:",
input_size, "*", filter_size, "stride:", stride, "padding:",
padding)
self._CompareBackpropInput(input_size, filter_size, output_size, stride,
padding)
def _CompareBackpropFilter(self,
input_sizes,
filter_sizes,
output_sizes,
stride,
padding,
data_format="NHWC"):
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_xla):
with self.session():
t0 = array_ops.placeholder(np.float32, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = array_ops.placeholder(np.float32, shape=output_sizes)
native_t0 = t0
native_t2 = t2
strides = [1, stride, stride, 1]
if use_xla:
if data_format == "NCHW":
            # Transpose from NHWC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_t0 = array_ops.transpose(t0, [0, 3, 1, 2])
native_t2 = array_ops.transpose(t2, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
with self.test_scope():
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
native_t0,
t1,
native_t2,
strides=strides,
padding=padding,
data_format=data_format)
else:
# For CPU, the format NCHW is not supported. Therefore we always use
# NHWC here.
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
native_t0, t1, native_t2, strides=strides, padding=padding)
ret = backprop.eval({t0: x0, t2: x2})
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_xla=True)
cpu_value = _GetVal(use_xla=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def testDepthwiseConv2DFilterGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DFilterGradCompare,", index, "th config:",
input_size, "*", filter_size, "producing output", output_size,
"stride:", stride, "padding:", padding)
self._CompareBackpropFilter(input_size, filter_size, output_size,
stride, padding)
def testDepthwiseConv2DFilterGradFormatNCHWCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DFilterGradFormatNCHWCompare,", index,
"th config:", input_size, "*", filter_size, "producing output",
output_size, "stride:", stride, "padding:", padding)
self._CompareBackpropFilter(
input_size,
filter_size,
output_size,
stride,
padding,
data_format="NCHW")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/depthwise_conv_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
def adam_update_numpy(param,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
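  # Standard Adam update, with the bias corrections for the first and second
  # moments folded into the per-step size alpha_t.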
alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
class AdamOptimizerTest(xla_test.XLATestCase):
def testBasic(self):
for dtype in self.float_types:
# TODO: test fails for float16 due to excessive precision requirements.
if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
continue
with self.session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = array_ops.placeholder(dtype)
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
update.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testTensorLearningRate(self):
for dtype in self.float_types:
# TODO: test fails for float16 due to excessive precision requirements.
if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
continue
with self.session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = array_ops.placeholder(dtype)
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
update.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSharing(self):
for dtype in self.float_types:
# TODO: test fails for float16 due to excessive precision requirements.
if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
continue
with self.session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = array_ops.placeholder(dtype)
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
beta1_power, beta2_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
if t % 2 == 0:
update1.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
else:
update2.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/adam_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for scan ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def numpy_reverse(x, axis):
length = len(x.shape)
if axis < 0:
axis = length + axis
ix = [
slice(None, None, -1) if i == axis else slice(None) for i in range(length)
]
return x[ix]
def handle_options(func, x, axis, exclusive, reverse):
"""Adds tf options to numpy scan ops."""
length = len(x.shape)
if axis < 0:
axis = length + axis
if reverse:
x = numpy_reverse(x, axis)
if exclusive:
ix_head = [slice(0, 1) if i == axis else slice(None) for i in range(length)]
ix_init = [
slice(0, -1) if i == axis else slice(None) for i in range(length)
]
if func == np.cumsum:
init = np.zeros_like(x[ix_head])
elif func == np.cumprod:
init = np.ones_like(x[ix_head])
else:
raise ValueError("Unknown scan function.")
x = np.concatenate([init, func(x[ix_init], axis)], axis=axis)
else:
x = func(x, axis=axis)
if reverse:
x = numpy_reverse(x, axis)
return x
class CumsumTest(xla_test.XLATestCase):
valid_dtypes = [np.float32, np.int32]
def axis_dtypes(self):
return set(self.int_types).intersection([np.int32, np.int64])
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
with self.session(), self.test_scope():
p = array_ops.placeholder(x.dtype)
tf_out = math_ops.cumsum(p, axis, exclusive, reverse).eval(
feed_dict={p: x})
self.assertAllClose(np_out, tf_out)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def testAxisType(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis_dtype in self.axis_dtypes():
with self.session(), self.test_scope():
p = array_ops.placeholder(x.dtype)
axis = constant_op.constant(0, axis_dtype)
math_ops.cumsum(p, axis).eval(feed_dict={p: x})
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 10).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 20).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
with self.session(), self.test_scope():
input_tensor = ops.convert_to_tensor(x)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumsum(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumsum(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
math_ops.cumsum(input_tensor, [0]).eval()
class CumprodTest(xla_test.XLATestCase):
valid_dtypes = [np.float32, np.int32]
def axis_dtypes(self):
return set(self.int_types).intersection([np.int32, np.int64])
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
with self.session(), self.test_scope():
p = array_ops.placeholder(x.dtype)
prod = math_ops.cumprod(p, axis, exclusive, reverse)
tf_out = prod.eval(feed_dict={p: x})
self.assertAllClose(np_out, tf_out)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def testAxisType(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis_dtype in self.axis_dtypes():
with self.session(), self.test_scope():
p = array_ops.placeholder(x.dtype)
axis = constant_op.constant(0, axis_dtype)
          math_ops.cumprod(p, axis).eval(feed_dict={p: x})
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 11).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 21).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
with self.session(), self.test_scope():
input_tensor = ops.convert_to_tensor(x)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumprod(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumprod(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
math_ops.cumprod(input_tensor, [0]).eval()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/scan_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for fused batch norm operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import test_utils
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
DATA_FORMATS = (
("_data_format_NHWC", "NHWC"),
("_data_format_NCHW", "NCHW"),
)
class FusedBatchNormTest(xla_test.XLATestCase, parameterized.TestCase):
def _reference_training(self, x, scale, offset, epsilon, data_format):
if data_format != "NHWC":
raise ValueError("data_format must be NHWC, got %s." % data_format)
x_square = x * x
x_square_sum = np.sum(x_square, (0, 1, 2))
x_sum = np.sum(x, axis=(0, 1, 2))
element_count = np.size(x) / int(np.shape(x)[-1])
mean = x_sum / element_count
var = x_square_sum / element_count - mean * mean
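    # Bessel's correction: in training mode fused_batch_norm reports the
    # sample variance, so the reference computes the corrected variance too.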
factor = element_count / max(element_count - 1, 1)
corrected_var = var * factor
normalized = (x - mean) / np.sqrt(var + epsilon)
return (normalized * scale + offset), mean, var, corrected_var
def _reference_grad(self, x, grad_y, scale, mean, var, epsilon, data_format):
# Use the following formulas to calculate gradients:
# grad_scale =
# sum(grad_y * (x - mean)) * rsqrt(var + epsilon)
#
# grad_offset = sum(output_y)
#
# grad_x =
# 1/N * scale * rsqrt(var + epsilon) * (N * grad_y - sum(grad_y) -
# (x - mean) * sum(grad_y * (x - mean)) / (var + epsilon))
if data_format != "NHWC":
raise ValueError("data_format must be NHWC, got %s." % data_format)
grad_x = scale * (grad_y - np.mean(grad_y, axis=(0, 1, 2)) -
(x - mean) * np.mean(grad_y *
(x - mean), axis=(0, 1, 2)) /
(var + epsilon)) / np.sqrt(var + epsilon)
grad_scale = np.sum(
grad_y * (x - mean) / np.sqrt(var + epsilon), axis=(0, 1, 2))
grad_offset = np.sum(grad_y, axis=(0, 1, 2))
return grad_x, grad_scale, grad_offset
@parameterized.named_parameters(*DATA_FORMATS)
def testInference(self, data_format):
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
data_format_src = "NHWC"
y_ref, mean_ref, var_ref, _ = self._reference_training(
x_val, scale_val, offset_val, epsilon, data_format_src)
with self.session() as sess, self.test_scope():
# To avoid constant folding
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
y_ref_converted = test_utils.ConvertBetweenDataFormats(
y_ref, data_format_src, data_format)
t_val = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
offset = array_ops.placeholder(
np.float32, shape=scale_shape, name="offset")
y, mean, variance = nn.fused_batch_norm(
t_val,
scale,
offset,
mean=mean_ref,
variance=var_ref,
epsilon=epsilon,
data_format=data_format,
is_training=False)
y_val, _, _ = sess.run([y, mean, variance], {
t_val: x_val_converted,
scale: scale_val,
offset: offset_val
})
self.assertAllClose(y_val, y_ref_converted, atol=1e-3)
def _testLearning(self, use_gradient_checker, data_format):
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
data_format_src = "NHWC"
    # In training mode, fused_batch_norm applies an implicit Bessel's
    # correction, so we have to compare against the corrected variance here as
    # well (a standalone sketch of this correction follows the class below).
y_ref, mean_ref, _, var_ref_corr = self._reference_training(
x_val, scale_val, offset_val, epsilon, data_format_src)
with self.session() as sess, self.test_scope():
# To avoid constant folding
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
y_ref_converted = test_utils.ConvertBetweenDataFormats(
y_ref, data_format_src, data_format)
t_val = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
offset = array_ops.placeholder(
np.float32, shape=scale_shape, name="offset")
y, mean, var = nn.fused_batch_norm(
t_val,
scale,
offset,
mean=None,
variance=None,
epsilon=epsilon,
data_format=data_format,
is_training=True)
# Check gradient.
if use_gradient_checker:
err = gradient_checker.compute_gradient_error(
t_val,
x_val_converted.shape,
y,
x_val_converted.shape,
extra_feed_dict={
t_val: x_val_converted,
scale: scale_val,
offset: offset_val
})
self.assertLess(err, 1e-3)
y_val, mean_val, var_val = sess.run([y, mean, var], {
t_val: x_val_converted,
scale: scale_val,
offset: offset_val
})
self.assertAllClose(mean_val, mean_ref, atol=1e-3)
self.assertAllClose(y_val, y_ref_converted, atol=1e-3)
self.assertAllClose(var_val, var_ref_corr, atol=1e-3)
@parameterized.named_parameters(*DATA_FORMATS)
def testLearning(self, data_format):
self._testLearning(False, data_format)
@parameterized.named_parameters(*DATA_FORMATS)
def testLearningWithGradientChecker(self, data_format):
self._testLearning(True, data_format)
@parameterized.named_parameters(*DATA_FORMATS)
def testGradientTraining(self, data_format):
# TODO(b/64270657): Use gradient_checker here in addition to comparing with
# this reference implementation.
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
grad_val = np.random.random_sample(x_shape).astype(np.float32)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
    # The TensorFlow FusedBatchNormGrad training operation takes two inputs
    # with implementation-defined values. In theory, the only correct values
    # for these inputs are the corresponding reserve_space_{1|2} outputs from
    # the FusedBatchNorm training operation. However, in practice we rely on
    # the first one being the mean on both CPU and GPU, and the second one
    # being the variance on CPU and inverse(sqrt(variance + epsilon)) on GPU
    # (we test this assumption separately).
reserve_space_1_val = mean_val
if self.device == "XLA_GPU":
reserve_space_2_val = np.reciprocal(np.sqrt(var_val + epsilon))
else:
reserve_space_2_val = var_val
data_format_src = "NHWC"
grad_x_ref, grad_scale_ref, grad_offset_ref = self._reference_grad(
x_val, grad_val, scale_val, mean_val, var_val, epsilon, data_format_src)
with self.session() as sess, self.test_scope():
grad_val_converted = test_utils.ConvertBetweenDataFormats(
grad_val, data_format_src, data_format)
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
grad_x_ref_converted = test_utils.ConvertBetweenDataFormats(
grad_x_ref, data_format_src, data_format)
grad = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="grad")
x = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
reserve_space_1 = array_ops.placeholder(
np.float32, shape=scale_shape, name="reserve_space_1")
reserve_space_2 = array_ops.placeholder(
np.float32, shape=scale_shape, name="reserve_space_2")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
grad_x, grad_scale, grad_offset, _, _ = gen_nn_ops.fused_batch_norm_grad(
grad,
x,
scale,
reserve_space_1,
reserve_space_2,
data_format=data_format,
is_training=True)
grad_x_val, grad_scale_val, grad_offset_val = sess.run(
[grad_x, grad_scale, grad_offset], {
grad: grad_val_converted,
x: x_val_converted,
reserve_space_1: reserve_space_1_val,
reserve_space_2: reserve_space_2_val,
scale: scale_val
})
self.assertAllClose(grad_x_val, grad_x_ref_converted, atol=1e-2)
self.assertAllClose(grad_scale_val, grad_scale_ref, atol=1e-2)
self.assertAllClose(grad_offset_val, grad_offset_ref, atol=1e-3)
@parameterized.named_parameters(*DATA_FORMATS)
def testGradientInference(self, data_format):
# TODO(b/64270657): Use gradient_checker here in addition to comparing with
# this reference implementation.
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
grad_val = np.random.random_sample(x_shape).astype(np.float32)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
data_format_src = "NHWC"
with self.session() as sess, self.test_scope():
grad_val_converted = test_utils.ConvertBetweenDataFormats(
grad_val, data_format_src, data_format)
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
grad = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="grad")
x = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
mean = array_ops.placeholder(np.float32, shape=scale_shape, name="mean")
var = array_ops.placeholder(np.float32, shape=scale_shape, name="var")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
with self.test_scope():
out = gen_nn_ops.fused_batch_norm_grad(
grad,
x,
scale,
mean,
var,
data_format=data_format,
is_training=False)
grad_x, grad_scale, grad_offset, _, _ = out
ref_x, ref_scale, ref_offset, _, _ = gen_nn_ops.fused_batch_norm_grad(
grad, x, scale, mean, var, data_format=data_format, is_training=False)
      grad_x_val, grad_scale_val, grad_offset_val = sess.run(
[grad_x, grad_scale, grad_offset], {
grad: grad_val_converted,
x: x_val_converted,
mean: mean_val,
var: var_val,
scale: scale_val
})
      grad_x_ref, grad_scale_ref, grad_offset_ref = sess.run(
[ref_x, ref_scale, ref_offset], {
grad: grad_val_converted,
x: x_val_converted,
mean: mean_val,
var: var_val,
scale: scale_val
})
self.assertAllClose(grad_x_val, grad_x_ref, atol=1e-2)
self.assertAllClose(grad_scale_val, grad_scale_ref, atol=1e-2)
self.assertAllClose(grad_offset_val, grad_offset_ref, atol=1e-3)
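

# Editorial sketch (not part of the original test file): a minimal NumPy-only
# illustration of the Bessel's correction noted in _testLearning above. In
# training mode, fused_batch_norm normalizes with the biased (1/N) batch
# variance but reports a corrected (N/(N-1)) variance, which is why the test
# compares against corrected_var. This helper is hypothetical and is never
# called by the tests.
def _bessel_correction_sketch():
  """Returns (mean, biased variance, Bessel-corrected variance) per channel."""
  x = np.random.random_sample([2, 2, 6, 3]).astype(np.float32)  # NHWC input
  flat = np.reshape(x, (-1, x.shape[-1]))
  n = flat.shape[0]                             # elements per channel
  mean = np.mean(flat, axis=0)
  var = np.var(flat, axis=0)                    # biased: divides by N
  corrected_var = var * n / max(n - 1, 1)       # Bessel's correction
  return mean, var, corrected_var
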
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/fused_batchnorm_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for quantized operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class QuantizedOpsTest(xla_test.XLATestCase):
# Verify that quantized types can be clustered by XLA.
def testQuantizedTypeRoundtrip(self):
with self.session() as session:
for dtype in self.quantized_tf_types:
in_values = np.array([1, 2, 3, 4, 5, 6])
expected = [[1, 2], [3, 4], [5, 6]]
with self.test_scope():
p = array_ops.placeholder(dtype=dtypes.int32)
x = math_ops.cast(p, dtype)
x = array_ops.reshape(x, [3, 2])
value = session.run(x, {p: in_values})
self.assertAllEqual(value, expected)
class DequantizedOpsTest(xla_test.XLATestCase):
def pack_uint8_r2_to_uint32(self, test_input):
num_rows, num_columns = test_input.get_shape().as_list()
num_output_columns = int(math.ceil(num_columns / 4.0))
padding_input = array_ops.pad(
math_ops.cast(test_input, dtype=dtypes.uint8),
        constant_op.constant(
            [[0, 0], [0, num_output_columns * 4 - num_columns]]))
output = array_ops.zeros([num_rows, num_output_columns],
dtype=dtypes.uint32)
num_elements_per_pack = 4
shift_bits = 8
iota_r1 = math_ops.range(num_output_columns * num_elements_per_pack)
for p in range(num_elements_per_pack):
selected_index = math_ops.equal(
math_ops.mod(iota_r1, num_elements_per_pack), p)
gather_index = array_ops.boolean_mask(iota_r1, selected_index)
gathered_input = array_ops.gather(padding_input, gather_index, axis=1)
total_shift_bits = shift_bits * (num_elements_per_pack - p - 1)
left_shift_input = bitwise_ops.left_shift(
math_ops.cast(gathered_input, dtype=dtypes.uint32), total_shift_bits)
output = bitwise_ops.bitwise_or(output, left_shift_input)
return output
def testDequantizeQuint8(self):
num_rows = 100
num_columns = 3547
random_input = np.random.normal(128.0, 10.0, [num_rows, num_columns])
with self.session() as session:
with ops.device("CPU"):
test_input = ops.convert_to_tensor(random_input, dtype=dtypes.float32)
transposed_input = array_ops.transpose(test_input, [1, 0])
quantized_input = array_ops.quantize(transposed_input, 0.0, 255.0,
dtypes.quint8)
packed_input = self.pack_uint8_r2_to_uint32(quantized_input.output)
with self.test_scope():
transposed_quantized_output = xla.dequantize(packed_input, 0.0, 255.0,
"MIN_COMBINED", True)
quantized_output = array_ops.slice(transposed_quantized_output, [0, 0],
[num_rows, num_columns])
value = session.run(quantized_output)
self.assertAllClose(value, random_input, 1.0)
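

# Editorial sketch (not part of the original test file): a NumPy analogue of
# pack_uint8_r2_to_uint32 above, assuming the column count is already a
# multiple of four. Each group of four uint8 values becomes one uint32, with
# the first value in the most significant byte. Hypothetical helper, not
# called by the tests.
def _pack_uint8_sketch():
  values = np.arange(8, dtype=np.uint8).reshape(1, 8)   # one row, 8 columns
  grouped = values.reshape(values.shape[0], -1, 4).astype(np.uint32)
  shifts = np.array([24, 16, 8, 0], dtype=np.uint32)    # 8 bits per element
  packed = np.bitwise_or.reduce(grouped << shifts, axis=-1)
  return packed                                         # [[0x00010203, 0x04050607]]
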
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/quantized_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for xla.reduce_window."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class ReduceWindowTest(xla_test.XLATestCase):
"""Test cases for xla.reduce_window."""
def _reduce_window(self, operand, init, reducer, **kwargs):
with self.session():
placeholder = array_ops.placeholder(operand.dtype)
with self.test_scope():
output = xla.reduce_window(placeholder, init, reducer, **kwargs)
return output.eval(feed_dict={placeholder: operand})
def testReduceWindow(self):
# TODO(b/77644762): float16 and float64 ReduceWindow are unimplemented.
for dtype in set(self.numeric_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
@function.Defun(dtype, dtype)
def sum_reducer(x, y):
return x + y
@function.Defun(dtype, dtype)
def mul_reducer(x, y):
return x * y
self.assertAllClose(
np.array([3, 5, 7, 9, 11, 13], dtype=dtype),
self._reduce_window(
np.array([1, 2, 3, 4, 5, 6, 7], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[2]))
self.assertAllClose(
np.array([3, 7, 11], dtype=dtype),
self._reduce_window(
np.array([1, 2, 3, 4, 5, 6, 7], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[2],
window_strides=[2]))
self.assertAllClose(
np.array([1, 4, 7], dtype=dtype),
self._reduce_window(
np.array([1, 2, 3, 4, 5, 6, 7], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[1],
window_strides=[3]))
self.assertAllClose(
np.array([[24, 36, 24], [96, 0, 0]], dtype=dtype),
self._reduce_window(
np.array([[1, 2, 3, 4], [4, 3, 2, 1], [2, 4, 0, 1]], dtype=dtype),
1.0,
mul_reducer,
window_dimensions=[2, 2],
window_strides=[1, 1]))
self.assertAllClose(
np.array([[0, 0, 0], [5, 10, 5], [2, 4, 1], [0, 0, 0]], dtype=dtype),
self._reduce_window(
np.array([[1, 2, 3, 4], [4, 3, 2, 1], [2, 4, 0, 1]], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[2, 2],
window_strides=[2, 2],
padding=[[2, 3], [1, 2]]))
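

# Editorial sketch (not part of the original test file): a plain-NumPy
# rendering of the first assertion above -- a 1-D sum reduce_window with
# window_dimensions=[2] and unit stride sums each length-2 window of the
# input. Hypothetical helper, not called by the tests.
def _reduce_window_sum_sketch():
  operand = np.array([1, 2, 3, 4, 5, 6, 7], dtype=np.float32)
  window = 2
  return np.array([operand[i:i + window].sum()
                   for i in range(len(operand) - window + 1)])  # [3 5 7 9 11 13]
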
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/reduce_window_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA listdiff operator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ListDiffTest(xla_test.XLATestCase):
def _testListDiff(self, x, y, out, idx):
for dtype in [dtypes.int32, dtypes.int64]:
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.session():
x_tensor = ops.convert_to_tensor(x, dtype=dtype)
y_tensor = ops.convert_to_tensor(y, dtype=dtype)
with self.test_scope():
out_tensor, idx_tensor = array_ops.listdiff(
x_tensor, y_tensor, out_idx=index_dtype)
tf_out, tf_idx = self.evaluate([out_tensor, idx_tensor])
self.assertAllEqual(out, tf_out)
self.assertAllEqual(idx, tf_idx)
self.assertEqual(1, out_tensor.get_shape().ndims)
self.assertEqual(1, idx_tensor.get_shape().ndims)
def testBasic1(self):
self._testListDiff(x=[1, 2, 3, 4], y=[1, 2], out=[3, 4], idx=[2, 3])
def testBasic2(self):
self._testListDiff(x=[1, 2, 3, 4], y=[2], out=[1, 3, 4], idx=[0, 2, 3])
def testBasic3(self):
self._testListDiff(x=[1, 4, 3, 2], y=[4, 2], out=[1, 3], idx=[0, 2])
def testDuplicates(self):
self._testListDiff(x=[1, 2, 4, 3, 2, 3, 3, 1],
y=[4, 2],
out=[1, 3, 3, 3, 1],
idx=[0, 3, 5, 6, 7])
def testRandom(self):
num_random_tests = 10
int_low = -7
int_high = 8
max_size = 50
for _ in xrange(num_random_tests):
x_size = np.random.randint(max_size + 1)
x = np.random.randint(int_low, int_high, size=x_size)
y_size = np.random.randint(max_size + 1)
y = np.random.randint(int_low, int_high, size=y_size)
out_idx = [(entry, pos) for pos, entry in enumerate(x) if entry not in y]
if out_idx:
out, idx = map(list, zip(*out_idx))
else:
out = []
idx = []
self._testListDiff(list(x), list(y), out, idx)
def testFullyOverlapping(self):
self._testListDiff(x=[1, 2, 3, 4], y=[1, 2, 3, 4], out=[], idx=[])
def testNonOverlapping(self):
self._testListDiff(x=[1, 2, 3, 4],
y=[5, 6],
out=[1, 2, 3, 4],
idx=[0, 1, 2, 3])
def testEmptyX(self):
self._testListDiff(x=[], y=[1, 2], out=[], idx=[])
def testEmptyY(self):
self._testListDiff(x=[1, 2, 3, 4], y=[], out=[1, 2, 3, 4], idx=[0, 1, 2, 3])
def testEmptyXY(self):
self._testListDiff(x=[], y=[], out=[], idx=[])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/listdiff_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Proximal Gradient Descent optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import proximal_gradient_descent
class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
def testResourceProximalGradientDescentwithoutRegularization(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run 3 steps Proximal Gradient Descent.
for _ in range(3):
update.run()
self.assertAllClose(np.array([-0.9, -1.8]), self.evaluate(var0))
self.assertAllClose(np.array([-0.09, -0.18]), self.evaluate(var1))
def testProximalGradientDescentwithoutRegularization2(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 3 steps Proximal Gradient Descent
for _ in range(3):
update.run()
self.assertAllClose(np.array([0.1, 0.2]), self.evaluate(var0))
self.assertAllClose(np.array([3.91, 2.82]), self.evaluate(var1))
def testProximalGradientDescentWithL1(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.001, l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps proximal gradient descent.
for _ in range(10):
update.run()
self.assertAllClose(np.array([-1.988, -3.988001]), self.evaluate(var0))
self.assertAllClose(np.array([3.67, 2.37]), self.evaluate(var1))
def testProximalGradientDescentWithL1_L2(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.001, l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps Proximal Gradient Descent
for _ in range(10):
update.run()
self.assertAllClose(np.array([-0.0495, -0.0995]), self.evaluate(var0))
self.assertAllClose(np.array([-0.0045, -0.0095]), self.evaluate(var1))
def applyOptimizer(self, opt, steps=5):
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run ProximalAdagrad for a few steps
for _ in range(steps):
update.run()
return self.evaluate(var0), self.evaluate(var1)
def testEquivGradientDescentwithoutRegularization(self):
with self.session(), self.test_scope():
val0, val1 = self.applyOptimizer(
proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0))
with self.session(), self.test_scope():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0))
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/proximal_gradient_descent_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops which manipulate lists of tensors via bridge."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.platform import test
class ListOpsTest(xla_test.XLATestCase):
def testElementShape(self):
with self.session() as sess, self.test_scope():
dim = array_ops.placeholder(dtypes.int32)
l = list_ops.empty_tensor_list(
element_shape=(dim, 15),
element_dtype=dtypes.float32,
max_num_elements=20)
e32 = list_ops.tensor_list_element_shape(l, shape_type=dtypes.int32)
e64 = list_ops.tensor_list_element_shape(l, shape_type=dtypes.int64)
self.assertAllEqual(sess.run(e32, {dim: 10}), (10, 15))
self.assertAllEqual(sess.run(e64, {dim: 7}), (7, 15))
def testPushPop(self):
with self.session() as sess, self.test_scope():
l = list_ops.empty_tensor_list(
element_shape=(7, 15),
element_dtype=dtypes.float32,
max_num_elements=10)
l = list_ops.tensor_list_push_back(
l, constant_op.constant(1.0, shape=(7, 15)))
l = list_ops.tensor_list_push_back(
l, constant_op.constant(2.0, shape=(7, 15)))
l, e2 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
_, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(sess.run(e2), 2.0 * np.ones((7, 15)))
self.assertAllEqual(sess.run(e1), 1.0 * np.ones((7, 15)))
def testDoNotConstantFoldVariants(self):
with self.session() as sess, self.test_scope():
val = array_ops.placeholder(dtype=dtypes.float32)
l = list_ops.empty_tensor_list(
element_shape=(7, 15),
element_dtype=dtypes.float32,
max_num_elements=10)
# Note: Pushing a Placeholder will force the constant folding code
# to build a Const node with a DT_VARIANT output. This tests that XLA
      # passes a cf_consider_fn which prevents folding such nodes.
l = list_ops.tensor_list_push_back(
l, array_ops.fill(value=val, dims=(7, 15)))
l = list_ops.tensor_list_push_back(
l, constant_op.constant(2.0, shape=(7, 15)))
l, e2 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
_, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(sess.run(e2, {val: 1.0}), 2.0 * np.ones((7, 15)))
self.assertAllEqual(sess.run(e1, {val: 1.0}), 1.0 * np.ones((7, 15)))
def testPushPopSeparateLists(self):
with self.session() as sess, self.test_scope():
l = list_ops.empty_tensor_list(
element_shape=[],
element_dtype=dtypes.float32,
max_num_elements=20)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l2 = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
l3 = list_ops.tensor_list_push_back(l, constant_op.constant(3.0))
_, e11 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
l2, e21 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
l2, e22 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
l3, e31 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
l3, e32 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
result = sess.run([e11, [e21, e22], [e31, e32]])
self.assertEqual(result, [1.0, [2.0, 1.0], [3.0, 1.0]])
def testEmptyTensorListNoMax(self):
with self.session() as sess, self.test_scope():
l = list_ops.empty_tensor_list(
element_shape=(7, 15), element_dtype=dtypes.float32)
l = list_ops.tensor_list_push_back(
l, constant_op.constant(1.0, shape=(7, 15)))
_, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Set the max number of elements"):
self.assertAllEqual(sess.run(e), 1.0 * np.ones((7, 15)))
def testEmptyTensorListMax(self):
with self.session() as sess, self.test_scope():
l = list_ops.empty_tensor_list(
element_shape=(10, 15), element_dtype=dtypes.float32,
max_num_elements=2)
l = list_ops.tensor_list_push_back(
l, array_ops.fill(value=3.0, dims=(10, 15)))
_, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(sess.run(e), 3.0 * np.ones((10, 15)))
def testListFromTensor(self):
with self.session(), self.test_scope():
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=[])
e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(e, 1.0)
l, e0 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(e0, 2.0)
l, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(e1, 1.0)
self.assertAllEqual(list_ops.tensor_list_length(l), 2)
def testGetSet(self):
with self.session(), self.test_scope():
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=[])
e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(e0, 1.0)
l = list_ops.tensor_list_set_item(l, 0, 3.0)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [3.0, 2.0])
def testSetDoesNotUpdatePushIndex(self):
with self.session(), self.test_scope():
l = list_ops.empty_tensor_list(
element_shape=[], element_dtype=dtypes.float32, max_num_elements=2)
# SetItem should not change the push index.
l = list_ops.tensor_list_set_item(l, 1, 3.)
l = list_ops.tensor_list_push_back(l, 5.)
l = list_ops.tensor_list_push_back(l, 7.)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [5., 7.])
def testGetSetReserved(self):
with self.session(), self.test_scope():
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=2)
e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(e0, 0.0)
l = list_ops.tensor_list_set_item(l, 0, 3.0)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [3.0, 0.0])
def testSetStackReservedUnknownElementShape(self):
with self.session(), self.test_scope():
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=2)
l = list_ops.tensor_list_set_item(l, 0, [3.0, 4.0])
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [[3.0, 4.0], [0., 0.]])
def testPushInEmptyListWithUnknownElementShape(self):
with self.session(), self.test_scope():
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=None, max_num_elements=2)
l = list_ops.tensor_list_push_back(l, [3.0, 4.0])
# Pushing an element with a different shape should raise an error.
with self.assertRaisesRegexp(errors.InternalError, "shape"):
l = list_ops.tensor_list_push_back(l, 5.)
self.evaluate(
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32))
def testGetSetReservedNonScalar(self):
with self.session() as sess, self.test_scope():
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32,
element_shape=(7, 15),
num_elements=2)
l = list_ops.tensor_list_set_item(
l, 0, constant_op.constant(1.0, shape=(7, 15)))
e1 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
e2 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
self.assertAllEqual(sess.run(e1), np.ones((7, 15)))
self.assertAllEqual(sess.run(e2), np.zeros((7, 15)))
def testStack(self):
with self.session(), self.test_scope():
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=2)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(e, 1.0)
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t.shape.as_list(), [None])
self.assertAllEqual(t, [1.0, 2.0])
def testStackWithUninitializedTensors(self):
with self.session(), self.test_scope():
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=3)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [0., 0., 0.])
def testZerosLikeForTensorList(self):
with self.session(), self.test_scope():
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=2)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
z = array_ops.zeros_like(l)
z = list_ops.tensor_list_stack(z, element_dtype=dtypes.float32)
self.assertAllEqual(z.shape.as_list(), [None])
self.assertAllEqual(z, [0.0, 0.0])
if __name__ == "__main__":
os.environ['TF_XLA_FLAGS'] = ('--tf_xla_min_cluster_size=2 ' +
os.environ.get('TF_XLA_FLAGS', ''))
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/tensor_list_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Cholesky."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class CholeskyOpTest(xla_test.XLATestCase):
# Cholesky defined for float64, float32, complex64, complex128
# (https://www.tensorflow.org/api_docs/python/tf/cholesky)
@property
def float_types(self):
return set(super(CholeskyOpTest, self).float_types).intersection(
(np.float64, np.float32, np.complex64, np.complex128))
def _verifyCholeskyBase(self, sess, placeholder, x, chol, verification, atol):
chol_np, verification_np = sess.run([chol, verification], {placeholder: x})
self.assertAllClose(x, verification_np, atol=atol)
self.assertShapeEqual(x, chol)
# Check that the cholesky is lower triangular, and has positive diagonal
# elements.
if chol_np.shape[-1] > 0:
chol_reshaped = np.reshape(chol_np, (-1, chol_np.shape[-2],
chol_np.shape[-1]))
for chol_matrix in chol_reshaped:
self.assertAllClose(chol_matrix, np.tril(chol_matrix), atol=atol)
self.assertTrue((np.diag(chol_matrix) > 0.0).all())
def _verifyCholesky(self, x, atol=1e-6):
# Verify that LL^T == x.
with self.session() as sess:
placeholder = array_ops.placeholder(
dtypes.as_dtype(x.dtype), shape=x.shape)
with self.test_scope():
chol = linalg_ops.cholesky(placeholder)
verification = math_ops.matmul(chol, chol, adjoint_b=True)
self._verifyCholeskyBase(sess, placeholder, x, chol, verification, atol)
def testBasic(self):
data = np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]])
for dtype in self.float_types:
self._verifyCholesky(data.astype(dtype))
def testBatch(self):
for dtype in self.float_types:
simple_array = np.array(
[[[1., 0.], [0., 5.]]], dtype=dtype) # shape (1, 2, 2)
self._verifyCholesky(simple_array)
self._verifyCholesky(np.vstack((simple_array, simple_array)))
odd_sized_array = np.array(
[[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]], dtype=dtype)
self._verifyCholesky(np.vstack((odd_sized_array, odd_sized_array)))
# Generate random positive-definite matrices.
matrices = np.random.rand(10, 5, 5).astype(dtype)
for i in xrange(10):
matrices[i] = np.dot(matrices[i].T, matrices[i])
self._verifyCholesky(matrices, atol=1e-4)
def testNonSquareMatrix(self):
for dtype in self.float_types:
with self.assertRaises(ValueError):
linalg_ops.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]], dtype=dtype))
with self.assertRaises(ValueError):
linalg_ops.cholesky(
np.array(
[[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]]],
dtype=dtype))
def testWrongDimensions(self):
for dtype in self.float_types:
tensor3 = constant_op.constant([1., 2.], dtype=dtype)
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
def testLarge2000x2000(self):
n = 2000
shape = (n, n)
data = np.ones(shape).astype(np.float32) / (2.0 * n) + np.diag(
np.ones(n).astype(np.float32))
self._verifyCholesky(data, atol=1e-4)
def testMatrixConditionNumbers(self):
for dtype in self.float_types:
condition_number = 1000
size = 20
# Generate random positive-definite symmetric matrices, and take their
# Eigendecomposition.
matrix = np.random.rand(size, size)
matrix = np.dot(matrix.T, matrix)
_, w = np.linalg.eigh(matrix)
# Build new Eigenvalues exponentially distributed between 1 and
# 1/condition_number
v = np.exp(-np.log(condition_number) * np.linspace(0, size, size) / size)
matrix = np.dot(np.dot(w, np.diag(v)), w.T).astype(dtype)
self._verifyCholesky(matrix, atol=1e-4)
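

# Editorial sketch (not part of the original test file): the NumPy analogue of
# the property checked by _verifyCholesky above -- for a symmetric
# positive-definite matrix, the Cholesky factor is lower triangular with a
# positive diagonal and reconstructs the input as L * L^T. Hypothetical
# helper, not called by the tests.
def _cholesky_property_sketch():
  a = np.array([[4., -1., 2.], [-1., 6., 0.], [2., 0., 5.]])
  chol = np.linalg.cholesky(a)
  assert np.allclose(chol, np.tril(chol))          # lower triangular
  assert (np.diag(chol) > 0.0).all()               # positive diagonal
  assert np.allclose(np.dot(chol, chol.T), a)      # L L^T == a
  return chol
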
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/cholesky_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class QrOpTest(xla_test.XLATestCase, parameterized.TestCase):
def AdjustedNorm(self, x):
"""Computes the norm of matrices in 'x', adjusted for dimension and type."""
norm = np.linalg.norm(x, axis=(-2, -1))
return norm / (max(x.shape[-2:]) * np.finfo(x.dtype).eps)
def CompareOrthogonal(self, x, y, rank):
# We only compare the first 'rank' orthogonal vectors since the
# remainder form an arbitrary orthonormal basis for the
# (row- or column-) null space, whose exact value depends on
# implementation details. Notice that since we check that the
# matrices of singular vectors are unitary elsewhere, we do
# implicitly test that the trailing vectors of x and y span the
# same space.
x = x[..., 0:rank]
y = y[..., 0:rank]
# Q is only unique up to sign (complex phase factor for complex matrices),
# so we normalize the sign first.
sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
x *= phases
self.assertTrue(np.all(self.AdjustedNorm(x - y) < 30.0))
def CheckApproximation(self, a, q, r):
# Tests that a ~= q*r.
precision = self.AdjustedNorm(a - np.matmul(q, r))
self.assertTrue(np.all(precision < 10.0))
def CheckUnitary(self, x):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
xx = math_ops.matmul(x, x, adjoint_a=True)
identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
precision = self.AdjustedNorm(xx.eval() - self.evaluate(identity))
self.assertTrue(np.all(precision < 5.0))
def _test(self, dtype, shape, full_matrices):
np.random.seed(1)
x_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype)
with self.session() as sess:
x_tf = array_ops.placeholder(dtype)
with self.test_scope():
q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices)
q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np})
q_dims = q_tf_val.shape
np_q = np.ndarray(q_dims, dtype)
np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))
new_first_dim = np_q_reshape.shape[0]
x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))
for i in range(new_first_dim):
if full_matrices:
np_q_reshape[i, :, :], _ = np.linalg.qr(
x_reshape[i, :, :], mode="complete")
else:
np_q_reshape[i, :, :], _ = np.linalg.qr(
x_reshape[i, :, :], mode="reduced")
np_q = np.reshape(np_q_reshape, q_dims)
self.CompareOrthogonal(np_q, q_tf_val, min(shape[-2:]))
self.CheckApproximation(x_np, q_tf_val, r_tf_val)
self.CheckUnitary(q_tf_val)
SIZES = [1, 2, 5, 10, 32, 100, 300]
DTYPES = [np.float32]
PARAMS = itertools.product(SIZES, SIZES, DTYPES)
@parameterized.parameters(*PARAMS)
def testQR(self, rows, cols, dtype):
# TODO(b/111317468): Test other types.
for full_matrices in [True, False]:
# Only tests the (3, 2) case for small numbers of rows/columns.
for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
self._test(dtype, batch_dims + (rows, cols), full_matrices)
def testLarge2000x2000(self):
self._test(np.float32, (2000, 2000), full_matrices=True)
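

# Editorial sketch (not part of the original test file): Q from a QR
# factorization is only unique up to a per-column sign (or complex phase), so
# CompareOrthogonal above normalizes that phase before comparing. The helper
# below is hypothetical and is never called by the tests.
def _qr_phase_normalization_sketch():
  q1, _ = np.linalg.qr(np.random.RandomState(0).randn(4, 3).astype(np.float32))
  q2 = q1 * np.array([1., -1., 1.], dtype=np.float32)  # flip one column's sign
  sum_of_ratios = np.sum(q2 / q1, axis=-2, keepdims=True)
  phases = sum_of_ratios / np.abs(sum_of_ratios)
  assert np.allclose(q1 * phases, q2)                  # phases undo the flip
  return phases
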
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/qr_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Ftrl optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import gradient_descent
class FtrlOptimizerTest(xla_test.XLATestCase):
def initVariableAndGradient(self, dtype):
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.02, 0.04], dtype=dtype)
return var0, var1, grads0, grads1
def equivAdagradTest_FtrlPart(self, steps, dtype):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
opt = ftrl.FtrlOptimizer(
3.0,
learning_rate_power=-0.5, # using Adagrad learning rate
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run Ftrl for a few steps
for _ in range(steps):
ftrl_update.run()
return self.evaluate(var0), self.evaluate(var1)
def equivAdagradTest_AdagradPart(self, steps, dtype):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
adagrad_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run Adagrad for a few steps
for _ in range(steps):
adagrad_update.run()
return self.evaluate(var0), self.evaluate(var1)
def equivGradientDescentTest_FtrlPart(self, steps, dtype):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
opt = ftrl.FtrlOptimizer(
3.0,
learning_rate_power=-0.0, # using Fixed learning rate
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run Ftrl for a few steps
for _ in range(steps):
ftrl_update.run()
return self.evaluate(var0), self.evaluate(var1)
def equivGradientDescentTest_GradientDescentPart(self, steps, dtype):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
opt = gradient_descent.GradientDescentOptimizer(3.0, name="sgd")
sgd_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run GradientDescent for a few steps
for _ in range(steps):
sgd_update.run()
return self.evaluate(var0), self.evaluate(var1)
def testFtrlwithoutRegularization(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run 3 steps FTRL
for _ in range(3):
ftrl_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-2.60260963, -4.29698515]),
self.evaluate(var0),
float_rtol=1e-4,
half_rtol=1e-2)
self.assertAllCloseAccordingToType(
np.array([-0.28432083, -0.56694895]),
self.evaluate(var1),
float_rtol=1e-5,
half_rtol=1e-2)
def testFtrlwithoutRegularization2(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 3 steps FTRL
for _ in range(3):
ftrl_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-2.55607247, -3.98729396]),
self.evaluate(var0),
1e-5,
1e-5,
float_rtol=1e-4)
self.assertAllCloseAccordingToType(
np.array([-0.28232238, -0.56096673]), self.evaluate(var1), 1e-5,
1e-5)
def testFtrlWithL1(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps FTRL
for _ in range(10):
ftrl_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-7.66718769, -10.91273689]),
self.evaluate(var0),
rtol=1e-4,
bfloat16_rtol=1e-1,
bfloat16_atol=1e-1)
self.assertAllCloseAccordingToType(
np.array([-0.93460727, -1.86147261]),
self.evaluate(var1),
rtol=1e-4)
def testFtrlWithL1_L2(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps FTRL
for _ in range(10):
ftrl_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-0.24059935, -0.46829352]),
self.evaluate(var0),
rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array([-0.02406147, -0.04830509]),
self.evaluate(var1),
rtol=1e-5)
def testFtrlWithL1_L2_L2Shrinkage(self):
"""Test the new FTRL op with support for l2 shrinkage.
The addition of this parameter which places a constant pressure on weights
towards the origin causes the gradient descent trajectory to differ. The
weights will tend to have smaller magnitudes with this parameter set.
"""
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
# Run 10 steps FTRL
for _ in range(10):
ftrl_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-0.22578996, -0.44345799]),
self.evaluate(var0),
rtol=1e-4)
self.assertAllCloseAccordingToType(
np.array([-0.14378493, -0.13229476]),
self.evaluate(var1),
rtol=1e-4)
def testFtrlWithL2ShrinkageDoesNotChangeLrSchedule(self):
"""Verifies that l2 shrinkage in FTRL does not change lr schedule."""
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.1, 0.2], dtype=dtype)
opt0 = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
opt1 = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update0 = opt0.apply_gradients([(grads0, var0)])
update1 = opt1.apply_gradients([(grads1, var1)])
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var1))
# Run 10 steps FTRL
for _ in range(10):
update0.run()
update1.run()
# var0 is experiencing L2 shrinkage so it should be smaller than var1
# in magnitude.
self.assertTrue((var0.eval()**2 < self.evaluate(var1)**2).all())
accum0 = list(opt0._slots["accum"].values())[0].eval()
accum1 = list(opt1._slots["accum"].values())[0].eval()
# L2 shrinkage should not change how we update grad accumulator.
self.assertAllCloseAccordingToType(accum0, accum1)
  # When variables are initialized with zeros, FTRL-Proximal has two properties:
  # 1. Without L1/L2 regularization but with a fixed learning rate,
  #    FTRL-Proximal is identical to GradientDescent.
  # 2. Without L1/L2 regularization but with an adaptive learning rate,
  #    FTRL-Proximal is identical to Adagrad.
  # Based on these two properties, the tests below check that our
  # implementation of FTRL-Proximal performs the same updates as Adagrad or
  # GradientDescent. (A standalone NumPy sketch of the fixed-learning-rate
  # case follows the class below.)
def testEquivAdagradwithoutRegularization(self):
steps = 5
for dtype in self.float_types:
with self.session(), self.test_scope():
val0, val1 = self.equivAdagradTest_FtrlPart(steps, dtype)
with self.session(), self.test_scope():
val2, val3 = self.equivAdagradTest_AdagradPart(steps, dtype)
self.assertAllCloseAccordingToType(val0, val2, rtol=1e-4, half_rtol=1e-2)
self.assertAllCloseAccordingToType(val1, val3, rtol=1e-4, half_rtol=1e-2)
def testEquivGradientDescentwithoutRegularization(self):
steps = 5
for dtype in self.float_types:
with self.session(), self.test_scope():
val0, val1 = self.equivGradientDescentTest_FtrlPart(steps, dtype)
with self.session(), self.test_scope():
val2, val3 = self.equivGradientDescentTest_GradientDescentPart(
steps, dtype)
self.assertAllCloseAccordingToType(val0, val2, rtol=1e-5)
self.assertAllCloseAccordingToType(val1, val3, rtol=1e-5)
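

# Editorial sketch (not part of the original test file): the fixed-learning-rate
# case referenced in the comment above. With zero L1/L2 regularization and
# learning_rate_power=-0.0 (a fixed learning rate), FTRL-Proximal on
# zero-initialized variables reduces to plain gradient descent, w <- w - lr * g,
# which is what testEquivGradientDescentwithoutRegularization checks.
# Hypothetical helper, not called by the tests.
def _fixed_lr_sgd_sketch(steps=5, lr=3.0):
  w = np.zeros(2, dtype=np.float32)
  g = np.array([0.1, 0.2], dtype=np.float32)
  for _ in range(steps):
    w = w - lr * g          # one plain gradient-descent step
  return w
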
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/ftrl_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.MatrixTriangularSolve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def MakePlaceholder(x):
return array_ops.placeholder(dtypes.as_dtype(x.dtype), shape=x.shape)
class MatrixTriangularSolveOpTest(xla_test.XLATestCase):
# MatrixTriangularSolve defined for float64, float32, complex64, complex128
# (https://www.tensorflow.org/api_docs/python/tf/matrix_triangular_solve)
@property
def float_types(self):
return set(super(MatrixTriangularSolveOpTest,
self).float_types).intersection(
(np.float64, np.float32, np.complex64, np.complex128))
def _VerifyTriangularSolveBase(self, sess, placeholder_a, placeholder_ca,
placeholder_b, a, clean_a, b, verification,
atol):
feed_dict = {placeholder_a: a, placeholder_ca: clean_a, placeholder_b: b}
verification_np = sess.run(verification, feed_dict)
self.assertAllClose(b, verification_np, atol=atol)
def _VerifyTriangularSolve(self, a, b, lower, adjoint, atol):
clean_a = np.tril(a) if lower else np.triu(a)
with self.session() as sess:
placeholder_a = MakePlaceholder(a)
placeholder_ca = MakePlaceholder(clean_a)
placeholder_b = MakePlaceholder(b)
with self.test_scope():
x = linalg_ops.matrix_triangular_solve(
placeholder_a, placeholder_b, lower=lower, adjoint=adjoint)
verification = math_ops.matmul(placeholder_ca, x, adjoint_a=adjoint)
self._VerifyTriangularSolveBase(sess, placeholder_a, placeholder_ca,
placeholder_b, a, clean_a, b,
verification, atol)
def _VerifyTriangularSolveCombo(self, a, b, atol=1e-4):
transp = lambda x: np.swapaxes(x, -1, -2)
for lower, adjoint in itertools.product([True, False], repeat=2):
self._VerifyTriangularSolve(
a if lower else transp(a), b, lower, adjoint, atol)
def testBasic(self):
rng = np.random.RandomState(0)
a = np.tril(rng.randn(5, 5))
b = rng.randn(5, 7)
for dtype in self.float_types:
self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))
def testBasicNotActuallyTriangular(self):
rng = np.random.RandomState(0)
a = rng.randn(5, 5) # the `a` matrix is not lower-triangular
b = rng.randn(5, 7)
for dtype in self.float_types:
self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))
def testBasicComplexDtypes(self):
rng = np.random.RandomState(0)
a = np.tril(rng.randn(5, 5) + rng.randn(5, 5) * 1j)
b = rng.randn(5, 7) + rng.randn(5, 7) * 1j
for dtype in self.complex_types:
self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))
def testBatch(self):
rng = np.random.RandomState(0)
shapes = [((4, 3, 3), (4, 3, 5)), ((1, 2, 2), (1, 2, 1)),
((1, 1, 1), (1, 1, 2)), ((2, 3, 4, 4), (2, 3, 4, 1))]
tuples = itertools.product(self.float_types, shapes)
for dtype, (a_shape, b_shape) in tuples:
n = a_shape[-1]
a = np.tril(rng.rand(*a_shape) - 0.5) / (2.0 * n) + np.eye(n)
b = rng.randn(*b_shape)
self._VerifyTriangularSolveCombo(
a.astype(dtype), b.astype(dtype), atol=1e-3)
def testLarge(self):
n = 1024
rng = np.random.RandomState(0)
a = np.tril(rng.rand(n, n) - 0.5) / (2.0 * n) + np.eye(n)
b = rng.randn(n, n)
self._VerifyTriangularSolve(
a.astype(np.float32), b.astype(np.float32), True, False, 1e-4)
def testNonSquareCoefficientMatrix(self):
rng = np.random.RandomState(0)
for dtype in self.float_types:
a = rng.randn(3, 4).astype(dtype)
b = rng.randn(4, 4).astype(dtype)
with self.assertRaises(ValueError):
linalg_ops.matrix_triangular_solve(a, b)
with self.assertRaises(ValueError):
linalg_ops.matrix_triangular_solve(a, b)
def testWrongDimensions(self):
randn = np.random.RandomState(0).randn
for dtype in self.float_types:
lhs = constant_op.constant(randn(3, 3), dtype=dtype)
rhs = constant_op.constant(randn(4, 3), dtype=dtype)
with self.assertRaises(ValueError):
linalg_ops.matrix_triangular_solve(lhs, rhs)
with self.assertRaises(ValueError):
linalg_ops.matrix_triangular_solve(lhs, rhs)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/matrix_triangular_solve_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Proximal Adagrad optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import proximal_adagrad
class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
def testResourceProximalAdagradwithoutRegularization(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run 3 steps Proximal Adagrad.
for _ in range(3):
update.run()
self.assertAllClose(
np.array([-2.60260963, -4.29698515]), self.evaluate(var0))
self.assertAllClose(
np.array([-0.28432083, -0.56694895]), self.evaluate(var1))
opt_vars = opt.variables()
self.assertStartsWith(opt_vars[0].name, var0._shared_name)
self.assertStartsWith(opt_vars[1].name, var1._shared_name)
self.assertEqual(2, len(opt_vars))
def testProximalAdagradwithoutRegularization2(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 3 steps Proximal Adagrad.
for _ in range(3):
update.run()
self.assertAllClose(np.array([-1.60261, -2.296985]), self.evaluate(var0))
self.assertAllClose(np.array([3.715679, 2.433051]), self.evaluate(var1))
def testProximalAdagradWithL1(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps Proximal Adagrad
for _ in range(10):
update.run()
self.assertAllClose(np.array([-6.663634, -9.190331]), self.evaluate(var0))
self.assertAllClose(np.array([2.959304, 1.029232]), self.evaluate(var1))
def testProximalAdagradWithL1_L2(self):
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps Proximal Adagrad.
for _ in range(10):
update.run()
self.assertAllClose(np.array([-0.0495, -0.0995]), self.evaluate(var0))
self.assertAllClose(np.array([-0.0045, -0.0095]), self.evaluate(var1))
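  # Rough numpy sketch of the update behind the expected values above (our
  # paraphrase of the documented ProximalAdagrad rule, not the kernel source):
  #   accum += g * g
  #   lr_t   = lr / sqrt(accum)
  #   prox   = var - lr_t * g
  #   var    = sign(prox) * max(abs(prox) - lr_t * l1, 0) / (1 + lr_t * l2)
  # With l1 = l2 = 0 the proximal step is the identity, which is why the
  # equivalence test against plain Adagrad further below holds.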
def applyOptimizer(self, opt, steps=5):
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run ProximalAdagrad for a few steps
for _ in range(steps):
update.run()
return self.evaluate(var0), self.evaluate(var1)
def testEquivAdagradwithoutRegularization(self):
with self.session(), self.test_scope():
val0, val1 = self.applyOptimizer(
proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0))
with self.session(), self.test_scope():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(
3.0, initial_accumulator_value=0.1))
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/proximal_adagrad_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the behavior of the auto-compilation pass."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
class ClusteringTest(xla_test.XLATestCase):
def testAdd(self):
val1 = np.array([4, 3, 2, 1], dtype=np.float32)
val2 = np.array([5, 6, 7, 8], dtype=np.float32)
expected = val1 + val2
with self.session():
with self.test_scope():
input1 = constant_op.constant(val1, name="const1")
input2 = constant_op.constant(val2, name="const2")
output = math_ops.add(input1, input2)
result = self.evaluate(output)
self.assertAllClose(result, expected, rtol=1e-3)
def testAddFromCpuMultiple(self):
val1 = np.array([4, 3, 2, 1]).astype(np.float32)
val2 = np.array([5, 6, 7, 8]).astype(np.float32)
expected = val1 + val2
with self.session():
with ops.device(CPU_DEVICE):
input1 = constant_op.constant(val1, name="const1")
input2 = constant_op.constant(val2, name="const2")
with self.test_scope():
output = math_ops.add(input1, input2)
for _ in xrange(10):
result = self.evaluate(output)
self.assertAllClose(result, expected, rtol=1e-3)
def testDeadlock(self):
# Builds a graph of the form:
# x -> y
# | \
# z -> w
# where x and z are placed on the CPU and y and w are placed on the XLA
# device. If y and w are clustered for compilation, then the graph will
# deadlock since the clustered graph will contain a self-loop.
with self.session() as sess:
with ops.device(CPU_DEVICE):
x = array_ops.placeholder(dtypes.float32, [2])
with self.test_scope():
y = x * 2
with ops.device(CPU_DEVICE):
z = y * y
with self.test_scope():
w = y + z
result = sess.run(w, {x: [1.5, 0.5]})
self.assertAllClose(result, [12., 2.], rtol=1e-3)
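      # Worked by hand: x = [1.5, 0.5] gives y = 2*x = [3, 1], z = y*y = [9, 1]
      # and w = y + z = [12, 2], which is the value asserted above.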
def testHostMemory(self):
with self.session() as sess:
x = array_ops.placeholder(dtypes.int32)
with self.test_scope():
y = x + 1
with ops.device(CPU_DEVICE):
# Place a computation on the CPU, so y and w cannot be merged into the
# same JIT compilation.
z = y * 2
with self.test_scope():
# Argument 'y' is a non-constant output of a previous cluster. Make sure
# it is properly copied to host memory so it can be used as a
# compile-time constant input for this cluster.
w = array_ops.reshape(z, y)
result = sess.run(w, {x: [1, 0]})
expected = np.array([[4], [2]], dtype=np.int32)
self.assertAllClose(expected, result, rtol=1e-3)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/clustering_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA op wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.compiler.xla import xla_data_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class XlaOpsNumericalTest(xla_test.XLATestCase, parameterized.TestCase):
def _assertOpOutputMatchesExpected(self, op, args, expected,
equality_fn=None):
with self.session() as session:
with self.test_scope():
placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
for arg in args
]
feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
output = op(*placeholders)
result = session.run(output, feeds)
if not equality_fn:
equality_fn = self.assertAllClose
equality_fn(result, expected, rtol=1e-3)
def testAdd(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
xla.add,
args=(np.array([1, 2, 3], dtype=dtype),
np.array([4, 5, 6], dtype=dtype)),
expected=np.array([5, 7, 9], dtype=dtype))
self._assertOpOutputMatchesExpected(
lambda x, y: xla.add(x, y, broadcast_dims=(0,)),
args=(np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([7, 11], dtype=dtype)),
expected=np.array([[8, 9], [14, 15]], dtype=dtype))
self._assertOpOutputMatchesExpected(
lambda x, y: xla.add(x, y, broadcast_dims=(1,)),
args=(np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([7, 11], dtype=dtype)),
expected=np.array([[8, 13], [10, 15]], dtype=dtype))
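      # broadcast_dims names the axis of the rank-2 operand that the rank-1
      # operand is lined up with; for the arrays above this is roughly the
      # numpy equivalent (our reading of the op, not an official definition):
      #   broadcast_dims=(0,)  ~  x + y[:, None]  -> [[8, 9], [14, 15]]
      #   broadcast_dims=(1,)  ~  x + y[None, :]  -> [[8, 13], [10, 15]]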
def testBroadcast(self):
for dtype in self.numeric_types:
v = np.arange(4, dtype=np.int32).astype(dtype).reshape([2, 2])
self._assertOpOutputMatchesExpected(
lambda x: xla.broadcast(x, (7, 42)),
args=(v,),
expected=np.tile(v, (7, 42, 1, 1)))
def testShiftRightLogical(self):
self._assertOpOutputMatchesExpected(
xla.shift_right_logical,
args=(np.array([-1, 16], dtype=np.int32), np.int32(4)),
expected=np.array([0x0FFFFFFF, 1], dtype=np.int32))
self._assertOpOutputMatchesExpected(
xla.shift_right_logical,
args=(np.array([0xFFFFFFFF, 16], dtype=np.uint32), np.uint32(4)),
expected=np.array([0x0FFFFFFF, 1], dtype=np.uint32))
def testShiftRightArithmetic(self):
self._assertOpOutputMatchesExpected(
xla.shift_right_arithmetic,
args=(np.array([-1, 16], dtype=np.int32), np.int32(4)),
expected=np.array([-1, 1], dtype=np.int32))
self._assertOpOutputMatchesExpected(
xla.shift_right_arithmetic,
args=(np.array([0xFFFFFFFF, 16], dtype=np.uint32), np.uint32(4)),
expected=np.array([0xFFFFFFFF, 1], dtype=np.uint32))
PRECISION_VALUES = (None, xla_data_pb2.PrecisionConfig.DEFAULT,
xla_data_pb2.PrecisionConfig.HIGH,
xla_data_pb2.PrecisionConfig.HIGHEST)
@parameterized.parameters(*PRECISION_VALUES)
def testConv(self, precision):
for dtype in set(self.float_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
def conv_1d_fn(lhs, rhs):
dnums = xla_data_pb2.ConvolutionDimensionNumbers()
num_spatial_dims = 1
dnums.input_batch_dimension = 0
dnums.input_feature_dimension = 1
dnums.output_batch_dimension = 0
dnums.output_feature_dimension = 1
dnums.kernel_output_feature_dimension = 0
dnums.kernel_input_feature_dimension = 1
dnums.input_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
dnums.kernel_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
dnums.output_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
precision_config = None
if precision:
precision_config = xla_data_pb2.PrecisionConfig()
precision_config.operand_precision.extend([precision, precision])
return xla.conv(
lhs,
rhs,
window_strides=(1,),
padding=((2, 1),),
lhs_dilation=(1,),
rhs_dilation=(2,),
dimension_numbers=dnums)
self._assertOpOutputMatchesExpected(
conv_1d_fn,
args=(
np.array([[[3, 4, 5, 6]]], dtype=dtype),
np.array([[[-2, -3]]], dtype=dtype),
),
expected=np.array([[[-9, -12, -21, -26, -10]]], dtype=dtype))
@parameterized.parameters(*PRECISION_VALUES)
def testDotGeneral(self, precision):
for dtype in self.float_types:
def dot_fn(lhs, rhs):
dnums = xla_data_pb2.DotDimensionNumbers()
dnums.lhs_contracting_dimensions.append(2)
dnums.rhs_contracting_dimensions.append(1)
dnums.lhs_batch_dimensions.append(0)
dnums.rhs_batch_dimensions.append(0)
precision_config = None
if precision:
precision_config = xla_data_pb2.PrecisionConfig()
precision_config.operand_precision.extend([precision, precision])
return xla.dot_general(
lhs,
rhs,
dimension_numbers=dnums,
precision_config=precision_config)
lhs = np.array(
[
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
], dtype=dtype)
rhs = np.array(
[
[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]],
], dtype=dtype)
self._assertOpOutputMatchesExpected(
dot_fn,
args=(lhs, rhs),
expected=np.array(
[
[[9, 12, 15], [19, 26, 33]],
[[95, 106, 117], [129, 144, 159]],
],
dtype=dtype))
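      # With batch dimensions (0, 0) and contracting dimensions (2, 1), this
      # dot_general is just a batched matmul; as a sanity check,
      # np.einsum('bij,bjk->bik', lhs, rhs) (or lhs @ rhs) reproduces the
      # expected array above.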
def testNeg(self):
for dtype in self.numeric_types - {np.uint8, np.int8}:
self._assertOpOutputMatchesExpected(
xla.neg,
args=(np.array([1, 2, 3], dtype=dtype),),
expected=np.array([-1, -2, -3], dtype=dtype))
def testPad(self):
for dtype in self.numeric_types:
def pad_fn(x):
return xla.pad(
x,
padding_value=7,
padding_low=[2, 1],
padding_high=[1, 2],
padding_interior=[1, 0])
self._assertOpOutputMatchesExpected(
pad_fn,
args=(np.arange(4, dtype=np.int32).astype(dtype).reshape([2, 2]),),
expected=np.array(
[[7, 7, 7, 7, 7], [7, 7, 7, 7, 7], [7, 0, 1, 7, 7],
[7, 7, 7, 7, 7], [7, 2, 3, 7, 7], [7, 7, 7, 7, 7]],
dtype=dtype))
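      # The expected 6x5 shape follows from the usual pad arithmetic (our
      # summary): out_dim = low + size + high + (size - 1) * interior, i.e.
      # rows 2 + 2 + 1 + 1 = 6 and cols 1 + 2 + 2 + 0 = 5 for the 2x2 input.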
def testReduce(self):
for dtype in set(self.numeric_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
@function.Defun(dtype, dtype)
def sum_reducer(x, y):
return x + y
def sum_reduction(dims):
def fn(x):
return xla.reduce(
x, init_value=0, dimensions_to_reduce=dims, reducer=sum_reducer)
return fn
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]))
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[0]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.array([12, 15, 18, 21], dtype=dtype))
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[1]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.array([6, 22, 38], dtype=dtype))
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[0, 1]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=dtype(66))
@function.Defun(dtype, dtype)
def mul_reducer(x, y):
return x * y
def mul_reduction(dims):
def fn(x):
return xla.reduce(
x, init_value=1, dimensions_to_reduce=dims, reducer=mul_reducer)
return fn
self._assertOpOutputMatchesExpected(
mul_reduction(dims=[0]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.array([0, 45, 120, 231], dtype=dtype))
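      # For reference, these reductions mirror numpy on
      # x = np.arange(12).reshape(3, 4):
      #   dims=[]       -> x                (no reduction)
      #   dims=[0]      -> x.sum(axis=0)    == [12, 15, 18, 21]
      #   dims=[1]      -> x.sum(axis=1)    == [6, 22, 38]
      #   dims=[0, 1]   -> x.sum()          == 66
      #   mul, dims=[0] -> x.prod(axis=0)   == [0, 45, 120, 231]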
def testSelectAndScatter(self):
for dtype in set(self.numeric_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
@function.Defun(dtype, dtype)
def add_scatter(x, y):
return x + y
@function.Defun(dtype, dtype)
def ge_select(x, y):
return x >= y
def test_fn(operand, source):
return xla.select_and_scatter(
operand,
window_dimensions=[2, 3, 1, 1],
window_strides=[2, 2, 1, 1],
padding=[[0, 0]] * 4,
source=source,
init_value=0,
select=ge_select,
scatter=add_scatter)
self._assertOpOutputMatchesExpected(
test_fn,
args=(np.array(
[[7, 2, 5, 3, 8], [3, 8, 9, 3, 4], [1, 5, 7, 5, 6],
[0, 6, 2, 10, 2]],
dtype=dtype).reshape((4, 5, 1, 1)),
np.array([[2, 6], [3, 1]], dtype=dtype).reshape((2, 2, 1, 1))),
expected=np.array(
[[0, 0, 0, 0, 0], [0, 0, 8, 0, 0], [0, 0, 3, 0, 0],
[0, 0, 0, 1, 0]],
dtype=dtype).reshape((4, 5, 1, 1)))
def testTranspose(self):
for dtype in self.numeric_types:
v = np.arange(4, dtype=np.int32).astype(dtype).reshape([2, 2])
self._assertOpOutputMatchesExpected(
lambda x: xla.transpose(x, [1, 0]), args=(v,), expected=v.T)
def testDynamicSlice(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
xla.dynamic_slice,
args=(np.arange(1000,
dtype=np.int32).astype(dtype).reshape([10, 10, 10]),
np.array([5, 7, 3]), np.array([2, 3, 2])),
expected=np.array(
np.array([[[573, 574], [583, 584], [593, 594]],
[[673, 674], [683, 684], [693, 694]]]),
dtype=dtype))
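      # Equivalent numpy slicing for the case above (start [5, 7, 3], sizes
      # [2, 3, 2] on x = np.arange(1000).reshape(10, 10, 10)): x[5:7, 7:10, 3:5],
      # whose first element is 5*100 + 7*10 + 3 = 573, as in the expected array.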
def testDynamicSliceWithIncorrectStartIndicesShape(self):
with self.session() as session:
with self.test_scope():
output = xla.dynamic_slice(
np.arange(1000, dtype=np.int32).reshape([10, 10, 10]),
np.array([5, 7]), np.array([2, 3, 4]))
with self.assertRaises(errors.InvalidArgumentError) as invalid_arg_error:
session.run(output)
self.assertRegexpMatches(
invalid_arg_error.exception.message,
(r'start_indices must be a vector with length equal to input rank, '
r'but input rank is 3 and start_indices has shape \[2\].*'))
def testDynamicSliceWithIncorrectSizeIndicesShape(self):
with self.session() as session:
with self.test_scope():
output = xla.dynamic_slice(
np.arange(1000, dtype=np.int32).reshape([10, 10, 10]),
np.array([5, 7, 3]), np.array([2, 3]))
with self.assertRaises(errors.InvalidArgumentError) as invalid_arg_error:
session.run(output)
self.assertRegexpMatches(
invalid_arg_error.exception.message,
(r'size_indices must be a vector with length equal to input rank, '
r'but input rank is 3 and size_indices has shape \[2\].*'))
class XlaOpsShapeInferenceTest(xla_test.XLATestCase, parameterized.TestCase):
def testDotDifferentNumberOfContractingDimensions(self):
a = array_ops.placeholder(np.float32, shape=(4, 4, 4, 4))
b = array_ops.placeholder(np.float32, shape=(4, 4, 4, 4))
dim_nums = xla_data_pb2.DotDimensionNumbers()
dim_nums.lhs_contracting_dimensions.append(2)
dim_nums.rhs_contracting_dimensions.append(2)
dim_nums.rhs_contracting_dimensions.append(3)
with self.assertRaisesRegex(ValueError,
'Must specify the same number of contracting '
'dimensions for lhs and rhs. Got: 1 and 2'):
xla.dot_general(a, b, dim_nums)
def testDotDifferentContractingDimensionsSizes(self):
a = array_ops.placeholder(np.float32, shape=(2, 2, 2, 2))
b = array_ops.placeholder(np.float32, shape=(4, 4, 4, 4))
dim_nums = xla_data_pb2.DotDimensionNumbers()
dim_nums.lhs_contracting_dimensions.append(2)
dim_nums.rhs_contracting_dimensions.append(3)
with self.assertRaisesRegex(ValueError,
'Contracting dimension sizes do not match. '
'Got: 2 and 4'):
xla.dot_general(a, b, dim_nums)
def testDotDifferentNumberOfBatchDimensions(self):
a = array_ops.placeholder(np.float32, shape=(4, 4, 4, 4))
b = array_ops.placeholder(np.float32, shape=(4, 4, 4, 4))
dim_nums = xla_data_pb2.DotDimensionNumbers()
dim_nums.lhs_batch_dimensions.append(2)
dim_nums.rhs_batch_dimensions.append(2)
dim_nums.rhs_batch_dimensions.append(3)
with self.assertRaisesRegex(ValueError,
'Must specify the same number of batch '
'dimensions for lhs and rhs. Got: 1 and 2'):
xla.dot_general(a, b, dim_nums)
def testDotDifferentBatchDimensionsSizes(self):
a = array_ops.placeholder(np.float32, shape=(2, 2, 2, 2))
b = array_ops.placeholder(np.float32, shape=(4, 4, 4, 2))
dim_nums = xla_data_pb2.DotDimensionNumbers()
dim_nums.lhs_contracting_dimensions.append(2)
dim_nums.rhs_contracting_dimensions.append(3)
dim_nums.lhs_batch_dimensions.append(0)
dim_nums.rhs_batch_dimensions.append(0)
with self.assertRaisesRegex(ValueError,
'Batch dimension sizes do not match. '
'Got: 2 and 4'):
xla.dot_general(a, b, dim_nums)
def testDotShapeInference(self):
a = array_ops.placeholder(np.float32, shape=(1, 2, 3, 4))
b = array_ops.placeholder(np.float32, shape=(4, 3, 2, 1))
dim_nums = xla_data_pb2.DotDimensionNumbers()
dim_nums.lhs_contracting_dimensions.append(1)
dim_nums.rhs_contracting_dimensions.append(2)
dim_nums.lhs_batch_dimensions.append(3)
dim_nums.rhs_batch_dimensions.append(0)
c = xla.dot_general(a, b, dim_nums)
self.assertEqual(c.shape, tensor_shape.TensorShape([1, 3, 3, 1]))
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/xla_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
# Wrapper around AvgPoolGrad that ignores extra arguments needed by
# MaxPoolGrad.
def _AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding):
del outputs # Unused by average-pooling gradients.
return gen_nn_ops.avg_pool3d_grad(
inputs.get_shape().as_list(),
output_gradients,
ksize=ksize,
strides=strides,
padding=padding)
class Pooling3DTest(xla_test.XLATestCase):
def _VerifyValues(self, pool_func, input_sizes, window, strides, padding,
expected):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Function to be called, e.g. nn_ops.max_pool3d or
        nn_ops.avg_pool3d.
input_sizes: Input tensor dimensions.
window: Tuple of kernel dims: planes, rows, cols.
strides: Tuple of strides for dims: planes, rows, cols.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = np.arange(1.0, total_size + 1, dtype=np.float32)
x = x.reshape(input_sizes)
with self.session() as sess, self.test_scope():
inputs = array_ops.placeholder(dtypes.float32)
t = pool_func(
inputs,
ksize=[1] + window + [1],
strides=[1] + strides + [1],
padding=padding)
vals = sess.run(t, {inputs: x})
# Verifies values.
actual = vals.flatten()
self.assertAllClose(expected, actual)
def testAvgPool3dValidPadding(self):
expected_output = [20.5, 21.5, 22.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 3],
window=[2, 2, 2],
strides=[2, 2, 2],
padding="VALID",
expected=expected_output)
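    # Sanity check for the 20.5 above: with x = np.arange(1.0, 82).reshape(
    # 1, 3, 3, 3, 3), the single VALID 2x2x2 window for channel 0 is
    # x[0, :2, :2, :2, 0], whose mean is 20.5; channels 1 and 2 give 21.5
    # and 22.5.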
def testAvgPool3dSamePadding(self):
expected_output = [20.5, 21.5, 22.5, 26.5, 27.5, 28.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 2, 2, 4, 3],
window=[2, 2, 2],
strides=[2, 2, 2],
padding="SAME",
expected=expected_output)
def testAvgPool3dSamePaddingDifferentStrides(self):
expected_output = [1.5, 4.5, 7.5, 17.5, 20.5, 23.5, 33.5, 36.5, 39.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 5, 8, 1, 1],
window=[1, 2, 3],
strides=[2, 3, 1],
padding="SAME",
expected=expected_output)
def testMaxPool3dValidPadding(self):
expected_output = [40.0, 41.0, 42.0]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 3],
window=[2, 2, 2],
strides=[2, 2, 2],
padding="VALID",
expected=expected_output)
def testMaxPool3dSamePadding(self):
expected_output = [31., 32., 33., 34., 35., 36.]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 2, 2, 3, 3],
window=[2, 2, 2],
strides=[2, 2, 2],
padding="SAME",
expected=expected_output)
def testMaxPool3dSamePaddingDifferentStrides(self):
expected_output = [2., 5., 8., 18., 21., 24., 34., 37., 40.]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 5, 8, 1, 1],
window=[1, 2, 3],
strides=[2, 3, 1],
padding="SAME",
expected=expected_output)
# Test pooling on a larger input, with different stride and kernel
# size for the 'z' dimension.
# Simulate max pooling in numpy to get the expected output.
input_data = np.arange(1, 5 * 27 * 27 * 64 + 1).reshape((5, 27, 27, 64))
input_data = np.pad(input_data, [[0, 0], [0, 1], [0, 1], [0, 0]],
mode="constant")
expected_output = input_data[:, 1::2, 1::2, :]
expected_output[:, -1, :, :] = input_data[:, -2, 1::2, :]
expected_output[:, :, -1, :] = input_data[:, 1::2, -2, :]
expected_output[:, -1, -1, :] = input_data[:, -2, -2, :]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 5, 27, 27, 64],
window=[1, 2, 2],
strides=[1, 2, 2],
padding="SAME",
expected=expected_output.flatten())
def testKernelSmallerThanStride(self):
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 1],
window=[1, 1, 1],
strides=[2, 2, 2],
padding="SAME",
expected=[1, 3, 7, 9, 19, 21, 25, 27])
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 7, 7, 7, 1],
window=[2, 2, 2],
strides=[3, 3, 3],
padding="VALID",
expected=[58, 61, 79, 82, 205, 208, 226, 229])
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 1],
window=[1, 1, 1],
strides=[2, 2, 2],
padding="SAME",
expected=[1, 3, 7, 9, 19, 21, 25, 27])
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 7, 7, 7, 1],
window=[2, 2, 2],
strides=[3, 3, 3],
padding="VALID",
expected=[29.5, 32.5, 50.5, 53.5, 176.5, 179.5, 197.5, 200.5])
def _VerifyGradient(self,
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
pool_grad_grad_func=None):
"""Verifies the output values of the pooling gradient function.
Args:
pool_func: Forward pooling function
      pool_grad_func: Pooling gradient function corresponding to pool_func.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
pool_grad_grad_func: Second-order gradient function, if available.
"""
ksize = [1] + ksize + [1]
strides = [1] + strides + [1]
total_size = np.prod(input_sizes)
x = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_sizes)
with self.session() as sess:
# Use the forward pool function to compute some corresponding outputs
# (needed for the CPU device, and we need the shape in both cases).
with ops.device("CPU"):
inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)
outputs = pool_func(
inputs,
ksize=ksize,
strides=strides,
padding=padding)
output_vals = np.array(sess.run(outputs, {inputs: x}))
output_gradient_vals = np.arange(
1, output_vals.size + 1, dtype=np.float32)
output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)
output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)
output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)
# Use the Tensorflow CPU pooling gradient to compute the expected input
# gradients.
with ops.device("CPU"):
output_gradients = array_ops.placeholder(
dtypes.float32, shape=output_vals.shape)
expected_input_gradients = pool_grad_func(
inputs,
outputs,
output_gradients,
ksize=ksize,
strides=strides,
padding=padding)
expected_input_gradient_vals = sess.run(
expected_input_gradients,
{inputs: x,
output_gradients: output_gradient_vals})
output_grad_gradients = array_ops.placeholder(
dtypes.float32, shape=expected_input_gradient_vals.shape)
if pool_grad_grad_func is not None:
expected_grad_gradients = pool_grad_grad_func(
inputs,
outputs,
output_grad_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NDHWC")
expected_grad_gradients_vals = sess.run(expected_grad_gradients, {
inputs: x,
output_grad_gradients: output_grad_grad_vals
})
# Run the gradient op on the XLA device
with self.test_scope():
outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
actual_input_gradients = pool_grad_func(
inputs,
outputs,
output_gradients,
ksize=ksize,
strides=strides,
padding=padding)
if pool_grad_grad_func is not None:
actual_grad_gradients = pool_grad_grad_func(
inputs,
outputs,
output_grad_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NDHWC")
actual = sess.run(actual_input_gradients, {
inputs: x,
outputs: output_vals,
output_gradients: output_gradient_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_input_gradient_vals.flatten(),
actual.flatten(),
rtol=1e-5,
atol=1e-6)
self.assertShapeEqual(actual, inputs)
if pool_grad_grad_func is not None:
actual_grad_gradients_vals = sess.run(
actual_grad_gradients, {
inputs: x,
outputs: output_vals,
output_grad_gradients: output_grad_grad_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_grad_gradients_vals,
actual_grad_gradients_vals,
rtol=1e-4,
atol=1e-6)
self.assertShapeEqual(actual_grad_gradients_vals, outputs)
def testMaxPoolGradValidPadding1_1_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[1, 3, 3, 3, 1],
ksize=[1, 1, 1],
strides=[1, 1, 1],
padding="VALID",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradValidPadding2_1_6_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 3, 3, 6, 3],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="VALID",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradValidPadding2_1_7_3d(self):
# TODO(b/73062247): the bfloat16 implementation of MaxPool3DGradGrad does
# not have enough precision for this test case to pass if
# pool_grad_grad_func is passed.
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 3, 5, 7, 3],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="VALID")
def testMaxPoolGradValidPadding2_2_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 2, 2, 2, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="VALID",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradSamePadding1_1_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 3, 2, 4, 1],
ksize=[1, 1, 1],
strides=[1, 1, 1],
padding="SAME",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradSamePadding2_1_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 3, 2, 4, 1],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="SAME",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradSamePadding2_2_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 5, 2, 4, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="SAME",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradSamePadding3_1_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[1, 3, 3, 7, 1],
ksize=[3, 3, 3],
strides=[1, 1, 1],
padding="SAME",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testAvgPoolGradValidPadding1_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 3, 3, 3, 3],
ksize=[1, 1, 1],
strides=[1, 1, 1],
padding="VALID")
def testAvgPoolGradValidPadding2_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 3, 3, 3, 3],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="VALID")
def testAvgPoolGradValidPadding2_2_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 2, 2, 2, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="VALID")
def testAvgPoolGradSamePadding1_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 3, 2, 4, 3],
ksize=[1, 1, 1],
strides=[1, 1, 1],
padding="SAME")
def testAvgPoolGradSamePadding2_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[1, 2, 2, 2, 1],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="SAME")
def testAvgPoolGradSamePadding2_2_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 5, 2, 4, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="SAME")
def testAvgPoolGradSamePadding3_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[1, 3, 6, 7, 1],
ksize=[3, 3, 3],
strides=[1, 1, 1],
padding="SAME")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/pooling_ops_3d_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for operators with > 3 or arbitrary numbers of arguments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class NAryOpsTest(xla_test.XLATestCase):
def _testNAry(self, op, args, expected, equality_fn=None):
with self.session() as session:
with self.test_scope():
placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
for arg in args
]
feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
output = op(placeholders)
result = session.run(output, feeds)
if not equality_fn:
equality_fn = self.assertAllClose
equality_fn(result, expected, rtol=1e-3)
def _nAryListCheck(self, results, expected, **kwargs):
self.assertEqual(len(results), len(expected))
for (r, e) in zip(results, expected):
self.assertAllClose(r, e, **kwargs)
def _testNAryLists(self, op, args, expected):
self._testNAry(op, args, expected, equality_fn=self._nAryListCheck)
def testFloat(self):
self._testNAry(math_ops.add_n,
[np.array([[1, 2, 3]], dtype=np.float32)],
expected=np.array([[1, 2, 3]], dtype=np.float32))
self._testNAry(math_ops.add_n,
[np.array([1, 2], dtype=np.float32),
np.array([10, 20], dtype=np.float32)],
expected=np.array([11, 22], dtype=np.float32))
self._testNAry(math_ops.add_n,
[np.array([-4], dtype=np.float32),
np.array([10], dtype=np.float32),
np.array([42], dtype=np.float32)],
expected=np.array([48], dtype=np.float32))
def testComplex(self):
for dtype in self.complex_types:
self._testNAry(
math_ops.add_n, [np.array([[1 + 2j, 2 - 3j, 3 + 4j]], dtype=dtype)],
expected=np.array([[1 + 2j, 2 - 3j, 3 + 4j]], dtype=dtype))
self._testNAry(
math_ops.add_n, [
np.array([1 + 2j, 2 - 3j], dtype=dtype),
np.array([10j, 20], dtype=dtype)
],
expected=np.array([1 + 12j, 22 - 3j], dtype=dtype))
self._testNAry(
math_ops.add_n, [
np.array([-4, 5j], dtype=dtype),
np.array([2 + 10j, -2], dtype=dtype),
np.array([42j, 3 + 3j], dtype=dtype)
],
expected=np.array([-2 + 52j, 1 + 8j], dtype=dtype))
@unittest.skip("IdentityN is temporarily CompilationOnly as workaround")
def testIdentityN(self):
self._testNAryLists(array_ops.identity_n,
[np.array([[1, 2, 3]], dtype=np.float32)],
expected=[np.array([[1, 2, 3]], dtype=np.float32)])
self._testNAryLists(array_ops.identity_n,
[np.array([[1, 2], [3, 4]], dtype=np.float32),
np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)],
expected=[
np.array([[1, 2], [3, 4]], dtype=np.float32),
np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)])
self._testNAryLists(array_ops.identity_n,
[np.array([[1], [2], [3], [4]], dtype=np.int32),
np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)],
expected=[
np.array([[1], [2], [3], [4]], dtype=np.int32),
np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)])
def testConcat(self):
self._testNAry(
lambda x: array_ops.concat(x, 0), [
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.float32), np.array(
[[7, 8, 9], [10, 11, 12]], dtype=np.float32)
],
expected=np.array(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=np.float32))
self._testNAry(
lambda x: array_ops.concat(x, 1), [
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.float32), np.array(
[[7, 8, 9], [10, 11, 12]], dtype=np.float32)
],
expected=np.array(
[[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], dtype=np.float32))
def testOneHot(self):
with self.session() as session, self.test_scope():
indices = array_ops.constant(np.array([[2, 3], [0, 1]], dtype=np.int32))
op = array_ops.one_hot(indices,
np.int32(4),
on_value=np.float32(7), off_value=np.float32(3))
output = session.run(op)
expected = np.array([[[3, 3, 7, 3], [3, 3, 3, 7]],
[[7, 3, 3, 3], [3, 7, 3, 3]]],
dtype=np.float32)
self.assertAllEqual(output, expected)
op = array_ops.one_hot(indices,
np.int32(4),
on_value=np.int32(2), off_value=np.int32(1),
axis=1)
output = session.run(op)
expected = np.array([[[1, 1], [1, 1], [2, 1], [1, 2]],
[[2, 1], [1, 2], [1, 1], [1, 1]]],
dtype=np.int32)
self.assertAllEqual(output, expected)
def testSplitV(self):
with self.session() as session:
with self.test_scope():
output = session.run(
array_ops.split(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2]],
dtype=np.float32),
[2, 2], 1))
expected = [np.array([[1, 2], [5, 6], [9, 0]], dtype=np.float32),
np.array([[3, 4], [7, 8], [1, 2]], dtype=np.float32)]
self.assertAllEqual(output, expected)
def testStridedSlice(self):
self._testNAry(lambda x: array_ops.strided_slice(*x),
[np.array([[], [], []], dtype=np.float32),
np.array([1, 0], dtype=np.int32),
np.array([3, 0], dtype=np.int32),
np.array([1, 1], dtype=np.int32)],
expected=np.array([[], []], dtype=np.float32))
if np.int64 in self.int_types:
self._testNAry(
lambda x: array_ops.strided_slice(*x), [
np.array([[], [], []], dtype=np.float32), np.array(
[1, 0], dtype=np.int64), np.array([3, 0], dtype=np.int64),
np.array([1, 1], dtype=np.int64)
],
expected=np.array([[], []], dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice(*x),
[np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=np.float32),
np.array([1, 1], dtype=np.int32),
np.array([3, 3], dtype=np.int32),
np.array([1, 1], dtype=np.int32)],
expected=np.array([[5, 6], [8, 9]], dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice(*x),
[np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=np.float32),
np.array([0, 2], dtype=np.int32),
np.array([2, 0], dtype=np.int32),
np.array([1, -1], dtype=np.int32)],
expected=np.array([[3, 2], [6, 5]], dtype=np.float32))
self._testNAry(lambda x: x[0][0:2, array_ops.newaxis, ::-1],
[np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=np.float32)],
expected=np.array([[[3, 2, 1]], [[6, 5, 4]]],
dtype=np.float32))
self._testNAry(lambda x: x[0][1, :, array_ops.newaxis],
[np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=np.float32)],
expected=np.array([[4], [5], [6]], dtype=np.float32))
def testStridedSliceGrad(self):
# Tests cases where input shape is empty.
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([], dtype=np.int32),
np.array([], dtype=np.int32),
np.array([], dtype=np.int32),
np.array([], dtype=np.int32),
np.float32(0.5)],
expected=np.array(np.float32(0.5), dtype=np.float32))
# Tests case where input shape is non-empty, but gradients are empty.
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([3], dtype=np.int32),
np.array([0], dtype=np.int32),
np.array([0], dtype=np.int32),
np.array([1], dtype=np.int32),
np.array([], dtype=np.float32)],
expected=np.array([0, 0, 0], dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([3, 0], dtype=np.int32),
np.array([1, 0], dtype=np.int32),
np.array([3, 0], dtype=np.int32),
np.array([1, 1], dtype=np.int32),
np.array([[], []], dtype=np.float32)],
expected=np.array([[], [], []], dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([3, 3], dtype=np.int32),
np.array([1, 1], dtype=np.int32),
np.array([3, 3], dtype=np.int32),
np.array([1, 1], dtype=np.int32),
np.array([[5, 6], [8, 9]], dtype=np.float32)],
expected=np.array([[0, 0, 0], [0, 5, 6], [0, 8, 9]],
dtype=np.float32))
def ssg_test(x):
return array_ops.strided_slice_grad(*x, shrink_axis_mask=0x4,
new_axis_mask=0x1)
self._testNAry(ssg_test,
[np.array([3, 1, 3], dtype=np.int32),
np.array([0, 0, 0, 2], dtype=np.int32),
np.array([0, 3, 1, -4], dtype=np.int32),
np.array([1, 2, 1, -3], dtype=np.int32),
np.array([[[1], [2]]], dtype=np.float32)],
expected=np.array([[[0, 0, 1]], [[0, 0, 0]], [[0, 0, 2]]],
dtype=np.float32))
ssg_test2 = lambda x: array_ops.strided_slice_grad(*x, new_axis_mask=0x15)
self._testNAry(ssg_test2,
[np.array([4, 4], dtype=np.int32),
np.array([0, 0, 0, 1, 0], dtype=np.int32),
np.array([0, 3, 0, 4, 0], dtype=np.int32),
np.array([1, 2, 1, 2, 1], dtype=np.int32),
np.array([[[[[1], [2]]], [[[3], [4]]]]], dtype=np.float32)],
expected=np.array([[0, 1, 0, 2], [0, 0, 0, 0], [0, 3, 0, 4],
[0, 0, 0, 0]], dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([3, 3], dtype=np.int32),
np.array([0, 2], dtype=np.int32),
np.array([2, 0], dtype=np.int32),
np.array([1, -1], dtype=np.int32),
np.array([[1, 2], [3, 4]], dtype=np.float32)],
expected=np.array([[0, 2, 1], [0, 4, 3], [0, 0, 0]],
dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([3, 3], dtype=np.int32),
np.array([2, 2], dtype=np.int32),
np.array([0, 1], dtype=np.int32),
np.array([-1, -2], dtype=np.int32),
np.array([[1], [2]], dtype=np.float32)],
expected=np.array([[0, 0, 0], [0, 0, 2], [0, 0, 1]],
dtype=np.float32))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/nary_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for JIT compilation on the CPU and GPU devices."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.compiler.tests import test_utils
from tensorflow.contrib.compiler import jit
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
jit_scope = jit.experimental_jit_scope
# Disable rewrites to make sure we don't end up having to update this test
# whenever we implement new ones.
def NoRewriteSessionConfig():
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,
function_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
def CompiledKernel(fn, *inputs, **kwargs):
"""Execute 'fn' as a compiled XLA kernel, with 'inputs'."""
name = kwargs.pop("name", None)
noinline = kwargs.pop("noinline", None)
@function.Defun(func_name=name, noinline=noinline, compiled=True)
def Compiled(*args):
return fn(*args)
return Compiled(*inputs)
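# Illustrative (hypothetical) use of CompiledKernel, not part of the tests
# below: wrapping a plain TF function forces it through XLA via
# Defun(compiled=True), e.g.
#   out = CompiledKernel(lambda a, b: math_ops.add(a, b), ph_a, ph_b)
# where ph_a and ph_b are placeholders fed at session.run time.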
def RunMetadataLabels(run_metadata):
"""Returns all labels in run_metadata."""
labels = []
for dev_stats in run_metadata.step_stats.dev_stats:
for node_stats in dev_stats.node_stats:
labels.append(node_stats.timeline_label)
return labels
def InLabels(labels, substr):
"""Returns true iff one of the labels contains substr."""
return any(substr in x for x in labels)
def MetadataHasXlaRunOp(run_metadata):
"""Returns true if there are XlaRun kernels in run_metadata's timeline."""
# TODO(phawkins): find a less hacky way to test whether a kernel ran.
return InLabels(RunMetadataLabels(run_metadata), "_XlaRun")
class JitLaunchTest(test.TestCase):
# Evaluates 'fn' on 'args' both directly and as a compiled XLA kernel.
# Verifies that the outputs match and that XLA was invoked. 'fn' must take
# the same number of tensors as arguments that are in 'args', and must return
# a tuple of output tensors.
#
# If 'require_kernel_launch' is True, then we verify that an XlaCompile/XlaRun
# node actually ran. However, it is sometimes possible for XlaCompile/XlaRun
# ops to be constant-folded away, so the check is optional.
def _compare(self,
fn,
args,
require_kernel_launch=True,
name=None,
noinline=None):
with session_lib.Session(config=NoRewriteSessionConfig()) as sess:
placeholders = []
feeds = {}
for arg in args:
placeholder = array_ops.placeholder(
dtypes.as_dtype(arg.dtype), list(arg.shape))
placeholders.append(placeholder)
feeds[placeholder] = arg
compiled_op = CompiledKernel(
fn, *placeholders, name=name, noinline=noinline)
direct_op = fn(*placeholders)
run_metadata = config_pb2.RunMetadata()
compiled = test_utils.RunWithWarmup(
sess, compiled_op, feeds,
config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata)
print("Compiled Result {}".format(compiled))
if require_kernel_launch:
self.assert_(MetadataHasXlaRunOp(run_metadata))
direct = sess.run(direct_op, feeds)
print("Direct Result {}".format(direct))
if (isinstance(compiled, (tuple, list)) and
(isinstance(direct, (tuple, list)))):
for (x, y) in zip(compiled, direct):
self.assertAllClose(x, y, rtol=1e-1)
else:
self.assertAllClose(compiled, direct, rtol=1e-2)
def testNoOutputs(self):
with session_lib.Session() as sess:
# Check that calling the result as a compiled kernel doesn't crash.
@function.Defun(compiled=True)
def KernelWithNoOutputs():
a = constant_op.constant(100) # pylint: disable=unused-variable
call = KernelWithNoOutputs() # pylint: disable=assignment-from-no-return
test_utils.RunWithWarmup(sess, call, {})
def testAliasing(self):
"""Regression test for compiled functions that return an aliased buffer.
XLA returns aliased buffers if outputs are identical. Tests that
we handle that case.
"""
def AddOnceReturnTwice(x):
y = math_ops.add(x, x)
return y, y
# Exercises compiling a function (say, Foo) which calls another function
# (say, Bar) which is not inlined. When the compiler compiles Foo, it needs
# to symbolically execute Bar correctly regardless of whether Bar is inlined
# or not.
# Tests compiled=True and noinline=True.
self._compare(
AddOnceReturnTwice, [np.array([[[0.5, -1.0]]], dtype=np.float32)],
name="AddOnceReturnTwice_inline",
noinline=True)
# Tests compiled=True and noinline=False.
self._compare(
AddOnceReturnTwice, [np.array([[[0.5, -1.0]]], dtype=np.float32)],
name="AddOnceReturnTwice_noinline",
noinline=False)
def testOneConstOutput(self):
"""Test consisting of a single constant return value."""
def OneConstOutput():
return constant_op.constant([-3, 44, 99])
self._compare(OneConstOutput, [], require_kernel_launch=False)
def testConstZeroElementOutput(self):
"""Test consisting of a constant zero element return value."""
def ConstZeroElementOutput():
return array_ops.fill([7, 0], 3.0)
self._compare(ConstZeroElementOutput, [], require_kernel_launch=False)
def testSomeConstOutputs(self):
"""Test kernels that return a mixture of const and non-const outputs."""
def SomeConstOutputs(x):
return constant_op.constant(
[-2, 7]), array_ops.identity(x), constant_op.constant(3.5)
self._compare(
SomeConstOutputs, [np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.float32)])
def testInt32Input(self):
"""Test an int32-typed input.
On a GPU, int32 tensors will be placed in host memory.
"""
def AddToSelf(x):
return math_ops.add(x, x)
self._compare(AddToSelf, [np.array([7, 1, 3], dtype=np.int32)])
def testMandatoryConstantInput(self):
"""Tests an operator that has a mandatory-constant shape input."""
def FillWithFloat(x):
return array_ops.fill(x, 9.5)
self._compare(FillWithFloat, [np.array([3, 2], dtype=np.int32)])
def testMnistForwardFunc(self):
"""Compute inference function from MNIST beginners tutorial."""
batch_size = 16
image_size = 28 * 28
num_classes = 10
# Define a TensorFlow function to compute the forward pass.
def MnistForward(w, b, x):
return nn_ops.softmax(math_ops.matmul(x, w) + b)
w = np.random.random_sample((image_size, num_classes)).astype(np.float32)
b = np.random.random_sample((num_classes)).astype(np.float32)
x = np.random.random_sample((batch_size, image_size)).astype(np.float32)
self._compare(MnistForward, [w, b, x])
def testExplicitMarking(self):
"""Test explicit marking of operators to compile."""
batch_size = 16
image_size = 28 * 28
num_classes = 10
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32)
w = array_ops.placeholder(dtypes.float32)
b = array_ops.placeholder(dtypes.float32)
with jit_scope():
y1 = math_ops.matmul(x, w)
y2 = math_ops.add(y1, b)
with jit_scope():
y = math_ops.square(y2)
dw = np.random.random_sample((image_size, num_classes)).astype(np.float32)
db = np.random.random_sample((num_classes)).astype(np.float32)
dx = np.random.random_sample((batch_size, image_size)).astype(np.float32)
with session_lib.Session() as sess:
run_metadata = config_pb2.RunMetadata()
output = test_utils.RunWithWarmup(
sess,
y, {
x: dx,
w: dw,
b: db
},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
# TODO(phawkins): really we would like to test that there were exactly
# two kernel launches. However, we have no reliable way to determine
# that.
self.assert_(MetadataHasXlaRunOp(run_metadata))
expected = np.square(np.dot(dx, dw) + db)
self.assertAllClose(expected, output, rtol=1e-1)
class XlaCompilationTest(test.TestCase):
"""Tests for auto-compilation on CPU/GPU devices."""
def testReshape(self):
"""Tests an operator with compile-time constant and non-constant inputs."""
with self.session(config=NoRewriteSessionConfig()) as sess:
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.int32)
with jit_scope():
# Reshape's first argument is non-constant in the JIT, but its second
# (shape) argument will be treated as a compile-time constant for
# each JIT compilation.
# We do not use a tf.const() argument since we want to ensure the
# shape is still a run-time argument to the JIT, and not
# statically known as part of the JIT compilation's input graph.
z = array_ops.reshape(x, y)
run_metadata = config_pb2.RunMetadata()
out = test_utils.RunWithWarmup(
sess,
z, {
x: np.array([1, 2, 3, 4, 5, 6], np.float32),
y: [-1, 3]
},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assert_(MetadataHasXlaRunOp(run_metadata))
self.assertAllClose(np.array([[1, 2, 3], [4, 5, 6]], np.float32), out)
def testIgnoredArguments(self):
"""Tests that JIT computations can ignore formal parameters."""
with self.session(config=NoRewriteSessionConfig()) as sess:
x = array_ops.placeholder(dtypes.int32)
y = array_ops.placeholder(dtypes.int32)
with jit_scope():
z = math_ops.add(x, x)
w = math_ops.add(y, y)
# Pulls 'w' into the same compilation via control dependencies.
with ops.control_dependencies([w]):
n = control_flow_ops.no_op()
with ops.control_dependencies([n]):
t = math_ops.add(z, z)
run_metadata = config_pb2.RunMetadata()
out = test_utils.RunWithWarmup(
sess,
t, {
x: np.int32(7),
y: np.int32(404)
},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assert_(MetadataHasXlaRunOp(run_metadata))
self.assertAllClose(28, out)
def testLoops(self):
"""Tests that compilation accepts computations containing loops."""
with self.session(config=NoRewriteSessionConfig()) as session:
x = array_ops.placeholder(dtypes.float32)
with jit_scope():
c = lambda i, _: math_ops.less(i, 5)
b = lambda i, x: (i + 1, x * 2.0 + 1.0)
_, y = control_flow_ops.while_loop(c, b, (constant_op.constant(0), x))
run_metadata = config_pb2.RunMetadata()
result = session.run(y, {x: np.float32(2)},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
      self.assertTrue(MetadataHasXlaRunOp(run_metadata))
self.assertAllClose(result, np.float32(95), rtol=1e-1)
def testCond(self):
"""Tests that compilation handles switch operators."""
with self.session(config=NoRewriteSessionConfig()) as session:
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
c = array_ops.placeholder(dtypes.bool)
with jit_scope():
z = x + 1.0
w = control_flow_ops.cond(c, lambda: z, lambda: y)
t = math_ops.add(z, w)
# If JIT compilation chooses to cluster z and t, then execution will
# deadlock.
run_metadata = config_pb2.RunMetadata()
result = test_utils.RunWithWarmup(
session,
t, {
x: np.float32(2),
y: np.float32(4),
c: True
},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
      self.assertTrue(MetadataHasXlaRunOp(run_metadata))
self.assertAllClose(result, np.float32(6), rtol=1e-1)
def testNestedFunction(self):
g = ops.Graph()
with g.as_default():
@function.Defun(compiled=True)
def Bar(x, y):
return x + 2 * y
@function.Defun(compiled=True)
def Foo(x):
return Bar(x * x, x * x * x)
@function.Defun()
def Entry(x):
return Foo(x)
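      # For reference: Foo(x) = x*x + 2 * (x*x*x), so feeding [2., 10.] should
      # yield [4 + 16, 100 + 2000] = [20., 2100.], as asserted below.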
inp = array_ops.placeholder(dtypes.float32)
out = Entry(inp)
with self.session(
config=NoRewriteSessionConfig(), graph=g, use_gpu=True) as sess:
run_metadata = config_pb2.RunMetadata()
val = sess.run(out,
feed_dict={inp: [2., 10.]},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertAllClose(val, [20., 2100.])
def testLoopDeadlock(self):
"""Regression test for bug that caused deadlocks in graphs with loops."""
with self.session(config=NoRewriteSessionConfig()) as session:
x = array_ops.placeholder(dtypes.float32)
with jit_scope():
y = x + 1.0
c = lambda i, _x, _y: math_ops.less(i, 5)
b = lambda i, x, _y: (i + 1, x * 2.0 + 1.0, x - 3.0)
_, _, w = control_flow_ops.while_loop(c, b,
(constant_op.constant(0), y, x))
u = w + y
result = session.run(u, {x: np.float32(2)})
self.assertAllClose(result, np.float32(63), rtol=1e-1)
def testGradient(self):
"""Tests that the backprop function is properly compiled."""
def _Run(compiled):
@function.Defun(compiled=compiled)
def Forward(x):
return math_ops.log(x)
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32)
y = Forward(x)
dx, = gradients_impl.gradients(y, [x], 1.0)
cfg = NoRewriteSessionConfig()
cfg.graph_options.optimizer_options.opt_level = (
config_pb2.OptimizerOptions.L1)
cfg.graph_options.optimizer_options.do_function_inlining = True
with session_lib.Session(graph=g, config=cfg) as sess:
run_metadata = config_pb2.RunMetadata()
dx_val = test_utils.RunWithWarmup(
sess,
dx,
feed_dict={x: 100.},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertAllClose(dx_val, 0.01)
return RunMetadataLabels(run_metadata)
# SymGrad[f=log(x)](x, dy) = 1/x * dy
#
# Note: we don't need to compute log(x) for dx due to graph pruning.
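    # Worked example for the assertion above: with x = 100 and dy = 1.0,
    # dx = (1 / 100) * 1.0 = 0.01.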
# Do not compile the backprop. We should see one Reciprocal and one Mul.
labels = _Run(compiled=False)
self.assertFalse(InLabels(labels, "Log"))
self.assertTrue(InLabels(labels, "Reciprocal"))
self.assertTrue(InLabels(labels, "Mul"))
self.assertFalse(InLabels(labels, "XlaCompile"))
self.assertFalse(InLabels(labels, "XlaRun"))
# Compile the backprop. One XlaCompile/XlaRun pair.
labels = _Run(compiled=True)
self.assertFalse(InLabels(labels, "Log"))
self.assertFalse(InLabels(labels, "Reciprocal"))
self.assertFalse(InLabels(labels, "Mul"))
self.assertTrue(InLabels(labels, "XlaCompile"))
self.assertTrue(InLabels(labels, "XlaRun"))
class ElementWiseFusionTest(test.TestCase):
# Runs a simple test with the input jit_level and fusion_only flag.
def simpleTest(self, arg0, arg1, global_jit_level):
config = config_pb2.ConfigProto()
config.graph_options.optimizer_options.global_jit_level = global_jit_level
with session_lib.Session(config=config) as sess:
a1 = array_ops.placeholder(dtypes.float32, [2, 2], name="a1")
a2 = array_ops.placeholder(dtypes.float32, [2, 2], name="a2")
# Two element-wise ops. We need at least two ops since single
# element clusters are not passed to XLA in fusion_only mode.
a3 = a1 * a2
a4 = a3 + a1
# A matmul to break XLA clustering.
a5 = math_ops.matmul(a4, a1)
# Two more element-wise ops.
a6 = a5 - a4
a7 = a6 + a2
run_metadata = config_pb2.RunMetadata()
output = test_utils.RunWithWarmup(
sess,
a7, {
a1: arg0,
a2: arg1
},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
labels = RunMetadataLabels(run_metadata)
xla_compile_count = sum("XlaCompile(" in x for x in labels)
xla_run_count = sum("XlaRun(" in x for x in labels)
self.assertEqual(xla_compile_count, xla_run_count)
return output, xla_run_count
class LazyCompilationTest(test.TestCase):
def testLazyCompilation(self):
@function.Defun(compiled=True)
def CompiledFunction(x):
return math_ops.log(x)
with session_lib.Session(config=NoRewriteSessionConfig()) as sess:
x = array_ops.placeholder(dtypes.float32)
y = CompiledFunction(x)
# The very first run of the cluster is always compiled (non-lazily).
run_metadata_for_first_run = config_pb2.RunMetadata()
sess.run(
y,
feed_dict={x: [2., 10., 19., 77., 100.]},
run_metadata=run_metadata_for_first_run,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(
InLabels(
RunMetadataLabels(run_metadata_for_first_run), "_XlaCompile"))
self.assertTrue(
InLabels(RunMetadataLabels(run_metadata_for_first_run), "_XlaRun"))
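      # The next run feeds a shape that has not been seen before. With lazy
      # compilation enabled (via TF_XLA_FLAGS at the bottom of this file), the
      # assertions below expect _XlaCompile to appear in the trace without a
      # matching _XlaRun, i.e. compilation is deferred and the cluster runs
      # uncompiled.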
run_metadata_before_warmup = config_pb2.RunMetadata()
sess.run(
y,
feed_dict={x: [2., 10.]},
run_metadata=run_metadata_before_warmup,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(
InLabels(
RunMetadataLabels(run_metadata_before_warmup), "_XlaCompile"))
self.assertFalse(
InLabels(RunMetadataLabels(run_metadata_before_warmup), "_XlaRun"))
# We compile when we see the same shape a second time.
run_metadata_after_warmup = config_pb2.RunMetadata()
sess.run(
y,
feed_dict={x: [2., 10.]},
run_metadata=run_metadata_after_warmup,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(
InLabels(RunMetadataLabels(run_metadata_after_warmup), "_XlaCompile"))
self.assertTrue(
InLabels(RunMetadataLabels(run_metadata_after_warmup), "_XlaRun"))
run_metadata_for_new_shape = config_pb2.RunMetadata()
sess.run(
y,
feed_dict={x: [2., 10., 12.]},
run_metadata=run_metadata_for_new_shape,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(
InLabels(
RunMetadataLabels(run_metadata_for_new_shape), "_XlaCompile"))
self.assertFalse(
InLabels(RunMetadataLabels(run_metadata_for_new_shape), "_XlaRun"))
def testIsMegamorphic(self):
@function.Defun(compiled=True)
def CompiledFunction(x):
return math_ops.log(x)
with session_lib.Session(config=NoRewriteSessionConfig()) as sess:
x = array_ops.placeholder(dtypes.float32)
y = CompiledFunction(x)
# Make the cluster go megamorphic by running it with lots of shape
# signatures where the cluster is executed with each signature only a few
# times. Then check that we don't compile the cluster ever again.
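      # Concretely: 40 distinct shapes (lengths 10..49), each executed only 49
      # times. The assertions below then expect every traced run at length 60
      # to show _XlaCompile without a matching _XlaRun, i.e. the cluster is no
      # longer compiled.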
for shape in range(10, 50):
for _ in range(0, 49):
sess.run(y, feed_dict={x: [0.] * shape})
for _ in range(0, 50):
run_metadata = config_pb2.RunMetadata()
sess.run(
y,
feed_dict={x: [0.] * 60},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(
InLabels(RunMetadataLabels(run_metadata), "_XlaCompile"))
self.assertFalse(InLabels(RunMetadataLabels(run_metadata), "_XlaRun"))
def testIsNotMegamorphic(self):
@function.Defun(compiled=True)
def CompiledFunction(x):
return math_ops.log(x)
with session_lib.Session(config=NoRewriteSessionConfig()) as sess:
x = array_ops.placeholder(dtypes.float32)
y = CompiledFunction(x)
# Run the cluster with lots of shape signatures, but in a way that it
# isn't megamorphic (i.e. each shape signature sees a lot of executions).
# Then check that the cluster has not been marked as megamorphic.
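      # Concretely: the same 40 shapes as above, but each executed 1000 times,
      # plus 10 warm-up runs at length 60. The traced run below is then
      # expected to show both _XlaCompile and _XlaRun, i.e. the cluster is
      # still compiled and executed via XLA.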
for shape in range(10, 50):
for _ in range(0, 1000):
sess.run(y, feed_dict={x: [0.] * shape})
for _ in range(0, 10):
sess.run(y, feed_dict={x: [0.] * 60})
run_metadata = config_pb2.RunMetadata()
sess.run(
y,
feed_dict={x: [0.] * 60},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(InLabels(RunMetadataLabels(run_metadata), "_XlaCompile"))
self.assertTrue(InLabels(RunMetadataLabels(run_metadata), "_XlaRun"))
if __name__ == "__main__":
os.environ["TF_XLA_FLAGS"] = ("--tf_xla_enable_lazy_compilation=true " +
os.environ.get("TF_XLA_FLAGS", ""))
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/jit_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for 3D convolutions using the XLA JIT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
# Test cloned from
# tensorflow/python/kernel_tests/conv3d_backprop_filter_v2_grad_test.py
class Conv3DBackpropFilterV2GradTest(xla_test.XLATestCase):
def testGradient(self):
with self.session(), self.test_scope():
for padding in ["SAME", "VALID"]:
for stride in [1, 2]:
np.random.seed(1)
in_shape = [2, 4, 3, 3, 2]
in_val = constant_op.constant(
2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
filter_shape = [3, 3, 3, 2, 3]
strides = [1, stride, stride, stride, 1]
# Make a convolution op with the current settings, just to easily get
# the shape of the output.
conv_out = nn_ops.conv3d(in_val,
array_ops.zeros(filter_shape), strides,
padding)
out_backprop_shape = conv_out.get_shape().as_list()
out_backprop_val = constant_op.constant(
2 * np.random.random_sample(out_backprop_shape) - 1,
dtype=dtypes.float32)
output = nn_ops.conv3d_backprop_filter_v2(in_val, filter_shape,
out_backprop_val, strides,
padding)
err = gradient_checker.compute_gradient_error(
[in_val, out_backprop_val], [in_shape, out_backprop_shape],
output, filter_shape)
print("conv3d_backprop_filter gradient err = %g " % err)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)
# Test cloned from tensorflow/python/kernel_tests/conv3d_transpose_test.py
class Conv3DTransposeTest(xla_test.XLATestCase):
def testConv3DTransposeSingleStride(self):
with self.session(), self.test_scope():
strides = [1, 1, 1, 1, 1]
# Input, output: [batch, depth, height, width, channel]
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 5, 6, 4, 2]
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
# We count the number of cells being added at the locations in the output.
# At the center, #cells = kernel_depth * kernel_height * kernel_width
# At the corners, #cells = ceil(kernel_depth/2) * ceil(kernel_height/2)
# * ceil(kernel_width/2)
# At the edges, #cells =
# kernel_depth * ceil(kernel_height/2) * ceil(kernel_width/2) or
# ceil(kernel_depth/2) * kernel_height * ceil(kernel_width/2) or
# ceil(kernel_depth/2) * ceil(kernel_height/2) * kernel_width
# At the borders, #cells =
# ceil(kernel_depth/2) * kernel_height * kernel_width or
# kernel_depth * ceil(kernel_height/2) * kernel_width or
# kernel_depth * kernel_height * ceil(kernel_width/2)
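      # For example, with the all-ones 3x3x3 filter and 3 input channels used
      # here, a fully interior cell accumulates 3 * 3 * 3 = 27 kernel cells per
      # channel, i.e. 27 * 3.0, while a corner cell accumulates
      # ceil(3/2)**3 = 8 cells per channel, i.e. 8 * 3.0, matching the targets
      # computed below.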
for n in xrange(x_shape[0]):
for k in xrange(f_shape[3]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
for d in xrange(y_shape[1]):
d_in = d > 0 and d < y_shape[1] - 1
h_in = h > 0 and h < y_shape[2] - 1
w_in = w > 0 and w < y_shape[3] - 1
if d_in + h_in + w_in == 3:
target = 27 * 3.0
elif d_in + h_in + w_in == 2:
target = 18 * 3.0
elif d_in or h_in or w_in:
target = 12 * 3.0
else:
target = 8 * 3.0
self.assertAllClose(target, value[n, d, h, w, k])
def testConv3DTransposeSame(self):
with self.session(), self.test_scope():
strides = [1, 2, 2, 2, 1]
      # Input, output: [batch, depth, height, width, channel]
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 10, 12, 8, 2]
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[3]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
for d in xrange(y_shape[1]):
# We add a case for locations divisible by the stride.
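                # With stride 2 and a 3x3x3 all-ones filter, a dimension that
                # is both interior and aligned with the stride grid contributes
                # ceil(3/2) = 2 overlapping kernel taps, so the targets below
                # are 2**m * 3.0, where m is the number of such dimensions.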
d_in = d % strides[1] == 0 and 0 < d < y_shape[1] - 1
h_in = h % strides[2] == 0 and 0 < h < y_shape[2] - 1
w_in = w % strides[3] == 0 and 0 < w < y_shape[3] - 1
if d_in + h_in + w_in == 3:
target = 8 * 3.0
elif d_in + h_in + w_in == 2:
target = 4 * 3.0
elif d_in or h_in or w_in:
target = 2 * 3.0
else:
target = 3.0
self.assertAllClose(target, value[n, d, h, w, k])
def testConv3DTransposeValid(self):
with self.session(), self.test_scope():
strides = [1, 2, 2, 2, 1]
      # Input, output: [batch, depth, height, width, channel]
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 11, 13, 9, 2]
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="VALID")
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[3]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
for d in xrange(y_shape[1]):
# We add a case for locations divisible by the stride.
d_in = d % strides[1] == 0 and pad < d < y_shape[1] - 1 - pad
h_in = h % strides[2] == 0 and pad < h < y_shape[2] - 1 - pad
w_in = w % strides[3] == 0 and pad < w < y_shape[3] - 1 - pad
if d_in + h_in + w_in == 3:
target = 8 * 3.0
elif d_in + h_in + w_in == 2:
target = 4 * 3.0
elif d_in or h_in or w_in:
target = 2 * 3.0
else:
target = 3.0
cache_values[n, d, h, w, k] = target
# copy values in the border
cache_values[n, :, :, 0, k] = cache_values[n, :, :, 1, k]
cache_values[n, :, :, -1, k] = cache_values[n, :, :, -2, k]
cache_values[n, :, 0, :, k] = cache_values[n, :, 1, :, k]
cache_values[n, :, -1, :, k] = cache_values[n, :, -2, :, k]
cache_values[n, 0, :, :, k] = cache_values[n, 1, :, :, k]
cache_values[n, -1, :, :, k] = cache_values[n, -2, :, :, k]
self.assertAllClose(cache_values, value)
def testGradient(self):
x_shape = [2, 3, 4, 3, 2]
f_shape = [3, 3, 3, 2, 2]
y_shape = [2, 6, 8, 6, 2]
strides = [1, 2, 2, 2, 1]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
f_val = np.random.random_sample(f_shape).astype(np.float64)
with self.session(), self.test_scope():
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
print("conv3d_transpose gradient err = %g " % err)
err_tolerance = 0.001
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/conv3d_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Conv2D via the XLA JIT.
The canned results in these tests are created by running each test using the
Tensorflow CPU device and saving the output.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import test_utils
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
DATA_FORMATS = (
("_data_format_NHWC", "NHWC"),
("_data_format_NCHW", "NCHW"),
)
class Conv2DTest(xla_test.XLATestCase, parameterized.TestCase):
def _VerifyValues(self,
input_sizes=None,
filter_sizes=None,
strides=None,
dilations=None,
padding=None,
data_format_src="NHWC",
data_format_dst="NHWC",
expected=None):
"""Tests that tf.nn.conv2d produces the expected value.
Args:
input_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
strides: Strides.
dilations: RHS dilations.
padding: Padding type.
data_format_src: Data format input is in.
      data_format_dst: Data format in which verification is run and to which
        the input is converted.
expected: Expected output.
"""
total_size_1 = np.prod(input_sizes)
total_size_2 = np.prod(filter_sizes)
x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)
x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(filter_sizes)
strides = [1] + strides + [1]
if dilations is None:
dilations = [1, 1]
dilations = [1] + dilations + [1]
# Convert between data formats.
expected = test_utils.ConvertBetweenDataFormats(expected, data_format_src,
data_format_dst)
x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src,
data_format_dst)
input_sizes = test_utils.PermuteDimsBetweenDataFormats(
input_sizes, data_format_src, data_format_dst)
strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src,
data_format_dst)
dilations = test_utils.PermuteDimsBetweenDataFormats(
dilations, data_format_src, data_format_dst)
with self.session() as sess:
t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)
t2 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)
with self.test_scope():
out = nn_ops.conv2d(
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format_dst,
dilations=dilations)
value = sess.run(out, {t1: x1, t2: x2})
self.assertAllClose(expected, value, 1e-3)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x1Filter(self, data_format):
expected_output = np.reshape([
30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
], [1, 2, 3, 3])
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Filter(self, data_format):
expected_output = np.reshape(
[2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0], [1, 1, 2, 3])
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Filter2x1Dilation(self, data_format):
expected_output = np.array([[[[72], [82], [92]], [[112], [122], [132]]]])
self._VerifyValues(
input_sizes=[1, 4, 4, 1],
filter_sizes=[2, 2, 1, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2Filter(self, data_format):
expected_output = np.reshape([
231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
936.0, 1029.0
], [1, 2, 2, 3])
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[1, 2, 3, 3],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterStride2(self, data_format):
expected_output = np.reshape([2271.0, 2367.0, 2463.0], [1, 1, 1, 3])
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterStride2Same(self, data_format):
expected_output = np.reshape(
[2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0], [1, 1, 2, 3])
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2DEmptyDilation(self, data_format):
self._VerifyValues(
input_sizes=[0, 2, 3, 3],
filter_sizes=[1, 1, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=np.zeros([0, 2, 3, 3]))
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterDilation(self, data_format):
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=np.reshape([2667, 2781, 2895], [1, 1, 1, 3]))
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterDilation(self, data_format):
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[1, 2, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=np.array([[[[231, 252, 273], [384, 423, 462]],
[[690, 765, 840], [843, 936, 1029]]]]))
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2DKernelSizeMatchesInputSizeDilation(self, data_format):
self._VerifyValues(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=np.reshape([108, 128], [1, 1, 1, 2]))
class Conv2DBackpropInputTest(xla_test.XLATestCase, parameterized.TestCase):
def _VerifyValues(self,
input_sizes=None,
filter_sizes=None,
out_backprop_sizes=None,
strides=None,
dilations=None,
padding=None,
data_format_src="NHWC",
data_format_dst="NHWC",
expected=None):
"""Tests that gen_nn_ops.conv2d_backprop_input produces the expected output.
Args:
input_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
out_backprop_sizes: Output gradients tensor dimensions.
strides: Strides.
dilations: Dilations.
padding: Padding type.
data_format_src: Data format input is in.
      data_format_dst: Data format in which verification is run and to which
        the input is converted.
expected: Expected output.
"""
total_size_1 = np.prod(filter_sizes)
total_size_2 = np.prod(out_backprop_sizes)
x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(filter_sizes)
x2 = np.arange(
1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes)
strides = [1] + strides + [1]
if dilations is not None:
dilations = [1] + dilations + [1]
expected = np.reshape(expected, input_sizes)
# Convert between data formats.
expected = test_utils.ConvertBetweenDataFormats(expected, data_format_src,
data_format_dst)
x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src,
data_format_dst)
input_sizes = test_utils.PermuteDimsBetweenDataFormats(
input_sizes, data_format_src, data_format_dst)
out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(
out_backprop_sizes, data_format_src, data_format_dst)
strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src,
data_format_dst)
if dilations is not None:
dilations = test_utils.PermuteDimsBetweenDataFormats(
dilations, data_format_src, data_format_dst)
with self.session() as sess:
t1 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)
t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)
with self.test_scope():
out = gen_nn_ops.conv2d_backprop_input(
input_sizes=input_sizes,
filter=t1,
out_backprop=t2,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format_dst)
value = sess.run(out, {t1: x1, t2: x2})
self.assertAllEqual(input_sizes, value.shape)
self.assertAllClose(expected, value, 1e-3)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x1Filter(self, data_format):
expected_output = [
5, 11, 17, 11, 25, 39, 17, 39, 61, 23, 53, 83, 29, 67, 105, 35, 81, 127,
41, 95, 149, 47, 109, 171, 53, 123, 193, 59, 137, 215, 65, 151, 237, 71,
165, 259, 77, 179, 281, 83, 193, 303, 89, 207, 325, 95, 221, 347.
]
self._VerifyValues(
input_sizes=[1, 4, 4, 3],
filter_sizes=[1, 1, 3, 2],
out_backprop_sizes=[1, 4, 4, 2],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterStride3Width5(self, data_format):
expected_output = [1, 2, 0, 2, 4]
self._VerifyValues(
input_sizes=[1, 1, 5, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[3, 3],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterStride3Width6(self, data_format):
expected_output = [1, 2, 0, 2, 4, 0]
self._VerifyValues(
input_sizes=[1, 1, 6, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[3, 3],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterStride3Width7(self, data_format):
expected_output = [1, 2, 0, 2, 4, 0, 0]
self._VerifyValues(
input_sizes=[1, 1, 7, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[3, 3],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterC1Same(self, data_format):
expected_output = [1, 4, 7, 7, 23, 33]
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 2, 3, 1],
strides=[1, 1],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Filter(self, data_format):
expected_output = [
14, 32, 50, 100, 163, 226, 167, 212, 257, 122, 140, 158, 478, 541, 604,
437, 482, 527
]
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
out_backprop_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterSame(self, data_format):
expected_output = [
14, 32, 50, 100, 163, 226, 217, 334, 451, 190, 307, 424, 929, 1217,
1505, 1487, 1883, 2279
]
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
out_backprop_sizes=[1, 2, 3, 3],
strides=[1, 1],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2Filter(self, data_format):
expected_output = [1, 4, 4, 3, 10, 8, 5, 16, 12]
self._VerifyValues(
input_sizes=[1, 3, 3, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 3, 2, 1],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterSame(self, data_format):
expected_output = [1, 4, 7, 4, 13, 16, 7, 22, 25]
self._VerifyValues(
input_sizes=[1, 3, 3, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 3, 3, 1],
strides=[1, 1],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterStride2(self, data_format):
expected_output = [1, 2, 5, 4, 6, 0, 0, 0, 0, 0, 3, 6, 13, 8, 12]
self._VerifyValues(
input_sizes=[1, 3, 5, 1],
filter_sizes=[1, 3, 1, 1],
out_backprop_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterStride2Same(self, data_format):
expected_output = [1, 2, 2, 3, 4, 6]
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[2, 2],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(
self, data_format):
self._VerifyValues(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[1, 4, 7, 10, 13, 10, 0, 0, 0, 0, 0, 0, 3, 10, 17, 24, 31, 20])
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self, data_format):
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 1, 1, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[1, 0, 2, 3, 0, 4])
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2DEmptyBackpropInputDilation1x2(self, data_format):
self._VerifyValues(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[0, 1, 1, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=np.zeros([0]))
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self, data_format):
# The GPU version of this test is not very stable. So adjusting the
# error threshold to 1e-4.
self._VerifyValues(
input_sizes=[1, 3, 2, 3],
filter_sizes=[2, 2, 3, 3],
out_backprop_sizes=[1, 1, 1, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[
14, 32, 50, 68, 86, 104, 0, 0, 0, 0, 0, 0, 122, 140, 158, 176, 194,
212
])
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(
self, data_format):
self._VerifyValues(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
out_backprop_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[5, 0, 11, 0, 0, 0, 17, 0, 23])
class Conv2DBackpropFilterTest(xla_test.XLATestCase, parameterized.TestCase):
def _VerifyValues(self,
input_sizes=None,
filter_sizes=None,
out_backprop_sizes=None,
strides=None,
dilations=None,
padding=None,
data_format_src="NHWC",
data_format_dst="NHWC",
expected=None):
"""Tests that gen_nn_ops.conv2d_backprop_filter produces the right output.
Args:
input_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
out_backprop_sizes: Output gradients tensor dimensions.
strides: Stride.
dilations: Dilations.
padding: Padding type.
data_format_src: Data format input is in.
      data_format_dst: Data format in which verification is run and to which
        the input is converted.
expected: Expected output.
"""
total_size_1 = np.prod(input_sizes)
total_size_2 = np.prod(out_backprop_sizes)
x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)
x2 = np.arange(
1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes)
strides = [1] + strides + [1]
if dilations is not None:
dilations = [1] + dilations + [1]
expected = np.reshape(expected, filter_sizes)
# Convert between data formats.
x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src,
data_format_dst)
x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src,
data_format_dst)
input_sizes = test_utils.PermuteDimsBetweenDataFormats(
input_sizes, data_format_src, data_format_dst)
out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(
out_backprop_sizes, data_format_src, data_format_dst)
strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src,
data_format_dst)
if dilations is not None:
dilations = test_utils.PermuteDimsBetweenDataFormats(
dilations, data_format_src, data_format_dst)
with self.session() as sess:
t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)
t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)
with self.test_scope():
tensor = gen_nn_ops.conv2d_backprop_filter(
input=t1,
filter_sizes=filter_sizes,
out_backprop=t2,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format_dst)
value = sess.run(tensor, {t1: x1, t2: x2})
self.assertAllEqual(filter_sizes, value.shape)
self.assertAllClose(expected, value, 1e-3)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x1Filter(self, data_format):
expected_output = [8056, 8432, 8312, 8704, 8568, 8976]
self._VerifyValues(
input_sizes=[1, 4, 4, 3],
filter_sizes=[1, 1, 3, 2],
out_backprop_sizes=[1, 4, 4, 2],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2Filter(self, data_format):
expected_output = [120, 141]
self._VerifyValues(
input_sizes=[1, 3, 3, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 3, 2, 1],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterDepth1(self, data_format):
expected_output = [5, 8, 14, 17]
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Filter(self, data_format):
expected_output = [
17, 22, 27, 22, 29, 36, 27, 36, 45, 32, 43, 54, 37, 50, 63, 42, 57, 72,
62, 85, 108, 67, 92, 117, 72, 99, 126, 77, 106, 135, 82, 113, 144, 87,
120, 153
]
self._VerifyValues(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
out_backprop_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterStride3Width5(self, data_format):
expected_output = [9, 12]
self._VerifyValues(
input_sizes=[1, 1, 5, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[3, 3],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterStride3Width6(self, data_format):
expected_output = [9, 12]
self._VerifyValues(
input_sizes=[1, 1, 6, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[3, 3],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x2FilterStride3Width7(self, data_format):
expected_output = [9, 12]
self._VerifyValues(
input_sizes=[1, 1, 7, 1],
filter_sizes=[1, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[3, 3],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x3Filter(self, data_format):
expected_output = [5, 8, 11]
self._VerifyValues(
input_sizes=[1, 1, 4, 1],
filter_sizes=[1, 3, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x3FilterSame(self, data_format):
expected_output = [20, 30, 20]
self._VerifyValues(
input_sizes=[1, 1, 4, 1],
filter_sizes=[1, 3, 1, 1],
out_backprop_sizes=[1, 1, 4, 1],
strides=[1, 1],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D1x3FilterSameOutbackprop2(self, data_format):
expected_output = [7, 10, 3]
self._VerifyValues(
input_sizes=[1, 1, 4, 1],
filter_sizes=[1, 3, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[2, 2],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterC1Same(self, data_format):
expected_output = [91, 58, 32, 17]
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 2, 3, 1],
strides=[1, 1],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterStride2(self, data_format):
expected_output = [92, 102, 112]
self._VerifyValues(
input_sizes=[1, 3, 5, 1],
filter_sizes=[1, 3, 1, 1],
out_backprop_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2FilterStride2Same(self, data_format):
expected_output = [7, 2, 16, 5]
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 1, 2, 1],
strides=[2, 2],
padding="SAME",
data_format_src="NHWC",
data_format_dst=data_format,
expected=expected_output)
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(
self, data_format):
self._VerifyValues(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[55, 70, 235, 250])
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self, data_format):
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
out_backprop_sizes=[1, 1, 1, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[1, 3, 4, 6])
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2DEmptyBackpropFilterDilation1x2(self, data_format):
self._VerifyValues(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
out_backprop_sizes=[1, 1, 1, 0],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=np.zeros([0]))
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self, data_format):
self._VerifyValues(
input_sizes=[1, 3, 4, 3],
filter_sizes=[2, 2, 3, 3],
out_backprop_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[
17, 22, 27, 22, 29, 36, 27, 36, 45, 47, 64, 81, 52, 71, 90, 57, 78,
99, 137, 190, 243, 142, 197, 252, 147, 204, 261, 167, 232, 297, 172,
239, 306, 177, 246, 315
])
@parameterized.named_parameters(*DATA_FORMATS)
def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(
self, data_format):
self._VerifyValues(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
out_backprop_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format_src="NHWC",
data_format_dst=data_format,
expected=[1, 2, 3, 6, 7, 14, 9, 18])
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/conv2d_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slicing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class SliceTest(xla_test.XLATestCase):
def test1D(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[10])
with self.test_scope():
o = array_ops.slice(i, [2], [4])
params = {
i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
result = o.eval(feed_dict=params)
self.assertAllEqual([2, 3, 4, 5], result)
def testZeroSlice(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[2])
with self.test_scope():
o = array_ops.slice(i, [0], [0])
params = {
i: [0, 1],
}
result = o.eval(feed_dict=params)
self.assertAllEqual([], result)
def test3D(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[3, 3, 10])
with self.test_scope():
o = array_ops.slice(i, [1, 2, 2], [1, 1, 4])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[6, 5, 4, 3]]], result)
def test3DWithDynamicBegin(self):
"""Tests a slice where the start offset is not known at compile time."""
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[3, 3, 10])
begin = array_ops.placeholder(dtypes.int32, shape=[3])
with self.test_scope():
o = array_ops.slice(i, begin, [1, 1, 4])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]],
begin: [1, 2, 2]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[6, 5, 4, 3]]], result)
def test3DWithDynamicBeginAndNegativeSize(self):
"""Tests a slice where `begin` is fed dynamically and `size` contains -1."""
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[3, 3, 10])
begin = array_ops.placeholder(dtypes.int32, shape=[3])
with self.test_scope():
o = array_ops.slice(i, begin, [1, -1, 4])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]],
begin: [1, 1, 2]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[1, 1, 1, 1], [6, 5, 4, 3]]], result)
class StridedSliceTest(xla_test.XLATestCase):
def test1D(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[10])
with self.test_scope():
o = array_ops.strided_slice(i, [2], [6], [2])
params = {
i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
result = o.eval(feed_dict=params)
self.assertAllEqual([2, 4], result)
def test1DNegativeStride(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[10])
with self.test_scope():
o = array_ops.strided_slice(i, [6], [2], [-2])
params = {
i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
result = o.eval(feed_dict=params)
self.assertAllEqual([6, 4], result)
def test2DDegenerate(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[2, 3])
with self.test_scope():
o = array_ops.strided_slice(i, [-1, 0], [0, 3])
params = {
i: [[0, 1, 2],
[3, 4, 5]]
}
result = o.eval(feed_dict=params)
self.assertEqual(tensor_shape.TensorShape((0, 3)), result.shape)
def test2DDegenerateNegativeStride(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[2, 3])
with self.test_scope():
o = array_ops.strided_slice(i, [0, 0], [-1, 3], [-1, 1])
params = {
i: [[0, 1, 2],
[3, 4, 5]]
}
result = o.eval(feed_dict=params)
self.assertEqual(tensor_shape.TensorShape((0, 3)), result.shape)
def test3D(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[3, 3, 10])
with self.test_scope():
o = array_ops.strided_slice(i, [0, 2, 2], [2, 3, 6], [1, 1, 2])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[1, 9]], [[6, 4]]], result)
def test3DNegativeStride(self):
for dtype in self.numeric_types:
with self.session():
i = array_ops.placeholder(dtype, shape=[3, 4, 10])
with self.test_scope():
o = array_ops.strided_slice(i, [2, 2, 6], [0, 0, 2], [-1, -1, -2])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0],
[4, 5, 2, 4, 3, 7, 6, 8, 9, 4]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[4, 3, 4, 5, 7, 6, 5, 3, 4, 5],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7],
[7, 1, 7, 1, 8, 1, 8, 1, 3, 1]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9],
[9, 9, 5, 5, 6, 6, 3, 3, 6, 6]]]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[9, 8],
[1, 1]],
[[2, 4],
[5, 7]]], result)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/slice_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adadelta Optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adadelta
class AdadeltaOptimizerTest(xla_test.XLATestCase):
def testBasic(self):
num_updates = 4 # number of ADADELTA steps to perform
if "CPU" in self.device:
# To avoid timeout on CPU.
all_grad = [0.2, 0.01]
all_lr = [1.0, 0.1]
else:
all_grad = [0.2, 0.1, 0.01]
all_lr = [1.0, 0.5, 0.1]
for dtype in self.float_types:
with self.session(), self.test_scope():
for grad in all_grad:
for lr in all_lr:
var0_init = [1.0, 2.0]
var1_init = [3.0, 4.0]
var0 = resource_variable_ops.ResourceVariable(
var0_init, dtype=dtype)
var1 = resource_variable_ops.ResourceVariable(
var1_init, dtype=dtype)
grads = constant_op.constant([grad, grad], dtype=dtype)
accum = 0.0
accum_update = 0.0
# ADADELTA gradient optimizer
rho = 0.95
epsilon = 1e-8
adadelta_opt = adadelta.AdadeltaOptimizer(
learning_rate=lr, rho=rho, epsilon=epsilon)
adadelta_update = adadelta_opt.apply_gradients(
zip([grads, grads], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
opt_vars = adadelta_opt.variables()
self.assertStartsWith(opt_vars[0].name, var0._shared_name)
self.assertStartsWith(opt_vars[1].name, var0._shared_name)
self.assertStartsWith(opt_vars[2].name, var1._shared_name)
self.assertStartsWith(opt_vars[3].name, var1._shared_name)
self.assertEqual(4, len(opt_vars))
# Assign slots
slot = [None] * 2
slot_update = [None] * 2
self.assertEqual(["accum", "accum_update"],
adadelta_opt.get_slot_names())
slot[0] = adadelta_opt.get_slot(var0, "accum")
self.assertEquals(slot[0].get_shape(), var0.get_shape())
self.assertFalse(slot[0] in variables.trainable_variables())
slot_update[0] = adadelta_opt.get_slot(var0, "accum_update")
self.assertEquals(slot_update[0].get_shape(), var0.get_shape())
self.assertFalse(slot_update[0] in variables.trainable_variables())
slot[1] = adadelta_opt.get_slot(var1, "accum")
self.assertEquals(slot[1].get_shape(), var1.get_shape())
self.assertFalse(slot[1] in variables.trainable_variables())
slot_update[1] = adadelta_opt.get_slot(var1, "accum_update")
self.assertEquals(slot_update[1].get_shape(), var1.get_shape())
self.assertFalse(slot_update[1] in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose(var0_init, self.evaluate(var0))
self.assertAllClose(var1_init, self.evaluate(var1))
update = [None] * num_updates
tot_update = 0
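          # Reference Adadelta recurrence, computed in numpy alongside the op:
          #   accum        <- rho * accum + (1 - rho) * grad**2
          #   update       <- sqrt(accum_update + eps) / sqrt(accum + eps) * grad
          #   accum_update <- rho * accum_update + (1 - rho) * update**2
          #   var          <- var - lr * update   (accumulated in tot_update)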
for step in range(num_updates):
# Run adadelta update for comparison
self.evaluate(adadelta_update)
              # Compute the reference update for this step in numpy (see the
              # recurrence above).
accum = accum * rho + (grad**2) * (1 - rho)
update[step] = (
np.sqrt(accum_update + epsilon) *
(1. / np.sqrt(accum + epsilon)) * grad)
accum_update = (
accum_update * rho + (update[step]**2) * (1.0 - rho))
tot_update += update[step] * lr
# Check that the accumulators have been updated
for slot_idx in range(2):
self.assertAllCloseAccordingToType(
np.array([accum, accum], dtype=dtype),
self.evaluate(slot[slot_idx]),
rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array([accum_update, accum_update], dtype=dtype),
self.evaluate(slot_update[slot_idx]),
rtol=1e-5)
# Check that the parameters have been updated
self.assertAllCloseAccordingToType(
np.array(
[var0_init[0] - tot_update, var0_init[1] - tot_update],
dtype=dtype),
self.evaluate(var0),
rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array(
[var1_init[0] - tot_update, var1_init[1] - tot_update],
dtype=dtype),
self.evaluate(var1),
rtol=1e-5)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/adadelta_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sorting operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class XlaSortOpTest(xla_test.XLATestCase):
def _assertOpOutputMatchesExpected(self, op, args, expected):
with self.session() as session:
with self.test_scope():
placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
for arg in args
]
feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
output = op(*placeholders)
if isinstance(output, ops.Tensor):
output = [output]
results = session.run(output, feeds)
for result, v in zip(results, expected):
self.assertAllClose(v, result, rtol=1e-3)
def testSort(self):
supported_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
for dtype in supported_types.intersection(self.numeric_types):
x = np.arange(101, dtype=dtype)
np.random.shuffle(x)
self._assertOpOutputMatchesExpected(
xla.sort, [x], expected=[np.arange(101, dtype=dtype)])
def testKeyValueSort(self):
supported_key_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
supported_value_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32,
dtypes.int64.as_numpy_dtype, dtypes.uint64.as_numpy_dtype])
for key_type in supported_key_types.intersection(self.numeric_types):
for value_type in supported_value_types.intersection(self.numeric_types):
x = np.arange(101, dtype=key_type)
np.random.shuffle(x)
y = (-x).astype(value_type)
self._assertOpOutputMatchesExpected(
xla.key_value_sort, [x, y],
expected=[
np.arange(101, dtype=key_type),
-np.arange(101, dtype=value_type)
])
def testTopK(self):
supported_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
for dtype in supported_types.intersection(self.numeric_types):
# Use small input size for bfloat16. Otherwise, we'll get duplicate values
# after conversion to bfloat16, so the possible resulting index array is
# no longer unique.
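      # (bfloat16 keeps roughly 8 bits of significand, so consecutive large
      # integers collapse to the same value after the cast; with only 20
      # values, 0..19 all stay exactly representable and distinct.)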
if dtype == dtypes.bfloat16.as_numpy_dtype:
array_size = 20
k_options = [0, 1, 2, 10, 20]
else:
array_size = 200 * 1000
k_options = [0, 1, 2, 10, 20, 100, 1000, 200 * 1000]
for x in [np.arange(array_size)]:
np.random.shuffle(x)
for k in k_options:
indices = x.argsort()[::-1][:k]
def topk(v, k=k):
return nn_ops.top_k(v, k=k, sorted=True)
self._assertOpOutputMatchesExpected(
topk, [x.astype(dtype)],
expected=[x[indices].astype(dtype), indices])
def testTopK2D(self):
supported_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
for dtype in supported_types.intersection(self.numeric_types):
      # Use a small input size for bfloat16. Otherwise we would get duplicate
      # values after conversion to bfloat16, and the expected index array
      # would no longer be unique.
if dtype == dtypes.bfloat16.as_numpy_dtype:
array_size = 10
k_options = [0, 1, 2, 10]
else:
array_size = 200 * 1000
k_options = [0, 1, 2, 10, 20, 100, 1000, 200 * 1000]
batch = 16
for x in [np.arange(batch * array_size)]:
np.random.shuffle(x)
x = np.reshape(x, [batch, array_size])
for k in k_options:
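          # For reference: the slice [::, -1:-k - 1:-1] reads each row of the
          # ascending argsort/sort backwards, yielding the top-k indices and
          # values per row in descending order.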
indices = x.argsort(axis=1)[::, -1:-k - 1:-1]
expected = np.sort(x, axis=1)[::, -1:-k - 1:-1]
def topk(v, k=k):
return nn_ops.top_k(v, k=k, sorted=True)
self._assertOpOutputMatchesExpected(
topk, [x.astype(dtype)],
expected=[expected.astype(dtype), indices])
def testTopKZeros(self):
"""Tests that positive and negative zeros sort correctly."""
# Only bfloat16 is implemented.
bfloat16 = dtypes.bfloat16.as_numpy_dtype
if bfloat16 not in self.numeric_types:
return
with self.session() as sess:
p = array_ops.placeholder(dtypes.bfloat16)
with self.test_scope():
topk = nn_ops.top_k(p, k=4)
results = sess.run(
topk,
{p: np.array([0., -0., 0., 3., -0., -4., 0., -0.], dtype=bfloat16)})
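      # The expected indices [3, 0, 2, 6] pick out 3.0 and the three +0.0
      # entries, i.e. the positive zeros are ordered ahead of the negative
      # zeros in the top_k output.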
self.assertAllEqual(
np.array([3., 0., 0., 0.], dtype=bfloat16), results[0])
self.assertEqual(list([3, 0, 2, 6]), list(results[1]))
def testTopKInfinities(self):
"""Tests that positive and negative infinity sort correctly."""
# Only bfloat16 is implemented.
bfloat16 = dtypes.bfloat16.as_numpy_dtype
if bfloat16 not in self.numeric_types:
return
with self.session() as sess:
p = array_ops.placeholder(dtypes.bfloat16)
with self.test_scope():
topk = nn_ops.top_k(p, k=6)
results = sess.run(topk, {
p: np.array(
[1, 2, float("inf"), -float("inf"), -1, -2], dtype=bfloat16)
})
self.assertAllEqual(
np.array(
[float("inf"), 2.0, 1.0, -1.0, -2.0, -float("inf")],
dtype=bfloat16), results[0])
self.assertEqual(list([2, 1, 0, 4, 5, 3]), list(results[1]))
def testInTopK(self):
supported_types = set([np.int32, np.int64])
for dtype in supported_types.intersection(self.numeric_types):
array_size = 200 * 1000
k_options = [0, 1, 2, 10, 20, 100, 1000, 200 * 1000]
batch = 16
for x in [np.arange(batch * array_size)]:
np.random.shuffle(x)
x = np.reshape(x, [batch, array_size])
y = np.random.randint(0, array_size, size=batch)
for k in k_options:
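          # in_top_k(predictions, targets, k) should return True for a row
          # exactly when the target column index is among that row's k largest
          # predictions, which is what `expected` computes via argsort below.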
indices = x.argsort(axis=1)[::, -1:-k - 1:-1]
expected = [y[i] in indices[i] for i in range(batch)]
def in_topk(predictions, targets, k=k):
return nn_ops.in_top_k(predictions, targets, k)
self._assertOpOutputMatchesExpected(
in_topk,
[x.astype(np.float32), y.astype(dtype)],
expected=[expected])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/sort_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.platform import googletest
class FakeQuantWithMinMaxArgsTest(xla_test.XLATestCase):
"""Test cases for FakeQuantWithMinMaxArgs operation."""
# 8 bits, wide range.
def testOp_with8BitsNoScalingNoNudging(self):
self._TestOp(0.0, 255.0, 8, False, 0.0, 255.0, 1.0)
def testOp_with8BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 128.0, 8, False, 0.0, 127.5, 0.5)
def testOp_with8BitsScalingAndNudgingUp(self):
self._TestOp(-128.0, -0.5, 8, False, -127.5, 0.0, 0.5)
def testOp_with8BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 127.4, 8, False, 0.0, 127.5, 0.5)
# 8 bits, narrow range.
def testOp_with8BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 254.0, 8, True, 0.0, 254.0, 1.0)
def testOp_with8BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 127.1, 8, True, 0.0, 127.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-127.1, -0.1, 8, True, -127.0, 0.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 126.9, 8, True, 0.0, 127.0, 0.5)
# 7 bits, wide range.
def testOp_with7BitsNoScalingNoNudging(self):
self._TestOp(0.0, 127.0, 7, False, 0.0, 127.0, 1.0)
def testOp_with7BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 64.0, 7, False, 0.0, 63.5, 0.5)
def testOp_with7BitsScalingAndNudgingUp(self):
self._TestOp(-64.0, -0.5, 7, False, -63.5, 0.0, 0.5)
def testOp_with7BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 63.4, 7, False, 0.0, 63.5, 0.5)
# 7 bits, narrow range.
def testOp_with7BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 126.0, 7, True, 0.0, 126.0, 1.0)
def testOp_with7BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 63.1, 7, True, 0.0, 63.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-63.1, -0.1, 7, True, -63.0, 0.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 62.9, 7, True, 0.0, 63.0, 0.5)
def _TestOp(self, input_min, input_max, num_bits, narrow_range,
expected_nudged_input_min, expected_nudged_input_max,
expected_step):
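    # Rough sketch of the expected behavior, inferred from the parameter and
    # expectation pairs above: the quantization step is
    #   scale = (input_max - input_min) / (2**num_bits - 1)
    # (or divided by 2**num_bits - 2 with narrow_range), and the [min, max]
    # interval is nudged so that 0.0 lands exactly on a quantization level.
    # E.g. for (0.5, 128.0, 8, False): scale = 127.5 / 255 = 0.5 and the
    # range is nudged to [0.0, 127.5].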
inputs = np.array(
[
expected_nudged_input_min - expected_step,
expected_nudged_input_min - 0.01, expected_nudged_input_min,
expected_nudged_input_min + 0.01,
expected_nudged_input_min + expected_step - 0.01,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step + 0.01,
expected_nudged_input_max - 0.01, expected_nudged_input_max,
expected_nudged_input_max + 0.01,
expected_nudged_input_max + expected_step
],
dtype=np.float32)
expected = np.array(
[
expected_nudged_input_min, expected_nudged_input_min,
expected_nudged_input_min, expected_nudged_input_min,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step,
expected_nudged_input_max, expected_nudged_input_max,
expected_nudged_input_max, expected_nudged_input_max
],
dtype=np.float32)
with self.session() as session:
with self.test_scope():
input_placeholder = array_ops.placeholder(
dtypes.float32, inputs.shape, name="inputs")
outputs = array_ops.fake_quant_with_min_max_args(
input_placeholder,
min=input_min,
max=input_max,
num_bits=num_bits,
narrow_range=narrow_range)
result = session.run(outputs, {input_placeholder: inputs})
self.assertAllCloseAccordingToType(
result, expected, rtol=1e-3, atol=1e-5, bfloat16_rtol=0.03)
class FakeQuantWithMinMaxArgsGradientTest(xla_test.XLATestCase):
"""Test cases for FakeQuantWithMinMaxArgsGradient operation."""
# 8 bits, wide range.
def testOp_with8BitsNoScalingNoNudging(self):
self._TestOp(0.0, 255.0, 8, False, 0.0, 255.0, 1.0)
def testOp_with8BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 128.0, 8, False, 0.0, 127.5, 0.5)
def testOp_with8BitsScalingAndNudgingUp(self):
self._TestOp(-128.0, -0.5, 8, False, -127.5, 0.0, 0.5)
def testOp_with8BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 127.4, 8, False, 0.0, 127.5, 0.5)
# 8 bits, narrow range.
def testOp_with8BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 254.0, 8, True, 0.0, 254.0, 1.0)
def testOp_with8BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 127.1, 8, True, 0.0, 127.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-127.1, -0.1, 8, True, -127.0, 0.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 126.9, 8, True, 0.0, 127.0, 0.5)
# 7 bits, wide range.
def testOp_with7BitsNoScalingNoNudging(self):
self._TestOp(0.0, 127.0, 7, False, 0.0, 127.0, 1.0)
def testOp_with7BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 64.0, 7, False, 0.0, 63.5, 0.5)
def testOp_with7BitsScalingAndNudgingUp(self):
self._TestOp(-64.0, -0.5, 7, False, -63.5, 0.0, 0.5)
def testOp_with7BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 63.4, 7, False, 0.0, 63.5, 0.5)
# 7 bits, narrow range.
def testOp_with7BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 126.0, 7, True, 0.0, 126.0, 1.0)
def testOp_with7BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 63.1, 7, True, 0.0, 63.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-63.1, -0.1, 7, True, -63.0, 0.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 62.9, 7, True, 0.0, 63.0, 0.5)
def _TestOp(self, input_min, input_max, num_bits, narrow_range,
expected_nudged_input_min, expected_nudged_input_max,
expected_step):
inputs = np.array(
[
expected_nudged_input_min - expected_step,
expected_nudged_input_min - 0.01, expected_nudged_input_min,
expected_nudged_input_min + 0.01,
expected_nudged_input_min + expected_step - 0.01,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step + 0.01,
expected_nudged_input_max - 0.01, expected_nudged_input_max,
expected_nudged_input_max + 0.01,
expected_nudged_input_max + expected_step
],
dtype=np.float32)
gradients = np.arange(1, len(inputs) + 1, dtype=np.float32)
expected_backprops = np.array(
[0.0, 0.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 0.0, 0.0],
dtype=np.float32)
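    # For reference: gradient entries whose corresponding input falls outside
    # the nudged [min, max] interval (the first two and last two above) are
    # zeroed, while in-range entries pass through unchanged.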
with self.session() as session:
with self.test_scope():
gradient_placeholder = array_ops.placeholder(
dtypes.float32, gradients.shape, name="gradients")
input_placeholder = array_ops.placeholder(
dtypes.float32, inputs.shape, name="inputs")
outputs = gen_array_ops.fake_quant_with_min_max_args_gradient(
gradient_placeholder,
input_placeholder,
min=input_min,
max=input_max,
num_bits=num_bits,
narrow_range=narrow_range)
backprops = session.run(outputs, {
gradient_placeholder: gradients,
input_placeholder: inputs
})
self.assertAllCloseAccordingToType(
backprops,
expected_backprops,
rtol=1e-3,
atol=1e-5,
bfloat16_rtol=0.03)
class FakeQuantWithMinMaxVarsTest(xla_test.XLATestCase):
"""Test cases for FakeQuantWithMinMaxVars operation."""
# 8 bits, wide range.
def testOp_with8BitsNoScalingNoNudging(self):
self._TestOp(0.0, 255.0, 8, False, 0.0, 255.0, 1.0)
def testOp_with8BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 128.0, 8, False, 0.0, 127.5, 0.5)
def testOp_with8BitsScalingAndNudgingUp(self):
self._TestOp(-128.0, -0.5, 8, False, -127.5, 0.0, 0.5)
def testOp_with8BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 127.4, 8, False, 0.0, 127.5, 0.5)
# 8 bits, narrow range.
def testOp_with8BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 254.0, 8, True, 0.0, 254.0, 1.0)
def testOp_with8BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 127.1, 8, True, 0.0, 127.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-127.1, -0.1, 8, True, -127.0, 0.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 126.9, 8, True, 0.0, 127.0, 0.5)
# 7 bits, wide range.
def testOp_with7BitsNoScalingNoNudging(self):
self._TestOp(0.0, 127.0, 7, False, 0.0, 127.0, 1.0)
def testOp_with7BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 64.0, 7, False, 0.0, 63.5, 0.5)
def testOp_with7BitsScalingAndNudgingUp(self):
self._TestOp(-64.0, -0.5, 7, False, -63.5, 0.0, 0.5)
def testOp_with7BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 63.4, 7, False, 0.0, 63.5, 0.5)
# 7 bits, narrow range.
def testOp_with7BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 126.0, 7, True, 0.0, 126.0, 1.0)
def testOp_with7BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 63.1, 7, True, 0.0, 63.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-63.1, -0.1, 7, True, -63.0, 0.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 62.9, 7, True, 0.0, 63.0, 0.5)
def _TestOp(self, input_min, input_max, num_bits, narrow_range,
expected_nudged_input_min, expected_nudged_input_max,
expected_step):
inputs = np.array(
[
expected_nudged_input_min - expected_step,
expected_nudged_input_min - 0.01, expected_nudged_input_min,
expected_nudged_input_min + 0.01,
expected_nudged_input_min + expected_step - 0.01,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step + 0.01,
expected_nudged_input_max - 0.01, expected_nudged_input_max,
expected_nudged_input_max + 0.01,
expected_nudged_input_max + expected_step
],
dtype=np.float32)
expected = np.array(
[
expected_nudged_input_min, expected_nudged_input_min,
expected_nudged_input_min, expected_nudged_input_min,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step,
expected_nudged_input_max, expected_nudged_input_max,
expected_nudged_input_max, expected_nudged_input_max
],
dtype=np.float32)
with self.session() as session:
with self.test_scope():
input_placeholder = array_ops.placeholder(
dtypes.float32, inputs.shape, name="inputs")
min_placeholder = array_ops.placeholder(dtypes.float32, (), name="min")
max_placeholder = array_ops.placeholder(dtypes.float32, (), name="max")
outputs = array_ops.fake_quant_with_min_max_vars(
input_placeholder,
min_placeholder,
max_placeholder,
num_bits=num_bits,
narrow_range=narrow_range)
result = session.run(
outputs, {
input_placeholder: inputs,
min_placeholder: input_min,
max_placeholder: input_max
})
self.assertAllCloseAccordingToType(
result, expected, rtol=1e-3, atol=1e-5, bfloat16_rtol=0.03)
class FakeQuantWithMinMaxVarsGradientTest(xla_test.XLATestCase):
"""Test cases for FakeQuantWithMinMaxVarsGradient operation."""
# 8 bits, wide range.
def testOp_with8BitsNoScalingNoNudging(self):
self._TestOp(0.0, 255.0, 8, False, 0.0, 255.0, 1.0)
def testOp_with8BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 128.0, 8, False, 0.0, 127.5, 0.5)
def testOp_with8BitsScalingAndNudgingUp(self):
self._TestOp(-128.0, -0.5, 8, False, -127.5, 0.0, 0.5)
def testOp_with8BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 127.4, 8, False, 0.0, 127.5, 0.5)
# 8 bits, narrow range.
def testOp_with8BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 254.0, 8, True, 0.0, 254.0, 1.0)
def testOp_with8BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 127.1, 8, True, 0.0, 127.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-127.1, -0.1, 8, True, -127.0, 0.0, 0.5)
def testOp_with8BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 126.9, 8, True, 0.0, 127.0, 0.5)
# 7 bits, wide range.
def testOp_with7BitsNoScalingNoNudging(self):
self._TestOp(0.0, 127.0, 7, False, 0.0, 127.0, 1.0)
def testOp_with7BitsScalingAndNudgingDown(self):
self._TestOp(0.5, 64.0, 7, False, 0.0, 63.5, 0.5)
def testOp_with7BitsScalingAndNudgingUp(self):
self._TestOp(-64.0, -0.5, 7, False, -63.5, 0.0, 0.5)
def testOp_with7BitsScalingAndNudgingBetween(self):
self._TestOp(-0.1, 63.4, 7, False, 0.0, 63.5, 0.5)
# 7 bits, narrow range.
def testOp_with7BitsNarrowRangeNoScalingNoNudging(self):
self._TestOp(0.0, 126.0, 7, True, 0.0, 126.0, 1.0)
def testOp_with7BitsNarrowRangeScalingAndNudgingDown(self):
self._TestOp(0.1, 63.1, 7, True, 0.0, 63.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingUp(self):
self._TestOp(-63.1, -0.1, 7, True, -63.0, 0.0, 0.5)
def testOp_with7BitsNarrowRangeScalingAndNudgingBetween(self):
self._TestOp(-0.1, 62.9, 7, True, 0.0, 63.0, 0.5)
def _TestOp(self, input_min, input_max, num_bits, narrow_range,
expected_nudged_input_min, expected_nudged_input_max,
expected_step):
inputs = np.array(
[
expected_nudged_input_min - expected_step,
expected_nudged_input_min - 0.01, expected_nudged_input_min,
expected_nudged_input_min + 0.01,
expected_nudged_input_min + expected_step - 0.01,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step + 0.01,
expected_nudged_input_max - 0.01, expected_nudged_input_max,
expected_nudged_input_max + 0.01,
expected_nudged_input_max + expected_step
],
dtype=np.float32)
gradients = np.arange(1, len(inputs) + 1, dtype=np.float32)
expected_backprops_wrt_input = np.array(
[0.0, 0.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 0.0, 0.0],
dtype=np.float32)
expected_backprops_wrt_min = 1.0 + 2.0
expected_backprops_wrt_max = 10.0 + 11.0
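    # For reference: gradient entries whose input falls below the nudged min
    # (the first two) are routed to the min backprop, entries above the nudged
    # max (the last two) go to the max backprop, and in-range entries pass
    # straight through to the input backprop.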
with self.session() as session:
with self.test_scope():
gradient_placeholder = array_ops.placeholder(
dtypes.float32, gradients.shape, name="gradients")
input_placeholder = array_ops.placeholder(
dtypes.float32, inputs.shape, name="inputs")
min_placeholder = array_ops.placeholder(dtypes.float32, (), name="min")
max_placeholder = array_ops.placeholder(dtypes.float32, (), name="max")
outputs = array_ops.fake_quant_with_min_max_vars_gradient(
gradient_placeholder,
input_placeholder,
min_placeholder,
max_placeholder,
num_bits=num_bits,
narrow_range=narrow_range)
backprops_wrt_input, backprops_wrt_min, backprops_wrt_max = session.run(
outputs, {
gradient_placeholder: gradients,
input_placeholder: inputs,
min_placeholder: input_min,
max_placeholder: input_max
})
self.assertAllCloseAccordingToType(
backprops_wrt_input,
expected_backprops_wrt_input,
rtol=1e-3,
atol=1e-5,
bfloat16_rtol=0.03)
self.assertAllCloseAccordingToType(
backprops_wrt_min,
expected_backprops_wrt_min,
rtol=1e-3,
atol=1e-5,
bfloat16_rtol=0.03)
self.assertAllCloseAccordingToType(
backprops_wrt_max,
expected_backprops_wrt_max,
rtol=1e-3,
atol=1e-5,
bfloat16_rtol=0.03)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/fake_quant_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Momentum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class MomentumOptimizerTest(xla_test.XLATestCase):
def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum):
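    # For reference, the statements below collapse algebraically to
    #   accum_new = momentum * accum + g
    #   var_new   = var - lr * (g + momentum * accum_new)
    # which is the usual Nesterov-style momentum step.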
var += accum * lr * momentum
accum = accum * momentum + g
var -= lr * accum
var -= accum * lr * momentum
return var, accum
def testBasic(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
self.assertFalse(slot0 in variables.trainable_variables())
slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
self.assertFalse(slot1 in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Step 1: the momentum accumulators were 0, so we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([0.01, 0.01]), self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]), self.evaluate(var1))
def testNesterovMomentum(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([0.1, 0.2], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.3, 0.4], dtype=dtype)
var0_np = np.array([0.1, 0.2], dtype=dtype)
var1_np = np.array([0.3, 0.4], dtype=dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype)
cost = 0.4 * var0 * var0 + 0.9 * var1
global_step = resource_variable_ops.ResourceVariable(
array_ops.zeros([], dtypes.int32), name="global_step")
mom_op = momentum_lib.MomentumOptimizer(
learning_rate=0.1, momentum=0.9, use_nesterov=True)
opt_op = mom_op.minimize(cost, global_step, [var0, var1])
variables.global_variables_initializer().run()
for _ in range(1, 5):
opt_op.run()
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 0.8, 0.1, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(
var1_np, accum1_np, 0.9, 0.1, 0.9)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testTensorLearningRateAndMomentum(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=constant_op.constant(2.0),
momentum=constant_op.constant(0.9))
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
self.assertFalse(slot0 in variables.trainable_variables())
slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
self.assertFalse(slot1 in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Step 1: the momentum accumulators were 0, so we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([0.01, 0.01]), self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]), self.evaluate(var1))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/momentum_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.cond in XLA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests import xla_test
from tensorflow.python.client import session
from tensorflow.python.compiler.xla import xla
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
@test_util.with_control_flow_v2
class CondTest(xla_test.XLATestCase):
def testCondAndTensorArrayInDefun(self):
# TODO(b/132430685): Make test more useful. Also b/129396295, b/127846988
with self.session(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
@function.defun
def f():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=1)
output = control_flow_ops.cond(
constant_op.constant(True),
lambda: ta.write(0, 5.), lambda: ta.write(0, 10.))
return output.stack()
output_t = f()
self.assertAllEqual([5.], self.evaluate(output_t))
xla_context.Exit()
def testCondAndTensorArrayInDefun_constFolding(self):
g = ops.Graph()
with session.Session(graph=g), g.as_default(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
@function.defun
def f():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=1)
output = control_flow_ops.cond(
constant_op.constant(False),
lambda: ta.write(0, 5.), lambda: ta.write(0, 10.))
return output.stack()
output_t = f()
self.assertAllEqual([10.], self.evaluate(output_t))
xla_context.Exit()
def testCondAndTensorArray_xlaCompile(self):
self.skipTest("b/127846988")
# Fails with "Uninitialized arguments" in XlaIfOp::Compile
with self.session(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
def f():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=1)
output = control_flow_ops.cond(
constant_op.constant(True),
lambda: ta.write(0, 5.), lambda: ta.write(0, 10.))
return output.stack()
output_t, = xla.compile(f)
self.assertAllEqual([5.], self.evaluate(output_t))
xla_context.Exit()
def testCondConstPropagation(self):
with self.session() as sess, self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
x = array_ops.placeholder(dtypes.float32)
p = array_ops.placeholder(dtypes.int32)
# TODO(b/129021699): Wrapping this in a tf.function does not work.
def if_true():
# This emits a StridedSlice op which expects the index to be a
# compile-time const.
return x[p]
def if_false():
return 5.
output = control_flow_ops.cond(
constant_op.constant(True), if_true, if_false)
self.assertAllEqual(1.,
sess.run(output, feed_dict={
x: [0., 1., 2.],
p: 1
}))
xla_context.Exit()
def testCondConstPropagation_xlaCompile(self):
self.skipTest("b/132430685")
with self.session(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
x = array_ops.placeholder_with_default([0., 1., 2.], shape=[3])
p = constant_op.constant(1)
def f():
# TODO(b/129021699): Wrapping this in a tf.function does not work.
def if_true():
# This emits a StridedSlice op which expects the index to be a
# compile-time const.
return x[p]
def if_false():
return 5.
return control_flow_ops.cond(
constant_op.constant(True), if_true, if_false)
output = xla.compile(f)
self.assertAllEqual(1., self.evaluate(output))
xla_context.Exit()
def testCondConstPropagation_errorMsg(self):
self.skipTest("b/132430685")
with self.session() as sess, self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
x = array_ops.placeholder(dtypes.float32)
p = random_ops.random_uniform([], minval=1, maxval=3, dtype=dtypes.int32)
# TODO(b/129021699): Wrapping this in a tf.function does not work.
def if_true():
# This emits a StridedSlice op which expects the index to be a
# compile-time const.
return x[:p]
def if_false():
return array_ops.fill([p], 5.)
output = control_flow_ops.cond(
constant_op.constant(True), if_true, if_false)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"must be a compile-time constant"):
sess.run(
output, feed_dict={
x: [0., 1., 2.],
})
xla_context.Exit()
def testCondConstPropagation_errorMsg_xlaCompile(self):
with self.session() as sess, self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
x = array_ops.placeholder(dtypes.float32)
p = random_ops.random_uniform([], minval=1, maxval=3, dtype=dtypes.int32)
condition = math_ops.cast(
random_ops.random_uniform([], minval=0, maxval=2, dtype=dtypes.int32),
dtypes.bool)
def f():
# TODO(b/129021699): Wrapping this in a tf.function does not work.
def if_true():
# This emits a StridedSlice op which expects the index to be a
# compile-time const.
return x[:p]
def if_false():
return array_ops.fill([p], 5.)
return control_flow_ops.cond(condition, if_true, if_false)
output = xla.compile(f)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"must be a compile-time constant"):
sess.run(
output, feed_dict={
x: [0., 1., 2.],
})
xla_context.Exit()
def testSwitchCaseAndTensorArrayInDefun(self):
self.skipTest("b/127846988")
with self.session(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
@function.defun
def f():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=1)
output = control_flow_ops.switch_case(
constant_op.constant(1), {
0: lambda: ta.write(0, 5.),
1: lambda: ta.write(0, 10.),
2: lambda: ta.write(0, 15.),
})
return output.stack()
output_t = f()
self.assertAllEqual([10.], self.evaluate(output_t))
xla_context.Exit()
def testSwitchCaseAndTensorArray_xlaCompile(self):
self.skipTest("b/127846988")
with self.session(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
def f():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=1)
output = control_flow_ops.switch_case(
constant_op.constant(1), {
0: lambda: ta.write(0, 5.),
1: lambda: ta.write(0, 10.),
2: lambda: ta.write(0, 15.),
})
return output.stack()
output_t, = xla.compile(f)
self.assertAllEqual([10.], self.evaluate(output_t))
xla_context.Exit()
def testSwitchCaseConstPropagation(self):
self.skipTest("b/127846988")
with self.session() as sess, self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
x = array_ops.placeholder(dtypes.float32)
p = array_ops.placeholder(dtypes.int32)
def branch0():
return 5.
def branch1():
return 15.
# TODO(b/129021699): Wrapping this in a tf.function does not work.
def branch2():
# This emits a StridedSlice op which expects the index to be a
# compile-time const.
return x[p]
output = control_flow_ops.switch_case(
constant_op.constant(2), {
0: branch0,
1: branch1,
2: branch2,
})
self.assertAllEqual(7.,
sess.run(output, feed_dict={
x: [0., 1., 7.],
p: 2,
}))
xla_context.Exit()
def testCondNoInputs(self):
"""Verifies against `Failed precondition: Expected one input shape`."""
with self.session(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
for pred in True, False:
cond_out = control_flow_ops.cond(
array_ops.placeholder_with_default(pred, []),
lambda: constant_op.constant(2.),
lambda: constant_op.constant(1.))
self.assertEqual(int(pred) + 1., self.evaluate(cond_out))
xla_context.Exit()
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/cond_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RMSProp optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import rmsprop
class RmspropTest(xla_test.XLATestCase):
def _rmsprop_update_numpy(self,
var,
g,
mg,
rms,
mom,
lr,
decay=0.9,
momentum=0.0,
epsilon=1e-10,
centered=False):
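    # For reference, this mirrors the RMSProp recurrences:
    #   rms <- decay * rms + (1 - decay) * g**2
    #   mom <- momentum * mom + lr * g / sqrt(rms + epsilon - mg**2)
    #   var <- var - mom
    # where mg tracks the running mean of g and is only used when
    # centered=True.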
rms_t = rms * decay + (1 - decay) * g * g
denom_t = rms_t + epsilon
if centered:
mg_t = mg * decay + (1 - decay) * g
denom_t -= mg_t * mg_t
else:
mg_t = mg
mom_t = momentum * mom + lr * g / np.sqrt(denom_t, dtype=denom_t.dtype)
var_t = var - mom_t
return var_t, mg_t, rms_t, mom_t
def testBasic(self):
for dtype in self.float_types:
for centered in [False, True]:
with self.session(), self.test_scope():
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
mg0_np = np.array([0.0, 0.0], dtype=dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype)
rms0_np = np.array([1.0, 1.0], dtype=dtype)
rms1_np = np.array([1.0, 1.0], dtype=dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 3.0
rms_opt = rmsprop.RMSPropOptimizer(learning_rate, centered=centered)
rms_update = rms_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
mg0 = rms_opt.get_slot(var0, "mg")
self.assertEqual(mg0 is not None, centered)
mg1 = rms_opt.get_slot(var1, "mg")
self.assertEqual(mg1 is not None, centered)
rms0 = rms_opt.get_slot(var0, "rms")
self.assertTrue(rms0 is not None)
rms1 = rms_opt.get_slot(var1, "rms")
self.assertTrue(rms1 is not None)
mom0 = rms_opt.get_slot(var0, "momentum")
self.assertTrue(mom0 is not None)
mom1 = rms_opt.get_slot(var1, "momentum")
self.assertTrue(mom1 is not None)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of RMSProp
for _ in range(3):
self.evaluate(rms_update)
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np,
grads0_np,
mg0_np,
rms0_np,
mom0_np,
learning_rate,
centered=centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np,
grads1_np,
mg1_np,
rms1_np,
mom1_np,
learning_rate,
centered=centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/rmsprop_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for binary operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.compat import compat
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class BinaryOpsTest(xla_test.XLATestCase):
"""Test cases for binary operators."""
def _testBinary(self, op, a, b, expected, equality_test=None):
with self.session() as session:
with self.test_scope():
pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
output = op(pa, pb)
result = session.run(output, {pa: a, pb: b})
if equality_test is None:
equality_test = self.assertAllCloseAccordingToType
equality_test(result, expected, rtol=1e-3)
def _testSymmetricBinary(self, op, a, b, expected, equality_test=None):
self._testBinary(op, a, b, expected, equality_test)
self._testBinary(op, b, a, expected, equality_test)
def ListsAreClose(self, result, expected, rtol):
"""Tests closeness of two lists of floats."""
self.assertEqual(len(result), len(expected))
for i in range(len(result)):
self.assertAllCloseAccordingToType(result[i], expected[i], rtol)
def testFloatOps(self):
for dtype in self.float_types:
if dtype == dtypes.bfloat16.as_numpy_dtype:
a = -1.01
b = 4.1
else:
a = -1.001
b = 4.01
self._testBinary(
lambda x, y: math_ops.approximate_equal(x, y, tolerance=0.0001),
np.array([[[[-1, 2.00009999], [-3, b]]]], dtype=dtype),
np.array([[[[a, 2], [-3.00009, 4]]]], dtype=dtype),
expected=np.array([[[[False, True], [True, False]]]], dtype=dtype))
self._testBinary(
gen_math_ops.real_div,
np.array([3, 3, -1.5, -8, 44], dtype=dtype),
np.array([2, -2, 7, -4, 0], dtype=dtype),
expected=np.array(
[1.5, -1.5, -0.2142857, 2, float("inf")], dtype=dtype))
self._testBinary(math_ops.pow, dtype(3), dtype(4), expected=dtype(81))
self._testBinary(
math_ops.pow,
np.array([1, 2], dtype=dtype),
np.zeros(shape=[0, 2], dtype=dtype),
expected=np.zeros(shape=[0, 2], dtype=dtype))
self._testBinary(
math_ops.pow,
np.array([10, 4], dtype=dtype),
np.array([2, 3], dtype=dtype),
expected=np.array([100, 64], dtype=dtype))
self._testBinary(
math_ops.pow,
dtype(2),
np.array([3, 4], dtype=dtype),
expected=np.array([8, 16], dtype=dtype))
self._testBinary(
math_ops.pow,
np.array([[2], [3]], dtype=dtype),
dtype(4),
expected=np.array([[16], [81]], dtype=dtype))
self._testBinary(
math_ops.atan2,
np.array([0, np.sqrt(2), 1, np.sqrt(2), 0], dtype),
np.array([1, np.sqrt(2), 0, -np.sqrt(2), -1], dtype),
expected=np.array(
[0, np.pi / 4, np.pi / 2, np.pi * 3 / 4, np.pi], dtype=dtype))
self._testBinary(
gen_math_ops.reciprocal_grad,
np.array([4, -3, -2, 1], dtype=dtype),
np.array([5, -6, 7, -8], dtype=dtype),
expected=np.array([-80, 54, -28, 8], dtype=dtype))
self._testBinary(
gen_math_ops.sigmoid_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-60, -36, -14, 0], dtype=dtype))
self._testBinary(
gen_math_ops.rsqrt_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-160, -81, -28, -4], dtype=dtype))
self._testBinary(
gen_math_ops.sqrt_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([0.625, 1, 1.75, 4], dtype=dtype))
self._testBinary(
gen_nn_ops.softplus_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array(
[3.97322869, 2.99258232, 1.99817801, 0.99966466], dtype=dtype))
self._testBinary(
gen_nn_ops.softsign_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array(
[0.11111111, 0.06122449, 0.03125, 0.01234568], dtype=dtype))
self._testBinary(
gen_math_ops.tanh_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-75, -48, -21, 0], dtype=dtype))
self._testBinary(
gen_nn_ops.elu_grad,
np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([-.6, -.4, -.2, 0, .2, .4], dtype=dtype),
expected=np.array([0.4, 1.2, 2.4, 4, 5, 6], dtype=dtype))
self._testBinary(
gen_nn_ops.selu_grad,
np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([-.6, -.4, -.2, .2, .4, .6], dtype=dtype),
expected=np.array(
[1.158099340847, 2.7161986816948, 4.67429802254,
4.202803949422, 5.2535049367774, 6.30420592413], dtype=dtype))
self._testBinary(
gen_nn_ops.relu_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9], dtype=dtype),
expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10], dtype=dtype))
self._testBinary(
gen_nn_ops.relu6_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtype),
np.array(
[0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9, 6.1, 10.0], dtype=dtype),
expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10, 0, 0], dtype=dtype))
self._testBinary(
gen_nn_ops.leaky_relu_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9], dtype=dtype),
expected=np.array([0.2, 0.4, 0.6, 0.8, 1, 6, 7, 8, 9, 10],
dtype=dtype))
self._testBinary(
gen_nn_ops.softmax_cross_entropy_with_logits,
np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=dtype),
np.array([[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]], dtype=dtype),
expected=[
np.array([1.44019, 2.44019], dtype=dtype),
np.array([[-0.067941, -0.112856, -0.063117, 0.243914],
[-0.367941, -0.212856, 0.036883, 0.543914]],
dtype=dtype),
],
equality_test=self.ListsAreClose)
# TODO(b/68813416): Fails with bfloat16.
if dtype != dtypes.bfloat16.as_numpy_dtype:
self._testBinary(
gen_nn_ops.sparse_softmax_cross_entropy_with_logits,
np.array(
[[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8],
[0.9, 1.0, 1.1, 1.2]],
dtype=dtype),
np.array([2, 1, 7], dtype=np.int32),
expected=[
np.array([1.342536, 1.442536, np.nan], dtype=dtype),
np.array(
[[0.213838, 0.236328, -0.738817, 0.288651], [
0.213838, -0.763672, 0.261183, 0.288651
], [np.nan, np.nan, np.nan, np.nan]],
dtype=dtype),
],
equality_test=self.ListsAreClose)
# TF doesn't define these for bf16.
if dtype != dtypes.bfloat16.as_numpy_dtype:
self._testBinary(
gen_math_ops.xdivy,
np.array([0, 4, 3, 2, 1, 0], dtype=dtype),
np.array([0, 5, 6, 7, 8, float("NaN")], dtype=dtype),
expected=np.array([0, 0.8, 0.5, 0.285714, 0.125, 0], dtype=dtype))
self._testBinary(
gen_math_ops.xlogy,
np.array([0, 4, 3, 2, 1, 0], dtype=dtype),
np.array([0, 5, 6, 7, 8, float("NaN")], dtype=dtype),
expected=np.array([0, 6.437752, 5.375278, 3.89182, 2.079442, 0],
dtype=dtype))
def testIntOps(self):
for dtype in self.signed_int_types:
self._testBinary(
gen_math_ops.truncate_div,
np.array([3, 3, -1, -9, -8], dtype=dtype),
np.array([2, -2, 7, 2, -4], dtype=dtype),
expected=np.array([1, -1, 0, -4, 2], dtype=dtype))
self._testSymmetricBinary(
bitwise_ops.bitwise_and,
np.array([0b1, 0b101, 0b1000], dtype=dtype),
np.array([0b0, 0b101, 0b1001], dtype=dtype),
expected=np.array([0b0, 0b101, 0b1000], dtype=dtype))
self._testSymmetricBinary(
bitwise_ops.bitwise_or,
np.array([0b1, 0b101, 0b1000], dtype=dtype),
np.array([0b0, 0b101, 0b1001], dtype=dtype),
expected=np.array([0b1, 0b101, 0b1001], dtype=dtype))
self._testSymmetricBinary(
bitwise_ops.bitwise_xor,
np.array([0b1, 0b111, 0b1100], dtype=dtype),
np.array([0b0, 0b101, 0b1001], dtype=dtype),
expected=np.array([0b1, 0b010, 0b0101], dtype=dtype))
lhs = np.array([0, 5, 3, 14], dtype=dtype)
rhs = np.array([5, 0, 7, 11], dtype=dtype)
self._testBinary(
bitwise_ops.left_shift, lhs, rhs,
expected=np.left_shift(lhs, rhs))
self._testBinary(
bitwise_ops.right_shift, lhs, rhs,
expected=np.right_shift(lhs, rhs))
if dtype in [np.int8, np.int16, np.int32, np.int64]:
lhs = np.array([-1, -5, -3, -14, -2], dtype=dtype)
rhs = np.array([5, 0, 1, 11, 36], dtype=dtype)
# HLO has saturating shift behavior.
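        # (For the negative lhs values used here, shifting by >= the bit width
        # is expected to saturate to the sign, hence the np.sign(l) fallback
        # in the expected values below.)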
bits = np.ceil(
np.log(np.iinfo(dtype).max - np.iinfo(dtype).min) / np.log(2))
expected = [
np.right_shift(l, r) if r < bits else np.sign(l)
for l, r in zip(lhs, rhs)
]
self._testBinary(bitwise_ops.right_shift, lhs, rhs, expected=expected)
def testNumericOps(self):
for dtype in self.numeric_types:
self._testBinary(
math_ops.add,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([11, 22], dtype=dtype))
self._testBinary(
math_ops.add,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([6, 7], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[8], [9]], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([1, 2, 100], dtype=dtype),
np.array([10, 20, -1], dtype=dtype),
expected=np.array([-9, -18, 101], dtype=dtype))
self._testBinary(
math_ops.subtract,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([4, 3], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[-6], [-5]], dtype=dtype))
if dtype in [np.float32, np.float64]:
x = np.array([
-0.0, 0.0, -0.0, +0.0, np.inf, np.inf, -np.inf, -np.inf, 2.0, 2.0,
1.0
],
dtype=dtype)
y = np.array(
[-0.0, 0.0, +0.0, -0.0, 1.0, -1.0, 1.0, -1.0, 2.0, 1.0, 2.0],
dtype=dtype)
expected = np.nextafter(x, y)
# We use assertAllEqual to expose any bugs hidden by relative or
# absolute error tolerances.
def NextAfterEqualityTest(result, expected, rtol):
del rtol
return self.assertAllEqual(result, expected)
self._testBinary(
math_ops.nextafter,
x,
y,
expected=expected,
equality_test=NextAfterEqualityTest)
      # min/max not supported for complex (int8/uint8 are also skipped here).
if dtype not in self.complex_types | {np.uint8, np.int8}:
self._testBinary(
math_ops.maximum,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([10, 20], dtype=dtype))
self._testBinary(
math_ops.maximum,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([5, 20], dtype=dtype))
self._testBinary(
math_ops.maximum,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[10], [7]], dtype=dtype))
self._testBinary(
math_ops.minimum,
np.array([1, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._testBinary(
math_ops.minimum,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([1, 5], dtype=dtype))
self._testBinary(
math_ops.minimum,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[7], [2]], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([1, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([10, 40], dtype=dtype))
self._testBinary(
math_ops.multiply,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([5, 100], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[70], [14]], dtype=dtype))
# Complex support for squared_difference is incidental, see b/68205550
if dtype not in self.complex_types | {np.uint8, np.int8}:
self._testBinary(
math_ops.squared_difference,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([81, 324], dtype=dtype))
self._testBinary(
math_ops.squared_difference,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([16, 9], dtype=dtype))
self._testBinary(
math_ops.squared_difference,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[36], [25]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([2, -1], dtype=dtype),
expected=np.array([[3, 1], [5, 3]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[[[1, 2], [3, 4]]]], dtype=dtype),
np.array([2, -1], dtype=dtype),
expected=np.array([[[[3, 1], [5, 3]]]], dtype=dtype))
if np.int64 in self.numeric_types:
self._testBinary(
math_ops.add,
np.array([0xffffffff, 0xfffffffff, 1, 1], dtype=np.int64),
np.array([1, 1, 0xffffffff, 0xfffffffff], dtype=np.int64),
expected=np.array([1 << 32, 1 << 36, 1 << 32, 1 << 36],
dtype=np.int64))
def testComplexOps(self):
for dtype in self.complex_types:
ctypes = {np.complex64: np.float32, np.complex128: np.float64}
self._testBinary(
math_ops.complex,
np.array([[[[-1, 2], [2, 0]]]], dtype=ctypes[dtype]),
np.array([[[[2, -3], [0, 4]]]], dtype=ctypes[dtype]),
expected=np.array([[[[-1 + 2j, 2 - 3j], [2, 4j]]]], dtype=dtype))
self._testBinary(
lambda x, y: math_ops.approximate_equal(x, y, tolerance=0.0001),
np.array(
[[[[-1 + 2j, 2.00009999 - 3j], [2 - 3j, 3 + 4.01j]]]],
dtype=dtype),
np.array(
[[[[-1.001 + 2j, 2 - 3j], [2 - 3.00009j, 3 + 4j]]]], dtype=dtype),
expected=np.array([[[[False, True], [True, False]]]], dtype=dtype))
self._testBinary(
gen_math_ops.real_div,
np.array([3, 3j, -1.5j, -8, 2 + 3j, 2 + 4j], dtype=dtype),
np.array([2, -2, 7j, -4j, 4 - 6j, 1 + 2j], dtype=dtype),
expected=np.array(
[1.5, -1.5j, -0.2142857, -2j, (2 + 3j) / (4 - 6j), 2],
dtype=dtype))
# Test inf/nan scenarios.
self._testBinary(
gen_math_ops.real_div,
np.array([4 + 3j, 4, 3j, -4, -4j, 2 - 3j], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0], dtype=dtype),
expected=np.array(
[
dtype(1 + 1j) / 0,
dtype(1) / 0,
dtype(1j) / 0,
dtype(-1) / 0,
dtype(-1j) / 0,
dtype(1 - 1j) / 0
],
dtype=dtype))
self._testBinary(
math_ops.pow,
dtype(3 + 2j),
dtype(4 - 5j),
expected=np.power(dtype(3 + 2j), dtype(4 - 5j)))
self._testBinary( # empty rhs
math_ops.pow,
np.array([1 + 2j, 2 - 3j], dtype=dtype),
np.zeros(shape=[0, 2], dtype=dtype),
expected=np.zeros(shape=[0, 2], dtype=dtype))
self._testBinary( # to zero power
math_ops.pow,
np.array([1 + 2j, 2 - 3j], dtype=dtype),
np.zeros(shape=[1, 2], dtype=dtype),
expected=np.ones(shape=[1, 2], dtype=dtype))
lhs = np.array([1 - 2j, 4 + 3j, 2 - 3j, 3, 2j, 1, 4], dtype=dtype)
rhs = np.array([2, 3j, 3 + 4j, 2 + 3j, 3 - 2j, 2, 3 + 3j], dtype=dtype)
scalar = dtype(2 + 2j)
self._testBinary(math_ops.pow, lhs, rhs, expected=np.power(lhs, rhs))
self._testBinary(
math_ops.pow, scalar, rhs, expected=np.power(scalar, rhs))
self._testBinary(math_ops.pow, lhs, scalar, np.power(lhs, scalar))
lhs = np.array([4 + 2j, -3 - 1j, 2j, 1], dtype=dtype)
rhs = np.array([5, -6j, 7 - 3j, -8j], dtype=dtype)
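      # The *_grad ops below follow TF's convention of taking (y, dy), where y
      # is the forward op's output; e.g. reciprocal_grad(y, dy) = -dy * y**2
      # since d(1/x)/dx = -1/x**2 = -y**2.  The expected expressions encode
      # this with lhs playing the role of y and rhs the role of dy.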
self._testBinary(
gen_math_ops.reciprocal_grad, lhs, rhs, expected=-rhs * lhs * lhs)
self._testBinary(
gen_math_ops.sigmoid_grad, lhs, rhs, expected=rhs * lhs * (1 - lhs))
self._testBinary(
gen_math_ops.rsqrt_grad, lhs, rhs, expected=lhs**3 * rhs / -2)
self._testBinary(
gen_math_ops.sqrt_grad, lhs, rhs, expected=rhs / (2 * lhs))
self._testBinary(
gen_math_ops.tanh_grad, lhs, rhs, expected=rhs * (1 - lhs * lhs))
def testComplexMath(self):
for dtype in self.complex_types:
self._testBinary(
math_ops.add,
np.array([1 + 3j, 2 + 7j], dtype=dtype),
np.array([10 - 4j, 20 + 17j], dtype=dtype),
expected=np.array([11 - 1j, 22 + 24j], dtype=dtype))
self._testBinary(
math_ops.add,
dtype(5 - 7j),
np.array([1 + 2j, 2 + 4j], dtype=dtype),
expected=np.array([6 - 5j, 7 - 3j], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
dtype(7 + 5j),
expected=np.array([[8 + 3j], [9 + 6j]], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([1 + 3j, 2 + 7j], dtype=dtype),
np.array([10 - 4j, 20 + 17j], dtype=dtype),
expected=np.array([-9 + 7j, -18 - 10j], dtype=dtype))
self._testBinary(
math_ops.subtract,
dtype(5 - 7j),
np.array([1 + 2j, 2 + 4j], dtype=dtype),
expected=np.array([4 - 9j, 3 - 11j], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
dtype(7 + 5j),
expected=np.array([[-6 - 7j], [-5 - 4j]], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([1 + 3j, 2 + 7j], dtype=dtype),
np.array([10 - 4j, 20 + 17j], dtype=dtype),
expected=np.array(
[(1 + 3j) * (10 - 4j), (2 + 7j) * (20 + 17j)], dtype=dtype))
self._testBinary(
math_ops.multiply,
dtype(5 - 7j),
np.array([1 + 2j, 2 + 4j], dtype=dtype),
expected=np.array(
[(5 - 7j) * (1 + 2j), (5 - 7j) * (2 + 4j)], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
dtype(7 + 5j),
expected=np.array(
[[(7 + 5j) * (1 - 2j)], [(7 + 5j) * (2 + 1j)]], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([8 - 1j, 2 + 16j], dtype=dtype),
np.array([2 + 4j, 4 - 8j], dtype=dtype),
expected=np.array(
[(8 - 1j) / (2 + 4j), (2 + 16j) / (4 - 8j)], dtype=dtype))
self._testBinary(
math_ops.div,
dtype(1 + 2j),
np.array([2 + 4j, 4 - 8j], dtype=dtype),
expected=np.array(
[(1 + 2j) / (2 + 4j), (1 + 2j) / (4 - 8j)], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([2 + 4j, 4 - 8j], dtype=dtype),
dtype(1 + 2j),
expected=np.array(
[(2 + 4j) / (1 + 2j), (4 - 8j) / (1 + 2j)], dtype=dtype))
# TODO(b/68205550): math_ops.squared_difference shouldn't be supported.
self._testBinary(
nn_ops.bias_add,
np.array([[1 + 2j, 2 + 7j], [3 - 5j, 4 + 2j]], dtype=dtype),
np.array([2 + 6j, -1 - 3j], dtype=dtype),
expected=np.array([[3 + 8j, 1 + 4j], [5 + 1j, 3 - 1j]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[[[1 + 4j, 2 - 1j], [3 + 7j, 4]]]], dtype=dtype),
np.array([2 + 1j, -1 + 2j], dtype=dtype),
expected=np.array(
[[[[3 + 5j, 1 + 1j], [5 + 8j, 3 + 2j]]]], dtype=dtype))
def _testDivision(self, dtype):
"""Test cases for division operators."""
self._testBinary(
math_ops.div,
np.array([10, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([1, 10], dtype=dtype))
self._testBinary(
math_ops.div,
dtype(40),
np.array([2, 20], dtype=dtype),
expected=np.array([20, 2], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([[10], [4]], dtype=dtype),
dtype(2),
expected=np.array([[5], [2]], dtype=dtype))
if dtype in [np.float32, np.float64]:
nums = np.arange(-10, 10, .25, dtype=dtype).reshape(80, 1)
divs = np.arange(-3, 3, .25, dtype=dtype).reshape(1, 24)
np_result = np.true_divide(nums, divs)
np_result[:, divs[0] == 0] = 0
self._testBinary(gen_math_ops.div_no_nan, nums, divs, expected=np_result)
if dtype not in self.complex_types: # floordiv unsupported for complex.
self._testBinary(
gen_math_ops.floor_div,
np.array([3, 3, -1, -9, -8], dtype=dtype),
np.array([2, -2, 7, 2, -4], dtype=dtype),
expected=np.array([1, -2, -1, -5, 2], dtype=dtype))
def testIntDivision(self):
for dtype in self.signed_int_types:
self._testDivision(dtype)
def testFloatDivision(self):
for dtype in self.float_types | self.complex_types:
self._testDivision(dtype)
def _testRemainder(self, dtype):
"""Test cases for remainder operators."""
self._testBinary(
gen_math_ops.floor_mod,
np.array([3, 3, -1, -8], dtype=dtype),
np.array([2, -2, 7, -4], dtype=dtype),
expected=np.array([1, -1, 6, 0], dtype=dtype))
self._testBinary(
gen_math_ops.truncate_mod,
np.array([3, 3, -1, -8], dtype=dtype),
np.array([2, -2, 7, -4], dtype=dtype),
expected=np.array([1, 1, -1, 0], dtype=dtype))
def testIntRemainder(self):
for dtype in self.signed_int_types - {np.int8}:
self._testRemainder(dtype)
def testFloatRemainder(self):
for dtype in self.float_types:
self._testRemainder(dtype)
def testLogicalOps(self):
self._testBinary(
math_ops.logical_and,
np.array([[True, False], [False, True]], dtype=np.bool),
np.array([[False, True], [False, True]], dtype=np.bool),
expected=np.array([[False, False], [False, True]], dtype=np.bool))
self._testBinary(
math_ops.logical_or,
np.array([[True, False], [False, True]], dtype=np.bool),
np.array([[False, True], [False, True]], dtype=np.bool),
expected=np.array([[True, True], [False, True]], dtype=np.bool))
def testComparisons(self):
self._testBinary(
math_ops.equal,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, True, False], dtype=np.bool))
self._testBinary(
math_ops.equal,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, True, False], dtype=np.bool))
self._testBinary(
math_ops.equal,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [True], [False]], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, False, True], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, False, True], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [False], [True]], dtype=np.bool))
for greater_op in [math_ops.greater, (lambda x, y: x > y)]:
self._testBinary(
greater_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, False, True], dtype=np.bool))
self._testBinary(
greater_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, False, False], dtype=np.bool))
self._testBinary(
greater_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [False], [False]], dtype=np.bool))
for greater_equal_op in [math_ops.greater_equal, (lambda x, y: x >= y)]:
self._testBinary(
greater_equal_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, True, True], dtype=np.bool))
self._testBinary(
greater_equal_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, True, False], dtype=np.bool))
self._testBinary(
greater_equal_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [True], [False]], dtype=np.bool))
for less_op in [math_ops.less, (lambda x, y: x < y)]:
self._testBinary(
less_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, False, False], dtype=np.bool))
self._testBinary(
less_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, False, True], dtype=np.bool))
self._testBinary(
less_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [False], [True]], dtype=np.bool))
if np.int64 in self.numeric_types:
self._testBinary(
less_op,
np.array([[10], [7], [2], [-1]], dtype=np.int64),
np.int64(7),
expected=np.array(
[[False], [False], [True], [True]], dtype=np.bool))
for less_equal_op in [math_ops.less_equal, (lambda x, y: x <= y)]:
self._testBinary(
less_equal_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, True, False], dtype=np.bool))
self._testBinary(
less_equal_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, True, True], dtype=np.bool))
self._testBinary(
less_equal_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [True], [True]], dtype=np.bool))
def testS64Comparisons(self):
for op in [(lambda x, y: x < y), (lambda x, y: x <= y),
(lambda x, y: x >= y), (lambda x, y: x > y)]:
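      # Added note: the operands below pair int64 values whose low and high
      # 32-bit halves differ in ways that appear chosen so a comparison using
      # only 32 of the 64 bits would disagree with the true int64 ordering;
      # the NumPy loop further down is the full-width reference.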
lhs = np.array(
[
np.int64(0x000000007FFFFFFF),
np.int64(0x000000007FFFFFFF),
np.int64(0x0000000080000000),
np.int64(0x0000000080000000),
np.int64(0x0000000080000001),
np.int64(0x00000000FFFF0000),
np.int64(0x00000000FFFF0000),
np.int64(0x00000000FFFFFFFE),
np.int64(0x00000000FFFFFFFF),
np.int64(0x00000000FFFFFFFF),
np.int64(0x0000000100000000),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(-0x7FFFFFFF00000002),
np.int64(-0x7FFFFFFF00000002),
np.int64(-0x7FFFFFFF00000001),
np.int64(-0x7FFFFFFF00000001),
np.int64(-0x7FFFFFFF00000001),
np.int64(-0x7FFFFFFF00000001),
np.int64(0x7ffffffefff00010),
np.int64(0x7ffffffefff00010),
np.int64(-1),
np.int64(-1)
],
dtype=np.int64)
rhs = np.array(
[
np.int64(0x000000007FFFFFFE),
np.int64(0x000000007FFFFFFF),
np.int64(0x000000007FFFFFFF),
np.int64(0x0000000080000000),
np.int64(0x0000000080000001),
np.int64(0x00000000FFFF0000),
np.int64(0x00000000FFFF0001),
np.int64(0x00000000FFFFFFFF),
np.int64(0x00000000FFFFFFFE),
np.int64(0x00000000FFFFFFFF),
np.int64(0x00000000FFFFFFFF),
np.int64(0x0000000100000001),
np.int64(0x0000000100000002),
np.int64(0x0000000100000003),
np.int64(0x0000000200000001),
np.int64(0x0000000200000002),
np.int64(0x0000000200000003),
np.int64(0x0000000300000001),
np.int64(0x0000000300000002),
np.int64(0x0000000300000003),
np.int64(0x00000000FFFFFFFF),
np.int64(-0x7FFFFFFF00000001),
np.int64(0x00000000FFFFFFFE),
np.int64(0x00000000FFFFFFFF),
np.int64(-0x7FFFFFFF00000002),
np.int64(-0x7FFFFFFF00000001),
np.int64(0x00000000FFFFFFFF),
np.int64(-0x7FFFFFFF00000001),
np.int64(-2),
np.int64(-1)
],
dtype=np.int64)
expected = np.array([op(l, r) for l, r in zip(lhs, rhs)], dtype=np.bool)
self._testBinary(op, lhs, rhs, expected=expected)
def testBroadcasting(self):
"""Tests broadcasting behavior of an operator."""
for dtype in self.numeric_types:
self._testBinary(
math_ops.add,
np.array(3, dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([13, 23], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([10, 20], dtype=dtype),
np.array(4, dtype=dtype),
expected=np.array([14, 24], dtype=dtype))
# [1,3] x [4,1] => [4,3]
self._testBinary(
math_ops.add,
np.array([[10, 20, 30]], dtype=dtype),
np.array([[1], [2], [3], [4]], dtype=dtype),
expected=np.array(
[[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
dtype=dtype))
# [3] * [4,1] => [4,3]
self._testBinary(
math_ops.add,
np.array([10, 20, 30], dtype=dtype),
np.array([[1], [2], [3], [4]], dtype=dtype),
expected=np.array(
[[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
dtype=dtype))
def testFill(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.fill,
np.array([], dtype=np.int32),
dtype(-42),
expected=dtype(-42))
self._testBinary(
array_ops.fill,
np.array([1, 2], dtype=np.int32),
dtype(7),
expected=np.array([[7, 7]], dtype=dtype))
self._testBinary(
array_ops.fill,
np.array([3, 2], dtype=np.int32),
dtype(50),
expected=np.array([[50, 50], [50, 50], [50, 50]], dtype=dtype))
# Helper method used by testMatMul, testSparseMatMul, testBatchMatMul below.
def _testMatMul(self, op):
for dtype in self.float_types:
self._testBinary(
op,
np.array([[-0.25]], dtype=dtype),
np.array([[8]], dtype=dtype),
expected=np.array([[-2]], dtype=dtype))
self._testBinary(
op,
np.array([[100, 10, 0.5]], dtype=dtype),
np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
expected=np.array([[123, 354]], dtype=dtype))
self._testBinary(
op,
np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
np.array([[100], [10]], dtype=dtype),
expected=np.array([[130], [250], [680]], dtype=dtype))
self._testBinary(
op,
np.array([[1000, 100], [10, 1]], dtype=dtype),
np.array([[1, 2], [3, 4]], dtype=dtype),
expected=np.array([[1300, 2400], [13, 24]], dtype=dtype))
self._testBinary(
op,
np.array([], dtype=dtype).reshape((2, 0)),
np.array([], dtype=dtype).reshape((0, 3)),
expected=np.array([[0, 0, 0], [0, 0, 0]], dtype=dtype))
def testMatMul(self):
self._testMatMul(math_ops.matmul)
# TODO(phawkins): failing on GPU, no registered kernel.
def DISABLED_testSparseMatMul(self):
# Binary wrappers for sparse_matmul with different hints
def SparseMatmulWrapperTF(a, b):
return math_ops.sparse_matmul(a, b, a_is_sparse=True)
def SparseMatmulWrapperFT(a, b):
return math_ops.sparse_matmul(a, b, b_is_sparse=True)
def SparseMatmulWrapperTT(a, b):
return math_ops.sparse_matmul(a, b, a_is_sparse=True, b_is_sparse=True)
self._testMatMul(math_ops.sparse_matmul)
self._testMatMul(SparseMatmulWrapperTF)
self._testMatMul(SparseMatmulWrapperFT)
self._testMatMul(SparseMatmulWrapperTT)
def testBatchMatMul(self):
# Same tests as for tf.matmul above.
self._testMatMul(math_ops.matmul)
# Tests with batches of matrices.
self._testBinary(
math_ops.matmul,
np.array([[[-0.25]]], dtype=np.float32),
np.array([[[8]]], dtype=np.float32),
expected=np.array([[[-2]]], dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array([[[-0.25]], [[4]]], dtype=np.float32),
np.array([[[8]], [[2]]], dtype=np.float32),
expected=np.array([[[-2]], [[8]]], dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array(
[[[[7, 13], [10, 1]], [[2, 0.25], [20, 2]]],
[[[3, 5], [30, 3]], [[0.75, 1], [40, 4]]]],
dtype=np.float32),
np.array(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[11, 22], [33, 44]],
[[55, 66], [77, 88]]]],
dtype=np.float32),
expected=np.array(
[[[[46, 66], [13, 24]], [[11.75, 14], [114, 136]]],
[[[198, 286], [429, 792]], [[118.25, 137.5], [2508, 2992]]]],
dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array([], dtype=np.float32).reshape((2, 2, 0)),
np.array([], dtype=np.float32).reshape((2, 0, 3)),
expected=np.array(
[[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]],
dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array([], dtype=np.float32).reshape((0, 2, 4)),
np.array([], dtype=np.float32).reshape((0, 4, 3)),
expected=np.array([], dtype=np.float32).reshape(0, 2, 3))
# Regression test for b/31472796.
if hasattr(np, "matmul"):
x = np.arange(0, 3 * 5 * 2 * 7, dtype=np.float32).reshape((3, 5, 2, 7))
self._testBinary(
lambda x, y: math_ops.matmul(x, y, adjoint_b=True),
x, x,
expected=np.matmul(x, x.transpose([0, 1, 3, 2])))
def testExpandDims(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.expand_dims,
dtype(7),
np.int32(0),
expected=np.array([7], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([42], dtype=dtype),
np.array([0], dtype=np.int64),
expected=np.array([[42]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([], dtype=dtype),
np.int32(0),
expected=np.array([[]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(0),
expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(1),
expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(2),
expected=np.array([[[[1, 2]], [[3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(3),
expected=np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.array([2], dtype=np.int64),
expected=np.array([[[[1, 2]], [[3, 4]]]], dtype=dtype))
def testBatchMatMulBroadcast(self):
"""Tests broadcasting behavior of BatchMatMul."""
with compat.forward_compatibility_horizon(2019, 4, 26):
# [2, 3] @ [1, 3, 4] -> [1, 2, 4]
self._testBinary(
math_ops.matmul,
np.array([[10, 20, 30], [11, 21, 31]], dtype=np.float32),
np.array([[[1, 2, 3, 4], [2, 4, 6, 8], [3, 6, 9, 12]]],
dtype=np.float32),
expected=np.array([[[140, 280, 420, 560], [146, 292, 438, 584]]],
dtype=np.float32))
# [1, 2, 3] @ [3, 4] -> [1, 2, 4]
self._testBinary(
math_ops.matmul,
np.array([[[10, 20, 30], [11, 21, 31]]], dtype=np.float32),
np.array([[1, 2, 3, 4], [2, 4, 6, 8], [3, 6, 9, 12]],
dtype=np.float32),
expected=np.array([[[140, 280, 420, 560], [146, 292, 438, 584]]],
dtype=np.float32))
# [2, 1, 3] @ [3, 1] -> [2, 1, 1]
self._testBinary(
math_ops.matmul,
np.array([[[10, 20, 30]], [[11, 21, 31]]], dtype=np.float32),
np.array([[1], [2], [3]], dtype=np.float32),
expected=np.array([[[140]], [[146]]], dtype=np.float32))
# [2, 1, 3] @ [1, 3] -> [2, 1, 1] (adjoint_b)
self._testBinary(
lambda x, y: math_ops.matmul(x, y, adjoint_b=True),
np.array([[[10, 20, 30]], [[11, 21, 31]]], dtype=np.float32),
np.array([[1, 2, 3]], dtype=np.float32),
expected=np.array([[[140]], [[146]]], dtype=np.float32))
# [2, 3, 1] @ [3, 1] -> [2, 1, 1] (adjoint_a)
self._testBinary(
lambda x, y: math_ops.matmul(x, y, adjoint_a=True),
np.array([[[10], [20], [30]], [[11], [21], [31]]], dtype=np.float32),
np.array([[1], [2], [3]], dtype=np.float32),
expected=np.array([[[140]], [[146]]], dtype=np.float32))
# [2, 3, 1] @ [1, 3] -> [2, 1, 1] (adjoint_a and adjoint_b)
self._testBinary(
lambda x, y: math_ops.matmul(x, y, adjoint_a=True, adjoint_b=True),
np.array([[[10], [20], [30]], [[11], [21], [31]]], dtype=np.float32),
np.array([[1, 2, 3]], dtype=np.float32),
expected=np.array([[[140]], [[146]]], dtype=np.float32))
# [5, 1, 2, 3] @ [1, 7, 3, 4] -> [5, 7, 2, 4]
self._testBinary(
math_ops.matmul,
np.ones([5, 1, 2, 3], dtype=np.float32),
np.ones([1, 7, 3, 4], dtype=np.float32),
expected=np.full([5, 7, 2, 4], 3, dtype=np.float32))
# [4, 5, 1, 2, 3] @ [1, 1, 3, 5] -> [4, 5, 1, 2, 5]
self._testBinary(
math_ops.matmul,
np.full([4, 5, 1, 2, 3], 2., dtype=np.float32),
np.full([1, 1, 3, 5], 3., dtype=np.float32),
expected=np.full([4, 5, 1, 2, 5], 18., dtype=np.float32))
def testPad(self):
for dtype, pad_type in itertools.product(
self.numeric_types, [np.int32, np.int64]):
self._testBinary(
array_ops.pad,
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array(
[[1, 2], [2, 1]], dtype=pad_type),
expected=np.array(
[[0, 0, 0, 0, 0, 0],
[0, 0, 1, 2, 3, 0],
[0, 0, 4, 5, 6, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
dtype=dtype))
self._testBinary(
lambda x, y: array_ops.pad(x, y, constant_values=7),
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array(
[[0, 3], [2, 1]], dtype=pad_type),
expected=np.array(
[[7, 7, 1, 2, 3, 7],
[7, 7, 4, 5, 6, 7],
[7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7]],
dtype=dtype))
def testSymmetricMirrorPad(self):
mirror_pad = lambda t, paddings: array_ops.pad(t, paddings, "SYMMETRIC")
for dtype in self.numeric_types:
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
],
dtype=dtype),
np.array([[
2,
2,
], [3, 3]], dtype=np.int32),
expected=np.array(
[
[6, 5, 4, 4, 5, 6, 6, 5, 4], #
[3, 2, 1, 1, 2, 3, 3, 2, 1], #
[3, 2, 1, 1, 2, 3, 3, 2, 1], #
[6, 5, 4, 4, 5, 6, 6, 5, 4], #
[6, 5, 4, 4, 5, 6, 6, 5, 4], #
[3, 2, 1, 1, 2, 3, 3, 2, 1], #
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array([[0, 0], [0, 0]], dtype=np.int32),
expected=np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype))
def testReflectMirrorPad(self):
mirror_pad = lambda t, paddings: array_ops.pad(t, paddings, "REFLECT")
for dtype in self.numeric_types:
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
],
dtype=dtype),
np.array([[
1,
1,
], [2, 2]], dtype=np.int32),
expected=np.array(
[
[6, 5, 4, 5, 6, 5, 4], #
[3, 2, 1, 2, 3, 2, 1], #
[6, 5, 4, 5, 6, 5, 4], #
[3, 2, 1, 2, 3, 2, 1]
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array([[0, 0], [0, 0]], dtype=np.int32),
expected=np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype))
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
[7, 8, 9]
],
dtype=dtype),
np.array([[2, 2], [0, 0]], dtype=np.int32),
expected=np.array(
[
[7, 8, 9], #
[4, 5, 6], #
[1, 2, 3], #
[4, 5, 6], #
[7, 8, 9], #
[4, 5, 6], #
[1, 2, 3]
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array(
[
[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]],
], dtype=dtype),
np.array([[0, 0], [1, 1], [1, 1]], dtype=np.int32),
expected=np.array(
[
[
[5, 4, 5, 6, 5], #
[2, 1, 2, 3, 2], #
[5, 4, 5, 6, 5], #
[2, 1, 2, 3, 2], #
],
[
[11, 10, 11, 12, 11], #
[8, 7, 8, 9, 8], #
[11, 10, 11, 12, 11], #
[8, 7, 8, 9, 8], #
]
],
dtype=dtype))
def testReshape(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.reshape,
np.array([], dtype=dtype),
np.array([0, 4], dtype=np.int32),
expected=np.zeros(shape=[0, 4], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([2, 3], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([3, 2], dtype=np.int32),
expected=np.array([[0, 1], [2, 3], [4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([-1, 6], dtype=np.int32),
expected=np.array([[0, 1, 2, 3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([6, -1], dtype=np.int32),
expected=np.array([[0], [1], [2], [3], [4], [5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([2, -1], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([-1, 3], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
def testSplit(self):
for dtype in self.numeric_types:
for axis in [0, -3]:
self._testBinary(
lambda x, y: array_ops.split(value=y, num_or_size_splits=3, axis=x),
np.int32(axis),
np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
dtype=dtype),
expected=[
np.array([[[1], [2]]], dtype=dtype),
np.array([[[3], [4]]], dtype=dtype),
np.array([[[5], [6]]], dtype=dtype),
],
equality_test=self.ListsAreClose)
for axis in [1, -2]:
self._testBinary(
lambda x, y: array_ops.split(value=y, num_or_size_splits=2, axis=x),
np.int32(axis),
np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
dtype=dtype),
expected=[
np.array([[[1]], [[3]], [[5]]], dtype=dtype),
np.array([[[2]], [[4]], [[6]]], dtype=dtype),
],
equality_test=self.ListsAreClose)
def splitvOp(x, y): # pylint: disable=invalid-name
return array_ops.split(value=y, num_or_size_splits=[2, 3], axis=x)
for axis in [1, -1]:
self._testBinary(
splitvOp,
np.int32(axis),
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
dtype=dtype),
expected=[
np.array([[0, 1], [5, 6]], dtype=dtype),
np.array([[2, 3, 4], [7, 8, 9]], dtype=dtype),
],
equality_test=self.ListsAreClose)
def testTile(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.tile,
np.array([[6], [3], [4]], dtype=dtype),
np.array([2, 0], dtype=np.int32),
expected=np.empty([6, 0], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[6, 3, 4]], dtype=dtype),
np.array([2, 0], dtype=np.int32),
expected=np.empty([2, 0], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[6]], dtype=dtype),
np.array([1, 2], dtype=np.int32),
expected=np.array([[6, 6]], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1], [2]], dtype=dtype),
np.array([1, 2], dtype=np.int32),
expected=np.array([[1, 1], [2, 2]], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([3, 2], dtype=np.int32),
expected=np.array(
[[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]],
dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([1, 1], dtype=np.int32),
expected=np.array(
[[1, 2],
[3, 4]],
dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2]], dtype=dtype),
np.array([3, 1], dtype=np.int32),
expected=np.array(
[[1, 2],
[1, 2],
[1, 2]],
dtype=dtype))
def testTranspose(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.transpose,
np.zeros(shape=[1, 0, 4], dtype=dtype),
np.array([1, 2, 0], dtype=np.int32),
expected=np.zeros(shape=[0, 4, 1], dtype=dtype))
self._testBinary(
array_ops.transpose,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([0, 1], dtype=np.int32),
expected=np.array([[1, 2], [3, 4]], dtype=dtype))
self._testBinary(
array_ops.transpose,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([1, 0], dtype=np.int32),
expected=np.array([[1, 3], [2, 4]], dtype=dtype))
def testConjugateTranspose(self):
for dtype in self.complex_types:
self._testBinary(
array_ops.conjugate_transpose,
np.zeros(shape=[1, 0, 4], dtype=dtype),
np.array([1, 2, 0], dtype=np.int32),
expected=np.zeros(shape=[0, 4, 1], dtype=dtype))
self._testBinary(
array_ops.conjugate_transpose,
np.array([[1 - 1j, 2 + 2j], [3 - 3j, 4 + 4j]], dtype=dtype),
np.array([0, 1], dtype=np.int32),
expected=np.array([[1 + 1j, 2 - 2j], [3 + 3j, 4 - 4j]], dtype=dtype))
self._testBinary(
array_ops.conjugate_transpose,
np.array([[1 - 1j, 2 + 2j], [3 - 3j, 4 + 4j]], dtype=dtype),
np.array([1, 0], dtype=np.int32),
expected=np.array([[1 + 1j, 3 + 3j], [2 - 2j, 4 - 4j]], dtype=dtype))
def testCross(self):
for dtype in self.float_types:
self._testBinary(
gen_math_ops.cross,
np.zeros((4, 3), dtype=dtype),
np.zeros((4, 3), dtype=dtype),
expected=np.zeros((4, 3), dtype=dtype))
self._testBinary(
gen_math_ops.cross,
np.array([1, 2, 3], dtype=dtype),
np.array([4, 5, 6], dtype=dtype),
expected=np.array([-3, 6, -3], dtype=dtype))
self._testBinary(
gen_math_ops.cross,
np.array([[1, 2, 3], [10, 11, 12]], dtype=dtype),
np.array([[4, 5, 6], [40, 50, 60]], dtype=dtype),
expected=np.array([[-3, 6, -3], [60, -120, 60]], dtype=dtype))
def testBroadcastArgs(self):
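    # broadcast_dynamic_shape computes the NumPy-style broadcast of two shape
    # vectors; the cases below cover scalar-like [1] shapes, trailing
    # dimensions, size-1 expansion, and one incompatible pair that must raise.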
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([1], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([1], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([5], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([3, 5], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([3, 1], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([3, 1], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 1, 5], dtype=np.int32),
np.array([3, 1], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([3, 1], dtype=np.int32),
np.array([2, 1, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"Incompatible shapes"):
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([1, 2, 3], dtype=np.int32),
np.array([4, 5, 6], dtype=np.int32),
expected=None)
def testMatrixSetDiag(self):
# TODO(penporn): Once XLA supports MatrixSetDiagV2, change the call to
# gen_array_ops.matrix_set_diag (V1) to array_ops.matrix_set_diag (V2).
for dtype in self.numeric_types:
# Square
self._testBinary(
gen_array_ops.matrix_set_diag,
np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]],
dtype=dtype),
np.array([1.0, 2.0, 3.0], dtype=dtype),
expected=np.array([[1.0, 1.0, 0.0], [1.0, 2.0, 1.0], [1.0, 1.0, 3.0]],
dtype=dtype))
self._testBinary(
gen_array_ops.matrix_set_diag,
np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0], [1.0, 0.0, 3.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0], [2.0, 0.0, 6.0]]],
dtype=dtype),
np.array([[-1.0, 0.0, -3.0], [-4.0, -5.0, -6.0]], dtype=dtype),
expected=np.array(
[[[-1.0, 0.0, 3.0], [0.0, 0.0, 0.0], [1.0, 0.0, -3.0]],
[[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0], [2.0, 0.0, -6.0]]],
dtype=dtype))
# Rectangular
self._testBinary(
gen_array_ops.matrix_set_diag,
np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]], dtype=dtype),
np.array([3.0, 4.0], dtype=dtype),
expected=np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]], dtype=dtype))
self._testBinary(
gen_array_ops.matrix_set_diag,
np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]], dtype=dtype),
np.array([3.0, 4.0], dtype=dtype),
expected=np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]], dtype=dtype))
self._testBinary(
gen_array_ops.matrix_set_diag,
np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0]]], dtype=dtype),
np.array([[-1.0, -2.0], [-4.0, -5.0]],
dtype=dtype),
expected=np.array([[[-1.0, 0.0, 3.0], [0.0, -2.0, 0.0]],
[[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0]]],
dtype=dtype))
def testBroadcastTo(self):
for dtype in self.all_types:
x = np.random.randint(0, high=100, size=[2, 3])
self._testBinary(
array_ops.broadcast_to,
x,
np.array([2, 3], dtype=np.int32),
expected=x)
self._testBinary(
array_ops.broadcast_to,
x,
np.array([6, 6], dtype=np.int32),
expected=np.tile(x, [3, 2]))
self._testBinary(
array_ops.broadcast_to,
x,
np.array([7, 4, 3], dtype=np.int32),
expected=np.tile(x, [7, 2, 1]))
self._testBinary(
array_ops.broadcast_to,
x,
np.array([7, 0, 3], dtype=np.int32),
expected=np.zeros([7, 0, 3], dtype=dtype))
self._testBinary(
array_ops.broadcast_to,
x,
np.array([7, 1, 2, 9], dtype=np.int32),
expected=np.tile(x, [7, 1, 1, 3]))
self._testBinary(
array_ops.broadcast_to,
np.zeros([2, 0], dtype=dtype),
np.array([4, 0], dtype=np.int32),
expected=np.zeros([4, 0], dtype=dtype))
x = np.arange(3).reshape((3, 1, 1, 1)).astype(dtype)
self._testBinary(
array_ops.broadcast_to,
x,
np.array((3, 7, 8, 9), dtype=np.int32),
expected=np.tile(x, (1, 7, 8, 9)))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/binary_ops_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.svd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test
class SvdOpTest(xla_test.XLATestCase, parameterized.TestCase):
def _compute_usvt(self, s, u, v):
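    # Comment added for clarity: reconstructs u @ diag(s) @ v^T from the
    # full_matrices=True outputs by trimming u or v down to the "thin" SVD,
    # then scaling the columns of u by the singular values via broadcasting.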
m = u.shape[-1]
n = v.shape[-1]
if m <= n:
v = v[..., :m]
else:
u = u[..., :n]
return np.matmul(u * s[..., None, :], np.swapaxes(v, -1, -2))
def _testSvdCorrectness(self, dtype, shape):
np.random.seed(1)
x_np = np.random.uniform(low=-1.0, high=1.0, size=shape).astype(dtype)
m, n = shape[-2], shape[-1]
_, s_np, _ = np.linalg.svd(x_np)
with self.session() as sess:
x_tf = array_ops.placeholder(dtype)
with self.test_scope():
s, u, v = linalg_ops.svd(x_tf, full_matrices=True)
s_val, u_val, v_val = sess.run([s, u, v], feed_dict={x_tf: x_np})
u_diff = np.matmul(u_val, np.swapaxes(u_val, -1, -2)) - np.eye(m)
v_diff = np.matmul(v_val, np.swapaxes(v_val, -1, -2)) - np.eye(n)
# Check u_val and v_val are orthogonal matrices.
self.assertLess(np.linalg.norm(u_diff), 1e-2)
self.assertLess(np.linalg.norm(v_diff), 1e-2)
# Check that the singular values are correct, i.e., close to the ones from
      # numpy.linalg.svd.
self.assertLess(np.linalg.norm(s_val - s_np), 1e-2)
# The tolerance is set based on our tests on numpy's svd. As our tests
# have batch dimensions and all our operations are on float32, we set the
# tolerance a bit larger. Numpy's svd calls LAPACK's svd, which operates
# on double precision.
self.assertLess(
np.linalg.norm(self._compute_usvt(s_val, u_val, v_val) - x_np), 2e-2)
# Check behavior with compute_uv=False. We expect to still see 3 outputs,
# with a sentinel scalar 0 in the last two outputs.
with self.test_scope():
no_uv_s, no_uv_u, no_uv_v = gen_linalg_ops.svd(
x_tf, full_matrices=True, compute_uv=False)
no_uv_s_val, no_uv_u_val, no_uv_v_val = sess.run(
[no_uv_s, no_uv_u, no_uv_v], feed_dict={x_tf: x_np})
self.assertAllClose(no_uv_s_val, s_val, atol=1e-4, rtol=1e-4)
self.assertEqual(no_uv_u_val, 0.0)
self.assertEqual(no_uv_v_val, 0.0)
SIZES = [1, 2, 5, 10, 32, 64]
DTYPES = [np.float32]
PARAMS = itertools.product(SIZES, DTYPES)
@parameterized.parameters(*PARAMS)
def testSvd(self, n, dtype):
for batch_dims in [(), (3,)] + [(3, 2)] * (n < 10):
self._testSvdCorrectness(dtype, batch_dims + (n, n))
self._testSvdCorrectness(dtype, batch_dims + (2 * n, n))
self._testSvdCorrectness(dtype, batch_dims + (n, 2 * n))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/svd_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple LSTM layer with benchmarks.
This sets up a simple LSTM (Long Short Term Memory) layer, unrolled to a fixed
length sequence. The only deviation from standard LSTM cells is that
activations are clipped, inspired by the GNMT machine translation model.
The GNMT paper has more details: https://arxiv.org/abs/1609.08144
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
def Clip(x):
"""Clips x to the range [-1., 1.]."""
return math_ops.maximum(math_ops.minimum(x, 1.), -1.)
def LSTMCellWeightsShape(num_inputs, num_nodes):
"""Returns the shape of the weights for a single LSTM cell."""
# Dimension 0 accounts for combining x with the previous m state.
# Dimension 1 accounts for the in value and the (in, forget, out) gates.
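  # For example (illustrative values): num_inputs=3 and num_nodes=2 yield a
  # [5, 8] weight matrix -- 3 + 2 input rows and 4 * 2 gate columns.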
return [num_inputs + num_nodes, 4 * num_nodes]
def LSTMCell(weights, m_prev, c_prev, x, pad):
"""Unrolls a single LSTM cell with clipped activations forward by one step.
Args:
weights: Weight matrix with shape LSTMCellWeightsShape.
m_prev: Previous m states with shape [batch_size, num_nodes].
c_prev: Previous c states with shape [batch_size, num_nodes].
x: Input with shape [batch_size, num_inputs].
pad: Padding with shape [batch_size, 1]. Each padding value is either
0 or 1, where 1 indicates padding; i.e. the input is shorter than the
sequence length, and the (m, c) states should simply be passed through
from the previous states.
Returns:
The next (m, c) states, each with shape [batch_size, num_nodes].
"""
# Apply weights to the input and previous hidden state.
# The matmul here is the "big" operation.
xm = array_ops.concat([x, m_prev], 1)
xmw = math_ops.matmul(xm, weights)
# Element-wise ops for the standard LSTM cell, with clipped activations.
# XLA can fuse these operations into a single loop.
in_value, in_gate, forget_gate, out_gate = array_ops.split(
value=xmw, num_or_size_splits=4, axis=1)
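  # Added note: the four splits correspond to the 4 * num_nodes columns of the
  # weight matrix (see LSTMCellWeightsShape): the in value and the in, forget
  # and out gates, in that order.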
in_value = math_ops.tanh(in_value)
in_gate = math_ops.sigmoid(in_gate)
forget_gate = math_ops.sigmoid(forget_gate)
out_gate = math_ops.sigmoid(out_gate)
c_next = Clip(Clip(forget_gate * c_prev) + Clip(in_gate * in_value))
m_next = Clip(out_gate * c_next)
# Account for padding.
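  # pad == 1 passes the previous (m, c) state through unchanged; pad == 0
  # keeps the freshly computed state.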
c_next = c_prev * pad + c_next * (1.0 - pad)
m_next = m_prev * pad + m_next * (1.0 - pad)
return m_next, c_next
def LSTMLayer(cell_name, weights, m, c, x_seq, pad_seq):
"""Unrolls a layer of LSTM cells forward by the sequence length.
The sequence length is determined by the length of x_seq and pad_seq, which
must be the same.
Args:
cell_name: Base name of each cell.
weights: Weight matrix with shape LSTMCellWeightsShape.
m: Initial m states with shape [batch_size, num_nodes].
c: Initial c states with shape [batch_size, num_nodes].
x_seq: List of inputs, each with shape [batch_size, num_inputs].
The length of the list is the sequence length.
pad_seq: List of paddings, each with shape [batch_size, 1].
The length of the list is the sequence length.
Each padding value is either 0 or 1, where 1 indicates padding;
i.e. the input is shorter than the sequence length.
Returns:
List of per-sequence-step outputs, each with shape [batch_size, num_nodes].
Raises:
ValueError: If len(x_seq) != len(pad_seq).
"""
if len(x_seq) != len(pad_seq):
raise ValueError('length of x_seq(%d) != pad_seq(%d)' %
(len(x_seq), len(pad_seq)))
out_seq = []
for seq in range(len(x_seq)):
with ops.name_scope('%s_%d' % (cell_name, seq)):
m, c = LSTMCell(weights, m, c, x_seq[seq], pad_seq[seq])
out_seq.append(array_ops.identity(m, name='out'))
return out_seq
def RandomVar(shape, name=None):
"""Returns a variable of the given shape initialized to random values."""
return variables.VariableV1(
random_ops.random_uniform(shape), dtype=dtypes.float32, name=name)
def RandomInputs(batch_size, seq_length, num_inputs):
"""Returns randomly initialized (x_seq, pad_seq) sequences."""
x_seq = []
pad_seq = []
with ops.name_scope('inputs'):
for seq in range(seq_length):
x_seq.append(RandomVar([batch_size, num_inputs], name='x_seq_%d' % seq))
# Real padding values are always a sequence of 0 followed by a
# sequence of 1, but random values are fine for benchmarking.
pad_seq.append(RandomVar([batch_size, 1], name='pad_seq_%d' % seq))
return x_seq, pad_seq
def BuildLSTMLayer(batch_size, seq_length, num_inputs, num_nodes):
"""Builds a single LSTM layer with random weights and inputs.
Args:
batch_size: Inputs are fed in batches of this size.
seq_length: The sequence length to unroll the LSTM layer.
num_inputs: Dimension of inputs that are fed into each LSTM cell.
num_nodes: The number of nodes in each LSTM cell.
Returns:
(out_seq, weights) pair. The out_seq is a list of per-sequence-step
outputs, each with shape [batch_size, num_nodes]. The weights are a list of
weight variables that may be trained.
"""
weights = RandomVar(
LSTMCellWeightsShape(num_inputs, num_nodes), name='weights')
m = array_ops.zeros([batch_size, num_nodes], name='init_m')
c = array_ops.zeros([batch_size, num_nodes], name='init_c')
x_seq, pad_seq = RandomInputs(batch_size, seq_length, num_inputs)
out_seq = LSTMLayer('lstm', weights, m, c, x_seq, pad_seq)
return out_seq, [weights]
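# A minimal usage sketch (added for illustration, not part of the original
# module; assumes a tf.Session named sess and the default graph):
#
#   out_seq, weights = BuildLSTMLayer(batch_size=32, seq_length=10,
#                                     num_inputs=8, num_nodes=16)
#   sess.run(variables.global_variables_initializer())
#   outputs = sess.run(out_seq)  # list of 10 arrays, each [32, 16]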
|
tensorflow-master
|
tensorflow/compiler/tests/lstm.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the LSTM cell and layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import numpy as np
from tensorflow.compiler.tests import lstm
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _DumpGraph(graph, basename):
if FLAGS.dump_graph_dir:
name = os.path.join(FLAGS.dump_graph_dir, basename + '.pbtxt')
with open(name, 'w') as f:
f.write(str(graph.as_graph_def()))
def _Sigmoid(x):
return 1. / (1. + np.exp(-x))
def _Clip(x):
return np.maximum(np.minimum(x, 1.), -1.)
class LSTMTest(test.TestCase):
def setUp(self):
# The tests for a single LSTM cell and LSTM layer use these values as
# inputs. We always set the dimensionality of num_inputs=1; thus batch_size
# actually represents the different input cases.
self._inputs = np.array([[-1.], [-.5], [0.], [.5], [1.]], np.float32)
self._batch_size = len(self._inputs)
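  # The two helpers below mirror lstm.LSTMCell in NumPy for the scalar case
  # used by these tests: with num_inputs = num_nodes = 1 and every weight
  # equal to `weight`, all four gate pre-activations collapse to the same
  # value x = (inputs + m_prev) * weight.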
def _NextC(self, inputs, weight, m_prev, c_prev):
"""Returns the next c states of an LSTM cell."""
x = (inputs + m_prev) * weight
return _Clip(_Clip(_Sigmoid(x) * c_prev) + _Clip(_Sigmoid(x) * np.tanh(x)))
def _NextM(self, inputs, weight, m_prev, c_prev):
"""Returns the next m states of an LSTM cell."""
x = (inputs + m_prev) * weight
return _Clip(_Sigmoid(x) * self._NextC(inputs, weight, m_prev, c_prev))
def _RunLSTMCell(self, basename, init_weights, m_prev_scalar, c_prev_scalar,
pad_scalar):
with self.session() as sess:
num_inputs = 1
num_nodes = 1
weights = init_weights(lstm.LSTMCellWeightsShape(num_inputs, num_nodes))
m_prev = constant_op.constant([[m_prev_scalar]] * self._batch_size)
c_prev = constant_op.constant([[c_prev_scalar]] * self._batch_size)
x = constant_op.constant(self._inputs)
pad = constant_op.constant([[pad_scalar]] * self._batch_size)
m, c = lstm.LSTMCell(weights, m_prev, c_prev, x, pad)
_DumpGraph(sess.graph, 'lstm_cell_%s_%d_%d_%d' %
(basename, m_prev_scalar, c_prev_scalar, pad_scalar))
# Initialize variables and run the unrolled LSTM step.
self.evaluate(variables.global_variables_initializer())
return self.evaluate([m, c])
def testLSTMCell(self):
# Run with all-0 weights, no padding.
m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 0., 0., 0.)
self.assertAllClose(m, [[0.]] * self._batch_size)
self.assertAllClose(c, [[0.]] * self._batch_size)
m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 0., 1., 0.)
self.assertAllClose(m, [[.25]] * self._batch_size)
self.assertAllClose(c, [[.5]] * self._batch_size)
m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 1., 0., 0.)
self.assertAllClose(m, [[.0]] * self._batch_size)
self.assertAllClose(c, [[.0]] * self._batch_size)
m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 1., 1., 0.)
self.assertAllClose(m, [[.25]] * self._batch_size)
self.assertAllClose(c, [[.5]] * self._batch_size)
# Run with all-1 weights, no padding.
for m_prev in [0., 1.]:
for c_prev in [0., 1.]:
m, c = self._RunLSTMCell('ones',
init_ops.ones_initializer(), m_prev, c_prev,
0.)
self.assertAllClose(m, self._NextM(self._inputs, 1., m_prev, c_prev))
self.assertAllClose(c, self._NextC(self._inputs, 1., m_prev, c_prev))
# Run with random weights.
for weight in np.random.rand(3):
weight_tf = constant_op.constant(weight, dtypes.float32)
random_weight = lambda shape, w=weight_tf: array_ops.fill(shape, w)
# No padding.
for m_prev in [0., 1.]:
for c_prev in [0., 1.]:
m, c = self._RunLSTMCell('random', random_weight, m_prev, c_prev, 0.)
self.assertAllClose(m,
self._NextM(self._inputs, weight, m_prev, c_prev))
self.assertAllClose(c,
self._NextC(self._inputs, weight, m_prev, c_prev))
# Set padding.
for m_prev in [0., 1.]:
for c_prev in [0., 1.]:
m, c = self._RunLSTMCell('random', random_weight, m_prev, c_prev, 1.)
self.assertAllClose(m, [[m_prev]] * self._batch_size)
self.assertAllClose(c, [[c_prev]] * self._batch_size)
def testLSTMLayerErrors(self):
num_inputs = 1
num_nodes = 1
seq_length = 3
weights = array_ops.zeros(lstm.LSTMCellWeightsShape(num_inputs, num_nodes))
m = constant_op.constant([[0.]] * self._batch_size)
c = constant_op.constant([[0.]] * self._batch_size)
x_seq = [constant_op.constant(self._inputs)] * seq_length
pad = constant_op.constant([[0.]] * self._batch_size)
with self.assertRaisesWithPredicateMatch(ValueError, 'length of x_seq'):
lstm.LSTMLayer('lstm', weights, m, c, x_seq, [pad])
with self.assertRaisesWithPredicateMatch(ValueError, 'length of x_seq'):
lstm.LSTMLayer('lstm', weights, m, c, x_seq, [pad] * 2)
with self.assertRaisesWithPredicateMatch(ValueError, 'length of x_seq'):
lstm.LSTMLayer('lstm', weights, m, c, x_seq, [pad] * 4)
def _RunLSTMLayer(self, basename, init_weights, m_init_scalar, c_init_scalar,
pad_scalar):
with self.session() as sess:
num_inputs = 1
num_nodes = 1
seq_length = 3
weights = init_weights(lstm.LSTMCellWeightsShape(num_inputs, num_nodes))
m_init = constant_op.constant([[m_init_scalar]] * self._batch_size)
c_init = constant_op.constant([[c_init_scalar]] * self._batch_size)
x_seq = [constant_op.constant(self._inputs)] * seq_length
pad_seq = [constant_op.constant([[pad_scalar]] * self._batch_size)
] * seq_length
out_seq = lstm.LSTMLayer('lstm', weights, m_init, c_init, x_seq, pad_seq)
_DumpGraph(sess.graph, 'lstm_layer_%s_%d_%d_%d' %
(basename, m_init_scalar, c_init_scalar, pad_scalar))
# Initialize variables and run the unrolled LSTM layer.
self.evaluate(variables.global_variables_initializer())
return self.evaluate(out_seq)
def testLSTMLayer(self):
# Run with all-0 weights, no padding.
o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 0., 0., 0.)
self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 0., 1., 0.)
self.assertAllClose(o, [[[.25]] * self._batch_size,
[[.125]] * self._batch_size,
[[.0625]] * self._batch_size])
o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 1., 0., 0.)
self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 1., 1., 0.)
self.assertAllClose(o, [[[.25]] * self._batch_size,
[[.125]] * self._batch_size,
[[.0625]] * self._batch_size])
# Run with all-1 weights, no padding.
weight1 = 1.
for m_init in [0., 1.]:
for c_init in [0., 1.]:
o = self._RunLSTMLayer('ones',
init_ops.ones_initializer(), m_init, c_init, 0.)
m0 = self._NextM(self._inputs, weight1, m_init, c_init)
c0 = self._NextC(self._inputs, weight1, m_init, c_init)
self.assertAllClose(o[0], m0)
m1 = self._NextM(self._inputs, weight1, m0, c0)
c1 = self._NextC(self._inputs, weight1, m0, c0)
self.assertAllClose(o[1], m1)
m2 = self._NextM(self._inputs, weight1, m1, c1)
self.assertAllClose(o[2], m2)
# Run with random weights.
for weight in np.random.rand(3):
weight_tf = constant_op.constant(weight, dtypes.float32)
random_weight = lambda shape, w=weight_tf: array_ops.fill(shape, w)
# No padding.
for m_init in [0., 1.]:
for c_init in [0., 1.]:
o = self._RunLSTMLayer('random', random_weight, m_init, c_init, 0.)
m0 = self._NextM(self._inputs, weight, m_init, c_init)
c0 = self._NextC(self._inputs, weight, m_init, c_init)
self.assertAllClose(o[0], m0)
m1 = self._NextM(self._inputs, weight, m0, c0)
c1 = self._NextC(self._inputs, weight, m0, c0)
self.assertAllClose(o[1], m1)
m2 = self._NextM(self._inputs, weight, m1, c1)
self.assertAllClose(o[2], m2)
# Set padding.
o = self._RunLSTMLayer('random', random_weight, 0., 0., 1.)
self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
o = self._RunLSTMLayer('random', random_weight, 0., 1., 1.)
self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
o = self._RunLSTMLayer('random', random_weight, 1., 0., 1.)
self.assertAllClose(o, [[[1.]] * self._batch_size] * 3)
o = self._RunLSTMLayer('random', random_weight, 1., 1., 1.)
self.assertAllClose(o, [[[1.]] * self._batch_size] * 3)
class LSTMBenchmark(test.Benchmark):
"""Mcro-benchmarks for a single layer of LSTM cells."""
def _LayerBuilder(self, do_training):
out_seq, weights = lstm.BuildLSTMLayer(FLAGS.batch_size, FLAGS.seq_length,
FLAGS.num_inputs, FLAGS.num_nodes)
name, fetches = ('lstm_layer_inference', out_seq)
if do_training:
# Not a real loss function, but good enough for benchmarking backprop.
loss = math_ops.reduce_sum(math_ops.add_n(out_seq))
dw = gradients_impl.gradients(loss, weights)
name, fetches = ('lstm_layer_training', dw)
_DumpGraph(ops.get_default_graph(),
'%s_%d_%d_%d_%d' % (name, FLAGS.batch_size, FLAGS.seq_length,
FLAGS.num_inputs, FLAGS.num_nodes))
return name, fetches
def benchmarkLayerInference(self):
xla_test.Benchmark(self, lambda: self._LayerBuilder(False), False,
FLAGS.device)
def benchmarkLayerInferenceXLA(self):
xla_test.Benchmark(self, lambda: self._LayerBuilder(False), True,
FLAGS.device)
def benchmarkLayerTraining(self):
xla_test.Benchmark(self, lambda: self._LayerBuilder(True), False,
FLAGS.device)
def benchmarkLayerTrainingXLA(self):
xla_test.Benchmark(self, lambda: self._LayerBuilder(True), True,
FLAGS.device)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--batch_size',
type=int,
default=128,
help="""\
Inputs are fed in batches of this size, for both inference and training.
Larger values cause the matmul in each LSTM cell to have higher
dimensionality.\
"""
)
parser.add_argument(
'--seq_length',
type=int,
default=60,
help="""\
      Length of the unrolled sequence of LSTM cells in a layer. Larger values
cause more LSTM matmuls to be run.\
"""
)
parser.add_argument(
'--num_inputs',
type=int,
default=1024,
help='Dimension of inputs that are fed into each LSTM cell.'
)
parser.add_argument(
'--num_nodes',
type=int,
default=1024,
help='Number of nodes in each LSTM cell.'
)
parser.add_argument(
'--device',
type=str,
default='gpu',
help="""\
TensorFlow device to assign ops to, e.g. "gpu", "cpu". For details see
documentation for tf.Graph.device.\
"""
)
parser.add_argument(
'--dump_graph_dir',
type=str,
default='',
help='If non-empty, dump graphs in *.pbtxt format to this directory.'
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
test.main(argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/compiler/tests/lstm_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for XLA Concat Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class ConcatTest(xla_test.XLATestCase):
def testHStack(self):
with self.session():
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
with self.test_scope():
c = array_ops.concat([p1, p2], 0)
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
}
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:4, :], params[p1])
self.assertAllEqual(result[4:, :], params[p2])
def testVStack(self):
with self.session():
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
with self.test_scope():
c = array_ops.concat([p1, p2], 1)
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
}
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:, :4], params[p1])
self.assertAllEqual(result[:, 4:], params[p2])
def testInt32(self):
with self.session():
p1 = np.random.rand(2, 3).astype("i")
p2 = np.random.rand(2, 3).astype("i")
x1 = constant_op.constant(p1)
x2 = constant_op.constant(p2)
with self.test_scope():
c = array_ops.concat([x1, x2], 0)
result = self.evaluate(c)
self.assertAllEqual(result[:2, :], p1)
self.assertAllEqual(result[2:, :], p2)
def _testRandom(self, dtype):
# Random dims of rank 5
shape = np.random.randint(1, 5, size=5)
# Random number of tensors, but always > 1.
num_tensors = np.random.randint(2, 10)
# Random dim to concat on
concat_dim = np.random.randint(5)
params = {}
if dtype == dtypes.bfloat16:
dtype_feed = dtypes.float32
else:
dtype_feed = dtype
with self.session():
p = []
for i in np.arange(num_tensors):
input_shape = shape
input_shape[concat_dim] = np.random.randint(1, 5)
placeholder = array_ops.placeholder(dtype_feed, shape=input_shape)
p.append(placeholder)
t = dtype_feed.as_numpy_dtype
params[placeholder] = np.random.rand(*input_shape).astype(t)
if dtype != dtype_feed:
concat_inputs = [math_ops.cast(p_i, dtype) for p_i in p]
else:
concat_inputs = p
with self.test_scope():
c = array_ops.concat(concat_inputs, concat_dim)
if dtype != dtype_feed:
c = math_ops.cast(c, dtype_feed)
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
cur_offset = 0
for i in np.arange(num_tensors):
# The index into the result is the ':' along all dimensions
# except the concat_dim. slice(0, size) is used for ':', and
# a list of slices is used to index into result.
ind = [slice(0, params[p[i]].shape[j]) for j in np.arange(5)]
ind[concat_dim] = slice(cur_offset,
cur_offset + params[p[i]].shape[concat_dim])
cur_offset += params[p[i]].shape[concat_dim]
if dtype == dtype_feed:
self.assertAllEqual(result[ind], params[p[i]])
else:
self.assertAllClose(result[ind], params[p[i]], 0.01)
def testRandom(self):
self._testRandom(dtypes.float32)
self._testRandom(dtypes.int32)
def _testGradientsSimple(self):
with self.session():
inp = []
inp_tensors = []
with self.test_scope():
for x in [1, 2, 6]:
shape = [10, x, 2]
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, 1)
output_shape = [10, 9, 2]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, 1)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
def testGradientsSimpleAll(self):
self._testGradientsSimple()
def _testGradientsFirstDim(self):
with self.session():
inp = []
inp_tensors = []
with self.test_scope():
for x in [1, 2, 6]:
shape = [x, 10, 2]
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, 0)
output_shape = [9, 10, 2]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, 0)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
def testGradientsFirstDimAll(self):
self._testGradientsFirstDim()
def _testGradientsLastDim(self):
with self.session():
inp = []
inp_tensors = []
with self.test_scope():
for x in [1, 2, 6]:
shape = [10, 2, x]
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, 2)
output_shape = [10, 2, 9]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, 2)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
def testGradientsLastDimAll(self):
self._testGradientsLastDim()
def _RunAndVerifyGradientsRandom(self):
# Random dims of rank 5
input_shape = np.random.randint(1, 5, size=5)
# Random number of tensors
num_tensors = np.random.randint(1, 10)
# Random dim to concat on
concat_dim = np.random.randint(5)
concat_dim_sizes = np.random.randint(1, 5, size=num_tensors)
with self.session():
inp = []
inp_tensors = []
with self.test_scope():
for x in concat_dim_sizes:
shape = input_shape
shape[concat_dim] = x
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, concat_dim)
output_shape = input_shape
output_shape[concat_dim] = concat_dim_sizes.sum()
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, concat_dim)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
def testGradientsRandom(self):
for _ in range(5):
self._RunAndVerifyGradientsRandom()
# Re-enable once zero-element Retvals are handled correctly.
def DISABLED_testZeroSize(self):
# Verify that concat doesn't crash and burn for zero size inputs
np.random.seed(7)
with self.session():
with self.test_scope():
for shape0 in (), (2,):
axis = len(shape0)
for shape1 in (), (3,):
for n0 in 0, 1, 2:
for n1 in 0, 1, 2:
x0 = np.random.randn(*(shape0 + (n0,) + shape1))
x1 = np.random.randn(*(shape0 + (n1,) + shape1))
correct = np.concatenate([x0, x1], axis=axis)
# TODO(irving): Make tf.concat handle map, then drop list().
xs = list(map(constant_op.constant, [x0, x1]))
c = array_ops.concat(xs, axis)
self.assertAllEqual(c.eval(), correct)
# Check gradients
dc = np.random.randn(*c.get_shape().as_list())
dxs = self.evaluate(gradients_impl.gradients(c, xs, dc))
self.assertAllEqual(dc, np.concatenate(dxs, axis=axis))
def testConcatTuple(self):
c1 = np.random.rand(4, 4).astype(np.float32)
c2 = np.random.rand(4, 4).astype(np.float32)
with self.session():
with self.test_scope():
concat_list_t = array_ops.concat([c1, c2], 0)
concat_tuple_t = array_ops.concat((c1, c2), 0)
self.assertAllEqual(concat_list_t.eval(), self.evaluate(concat_tuple_t))
def testConcatNoScalars(self):
with self.session():
with self.test_scope():
scalar = constant_op.constant(7)
dim = array_ops.placeholder(dtypes.int32)
with self.assertRaisesRegexp(
ValueError, r"Can't concatenate scalars \(use tf\.stack instead\)"):
array_ops.concat([scalar, scalar, scalar], dim)
  # This test ensures that XLA on GPU does not run out of memory when an op is
  # given a very large number of arguments.
def testConcatLargeNumberOfTensors(self):
if "CPU" in self.device:
self.skipTest("This test can time out on CPU, so we will just allow "
"other backends to catch this specific error.")
with self.session():
with self.test_scope():
for concat_dim in range(2):
params = {}
p = []
shape = np.array([7, 13])
num_tensors = 1001
for i in np.arange(num_tensors):
input_shape = shape
placeholder = array_ops.placeholder(
dtypes.float32, shape=input_shape)
p.append(placeholder)
params[placeholder] = np.random.rand(*input_shape).astype(
np.float32)
concat_inputs = p
c = array_ops.concat(concat_inputs, concat_dim)
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
cur_offset = 0
for i in np.arange(num_tensors):
# The index into the result is the ':' along all dimensions
# except the concat_dim. slice(0, size) is used for ':', and
# a list of slices is used to index into result.
index = [slice(0, params[p[i]].shape[j]) for j in np.arange(2)]
index[concat_dim] = slice(
cur_offset, cur_offset + params[p[i]].shape[concat_dim])
cur_offset += params[p[i]].shape[concat_dim]
self.assertAllEqual(result[index], params[p[i]])
class ConcatOffsetTest(xla_test.XLATestCase):
def testBasic(self):
with self.session():
with self.test_scope():
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
s2 = constant_op.constant([2, 20, 5], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
ans = self.evaluate(off)
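        # concat_offset returns the start offset of each input within the
        # concatenated result. Along cdim (axis 1) the sizes are 3, 7 and 20,
        # so the offsets are 0, 3 and 3 + 7 = 10; all other axes stay 0.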
self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
class PackTest(xla_test.XLATestCase):
def testBasic(self):
with self.session():
with self.test_scope():
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
s2 = constant_op.constant([2, 20, 5], dtypes.int32)
packed = array_ops.stack([s0, s1, s2])
ans = self.evaluate(packed)
self.assertAllEqual(ans, [[2, 3, 5], [2, 7, 5], [2, 20, 5]])
def testScalars(self):
with self.session():
with self.test_scope():
s0 = constant_op.constant(2, dtypes.int32)
s1 = constant_op.constant(3, dtypes.int32)
s2 = constant_op.constant(5, dtypes.int32)
packed = array_ops.stack([s0, s1, s2])
ans = self.evaluate(packed)
self.assertAllEqual(ans, [2, 3, 5])
def testEmpty(self):
with self.session():
with self.test_scope():
s0 = constant_op.constant([[]], dtypes.int32)
s1 = constant_op.constant([[]], dtypes.int32)
s2 = constant_op.constant([[]], dtypes.int32)
packed = array_ops.stack([s0, s1, s2])
ans = self.evaluate(packed)
self.assertAllEqual(ans, [[[]], [[]], [[]]])
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/concat_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for XLA TensorArray Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.compiler.xla import xla
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _make_converter(dtype):
def _converter(x):
return np.asarray(x).astype(dtype.as_numpy_dtype)
return _converter
# This lets me define `fn` repeatedly to pass to xla.compile.
#
# pylint: disable=function-redefined
@test_util.with_control_flow_v2
class TensorArrayTest(xla_test.XLATestCase):
@test_util.disable_control_flow_v2("Tries to evaluate flow")
def testTensorArrayWriteRead(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0, 3.0]])
w2 = w1.write(2, [[7.0, -8.5]])
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
flow = w2.flow
return [r0, r1, r2, flow]
d0, d1, d2, flow_val = self.evaluate(xla.compile(fn))
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0, 3.0]], d1)
self.assertAllEqual([[7.0, -8.5]], d2)
self.assertAllEqual([], flow_val.shape)
def _testTensorArrayWritePack(self, tf_dtype):
with self.session(), self.test_scope():
convert = _make_converter(tf_dtype)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
w0 = ta.write(0, convert([[4.0, 5.0]]))
w1 = w0.write(1, convert([[6.0, 7.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
return w2.stack()
self.assertAllEqual(
convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]),
self.evaluate(xla.compile(fn)[0]))
def testTensorArrayWritePack(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayWritePack(dtype)
def testEmptyTensorArrayPack(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
empty_element = np.zeros((0, 1), dtype=np.float32)
w0 = ta.write(0, empty_element)
w1 = w0.write(1, empty_element)
w2 = w1.write(2, empty_element)
return w2.stack()
self.assertAllEqual([3, 0, 1], self.evaluate(xla.compile(fn)[0]).shape)
def _testTensorArrayWriteConcat(self, tf_dtype):
with self.session(), self.test_scope():
convert = _make_converter(tf_dtype)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0]]))
w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
w2 = w1.write(2, convert([[8.0, 9.0], [204.0, 205.0]]))
return w2.concat()
self.assertAllEqual(
convert([[4.0, 5.0], [104.0, 105.0], [6.0, 7.0], [106.0, 107.0],
[8.0, 9.0], [204.0, 205.0]]),
self.evaluate(xla.compile(fn)[0]))
@test_util.disable_control_flow_v2("b/122315751 (concat)")
def testTensorArrayWriteConcat(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayWriteConcat(dtype)
def _testTensorArrayUnpackRead(self, tf_dtype):
with self.session() as session, self.test_scope():
convert = _make_converter(tf_dtype)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Unpack a vector into scalars
w0 = ta.unstack(convert([1.0, 2.0, 3.0]))
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
return [r0, r1, r2]
d0, d1, d2 = self.evaluate(xla.compile(fn))
self.assertAllEqual(convert(1.0), d0)
self.assertAllEqual(convert(2.0), d1)
self.assertAllEqual(convert(3.0), d2)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Unpack a matrix into vectors.
w1 = ta.unstack(convert([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]))
r0 = w1.read(0)
r1 = w1.read(1)
r2 = w1.read(2)
return [r0, r1, r2]
d0, d1, d2 = self.evaluate(xla.compile(fn))
self.assertAllEqual(convert([1.0, 1.1]), d0)
self.assertAllEqual(convert([2.0, 2.1]), d1)
self.assertAllEqual(convert([3.0, 3.1]), d2)
def fn():
# Reset ta because we're going to change the shape, else shape
# inference will throw an error.
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Try unpacking an empty matrix, which should not cause an error.
w2 = ta.unstack(convert([[], [], []]))
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
return [r0, r1, r2]
d0, d1, d2 = self.evaluate(xla.compile(fn))
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
def _testTensorArrayUnpackReadMaybeLegacy(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayUnpackRead(dtype)
def testTensorArrayUnpackRead(self):
self._testTensorArrayUnpackReadMaybeLegacy()
def _testTensorArraySplitRead(self, tf_dtype):
with self.session() as session, self.test_scope():
convert = _make_converter(tf_dtype)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Split an empty vector.
lengths = constant_op.constant([0, 0, 0])
w0 = ta.split(convert([]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
return [r0, r1, r2]
d0, d1, d2 = self.evaluate(xla.compile(fn))
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
def fn():
# Split a vector.
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
lengths = constant_op.constant([1, 1, 1])
w0 = ta.split(convert([1.0, 2.0, 3.0]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
return [r0, r1, r2]
d0, d1, d2 = self.evaluate(xla.compile(fn))
self.assertAllEqual(convert([1.0]), d0)
self.assertAllEqual(convert([2.0]), d1)
self.assertAllEqual(convert([3.0]), d2)
def fn():
# Split a matrix.
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
lengths = constant_op.constant([1, 1, 1])
w0 = ta.split(
convert([[1.0, 101.0], [2.0, 201.0], [3.0, 301.0]]),
lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
return [r0, r1, r2]
d0, d1, d2 = self.evaluate(xla.compile(fn))
self.assertAllEqual(convert([[1.0, 101.0]]), d0)
self.assertAllEqual(convert([[2.0, 201.0]]), d1)
self.assertAllEqual(convert([[3.0, 301.0]]), d2)
@test_util.disable_control_flow_v2("b/122315872 (split)")
def testTensorArraySplitRead(self):
for dtype in self.numeric_tf_types:
self._testTensorArraySplitRead(dtype)
@test_util.disable_control_flow_v2("TensorArray.grad is not supported in v2")
def testTensorGradArrayWriteRead(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, [[-3.0]])
g_ta = w2.grad("grad")
g_w0 = g_ta.write(0, [[5.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, [[-2.0]])
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
return [r0, r1, r2, g_r0, g_r1, g_r2]
d0, d1, d2, g_d0, g_d1, g_d2 = self.evaluate(xla.compile(fn))
self.assertAllEqual([[4.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual([[-3.0]], d2)
self.assertAllEqual([[5.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual([[-2.0]], g_d2)
@test_util.disable_control_flow_v2("TensorArray.grad is not supported in v2")
def testTensorGradArrayDynamicWriteRead(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, [[-3.0]])
g_ta = w2.grad("grad") # Get gradient array here so we know the shape
s = w2.size()
g_s = g_ta.size()
g_w0 = g_ta.write(0, [[5.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, [[-2.0]])
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
return [r0, r1, r2, g_r0, g_r1, g_r2, s, g_s]
d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = self.evaluate(xla.compile(fn))
self.assertAllEqual([[4.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual([[-3.0]], d2)
self.assertAllEqual([[5.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual([[-2.0]], g_d2)
self.assertAllEqual(3, vs)
self.assertAllEqual(3, g_vs)
@test_util.disable_control_flow_v2("TensorArray.grad is not supported in v2")
def testTensorGradAccessTwiceReceiveSameObject(self):
with self.session() as session, self.test_scope():
ta_out = {}
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
element_shape=[1, 2])
g_ta_0 = ta.grad("grad")
g_ta_1 = ta.grad("grad")
ta_out[0] = g_ta_0.handle
ta_out[1] = g_ta_1.handle
with ops.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
# Write with one gradient handle, read with another copy of it
r1_0 = g_ta_1.read(0)
with ops.control_dependencies([g_ta_0.handle.op, g_ta_1.handle.op]):
return [r1_0]
[d_r1_0] = self.evaluate(xla.compile(fn))
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
# Can't assert this because adding a side output like we have here fails
# as follows:
#
# ValueError: Operation u'TensorArrayGrad/TensorArrayGradV3' has been
# marked as not fetchable.
#
# On the other hand, legitimately returning the handle from the
# xla.compile function fails because we don't support DT_RESOURCE outputs
# from XLA clusters.
#
# self.assertAllEqual(ta_out[0], ta_out[1])
@test_util.disable_control_flow_v2("b/124334470")
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
return ta.write(-1, constant_op.constant(7)).flow
# Test writing the wrong datatype.
# TODO(b/129870929): Remove InvalidArgumentError/second regexp after all
# callers provide proper init dtype.
with self.assertRaisesRegexp(
(ValueError, errors.InvalidArgumentError),
r"("
r"conversion requested dtype float32 for Tensor with dtype int32"
r"|"
r"TensorArray dtype is float but op has dtype int32"
r")"):
xla.compile(fn)[0].eval()
@test_util.disable_control_flow_v2("b/124334096 verify dtype")
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
# Find two different floating point types, create an array of
# the first type, but try to read the other type.
if len(self.float_types) > 1:
dtype1, dtype2 = list(self.float_types)[:2]
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtype1, tensor_array_name="foo", size=3)
w0 = ta.write(0, math_ops.cast([[4.0, 5.0]], dtype1))
# Test reading wrong datatype.
return gen_data_flow_ops.tensor_array_read_v3(
handle=w0.handle, index=0, dtype=dtype2, flow_in=w0.flow)
with self.assertRaisesOpError("TensorArray dtype is "):
self.evaluate(xla.compile(fn))
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtype1, tensor_array_name="foo", size=3)
w0 = ta.write(0, math_ops.cast([[4.0, 5.0]], dtype1))
# Test reading from a different index than the one we wrote to
with ops.control_dependencies([w0.read(1)]):
return 1.0
xla.compile(fn)[0].eval()
@test_util.disable_control_flow_v2("b/122315872 (split)")
def testTensorArraySplitIncompatibleShapesFails(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
return ta.split([1.0, 2.0, 3.0], 1).flow
with self.assertRaisesWithPredicateMatch(
ValueError, r"Shape must be rank 1 but is rank 0"):
xla.compile(fn)[0].eval()
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
return ta.split([1.0, 2.0, 3.0], [1, 2, 3]).flow
with self.assertRaisesOpError(
r"lengths must be equal: 1 vs. 2"):
xla.compile(fn)[0].eval()
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
return ta.split(1.0, [1]).flow
with self.assertRaisesOpError(
r"value must have rank >= 1"):
xla.compile(fn)[0].eval()
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
infer_shape=False)
return ta.split([1.0], [1]).flow
with self.assertRaisesOpError(
r"TensorArray's size is not equal to the size of lengths "
r"\(1 vs. 2\)"):
xla.compile(fn)[0].eval()
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
with self.session(), self.test_scope():
c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
w0 = ta.write(2, c(3.0))
w1 = w0.write(2, c(4.0))
ta_grad = w1.grad("grad")
w0_grad = ta_grad.write(2, c(3.0))
w1_grad = w0_grad.write(2, c(4.0))
w2_grad = w1_grad.write(2, c(5.0))
return w2_grad.read(2)
# Assert that aggregation works correctly
self.assertAllEqual(c(12.00), xla.compile(fn)[0].eval())
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
w0 = ta.write(2, c(3.0))
w1 = w0.write(2, c(4.0))
ta_grad = w1.grad("grad")
# Using differing shapes causes an exception
wb0_grad = ta_grad.write(1, c(1.0))
wb1_grad = wb0_grad.write(1, c([1.0]))
return wb1_grad.flow
with self.assertRaisesOpError(
r"Mismatched TensorArray sizes"):
xla.compile(fn)[0].eval()
@test_util.disable_control_flow_v2("TensorArray.grad is not supported in v2")
def testTensorArrayWriteGradientAddMultipleAdds(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
def testMultiTensorArray(self):
with self.session(), self.test_scope():
def fn():
h1 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="foo")
w1 = h1.write(0, 4.0)
r1 = w1.read(0)
h2 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="bar")
w2 = h2.write(0, 5.0)
r2 = w2.read(0)
return r1 + r2
self.assertAllClose(9.0, self.evaluate(xla.compile(fn)[0]))
def _testTensorArrayGradientWriteReadType(self, dtype):
with self.session() as session, self.test_scope():
c = lambda x: np.array(x, dtype=dtype)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.as_dtype(dtype),
tensor_array_name="foo",
size=3,
infer_shape=False)
value_0 = constant_op.constant(c([[4.0, 5.0]]))
value_1 = constant_op.constant(c([[3.0, 3.5]]))
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
r0 = w1.read(0)
r1 = w1.read(1)
r0_2 = w1.read(0)
# Test individual components' gradients
grad_just_r0 = gradients_impl.gradients(
ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
grad_r0_r0_2 = gradients_impl.gradients(
ys=[r0, r0_2],
xs=[value_0],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
grad_just_r1 = gradients_impl.gradients(
ys=[r1], xs=[value_1], grad_ys=[c([[-2.0, -4.0]])])
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r0, r0_2, r1],
xs=[value_0, value_1],
grad_ys=[c([[2.0, 3.0]]),
c([[1.0, -1.0]]),
c([[-2.0, -10.0]])])
return [grad_just_r0, grad_r0_r0_2, grad_just_r1, grad]
[grad_just_r0_vals, grad_r0_r0_2_vals, grad_just_r1_vals,
grad_vals] = self.evaluate(xla.compile(fn))
self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
self.assertAllEqual(c([[-2.0, -4.0]]), grad_just_r1_vals[0])
self.assertEqual(len(grad_vals), 2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_vals[0])
self.assertAllEqual(c([[-2.0, -10.0]]), grad_vals[1])
def testTensorArrayGradientWriteRead(self):
for dtype in self.float_types:
self._testTensorArrayGradientWriteReadType(dtype)
for dtype in self.complex_types:
self._testTensorArrayGradientWriteReadType(dtype)
def _testTensorArrayGradientWritePackConcatAndRead(self):
with self.session() as sess, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value_0 = constant_op.constant([-1.0, 1.0])
value_1 = constant_op.constant([-10.0, 10.0])
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
p0 = w1.stack()
r0 = w1.read(0)
s0 = w1.concat()
# Test gradient accumulation between read(0), pack(), and concat().
with ops.control_dependencies([p0, r0, s0]):
return gradients_impl.gradients(
ys=[p0, r0, s0],
xs=[value_0, value_1],
grad_ys=[
[[2.0, 3.0], [4.0, 5.0]], # stack gradient
[-0.5, 1.5], # read(0) gradient
[20.0, 30.0, 40.0, 50.0], # concat gradient
])
grad_vals = self.evaluate(xla.compile(fn)) # 2 + 2 entries
self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
@test_util.disable_control_flow_v2("b/122315751 (concat)")
def testTensorArrayGradientWritePackConcatAndRead(self):
self._testTensorArrayGradientWritePackConcatAndRead()
def testTensorArrayReadTwice(self):
with self.session(), self.test_scope():
def fn():
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
ta_readtwice = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
w_readtwice = ta_readtwice.unstack(value)
r0_readtwice = w_readtwice.read(0)
with ops.control_dependencies([r0_readtwice]):
r1_readtwice = w_readtwice.read(0)
return [r0_readtwice, r1_readtwice]
self.assertAllEqual([1.0, -1.0], self.evaluate(xla.compile(fn))[0])
def _testTensorArrayGradientUnpackRead(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
r0_1 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0).
return gradients_impl.gradients(
ys=[r0, r0_1, r1],
xs=[value],
grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
grad_vals = self.evaluate(xla.compile(fn))
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])
def testTensorArrayGradientUnpackRead(self):
self._testTensorArrayGradientUnpackRead()
@test_util.disable_control_flow_v2("b/122315751(concat), b/122315872(split)")
def testTensorArrayGradientSplitConcat(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=2)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0],
[100.0, -100.0], [1000.0, -1000.0]])
w = ta.split(value, [2, 2])
r = w.concat()
# Test combined gradients
return gradients_impl.gradients(
ys=[r],
xs=[value],
grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0],
[2000.0, -2000.0]]])
grad_vals = self.evaluate(xla.compile(fn))
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, -2.0], [20.0, -20.0], [200.0, -200.0],
[2000.0, -2000.0]],
grad_vals[0])
def testCloseTensorArray(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
with ops.control_dependencies([ta.close()]):
return 1.0
self.evaluate(xla.compile(fn)[0])
def testSizeTensorArray(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
return ta.size()
self.assertAllEqual(3, self.evaluate(xla.compile(fn))[0])
def testWriteCloseTensorArray(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[3.0, 1.0]])
with ops.control_dependencies([w1.close()]):
return 1.0
self.evaluate(xla.compile(fn))
# TODO(phawkins): implement while loops.
# def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
# np_dtype = dtype.as_numpy_dtype
# with self.session() as session, self.test_scope():
# v0 = array_ops.identity(np.arange(3 * 5, dtype=np_dtype).reshape(3, 5))
# var = variables.Variable(np.arange(100, 105, dtype=np_dtype))
# state0 = array_ops.identity(np.array([1] * 5, dtype=np_dtype))
# ta = tensor_array_ops.TensorArray(
# dtype=dtype,
# tensor_array_name="foo",
# size=0 if dynamic_size else 3,
# dynamic_size=dynamic_size)
# time_0 = array_ops.identity(0)
# def body(time, ta_t, state):
# sliced = array_ops.slice(
# v0, begin=array_ops.stack([time, 0]), size=[1, -1])
# sliced = array_ops.squeeze(sliced)
# out = sliced + var + state
# state += sliced
# ta_t = ta_t.write(time, out)
# return (time + 1, ta_t, state)
# (unused_0, h_final, unused_2) = control_flow_ops.while_loop(
# cond=lambda time, unused_1, unused_2: time < 3,
# body=body,
# loop_vars=(time_0, ta, state0),
# shape_invariants=(time_0.get_shape(), tensor_shape.unknown_shape(),
# tensor_shape.unknown_shape()),
# parallel_iterations=3)
# vout = h_final.stack()
# grad_val = -np.arange(3 * 5, dtype=np_dtype).reshape(3, 5)
# v0_grad = gradients_impl.gradients([vout], [v0], [grad_val])[0]
# state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]
# var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]
# variables.global_variables_initializer().run()
# state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
# self.evaluate([state0, var, v0, vout, v0_grad, var_grad, state0_grad])
# )
# just_v0_grad_t, = self.evaluate([v0_grad])
# # state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]
# # vout = [ v0[0] + var + state[0] |
# # v0[1] + var + state[1] |
# # v0[2] + var + state[2] ]
# # = [ v0[0] + var + state0 |
# # v0[1] + var + state0 + v0[0] |
# # v0[2] + var + state0 + v0[0] + v0[1] ]
# #
# # d(vout[0])/d(v0) = [1 | 0 | 0 ]
# # d(vout[1])/d(v0) = [1 | 1 | 0 ]
# # d(vout[2])/d(v0) = [1 | 1 | 1 ]
# # d(vout)/d(var) = [1 | 1 | 1]
# # d(vout)/d(state0) = [ 1 | 1 | 1 ]
# state_per_time = np.array(
# [state0_t, state0_t + v0_t[0, :],
# state0_t + v0_t[0, :] + v0_t[1, :]])
# # Compare forward prop
# self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
# # Compare backward prop
# expected_v0_grad_t = np.array([
# grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
# grad_val[1, :] + grad_val[2, :], grad_val[2, :]
# ])
# self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
# self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
# self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
# self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
# def testWhileLoopWritePackGradients(self):
# self._testWhileLoopWritePackGradients(
# dynamic_size=False, dtype=dtypes.float32)
# # TODO(ebrevdo): re-enable when While supports non-float32 gradients.
# # self._testWhileLoopWritePackGradients(
# # dynamic_size=False, dtype=tf.int64)
# def testWhileLoopDynamicWritePackGradients(self):
# self._testWhileLoopWritePackGradients(
# dynamic_size=True, dtype=dtypes.float32)
# def testGradSerialTwoLoops(self):
# with self.session(), self.test_scope():
# num_steps = 100
# acc = tensor_array_ops.TensorArray(
# dtype=dtypes.float32,
# size=num_steps,
# clear_after_read=False,
# element_shape=tensor_shape.scalar())
# i = constant_op.constant(0, name="i")
# x = constant_op.constant(2.0, name="x")
# c = lambda i, acc: i < 5
# def b(i, acc):
# x1 = control_flow_ops.cond(
# math_ops.equal(i, 0), lambda: x,
# lambda: math_ops.multiply(acc.read(i - 1), 2.0))
# return i + 1, acc.write(i, x1)
# i1, acc1 = control_flow_ops.while_loop(c, b, [i, acc])
# z = constant_op.constant(0.0)
# def fn(i, acc):
# return i + 1, acc.write(i, z)
# _, acc2 = control_flow_ops.while_loop(lambda i, acc: i < num_steps, fn,
# [i1, acc1])
# r = acc2.stack()
# grad = gradients_impl.gradients(r, [x])[0]
# self.assertAllClose(31.0, self.evaluate(grad))
def testSumOfTwoReadVariablesWithoutRepeatGrad(self):
with self.session() as session, self.test_scope():
g0 = -(np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
def fn():
a = array_ops.identity(
np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
b = array_ops.identity(
np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1 + 3 * 5)
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
ta = ta.write(0, a, name="write_a")
ta = ta.write(1, b, name="write_b")
c = (
ta.read(0, name="read_a_0") + # a + b
ta.read(1, name="read_b_0"))
grad_a = gradients_impl.gradients([c], [a], [g0])[0] # d(a+b)/da = 1
grad_b = gradients_impl.gradients([c], [b], [g0])[0] # d(a+b)/db = 1
return [grad_a, grad_b]
grad_a, grad_b = xla.compile(fn)
# Test gradients calculated individually
grad_a_t, = self.evaluate([grad_a])
self.assertAllEqual(grad_a_t, g0)
grad_b_t, = self.evaluate([grad_b])
self.assertAllEqual(grad_b_t, g0)
# Test gradients calculated jointly.
joint_grad_a_t, joint_grad_b_t = self.evaluate([grad_a, grad_b])
self.assertAllEqual(joint_grad_a_t, g0)
self.assertAllEqual(joint_grad_b_t, g0)
def testWriteShape(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c0 = constant_op.constant([4.0, 5.0])
w0 = ta.write(0, c0)
r0 = w0.read(0)
return [c0, r0]
c0, r0 = xla.compile(fn)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c1 = constant_op.constant([6.0, 7.0])
w0 = ta.write(0, c0)
w1 = w0.write(1, c1)
r0 = w1.read(0)
r1 = w1.read(1)
return [r0, c1, r1]
[r0, c1, r1] = xla.compile(fn)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
self.assertAllEqual(c1.get_shape(), r1.get_shape())
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, c0)
c2 = constant_op.constant([4.0, 5.0, 6.0])
return w0.write(0, c2).flow
with self.assertRaises(ValueError):
self.evaluate(xla.compile(fn))
def _testGradientWhenNotAllComponentsRead(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
x = constant_op.constant([2.0, 3.0])
w = ta.unstack(x)
r0 = w.read(0)
        # Calculate (dr0/dx0, dr0/dx1). Since r0 = x0, the gradients are (1, 0).
return gradients_impl.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
grad_r0_vals = self.evaluate(xla.compile(fn))[0]
self.assertAllEqual(grad_r0_vals, [1.0, 0.0])
def testGradientWhenNotAllComponentsRead(self):
self._testGradientWhenNotAllComponentsRead()
def _testTensorArrayEvalEmpty(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, infer_shape=False)
return ta.stack()
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError, "Uninitialized TensorArray passed to "
"TensorArrayStack/TensorArrayGatherV3"):
xla.compile(fn)[0].eval()
@test_util.disable_control_flow_v2("b/124335246")
def testTensorArrayEvalEmpty(self):
self._testTensorArrayEvalEmpty()
def _testTensorArrayEvalEmptyWithDefault(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, infer_shape=True)
size = ta.size()
ta = ta.unstack(array_ops.zeros([0, 3, 5]))
return [size, ta.stack()]
[size, stack] = self.evaluate(xla.compile(fn))
self.assertEqual(0, size)
self.assertAllEqual([0, 3, 5], stack.shape)
# Concatenating zero tensors along their first dimension gives a
# first dimension of zero
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, infer_shape=True)
ta = ta.unstack(array_ops.zeros([0, 3, 5]))
return ta.concat()
# TODO(b/122315751): Enable this.
self.assertAllEqual([0, 5], self.evaluate(xla.compile(fn))[0].shape)
def testTensorArrayEvalEmptyWithDefault(self):
self._testTensorArrayEvalEmptyWithDefault()
def _testTensorArrayScatterRead(self, tf_dtype):
with self.session() as session, self.test_scope():
convert = _make_converter(tf_dtype)
id0 = array_ops.placeholder(dtypes.int32)
id1 = array_ops.placeholder(dtypes.int32)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=10)
indices = constant_op.constant([1, 8])
value = constant_op.constant(convert([[1.0, -1.0], [10.0, -10.0]]))
w = ta.scatter(indices, value)
r0 = w.read(id0)
r1 = w.read(id1)
return [r0, r1]
# Test aggregation of read
read_vals = session.run(xla.compile(fn), feed_dict={id0: 1, id1: 8})
self.assertAllEqual(convert([1.0, -1.0]), read_vals[0])
self.assertAllEqual(convert([10.0, -10.0]), read_vals[1])
@test_util.disable_control_flow_v2("b/122315734 (scatter)")
def testTensorArrayScatterRead(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayScatterRead(dtype)
self._testTensorArrayScatterRead(dtypes.bool)
@test_util.disable_control_flow_v2("b/122315734 (scatter)")
def testTensorArrayScatterReadAndGradients(self):
with self.session() as session, self.test_scope():
id0 = array_ops.placeholder(dtypes.int32)
id1 = array_ops.placeholder(dtypes.int32)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=10)
indices = constant_op.constant([1, 8])
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.scatter(indices, value)
r0 = w.read(id0)
r1 = w.read(id1)
# Test combined gradients + aggregation of read(0).
grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
return [[r0, r1], grad]
read_vals, grad_vals = session.run(
xla.compile(fn), feed_dict={
id0: 1,
id1: 8
})
self.assertEqual(len(read_vals), 2)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([1.0, -1.0], read_vals[0])
self.assertAllEqual([10.0, -10.0], read_vals[1])
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
@test_util.disable_control_flow_v2("b/122315378 (gather)")
def testTensorArrayWriteGatherAndGradients(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=10)
values = constant_op.constant([[1.0 * x, -1.0 * x] for x in range(10)])
indices = constant_op.constant([1, 8])
w = ta.unstack(values)
g = w.gather(indices)
        # Test gradients of the gather with respect to the unstacked values.
grad = gradients_impl.gradients(
ys=[g], xs=[values], grad_ys=[[[2.0, 3.0], [4.0, 5.0]]])
return [[g], grad]
g_vals, grad_vals = self.evaluate(xla.compile(fn))
# Gradients for 8 of the 10 unread components are zero.
expected_grad = np.zeros((10, 2))
expected_grad[1] = [2.0, 3.0]
expected_grad[8] = [4.0, 5.0]
self.assertEqual(len(g_vals), 1)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[1.0, -1.0], [8.0, -8.0]], g_vals[0])
self.assertAllEqual(expected_grad, grad_vals[0])
def testTensorArrayIdentity(self):
with self.session() as session, self.test_scope():
tensor_arrays = {}
v0 = resource_variable_ops.ResourceVariable(0.0)
v1 = resource_variable_ops.ResourceVariable(0.0)
def fn():
ta0 = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, infer_shape=False)
ta1 = tensor_array_ops.TensorArray(
dtype=dtypes.int32, size=4, infer_shape=True)
ta0 = ta0.write(0, 0.)
ta1 = ta1.write(0, 1)
with ops.control_dependencies([v0.assign_add(1.0)]):
ta0 = ta0.identity()
with ops.control_dependencies([v1.assign_add(1.0)]):
ta1 = ta1.identity()
read0 = ta0.read(0)
read1 = ta1.read(0)
size0 = ta0.size()
size1 = ta1.size()
tensor_arrays[0] = ta0
tensor_arrays[1] = ta1
return [read0, read1, size0, size1, v0, v1]
variables.global_variables_initializer().run()
read0_v, read1_v, size0_v, size1_v, v0, v1 = self.evaluate(
xla.compile(fn))
# Tests correct properties on new TensorArrays.
self.assertEqual(dtypes.float32, tensor_arrays[0].dtype)
self.assertEqual(dtypes.int32, tensor_arrays[1].dtype)
      # Tests that the control dependencies were added and executed.
self.assertEqual(1.0, v0)
self.assertEqual(1.0, v1)
      # Tests that reads and sizes come from the correct TensorArray.
self.assertEqual(read0_v, 0)
self.assertEqual(read1_v, 1)
self.assertEqual(size0_v, 2)
self.assertEqual(size1_v, 4)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/tensor_array_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import math
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test
def GenerateNumpyRandomRGB(shape):
  # Only generate floating-point values that are fractions like n / 256, since
  # they are RGB pixels. Some low-precision floating-point types used in this
  # test can't represent arbitrary values accurately.
return np.random.randint(0, 256, shape) / 256.
class RGBToHSVTest(xla_test.XLATestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in self.float_types:
inp = GenerateNumpyRandomRGB(shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.session() as sess:
batch0 = array_ops.placeholder(nptype, shape=shape)
with self.test_scope():
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
with self.test_scope():
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2],
{batch0: inp})
# Verify that processing batch elements together is the same as separate
self.assertAllCloseAccordingToType(batch1, join1, half_rtol=0.000002)
self.assertAllCloseAccordingToType(batch2, join2, half_rtol=0.000002)
self.assertAllCloseAccordingToType(
batch2, inp, bfloat16_atol=0.03, half_rtol=0.02)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in self.float_types:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.session():
placeholder = array_ops.placeholder(nptype)
with self.test_scope():
hsv = image_ops.rgb_to_hsv(placeholder)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = rgb.eval(feed_dict={placeholder: rgb_np})
self.assertAllCloseAccordingToType(rgb_tf, rgb_np, bfloat16_atol=0.03)
def testRGBToHSVNumpy(self):
"""Tests the RGB to HSV conversion matches a reference implementation."""
for nptype in self.float_types:
rgb_flat = GenerateNumpyRandomRGB((64, 3)).astype(nptype)
rgb_np = rgb_flat.reshape(4, 4, 4, 3)
hsv_np = np.array([
colorsys.rgb_to_hsv(
r.astype(np.float64), g.astype(np.float64), b.astype(np.float64))
for r, g, b in rgb_flat
])
hsv_np = hsv_np.reshape(4, 4, 4, 3)
with self.session():
placeholder = array_ops.placeholder(nptype)
with self.test_scope():
hsv_op = image_ops.rgb_to_hsv(placeholder)
hsv_tf = hsv_op.eval(feed_dict={placeholder: rgb_np})
self.assertAllCloseAccordingToType(hsv_tf, hsv_np)
class AdjustContrastTest(xla_test.XLATestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.session():
x = array_ops.placeholder(x_np.dtype, shape=x_np.shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = image_ops.adjust_contrast(flt_x, contrast_factor)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllClose(y_tf, y_np, 1e-6)
def testFloatContrast(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testBatchContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
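    # Reference implementation: contrast adjustment scales each pixel's
    # distance from the per-image, per-channel mean, i.e.
    # y = mean + contrast_factor * (x - mean).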
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
def _adjustContrastTf(self, x_np, contrast_factor):
with self.session():
x = array_ops.placeholder(np.float32)
with self.test_scope():
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = y.eval({x: x_np})
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
class AdjustHueTest(xla_test.XLATestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(flt_x, delta)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(flt_x, delta)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def testBatchAdjustHue(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(flt_x, delta)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def _adjustHueNp(self, x_np, delta_h):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
h += delta_h
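      # Adding 10.0 before fmod keeps the argument positive (h is in [0, 1) and
      # delta_h is in [-1, 1] in these tests), so the adjusted hue wraps cleanly
      # into [0, 1).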
h = math.fmod(h + 10.0, 1.0)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def _adjustHueTf(self, x_np, delta_h):
with self.session():
x = array_ops.placeholder(dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(x, delta_h)
y_tf = y.eval({x: x_np})
return y_tf
def testAdjustRandomHue(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_np = self._adjustHueNp(x_np, delta_h)
y_tf = self._adjustHueTf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-4)
def testInvalidShapes(self):
fused = False
if not fused:
# The tests are known to pass with the fused adjust_hue. We will enable
# them when the fused implementation is the default.
return
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
fused = False
with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"):
self._adjustHueTf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError("input must have 3 channels"):
self._adjustHueTf(x_np, delta_h)
class AdjustSaturationTest(xla_test.XLATestCase):
def _adjust_saturation(self, image, saturation_factor):
image = ops.convert_to_tensor(image, name="image")
orig_dtype = image.dtype
flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
with self.test_scope():
saturation_adjusted_image = gen_image_ops.adjust_saturation(
flt_image, saturation_factor)
return image_ops.convert_image_dtype(saturation_adjusted_image, orig_dtype)
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_rgb_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_rgb_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_rgb_data, dtype=np.uint8).reshape(x_shape)
with self.session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def _adjustSaturationNp(self, x_np, scale):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
s *= scale
s = min(1.0, max(0.0, s))
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def testAdjustRandomSaturation(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
with self.session():
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand()
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_baseline = self._adjustSaturationNp(x_np, scale)
x = array_ops.placeholder(dtypes.float32, shape=x_shape)
with self.test_scope():
y_fused = self._adjust_saturation(x,
scale).eval(feed_dict={x: x_np})
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class ResizeNearestNeighborTest(xla_test.XLATestCase):
# TODO(ilch): Wrap each test with `for dtype in self.float_types:`
# Some work to understand how that should be done was presented here:
# cl/227850213
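  # With align_corners=True, output index j samples source index
  # round(j * (src - 1) / (dst - 1)), so the corner pixels of the input and
  # output coincide. In the expected arrays below, halfway cases round up.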
def _assertForwardOpMatchesExpected(self,
image_np,
target_shape,
expected=None,
large_tolerance=False,
align_corners=True):
if expected is None:
self.fail("expected must be specified")
with self.session() as sess, self.test_scope():
image = array_ops.placeholder(image_np.dtype)
resized = gen_image_ops.resize_nearest_neighbor(
image, target_shape, align_corners=align_corners)
out = sess.run(resized, {image: image_np[np.newaxis, :, :, np.newaxis]})
if large_tolerance:
self.assertAllClose(
expected[np.newaxis, :, :, np.newaxis], out, rtol=2e-4, atol=2e-4)
else:
self.assertAllClose(expected[np.newaxis, :, :, np.newaxis], out)
def testAlignCorners2x2To1x1(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2], [3, 4]], dtype=np.float32), [1, 1],
expected=np.array([[1]], dtype=np.float32))
def testAlignCorners1x1To2x2(self):
self._assertForwardOpMatchesExpected(
np.array([[1]], dtype=np.float32), [2, 2],
expected=np.array([[1, 1], [1, 1]], dtype=np.float32))
def testAlignCorners1x1To3x3(self):
self._assertForwardOpMatchesExpected(
np.array([[1]], dtype=np.float32), [3, 3],
expected=np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.float32))
def testAlignCorners2x2To3x3(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2], [3, 4]], dtype=np.float32), [3, 3],
expected=np.array([[1, 2, 2], [3, 4, 4], [3, 4, 4]], dtype=np.float32))
def testAlignCorners2x2To4x4(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2], [3, 4]], dtype=np.float32), [4, 4],
expected=np.array(
[[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]],
dtype=np.float32), large_tolerance=True)
def testAlignCorners3x3To2x2(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [2, 2],
expected=np.array([[1, 3], [7, 9]], dtype=np.float32))
def testAlignCorners4x4To3x3(self):
self._assertForwardOpMatchesExpected(
np.array(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]],
dtype=np.float32), [3, 3],
expected=np.array([[1, 3, 4], [9, 11, 12], [13, 15, 16]],
dtype=np.float32))
def testAlignCorners3x3To4x4(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [4, 4],
expected=np.array(
[[1, 2, 2, 3], [4, 5, 5, 6], [4, 5, 5, 6], [7, 8, 8, 9]],
dtype=np.float32))
def testAlignCorners3x3To6x6(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [6, 6],
expected=np.array(
[[1, 1, 2, 2, 3, 3], [1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6],
[4, 4, 5, 5, 6, 6], [7, 7, 8, 8, 9, 9], [7, 7, 8, 8, 9, 9]],
dtype=np.float32))
def testAlignCorners3x3To9x9(self):
# The expected matrix might look uneven in terms of how many of each number
# there is, but this is an artifact of doing the dilation and convolution
# iteratively. The behavior is less esoteric in the 3x3To12x12 case below.
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [9, 9],
expected=np.array(
[[1, 1, 2, 2, 2, 2, 3, 3, 3], [1, 1, 2, 2, 2, 2, 3, 3, 3],
[4, 4, 5, 5, 5, 5, 6, 6, 6], [4, 4, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 5, 5, 5, 5, 6, 6, 6], [4, 4, 5, 5, 5, 5, 6, 6, 6],
[7, 7, 8, 8, 8, 8, 9, 9, 9], [7, 7, 8, 8, 8, 8, 9, 9, 9],
[7, 7, 8, 8, 8, 8, 9, 9, 9]],
dtype=np.float32))
def testAlignCorners3x3To12x12(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [12, 12],
expected=np.array([[1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],
[1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],
[1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9],
[7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9],
[7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9]],
dtype=np.float32))
class ResizeBilinearTest(parameterized.TestCase, xla_test.XLATestCase):
def _assertForwardOpMatchesExpected(self,
image_np,
target_shape,
expected=None,
large_tolerance=False,
align_corners=True):
if expected is None:
self.fail("expected must be specified")
with self.session() as sess, self.test_scope():
image = array_ops.placeholder(image_np.dtype)
resized = gen_image_ops.resize_bilinear(
image, target_shape, align_corners=align_corners)
out = sess.run(resized, {image: image_np[np.newaxis, :, :, np.newaxis]})
if large_tolerance:
self.assertAllClose(
expected[np.newaxis, :, :, np.newaxis], out, rtol=0.1, atol=0.01)
else:
self.assertAllClose(expected[np.newaxis, :, :, np.newaxis], out)
@parameterized.named_parameters(
("1x2To3x3", 1, 2, 3, 3),
("2x2To1x1", 2, 2, 1, 1),
("2x2To3x3", 2, 2, 3, 3),
("3x3To2x2", 3, 3, 2, 2),
("4x4To3x3", 4, 4, 3, 3),
("3x3To9x9", 3, 3, 9, 9),
("4x4To8x8", 4, 4, 8, 8),
("8x8To16x16", 8, 8, 16, 16),
("64x64To512x512", 64, 64, 512, 512),
("80x80To512x512", 80, 80, 512, 512),
("96x96To512x512", 96, 96, 512, 512),
("112x112To512x512", 112, 112, 512, 512),
("256x48To2048x384", 256, 48, 2048, 384),
("320x60To2048x384", 320, 60, 2048, 384),
("448x84To2048x384", 448, 84, 2048, 384),
("69x69To545x545", 69, 69, 545, 545),
("86x86To545x545", 86, 86, 545, 545),
("103x103To545x545", 103, 103, 545, 545),
("120x120To545x545", 120, 120, 545, 545),
("57x57To456x456", 57, 57, 456, 456),
("72x72To456x456", 72, 72, 456, 456),
("86x86To456x456", 86, 86, 456, 456),
("100x100To456x456", 100, 100, 456, 456),
("64x64To224x224", 64, 64, 224, 224),
("128x128To224x224", 128, 128, 224, 224),
("256x256To224x224", 256, 256, 224, 224),
("512x512To224x224", 512, 512, 224, 224),
("64x64To299x299", 64, 64, 299, 299),
("128x128To299x299", 128, 128, 299, 299),
("256x256To299x299", 256, 256, 299, 299),
("512x512To299x299", 512, 512, 299, 299),
("224x224To224x224", 224, 224, 224, 224),
# This test is disabled because it is very slow. It is slow because
# 383 is prime, 383 and 2047 are coprime, and 2048 is large.
# ("Disabled_384x72To2048x384", 384, 72, 2048, 384),
)
def test(self, src_y, src_x, dst_y, dst_x):
max_y = max(src_y - 1, 1) * (dst_y - 1) + 1
max_x = max(src_x - 1, 1) * (dst_x - 1) + 1
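    # The source image samples the linear ramp f(y, x) = y * max_x + x on a
    # max_y x max_x lattice (every (dst_y - 1)-th row, every (dst_x - 1)-th
    # column). Bilinear interpolation reproduces a linear ramp exactly, so the
    # align_corners resize should return the same ramp sampled at every
    # (src_y - 1)-th row and (src_x - 1)-th column, which is `result` below.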
input_data = [
range(y * max_x, (y + 1) * max_x, max(dst_x - 1, 1))
for y in range(0, max_y, max(dst_y - 1, 1))
]
result = [
range(y * max_x, (y + 1) * max_x, max(src_x - 1, 1))
for y in range(0, max_y, max(src_y - 1, 1))
]
self._assertForwardOpMatchesExpected(
np.array(input_data, dtype=np.float32), [dst_y, dst_x],
expected=np.array(result, dtype=np.float32),
large_tolerance=True)
class ResizeBilinearGradTest(parameterized.TestCase, xla_test.XLATestCase):
def _assertBackwardOpMatchesExpected(self,
grads_np,
input_shape=None,
dtype=None,
expected=None,
large_tolerance=False):
if input_shape is None:
self.fail("input_shape must be specified")
if expected is None:
self.fail("expected must be specified")
with self.session() as sess, self.test_scope():
dtype = dtype or np.float32
grads = array_ops.placeholder(np.float32)
resized = gen_image_ops.resize_bilinear_grad(
grads,
np.zeros([1, input_shape[0], input_shape[1], 1], dtype=dtype),
align_corners=True)
out = sess.run(resized, {grads: grads_np[np.newaxis, :, :, np.newaxis]})
if large_tolerance:
self.assertAllClose(
expected[np.newaxis, :, :, np.newaxis], out, rtol=0.1, atol=0.01)
else:
self.assertAllCloseAccordingToType(
expected[np.newaxis, :, :, np.newaxis], out)
@parameterized.named_parameters(
("1x3To1x3", 1, 2, 1, 3),
("1x2To3x2", 1, 2, 3, 2),
("1x2To3x3", 1, 2, 3, 3),
("1x1To4x1", 1, 1, 4, 1),
("1x1To5x1", 1, 1, 5, 1),
("2x2To1x1", 2, 2, 1, 1),
("2x2To3x3", 2, 2, 3, 3),
("3x3To2x2", 3, 3, 2, 2),
("4x4To3x3", 4, 4, 3, 3),
("3x3To9x9", 3, 3, 9, 9),
("4x4To8x8", 4, 4, 8, 8),
("8x8To16x16", 8, 8, 16, 16),
("2x64To2x512", 2, 64, 2, 512),
("64x64To512x512", 64, 64, 512, 512),
("80x80To512x512", 80, 80, 512, 512),
("96x96To512x512", 96, 96, 512, 512),
("112x112To512x512", 112, 112, 512, 512),
# ("Disabled_256x48To2048x384", 256, 48, 2048, 384),
# ("Disabled_320x60To2048x384", 320, 60, 2048, 384),
# ("Disabled_448x84To2048x384", 448, 84, 2048, 384),
("69x69To545x545", 69, 69, 545, 545),
("86x86To545x545", 86, 86, 545, 545),
("103x103To545x545", 103, 103, 545, 545),
("120x120To545x545", 120, 120, 545, 545),
("57x57To456x456", 57, 57, 456, 456),
("72x72To456x456", 72, 72, 456, 456),
("86x86To456x456", 86, 86, 456, 456),
("100x100To456x456", 100, 100, 456, 456),
# This test is disabled because it is very slow. It is slow because
# 383 is prime, 383 and 2047 are coprime, and 2048 is large.
# ("Disabled_384x72To2048x384", 384, 72, 2048, 384),
)
def test(self, src_y, src_x, dst_y, dst_x):
def GetRow(src, dst):
if src == 1:
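        # A lone source element receives the full gradient of max(dst - 1, 1)
        # from each of the dst destination positions, i.e. dst * (dst - 1) in
        # total (clamped to 1 when dst == 1).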
return np.array([[max(dst**2 - dst, 1)]])
row = [0] * src
for i in range(0, (dst - 1) * max(src - 1, 1) + 1, src - 1):
prev = int(math.floor(i / max(dst - 1, 1)))
row[prev] += max(dst - 1, 1) - i % max(dst - 1, 1)
if prev + 1 < src:
row[prev + 1] += i % max(dst - 1, 1)
return np.array([row])
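    # GetRow accumulates, per axis, the integer-scaled bilinear weight each of
    # the src positions receives when every one of the dst positions
    # back-propagates a gradient of max(dst - 1, 1). Feeding a constant
    # gradient image of max(dst_x - 1, 1) * max(dst_y - 1, 1) therefore makes
    # the expected input gradient the outer product of the two per-axis rows.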
input_element = max(dst_x - 1, 1) * max(dst_y - 1, 1)
input_data = [[input_element] * dst_x] * dst_y
result = GetRow(src_x, dst_x) * np.transpose(GetRow(src_y, dst_y))
self._assertBackwardOpMatchesExpected(
np.array(input_data, dtype=np.float32), [src_y, src_x],
expected=np.array(result, dtype=np.float32),
large_tolerance=True)
class ResizeBilinearNonAlignCornersTest(xla_test.XLATestCase):
def _assertForwardOpMatchesExpected(self,
image_np,
target_shape,
expected=None,
large_tolerance=False,
align_corners=True):
if expected is None:
self.fail("expected must be specified")
with self.session() as sess, self.test_scope():
image = array_ops.placeholder(image_np.dtype)
resized = gen_image_ops.resize_bilinear(
image, target_shape, align_corners=align_corners)
out = sess.run(resized, {image: image_np[np.newaxis, :, :, np.newaxis]})
if large_tolerance:
self.assertAllClose(
expected[np.newaxis, :, :, np.newaxis], out, rtol=0.1, atol=0.01)
else:
self.assertAllClose(expected[np.newaxis, :, :, np.newaxis], out)
def testNonAlignCorners3x2To6x4(self):
input_data = [[64, 32], [32, 64], [50, 100]]
expected_data = [[64.0, 48.0, 32.0, 32.0], [48.0, 48.0, 48.0, 48.0],
[32.0, 48.0, 64.0, 64.0], [41.0, 61.5, 82.0, 82.0],
[50.0, 75.0, 100.0, 100.0], [50.0, 75.0, 100.0, 100.0]]
for dtype in self.float_types:
self._assertForwardOpMatchesExpected(
np.array(input_data, dtype=dtype), [6, 4],
expected=np.array(expected_data, dtype=np.float32),
align_corners=False)
def testNonAlignCorners6x4To3x2(self):
input_data = [[127, 127, 64, 64], [127, 127, 64, 64], [64, 64, 127, 127],
[64, 64, 127, 127], [50, 50, 100, 100], [50, 50, 100, 100]]
expected_data = [[127, 64], [64, 127], [50, 100]]
for dtype in self.float_types:
self._assertForwardOpMatchesExpected(
np.array(input_data, dtype=dtype), [3, 2],
expected=np.array(expected_data, dtype=dtype),
align_corners=False)
def testNonAlignCorners3x2To6x4Batch2(self):
input_data = [[[64, 32], [32, 64], [50, 100]], [[32, 16], [16, 32],
[25, 50]]]
expected_data = [[[64.0, 48.0, 32.0, 32.0], [48.0, 48.0, 48.0, 48.0],
[32.0, 48.0, 64.0, 64.0], [41.0, 61.5, 82.0, 82.0],
[50.0, 75.0, 100.0, 100.0], [50.0, 75.0, 100.0, 100.0]],
[[32.0, 24.0, 16.0, 16.0], [24.0, 24.0, 24.0, 24.0],
[16.0, 24.0, 32.0, 32.0], [20.5, 30.75, 41.0, 41.0],
[25.0, 37.5, 50.0, 50.0], [25.0, 37.5, 50.0, 50.0]]]
for dtype in self.float_types:
input_image = np.array(input_data, dtype=dtype)
expected = np.array(expected_data, dtype=dtype)
with self.session() as sess, self.test_scope():
image = array_ops.placeholder(input_image.dtype)
resized = gen_image_ops.resize_bilinear(
image, [6, 4], align_corners=False)
out = sess.run(resized, {image: input_image[:, :, :, np.newaxis]})
self.assertAllClose(expected[:, :, :, np.newaxis], out)
class NonMaxSuppressionTest(xla_test.XLATestCase):
def testNMS128From1024(self):
num_boxes = 1024
boxes_np = np.random.normal(50, 10, (num_boxes, 4)).astype("f4")
scores_np = np.random.normal(0.5, 0.1, (num_boxes,)).astype("f4")
max_output_size = 128
iou_threshold_np = np.array(0.5, dtype=np.float32)
score_threshold_np = np.array(0.0, dtype=np.float32)
with self.session() as sess:
boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
iou_threshold_np.shape)
score_threshold = array_ops.placeholder(score_threshold_np.dtype,
score_threshold_np.shape)
with self.test_scope():
selected_indices = image_ops.non_max_suppression_padded(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_to_max_output_size=True)
inputs_feed = {
boxes: boxes_np,
scores: scores_np,
score_threshold: score_threshold_np,
iou_threshold: iou_threshold_np
}
(indices_tf, _) = sess.run(selected_indices, feed_dict=inputs_feed)
self.assertEqual(indices_tf.size, max_output_size)
def testNMS3From6Boxes(self):
# Three boxes are selected based on IOU.
boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
boxes_np = np.array(boxes_data, dtype=np.float32)
scores_data = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
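    # In descending score order the boxes are 3, 0, 1, 2, 4, 5; boxes 1 and 2
    # overlap box 0 and box 4 overlaps box 3 above the 0.5 IoU threshold, so
    # the surviving indices should be [3, 0, 5].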
scores_np = np.array(scores_data, dtype=np.float32)
max_output_size = 3
iou_threshold_np = np.array(0.5, dtype=np.float32)
score_threshold_np = np.array(0.0, dtype=np.float32)
with self.session() as sess:
boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
iou_threshold_np.shape)
score_threshold = array_ops.placeholder(score_threshold_np.dtype,
score_threshold_np.shape)
with self.test_scope():
selected_indices = image_ops.non_max_suppression_padded(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_to_max_output_size=True)
inputs_feed = {
boxes: boxes_np,
scores: scores_np,
score_threshold: score_threshold_np,
iou_threshold: iou_threshold_np
}
(indices_tf, num_valid) = sess.run(
selected_indices, feed_dict=inputs_feed)
self.assertEqual(indices_tf.size, max_output_size)
self.assertEqual(num_valid, 3)
self.assertAllClose(indices_tf[:num_valid], [3, 0, 5])
def testNMS3Then2WithScoreThresh(self):
# Three boxes are selected based on IOU.
# One is filtered out by score threshold.
boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
boxes_np = np.array(boxes_data, dtype=np.float32)
scores_data = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
scores_np = np.array(scores_data, dtype=np.float32)
max_output_size = 3
iou_threshold_np = np.array(0.5, dtype=np.float32)
score_threshold_np = np.array(0.4, dtype=np.float32)
with self.session() as sess:
boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
iou_threshold_np.shape)
score_threshold = array_ops.placeholder(score_threshold_np.dtype,
score_threshold_np.shape)
with self.test_scope():
selected_indices = image_ops.non_max_suppression_padded(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_to_max_output_size=True)
inputs_feed = {
boxes: boxes_np,
scores: scores_np,
iou_threshold: iou_threshold_np,
score_threshold: score_threshold_np
}
(indices_tf, num_valid) = sess.run(
selected_indices, feed_dict=inputs_feed)
self.assertEqual(indices_tf.size, max_output_size)
self.assertEqual(num_valid, 2)
self.assertAllClose(indices_tf[:num_valid], [3, 0])
def testNMS3Then1WithScoreMaxThresh(self):
# Three boxes are selected based on IOU.
# One is filtered out by score threshold.
# One is filtered out by max_output_size.
boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
boxes_np = np.array(boxes_data, dtype=np.float32)
scores_data = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
scores_np = np.array(scores_data, dtype=np.float32)
max_output_size = 1
iou_threshold_np = np.array(0.5, dtype=np.float32)
score_threshold_np = np.array(0.4, dtype=np.float32)
with self.session() as sess:
boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
iou_threshold_np.shape)
score_threshold = array_ops.placeholder(score_threshold_np.dtype,
score_threshold_np.shape)
with self.test_scope():
selected_indices = image_ops.non_max_suppression_padded(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_to_max_output_size=True)
inputs_feed = {
boxes: boxes_np,
scores: scores_np,
iou_threshold: iou_threshold_np,
score_threshold: score_threshold_np
}
(indices_tf, num_valid) = sess.run(
selected_indices, feed_dict=inputs_feed)
self.assertEqual(indices_tf.size, max_output_size)
self.assertEqual(num_valid, 1)
self.assertAllClose(indices_tf[:num_valid], [3])
def testSelectFromContinuousOverLap(self):
# Tests that a suppressed box does not itself suppress other boxes.
boxes_data = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 3]]
boxes_np = np.array(boxes_data, dtype=np.float32)
scores_data = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
scores_np = np.array(scores_data, dtype=np.float32)
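    # Boxes 1 and 3 are suppressed by kept boxes 0 and 2 (IoU 0.8 / 1.2 > 0.5),
    # while boxes 2 and 4 overlap a kept box only at IoU 0.6 / 1.4 < 0.5, so
    # the first three survivors are [0, 2, 4]; box 5 is cut by max_output_size.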
max_output_size = 3
iou_threshold_np = np.array(0.5, dtype=np.float32)
score_threshold_np = np.array(0.1, dtype=np.float32)
with self.session() as sess:
boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
iou_threshold_np.shape)
score_threshold = array_ops.placeholder(score_threshold_np.dtype,
score_threshold_np.shape)
with self.test_scope():
selected_indices = image_ops.non_max_suppression_padded(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_to_max_output_size=True)
inputs_feed = {
boxes: boxes_np,
scores: scores_np,
iou_threshold: iou_threshold_np,
score_threshold: score_threshold_np
}
(indices_tf, num_valid) = sess.run(
selected_indices, feed_dict=inputs_feed)
self.assertEqual(indices_tf.size, max_output_size)
self.assertEqual(num_valid, 3)
self.assertAllClose(indices_tf[:num_valid], [0, 2, 4])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/image_ops_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.self_adjoint_eig."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test
class SelfAdjointEigOpTest(xla_test.XLATestCase, parameterized.TestCase):
def _test(self, dtype, shape):
np.random.seed(1)
x_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype)
x_np = x_np + np.swapaxes(x_np, -1, -2)
n = shape[-1]
e_np, _ = np.linalg.eigh(x_np)
with self.session() as sess:
x_tf = array_ops.placeholder(dtype)
with self.test_scope():
e, v = linalg_ops.self_adjoint_eig(x_tf)
e_val, v_val = sess.run([e, v], feed_dict={x_tf: x_np})
v_diff = np.matmul(v_val, np.swapaxes(v_val, -1, -2)) - np.eye(n)
self.assertAlmostEqual(np.mean(v_diff**2), 0.0, delta=1e-6)
self.assertAlmostEqual(np.mean((e_val - e_np)**2), 0.0, delta=1e-6)
SIZES = [1, 2, 5, 10, 32]
DTYPES = [np.float32]
PARAMS = itertools.product(SIZES, DTYPES)
@parameterized.parameters(*PARAMS)
def testSelfAdjointEig(self, n, dtype):
for batch_dims in [(), (3,)] + [(3, 2)] * (n < 10):
self._test(dtype, batch_dims + (n, n))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/self_adjoint_eig_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.dynamic_stitch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import googletest
class DynamicStitchTest(xla_test.XLATestCase):
def _AssertDynamicStitchResultIs(self, indices, data, expected):
with self.session() as session:
index_placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype)) for arg in indices
]
data_placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype)) for arg in data
]
with self.test_scope():
output = data_flow_ops.dynamic_stitch(index_placeholders,
data_placeholders)
feed_dict = {}
for placeholder, value in zip(index_placeholders, indices):
feed_dict[placeholder] = value
for placeholder, value in zip(data_placeholders, data):
feed_dict[placeholder] = value
result = session.run(output, feed_dict=feed_dict)
self.assertAllClose(expected, result, rtol=1e-3)
def testSimpleEmpty(self):
idx1 = np.array([0, 2], dtype=np.int32)
idx2 = np.array([[1], [3]], dtype=np.int32)
val1 = np.array([[], []], dtype=np.int32)
val2 = np.array([[[]], [[]]], dtype=np.int32)
self._AssertDynamicStitchResultIs(
[idx1, idx2], [val1, val2],
expected=np.array([[], [], [], []], np.int32))
def testEmptyIndex(self):
idx1 = np.array([], dtype=np.int32)
idx2 = np.array([[], []], dtype=np.int32)
val1 = np.ndarray(shape=(0, 9), dtype=np.int32)
val2 = np.ndarray(shape=(2, 0, 9), dtype=np.int32)
self._AssertDynamicStitchResultIs([idx1, idx2], [val1, val2],
expected=np.ndarray(
shape=(0, 9), dtype=np.int32))
def testSimple1D(self):
val1 = np.array([0, 4, 7], dtype=np.int32)
val2 = np.array([1, 6, 2, 3, 5], dtype=np.int32)
val3 = np.array([0, 40, 70], dtype=np.float32)
val4 = np.array([10, 60, 20, 30, 50], dtype=np.float32)
expected = np.array([0, 10, 20, 30, 40, 50, 60, 70], dtype=np.float32)
self._AssertDynamicStitchResultIs(
[val1, val2], [val3, val4], expected=expected)
def testSimple2D(self):
val1 = np.array([0, 4, 7], dtype=np.int32)
val2 = np.array([1, 6], dtype=np.int32)
val3 = np.array([2, 3, 5], dtype=np.int32)
val4 = np.array([[0, 1], [40, 41], [70, 71]], dtype=np.float32)
val5 = np.array([[10, 11], [60, 61]], dtype=np.float32)
val6 = np.array([[20, 21], [30, 31], [50, 51]], dtype=np.float32)
expected = np.array(
[[0, 1], [10, 11], [20, 21], [30, 31], [40, 41], [50, 51], [60, 61],
[70, 71]],
dtype=np.float32)
self._AssertDynamicStitchResultIs(
[val1, val2, val3], [val4, val5, val6], expected=expected)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/dynamic_stitch_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateful random-number generation ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.client import device_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.kernel_tests.random import util as \
random_test_util
from tensorflow.python.ops import gen_stateful_random_ops
from tensorflow.python.ops import stateful_random_ops as \
random
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def xla_device_name():
devices = device_lib.list_local_devices()
def find_type(device_type):
for d in devices:
if d.device_type == device_type:
return d.name
return None
name = find_type("TPU") or find_type("XLA_GPU") or find_type("XLA_CPU")
if name is None:
raise ValueError(
"Can't find any XLA device. Available devices:\n%s" % devices)
return str(name)
ALGS = [random.RNG_ALG_PHILOX, random.RNG_ALG_THREEFRY]
INTS = [dtypes.int32, dtypes.uint32, dtypes.int64, dtypes.uint64]
# TODO(wangpeng): use parametrized tests to test both ThreeFry and Philox
class StatefulRandomOpsTest(xla_test.XLATestCase, parameterized.TestCase):
"""Test cases for stateful random-number generator operators."""
_ints = INTS
_floats = [dtypes.bfloat16, dtypes.float32, dtypes.float64]
@parameterized.parameters(ALGS)
@test_util.run_v2_only
def testSimple(self, alg):
"""A simple test."""
with ops.device(xla_device_name()):
gen = random.Generator.from_seed(seed=0, alg=alg)
gen.normal(shape=(3,))
gen.uniform(shape=(3,), minval=0, maxval=10, dtype=dtypes.uint32)
gen.uniform_full_int(shape=(3,))
@parameterized.parameters(ALGS)
@test_util.run_v2_only
def testDefun(self, alg):
"""Test for defun."""
with ops.device(xla_device_name()):
gen = random.Generator.from_seed(seed=0, alg=alg)
@def_function.function
def f():
x = gen.normal(shape=(3,))
y = gen.uniform(shape=(3,), minval=0, maxval=10, dtype=dtypes.uint32)
z = gen.uniform_full_int(shape=(3,))
return (x, y, z)
f()
def _compareToKnownOutputs(self, g, counter, key, expect):
"""Compares against known outputs for specific counter and key inputs."""
def uint32s_to_uint64(a, b):
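      # Pack two 32-bit words into a single 64-bit value, low word first.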
return b << 32 | a
def uint32s_to_uint64s(ls):
return [uint32s_to_uint64(ls[2 * i], ls[2 * i + 1])
for i in range(len(ls) // 2)]
ctr_len = len(counter)
counter = uint32s_to_uint64s(counter)
key = uint32s_to_uint64s(key)
state = counter + key
g.reset(state)
got = g.uniform_full_int(shape=(ctr_len,), dtype=dtypes.uint32)
self.assertAllEqual(expect, got)
g.reset(state)
got = g.uniform_full_int(shape=(ctr_len // 2,), dtype=dtypes.uint64)
self.assertAllEqual(uint32s_to_uint64s(expect), got)
@test_util.run_v2_only
def testThreefry2x32(self):
"""Tests ThreeFry2x32 conforms to known results.
"""
# Based on
# https://github.com/google/jax/blob/8565a3486adf16beb388b2364c9cd930d7a0d92d/tests/random_test.py#L65-L85
# which is in turn based on
# https://github.com/DEShawResearch/Random123-Boost/blob/65e3d874b67aa7b3e02d5ad8306462f52d2079c0/libs/random/test/test_threefry.cpp#L30-L32
with ops.device(xla_device_name()):
g = random.Generator.from_seed(seed=0, alg=random.RNG_ALG_THREEFRY)
self._compareToKnownOutputs(
g,
[0x00000000, 0x00000000], [0x00000000, 0x00000000],
[0x6b200159, 0x99ba4efe])
self._compareToKnownOutputs(
g,
[0xffffffff, 0xffffffff], [0xffffffff, 0xffffffff],
[0x1cb996fc, 0xbb002be7])
self._compareToKnownOutputs(
g,
[0x243f6a88, 0x85a308d3], [0x13198a2e, 0x03707344],
[0xc4923a9c, 0x483df7a0])
@test_util.run_v2_only
def testPhilox4x32(self):
"""Tests Philox4x32 conforms to known results.
"""
# Based on
# https://github.com/DEShawResearch/Random123-Boost/blob/65e3d874b67aa7b3e02d5ad8306462f52d2079c0/libs/random/test/test_philox.cpp#L50-L52
with ops.device(xla_device_name()):
g = random.Generator.from_seed(seed=0, alg=random.RNG_ALG_PHILOX)
self._compareToKnownOutputs(
g,
[0x00000000, 0x00000000, 0x00000000, 0x00000000],
[0x00000000, 0x00000000],
[0x6627e8d5, 0xe169c58d, 0xbc57ac4c, 0x9b00dbd8])
self._compareToKnownOutputs(
g,
[0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff],
[0xffffffff, 0xffffffff],
[0x408f276d, 0x41c83b0e, 0xa20bc7c6, 0x6d5451fd])
self._compareToKnownOutputs(
g,
[0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344],
[0xa4093822, 0x299f31d0],
[0xd16cfe09, 0x94fdcceb, 0x5001e420, 0x24126ea1])
@test_util.run_v2_only
def testNewStateThreeFry(self):
"""Tests that the new state is correct (for ThreeFry).
"""
with ops.device(xla_device_name()):
counter = 57
key = 0x1234
size = 46
state = [counter, key]
gen = random.Generator(state=state, alg=random.RNG_ALG_THREEFRY)
gen.uniform_full_int(shape=(size,), dtype=dtypes.uint32)
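      # Each ThreeFry2x32 invocation yields two uint32 samples, so drawing
      # `size` uint32s advances the counter by (size + 1) // 2; the uint64
      # draw below consumes one full 2x32 block per value, advancing it by
      # `size`.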
self.assertAllEqual([counter+(size+1)//2, key], gen.state.read_value())
gen.reset(state)
gen.uniform_full_int(shape=(size,), dtype=dtypes.uint64)
self.assertAllEqual([counter+size, key], gen.state.read_value())
@test_util.run_v2_only
def testNewStatePhilox(self):
"""Tests that the new state is correct (for Philox).
"""
with ops.device(xla_device_name()):
counter_low = 57
counter_high = 283
key = 0x1234
size = 47
state = [counter_low, counter_high, key]
gen = random.Generator(state=state, alg=random.RNG_ALG_PHILOX)
gen.uniform_full_int(shape=(size,), dtype=dtypes.uint32)
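      # Philox4x32 yields four uint32 (two uint64) samples per counter
      # increment, so counter_low advances by (size + 3) // 4 for the uint32
      # draw and by (size + 1) // 2 for the uint64 draw below.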
self.assertAllEqual([counter_low+(size+3)//4, counter_high, key],
gen.state.read_value())
gen.reset(state)
gen.uniform_full_int(shape=(size,), dtype=dtypes.uint64)
self.assertAllEqual([counter_low+(size+1)//2, counter_high, key],
gen.state.read_value())
      # Tests that a large counter_low correctly overflows into counter_high.
counter_low = -1 # same as 0xffffffffffffffff
counter_high = 283
size = 47
state = [counter_low, counter_high, key]
gen = random.Generator(state=state, alg=random.RNG_ALG_PHILOX)
gen.uniform_full_int(shape=(size,), dtype=dtypes.uint32)
self.assertAllEqual([(size+3)//4-1, counter_high+1, key],
gen.state.read_value())
gen.reset(state)
gen.uniform_full_int(shape=(size,), dtype=dtypes.uint64)
self.assertAllEqual([(size+1)//2-1, counter_high+1, key],
gen.state.read_value())
@parameterized.parameters(INTS)
@test_util.run_v2_only
def testXLAEqualsCPU(self, dtype):
"""Tests that XLA and CPU kernels generate the same integers."""
seed = 1234
shape = [315, 49]
with ops.device("/device:CPU:0"):
cpu = (random.Generator.from_seed(seed=seed, alg=random.RNG_ALG_PHILOX)
.uniform_full_int(shape=shape, dtype=dtype))
with ops.device(xla_device_name()):
xla = (random.Generator.from_seed(seed=seed, alg=random.RNG_ALG_PHILOX)
.uniform_full_int(shape=shape, dtype=dtype))
self.assertAllEqual(cpu, xla)
def _testRngIsNotConstant(self, rng, dtype):
# Tests that 'rng' does not always return the same value.
    # If the generator is working correctly, the probability that repeated
    # draws yield identical output is negligible.
x = rng(dtype).numpy()
y = rng(dtype).numpy()
self.assertFalse(np.array_equal(x, y))
@parameterized.parameters(ALGS)
@test_util.run_v2_only
def testUniformIsNotConstant(self, alg):
with ops.device(xla_device_name()):
gen = random.Generator.from_seed(seed=1234, alg=alg)
def rng(dtype):
maxval = dtype.max
# Workaround for b/125364959
if dtype == dtypes.uint64:
maxval = 10000000
return gen.uniform(shape=[2], dtype=dtype, maxval=maxval)
for dtype in self._ints + self._floats:
self._testRngIsNotConstant(rng, dtype)
@parameterized.parameters(ALGS)
@test_util.run_v2_only
def testNormalIsNotConstant(self, alg):
with ops.device(xla_device_name()):
gen = random.Generator.from_seed(seed=1234, alg=alg)
def rng(dtype):
return gen.normal(shape=[2], dtype=dtype)
for dtype in self._floats:
self._testRngIsNotConstant(rng, dtype)
@parameterized.parameters(ALGS)
@test_util.run_v2_only
def testUniformIsInRange(self, alg):
minval = 2
maxval = 33
size = 1000
with ops.device(xla_device_name()):
for dtype in self._ints + self._floats:
gen = random.Generator.from_seed(seed=1234, alg=alg)
x = gen.uniform(
shape=[size], dtype=dtype, minval=minval, maxval=maxval).numpy()
self.assertTrue(np.all(x >= minval))
self.assertTrue(np.all(x <= maxval))
@parameterized.parameters(ALGS)
@test_util.run_v2_only
def testNormalIsFinite(self, alg):
with ops.device(xla_device_name()):
gen = random.Generator.from_seed(seed=1234, alg=alg)
for dtype in self._floats:
x = gen.normal(shape=[10000], dtype=dtype).numpy()
self.assertTrue(np.all(np.isfinite(x)))
@parameterized.parameters(ALGS)
@test_util.run_v2_only
def testDistributionOfUniform(self, alg):
"""Use Pearson's Chi-squared test to test for uniformity."""
with ops.device(xla_device_name()):
n = 1000
seed = 12
for dtype in self._ints + self._floats:
gen = random.Generator.from_seed(seed=seed, alg=alg)
maxval = 1
if dtype.is_integer:
maxval = 100
x = gen.uniform(shape=[n], maxval=maxval, dtype=dtype).numpy()
if maxval > 1:
          # Normalize x to the range [0, 1).
x = x.astype(float) / maxval
# Tests that the values are distributed amongst 10 bins with equal
# probability. 16.92 is the Chi^2 value for 9 degrees of freedom with
# p=0.05. This test is probabilistic and would be flaky if the random
# seed were not fixed.
val = random_test_util.chi_squared(x, 10)
self.assertLess(val, 16.92)
@parameterized.parameters(ALGS)
@test_util.run_v2_only
def testDistributionOfNormal(self, alg):
"""Use Anderson-Darling test to test distribution appears normal."""
with ops.device(xla_device_name()):
n = 1000
for dtype in self._floats:
gen = random.Generator.from_seed(seed=1234, alg=alg)
x = gen.normal(shape=[n], dtype=dtype).numpy()
# The constant 2.492 is the 5% critical value for the Anderson-Darling
      # test where the mean and variance are known. This test is probabilistic,
      # so the seed is fixed to avoid flakiness.
self.assertLess(
random_test_util.anderson_darling(x.astype(float)), 2.492)
@parameterized.parameters(ALGS)
@test_util.run_v2_only
def testTruncatedNormal(self, alg):
with ops.device(xla_device_name()):
for dtype in self._floats:
gen = random.Generator.from_seed(seed=123, alg=alg)
n = 10000000
y = gen.truncated_normal(shape=[n], dtype=dtype).numpy()
random_test_util.test_truncated_normal(
self.assertEqual, self.assertAllClose, dtype, n, y)
@test_util.run_v2_only
def testErrors(self):
"""Tests that proper errors are raised.
"""
shape = [2, 3]
with ops.device(xla_device_name()):
gen = random.Generator.from_seed(seed=1234, alg=random.RNG_ALG_THREEFRY)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
r"algorithm must be of shape \[\], not"):
gen_stateful_random_ops.stateful_standard_normal_v2(
gen.state.handle, [0, 0], shape)
with self.assertRaisesWithPredicateMatch(
TypeError, "Requested dtype: int64"):
gen_stateful_random_ops.stateful_standard_normal_v2(
gen.state.handle, 1.1, shape)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
"Unsupported algorithm id"):
gen_stateful_random_ops.stateful_standard_normal_v2(
gen.state.handle, 123, shape)
var = variables.Variable([0, 0], dtype=dtypes.uint32)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
"Type mismatch for read of variable .* Expected int64; got"):
gen_stateful_random_ops.stateful_standard_normal_v2(
var.handle, random.RNG_ALG_THREEFRY, shape)
var = variables.Variable([[0]], dtype=dtypes.int64)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
"RNG state must have one and only one dimension, not"):
gen_stateful_random_ops.stateful_standard_normal_v2(
var.handle, random.RNG_ALG_THREEFRY, shape)
var = variables.Variable([0], dtype=dtypes.int64)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
"The size of the state must be at least"):
gen_stateful_random_ops.stateful_standard_normal_v2(
var.handle, random.RNG_ALG_THREEFRY, shape)
var = variables.Variable([0, 0], dtype=dtypes.int64)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
"The size of the state must be at least"):
gen_stateful_random_ops.stateful_standard_normal_v2(
var.handle, random.RNG_ALG_PHILOX, shape)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/stateful_random_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multinomial generation ops in the XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.platform import googletest
# TODO(srvasude): Merge this with
# third_party/tensorflow/python/kernel_tests/random/multinomial_op_test.py.
class CategoricalTest(xla_test.XLATestCase):
"""Test cases for random-number generating operators."""
def output_dtypes(self):
return set(self.int_types).intersection([np.int32, np.int64])
def _chi2(self, expected, actual):
"""Returns Chi2 GOF statistic."""
actual = np.asarray(actual)
expected = np.asarray(expected)
diff = actual - expected
chi2 = np.sum(diff * diff / expected)
return chi2
def _do_sampling(self, logits, num_samples):
"""Categorical samples from given input.
Args:
logits: Numpy ndarray of shape [batch_size, num_classes].
num_samples: Int; number of samples to draw.
Returns:
Frequencies from sampled classes; shape [batch_size, num_classes].
"""
with self.session(), self.test_scope():
random_seed.set_random_seed(1618)
op = random_ops.multinomial(logits, num_samples,
output_dtype=dtypes.int32)
d = self.evaluate(op)
batch_size, num_classes = logits.shape
freqs_mat = []
for i in range(batch_size):
cnts = dict(collections.Counter(d[i, :]))
      # Requires that the drawn class labels are within range.
self.assertLess(max(cnts.keys()), num_classes)
self.assertGreaterEqual(min(cnts.keys()), 0)
freqs = [(cnts[k] * 1. / num_samples if k in cnts else 0)
for k in range(num_classes)]
freqs_mat.append(freqs)
return freqs_mat
def _testRngIsNotConstant(self, rng, dtype, output_dtype):
# Tests that 'rng' does not always return the same value.
with self.session():
with self.test_scope():
x = rng(dtype, output_dtype)
      # If the generator is working correctly, the probability that repeated
      # evaluations yield identical output is negligible.
y = self.evaluate(x)
z = self.evaluate(x)
w = self.evaluate(x)
# We use exact equality here. If the random-number generator is producing
# deterministic output, all three outputs will be bitwise identical.
self.assertTrue((not np.array_equal(y, z)) or
(not np.array_equal(z, w)) or
(not np.array_equal(y, w)))
def testCategoricalIsNotConstant(self):
def rng(dtype, output_dtype):
return random_ops.multinomial(np.array([[1., 1., 1.]], dtype=dtype), 10,
output_dtype=output_dtype)
dtype = np.float32
for output_dtype in self.output_dtypes():
self._testRngIsNotConstant(rng, dtype, output_dtype)
def testCategoricalIsInRange(self):
for dtype in self.float_types:
for output_dtype in self.output_dtypes():
with self.session():
with self.test_scope():
x = random_ops.multinomial(
array_ops.ones(shape=[1, 20], dtype=dtype), 1000,
output_dtype=output_dtype)
y = self.evaluate(x)
self.assertTrue((y >= 0).sum() == 1000)
self.assertTrue((y < 20).sum() == 1000)
def testSamplingCorrectness(self):
np.random.seed(1618) # Make it reproducible.
num_samples = 40000
rand_probs = np.random.dirichlet([1., 1., 2., 3.])
rand_probs2 = np.random.dirichlet([1., 4., 5.], size=3) # batched
for probs in [[.5, .5], [.85, .05, .1], rand_probs, rand_probs2]:
probs = np.asarray(probs)
if len(probs.shape) == 1:
probs = probs.reshape(1, probs.size) # singleton batch
logits = np.log(probs).astype(np.float32)
freqs = self._do_sampling(logits, num_samples)
# the test here is similar to
# python/kernel_tests/random/multinomial_op_test.py
# Note that df >= 1 in all these cases. Choosing a cutoff of 1e-3
# corresponds to an alpha value of 2.5% for df = 1, and smaller for larger
# df.
chi2 = self._chi2(probs, freqs)
self.assertLess(chi2, 1e-3)
def testStatelessMultinomialIsInRange(self):
for dtype in self.float_types.intersection(
[dtypes.float32, dtypes.bfloat16]):
for output_dtype in self.output_dtypes():
with self.session() as sess:
with self.test_scope():
seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
x = stateless_random_ops.stateless_multinomial(
array_ops.ones(shape=[1, 20], dtype=dtype),
1000,
seed_t,
output_dtype=output_dtype)
y = sess.run(x, {seed_t: [0x12345678, 0xabcdef12]})
self.assertTrue((y >= 0).sum() == 1000)
self.assertTrue((y < 20).sum() == 1000)
def testDeterminismMultinomial(self):
# Stateless values should be equal iff the seeds are equal (roughly)
num_samples = 10
with self.session(), self.test_scope():
seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
seeds = [(x, y) for x in range(5) for y in range(5)] * 3
for logits in ([[0.1, 0.25, 0.5, 0.15]], [[0.5, 0.5], [0.8, 0.2],
[0.25, 0.75]]):
pure = stateless_random_ops.stateless_multinomial(
logits, num_samples, seed=seed_t)
values = [(seed, pure.eval(feed_dict={seed_t: seed})) for seed in seeds]
for s0, v0 in values:
for s1, v1 in values:
self.assertEqual(s0 == s1, np.all(v0 == v1))
def testEmpty(self):
with self.session():
with self.test_scope():
x = random_ops.multinomial(
array_ops.zeros([42, 40]), 0, output_dtype=dtypes.int32)
y = self.evaluate(x)
self.assertEqual(y.shape, (42, 0))
def testEmptyStateless(self):
with self.session() as sess:
with self.test_scope():
seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
x = stateless_random_ops.stateless_multinomial(
array_ops.zeros([42, 40]),
0,
seed=seed_t,
output_dtype=dtypes.int32)
y = sess.run(x, {seed_t: [0x12345678, 0xabcdef12]})
self.assertEqual(y.shape, (42, 0))
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/categorical_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.sparse_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
def _SparseToDense(sparse_indices,
output_size,
sparse_values,
default_value,
validate_indices=True):
feed_sparse_indices = array_ops.placeholder(dtypes.int32)
feed_dict = {feed_sparse_indices: sparse_indices}
return sparse_ops.sparse_to_dense(
feed_sparse_indices,
output_size,
sparse_values,
default_value=default_value,
validate_indices=validate_indices).eval(feed_dict=feed_dict)
class SparseToDenseTest(xla_test.XLATestCase):
def testInt(self):
with self.session(), self.test_scope():
tf_ans = _SparseToDense([1, 3], [5], 1, 0)
np_ans = np.array([0, 1, 0, 1, 0]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
def testFloat(self):
with self.session(), self.test_scope():
tf_ans = _SparseToDense([1, 3], [5], 1.0, 0.0)
np_ans = np.array([0, 1, 0, 1, 0]).astype(np.float32)
self.assertAllClose(np_ans, tf_ans)
def testSetValue(self):
with self.session(), self.test_scope():
tf_ans = _SparseToDense([1, 3], [5], [1, 2], -1)
np_ans = np.array([-1, 1, -1, 2, -1]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
def testSetSingleValue(self):
with self.session(), self.test_scope():
tf_ans = _SparseToDense([1, 3], [5], 1, -1)
np_ans = np.array([-1, 1, -1, 1, -1]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
def test2d(self):
# pylint: disable=bad-whitespace
with self.session(), self.test_scope():
tf_ans = _SparseToDense([[1, 3], [2, 0]], [3, 4], 1, -1)
np_ans = np.array([[-1, -1, -1, -1],
[-1, -1, -1, 1],
[ 1, -1, -1, -1]]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
def testZeroDefault(self):
with self.session():
x = sparse_ops.sparse_to_dense(2, [4], 7).eval()
self.assertAllEqual(x, [0, 0, 7, 0])
def test3d(self):
with self.session(), self.test_scope():
tf_ans = _SparseToDense([[1, 3, 0], [2, 0, 1]], [3, 4, 2], 1, -1)
np_ans = np.ones((3, 4, 2), dtype=np.int32) * -1
np_ans[1, 3, 0] = 1
np_ans[2, 0, 1] = 1
self.assertAllClose(np_ans, tf_ans)
def testDegenerateIndexMatrix(self):
with self.session(), self.test_scope():
tf_ans = _SparseToDense([[2], [3], [4], [5], [6], [7], [8], [9]], [10],
[1, 2, 3, 4, 5, 6, 7, 8], -1)
self.assertAllClose([-1, -1, 1, 2, 3, 4, 5, 6, 7, 8], tf_ans)
def testBadShape(self):
with self.session(), self.test_scope():
with self.assertRaisesWithPredicateMatch(ValueError, "must be rank 1"):
_SparseToDense([1, 3], [[5], [3]], 1, -1)
def testBadValue(self):
with self.session(), self.test_scope():
with self.assertRaisesOpError(
r"sparse_values has incorrect shape \[2,1\], "
r"should be \[\] or \[2\]"):
_SparseToDense([1, 3], [5], [[5], [3]], -1)
def testBadNumValues(self):
with self.session(), self.test_scope():
with self.assertRaisesOpError(
r"sparse_values has incorrect shape \[3\], should be \[\] or \[2\]"):
_SparseToDense([1, 3], [5], [1, 2, 3], -1)
def testBadDefault(self):
with self.session(), self.test_scope():
with self.assertRaisesOpError("default_value should be a scalar"):
_SparseToDense([1, 3], [5], [1, 2], [0])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/sparse_to_dense_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for operators with no arguments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import googletest
class NullaryOpsTest(xla_test.XLATestCase):
def _testNullary(self, op, expected):
with self.session() as session:
with self.test_scope():
output = op()
result = session.run(output)
self.assertAllClose(result, expected, rtol=1e-3)
def testNoOp(self):
with self.session():
with self.test_scope():
output = control_flow_ops.no_op()
# This should not crash.
output.run()
def testConstants(self):
for dtype in self.numeric_types:
constants = [
dtype(42),
np.array([], dtype=dtype),
np.array([1, 2], dtype=dtype),
np.array([7, 7, 7, 7, 7], dtype=dtype),
np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array([[[1, 2], [3, 4], [5, 6]], [[10, 20], [30, 40], [50, 60]]],
dtype=dtype),
np.array([[[]], [[]]], dtype=dtype),
np.array([[[[1]]]], dtype=dtype),
]
for c in constants:
self._testNullary(lambda c=c: constant_op.constant(c), expected=c)
def testComplexConstants(self):
for dtype in self.complex_types:
constants = [
dtype(42 + 3j),
np.array([], dtype=dtype),
np.ones([50], dtype=dtype) * (3 + 4j),
np.array([1j, 2 + 1j], dtype=dtype),
np.array([[1, 2j, 7j], [4, 5, 6]], dtype=dtype),
np.array([[[1, 2], [3, 4 + 6j], [5, 6]],
[[10 + 7j, 20], [30, 40], [50, 60]]],
dtype=dtype),
np.array([[[]], [[]]], dtype=dtype),
np.array([[[[1 + 3j]]]], dtype=dtype),
]
for c in constants:
self._testNullary(lambda c=c: constant_op.constant(c), expected=c)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/nullary_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AddSign."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.contrib.opt.python.training import addsign
from tensorflow.contrib.opt.python.training import sign_decay
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def py_linear_decay_fn(decay_steps):
def linear_decay(step):
step = min(step, decay_steps)
return float(decay_steps - step) / decay_steps
return linear_decay
def addsign_update_numpy(params,
g_t,
m,
lr,
alpha=1.0,
beta=0.9,
py_sign_decay_fn=None,
t=None):
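  # One AddSign step, as implemented below:
  #   m_t      = beta * m + (1 - beta) * g_t
  #   params_t = params - lr * (alpha + decay * sign(g_t) * sign(m_t)) * g_t
  # where decay is 1 unless a sign-decay schedule is given, in which case it
  # is py_sign_decay_fn(t - 1).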
m_t = beta * m + (1 - beta) * g_t
if py_sign_decay_fn is None:
sign_decayed = 1.0
else:
sign_decayed = py_sign_decay_fn(t-1)
multiplier = alpha + sign_decayed * np.sign(g_t) * np.sign(m_t)
params_t = params - lr * multiplier * g_t
return params_t, m_t
class AddSignTest(xla_test.XLATestCase):
def _testDense(self,
learning_rate=0.1,
sign_decay_fn=None,
py_sign_decay_fn=None,
alpha=1.0,
beta=0.9):
for dtype in self.float_types:
with self.session(), self.test_scope():
# Initialize variables for numpy implementation.
m0, m1 = 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
global_step = resource_variable_ops.ResourceVariable(0, trainable=False)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = addsign.AddSignOptimizer(
learning_rate=learning_rate,
alpha=alpha,
beta=beta,
sign_decay_fn=sign_decay_fn,
)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
global_step=global_step)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 7 steps of AddSign
# first 4 steps with positive gradient
# last 3 steps with negative gradient (sign(gm) should be -1)
for t in range(1, 8):
if t < 5:
update.run()
else:
neg_update.run()
var0_np, m0 = addsign_update_numpy(
var0_np,
grads0_np if t < 5 else -grads0_np,
m0,
learning_rate,
alpha=alpha,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
var1_np, m1 = addsign_update_numpy(
var1_np,
grads1_np if t < 5 else -grads1_np,
m1,
learning_rate,
alpha=alpha,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0), half_rtol=1e-2)
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testDense(self):
decay_steps = 10
sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
py_sign_decay_fn = py_linear_decay_fn(decay_steps)
self._testDense()
self._testDense(learning_rate=0.01, alpha=0.1, beta=0.8)
self._testDense(
sign_decay_fn=sign_decay_fn, py_sign_decay_fn=py_sign_decay_fn)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/addsign_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
def nhwc_to_format(x, data_format):
"""Converts a numpy array from NHWC format to `data_format`."""
rank = len(x.shape)
if data_format == "NCHW":
return np.transpose(x, [0, rank - 1] + list(range(1, rank - 1)))
elif data_format == "NHWC":
return x
else:
raise ValueError("Unknown format {}".format(data_format))
class UnaryOpsTest(xla_test.XLATestCase):
"""Test cases for unary operators."""
def _assertOpOutputMatchesExpected(self,
op,
inp,
expected,
equality_test=None,
rtol=1e-3,
atol=1e-5):
"""Verifies that 'op' produces 'expected' when fed input 'inp' .
Args:
op: operator to test
inp: numpy input array to use as input to 'op'.
expected: numpy array representing the expected output of 'op'.
equality_test: either None, or a function that tests two numpy arrays for
equality. If None, self.assertAllClose is used.
rtol: relative tolerance for equality test.
atol: absolute tolerance for equality test.
"""
with self.session() as session:
with self.test_scope():
pinp = array_ops.placeholder(
dtypes.as_dtype(inp.dtype), inp.shape, name="a")
output = op(pinp)
result = session.run(output, {pinp: inp})
if equality_test is None:
self.assertEqual(output.dtype, expected.dtype)
self.assertAllCloseAccordingToType(
expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03)
else:
equality_test(result, expected, rtol=rtol, atol=atol)
def ListsAreClose(self, result, expected, rtol, atol):
"""Tests closeness of two lists of floats."""
self.assertEqual(len(result), len(expected))
for i in xrange(len(result)):
self.assertAllClose(result[i], expected[i], rtol, atol)
def testAllTypeOps(self):
for dtype in self.numeric_types - {np.int8, np.uint8}:
self._assertOpOutputMatchesExpected(
array_ops.diag, np.array([1, 2, 3, 4], dtype=dtype),
np.array(
[[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.diag_part,
np.arange(36).reshape([2, 3, 2, 3]).astype(dtype),
np.array([[0, 7, 14], [21, 28, 35]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.diag, np.array([[1, 2], [3, 4]], dtype=dtype),
np.array(
[[[[1, 0], [0, 0]], [[0, 2], [0, 0]]], [[[0, 0], [3, 0]],
[[0, 0], [0, 4]]]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.identity,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
# TODO(penporn): Once XLA supports MatrixDiagV2, change the call to
# gen_array_ops.matrix_diag* (V1) to array_ops.matrix_diag* (V2).
self._assertOpOutputMatchesExpected(
gen_array_ops.matrix_diag, np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([[[1, 0], [0, 2]], [[3, 0], [0, 4]]], dtype=dtype))
self._assertOpOutputMatchesExpected(
gen_array_ops.matrix_diag, np.array([1, 2, 3, 4], dtype=dtype),
np.array(
[[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
gen_array_ops.matrix_diag,
np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=dtype),
np.array(
[[[[1, 0, 0], [0, 2, 0], [0, 0, 3]], [[4, 0, 0], [0, 5, 0], [
0, 0, 6
]]], [[[7, 0, 0], [0, 8, 0], [0, 0, 9]], [[10, 0, 0], [0, 11, 0],
[0, 0, 12]]]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
gen_array_ops.matrix_diag_part,
np.arange(3 * 2 * 4).reshape([3, 2, 4]).astype(dtype),
np.array([[0, 5], [8, 13], [16, 21]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.prevent_gradient,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[[[]]]]], dtype=dtype),
expected=np.array([], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1], [2]]], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1]], [[2]]], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
expected=np.array([[1, 2], [3, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.stop_gradient,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
def testFloatOps(self):
for dtype in self.float_types:
x = np.arange(-0.90, 0.90, 0.25)
self._assertOpOutputMatchesExpected(
math_ops.acos, x.astype(dtype), expected=np.arccos(x).astype(dtype))
self._assertOpOutputMatchesExpected(
math_ops.asin, x.astype(dtype), expected=np.arcsin(x).astype(dtype))
x = np.arange(-3, 3).reshape(1, 3, 2)
self._assertOpOutputMatchesExpected(
math_ops.atan, x.astype(dtype), expected=np.arctan(x).astype(dtype))
self._assertOpOutputMatchesExpected(
math_ops.acosh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[0, 1.3169579, 1.76274717, 2.06343707], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.asinh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[0.88137359, 1.44363548, 1.81844646, 2.09471255], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.atanh,
np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype),
expected=np.array(
[0.10033535, 0.20273255, 0.3095196, 0.42364893], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.ceil,
np.array([[-1.7, 1.2]], dtype=dtype),
expected=np.array([[-1, 2]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.cosh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[1.54308063, 3.76219569, 10.067662, 27.30823284], dtype=dtype))
# Disable float16 testing for now
if dtype != np.float16:
x = np.arange(-10, 10, 1).astype(dtype)
with self.session() as session:
erf_x = session.run(math_ops.erf(x))
erfc_x = session.run(math_ops.erfc(x))
self._assertOpOutputMatchesExpected(math_ops.erf, x, expected=erf_x)
self._assertOpOutputMatchesExpected(math_ops.erfc, x, expected=erfc_x)
self._assertOpOutputMatchesExpected(
math_ops.exp,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[0.36787945, 2.7182817]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.expm1,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-0.63212056, 1.71828183]], dtype=dtype),
rtol=1e-5)
self._assertOpOutputMatchesExpected(
math_ops.floor,
np.array([[-1.7, 1.2]], dtype=dtype),
expected=np.array([[-2, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.is_finite,
np.array(
[[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype),
expected=np.array([[0, 1, 1, 1, 1, 1, 1, 0, 0]], dtype=np.bool))
# Tests for tf.nn ops.
self._assertOpOutputMatchesExpected(
nn_ops.l2_loss, np.array([[[]]], dtype=dtype), expected=dtype(0))
self._assertOpOutputMatchesExpected(nn_ops.l2_loss, dtype(4), dtype(8))
self._assertOpOutputMatchesExpected(
nn_ops.l2_loss, np.array([[-2, 4]], dtype=dtype), expected=dtype(10))
self._assertOpOutputMatchesExpected(
math_ops.reciprocal,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[1, 0.5]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.log,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0, 0.69314718]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sin,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0.841478, 0.909302]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.cos,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0.540297, -0.41614]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.log1p,
np.array([[1e-14, 1e-15, 0.6]], dtype=dtype),
expected=np.log1p(np.array([[1e-14, 1e-15, 0.6]],
dtype=dtype)).astype(dtype),
rtol=1e-4,
atol=1e-6)
self._assertOpOutputMatchesExpected(
math_ops.rint,
np.array(
[[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
[0.5, 1.5, 2.5, 3.5]],
dtype=dtype),
expected=np.array(
[[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.round,
np.array(
[[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
[0.5, 1.5, 2.5, 3.5]],
dtype=dtype),
expected=np.array(
[[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.rsqrt,
np.array([[4, 16]], dtype=dtype),
expected=np.array([[0.5, 0.25]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid,
np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[0.7310586, 0.7310586, 0.7310586, 0.7310586],
[0.7310586, 0.880797, 0.95257413, 0.98201376]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid,
np.array([-300, -150, 0, 150, 300], dtype=dtype),
expected=np.array([0, 0, 0.5, 1, 1], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sinh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[1.17520119, 3.62686041, 10.01787493, 27.2899172], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sqrt,
np.array([[4, 9]], dtype=dtype),
expected=np.array([[2, 3]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.tan,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[1.55740772, -2.18503986, -0.14254654, 1.15782128], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.tanh,
np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[0.76159418, 0.76159418, 0.76159418, 0.76159418],
[0.76159418, 0.96402758, 0.99505478, 0.99932933]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.log_softmax,
np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[-1.3862944, -1.3862944, -1.3862944, -1.3862944],
[-3.4401896, -2.4401896, -1.4401897, -0.44018969]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.elu,
np.array([[-1, 0, 1, -1e-6]], dtype=dtype),
expected=np.array([[-0.63212056, 0, 1, -9.999995e-07]], dtype=dtype),
rtol=1e-5,
atol=1e-6)
self._assertOpOutputMatchesExpected(
nn_ops.selu,
np.array([[-1, 0, 1, -1e-5]], dtype=dtype),
expected=np.array(
[[-1.11133074, 0., 1.05070099, -1.758090550379974e-05]],
dtype=dtype),
rtol=1e-5,
atol=1e-6)
self._assertOpOutputMatchesExpected(
nn_ops.relu,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[0, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.relu6,
np.array([[-0.05, 6.05, 5]], dtype=dtype),
expected=np.array([[0, 6, 5]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.leaky_relu,
np.array([[-2, -1, 0, 1, 2]], dtype=dtype),
expected=np.array([[-0.4, -0.2, 0.0, 1.0, 2.0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softmax,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array([0.032058604, 0.087144323, 0.23688284, 0.64391428],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softmax,
np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[0.25, 0.25, 0.25, 0.25],
[0.032058604, 0.087144323, 0.23688284, 0.64391428]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softmax,
np.array([[[1, 1], [1, 1]], [[1, 2], [3, 4]]], dtype=dtype),
expected=np.array(
[[[0.5, 0.5], [0.5, 0.5]],
[[0.26894142, 0.73105858], [0.26894142, 0.73105858]]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softsign,
np.array([[-2, -1, 0, 1, 2]], dtype=dtype),
expected=np.array(
[[-0.66666669, -0.5, 0, 0.5, 0.66666669]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sign,
np.array([[-2.0, -1.0, -0.0, +0.0, 1.0, 2.0]], dtype=dtype),
expected=np.array([[-1.0, -1.0, -0.0, +0.0, 1.0, 1.0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.is_finite,
np.array(
[[42, float("inf"), -123], [float("nan"), 0, -0.0]], dtype=dtype),
expected=np.array(
[[True, False, True], [False, True, True]], dtype=np.bool))
self._assertOpOutputMatchesExpected(
math_ops.lgamma,
np.array(0.5, dtype=dtype),
expected=np.array(np.log(np.pi) / 2, dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.lgamma,
np.array(
[[1, 2, 3], [4, 5, 6], [1 / 2, 3 / 2, 5 / 2],
[-3 / 2, -7 / 2, -11 / 2]],
dtype=dtype),
expected=np.array(
[
[0, 0, np.log(2.0)],
[np.log(6.0), np.log(24.0),
np.log(120)],
[
np.log(np.pi) / 2,
np.log(np.pi) / 2 - np.log(2),
np.log(np.pi) / 2 - np.log(4) + np.log(3)
],
[
np.log(np.pi) / 2 - np.log(3) + np.log(4),
np.log(np.pi) / 2 - np.log(105) + np.log(16),
np.log(np.pi) / 2 - np.log(10395) + np.log(64),
],
],
dtype=dtype))
# The actual result is complex. Take the real part.
self._assertOpOutputMatchesExpected(
math_ops.lgamma,
np.array([-1 / 2, -5 / 2, -9 / 2], dtype=dtype),
expected=np.array(
[
np.log(np.pi) / 2 + np.log(2),
np.log(np.pi) / 2 - np.log(15) + np.log(8),
np.log(np.pi) / 2 - np.log(945) + np.log(32),
],
dtype=dtype),
atol=1e-4)
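      # Clarifying note (added, not from the original source): the expected
      # values follow from Gamma(1/2) = sqrt(pi) and the recurrence
      # Gamma(z + 1) = z * Gamma(z), e.g.
      #   lgamma(3/2) = log(sqrt(pi) / 2) = log(pi) / 2 - log(2).
      # For the negative half-integers, lgamma returns log|Gamma(z)|, whose
      # magnitude follows from the reflection formula
      # Gamma(z) * Gamma(1 - z) = pi / sin(pi * z).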
self._assertOpOutputMatchesExpected(
math_ops.digamma,
np.array(
[[1.0, 0.5, 1 / 3.0], [0.25, 1 / 6.0, 0.125], [2.0, 3.0, 4.0],
[6.0, 8.0, 9.0]],
dtype=dtype),
expected=np.array(
[
[
-np.euler_gamma, -2 * np.log(2) - np.euler_gamma,
-np.pi / 2 / np.sqrt(3) - 3 * np.log(3) / 2 -
np.euler_gamma
],
[
-np.pi / 2 - 3 * np.log(2) - np.euler_gamma,
-np.pi * np.sqrt(3) / 2 - 2 * np.log(2) -
3 * np.log(3) / 2 - np.euler_gamma,
-np.pi / 2 - 4 * np.log(2) -
(np.pi + np.log(2 + np.sqrt(2)) - np.log(2 - np.sqrt(2)))
/ np.sqrt(2) - np.euler_gamma
],
[
1 - np.euler_gamma, 1.5 - np.euler_gamma,
11 / 6.0 - np.euler_gamma
],
[
137 / 60.0 - np.euler_gamma, 363 / 140.0 - np.euler_gamma,
761 / 280.0 - np.euler_gamma
],
],
dtype=dtype))
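      # Clarifying note (added, not from the original source): for a positive
      # integer n, digamma(n) = H_{n-1} - euler_gamma, where H_k is the k-th
      # harmonic number; e.g. digamma(4) = 1 + 1/2 + 1/3 - euler_gamma
      # = 11/6 - euler_gamma, which matches the expected entries above.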
def quantize_and_dequantize_v2(x):
return array_ops.quantize_and_dequantize_v2(
x, -127, 127, signed_input=True, num_bits=8)
self._assertOpOutputMatchesExpected(
quantize_and_dequantize_v2,
np.array([-1, -0.5, 0, 0.3], dtype=dtype),
expected=np.array([-1., -0.5, 0., 0.296875], dtype=dtype))
def quantize_and_dequantize_v2_round_half_up(x):
return array_ops.quantize_and_dequantize_v2(
x,
-1,
1.0,
signed_input=True,
num_bits=8,
range_given=True,
round_mode="HALF_UP")
self._assertOpOutputMatchesExpected(
quantize_and_dequantize_v2_round_half_up,
np.array([-0.8, -0.5, 0, 0.3, 0.8, -2, 33], dtype=dtype),
expected=np.array([
-102.0 / 127,
-63.0 / 127,
0,
38.0 / 127,
102.0 / 127,
-128.0 / 127,
1,
],
dtype=dtype))
def quantize_and_dequantize_v2_round_half_to_even(x):
return array_ops.quantize_and_dequantize_v2(
x,
-1.0,
1.0,
signed_input=True,
num_bits=8,
range_given=True,
round_mode="HALF_TO_EVEN")
self._assertOpOutputMatchesExpected(
quantize_and_dequantize_v2_round_half_to_even,
np.array(
[
-0.8,
                  # The -0.5 should become -63.5 after scaling, and with
                  # HALF_TO_EVEN rounding it should then become -64. But in
                  # the unary_ops_test_cpu_ondemand variant this fails: the
                  # scaled value comes out as -63.499996 before rounding and
                  # gets rounded to -63 instead.
                  # TODO(sreenik): Someone more familiar with this test needs
                  # to take a look and resolve this. It works on all other
                  # platform variants, such as CPU and GPU. (See the worked
                  # rounding arithmetic in the comment after this call.)
# -0.5,
0,
0.3,
0.8,
-2,
33
],
dtype=dtype),
expected=np.array(
[
-102.0 / 127,
# -64.0 / 127,
0,
38.0 / 127,
102.0 / 127,
-128.0 / 127,
1,
],
dtype=dtype))
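      # Worked rounding arithmetic (added for clarity, not part of the
      # original test): with range_given=True, min=-1, max=1 and 8 signed
      # bits, inputs are scaled by 127 before rounding, so
      #   0.3  -> 38.1  -> rounds to 38  -> dequantizes to 38 / 127
      #   -0.5 -> -63.5 -> HALF_UP gives -63, HALF_TO_EVEN gives -64
      # which is exactly the difference between the two tests above.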
def quantize_and_dequantize_v3(x):
return array_ops.quantize_and_dequantize_v3(
x, -127, 127, num_bits=8, signed_input=True, range_given=False)
self._assertOpOutputMatchesExpected(
quantize_and_dequantize_v3,
np.array([-1, -0.5, 0, 0.3], dtype=dtype),
expected=np.array([-1., -0.5, 0., 0.296875], dtype=dtype))
def testComplexOps(self):
for dtype in self.complex_types:
self._assertOpOutputMatchesExpected(
math_ops.acosh,
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
expected=np.arccosh(
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.asinh,
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
expected=np.arcsinh(
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.atanh,
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
expected=np.arctanh(
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.cosh,
np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype),
expected=np.cosh(np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.sinh,
np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
expected=np.sinh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.exp,
np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype),
expected=np.exp(np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.expm1,
np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype),
expected=np.expm1(np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype)),
rtol=1e-6,
atol=1e-6)
self._assertOpOutputMatchesExpected(
math_ops.reciprocal,
np.array([[1, 2j, 2 + 3j]], dtype=dtype),
expected=1.0 / np.array([[1, 2j, 2 + 3j]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.log,
np.array([[5j, 3 - 2j]], dtype=dtype),
expected=np.log(np.array([[5j, 3 - 2j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.sin,
np.array([[5j, 3 - 2j]], dtype=dtype),
expected=np.sin(np.array([[5j, 3 - 2j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.cos,
np.array([[5j, 3 - 2j]], dtype=dtype),
expected=np.cos(np.array([[5j, 3 - 2j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.log1p,
np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype),
expected=np.log1p(
np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype)),
rtol=1e-4,
atol=1e-6)
val = np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)
self._assertOpOutputMatchesExpected(
math_ops.rsqrt, val, expected=1 / np.sqrt(val))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid, val, expected=1 / (1 + np.exp(-val)))
self._assertOpOutputMatchesExpected(
math_ops.sqrt, val, expected=np.sqrt(val))
self._assertOpOutputMatchesExpected(
math_ops.tanh,
np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
expected=np.tanh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.tan,
np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
expected=np.tan(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))
ctypes = {np.complex64: np.float32, np.complex128: np.float64}
self._assertOpOutputMatchesExpected(
math_ops.abs,
np.array([[3 - 4j, -1j, np.inf]], dtype=dtype),
expected=np.array([[5, 1, np.inf]], dtype=ctypes[dtype]))
self._assertOpOutputMatchesExpected(
math_ops.negative,
np.array([[-1 + 2j, -3j]], dtype=dtype),
expected=np.array([[1 - 2j, 3j]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.square,
np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype),
expected=np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype)**2)
self._assertOpOutputMatchesExpected(
array_ops.zeros_like,
np.array([[4j, 3 - 2j], [2, -1j]], dtype=dtype),
expected=np.array([[0, 0], [0, 0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.ones_like,
np.array([[-4j, 3 + 2j], [2, -1j]], dtype=dtype),
expected=np.array([[1, 1], [1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.angle,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.angle(np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.conj,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.array([1 - 3j, -4 - 7j, 2.7, 3j], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.imag,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.array([3, 7, 0, -3], dtype=ctypes[dtype]))
self._assertOpOutputMatchesExpected(
math_ops.real,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.array([1, -4, 2.7, 0], dtype=ctypes[dtype]))
def testIntOps(self):
for dtype in self.int_types:
self._assertOpOutputMatchesExpected(
bitwise_ops.invert,
np.array([0, -1, 1, 16, 42], dtype=dtype),
expected=np.array([-1, 0, -2, -17, -43], dtype=dtype))
def testNumericOps(self):
for dtype in self.numeric_types - {np.int8, np.uint8}:
self._assertOpOutputMatchesExpected(
math_ops.abs,
np.array([[2, -1]], dtype=dtype),
expected=np.array([[2, 1]], dtype=np.real(dtype(0)).dtype))
self._assertOpOutputMatchesExpected(
math_ops.negative,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[1, -1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.square,
np.array([[-2, 3]], dtype=dtype),
expected=np.array([[4, 9]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.zeros_like,
np.array([[4, 3], [2, 1]], dtype=dtype),
expected=np.array([[0, 0], [0, 0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.ones_like,
np.array([[4, 3], [2, 1]], dtype=dtype),
expected=np.array([[1, 1], [1, 1]], dtype=dtype))
# TODO(phawkins): these tests fail unless fastmath optimizations
# are disabled. Use more robust IsInf/IsNaN detection and enable these
# tests.
@unittest.skip("test case fails in fast-math mode")
def testIsInfAndIsNan(self):
for dtype in self.float_types:
self._assertOpOutputMatchesExpected(
math_ops.is_inf,
np.array(
[[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype),
expected=np.array([[1, 0, 0, 0, 0, 0, 0, 1, 0]], dtype=np.bool))
self._assertOpOutputMatchesExpected(
math_ops.is_nan,
np.array(
[[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype),
expected=np.array([[0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype=np.bool))
self._assertOpOutputMatchesExpected(
math_ops.sign,
np.array([[np.nan]], dtype=dtype),
expected=np.array([[0.0]], dtype=dtype))
def testLogicalOps(self):
self._assertOpOutputMatchesExpected(
math_ops.logical_not,
np.array([[True, False], [False, True]], dtype=np.bool),
expected=np.array([[False, True], [True, False]], dtype=np.bool))
def testBiasAddGrad(self):
self._assertOpOutputMatchesExpected(
gen_nn_ops.bias_add_grad,
np.array([[1., 2.], [3., 4.]], dtype=np.float32),
expected=np.array([4., 6.], dtype=np.float32))
self._assertOpOutputMatchesExpected(
lambda x: gen_nn_ops.bias_add_grad(x, data_format="NCHW"),
np.array(
[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]], dtype=np.float32),
expected=np.array([14., 22.], dtype=np.float32))
def testCast(self):
shapes = [[], [4], [2, 3], [2, 0, 4]]
types = (
set([dtypes.bool, dtypes.int32, dtypes.float32])
| self.complex_tf_types)
for shape in shapes:
for src_type in types:
for dst_type in types:
src = np.arange(np.prod(shape)).astype(src_type.as_numpy_dtype)
if src_type in self.complex_tf_types:
src += (np.arange(np.prod(shape)) * 2j).astype(
src_type.as_numpy_dtype)
src = src.reshape(shape)
dst = src.astype(dst_type.as_numpy_dtype)
self._assertOpOutputMatchesExpected(
lambda x, dst_type=dst_type: math_ops.cast(x, dst_type),
src,
expected=dst)
def testBitcast(self):
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.int32),
np.array([1, 0x3f800000], np.int32),
expected=np.array([1, 0x3f800000], np.int32))
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.float32),
np.array([1, 0x3f800000], np.int32),
expected=np.array([1e-45, 1.0], np.float32))
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.int32),
np.array([1e-45, 1.0], np.float32),
expected=np.array([1, 0x3f800000], np.int32))
if np.int64 in self.numeric_types:
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.int64),
np.array([1, 0x100000003f800000], np.uint64),
expected=np.array([1, 0x100000003f800000], np.int64))
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.uint64),
np.array([1, 0x100000003f800000], np.int64),
expected=np.array([1, 0x100000003f800000], np.uint64))
def testInvertPermutation(self):
self._assertOpOutputMatchesExpected(
array_ops.invert_permutation,
np.array([1, 2, 0], np.int32),
expected=np.array([2, 0, 1], dtype=np.int32))
def testInvertPermutationTwiceIsNoop(self):
self._assertOpOutputMatchesExpected(
lambda x: array_ops.invert_permutation(array_ops.invert_permutation(x)),
np.array([1, 2, 0], np.int32),
expected=np.array([1, 2, 0], dtype=np.int32))
def testRank(self):
rank_op = lambda x: array_ops.rank_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
rank_op, dtype(7), expected=np.int32(0))
self._assertOpOutputMatchesExpected(
rank_op, np.array([[], []], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
rank_op, np.array([-1, 1], dtype=dtype), expected=np.int32(1))
self._assertOpOutputMatchesExpected(
rank_op, np.array([[-1, 1]], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
rank_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int32(2))
def testShape(self):
shape_op = lambda x: array_ops.shape_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
shape_op, dtype(7), expected=np.array([], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[], []], dtype=dtype),
expected=np.array([2, 0], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([-1, 1], dtype=dtype),
expected=np.array([2], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([1, 2], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.array([3, 1], dtype=np.int32))
def testSize(self):
size_op = lambda x: array_ops.size_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
size_op, dtype(7), expected=np.int32(1))
self._assertOpOutputMatchesExpected(
size_op, np.array([[], []], dtype=dtype), expected=np.int32(0))
self._assertOpOutputMatchesExpected(
size_op, np.array([-1, 1], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
size_op, np.array([[-1, 1]], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
size_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int32(3))
def testSizeWithInt64OutType(self):
def size_op(x):
return array_ops.size_internal(x, optimize=False, out_type=np.int64)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
size_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int64(3))
def testUnpack(self):
self._assertOpOutputMatchesExpected(
array_ops.unstack,
np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
expected=[
np.array([1., 2.], dtype=np.float32),
np.array([3., 4.], dtype=np.float32),
np.array([5., 6.], dtype=np.float32),
],
equality_test=self.ListsAreClose)
self._assertOpOutputMatchesExpected(
lambda x: array_ops.unstack(x, axis=1),
np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
expected=[
np.array([1., 3., 5.], dtype=np.float32),
np.array([2., 4., 6.], dtype=np.float32),
],
equality_test=self.ListsAreClose)
def testDepthToSpace(self):
def make_op(data_format):
def op(x):
return array_ops.depth_to_space(
x, block_size=2, data_format=data_format)
return op
for dtype in self.numeric_types:
for data_format in ["NCHW", "NHWC"]:
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array([[[[1, 2, 3, 4]]]], dtype=dtype), data_format),
expected=nhwc_to_format(
np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]], dtype=dtype),
data_format),
expected=nhwc_to_format(
np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]],
dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
[13, 14, 15, 16]]]],
dtype=dtype), data_format),
expected=nhwc_to_format(
np.array(
[[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]],
dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op("NCHW_VECT_C"),
np.arange(32, dtype=dtype).reshape((1, 8, 1, 1, 4)),
expected=np.array([[[[[0, 1], [8, 9]], [[16, 17], [24, 25]]],
[[[2, 3], [10, 11]], [[18, 19], [26, 27]]],
[[[4, 5], [12, 13]], [[20, 21], [28, 29]]],
[[[6, 7], [14, 15]], [[22, 23], [30, 31]]]]],
dtype=dtype))
def testSpaceToDepth(self):
def make_op(data_format):
def op(x):
return array_ops.space_to_depth(
x, block_size=2, data_format=data_format)
return op
for dtype in self.numeric_types:
for data_format in ["NCHW", "NHWC"]:
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype), data_format),
expected=nhwc_to_format(
np.array([[[[1, 2, 3, 4]]]], dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]],
dtype=dtype), data_format),
expected=nhwc_to_format(
np.array(
[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]], dtype=dtype),
data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]],
dtype=dtype), data_format),
expected=nhwc_to_format(
np.array(
[[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
[13, 14, 15, 16]]]],
dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op("NCHW_VECT_C"),
np.arange(32, dtype=dtype).reshape((1, 2, 2, 2, 4)),
expected=np.array([[[[[0, 1, 2, 3, 16, 17, 18, 19]]],
[[[4, 5, 6, 7, 20, 21, 22, 23]]],
[[[8, 9, 10, 11, 24, 25, 26, 27]]],
[[[12, 13, 14, 15, 28, 29, 30, 31]]]]],
dtype=dtype))
def _assertSoftplusMatchesExpected(self, features, dtype):
features = np.array(features, dtype=dtype)
zero = np.asarray(0).astype(dtype)
expected = np.logaddexp(zero, features).astype(dtype)
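    # Note (added for clarity): softplus(x) = log(1 + exp(x)) = logaddexp(0, x),
    # so np.logaddexp gives a numerically stable reference value even for
    # large-magnitude inputs.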
self._assertOpOutputMatchesExpected(
nn_ops.softplus, features, expected=expected, rtol=1e-6, atol=9.1e-6)
def testSoftplus(self):
for dtype in self.float_types:
self._assertSoftplusMatchesExpected([[-2, 0, 8]], dtype)
self._assertSoftplusMatchesExpected(
[[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]], dtype)
if dtype == dtypes.bfloat16.as_numpy_dtype:
log_eps = np.log(np.finfo(np.float32).eps)
else:
log_eps = np.log(np.finfo(dtype).eps)
one = dtype(1)
ten = dtype(10)
self._assertSoftplusMatchesExpected([
log_eps, log_eps - one, log_eps + one, log_eps - ten, log_eps + ten,
-log_eps, -log_eps - one, -log_eps + one, -log_eps - ten,
-log_eps + ten
], dtype)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/unary_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for XLA Gather Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
class GatherTest(xla_test.XLATestCase):
def _buildParams(self, data, dtype):
data = data.astype(dtype.as_numpy_dtype)
# For complex types, adds an index-dependent imaginary component so we can
# tell we got the right value.
if dtype.is_complex:
return data + 10j * data
return data
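  # Illustrative note (added, not from the original source): for complex
  # dtypes, data [0, 1, 2, 3, 7, 5] becomes [0, 1+10j, 2+20j, 3+30j, 7+70j,
  # 5+50j], so gathering index 4 must return 7+70j; a gather that picked the
  # wrong element would be exposed by the imaginary part.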
def testScalar1D(self):
with self.session() as session, self.test_scope():
data = np.array([0, 1, 2, 3, 7, 5])
for dtype in self.all_tf_types:
for indices in 4, [4], [1, 2, 2, 4, 5]:
params_np = self._buildParams(data, dtype)
params = array_ops.placeholder(dtype=dtype)
indices_tf = constant_op.constant(indices)
gather_t = array_ops.gather(params, indices_tf)
gather_val = session.run(gather_t, feed_dict={params: params_np})
np_val = constant_op.constant(params_np[indices])
self.assertAllEqual(np_val, gather_val)
def testScalar2D(self):
with self.session() as session, self.test_scope():
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14]])
for dtype in self.all_tf_types:
for axis in 0, 1, -1:
params_np = self._buildParams(data, dtype)
params = array_ops.placeholder(dtype=dtype)
indices = constant_op.constant(2)
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = session.run(gather_t, feed_dict={params: params_np})
expected = constant_op.constant(
np.take(params_np, 2, axis=axis), dtype)
self.assertAllEqual(expected, gather_val)
def testSimpleTwoD32(self):
with self.session() as session, self.test_scope():
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14]])
for dtype in self.all_tf_types:
for axis in 0, 1, -1:
params_np = self._buildParams(data, dtype)
params = array_ops.placeholder(dtype=dtype)
# The indices must be in bounds for any axis.
indices = constant_op.constant([0, 1, 0, 2])
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = session.run(gather_t, feed_dict={params: params_np})
expected = constant_op.constant(
np.take(params_np, [0, 1, 0, 2], axis=axis), dtype)
self.assertAllEqual(expected, gather_val)
def testSimpleTwoD32_Int64Indices(self):
if np.int64 not in self.int_types:
return
with self.session() as session, self.test_scope():
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14]])
# The indices must be in bounds for any axis.
indices_np = np.array([0, 1, 0, 2])
for dtype in self.all_tf_types:
for axis in 0, 1, -1:
params_np = self._buildParams(data, dtype)
params = array_ops.placeholder(dtype=dtype)
indices = array_ops.placeholder(dtype=dtypes.int64)
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = session.run(
gather_t, feed_dict={
params: params_np,
indices: indices_np
})
expected = constant_op.constant(
np.take(params_np, [0, 1, 0, 2], axis=axis), dtype)
self.assertAllEqual(expected, gather_val)
def testHigherRank(self):
"""Check that scalar and empty indices shapes work as well."""
shape = (2, 1, 3, 2)
for indices_shape in (), (0,), (2, 0), (2, 3):
for dtype in self.all_tf_types:
for axis in 0, 1, 2, 3, -1, -2:
params = self._buildParams(np.random.randn(*shape), dtype)
indices = np.random.randint(shape[axis], size=indices_shape)
with self.session() as sess, self.test_scope():
tf_params = array_ops.placeholder(dtype=dtype)
tf_indices = constant_op.constant(indices, dtype=dtypes.int32)
gather = array_ops.gather(tf_params, tf_indices, axis=axis)
gather_value = sess.run(gather, feed_dict={tf_params: params})
gather_np = constant_op.constant(
np.take(params, indices, axis=axis), dtype)
self.assertAllEqual(gather_np, gather_value)
def testIndicesWithDifferentDimensions(self):
with self.session():
for dtype in self.numeric_tf_types:
params = array_ops.placeholder(dtype=dtype)
indices = array_ops.placeholder(dtype=np.int32)
with self.test_scope():
gather = array_ops.gather(params, indices)
self.assertAllEqual(
7, gather.eval(feed_dict={params: [4, 7, 2], indices: 1}))
self.assertAllEqual(
[7], gather.eval(feed_dict={params: [4, 7, 2], indices: [1]}))
self.assertAllEqual(
[[7]], gather.eval(feed_dict={params: [4, 7, 2], indices: [[1]]}))
def testGatherPrecision(self):
with self.session() as session, self.test_scope():
data = np.array([[0, 0, 0, 0], [0, 2 * (1 + np.exp2(-8)), 0, 0],
[0, 0, 0, 0], [0.015789, 0.0985, 0.55789, 0.3842]])
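      # Note (added, not from the original source): 2 * (1 + 2**-8) = 2.0078125
      # is exact in float32 but needs more mantissa bits than a low-precision
      # format such as bfloat16 provides, so a lossy intermediate cast inside
      # gather would perturb the gathered value - a plausible reading of what
      # "precision" refers to in this test's name.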
indices = np.array([1, 2, 3, 1])
dtype = dtypes.float32
params_np = self._buildParams(data, dtype)
params = array_ops.placeholder(dtype=dtype)
indices_tf = constant_op.constant(indices)
gather_t = array_ops.gather(params, indices_tf)
gather_val = session.run(gather_t, feed_dict={params: params_np})
np_val = params_np[indices]
self.assertAllEqual(np_val, gather_val)
class GatherBenchmark(test.Benchmark):
"""Microbenchmarks for the gather op."""
def _benchmarkGather(self, name, axis, gather_indices, use_xla_jit):
def BuilderFn():
inputs = variables.Variable(
array_ops.zeros([100, 100, 10, 100, 50], dtype=dtypes.float32),
dtype=dtypes.float32,
name='input')
indices = variables.Variable(
gather_indices, dtype=dtypes.int32, name='indices')
gather_t = array_ops.gather(inputs, indices, axis=axis)
return '%s.axis%d' % (name, axis), [gather_t]
xla_test.Benchmark(self, BuilderFn, use_xla_jit=use_xla_jit, device='cpu')
def _benchmarkSliceGather(self, axis, use_xla_jit):
"""Benchmarks a gather op that's really a dynamic slice."""
self._benchmarkGather('slice_gather', axis, [1], use_xla_jit)
def _benchmarkNontrivialGather(self, axis, use_xla_jit):
self._benchmarkGather('nontrivial_gather', axis, [9, 1, 0, 2] * 4,
use_xla_jit)
def benchmarkSliceGatherAxis0(self):
self._benchmarkSliceGather(axis=0, use_xla_jit=False)
def benchmarkSliceGatherAxis0XLA(self):
self._benchmarkSliceGather(axis=0, use_xla_jit=True)
def benchmarkSliceGatherAxis1(self):
self._benchmarkSliceGather(axis=1, use_xla_jit=False)
def benchmarkSliceGatherAxis1XLA(self):
self._benchmarkSliceGather(axis=1, use_xla_jit=True)
def benchmarkSliceGatherAxis4(self):
self._benchmarkSliceGather(axis=4, use_xla_jit=False)
def benchmarkSliceGatherAxis4XLA(self):
self._benchmarkSliceGather(axis=4, use_xla_jit=True)
def benchmarkNontrivialGatherAxis0(self):
self._benchmarkNontrivialGather(axis=0, use_xla_jit=False)
def benchmarkNontrivialGatherAxis0XLA(self):
self._benchmarkNontrivialGather(axis=0, use_xla_jit=True)
def benchmarkNontrivialGatherAxis1(self):
self._benchmarkNontrivialGather(axis=1, use_xla_jit=False)
def benchmarkNontrivialGatherAxis1XLA(self):
self._benchmarkNontrivialGather(axis=1, use_xla_jit=True)
def benchmarkNontrivialGatherAxis4(self):
self._benchmarkNontrivialGather(axis=4, use_xla_jit=False)
def benchmarkNontrivialGatherAxis4XLA(self):
self._benchmarkNontrivialGather(axis=4, use_xla_jit=True)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/gather_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for eager execution using XLA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import convolutional
from tensorflow.python.layers import pooling
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import googletest
from tensorflow.python.training import adam
class EagerTest(xla_test.XLATestCase):
def testBasic(self):
with self.test_scope():
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
def testGradientTape(self):
with self.test_scope():
x = constant_op.constant(1.0)
y = constant_op.constant(10.0)
with backprop.GradientTape(persistent=True) as tape:
tape.watch(x)
tape.watch(y)
a = x + y + x * y
da_dx = tape.gradient(a, x)
da_dy = tape.gradient(a, y)
self.assertEqual(11.0, da_dx.numpy())
self.assertEqual(2.0, da_dy.numpy())
def testExecuteListOutputLen0(self):
with self.test_scope():
empty = constant_op.constant([], dtype=dtypes.float32)
result = array_ops.unstack(empty, 0)
self.assertTrue(isinstance(result, list))
self.assertEqual(0, len(result))
def testExecuteListOutputLen1(self):
with self.test_scope():
split_dim = constant_op.constant(1)
value = constant_op.constant([[0., 1., 2.], [3., 4., 5.]])
result = array_ops.split(value, 1, axis=split_dim)
self.assertTrue(isinstance(result, list))
self.assertEqual(1, len(result))
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], result[0])
def testExecuteListOutputLen3(self):
with self.test_scope():
split_dim = constant_op.constant(1)
value = constant_op.constant([[0., 1., 2.], [3., 4., 5.]])
result = array_ops.split(value, 3, axis=split_dim)
self.assertTrue(isinstance(result, list))
self.assertEqual(3, len(result))
self.assertAllEqual([[0], [3]], result[0])
self.assertAllEqual([[1], [4]], result[1])
self.assertAllEqual([[2], [5]], result[2])
def testBasicGraph(self):
# Run some ops eagerly
with self.test_scope():
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
    # Run the same ops in graph mode
with context.graph_mode(), self.session():
with self.test_scope():
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, self.evaluate(product))
def testDegenerateSlices(self):
with self.test_scope():
npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
t = constant_op.constant(npt)
# degenerate by offering a forward interval with a negative stride
self.assertAllEqual(npt[0:-1:-1, :, :], t[0:-1:-1, :, :])
# degenerate with a reverse interval with a positive stride
self.assertAllEqual(npt[-1:0, :, :], t[-1:0, :, :])
# empty interval in every dimension
self.assertAllEqual(npt[-1:0, 2:2, 2:3:-1], t[-1:0, 2:2, 2:3:-1])
def testIdentity(self):
with self.test_scope():
self.assertAllEqual(2, array_ops.identity(2))
def testRandomOps(self):
with self.test_scope():
tensor = gen_random_ops.random_uniform((2, 2), dtypes.float32)
row0 = tensor[0].numpy()
row1 = tensor[1].numpy()
      # It should be very unlikely for the RNG to generate two equal rows.
self.assertFalse((row0 == row1).all())
def testIdentityOnVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(True)
i = array_ops.identity(v)
self.assertAllEqual(True, i.numpy())
def testAssignAddVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
v.assign_add(2.0)
self.assertEqual(3.0, v.numpy())
def testReadAssignRead(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
val1 = v.read_value()
v.assign_add(2.0)
val2 = v.read_value()
self.assertEqual(1.0, val1.numpy())
self.assertEqual(3.0, val2.numpy())
def testGradient(self):
def f(x):
return x
with self.test_scope():
grad_fn = backprop.gradients_function(f)
self.assertAllEqual(2., grad_fn(1., dy=2.)[0])
def testVariableGradient(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(1.0)
def f():
x = v0 * v0
return x
grads = backprop.implicit_grad(f)()
self.assertEqual(2., grads[0][0].numpy())
def testMultipleVariableReads(self):
# This test makes sure consecutive variable reads don't copy
# the underlying memory.
with self.test_scope():
# Create 128MiB variables
var = resource_variable_ops.ResourceVariable(
array_ops.ones([32, 1024, 1024]))
# Read the same variable 100 times. If the underlying tensor
# is not copied, this is a trivial operation. If it is copied,
# this will eat over 13GB and OOM.
values = []
for _ in range(100):
values.append(var.value())
  # The shape, shape_n, size, and rank ops are tested here because their
  # execution kernels (as opposed to the compilation-only tf2xla kernels)
  # are distinct from the tf2xla kernels.
def testShape(self):
def const(value):
return array_ops.shape(
constant_op.constant(value)).numpy()
def ones(value):
return array_ops.shape(
array_ops.ones(value)).numpy()
with self.test_scope():
# Shapes of directly constructed tensors
self.assertAllEqual([], const(3))
self.assertAllEqual([3], const([1.0, 2.0, 3.0]))
self.assertAllEqual([2, 2], const([[1.0, 2.0], [3.0, 4.0]]))
self.assertAllEqual([2, 1, 2], const([[[1.0, 2.0]], [[3.0, 4.0]]]))
# Shapes of tensors created by op running on device
# We make this distinction because directly constructed tensors
# are treated differently in a few places that can influence shape:
# - they always have on_host_tensor
# - they and their shapes can be cached
# - they end up on device via a copy, instead of as program output
self.assertAllEqual([], ones([]))
self.assertAllEqual([3], ones([3]))
self.assertAllEqual([2, 2], ones([2, 2]))
self.assertAllEqual([2, 1, 2], ones([2, 1, 2]))
def testShapeN(self):
with self.test_scope():
# Shapes of directly constructed tensors
shapes = array_ops.shape_n([
constant_op.constant(1.0),
constant_op.constant([1.0, 2.0, 3.0]),
constant_op.constant([[1.0, 2.0], [3.0, 4.0]])])
self.assertAllEqual(
[[], [3], [2, 2]],
[x.numpy().tolist() for x in shapes])
# Shapes of tensors created by op running on device
shapes = array_ops.shape_n([
array_ops.ones([]),
array_ops.ones([3]),
array_ops.ones([2, 2])])
self.assertAllEqual(
[[], [3], [2, 2]],
[x.numpy().tolist() for x in shapes])
def testSize(self):
with self.test_scope():
self.assertEqual(
1, array_ops.size(constant_op.constant(1.0)).numpy())
self.assertEqual(
3, array_ops.size(constant_op.constant([1.0, 2.0, 3.0])).numpy())
self.assertEqual(
4, array_ops.size(
constant_op.constant([[1.0, 2.0], [3.0, 4.0]])).numpy())
def testRank(self):
with self.test_scope():
self.assertEqual(
0, array_ops.rank(constant_op.constant(1.0)).numpy())
self.assertEqual(
1, array_ops.rank(constant_op.constant([1.0, 2.0, 3.0])).numpy())
self.assertEqual(
2, array_ops.rank(
constant_op.constant([[1.0, 2.0], [3.0, 4.0]])).numpy())
def testAdam(self):
with self.test_scope():
optimizer = adam.AdamOptimizer(0.1)
x = resource_variable_ops.ResourceVariable(10.0)
with backprop.GradientTape() as tape:
y = x * x
dy_dx = tape.gradient(y, x)
optimizer.apply_gradients([(dy_dx, x)])
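      # Note (added for clarity): Adam's very first update has a magnitude of
      # roughly the learning rate, regardless of the gradient's scale, so one
      # step with lr=0.1 moves x from 10.0 to approximately 9.9.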
self.assertAlmostEqual(9.9, x.numpy(), places=3)
def testAdamSparse(self):
with ops.device('/cpu:0'):
# Create 2-D embedding for 3 objects on CPU because sparse/sliced updates
# are not implemented on TPU.
embedding_matrix = resource_variable_ops.ResourceVariable(
array_ops.ones([3, 2]))
with self.test_scope():
with backprop.GradientTape() as tape:
embedding = embedding_ops.embedding_lookup(embedding_matrix, [1])
y = math_ops.reduce_sum(embedding)
dy_dx = tape.gradient(y, embedding_matrix)
self.assertIsInstance(dy_dx, ops.IndexedSlices)
optimizer = adam.AdamOptimizer(0.1)
# The gradient application operations will run on CPU because optimizer
# updates are always collocated with the variable.
optimizer.apply_gradients([(dy_dx, embedding_matrix)])
# This assign_add will run on CPU because when an input to an
# operation is a resource, this operation is placed on the resource's
# device by the eager runtime.
embedding_matrix.assign_add(array_ops.ones([3, 2]))
self.assertAllClose([[2.0, 2.0],
[1.9, 1.9],
[2.0, 2.0]], embedding_matrix.numpy())
class EagerFunctionTest(xla_test.XLATestCase):
def testBasic(self):
with self.test_scope():
matmul = function.defun(math_ops.matmul)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t, transpose_a=True)
self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20])
def testConv(self):
if 'GPU' in self.device:
# TODO(b/32333178)
self.skipTest('Current implementation of RandomStandardNormal kernel '
'is very slow on GPU, and has been blacklisted.')
with self.test_scope():
data_format = 'channels_last'
conv = convolutional.Conv2D(
filters=1, kernel_size=2, padding='VALID',
data_format=data_format, activation=nn_ops.relu,
kernel_initializer=init_ops.ones_initializer(),
bias_initializer=init_ops.zeros_initializer())
pool = pooling.MaxPooling2D(2, 2, data_format=data_format)
def model(x):
x = conv(x)
return pool(x)
model = function.defun(model)
x = array_ops.ones([1, 4, 4, 1])
y = model(x)
self.assertAllEqual(y.numpy(), [[[[4.]]]])
def testReadVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def f():
return v.read_value()
var = f()
self.assertEqual(1.0, var.numpy())
def testResourceVariableNoInlineReadWrite(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
w = resource_variable_ops.ResourceVariable(0.0)
@function.defun_with_attributes(attributes={'_noinline': True})
def g(x):
w.assign(w.read_value() + x)
return v.read_value() + x * w.read_value()
@function.defun_with_attributes(attributes={'_noinline': True})
def f():
return g(1.0) + g(2.0) + g(3.0) + g(4.0) + g(5.0)
      # w accumulates 1, 3, 6, 10, 15 across the five calls, so
      # f() = (1 + 1*1) + (1 + 2*3) + (1 + 3*6) + (1 + 4*10) + (1 + 5*15)
      #     = 2 + 7 + 19 + 41 + 76 = 145.
self.assertEqual(145.0, f().numpy())
self.assertEqual(15.0, w.read_value().numpy())
def testResourceVariableNoInlineReadOnly(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(10.0)
@function.defun_with_attributes(attributes={'_noinline': True})
def g():
return v.read_value()
@function.defun_with_attributes(attributes={'_noinline': True})
def f():
return g() + g() + g() + g() + g()
self.assertEqual(50.0, f().numpy())
def testResourceVariableNoInlineWriteOnly(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(0.0)
@function.defun_with_attributes(attributes={'_noinline': True})
def g(x):
v.assign(x)
@function.defun_with_attributes(attributes={'_noinline': True})
def f():
g(1.0)
g(2.0)
g(3.0)
g(4.0)
g(5.0)
f()
self.assertEqual(5.0, v.read_value().numpy())
def testUpdateVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
def f(v):
v.assign_add(1.0)
return v
f = function.defun(f)
var = f(v)
self.assertEqual(2.0, var.numpy())
def testReturnResourceHandle(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable([[1.0, 2.0], [3.0, 4.0]])
def f(v):
return v.handle
f = function.defun(f)
handle = f(v)
self.assertAllEqual(v.numpy(),
resource_variable_ops.read_variable_op(
handle, dtypes.float32).numpy())
def testReturnMultipleResourceHandles(self):
with self.test_scope():
v1 = resource_variable_ops.ResourceVariable(1.25)
v2 = resource_variable_ops.ResourceVariable(2.0)
def f(v):
return v.handle, 3.0 * v, v2.handle, v + v2
f = function.defun(f)
v1_handle, v1_times_3, v2_handle, variable_sum = f(v1)
self.assertAllEqual(v1.numpy(),
resource_variable_ops.read_variable_op(
v1_handle, dtypes.float32).numpy())
self.assertEqual(3.75, v1_times_3.numpy())
self.assertAllEqual(v2.numpy(),
resource_variable_ops.read_variable_op(
v2_handle, dtypes.float32).numpy())
self.assertEqual(3.25, variable_sum.numpy())
def testAllArgumentKinds(self):
"""Test a complex function that takes different argument kinds.
tf2xla machinery that translates, compiles, and runs defuns
classifies arguments into: compile-time constants, regular tensors,
and resources. This test creates a function with a mix of all these
kinds. Moreover, the order of function arguments is intentionally mixed up.
This also tests the case when the same argument is a compile-time constant
as well as used in an operation that normally expects its inputs to be
in device memory - addition in this case.
"""
with self.test_scope():
def foo(c1, r1, v1, c2, v2, r2):
# c1 and c2 are compile-time constants
# r1 and r2 are regular tensors
# v1 and v2 are resource variables
a = c1 + r1
b = math_ops.cast(c2, dtypes.float32) + v2
c = array_ops.slice(v1, c1, c2)
d = r2 * v2
return a, b, c, d
foo = function.defun(foo)
c1 = [0, 0]
c2 = array_ops.ones([2], dtype=dtypes.int32)
r1 = array_ops.ones([2])
r2 = [[2., 2.], [3., 3.]]
v1 = resource_variable_ops.ResourceVariable([[1., 2.], [3., 4.]])
v2 = resource_variable_ops.ResourceVariable([[10., 20.], [30., 40.]])
a, b, c, d = foo(c1, r1, v1, c2, v2, r2)
self.assertAllEqual([1, 1], a.numpy())
self.assertAllEqual([[11., 21.], [31., 41.]], b.numpy())
self.assertAllEqual([[1.]], c.numpy())
self.assertAllEqual([[20., 40.], [90., 120.]], d.numpy())
def testDefunInGradientTape(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def f(x):
x = v0 * v0 * x
return x
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
y = f(x)
dy = tape.gradient(y, v0)
self.assertEqual(75, y.numpy())
self.assertEqual(30, dy.numpy())
def testGradientTapeInDefun(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def f():
x = constant_op.constant(1.0)
with backprop.GradientTape() as tape:
y = v0 * x
dy = tape.gradient(y, v0)
return dy
dy = f()
self.assertEqual(1.0, dy.numpy())
def testSliceInDefun(self):
with self.test_scope():
@function.defun
def f(x, y):
return x[0::2, y:, ...]
x = array_ops.ones([2, 3, 4], dtype=dtypes.float32)
y = array_ops.ones([], dtype=dtypes.int32)
with backprop.GradientTape() as tape:
tape.watch(x)
tape.watch(y)
z = f(x, y)
dz = tape.gradient(z, x)
self.assertAllEqual(np.ones([1, 2, 4]), z.numpy())
self.assertAllEqual((2, 3, 4), dz.shape.as_list())
def testNestedDefun(self):
with self.test_scope():
@function.defun
def times_two(x):
return 2. * x
@function.defun
def two_x_plus_1(x):
return times_two(x) + 1.
x = constant_op.constant([2., 3., 4.])
y = two_x_plus_1(x)
self.assertAllEqual([5., 7., 9.], y.numpy())
def testNestedDefunWithVariable(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def g(x):
x = v0 * x
return x
@function.defun
def f(x):
x = g(v0 * x)
return x
x = constant_op.constant(3.0)
y = f(x)
self.assertEqual(75.0, y.numpy())
def testNestedDefunInGradientTape(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def g(x):
x = v0 * x
return x
@function.defun
def f(x):
x = g(v0 * x)
return x
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
y = f(x)
dy = tape.gradient(y, v0)
self.assertEqual(75, y.numpy())
self.assertEqual(30, dy.numpy())
def testNestedDefunInGradientTapeDifferentVars(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
v1 = resource_variable_ops.ResourceVariable(3.0)
@function.defun
def g(x):
x = v1 * x
return x
@function.defun
def f(x):
x = g(v0 * x)
return x
x = constant_op.constant(3.0)
with backprop.GradientTape(persistent=True) as tape:
y = f(x)
dy_v0 = tape.gradient(y, v0)
dy_v1 = tape.gradient(y, v1)
self.assertEqual(45, y.numpy())
self.assertEqual(9, dy_v0.numpy())
self.assertEqual(15, dy_v1.numpy())
def testWhileInDefun(self):
with self.test_scope():
@def_function.function
def f(start):
c = lambda x: math_ops.less(x, 13.0)
b = lambda x: math_ops.add(x, 1.0)
return control_flow_ops.while_loop(c, b, [start])
y = f(constant_op.constant(3.0))
self.assertEqual(13.0, y.numpy())
def testAutoGraphWhileInDefun(self):
with self.test_scope():
@def_function.function
def f(start):
x = start
while x < 13.0:
x += 1.0
return x
y = f(constant_op.constant(3.0))
self.assertEqual(13.0, y.numpy())
def testCondInDefun(self):
with self.test_scope():
@def_function.function
def f(pred, value):
fn1 = lambda: math_ops.add(value, 1.0)
fn2 = lambda: math_ops.subtract(value, 1.0)
return control_flow_ops.cond(pred, fn1, fn2)
plus_one = f(constant_op.constant(True), constant_op.constant(10.0))
minus_one = f(constant_op.constant(False), constant_op.constant(10.0))
self.assertEqual(11.0, plus_one.numpy())
self.assertEqual(9.0, minus_one.numpy())
def testAutoGraphCondInDefun(self):
with self.test_scope():
@def_function.function
def f(pred, value):
if pred:
return value + 1.0
else:
return value - 1.0
plus_one = f(constant_op.constant(True), constant_op.constant(10.0))
minus_one = f(constant_op.constant(False), constant_op.constant(10.0))
self.assertEqual(11.0, plus_one.numpy())
self.assertEqual(9.0, minus_one.numpy())
def testScanInDefun(self):
with self.test_scope():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name='data')
v = constant_op.constant(2.0, name='v')
@def_function.function
def f(y):
# pylint: disable=unnecessary-lambda
return functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), y, initializer=v)
# pylint: enable=unnecessary-lambda
r = f(elems)
self.assertAllEqual([2., 4., 12., 48., 240., 1440.], self.evaluate(r))
def testFeedDeviceMemoryToOpExpectingHostMemory(self):
@function.defun
def f(dims, value):
return array_ops.fill(dims, value)
with self.test_scope():
x = constant_op.constant([4], dtype=dtypes.int64)
y = f(x, 3)
self.assertAllEqual([3, 3, 3, 3], y)
def testRequestNotToCompile(self):
with self.test_scope():
def f(x):
with ops.device('device:CPU:0'):
y = 2.0 * x
return x, y
wholly_compiled_f = def_function.function(f)
op_by_op_f = function.defun_with_attributes(
f, attributes={'_XlaCompile': False})
x = constant_op.constant([0.0, 2.0], name='data')
# When function is wholly compiled, all outputs will be on the
# device on which it is run.
r_x, r_y = wholly_compiled_f(x)
self.assertAllEqual([0.0, 2.0], r_x)
self.assertAllEqual([0.0, 4.0], r_y)
if context.executing_eagerly():
# backing_device is only available for eager tensors.
self.assertRegexpMatches(r_x.backing_device, self.device)
self.assertRegexpMatches(r_y.backing_device, self.device)
# When function is executed op-by-op, requested devices will be
# respected.
r_x, r_y = op_by_op_f(x)
self.assertAllEqual([0.0, 2.0], r_x)
self.assertAllEqual([0.0, 4.0], r_y)
if context.executing_eagerly():
# backing_device is only available for eager tensors.
self.assertRegexpMatches(r_x.backing_device, self.device)
self.assertRegexpMatches(r_y.backing_device, 'device:CPU:0')
class ExcessivePaddingTest(xla_test.XLATestCase):
"""Test that eager execution works with TPU flattened tensors.
Tensors that would normally be excessively padded when written
to TPU memory are reshaped to 1-D flat tensors.
This test case verifies that such tensors work with eager execution.
The flattening currently only happens on TPU, but tests should work
fine with all backends as flattening is transparent.
"""
def testFromConstant(self):
with self.test_scope():
# Create constant of shape [100, 2, 1]. This tensor would be
# excessively padded on TPU.
tensor = constant_op.constant(100 * [[[10.0], [2.0]]])
# Use reduce_sum since it requires correctly working with
# a particular dimension.
reduced = math_ops.reduce_sum(tensor, axis=1)
self.assertAllEqual(100 * [[12.0]], reduced)
def testFromOperation(self):
with self.test_scope():
tensor = array_ops.ones([3, 100, 2, 2])
reduced = math_ops.reduce_sum(tensor, axis=[0, 2, 3])
self.assertAllEqual(100 * [12.0], reduced)
def testAsFunctionInput(self):
with self.test_scope():
@function.defun
def f(x):
return math_ops.reduce_sum(x, axis=2)
tensor = constant_op.constant(100 * [[[10.0, 2.0]]])
reduced = f(tensor)
self.assertAllEqual(100 * [[12.0]], reduced)
def testAsFunctionOutput(self):
with self.test_scope():
@function.defun
def f(x):
return x * constant_op.constant(100 * [[[10.0, 2.0]]])
y = f(3)
reduced = math_ops.reduce_sum(y, axis=2)
self.assertAllEqual(100 * [[36.0]], reduced)
def multiple_tpus():
devices = context.context().devices()
return len([d for d in devices if 'device:TPU:' in d]) > 1
class MultiDeviceTest(xla_test.XLATestCase):
"""Test running TPU computation on more than one core."""
def testBasic(self):
if not multiple_tpus():
self.skipTest('MultiDeviceTest requires multiple TPU devices.')
# Compute 10 on TPU core 0
with ops.device('device:TPU:0'):
two = constant_op.constant(2)
five = constant_op.constant(5)
ten = two * five
self.assertAllEqual(10, ten)
# Compute 6 on TPU core 1
with ops.device('device:TPU:1'):
two = constant_op.constant(2)
three = constant_op.constant(3)
six = two * three
self.assertAllEqual(6, six)
# Copy 10 and 6 to CPU and sum them
self.assertAllEqual(16, ten + six)
if __name__ == '__main__':
ops.enable_eager_execution(
config=config_pb2.ConfigProto(log_device_placement=True))
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/eager_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
def NHWCToNCHW(input_tensor):
"""Convert the input from NHWC format to NCHW.
Args:
input_tensor: a 4-D tensor, or a 4-element array representing the same.
Returns:
the converted tensor or a shape array
"""
if isinstance(input_tensor, ops.Tensor):
return array_ops.transpose(input_tensor, [0, 3, 1, 2])
else:
return [input_tensor[0], input_tensor[3], input_tensor[1], input_tensor[2]]
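# Illustrative example (added note, not from the original source): a tensor of
# shape [1, 4, 4, 3] (NHWC) becomes shape [1, 3, 4, 4] (NCHW), and a shape or
# ksize array such as [1, 2, 2, 1] is reordered to [1, 1, 2, 2].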
def NCHWToNHWC(input_tensor):
"""Convert the input from NCHW format to NHWC.
Args:
input_tensor: a 4-D tensor, or a 4-element array representing the same.
Returns:
the converted tensor or a shape array
"""
if isinstance(input_tensor, ops.Tensor):
return array_ops.transpose(input_tensor, [0, 2, 3, 1])
else:
return [input_tensor[0], input_tensor[2], input_tensor[3], input_tensor[1]]
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs
"""
test_configs = ["NHWC", "NCHW"]
return test_configs
class PoolingTest(xla_test.XLATestCase):
def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,
data_format, expected):
"""Verifies the output values of the pooling function.
Args:
pool_func: Function to be called, currently only co.MaxPool.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
expected: An array containing the expected operation outputs.
"""
total_size = np.prod(input_sizes)
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = np.array([f * 1.0 for f in range(1, total_size + 1)], dtype=np.float32)
x = x.reshape(input_sizes)
with self.session() as sess:
with self.test_scope():
inputs = array_ops.placeholder(dtypes.float32)
t = inputs
if data_format == "NCHW":
t = NHWCToNCHW(t)
ksize = NHWCToNCHW(ksize)
strides = NHWCToNCHW(strides)
t = pool_func(t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
t = NCHWToNHWC(t)
actual = sess.run(t, {inputs: x})
self.assertAllClose(expected, actual.flatten(), rtol=1e-5, atol=1e-6)
def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
expected):
"""Verifies the output values of the pooling function.
Args:
pool_func: Function to be called, co.MaxPool, co.AvgPool,
or the Lua version.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
for data_format in GetTestConfigs():
self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding,
data_format, expected)
def testMaxPoolValidPadding(self):
expected_output = [13.0, 14.0, 15.0]
self._VerifyValues(nn_ops.max_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output)
def testMaxPoolSamePadding(self):
expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
self._VerifyValues(nn_ops.max_pool,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
def testMaxPoolSamePaddingNonSquareWindow(self):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
#
# [max(1.0, 2.0), max(2.0, padded0),
# max(3.0, 4.0), max(4.0, padded0)]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 2.0, 4.0, 4.0])
def testMaxPoolValidPaddingUnevenStride(self):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0])
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0])
def testMaxPoolSamePaddingFilter4(self):
expected_output = [
21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,
61.0, 62.0, 63.0, 64.0
]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
def testMaxPoolSamePaddingFilter8(self):
expected_output = [
145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0, 161.0, 162.0,
163.0, 164.0, 165.0, 166.0, 167.0, 168.0, 177.0, 178.0, 179.0, 180.0,
181.0, 182.0, 183.0, 184.0, 185.0, 186.0, 187.0, 188.0, 189.0, 190.0,
191.0, 192.0, 273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,
289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0, 305.0, 306.0,
307.0, 308.0, 309.0, 310.0, 311.0, 312.0, 313.0, 314.0, 315.0, 316.0,
317.0, 318.0, 319.0, 320.0, 401.0, 402.0, 403.0, 404.0, 405.0, 406.0,
407.0, 408.0, 417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,
433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0, 441.0, 442.0,
443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 465.0, 466.0, 467.0, 468.0,
469.0, 470.0, 471.0, 472.0, 481.0, 482.0, 483.0, 484.0, 485.0, 486.0,
487.0, 488.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,
505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0
]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
# Tests for DepthwiseMaxPooling on CPU only.
def testDepthwiseMaxPool1x1DepthWindow1(self):
# input is:
# [1.0, ..., 10.0] along depth,
#
# We maxpool by depth in patches of 2.
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 1, 1, 10],
ksize=[1, 1, 1, 2],
strides=[1, 1, 1, 2],
padding="SAME",
expected=[2.0, 4.0, 6.0, 8.0, 10.0])
def testDepthwiseMaxPool2x2DepthWindow3(self):
# input is:
#
# a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
# output. Each node has contiguous values, so the depthwise max
# should be multiples of 3.0.
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 2, 6],
ksize=[1, 1, 1, 3],
strides=[1, 1, 1, 3],
padding="SAME",
expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0])
def testKernelSmallerThanStrideValid(self):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID",
expected=[9, 12, 30, 33])
def testKernelSmallerThanStrideSame(self):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 7, 9])
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 9, 11])
# Average pooling
def testAvgPoolValidPadding(self):
expected_output = [7, 8, 9]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output)
def testAvgPoolSamePadding(self):
expected_output = [7., 8., 9., 11.5, 12.5, 13.5]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
class PoolGradTest(xla_test.XLATestCase):
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
def _VerifyOneTest(self,
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
data_format,
pool_grad_grad_func=None):
"""Verifies the output values of the pooling gradient function.
Args:
pool_func: Forward pooling function
      pool_grad_func: Pooling gradient function corresponding to pool_func.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
pool_grad_grad_func: Second-order gradient function, if available.
"""
total_size = np.prod(input_sizes)
# TODO(b/73062247): MaxPoolGradGrad can confuse gradients when x is equally
# maximal at 16 bits. Switch to np.random.randn when resolved.
x = np.arange(1, total_size + 1, dtype=np.float32)
x *= (np.random.randint(2, size=total_size) * 2 - 1) # Flip signs randomly
# Verify some specifically interesting values...
x[np.random.choice(total_size)] = np.inf
x[np.random.choice(total_size)] = -np.inf
# TODO(b/74222344): Fix nan handling for max pool grad.
# x[np.random.choice(total_size)] = np.nan
x = x.reshape(input_sizes)
with self.session() as sess:
# Use the forward pool function to compute some corresponding outputs
# (needed for the CPU device, and we need the shape in both cases).
with ops.device(self.CPU_DEVICE):
inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)
outputs = pool_func(
inputs,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
output_vals = np.array(sess.run(outputs, {inputs: x}))
output_gradient_vals = np.arange(
1, output_vals.size + 1, dtype=np.float32)
output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)
output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)
output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)
# Use the Tensorflow CPU pooling gradient to compute the expected input
# gradients.
with ops.device(self.CPU_DEVICE):
output_gradients = array_ops.placeholder(
dtypes.float32, shape=output_vals.shape)
expected_input_gradients = pool_grad_func(
inputs,
outputs,
output_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
expected_input_gradient_vals = sess.run(
expected_input_gradients,
{inputs: x,
output_gradients: output_gradient_vals})
output_grad_gradients = array_ops.placeholder(
dtypes.float32, shape=expected_input_gradient_vals.shape)
if pool_grad_grad_func is not None:
expected_grad_gradients = pool_grad_grad_func(
inputs,
outputs,
output_grad_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
expected_grad_gradients_vals = sess.run(expected_grad_gradients, {
inputs: x,
output_grad_gradients: output_grad_grad_vals
})
# Run the gradient op on the XLA device
with self.test_scope():
outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
xla_inputs = inputs
xla_outputs = outputs
xla_output_gradients = output_gradients
xla_output_grad_gradients = output_grad_gradients
xla_ksize = ksize
xla_strides = strides
if data_format == "NCHW":
xla_inputs = NHWCToNCHW(inputs)
xla_outputs = NHWCToNCHW(outputs)
xla_output_gradients = NHWCToNCHW(output_gradients)
xla_output_grad_gradients = NHWCToNCHW(output_grad_gradients)
xla_ksize = NHWCToNCHW(ksize)
xla_strides = NHWCToNCHW(strides)
actual_input_gradients = pool_grad_func(
xla_inputs,
xla_outputs,
xla_output_gradients,
ksize=xla_ksize,
strides=xla_strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
actual_input_gradients = NCHWToNHWC(actual_input_gradients)
if pool_grad_grad_func is not None:
actual_grad_gradients = pool_grad_grad_func(
xla_inputs,
xla_outputs,
xla_output_grad_gradients,
ksize=xla_ksize,
strides=xla_strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
actual_grad_gradients = NCHWToNHWC(actual_grad_gradients)
actual_input_gradients_vals = sess.run(actual_input_gradients, {
inputs: x,
outputs: output_vals,
output_gradients: output_gradient_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_input_gradient_vals,
actual_input_gradients_vals,
rtol=1e-4,
atol=1e-6)
self.assertShapeEqual(actual_input_gradients_vals, inputs)
if pool_grad_grad_func is not None:
actual_grad_gradients_vals = sess.run(
actual_grad_gradients, {
inputs: x,
outputs: output_vals,
output_grad_gradients: output_grad_grad_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_grad_gradients_vals,
actual_grad_gradients_vals,
rtol=1e-4,
atol=1e-6)
self.assertShapeEqual(actual_grad_gradients_vals, outputs)
def _VerifyValues(self,
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
pool_grad_grad_func=None):
"""Verifies the output values of the pooling function.
Args:
pool_func: Pooling function to be called, e.g., tf.nn.max_pool2d
pool_grad_func: Corresponding pooling gradient function.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
pool_grad_grad_func: Second-order gradient function, if available.
"""
for data_format in GetTestConfigs():
self._VerifyOneTest(
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
data_format,
pool_grad_grad_func=pool_grad_grad_func)
def _TestPooling(self, forward_op, backward_op, pool_grad_grad_func=None):
# VALID padding
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding, non square window
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
# VALID padding, uneven stride
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
pool_grad_grad_func=pool_grad_grad_func)
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding, size 4 input
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding, size 8 input
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
def testMaxPool(self):
self._TestPooling(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
pool_grad_grad_func=gen_nn_ops.max_pool_grad_grad)
def testAvgPool(self):
# Wrapper around AvgPoolGrad that ignores extra arguments needed by
# MaxPoolGrad.
def AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding,
data_format):
del outputs # Unused by average-pooling gradients.
return gen_nn_ops.avg_pool_grad(
inputs.get_shape().as_list(),
output_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
self._TestPooling(nn_ops.avg_pool, AvgPoolGrad)
# The CPU implementation of AvgPoolGrad doesn't accept kernels smaller than
# the stride size, so we only run the following tests on MaxPoolGrad.
def testMaxPoolKernelSmallerThanStrideValid(self):
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID")
def testMaxPoolKernelSmallerThanStrideSame(self):
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME")
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME")
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/compiler/tests/pooling_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ExtractImagePatches op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ExtractImagePatches(xla_test.XLATestCase):
"""Functional tests for ExtractImagePatches op."""
def _VerifyValues(self, image, ksizes, strides, rates, padding, patches):
"""Tests input-output pairs for the ExtractImagePatches op.
Args:
image: Input tensor with shape: [batch, in_rows, in_cols, depth].
ksizes: Patch size specified as: [ksize_rows, ksize_cols].
strides: Output strides, specified as [stride_rows, stride_cols].
rates: Atrous rates, specified as [rate_rows, rate_cols].
padding: Padding type.
patches: Expected output.
"""
ksizes = [1] + ksizes + [1]
strides = [1] + strides + [1]
rates = [1] + rates + [1]
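    # extract_image_patches expects rank-4 ksizes/strides/rates of the form
    # [1, rows, cols, 1], so the two-element test arguments above are padded
    # with batch and depth entries of 1.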
with self.session():
image_placeholder = array_ops.placeholder(dtypes.float32)
with self.test_scope():
out_tensor = array_ops.extract_image_patches(
image_placeholder,
ksizes=ksizes,
strides=strides,
rates=rates,
padding=padding,
name="im2col")
feed_dict = {image_placeholder: image}
self.assertAllClose(patches, out_tensor.eval(feed_dict=feed_dict))
def testKsize1x1Stride1x1Rate1x1(self):
"""Verifies that for 1x1 kernel the output equals the input."""
# [2, 3, 4, 5]
image = np.reshape(range(120), [2, 3, 4, 5])
# [2, 3, 4, 5]
patches = np.reshape(range(120), [2, 3, 4, 5])
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1],
strides=[1, 1],
rates=[1, 1],
padding=padding,
patches=patches)
def testKsize1x1Stride2x3Rate1x1(self):
"""Test for 1x1 kernel and strides."""
# [2, 4, 5, 3]
image = np.reshape(range(120), [2, 4, 5, 3])
# [2, 2, 2, 3]
patches = image[:, ::2, ::3, :]
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1],
strides=[2, 3],
rates=[1, 1],
padding=padding,
patches=patches)
def testKsize2x2Stride1x1Rate1x1Valid(self):
"""Test for 2x2 kernel with VALID padding."""
# [1, 2, 2, 1]
image = [[[[1], [2]], [[3], [4]]]]
# [1, 1, 1, 4]
patches = [[[[1, 2, 3, 4]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
patches=patches)
def testKsize2x2Stride1x1Rate1x1Same(self):
"""Test for 2x2 kernel with SAME padding."""
# [1, 2, 2, 1]
image = [[[[1], [2]], [[3], [4]]]]
# [1, 2, 2, 4]
patches = [[[[1, 2, 3, 4], [2, 0, 4, 0]], [[3, 4, 0, 0], [4, 0, 0, 0]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
patches=patches)
def testKsize2x2Stride1x1Rate2x2Valid(self):
"""Test for 2x2 kernel with 2x2 dilation."""
    # [1, 4, 4, 1]
image = np.arange(16).reshape(1, 4, 4, 1).astype(np.float32)
# [1, 2, 2, 4]
patches = [[[[0, 2, 8, 10], [1, 3, 9, 11]],
[[4, 6, 12, 14], [5, 7, 13, 15]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[2, 2],
padding="VALID",
patches=patches)
def testKsize2x2Stride1x1Rate1x1ValidDepth2(self):
"""Test for 2x2 kernel with VALID padding."""
# [1, 2, 2, 2]
image = [[[[1, 5], [2, 6]], [[3, 7], [4, 8]]]]
# [1, 1, 1, 8]
patches = [[[[1, 5, 2, 6, 3, 7, 4, 8]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
patches=patches)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/extract_image_patches_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for while loops in XLA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class WhileTest(xla_test.XLATestCase):
def testSingletonLoopHandrolled(self):
# Define a function for the loop body
@function.Defun(dtypes.int32)
def loop_body(step):
step_out = step + constant_op.constant(1, dtype=dtypes.int32)
return step_out
# Define a function for the loop condition
@function.Defun(dtypes.int32)
def loop_cond(step):
return step < 10
with self.session() as sess:
init_index = array_ops.placeholder(dtypes.int32, [])
with self.test_scope():
loop_outputs = xla.while_loop([init_index], loop_cond, loop_body)
result = sess.run(loop_outputs, {init_index: 0})
self.assertAllClose(result, [10], rtol=1e-3)
def testCountingLoopHandrolled(self):
# Define a function for the loop body
@function.Defun(dtypes.int32, dtypes.float32)
def loop_body(step, rsum):
step_out = step + constant_op.constant(1, dtype=dtypes.int32)
sum_out = rsum + constant_op.constant(1.5, dtype=dtypes.float32)
return step_out, sum_out
# Define a function for the loop condition
@function.Defun(dtypes.int32, dtypes.float32)
def loop_cond(step, rsum):
del rsum
return step < 10
with self.session() as sess:
init_index = array_ops.placeholder(dtypes.int32, [])
init_sum = array_ops.placeholder(dtypes.float32, [])
with self.test_scope():
loop_outputs = xla.while_loop([init_index, init_sum], loop_cond,
loop_body)
result = sess.run(loop_outputs, {init_index: 0, init_sum: 0.0})
self.assertAllClose(result, [10, 15.0], rtol=1e-3)
no_iters_result = sess.run(loop_outputs, {init_index: 10, init_sum: 0.0})
self.assertAllClose(no_iters_result, [10, 0.0], rtol=1e-3)
def testCountingLoopHandrolledC64(self):
# Define a function for the loop body
@function.Defun(dtypes.int32, dtypes.complex64)
def loop_body(step, rsum):
step_out = step + constant_op.constant(1, dtype=dtypes.int32)
sum_out = rsum + constant_op.constant(1.5 + 2j, dtype=dtypes.complex64)
return step_out, sum_out
# Define a function for the loop condition
@function.Defun(dtypes.int32, dtypes.complex64)
def loop_cond(step, rsum):
del rsum
return step < 10
with self.session() as sess:
init_index = array_ops.placeholder(dtypes.int32, [])
init_sum = array_ops.placeholder(dtypes.complex64, [])
with self.test_scope():
loop_outputs = xla.while_loop([init_index, init_sum], loop_cond,
loop_body)
result = sess.run(loop_outputs, {init_index: 0, init_sum: 0.0})
self.assertAllClose(result[1], np.complex64(15 + 20j), rtol=1e-3)
no_iters_result = sess.run(loop_outputs, {init_index: 10, init_sum: 0.0})
self.assertAllClose(no_iters_result[1], np.complex64(0), rtol=1e-3)
def testLoopWithConstantOutput(self):
# Define a function for the loop body
@function.Defun(dtypes.int32, dtypes.int32)
def loop_body(step, x):
del x
step_out = step + constant_op.constant(1, dtype=dtypes.int32)
return (step_out, 7)
# Define a function for the loop condition
@function.Defun(dtypes.int32, dtypes.int32)
def loop_cond(step, x):
del x
return step < 10
with self.session() as sess:
init_index = array_ops.placeholder(dtypes.int32, [])
with self.test_scope():
loop_outputs = xla.while_loop([init_index, 42], loop_cond, loop_body)
result = sess.run(loop_outputs, {init_index: 0})
self.assertAllClose(result, [10, 7], rtol=1e-3)
def _testMaxItersSimple(self):
if is_compile_on_demand():
self.skipTest("list_ops are not supported in cpu_ondemand")
with self.session() as sess, self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
v = constant_op.constant(1.0)
p = array_ops.placeholder(dtype=dtypes.int32)
def create_while_loop():
iterations = array_ops.size(p, name="iterations")
r = control_flow_ops.while_loop(
lambda *_: True,
lambda i, x: (i + 1, v * x), (0, 1.0),
maximum_iterations=iterations,
name="outer")
return array_ops.identity(r[1])
output = create_while_loop()
output = gradients_impl.gradients(output, v)[0]
result = sess.run(output, feed_dict={p: [0, 0, 0]})
print(result)
xla_context.Exit()
def testMaxItersSimple(self):
self.skipTest("Fails with v1 control flow")
    # This fails with the old (v1) control flow.
# self._testMaxItersSimple()
@test_util.enable_control_flow_v2
def testMaxItersSimpleV2(self):
self._testMaxItersSimple()
def _testNestedWhileLoopWithMaxItersFromOuterContext(self):
if is_compile_on_demand():
self.skipTest("list_ops are not supported in cpu_ondemand")
with self.session() as sess, self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
v = constant_op.constant(1.0)
p = array_ops.placeholder(dtype=dtypes.int32)
def mid_body_builder(iterations):
def mid_body(i, x):
r = control_flow_ops.while_loop(
lambda *_: True,
lambda i, x: (i + 1, v * x), (0, x),
maximum_iterations=iterations,
name="inner")
return (i + 1, gradients_impl.gradients(x + r[1], v)[0])
return mid_body
def outer_body(i, x):
iterations = array_ops.size(p, name="iterations")
return (i + 1, x + control_flow_ops.while_loop(
lambda *_: True,
mid_body_builder(iterations), (0, x),
maximum_iterations=iterations,
name="mid")[1])
def create_while_loop():
r = control_flow_ops.while_loop(
lambda *_: True,
outer_body, (0, 1.0),
maximum_iterations=5,
name="outer")
return array_ops.identity(r[1])
# p:placeholder
# j = 0
# i, x = 0, 1.
# while j++ < 5:
# i1, x1 = 0, x
# while i1++ < len(p):
# i2, x2 = 0, x1
# while i2++ < len(p):
# x2 = v * x2
# x1 = grad(x1 + x2, v)
# x = x1
# output = x
output = create_while_loop()
sess.run(output, feed_dict={p: [0, 0, 0]})
xla_context.Exit()
def testNestedWhileLoopWithMaxItersFromOuterContext(self):
self._testNestedWhileLoopWithMaxItersFromOuterContext()
@test_util.enable_control_flow_v2
def testNestedWhileLoopWithMaxItersFromOuterContextV2(self):
self._testNestedWhileLoopWithMaxItersFromOuterContext()
@test_util.enable_control_flow_v2
def testMap(self):
if is_compile_on_demand():
self.skipTest("list_ops are not supported in cpu_ondemand")
with self.session(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
nums = [1, 2, 3, 4, 5, 6]
elems = constant_op.constant(nums, name="data")
r = map_fn.map_fn(lambda x: math_ops.multiply(math_ops.add(x, 3), 2),
elems)
self.assertAllEqual(r, np.array([(x + 3) * 2 for x in nums]))
xla_context.Exit()
def is_compile_on_demand():
return ("TF_XLA_FLAGS" in os.environ and
"tf_xla_compile_on_demand" in os.environ["TF_XLA_FLAGS"])
if __name__ == "__main__":
os.environ["TF_XLA_FLAGS"] = ("--tf_xla_min_cluster_size=2 " +
os.environ.get("TF_XLA_FLAGS", ""))
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/while_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ArgMin and ArgMax Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ArgMinMaxTest(xla_test.XLATestCase):
def _assertOpOutputMatchesExpected(self, op, axis, output_type, op_input,
expected):
"""Verifies that 'op' produces 'expected' when fed input 'op_input' .
Args:
op: argmin or argmax operator to test.
axis: integer axis to reduce across.
output_type: numpy datatype of the output to produce.
op_input: numpy input array to use as input to 'op'.
expected: numpy array representing the expected output of 'op'.
"""
with self.session() as session:
with self.test_scope():
pinp = array_ops.placeholder(
dtypes.as_dtype(op_input.dtype), op_input.shape, name="a")
output = op(pinp, axis=axis, output_type=output_type)
result = session.run(output, {pinp: op_input})
self.assertAllEqual(result, expected)
def testArgMinMax(self):
# Complex numbers do not support argmin/argmax.
minmax_types = self.all_types & {np.int32, np.int64}
for dtype in minmax_types:
# output_type is a numpy data type that is used to specify the desired
# output type of the op as well as to convert the Python number to the
# array scalar of the type.
for output_type in minmax_types:
self._assertOpOutputMatchesExpected(
math_ops.argmax,
axis=0,
output_type=output_type,
op_input=np.array([1, 10, 27, 3, 3, 4], dtype=dtype),
expected=output_type(2))
self._assertOpOutputMatchesExpected(
math_ops.argmax,
axis=0,
output_type=output_type,
op_input=np.array([[4, 1, 7], [3, 2, 4]], dtype=dtype),
expected=np.array([0, 1, 0], dtype=output_type))
self._assertOpOutputMatchesExpected(
math_ops.argmax,
axis=1,
output_type=output_type,
op_input=np.array([[4, 1], [3, 2]], dtype=dtype),
expected=np.array([0, 0], dtype=output_type))
self._assertOpOutputMatchesExpected(
math_ops.argmin,
axis=0,
output_type=output_type,
op_input=np.array([3, 10, 27, 3, 2, 4], dtype=dtype),
expected=output_type(4))
self._assertOpOutputMatchesExpected(
math_ops.argmin,
axis=0,
output_type=output_type,
op_input=np.array([[4, 1, 7], [3, 2, 4]], dtype=dtype),
expected=np.array([1, 0, 1], dtype=output_type))
self._assertOpOutputMatchesExpected(
math_ops.argmin,
axis=1,
output_type=output_type,
op_input=np.array([[4, 1], [3, 2]], dtype=dtype),
expected=np.array([1, 1], dtype=output_type))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/compiler/tests/argminmax_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental library that exposes XLA operations directly in TensorFlow.
It is sometimes useful to be able to build HLO programs directly from
TensorFlow. This file provides Tensorflow operators that mirror the semantics of
HLO operators as closely as possible.
Note: There is no promise of backward or forward compatibility for operators
defined in this module. This is primarily because the underlying HLO operators
do not promise backward or forward compatibility.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tf2xla.ops import gen_xla_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# TODO(phawkins): provide wrappers for all XLA operators. Currently the missing
# ops include:
# infeed/outfeed (available via tf.contrib.tpu)
# collectives, e.g., cross-replica-sum (available via tf.contrib.tpu)
# conditional
# gather/scatter
# collapse
# This file reuses builtin names (following XLA's names, so we can call things
# like xla.max), so we capture the builtin versions here.
# pylint: disable=redefined-builtin
_max = max
_min = min
_slice = slice # pylint: disable=invalid-name
constant = constant_op.constant
# Unary operators.
# For most arithmetic operators there is a TensorFlow operator
# that exactly corresponds to each XLA operator. Rather than defining
# XLA-specific variants, we reuse the corresponding TensorFlow operator.
# TODO(phawkins): It would be even better to have TensorFlow operators that 1:1
# wrap every HLO operator, because that would allow us to be confident that the
# semantics match.
def _unary_op(fn):
"""Wrapper that restricts `fn` to have the correct signature."""
def unary_op_wrapper(x, name=None):
return fn(x, name=name)
return unary_op_wrapper
abs = _unary_op(math_ops.abs)
# TODO(phawkins): implement clz.
conj = _unary_op(math_ops.conj)
cos = _unary_op(math_ops.cos)
ceil = _unary_op(math_ops.ceil)
digamma = _unary_op(math_ops.digamma)
erf = _unary_op(math_ops.erf)
erfc = _unary_op(math_ops.erfc)
# TODO(phawkins): implement erfinv
exp = _unary_op(math_ops.exp)
expm1 = _unary_op(math_ops.expm1)
floor = _unary_op(math_ops.floor)
imag = _unary_op(math_ops.imag)
is_finite = _unary_op(math_ops.is_finite)
lgamma = _unary_op(math_ops.lgamma)
log = _unary_op(math_ops.log)
log1p = _unary_op(math_ops.log1p)
logical_not = _unary_op(math_ops.logical_not)
neg = _unary_op(math_ops.neg)
real = _unary_op(math_ops.real)
# TODO(phawkins): unlike xla::Round, this rounds to even instead of zero for
# numbers halfway between two integers.
round = _unary_op(math_ops.round)
sin = _unary_op(math_ops.sin)
sign = _unary_op(math_ops.sign)
tanh = _unary_op(math_ops.tanh)
# Binary operators
# The main difference between TensorFlow and XLA binary ops is the broadcasting
# semantics. TensorFlow uses Numpy-style broadcasting semantics, whereas XLA
# requires an explicit specification of which dimensions to broadcast if the
# arguments have different ranks.
def _broadcasting_binary_op(fn):
"""Wraps a binary Tensorflow operator and performs XLA-style broadcasting."""
def broadcasting_binary_op_wrapper(x, y, broadcast_dims=None, name=None):
"""Inner wrapper function."""
broadcast_dims = broadcast_dims or []
broadcast_dims = ops.convert_to_tensor(broadcast_dims, dtypes.int64)
# Rather than relying on having static shape information in the TensorFlow
# graph, we use an XlaBroadcastHelper op that can compute the correct shapes
# at JIT compilation time.
x, y = gen_xla_ops.xla_broadcast_helper(x, y, broadcast_dims)
return fn(x, y, name=name)
return broadcasting_binary_op_wrapper
# Map from TF signed types to TF unsigned types.
_SIGNED_TO_UNSIGNED_TABLE = {
dtypes.int8: dtypes.uint8,
dtypes.int16: dtypes.uint16,
dtypes.int32: dtypes.uint32,
dtypes.int64: dtypes.uint64,
}
# Map from TF unsigned types to TF signed types.
_UNSIGNED_TO_SIGNED_TABLE = {
dtypes.uint8: dtypes.int8,
dtypes.uint16: dtypes.int16,
dtypes.uint32: dtypes.int32,
dtypes.uint64: dtypes.int64,
}
def _shift_right_logical_helper(x, y, name=None):
"""Performs an integer right logical shift irrespective of input type."""
assert y.dtype == x.dtype
dtype = x.dtype
signed = dtype in _SIGNED_TO_UNSIGNED_TABLE
if signed:
unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[dtype]
x = math_ops.cast(x, unsigned_dtype)
y = math_ops.cast(y, unsigned_dtype)
output = bitwise_ops.right_shift(x, y, name=name)
if signed:
output = math_ops.cast(output, dtype)
return output
def _shift_right_arithmetic_helper(x, y, name=None):
"""Performs an integer right arithmetic shift irrespective of input type."""
assert y.dtype == x.dtype
dtype = x.dtype
unsigned = dtype in _UNSIGNED_TO_SIGNED_TABLE
if unsigned:
signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[dtype]
x = math_ops.cast(x, signed_dtype)
y = math_ops.cast(y, signed_dtype)
output = bitwise_ops.right_shift(x, y, name=name)
if unsigned:
output = math_ops.cast(output, dtype)
return output
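# Worked example (illustrative only, not from the original module): for int8
# x = -8 (bit pattern 0b11111000) and y = 1, _shift_right_logical_helper casts
# to uint8 (248), shifts to 124 and casts back, yielding 124, whereas
# _shift_right_arithmetic_helper keeps the sign bit and yields -4. TensorFlow's
# bitwise_ops.right_shift is an arithmetic shift for signed types and a logical
# shift for unsigned types, which is why each helper casts its inputs first.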
add = _broadcasting_binary_op(math_ops.add)
sub = _broadcasting_binary_op(math_ops.sub)
mul = _broadcasting_binary_op(math_ops.mul)
div = _broadcasting_binary_op(math_ops.div)
rem = _broadcasting_binary_op(gen_math_ops.mod)
max = _broadcasting_binary_op(math_ops.maximum)
min = _broadcasting_binary_op(math_ops.minimum)
atan2 = _broadcasting_binary_op(math_ops.atan2)
complex = _broadcasting_binary_op(math_ops.complex)
logical_and = _broadcasting_binary_op(math_ops.logical_and)
logical_or = _broadcasting_binary_op(math_ops.logical_or)
logical_xor = _broadcasting_binary_op(math_ops.logical_xor)
eq = _broadcasting_binary_op(math_ops.equal)
ne = _broadcasting_binary_op(math_ops.not_equal)
ge = _broadcasting_binary_op(math_ops.greater_equal)
gt = _broadcasting_binary_op(math_ops.greater)
le = _broadcasting_binary_op(math_ops.less_equal)
lt = _broadcasting_binary_op(math_ops.less)
pow = _broadcasting_binary_op(math_ops.pow)
shift_left = _broadcasting_binary_op(bitwise_ops.left_shift)
shift_right_logical = _broadcasting_binary_op(_shift_right_logical_helper)
shift_right_arithmetic = _broadcasting_binary_op(_shift_right_arithmetic_helper)
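# A minimal usage sketch (an assumption, not part of the original module): the
# helper name _example_broadcast_add is hypothetical and only illustrates the
# XLA-style explicit broadcast performed by the wrappers above, where the
# single dimension of a rank-1 operand is matched against a named dimension of
# a rank-2 operand instead of relying on Numpy-style implicit broadcasting.
def _example_broadcast_add(matrix, row):
  # matrix: shape [2, 3]; row: shape [3]. broadcast_dims=[1] states that the
  # one dimension of `row` lines up with dimension 1 of `matrix`.
  return add(matrix, row, broadcast_dims=[1])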
def _binary_op(fn):
"""Wrapper that restricts `fn` to have the correct signature."""
def binary_op_wrapper(x, y, name=None):
return fn(x, y, name=name)
return binary_op_wrapper
transpose = _binary_op(array_ops.transpose)
rev = _binary_op(array_ops.reverse)
bitcast_convert_type = array_ops.bitcast
def broadcast(x, dims, name=None):
x = ops.convert_to_tensor(x)
shape = array_ops.concat([constant_op.constant(dims),
array_ops.shape(x)],
axis=0)
return array_ops.broadcast_to(x, shape, name=name)
def clamp(a, x, b, name=None):
return min(max(a, x, name=name), b, name=name)
concatenate = array_ops.concat
def conv(lhs,
rhs,
window_strides,
padding,
lhs_dilation,
rhs_dilation,
dimension_numbers,
feature_group_count=1,
precision_config=None,
name=None):
"""Wraps the XLA ConvGeneralDilated operator.
ConvGeneralDilated is the most general form of XLA convolution and is
documented at
https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
Args:
lhs: the input tensor
rhs: the kernel tensor
window_strides: the inter-window strides
    padding: the padding to apply at the start and end of each input dimension
lhs_dilation: dilation to apply between input elements
rhs_dilation: dilation to apply between kernel elements
dimension_numbers: a `ConvolutionDimensionNumbers` proto.
feature_group_count: number of feature groups for grouped convolution.
precision_config: a `xla.PrecisionConfig` proto.
name: an optional name for the operator
Returns:
A tensor representing the output of the convolution.
"""
precision_config_proto = ""
if precision_config:
precision_config_proto = precision_config.SerializeToString()
return gen_xla_ops.xla_conv(
lhs,
rhs,
window_strides=window_strides,
padding=padding,
lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation,
feature_group_count=feature_group_count,
dimension_numbers=dimension_numbers.SerializeToString(),
precision_config=precision_config_proto,
name=name)
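# Illustrative note (an assumption, not taken from the original module): for an
# NHWC input with an HWIO kernel, the `dimension_numbers` proto would mark
# dimension 0 as the input batch dimension, dimension 3 as the input feature
# dimension and dimensions [1, 2] as the input spatial dimensions, with
# analogous fields for the kernel and output; `padding` is then a list of
# (low, high) pairs, one per spatial dimension.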
convert_element_type = math_ops.cast
def dot(lhs, rhs, name=None):
return math_ops.tensordot(lhs, rhs, axes=1, name=name)
def dot_general(lhs, rhs, dimension_numbers, precision_config=None, name=None):
precision_config_proto = ""
if precision_config:
precision_config_proto = precision_config.SerializeToString()
return gen_xla_ops.xla_dot(
lhs,
rhs,
dimension_numbers=dimension_numbers.SerializeToString(),
precision_config=precision_config_proto,
name=name)
def self_adjoint_eig(a, lower, max_iter, epsilon):
return gen_xla_ops.xla_self_adjoint_eig(a, lower, max_iter, epsilon)
def svd(a, max_iter, epsilon, precision_config=None):
precision_config_proto = ""
if precision_config:
precision_config_proto = precision_config.SerializeToString()
return gen_xla_ops.xla_svd(a, max_iter, epsilon, precision_config_proto)
dynamic_slice = gen_xla_ops.xla_dynamic_slice
dynamic_update_slice = gen_xla_ops.xla_dynamic_update_slice
einsum = gen_xla_ops.xla_einsum
# TODO(phawkins): generalize tf.pad to support interior padding, and then remove
# the XLA-specific pad operator.
pad = gen_xla_ops.xla_pad
def random_normal(mu, sigma, dims, name=None):
mu = ops.convert_to_tensor(mu)
return random_ops.random_normal(
dims, mean=mu, stddev=sigma, dtype=mu.dtype, name=name)
def random_uniform(minval, maxval, dims, name=None):
minval = ops.convert_to_tensor(minval)
return random_ops.random_uniform(
dims, minval, maxval, dtype=minval.dtype, name=name)
recv = gen_xla_ops.xla_recv
reduce = gen_xla_ops.xla_reduce
def reduce_window(operand,
init,
reducer,
window_dimensions,
window_strides=None,
base_dilations=None,
window_dilations=None,
padding=None,
name=None):
"""Wraps the XLA ReduceWindow operator.
ReduceWindow is documented at
https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow .
Args:
operand: the input tensor
init: a scalar tensor representing the initial value for the reduction
reducer: a reduction function that combines a pair of scalars.
window_dimensions: shape of the window, as a list of integers
    window_strides: inter-window strides, as a list of integers. Optional; if
      omitted, defaults to strides of 1.
    base_dilations: dilation to apply between the elements of 'operand', as a
      list of integers. Optional; if omitted, defaults to no dilation.
    window_dilations: dilation to apply between the elements of the window, as
      a list of integers. Optional; if omitted, defaults to no dilation.
    padding: padding to apply to 'operand'. List of (low, high) pairs of
      integers that specify the padding to apply before and after each
      dimension. Optional; if omitted, defaults to no padding.
name: the operator name, or None.
Returns:
A tensor that represents the output of the reduce_window operator.
"""
window_strides = window_strides or [1] * len(window_dimensions)
base_dilations = base_dilations or [1] * len(window_dimensions)
window_dilations = window_dilations or [1] * len(window_dimensions)
padding = padding or [(0, 0)] * len(window_dimensions)
return gen_xla_ops.xla_reduce_window(
input=operand,
init_value=init,
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=base_dilations,
window_dilations=window_dilations,
padding=padding,
computation=reducer,
name=name)
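# A minimal usage sketch (an assumption, not part of the original module): the
# helper name _example_reduce_window_max_pool is hypothetical. With a scalar
# maximum reducer built via function.Defun, reduce_window over a 2x2 window
# with stride 2 reproduces 2x2 max pooling on a rank-4 float32 NHWC tensor.
def _example_reduce_window_max_pool(operand):
  # `function` is imported locally to keep this illustrative helper
  # self-contained; `dtypes`, `constant` and `math_ops` come from the module
  # imports above.
  from tensorflow.python.framework import function as _function

  @_function.Defun(dtypes.float32, dtypes.float32)
  def _max_reducer(a, b):
    return math_ops.maximum(a, b)

  return reduce_window(
      operand,
      constant(float("-inf"), dtype=dtypes.float32),
      _max_reducer,
      window_dimensions=[1, 2, 2, 1],
      window_strides=[1, 2, 2, 1])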
replica_id = gen_xla_ops.xla_replica_id
def reshape(x, new_sizes, dimensions=None, name=None):
if dimensions is not None:
x = array_ops.transpose(x, dimensions)
x = array_ops.reshape(x, new_sizes, name=name)
return x
def select(condition, x, y, name=None):
return array_ops.where(condition, x, y, name)
select_and_scatter = gen_xla_ops.xla_select_and_scatter
send = gen_xla_ops.xla_send
def slice(x, start_dims, limit_dims, strides):
spec = [
_slice(start, limit, stride)
for (start, limit, stride) in zip(start_dims, limit_dims, strides)
]
return x[tuple(spec)]
sort = gen_xla_ops.xla_sort
key_value_sort = gen_xla_ops.xla_key_value_sort
while_loop = gen_xla_ops.xla_while
dequantize = gen_xla_ops.xla_dequantize
|
tensorflow-master
|
tensorflow/compiler/tf2xla/python/xla.py
|
"""Gradients for XLA ops."""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
@ops.RegisterGradient("XlaClusterOutput")
def _XlaClusterOutputGrad(_, grad):
del grad # unused
raise RuntimeError("Gradient computation of graph in xla.compile() is "
"prohibited because it can cause performance degradation."
"Please move gradient computation inside xla.compile().")
|
tensorflow-master
|
tensorflow/compiler/jit/ops/xla_ops_grad.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate tensorflow graphs for testing tfcompile."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import app
from tensorflow.python.training import saver as saver_lib
FLAGS = None
def tfadd(_):
x = constant_op.constant([1], name='x_const')
y = constant_op.constant([2], name='y_const')
math_ops.add(x, y, name='x_y_sum')
def tfadd_with_ckpt(out_dir):
x = array_ops.placeholder(dtypes.int32, name='x_hold')
y = variables.VariableV1(constant_op.constant([0]), name='y_saved')
math_ops.add(x, y, name='x_y_sum')
init_op = variables.global_variables_initializer()
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
with session.Session() as sess:
sess.run(init_op)
sess.run(y.assign(y + 42))
# Without the checkpoint, the variable won't be set to 42.
ckpt = os.path.join(out_dir, 'test_graph_tfadd_with_ckpt.ckpt')
saver.save(sess, ckpt)
def tfadd_with_ckpt_saver(out_dir):
x = array_ops.placeholder(dtypes.int32, name='x_hold')
y = variables.VariableV1(constant_op.constant([0]), name='y_saved')
math_ops.add(x, y, name='x_y_sum')
init_op = variables.global_variables_initializer()
saver = saver_lib.Saver(name='abcprefix', write_version=saver_pb2.SaverDef.V1)
with session.Session() as sess:
sess.run(init_op)
sess.run(y.assign(y + 42))
# Without the checkpoint, the variable won't be set to 42.
ckpt_file = os.path.join(out_dir, 'test_graph_tfadd_with_ckpt_saver.ckpt')
saver.save(sess, ckpt_file)
# Without the SaverDef, the restore op won't be named correctly.
saver_file = os.path.join(out_dir, 'test_graph_tfadd_with_ckpt_saver.saver')
with open(saver_file, 'wb') as f:
f.write(saver.as_saver_def().SerializeToString())
def tfassert_eq(_):
x = array_ops.placeholder(dtypes.int32, name='x_hold')
y = array_ops.placeholder(dtypes.int32, name='y_hold')
control_flow_ops.Assert(
math_ops.equal(x, y), ['Expected x == y.'], name='assert_eq')
math_ops.add(x, math_ops.negative(y), name='x_y_diff')
def tfcond(_):
p = array_ops.placeholder(dtypes.bool, name='p_hold')
x = array_ops.placeholder(dtypes.int32, name='x_hold')
y = array_ops.placeholder(dtypes.int32, name='y_hold')
z = control_flow_ops.cond(p, lambda: x, lambda: y)
array_ops.identity(z, name='result')
def tfgather(_):
params = array_ops.placeholder(dtypes.float32, name='params')
indices = array_ops.placeholder(dtypes.int32, name='indices')
array_ops.gather(params, indices, name='gather_output')
def tfmatmul(_):
x = array_ops.placeholder(dtypes.float32, name='x_hold')
y = array_ops.placeholder(dtypes.float32, name='y_hold')
math_ops.matmul(x, y, name='x_y_prod')
def tfmatmulandadd(_):
# This tests multiple outputs.
x = array_ops.placeholder(dtypes.float32, name='x_hold')
y = array_ops.placeholder(dtypes.float32, name='y_hold')
math_ops.matmul(x, y, name='x_y_prod')
math_ops.add(x, y, name='x_y_sum')
def tffunction(_):
@function.Defun(dtypes.int32, dtypes.int32)
def test_func(a, b):
return a + b
x = constant_op.constant([1], name='x_const')
y = constant_op.constant([2], name='y_const')
test_func(x, y, name='func_call') # pylint: disable=unexpected-keyword-arg
def tfsplits(_):
"""A more complex graph, including splits."""
x = array_ops.placeholder(dtypes.float32, shape=[2, 2], name='x')
y = array_ops.placeholder(dtypes.float32, shape=[2, 2], name='y')
for _ in range(3):
x0, x1 = array_ops.split(x, 2, 0)
y0, y1 = array_ops.split(y, 2, 0)
x0 += 1
y0 += 1
z = math_ops.matmul(x, y, name='x_y_prod')
a = array_ops.concat([x0, y1], axis=0, name='concat_x0_y1')
b = array_ops.concat([y0, x1], axis=0, name='concat_y0_x1')
x = math_ops.matmul(a, b, name='a_b')
y = math_ops.add(x, z)
array_ops.identity(y, name='result')
def tftop_k(_):
x = array_ops.placeholder(dtypes.int32, shape=[5], name='x')
output = nn_ops.top_k(x, 2, name='values')
array_ops.identity(output[1], name='indices')
def tfvariable(_):
x = variables.Variable(1000.0, name='x')
old_x = x.value()
with ops.control_dependencies([old_x]):
new_x = x.assign_add(42.0)
array_ops.stack([old_x, new_x], name='result')
def tfvariable_sequential_updates(_):
x = variables.Variable(1.0, name='x')
y = variables.Variable(1.0, name='y')
updates = control_flow_ops.no_op()
for _ in range(3):
with ops.control_dependencies([updates]):
x_val = x.read_value() + y
updates = x.assign_sub(0.1 * x_val)
array_ops.identity(updates, name='result')
def write_graph(build_graph, out_dir):
"""Build a graph using build_graph and write it out."""
g = ops.Graph()
with g.as_default():
build_graph(out_dir)
filename = os.path.join(out_dir, 'test_graph_%s.pb' % build_graph.__name__)
with open(filename, 'wb') as f:
f.write(g.as_graph_def().SerializeToString())
def main(_):
write_graph(tfadd, FLAGS.out_dir)
write_graph(tfadd_with_ckpt, FLAGS.out_dir)
write_graph(tfadd_with_ckpt_saver, FLAGS.out_dir)
write_graph(tfassert_eq, FLAGS.out_dir)
write_graph(tfcond, FLAGS.out_dir)
write_graph(tffunction, FLAGS.out_dir)
write_graph(tfgather, FLAGS.out_dir)
write_graph(tfmatmul, FLAGS.out_dir)
write_graph(tfmatmulandadd, FLAGS.out_dir)
write_graph(tfsplits, FLAGS.out_dir)
write_graph(tftop_k, FLAGS.out_dir)
write_graph(tfvariable, FLAGS.out_dir)
write_graph(tfvariable_sequential_updates, FLAGS.out_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--out_dir',
type=str,
default='',
help='Output directory for graphs, checkpoints and savers.')
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/compiler/aot/tests/make_test_graphs.py
|
from esbn_transformer.esbn_transformer import EsbnTransformer
|
esbn-transformer-main
|
esbn_transformer/__init__.py
|
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def max_neg_value(t):
return -torch.finfo(t.dtype).max
def rearrange_all(tensors, *args, **kwargs):
return map(lambda t: rearrange(t, *args, **kwargs), tensors)
# feedforward
class GroupLayerNorm(nn.Module):
def __init__(self, dim, groups = 1, eps = 1e-5):
super().__init__()
self.eps = eps
self.groups = groups
self.g = nn.Parameter(torch.ones(1, groups, dim, 1))
self.b = nn.Parameter(torch.zeros(1, groups, dim, 1))
def forward(self, x):
x = rearrange(x, 'b (g d) n -> b g d n', g = self.groups)
std = torch.var(x, dim = 2, unbiased = False, keepdim = True).sqrt()
mean = torch.mean(x, dim = 2, keepdim = True)
out = (x - mean) / (std + self.eps) * self.g + self.b
return rearrange(out, 'b g d n -> b (g d) n')
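# GroupLayerNorm normalizes each of the `groups` channel blocks independently,
# so with groups = 2 the sensory stream and the symbol stream keep separate
# normalization statistics.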
class PreNorm(nn.Module):
def __init__(
self,
dim,
fn,
groups = 1
):
super().__init__()
self.norm = GroupLayerNorm(dim, groups = groups)
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class FeedForward(nn.Module):
def __init__(
self,
*,
dim,
mult = 4,
groups = 1
):
super().__init__()
input_dim = dim * groups
hidden_dim = dim * mult * groups
self.net = nn.Sequential(
nn.Conv1d(input_dim, hidden_dim, 1, groups = groups),
nn.GELU(),
nn.Conv1d(hidden_dim, input_dim, 1, groups = groups)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
causal = False,
groups = 1
):
super().__init__()
self.scale = dim_head ** -0.5
self.groups = groups
self.heads = heads
self.causal = causal
input_dim = dim * groups
inner_dim = dim_head * heads * groups
self.to_q = nn.Conv1d(input_dim, inner_dim, 1, bias = False)
self.to_kv = nn.Conv1d(input_dim, inner_dim * 2, 1, bias = False)
self.to_out = nn.Conv1d(inner_dim, input_dim, 1)
def forward(self, x, mask = None, context = None):
n, device, h, g, causal = x.shape[2], x.device, self.heads, self.groups, self.causal
context = default(context, x)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = 1))
q, k, v = rearrange_all((q, k, v), 'b (g h d) n -> (b g h) n d', g = g, h = h)
q = q * self.scale
sim = einsum('b i d, b j d -> b i j', q, k)
if g > 1:
            # when a symbol group is present (g > 1), cumulatively summing the
            # attention logits across groups adds the sensory-stream attention
            # pattern onto the symbol stream, letting the network bind symbols
            # using the attention computed from the sensory side
sim = rearrange(sim, '(b g h) i j -> b g h i j', g = g, h = h)
sim = sim.cumsum(dim = 1)
sim = rearrange(sim, 'b g h i j -> (b g h) i j')
if exists(mask):
mask = repeat(mask, 'b n -> (b g h) n', h = h, g = g)
mask = rearrange(mask, 'b n -> b n ()') * rearrange(mask, 'b n -> b () n')
mask_value = max_neg_value(sim)
sim = sim.masked_fill(~mask, mask_value)
if causal:
causal_mask = torch.ones((n, n), device = device).triu(1).bool()
mask_value = max_neg_value(sim)
sim = sim.masked_fill(causal_mask, mask_value)
attn = sim.softmax(dim = -1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b g h) n d -> b (g h d) n', h = h, g = g)
return self.to_out(out)
class TransformerBlock(nn.Module):
def __init__(
self,
*,
dim,
causal = False,
dim_head = 64,
heads = 8,
ff_mult = 4,
groups = 1
):
super().__init__()
self.attn = PreNorm(dim, Attention(dim = dim, dim_head = dim_head, heads = heads, causal = causal, groups = groups), groups = groups)
self.ff = PreNorm(dim, FeedForward(dim = dim, mult = ff_mult, groups = groups), groups = groups)
def forward(self, x, mask = None):
x = self.attn(x, mask = mask) + x
x = self.ff(x) + x
return x
# main class
class EsbnTransformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
num_tokens,
max_seq_len,
causal = False,
dim_head = 64,
heads = 8,
ff_mult = 4
):
super().__init__()
self.dim = dim
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.layers = nn.ModuleList([])
self.pre_transformer_block = TransformerBlock(dim = dim, causal = causal, dim_head = dim_head, heads = heads)
self.symbols = nn.Parameter(torch.randn(max_seq_len, dim))
for _ in range(depth):
self.layers.append(TransformerBlock(dim = dim, causal = causal, dim_head = dim_head, heads = heads, groups = 2))
self.post_transformer_block = TransformerBlock(dim = dim, causal = causal, dim_head = dim_head, heads = heads,)
self.to_logits = nn.Sequential(
Rearrange('b d n -> b n d'),
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(self, x, mask = None):
b, n, d, device = *x.shape, self.dim, x.device
x = self.token_emb(x)
pos_emb = self.pos_emb(torch.arange(n, device = device))
pos_emb = rearrange(pos_emb, 'n d -> () n d')
x = x + pos_emb
x = rearrange(x, 'b n d -> b d n')
x = self.pre_transformer_block(x, mask = mask)
x = rearrange(x, 'b d n -> b () d n')
        # take the first n symbol embeddings, shape (n, d), to match the sequence
        symbols = self.symbols[:n]
symbols = repeat(symbols, 'n d -> b () d n', b = b)
x = torch.cat((x, symbols), dim = 1)
x = rearrange(x, 'b ... n -> b (...) n')
for block in self.layers:
x = block(x, mask = mask)
x = rearrange(x, 'b (s d) n -> b s d n', s = 2)
x = x[:, 1]
x = self.post_transformer_block(x, mask = mask)
return self.to_logits(x)
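# Hypothetical usage sketch (names and shapes assumed, not part of the original
# file):
#   model = EsbnTransformer(dim = 512, depth = 6, num_tokens = 256, max_seq_len = 128)
#   tokens = torch.randint(0, 256, (1, 128))
#   logits = model(tokens)  # (1, 128, 256): one distribution over tokens per position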
|
esbn-transformer-main
|
esbn_transformer/esbn_transformer.py
|
from setuptools import setup, find_packages
setup(
name="mlp-gpt-jax",
packages=find_packages(),
version="0.0.20",
license="MIT",
description="MLP GPT - Jax",
author="Phil Wang",
author_email="",
url="https://github.com/lucidrains/mlp-gpt-jax",
keywords=[
"artificial intelligence",
"deep learning",
"language model",
"multilayered-perceptron",
"jax"
],
install_requires=[
"click",
"click-option-group",
"einops>=0.3",
"dm-haiku",
"jax",
"jaxlib",
"optax",
"torch",
"tqdm"
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
],
)
|
mlp-gpt-jax-main
|
setup.py
|
from random import randrange
import tqdm
import gzip
import numpy as np
from torch.utils.data import DataLoader, Dataset
import jax
from jax import nn, random, jit
from optax import adam, clip_by_global_norm, chain, apply_updates, apply_every
from haiku import PRNGSequence
from mlp_gpt_jax import TransformedMLPGpt
from mlp_gpt_jax.utils import sample, get_train_loss_fn
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
MAX_GRAD_NORM = 0.5
VALIDATE_EVERY = 100
SAMPLE_EVERY = 500
SEQ_LEN = 768
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
X = np.fromstring(file.read(int(95e6)), dtype=np.uint8)
data_train, data_val = np.split(X, [int(90e6)])
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = randrange(0, self.data.shape[0] - self.seq_len - 1)
return self.data[rand_start: rand_start + self.seq_len + 1]
def __len__(self):
return self.data.shape[0] // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# setup model and params
model_kwargs = dict(
num_tokens = 256,
dim = 512,
seq_len = SEQ_LEN,
depth = 8,
attn_dim = 32,
)
train_model = TransformedMLPGpt(**model_kwargs, layer_survival_prob = 0.95)
eval_model = TransformedMLPGpt(**model_kwargs)
rng = PRNGSequence(42)
params = train_model.init(next(rng), train_dataset[0][:-1])
loss_fn = get_train_loss_fn(train_model)
# optimizer
optim = chain(
clip_by_global_norm(MAX_GRAD_NORM),
adam(LEARNING_RATE),
apply_every(GRADIENT_ACCUMULATE_EVERY)
)
optim_state = optim.init(params)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
data = next(train_loader).numpy()
loss, grads = loss_fn(params, next(rng), data)
updates, optim_state = optim.update(grads, optim_state, params)
params = apply_updates(params, updates)
if i % GRADIENT_ACCUMULATE_EVERY == 0:
print(f'loss: {loss.item()}')
if i % SAMPLE_EVERY == 0:
valid_data = next(val_loader).numpy()
prime = valid_data[0][:100]
prime_str = decode_tokens(prime)
print(prime_str, "\n", "*" * 40)
sampled = sample(rng, jit(eval_model.apply), params, prime, SEQ_LEN, top_k = 25)
sampled_str = decode_tokens(sampled[100:])
print(sampled_str)
|
mlp-gpt-jax-main
|
train.py
|
from mlp_gpt_jax.mlp_gpt_jax import MLPGpt, TransformedMLPGpt
|
mlp-gpt-jax-main
|
mlp_gpt_jax/__init__.py
|
from functools import partial
import jax
from jax import random
from jax import nn
import jax.numpy as np
import haiku as hk
from haiku import initializers
from einops import rearrange
# constants
EPS = 1e-3
ATTN_MASK_VALUE = -1e10
# helpers
LayerNorm = partial(hk.LayerNorm, create_scale = True, create_offset = True, axis = -1)
def exists(val):
return val is not None
# classes
class Attention(hk.Module):
def __init__(
self,
*,
dim_out,
dim_head
):
super().__init__()
self.scale = dim_head ** -0.5
self.to_qkv = hk.Linear(dim_head * 3)
self.to_out = hk.Linear(dim_out)
def __call__(self, x):
n = x.shape[0]
qkv = self.to_qkv(x)
q, k, v = np.split(qkv, 3, axis = -1)
sim = np.einsum('i d, j d -> i j', q, k) * self.scale
mask = np.triu(np.ones((n, n), dtype = bool), 1)
sim = np.where(mask, ATTN_MASK_VALUE, sim)
attn = nn.softmax(sim, axis = -1)
out = np.einsum('i j, j d -> i d', attn, v)
return self.to_out(out)
class SGU(hk.Module):
def __init__(
self,
*,
dim,
dim_out,
seq_len
):
super().__init__()
self.seq_len = seq_len
self.norm = LayerNorm()
self.proj_out = hk.Linear(dim_out)
def __call__(self, x, gate_res = None):
n = self.seq_len
x, gate = np.split(x, 2, axis = -1)
gate = self.norm(gate)
init_scale = EPS / n
init_eps = initializers.RandomUniform(minval = -init_scale, maxval = init_scale)
weights = hk.get_parameter('spatial_weights', shape = (n, n), init = init_eps)
biases = hk.get_parameter('spatial_biases', shape = (n, 1), init = np.ones)
mask = np.tril(np.ones((n, n)))
weights = weights * mask
gate = np.einsum('n d, m n -> m d', gate, weights)
gate += biases
if exists(gate_res):
gate += gate_res
x = x * gate
return self.proj_out(x)
class gMLP(hk.Module):
def __init__(
self,
*,
dim,
dim_ff,
seq_len,
name,
attn_dim = None
):
super().__init__(name = name)
self.attn = Attention(dim_head = attn_dim, dim_out = dim_ff // 2) if exists(attn_dim) else None
self.norm = LayerNorm()
self.proj_in = hk.Linear(dim_ff)
self.sgu = SGU(dim = dim_ff, dim_out = dim_ff // 2, seq_len = seq_len)
self.proj_out = hk.Linear(dim)
def __call__(self, x):
x = self.norm(x)
gate_res = self.attn(x) if exists(self.attn) else None
x = self.proj_in(x)
x = nn.gelu(x)
x = self.sgu(x, gate_res)
x = self.proj_out(x)
return x
class MaybeExecute(hk.Module):
def __init__(
self,
*,
prob_execute,
fn
):
super().__init__()
self.fn = fn
self.prob_execute = prob_execute
def __call__(self, x):
key = hk.next_rng_key()
p = random.bernoulli(key, p = self.prob_execute)
out = self.fn(x) * p + 0 * (1 - p)
return out / self.prob_execute
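# Note (illustrative, not part of the original file): MaybeExecute implements
# stochastic depth / LayerDrop. A layer runs with probability p and its output
# is divided by p, so the expected output matches always running the layer:
#   E[out] = p * fn(x) / p + (1 - p) * 0 = fn(x)
# The hypothetical helper below checks this numerically with a constant fn.
def _stochastic_depth_expectation_demo(key, p = 0.95, trials = 10000):
    fn_out = 2.                                   # pretend fn(x) == 2.0
    keep = random.bernoulli(key, p, (trials,))    # same decision as MaybeExecute
    outs = np.where(keep, fn_out, 0.) / p         # same rescaling as MaybeExecute
    return np.mean(outs)                          # ≈ fn_out for large `trials`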
class MLPGpt(hk.Module):
def __init__(
self,
*,
num_tokens,
dim,
seq_len,
depth,
heads = 1,
ff_mult = 4,
attn_dim = None,
clamp_gate = True,
layer_survival_prob = 1.
):
super().__init__()
self.embed = hk.Embed(num_tokens, dim)
gmlps = [gMLP(dim = dim, dim_ff = dim * ff_mult, seq_len = seq_len, name = f'gmlp{i}', attn_dim = attn_dim) for i in range(depth)]
self.layers = [MaybeExecute(prob_execute = layer_survival_prob, fn = gmlp) for gmlp in gmlps]
self.to_logits = hk.Sequential([
LayerNorm(),
hk.Linear(num_tokens)
])
def __call__(self, x):
x = self.embed(x)
for layer in self.layers:
x += layer(x)
return self.to_logits(x)
def TransformedMLPGpt(**kwargs):
@hk.transform
def inner(seq):
return MLPGpt(**kwargs)(seq)
return inner
|
mlp-gpt-jax-main
|
mlp_gpt_jax/mlp_gpt_jax.py
|
from jax import random, nn, value_and_grad, vmap, jit
from jax.lax import top_k
import jax.numpy as np
# helper functions
def exists(val):
return val is not None
def log(t, eps = 1e-20):
return np.log(t + eps)
# training functions
def cross_entropy(logits, targets, axis = -1):
logprobs = nn.log_softmax(logits, axis = axis)
nll = np.take_along_axis(logprobs, np.expand_dims(targets, axis = axis), axis = axis)
ce = -np.mean(nll)
return ce
def get_train_loss_fn(model):
batch_model_apply = jit(vmap(model.apply, in_axes = (None, None, 0), out_axes = 0))
@value_and_grad
def loss_fn(params, key, data):
inp, labels = data[:, :-1], data[:, 1:]
logits = batch_model_apply(params, key, inp)
return cross_entropy(logits, labels, axis = -1)
return loss_fn
# sampling functions
def select_top_k(tensor, k):
values, _ = top_k(tensor, k)
mask = tensor > values.min()
return mask, np.where(mask, tensor, 0.)
def gumbel_noise(rng, shape):
noise = random.uniform(rng, shape = shape, minval = 0., maxval = 1.)
return -log(-log(noise))
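# Note (illustrative, not part of the original file): adding Gumbel(0, 1) noise
# to logits and taking the argmax is equivalent to sampling from
# softmax(logits) -- the Gumbel-max trick. `sample` below relies on this,
# optionally zeroing the noise outside the top-k logits first. A minimal sketch
# with a hypothetical helper name:
def _gumbel_max_sample_demo(rng_key, logits):
    noise = gumbel_noise(rng_key, logits.shape)
    return np.argmax(logits + noise, axis = -1)   # one draw from softmax(logits)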
def sample(rng, fn, params, prime, length, top_k = None):
start_pos = prime.shape[-1]
seq = np.pad(prime, (0, length - prime.shape[-1]))
one_hots = np.eye(length, dtype = int)
for curr_pos in range(start_pos, length):
logits = fn(params, next(rng), seq)
logits = logits[curr_pos - 1]
noise = gumbel_noise(next(rng), logits.shape)
if exists(top_k):
mask, logits = select_top_k(logits, top_k)
noise *= mask
logits += noise
sampled_ind = np.argmax(logits, axis = -1)
one_hot = one_hots[curr_pos]
seq += one_hot * sampled_ind
return seq
|
mlp-gpt-jax-main
|
mlp_gpt_jax/utils.py
|
from setuptools import setup, find_packages
setup(
name = 'classifier-free-guidance-pytorch',
packages = find_packages(exclude=[]),
include_package_data = True,
version = '0.2.2',
license='MIT',
description = 'Classifier Free Guidance - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/classifier-free-guidance-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'classifier free guidance',
'text conditioning and guidance'
],
install_requires=[
'beartype',
'einops>=0.6',
'ftfy',
'open-clip-torch>=2.8.0',
'torch>=1.6',
'transformers'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
classifier-free-guidance-pytorch-main
|
setup.py
|
from typing import List
from beartype import beartype
import torch
import transformers
from transformers import T5Tokenizer, T5EncoderModel, T5Config
transformers.logging.set_verbosity_error()
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# config
MAX_LENGTH = 256
DEFAULT_T5_NAME = 'google/t5-v1_1-base'
T5_CONFIGS = {}
# singleton globals
def get_tokenizer(name):
tokenizer = T5Tokenizer.from_pretrained(name)
return tokenizer
def get_model(name):
model = T5EncoderModel.from_pretrained(name)
return model
def get_model_and_tokenizer(name):
global T5_CONFIGS
if name not in T5_CONFIGS:
T5_CONFIGS[name] = dict()
if "model" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["model"] = get_model(name)
if "tokenizer" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["tokenizer"] = get_tokenizer(name)
return T5_CONFIGS[name]['model'], T5_CONFIGS[name]['tokenizer']
def get_encoded_dim(name):
if name not in T5_CONFIGS:
# avoids loading the model if we only want to get the dim
config = T5Config.from_pretrained(name)
T5_CONFIGS[name] = dict(config=config)
elif "config" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["config"]
elif "model" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["model"].config
else:
assert False
return config.d_model
# encoding text
def t5_encode_text(texts, name = DEFAULT_T5_NAME, output_device = None):
t5, tokenizer = get_model_and_tokenizer(name)
if torch.cuda.is_available():
t5 = t5.cuda()
device = next(t5.parameters()).device
encoded = tokenizer.batch_encode_plus(
texts,
return_tensors = "pt",
padding = 'longest',
max_length = MAX_LENGTH,
truncation = True
)
input_ids = encoded.input_ids.to(device)
attn_mask = encoded.attention_mask.to(device)
t5.eval()
with torch.no_grad():
output = t5(input_ids = input_ids, attention_mask = attn_mask)
encoded_text = output.last_hidden_state.detach()
attn_mask = attn_mask.bool()
if not exists(output_device):
return encoded_text, attn_mask
encoded_text = encoded_text.to(output_device)
attn_mask = attn_mask.to(output_device)
return encoded_text, attn_mask
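# Usage sketch (illustrative, not part of the original file): embed a couple of
# captions with the default T5 encoder and get back per-token encodings plus the
# padding mask. The helper name and example texts are hypothetical.
def _t5_encode_text_demo():
    texts = ['a photo of a dog', 'a photo of a cat']
    encoded_text, attn_mask = t5_encode_text(texts)
    # encoded_text: (2, seq_len, d_model) float tensor; attn_mask: (2, seq_len) bool
    return encoded_text, attn_mask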
class T5Adapter():
def __init__(
self,
name
):
name = default(name, DEFAULT_T5_NAME)
t5, tokenizer = get_model_and_tokenizer(name)
if torch.cuda.is_available():
t5 = t5.cuda()
self.name = name
self.t5 = t5
self.tokenizer = tokenizer
@property
def dim_latent(self):
return get_encoded_dim(self.name)
@property
def max_text_len(self):
return MAX_LENGTH
@torch.no_grad()
@beartype
def embed_text(
self,
texts: List[str],
return_text_encodings = False,
output_device = None
):
device = next(self.t5.parameters()).device
encoded = self.tokenizer.batch_encode_plus(
texts,
return_tensors = "pt",
padding = 'longest',
max_length = MAX_LENGTH,
truncation = True
)
input_ids = encoded.input_ids.to(device)
attn_mask = encoded.attention_mask.to(device)
self.t5.eval()
with torch.no_grad():
output = self.t5(input_ids = input_ids, attention_mask = attn_mask)
encoded_text = output.last_hidden_state.detach()
attn_mask = attn_mask.bool()
encoded_text.masked_fill_(~attn_mask[..., None], 0.)
if not return_text_encodings:
numer = encoded_text.sum(dim = -2)
denom = attn_mask.sum(dim = -1)[..., None]
numer.masked_fill_(denom == 0, 0.)
mean_encodings = numer / denom.clamp(min = 1e-3)
return mean_encodings
return encoded_text.to(output_device)
|
classifier-free-guidance-pytorch-main
|
classifier_free_guidance_pytorch/t5.py
|
from classifier_free_guidance_pytorch.classifier_free_guidance_pytorch import NullConditioner, TextConditioner, AttentionTextConditioner
from classifier_free_guidance_pytorch.classifier_free_guidance_pytorch import classifier_free_guidance
from classifier_free_guidance_pytorch.open_clip import OpenClipAdapter
from classifier_free_guidance_pytorch.t5 import T5Adapter
|
classifier-free-guidance-pytorch-main
|
classifier_free_guidance_pytorch/__init__.py
|
from collections import namedtuple
from functools import wraps
from packaging import version
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
causal = False,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def get_mask(self, n, device):
if exists(self.mask) and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = repeat(k, 'b ... -> b h ...', h = heads)
if v.ndim == 3:
v = repeat(v, 'b ... -> b h ...', h = heads)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask):
if mask.ndim == 2:
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = self.causal
)
return out
def forward(self, q, k, v, mask = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.flash:
return self.flash_attn(q, k, v, mask = mask)
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
# similarity
sim = einsum(f"b h i d, {kv_einsum_eq} -> b h i j", q, k) * scale
# key padding mask
if exists(mask):
if mask.ndim == 2:
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum(f"b h i j, {kv_einsum_eq} -> b h i d", attn, v)
return out
|
classifier-free-guidance-pytorch-main
|
classifier_free_guidance_pytorch/attend.py
|
# take from https://github.com/openai/CLIP/blob/main/clip/simple_tokenizer.py
# to give users a quick easy start to training DALL-E without doing BPE
import torch
import html
import os
import ftfy
import regex as re
from functools import lru_cache
from pathlib import Path
# OpenAI simple tokenizer
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/bpe_simple_vocab_16e6.txt")
@lru_cache()
def bytes_to_unicode():
bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
cs = bs[:]
n = 0
for b in range(2 ** 8):
if b not in bs:
bs.append(b)
cs.append(2 ** 8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = Path(bpe_path).read_text(encoding='utf8').split('\n')
merges = merges[1:49152 - 256 - 2 + 1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v + '</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.vocab_size = 49408
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(
r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + (token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token + '</w>'
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(
self,
tokens,
remove_start_end = True,
pad_tokens = set()
):
if torch.is_tensor(tokens):
tokens = tokens.tolist()
if remove_start_end:
tokens = [token for token in tokens if token not in (49406, 49407, 0)]
text = ''.join([self.decoder[token] for token in tokens if token not in pad_tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
def tokenize(
self,
texts,
context_length = 256,
truncate_text = False
):
if isinstance(texts, str):
texts = [texts]
all_tokens = [self.encode(text) for text in texts]
max_context_length = max([len(tokens) for tokens in all_tokens])
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate_text:
tokens = tokens[:context_length]
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result, max_context_length
tokenizer = SimpleTokenizer()
|
classifier-free-guidance-pytorch-main
|
classifier_free_guidance_pytorch/tokenizer.py
|
from functools import wraps, partial
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat, pack, unpack
from beartype import beartype
from beartype.typing import Callable, Tuple, Optional, List, Literal, Union
from beartype.door import is_bearable
from inspect import signature
from classifier_free_guidance_pytorch.t5 import T5Adapter
from classifier_free_guidance_pytorch.open_clip import OpenClipAdapter
from classifier_free_guidance_pytorch.attend import Attend
# constants
COND_DROP_KEY_NAME = 'cond_drop_prob'
TEXTS_KEY_NAME = 'texts'
TEXT_EMBEDS_KEY_NAME = 'text_embeds'
TEXT_CONDITIONER_NAME = 'text_conditioner'
CONDITION_FUNCTION_KEY_NAME = 'cond_fns'
# helper functions
def exists(val):
return val is not None
def default(*values):
for value in values:
if exists(value):
return value
return None
def cast_tuple(val, length = 1):
return val if isinstance(val, tuple) else ((val,) * length)
def pack_one(x, pattern):
return pack([x], pattern)
def unpack_one(x, ps, pattern):
return unpack(x, ps, pattern)[0]
# tensor helpers
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
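# Note (illustrative, not part of the original file): prob_mask_like draws a
# per-row boolean "keep the conditioning" mask. During training, rows where the
# mask is False get their text embedding replaced by the learned null embedding
# (see TextConditioner.forward below), which is what enables classifier-free
# guidance at sampling time. Hypothetical helper for illustration:
def _prob_mask_like_demo():
    mask = prob_mask_like((8, 1), 0.75, torch.device('cpu'))
    return mask.float().mean()   # ≈ 0.75 of rows keep their conditioning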
# classifier free guidance with automatic text conditioning
@beartype
def classifier_free_guidance(
fn: Callable,
cond_drop_prob_keyname = COND_DROP_KEY_NAME,
texts_key_name = TEXTS_KEY_NAME,
text_embeds_key_name = TEXT_EMBEDS_KEY_NAME,
cond_fns_keyname = CONDITION_FUNCTION_KEY_NAME,
text_conditioner_name = TEXT_CONDITIONER_NAME
):
fn_params = signature(fn).parameters
auto_handle_text_condition = texts_key_name not in fn_params and text_embeds_key_name not in fn_params
assert not (auto_handle_text_condition and cond_fns_keyname not in fn_params), f'{cond_fns_keyname} must be in the wrapped function for autohandling texts -> conditioning functions - ex. forward(..., {cond_fns_keyname})'
@wraps(fn)
def inner(
self,
*args,
cond_scale: float = 1.,
rescale_phi: float = 0.,
**kwargs
):
@wraps(fn)
def fn_maybe_with_text(self, *args, **kwargs):
if auto_handle_text_condition:
texts = kwargs.pop('texts', None)
text_embeds = kwargs.pop('text_embeds', None)
assert not (exists(texts) and exists(text_embeds))
cond_fns = None
text_conditioner = getattr(self, text_conditioner_name, None)
# auto convert texts -> conditioning functions
if exists(texts) ^ exists(text_embeds):
assert is_bearable(texts, Optional[List[str]]), f'keyword `{texts_key_name}` must be a list of strings'
assert exists(text_conditioner) and is_bearable(text_conditioner, Conditioner), 'text_conditioner must be set on your network with the correct hidden dimensions to be conditioned on'
cond_drop_prob = kwargs.pop(cond_drop_prob_keyname, None)
text_condition_input = dict(texts = texts) if exists(texts) else dict(text_embeds = text_embeds)
cond_fns = text_conditioner(**text_condition_input, cond_drop_prob = cond_drop_prob)
elif isinstance(text_conditioner, NullConditioner):
cond_fns = text_conditioner()
kwargs.update(cond_fns = cond_fns)
return fn(self, *args, **kwargs)
# main classifier free guidance logic
if self.training:
assert cond_scale == 1, 'you cannot do condition scaling when in training mode'
return fn_maybe_with_text(self, *args, **kwargs)
assert cond_scale >= 1, 'invalid conditioning scale, must be greater or equal to 1'
kwargs_without_cond_dropout = {**kwargs, cond_drop_prob_keyname: 0.}
kwargs_with_cond_dropout = {**kwargs, cond_drop_prob_keyname: 1.}
logits = fn_maybe_with_text(self, *args, **kwargs_without_cond_dropout)
if cond_scale == 1:
return logits
null_logits = fn_maybe_with_text(self, *args, **kwargs_with_cond_dropout)
scaled_logits = null_logits + (logits - null_logits) * cond_scale
if rescale_phi <= 0:
return scaled_logits
# proposed in https://arxiv.org/abs/2305.08891
# as a way to prevent over-saturation with classifier free guidance
# works both in pixel as well as latent space as opposed to the solution from imagen
dims = tuple(range(1, logits.ndim - 1))
rescaled_logits = scaled_logits * (logits.std(dim = dims, keepdim = True) / scaled_logits.std(dim = dims, keepdim= True))
return rescaled_logits * rescale_phi + (1. - rescale_phi) * logits
return inner
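# Usage sketch (illustrative, not part of the original file): a network whose
# forward accepts `text_embeds` and `cond_drop_prob` can be wrapped directly by
# the decorator above. At eval time, calling it with cond_scale > 1 runs a
# conditional and an unconditional pass and combines them as
#   null_logits + (logits - null_logits) * cond_scale
# All names below are hypothetical and only meant to show the calling convention.
class _ToyConditionedNet(nn.Module):
    def __init__(self, dim = 16):
        super().__init__()
        self.proj = nn.Linear(dim, dim)

    @classifier_free_guidance
    def forward(self, x, *, text_embeds = None, cond_drop_prob = 0.):
        keep = prob_mask_like((x.shape[0], 1), 1. - cond_drop_prob, x.device)
        cond = torch.where(keep, text_embeds, torch.zeros_like(text_embeds))
        return self.proj(x + cond)

# net = _ToyConditionedNet().eval()
# out = net(torch.randn(2, 16), text_embeds = torch.randn(2, 16), cond_scale = 3.)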
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
dim_context = None,
norm_context = False,
num_null_kv = 0,
flash = False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
dim_context = default(dim_context, dim)
self.norm = nn.LayerNorm(dim)
self.context_norm = nn.LayerNorm(dim_context) if norm_context else nn.Identity()
self.attend = Attend(flash = flash)
self.num_null_kv = num_null_kv
self.null_kv = nn.Parameter(torch.randn(2, num_null_kv, dim_head))
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim_context, dim_head * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(
self,
x,
context = None,
mask = None
):
b = x.shape[0]
if exists(context):
context = self.context_norm(context)
kv_input = default(context, x)
x = self.norm(x)
q, k, v = self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1)
if self.num_null_kv > 0:
null_k, null_v = repeat(self.null_kv, 'kv n d -> kv b n d', b = b).unbind(dim = 0)
k = torch.cat((null_k, k), dim = -2)
v = torch.cat((null_v, v), dim = -2)
if exists(mask):
mask = F.pad(mask, (self.num_null_kv, 0), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
out = self.attend(q, k, v, mask = mask)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# dimension adapters
def rearrange_channel_last(fn):
@wraps(fn)
def inner(hiddens):
hiddens, ps = pack_one(hiddens, 'b * d')
conditioned = fn(hiddens)
return unpack_one(conditioned, ps, 'b * d')
return inner
def rearrange_channel_first(fn):
""" will adapt shape of (batch, feature, ...) for conditioning """
@wraps(fn)
def inner(hiddens):
hiddens, ps = pack_one(hiddens, 'b d *')
hiddens = rearrange(hiddens, 'b d n -> b n d')
conditioned = fn(hiddens)
conditioned = rearrange(conditioned, 'b n d -> b d n')
return unpack_one(conditioned, ps, 'b d *')
return inner
# conditioning modules
class FiLM(nn.Module):
def __init__(
self,
dim,
hidden_dim
):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim * 4),
nn.SiLU(),
nn.Linear(hidden_dim * 4, hidden_dim * 2)
)
nn.init.zeros_(self.net[-1].weight)
nn.init.zeros_(self.net[-1].bias)
def forward(self, conditions, hiddens):
scale, shift = self.net(conditions).chunk(2, dim = -1)
assert scale.shape[-1] == hiddens.shape[-1], f'unexpected hidden dimension {hiddens.shape[-1]} used for conditioning'
scale, shift = map(lambda t: rearrange(t, 'b d -> b 1 d'), (scale, shift))
return hiddens * (scale + 1) + shift
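# Usage sketch (illustrative, not part of the original file): FiLM maps a
# conditioning vector to per-channel (scale, shift) and applies
# hiddens * (scale + 1) + shift. The zero-initialised final layer makes it start
# out as an identity transform. Sizes below are hypothetical.
def _film_demo():
    film = FiLM(dim = 32, hidden_dim = 64)
    conditions = torch.randn(2, 32)
    hiddens = torch.randn(2, 10, 64)
    return film(conditions, hiddens)   # same shape as hiddens, identity at init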
class CrossAttention(nn.Module):
def __init__(
self,
dim,
hidden_dim,
heads = 8,
dim_head = 64,
flash = False
):
super().__init__()
self.attn = Attention(
dim = hidden_dim,
dim_context = dim,
norm_context = True,
num_null_kv = 1,
dim_head = dim_head,
heads = heads,
flash = flash
)
def forward(
self,
condition,
hiddens,
mask = None
):
return self.attn(hiddens, condition, mask = mask) + hiddens
# film text conditioning
CONDITION_CONFIG = dict(
t5 = T5Adapter,
clip = OpenClipAdapter
)
MODEL_TYPES = CONDITION_CONFIG.keys()
class Conditioner(nn.Module):
pass
# null conditioner
class Identity(nn.Module):
def forward(self, t, *args, **kwargs):
return t
@beartype
class NullConditioner(Conditioner):
def __init__(
self,
*,
num_null_conditioners: int
):
super().__init__()
self.cond_fns = tuple(Identity() for _ in range(num_null_conditioners))
self.register_buffer('_device_param', torch.tensor(0.), persistent = False)
@property
def device(self):
return next(self.buffers()).device
def embed_texts(self, texts: List[str]):
assert False, 'null conditioner cannot embed text'
def forward(self, *args, **kwargs) -> Tuple[Identity, ...]:
return self.cond_fns
# text conditioner with FiLM
@beartype
class TextConditioner(Conditioner):
def __init__(
self,
*,
hidden_dims: Tuple[int, ...],
model_types = 't5',
model_names = None,
cond_drop_prob = 0.,
hiddens_channel_first = True,
text_embed_stem_dim_mult = 2
):
super().__init__()
model_types = cast_tuple(model_types)
model_names = cast_tuple(model_names, length = len(model_types))
assert len(model_types) == len(model_names)
assert all([model_type in MODEL_TYPES for model_type in model_types])
text_models = []
for model_type, model_name in zip(model_types, model_names):
klass = CONDITION_CONFIG.get(model_type)
model = klass(model_name)
text_models.append(model)
self.text_models = text_models
self.latent_dims = [model.dim_latent for model in text_models]
self.conditioners = nn.ModuleList([])
self.hidden_dims = hidden_dims
self.num_condition_fns = len(hidden_dims)
self.hiddens_channel_first = cast_tuple(hiddens_channel_first, self.num_condition_fns) # whether hiddens to be conditioned is channel first or last
assert len(self.hiddens_channel_first) == self.num_condition_fns
self.cond_drop_prob = cond_drop_prob
total_latent_dim = sum(self.latent_dims)
mlp_stem_output_dim = total_latent_dim * text_embed_stem_dim_mult
self.text_embed_stem_mlp = nn.Sequential(
nn.Linear(total_latent_dim, mlp_stem_output_dim),
nn.SiLU()
)
for hidden_dim in hidden_dims:
self.conditioners.append(FiLM(mlp_stem_output_dim, hidden_dim))
self.null_text_embed = nn.Parameter(torch.randn(total_latent_dim))
self.register_buffer('_device_param', torch.tensor(0.), persistent = False)
@property
def device(self):
return next(self.buffers()).device
def embed_texts(self, texts: List[str]):
device = self.device
text_embeds = []
for text_model in self.text_models:
text_embed = text_model.embed_text(texts)
text_embeds.append(text_embed.to(device))
return torch.cat(text_embeds, dim = -1)
def forward(
self,
texts: Optional[List[str]] = None,
text_embeds: Optional[torch.Tensor] = None,
cond_drop_prob = None,
repeat_batch = 1, # for robotic transformer edge case
) -> Tuple[Callable, ...]:
assert exists(texts) ^ exists(text_embeds)
if self.training:
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
else:
assert exists(cond_drop_prob), 'when not training, cond_drop_prob must be explicitly set'
if exists(texts):
batch = len(texts)
elif exists(text_embeds):
batch = text_embeds.shape[0]
if not exists(text_embeds):
text_embeds = self.embed_texts(texts)
if cond_drop_prob > 0.:
prob_keep_mask = prob_mask_like((batch, 1), 1. - cond_drop_prob, device = self.device)
null_text_embeds = rearrange(self.null_text_embed, 'd -> 1 d')
text_embeds = torch.where(
prob_keep_mask,
text_embeds,
null_text_embeds
)
# text embed mlp stem, as done in unet conditioning in guided diffusion
text_embeds = self.text_embed_stem_mlp(text_embeds)
# prepare the conditioning functions
repeat_batch = cast_tuple(repeat_batch, self.num_condition_fns)
cond_fns = []
for cond, cond_hiddens_channel_first, cond_repeat_batch in zip(self.conditioners, self.hiddens_channel_first, repeat_batch):
cond_text_embeds = repeat(text_embeds, 'b ... -> (b r) ...', r = cond_repeat_batch)
cond_fn = partial(cond, cond_text_embeds)
wrapper_fn = rearrange_channel_first if cond_hiddens_channel_first else rearrange_channel_last
cond_fns.append(wrapper_fn(cond_fn))
return tuple(cond_fns)
# cross attention text conditioner
@beartype
class AttentionTextConditioner(Conditioner):
def __init__(
self,
*,
hidden_dims: Tuple[int, ...],
model_types = 't5',
model_names = None,
cond_drop_prob = 0.,
hiddens_channel_first = True,
dim_latent = None,
attn_dim_head = 64,
attn_heads = 8,
flash = True
):
super().__init__()
model_types = cast_tuple(model_types)
model_names = cast_tuple(model_names, length = len(model_types))
assert len(model_types) == len(model_names)
assert all([model_type in MODEL_TYPES for model_type in model_types])
text_models = []
for model_type, model_name in zip(model_types, model_names):
klass = CONDITION_CONFIG.get(model_type)
model = klass(model_name)
text_models.append(model)
self.text_models = text_models
self.to_latent_dims = nn.ModuleList([])
dim_latent = default(dim_latent, max([model.dim_latent for model in text_models]))
for model in text_models:
self.to_latent_dims.append(nn.Linear(model.dim_latent, dim_latent))
self.conditioners = nn.ModuleList([])
self.hidden_dims = hidden_dims
self.num_condition_fns = len(hidden_dims)
self.hiddens_channel_first = cast_tuple(hiddens_channel_first, self.num_condition_fns) # whether hiddens to be conditioned is channel first or last
assert len(self.hiddens_channel_first) == self.num_condition_fns
self.cond_drop_prob = cond_drop_prob
for hidden_dim in hidden_dims:
self.conditioners.append(CrossAttention(dim_latent, hidden_dim, flash = flash))
self.register_buffer('_device_param', torch.tensor(0.), persistent = False)
@property
def device(self):
return next(self.buffers()).device
def embed_texts(self, texts: List[str]):
device = self.device
text_embeds = []
for text_model, to_latent in zip(self.text_models, self.to_latent_dims):
text_embed = text_model.embed_text(texts, return_text_encodings = True)
text_embed = text_embed.to(device)
mask = (text_embed != 0).any(dim = -1)
text_embed = to_latent(text_embed)
text_embed = text_embed.masked_fill(~mask[..., None], 0.)
text_embeds.append(text_embed)
return torch.cat(text_embeds, dim = -2)
def forward(
self,
texts: Optional[List[str]] = None,
text_embeds: Optional[torch.Tensor] = None,
cond_drop_prob = None,
repeat_batch = 1, # for robotic transformer edge case
) -> Tuple[Callable, ...]:
assert exists(texts) ^ exists(text_embeds)
if self.training:
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
else:
assert exists(cond_drop_prob), 'when not training, cond_drop_prob must be explicitly set'
if exists(texts):
batch = len(texts)
elif exists(text_embeds):
batch = text_embeds.shape[0]
if not exists(text_embeds):
text_embeds = self.embed_texts(texts)
mask = (text_embeds != 0).any(dim = -1)
if cond_drop_prob > 0.:
prob_keep_mask = prob_mask_like((batch, 1), 1. - cond_drop_prob, device = self.device)
mask = mask & prob_keep_mask
# prepare the conditioning functions
repeat_batch = cast_tuple(repeat_batch, self.num_condition_fns)
cond_fns = []
for cond, cond_hiddens_channel_first, cond_repeat_batch in zip(self.conditioners, self.hiddens_channel_first, repeat_batch):
cond_text_embeds = repeat(text_embeds, 'b ... -> (b r) ...', r = cond_repeat_batch)
cond_mask = repeat(mask, 'b ... -> (b r) ...', r = cond_repeat_batch) if exists(mask) else None
cond_fn = partial(cond, cond_text_embeds, mask = cond_mask)
wrapper_fn = rearrange_channel_first if cond_hiddens_channel_first else rearrange_channel_last
cond_fns.append(wrapper_fn(cond_fn))
return tuple(cond_fns)
|
classifier-free-guidance-pytorch-main
|
classifier_free_guidance_pytorch/classifier_free_guidance_pytorch.py
|
from beartype import beartype
from typing import List
import torch
from torch import nn, einsum
import torch.nn.functional as F
import open_clip
from classifier_free_guidance_pytorch.tokenizer import tokenizer
# constants
DEFAULT_CLIP_NAME = 'ViT-B-32'
DEFAULT_PRETRAINED_CLIP = 'laion400m_e32'
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def l2norm(t):
return F.normalize(t, dim = -1)
# adapter
class OpenClipAdapter():
def __init__(
self,
name = DEFAULT_CLIP_NAME,
pretrained = DEFAULT_PRETRAINED_CLIP
):
name = default(name, DEFAULT_CLIP_NAME)
pretrained = default(pretrained, DEFAULT_PRETRAINED_CLIP)
clip, _, preprocess = open_clip.create_model_and_transforms(name, pretrained = pretrained)
self.clip = clip
clip.eval()
self.tokenizer = tokenizer
self.eos_id = 49407
text_attention_final = self.find_layer('ln_final')
self._dim_latent = text_attention_final.weight.shape[0]
self.handle = text_attention_final.register_forward_hook(self._hook)
self.clip_normalize = preprocess.transforms[-1]
self.cleared = False
def find_layer(self, layer):
modules = dict([*self.clip.named_modules()])
return modules.get(layer, None)
def clear(self):
if self.cleared:
return
self.handle.remove()
self.cleared = True
def _hook(self, _, inputs, outputs):
self.text_encodings = outputs
@property
def dim_latent(self):
return self._dim_latent
@property
def max_text_len(self):
return 77
@torch.no_grad()
@beartype
def embed_text(
self,
texts: List[str],
return_text_encodings = False,
output_device = None
):
texts, max_length = self.tokenizer.tokenize(texts)
texts = texts[..., :self.max_text_len]
text_embeds = self.clip.encode_text(texts)
texts = texts[..., :max_length]
if not return_text_encodings:
return l2norm(text_embeds).to(output_device)
is_eos_id = (texts == self.eos_id)
text_mask_excluding_eos = is_eos_id.cumsum(dim = -1) == 0
text_mask = F.pad(text_mask_excluding_eos, (1, -1), value = True)
text_mask = text_mask & (texts != 0)
assert not self.cleared
text_encodings = self.text_encodings[:, :max_length]
text_encodings = text_encodings.masked_fill(~text_mask[..., None], 0.)
del self.text_encodings
return text_encodings.float().to(output_device)
|
classifier-free-guidance-pytorch-main
|
classifier_free_guidance_pytorch/open_clip.py
|
from setuptools import setup, find_packages
setup(
name = 'mogrifier',
packages = find_packages(),
version = '0.0.3',
license='MIT',
description = 'Implementation of Mogrifier circuit from Deepmind',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/mogrifier',
keywords = ['artificial intelligence', 'natural language processing'],
install_requires=[
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
mogrifier-master
|
setup.py
|
from mogrifier.mogrifier import Mogrifier
|
mogrifier-master
|
mogrifier/__init__.py
|
import torch
from torch import nn
def weight(dim_in, dim_out, factorize_k = None):
if factorize_k is None:
return nn.Linear(dim_in, dim_out, bias = False)
assert factorize_k < dim_in and factorize_k < dim_out, 'k must be of relative lower rank'
return nn.Sequential(
nn.Linear(dim_in, factorize_k, bias = False),
nn.Linear(factorize_k, dim_out, bias = False)
)
class Mogrifier(nn.Module):
def __init__(self, dim, iters = 5, factorize_k = None):
super().__init__()
self.dim = dim
self.iters = iters
self.Q = weight(dim, dim, factorize_k)
self.R = weight(dim, dim, factorize_k) if iters > 1 else None
def forward(self, x, h):
shape = x.shape
*_, dim = shape
assert dim == self.dim, f'mogrifier accepts a dimension of {self.dim}'
x, h = map(lambda t: t.reshape(-1, dim), (x, h))
for ind in range(self.iters):
if (ind % 2) == 0:
x = 2 * self.Q(h).sigmoid() * x
else:
h = 2 * self.R(x).sigmoid() * h
x, h = map(lambda t: t.reshape(*shape), (x, h))
return x, h
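# Usage sketch (illustrative, not part of the original file): the Mogrifier
# alternately gates the input x and hidden state h with sigmoid projections of
# the other, as in the Mogrifier LSTM paper. Shapes below are hypothetical.
def _mogrifier_demo():
    m = Mogrifier(dim = 512, iters = 5, factorize_k = 16)
    x = torch.randn(1, 16, 512)
    h = torch.randn(1, 16, 512)
    return m(x, h)   # both tensors come back with their original shapes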
|
mogrifier-master
|
mogrifier/mogrifier.py
|
from setuptools import setup, find_packages
setup(
name = 'mixture-of-experts',
packages = find_packages(),
version = '0.2.3',
license='MIT',
description = 'Sparsely-Gated Mixture of Experts for Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/mixture-of-experts',
keywords = ['artificial intelligence', 'deep learning', 'transformers', 'mixture of experts'],
install_requires=[
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
mixture-of-experts-master
|
setup.py
|
from mixture_of_experts.mixture_of_experts import MoE, HeirarchicalMoE, Experts
|
mixture-of-experts-master
|
mixture_of_experts/__init__.py
|
import torch
from torch import nn
import torch.nn.functional as F
import math
from inspect import isfunction
# constants
MIN_EXPERT_CAPACITY = 4
# helper functions
def default(val, default_val):
default_val = default_val() if isfunction(default_val) else default_val
return val if val is not None else default_val
def cast_tuple(el):
return el if isinstance(el, tuple) else (el,)
# tensor related helper functions
def top1(t):
values, index = t.topk(k=1, dim=-1)
values, index = map(lambda x: x.squeeze(dim=-1), (values, index))
return values, index
def cumsum_exclusive(t, dim=-1):
num_dims = len(t.shape)
num_pad_dims = - dim - 1
pre_padding = (0, 0) * num_pad_dims
pre_slice = (slice(None),) * num_pad_dims
padded_t = F.pad(t, (*pre_padding, 1, 0)).cumsum(dim=dim)
return padded_t[(..., slice(None, -1), *pre_slice)]
# pytorch one hot throws an error if there are out of bound indices.
# tensorflow, in contrast, does not throw an error
def safe_one_hot(indexes, max_length):
max_index = indexes.max() + 1
return F.one_hot(indexes, max(max_index + 1, max_length))[..., :max_length]
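# Illustrative example (not part of the original file): F.one_hot raises on an
# index >= num_classes, whereas safe_one_hot widens the class dimension first
# and then truncates, so out-of-range indices simply become all-zero rows.
def _safe_one_hot_demo():
    idx = torch.tensor([0, 2, 7])    # 7 is out of range for 4 classes
    return safe_one_hot(idx, 4)      # rows: e0, e2, zeros -> shape (3, 4)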
def init_(t):
dim = t.shape[-1]
std = 1 / math.sqrt(dim)
return t.uniform_(-std, std)
# activations
class GELU_(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
GELU = nn.GELU if hasattr(nn, 'GELU') else GELU_
# expert class
class Experts(nn.Module):
def __init__(self,
dim,
num_experts = 16,
hidden_dim = None,
activation = GELU):
super().__init__()
hidden_dim = default(hidden_dim, dim * 4)
num_experts = cast_tuple(num_experts)
w1 = torch.zeros(*num_experts, dim, hidden_dim)
w2 = torch.zeros(*num_experts, hidden_dim, dim)
w1 = init_(w1)
w2 = init_(w2)
self.w1 = nn.Parameter(w1)
self.w2 = nn.Parameter(w2)
self.act = activation()
def forward(self, x):
hidden = torch.einsum('...nd,...dh->...nh', x, self.w1)
hidden = self.act(hidden)
out = torch.einsum('...nh,...hd->...nd', hidden, self.w2)
return out
# the below code is almost all transcribed from the official tensorflow version, from which the papers are written
# https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/research/moe.py
# gating network
class Top2Gating(nn.Module):
def __init__(
self,
dim,
num_gates,
eps = 1e-9,
outer_expert_dims = tuple(),
second_policy_train = 'random',
second_policy_eval = 'random',
second_threshold_train = 0.2,
second_threshold_eval = 0.2,
capacity_factor_train = 1.25,
capacity_factor_eval = 2.):
super().__init__()
self.eps = eps
self.num_gates = num_gates
self.w_gating = nn.Parameter(torch.randn(*outer_expert_dims, dim, num_gates))
self.second_policy_train = second_policy_train
self.second_policy_eval = second_policy_eval
self.second_threshold_train = second_threshold_train
self.second_threshold_eval = second_threshold_eval
self.capacity_factor_train = capacity_factor_train
self.capacity_factor_eval = capacity_factor_eval
def forward(self, x, importance = None):
*_, b, group_size, dim = x.shape
num_gates = self.num_gates
if self.training:
policy = self.second_policy_train
threshold = self.second_threshold_train
capacity_factor = self.capacity_factor_train
else:
policy = self.second_policy_eval
threshold = self.second_threshold_eval
capacity_factor = self.capacity_factor_eval
raw_gates = torch.einsum('...bnd,...de->...bne', x, self.w_gating)
raw_gates = raw_gates.softmax(dim=-1)
# FIND TOP 2 EXPERTS PER POSITION
# Find the top expert for each position. shape=[batch, group]
gate_1, index_1 = top1(raw_gates)
mask_1 = F.one_hot(index_1, num_gates).float()
density_1_proxy = raw_gates
if importance is not None:
equals_one_mask = (importance == 1.).float()
mask_1 *= equals_one_mask[..., None]
gate_1 *= equals_one_mask
density_1_proxy = density_1_proxy * equals_one_mask[..., None]
del equals_one_mask
gates_without_top_1 = raw_gates * (1. - mask_1)
gate_2, index_2 = top1(gates_without_top_1)
mask_2 = F.one_hot(index_2, num_gates).float()
if importance is not None:
greater_zero_mask = (importance > 0.).float()
mask_2 *= greater_zero_mask[..., None]
del greater_zero_mask
# normalize top2 gate scores
denom = gate_1 + gate_2 + self.eps
gate_1 /= denom
gate_2 /= denom
# BALANCING LOSSES
# shape = [batch, experts]
# We want to equalize the fraction of the batch assigned to each expert
density_1 = mask_1.mean(dim=-2)
# Something continuous that is correlated with what we want to equalize.
density_1_proxy = density_1_proxy.mean(dim=-2)
loss = (density_1_proxy * density_1).mean() * float(num_gates ** 2)
# Depending on the policy in the hparams, we may drop out some of the
# second-place experts.
if policy == "all":
pass
elif policy == "none":
mask_2 = torch.zeros_like(mask_2)
elif policy == "threshold":
mask_2 *= (gate_2 > threshold).float()
elif policy == "random":
probs = torch.zeros_like(gate_2).uniform_(0., 1.)
mask_2 *= (probs < (gate_2 / max(threshold, self.eps))).float().unsqueeze(-1)
else:
raise ValueError(f"Unknown policy {policy}")
# Each sequence sends (at most?) expert_capacity positions to each expert.
# Static expert_capacity dimension is needed for expert batch sizes
expert_capacity = min(group_size, int((group_size * capacity_factor) / num_gates))
expert_capacity = max(expert_capacity, MIN_EXPERT_CAPACITY)
expert_capacity_f = float(expert_capacity)
# COMPUTE ASSIGNMENT TO EXPERTS
# [batch, group, experts]
# This is the position within the expert's mini-batch for this sequence
position_in_expert_1 = cumsum_exclusive(mask_1, dim=-2) * mask_1
# Remove the elements that don't fit. [batch, group, experts]
mask_1 *= (position_in_expert_1 < expert_capacity_f).float()
# [batch, experts]
# How many examples in this sequence go to this expert
mask_1_count = mask_1.sum(dim=-2, keepdim=True)
# [batch, group] - mostly ones, but zeros where something didn't fit
mask_1_flat = mask_1.sum(dim=-1)
# [batch, group]
position_in_expert_1 = position_in_expert_1.sum(dim=-1)
# Weight assigned to first expert. [batch, group]
gate_1 *= mask_1_flat
position_in_expert_2 = cumsum_exclusive(mask_2, dim=-2) + mask_1_count
position_in_expert_2 *= mask_2
mask_2 *= (position_in_expert_2 < expert_capacity_f).float()
mask_2_flat = mask_2.sum(dim=-1)
position_in_expert_2 = position_in_expert_2.sum(dim=-1)
gate_2 *= mask_2_flat
# [batch, group, experts, expert_capacity]
combine_tensor = (
gate_1[..., None, None]
* mask_1_flat[..., None, None]
* F.one_hot(index_1, num_gates)[..., None]
* safe_one_hot(position_in_expert_1.long(), expert_capacity)[..., None, :] +
gate_2[..., None, None]
* mask_2_flat[..., None, None]
* F.one_hot(index_2, num_gates)[..., None]
* safe_one_hot(position_in_expert_2.long(), expert_capacity)[..., None, :]
)
dispatch_tensor = combine_tensor.bool().to(combine_tensor)
return dispatch_tensor, combine_tensor, loss
# plain mixture of experts
class MoE(nn.Module):
def __init__(self,
dim,
num_experts = 16,
hidden_dim = None,
activation = nn.ReLU,
second_policy_train = 'random',
second_policy_eval = 'random',
second_threshold_train = 0.2,
second_threshold_eval = 0.2,
capacity_factor_train = 1.25,
capacity_factor_eval = 2.,
loss_coef = 1e-2,
experts = None):
super().__init__()
self.num_experts = num_experts
gating_kwargs = {'second_policy_train': second_policy_train, 'second_policy_eval': second_policy_eval, 'second_threshold_train': second_threshold_train, 'second_threshold_eval': second_threshold_eval, 'capacity_factor_train': capacity_factor_train, 'capacity_factor_eval': capacity_factor_eval}
self.gate = Top2Gating(dim, num_gates = num_experts, **gating_kwargs)
self.experts = default(experts, lambda: Experts(dim, num_experts = num_experts, hidden_dim = hidden_dim, activation = activation))
self.loss_coef = loss_coef
def forward(self, inputs, **kwargs):
b, n, d, e = *inputs.shape, self.num_experts
dispatch_tensor, combine_tensor, loss = self.gate(inputs)
expert_inputs = torch.einsum('bnd,bnec->ebcd', inputs, dispatch_tensor)
# Now feed the expert inputs through the experts.
orig_shape = expert_inputs.shape
expert_inputs = expert_inputs.reshape(e, -1, d)
expert_outputs = self.experts(expert_inputs)
expert_outputs = expert_outputs.reshape(*orig_shape)
output = torch.einsum('ebcd,bnec->bnd', expert_outputs, combine_tensor)
return output, loss * self.loss_coef
# 2-level hierarchical mixture of experts
class HeirarchicalMoE(nn.Module):
def __init__(self,
dim,
num_experts = (4, 4),
hidden_dim = None,
activation = nn.ReLU,
second_policy_train = 'random',
second_policy_eval = 'random',
second_threshold_train = 0.2,
second_threshold_eval = 0.2,
capacity_factor_train = 1.25,
capacity_factor_eval = 2.,
loss_coef = 1e-2,
experts = None):
super().__init__()
assert len(num_experts) == 2, 'only 2 levels of hierarchy for experts allowed for now'
num_experts_outer, num_experts_inner = num_experts
self.num_experts_outer = num_experts_outer
self.num_experts_inner = num_experts_inner
gating_kwargs = {'second_policy_train': second_policy_train, 'second_policy_eval': second_policy_eval, 'second_threshold_train': second_threshold_train, 'second_threshold_eval': second_threshold_eval, 'capacity_factor_train': capacity_factor_train, 'capacity_factor_eval': capacity_factor_eval}
self.gate_outer = Top2Gating(dim, num_gates = num_experts_outer, **gating_kwargs)
self.gate_inner = Top2Gating(dim, num_gates = num_experts_inner, outer_expert_dims = (num_experts_outer,), **gating_kwargs)
self.experts = default(experts, lambda: Experts(dim, num_experts = num_experts, hidden_dim = hidden_dim, activation = activation))
self.loss_coef = loss_coef
def forward(self, inputs, **kwargs):
b, n, d, eo, ei = *inputs.shape, self.num_experts_outer, self.num_experts_inner
dispatch_tensor_outer, combine_tensor_outer, loss_outer = self.gate_outer(inputs)
expert_inputs_outer = torch.einsum('bnd,bnec->ebcd', inputs, dispatch_tensor_outer)
# we construct an "importance" Tensor for the inputs to the second-level
# gating. The importance of an input is 1.0 if it represents the
# first-choice expert-group and 0.5 if it represents the second-choice expert
# group. This is used by the second-level gating.
importance = combine_tensor_outer.permute(2, 0, 3, 1).sum(dim=-1)
importance = 0.5 * ((importance > 0.5).float() + (importance > 0.).float())
dispatch_tensor_inner, combine_tensor_inner, loss_inner = self.gate_inner(expert_inputs_outer, importance = importance)
expert_inputs = torch.einsum('ebnd,ebnfc->efbcd', expert_inputs_outer, dispatch_tensor_inner)
# Now feed the expert inputs through the experts.
orig_shape = expert_inputs.shape
expert_inputs = expert_inputs.reshape(eo, ei, -1, d)
expert_outputs = self.experts(expert_inputs)
expert_outputs = expert_outputs.reshape(*orig_shape)
# NOW COMBINE EXPERT OUTPUTS (reversing everything we have done)
# expert_output has shape [y0, x1, h, d, n]
expert_outputs_outer = torch.einsum('efbcd,ebnfc->ebnd', expert_outputs, combine_tensor_inner)
output = torch.einsum('ebcd,bnec->bnd', expert_outputs_outer, combine_tensor_outer)
return output, (loss_outer + loss_inner) * self.loss_coef
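# Usage sketch (illustrative, not part of the original file): a plain MoE layer
# returns the combined expert outputs along with the load-balancing auxiliary
# loss, which is meant to be added to the task loss. Sizes are hypothetical.
def _moe_demo():
    moe = MoE(dim = 512, num_experts = 16)
    inputs = torch.randn(4, 1024, 512)   # (batch, seq, dim)
    out, aux_loss = moe(inputs)
    return out.shape, aux_loss           # torch.Size([4, 1024, 512]), scalar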
|
mixture-of-experts-master
|
mixture_of_experts/mixture_of_experts.py
|
import torch
# constants
GOAL = 'Attention is all you need'
POP_SIZE = 100
MUTATION_RATE = 0.04
FRAC_FITTEST_SURVIVE = 0.25
FRAC_TOURNAMENT = 0.25
# encode and decode functions
def encode(s):
return torch.tensor([ord(c) for c in s])
def decode(t):
return ''.join([chr(i) for i in t.tolist()])
# derived constants
gene_length = len(GOAL)
gene_midpoint = gene_length // 2
target_gene = encode(GOAL)
keep_fittest_len = int(POP_SIZE * FRAC_FITTEST_SURVIVE)
num_tournament_contenders = int(keep_fittest_len * FRAC_TOURNAMENT)
num_children = POP_SIZE - keep_fittest_len
num_mutate = MUTATION_RATE * gene_length
assert num_tournament_contenders >= 2
# genetic algorithm
generation = 1
pool_shape = (POP_SIZE, gene_length)
pool = torch.randint(0, 255, pool_shape)
while True:
print(f"\n\ngeneration {generation}\n")
# sort population by fitness (inverse costs)
costs = torch.square(pool - target_gene).sum(dim = -1)
indices = costs.sort().indices
pool, costs = pool[indices], costs[indices]
# keep the fittest
pool, costs = pool[:keep_fittest_len], costs[:keep_fittest_len]
# display every generation
for gene, cost in zip(pool, costs):
print(f"{decode(gene)} ({cost.item()})")
# solved if any cost is 0
if (costs == 0).any():
break
# deterministic tournament selection - let top 2 winners become parents
contender_ids = torch.randn((num_children, keep_fittest_len)).argsort(dim = -1)[..., :num_tournament_contenders]
participants, tournaments = pool[contender_ids], costs[contender_ids]
top2_winners = tournaments.topk(2, dim = -1, largest = False, sorted = False).indices
top2_winners = top2_winners.unsqueeze(-1).expand(-1, -1, gene_length)
parents = participants.gather(1, top2_winners)
# cross over recombination of parents
parent1, parent2 = parents.unbind(dim = 1)
children = torch.cat((parent1[:, :gene_midpoint], parent2[:, gene_midpoint:]), dim = -1)
pool = torch.cat((pool, children), dim = 0)
# mutate genes in population
mutate_mask = torch.randn(pool_shape).argsort(dim = -1) < num_mutate
noise = torch.randint(0, 2, pool_shape) * 2 - 1
pool = torch.where(mutate_mask, pool + noise, pool)
pool.clamp_(0, 255)
generation += 1
|
genetic-algorithm-pytorch-main
|
ga.py
|
# Lint as: python3
"""HuggingFace/NLP is an open library of NLP datasets.
Simple check list for release from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Change the version in __init__.py, setup.py as well as docs/source/conf.py.
2. Commit these changes with the message: "Release: VERSION"
3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' "
Push the tag to git: git push --tags origin master
4. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
(this will build a wheel for the python version you use to build it).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions.
5. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest
(pypi suggests using twine as other methods upload files via plaintext.)
You may have to specify the repository url, use the following command then:
twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi nlp
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
8. Update the documentation commit in .circleci/deploy.sh for the accurate documentation to be displayed
9. Update README.md to redirect to correct documentation.
"""
import datetime
import itertools
import os
import sys
from setuptools import find_packages
from setuptools import setup
DOCLINES = __doc__.split('\n')
REQUIRED_PKGS = [
'numpy',
# Backend and serialization
'pyarrow>=0.16.0',
# For smart caching dataset processing
'dill',
# for downloading datasets over HTTPS
'requests>=2.19.0',
# progress bars in download and scripts
"tqdm >= 4.27",
# dataclasses for Python versions that don't have it
"dataclasses;python_version<'3.7'",
# filesystem locks e.g. to prevent parallel downloads
"filelock",
]
TESTS_REQUIRE = [
'apache-beam',
'absl-py',
'bs4',
'langdetect',
'mwparserfromhell',
'nltk',
'pytest',
'pytest-xdist',
'tensorflow',
'tldextract',
'zstandard'
]
QUALITY_REQUIRE = [
"black",
"isort @ git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort",
"flake8==3.7.9",
]
EXTRAS_REQUIRE = {
'apache-beam': ['apache-beam'],
'tensorflow': ['tensorflow>=2.2.0'],
'tensorflow_gpu': ['tensorflow-gpu>=2.2.0'],
'torch': ['torch'],
'dev': TESTS_REQUIRE + QUALITY_REQUIRE,
'tests': TESTS_REQUIRE,
'quality': QUALITY_REQUIRE,
}
setup(
name='nlp',
version="0.3.0",
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
author='HuggingFace Inc.',
author_email='thomas@huggingface.co',
url='https://github.com/huggingface/nlp',
download_url='https://github.com/huggingface/nlp/tags',
license='Apache 2.0',
package_dir={"": "src"},
packages=find_packages("src"),
package_data={
'nlp': [
'scripts/templates/*',
],
},
scripts=["nlp-cli"],
install_requires=REQUIRED_PKGS,
extras_require=EXTRAS_REQUIRE,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords='nlp machine learning datasets metrics',
)
|
nlp-master
|
setup.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERTScore metric. """
import nlp
import bert_score
_CITATION = """\
@inproceedings{bert-score,
title={BERTScore: Evaluating Text Generation with BERT},
author={Tianyi Zhang* and Varsha Kishore* and Felix Wu* and Kilian Q. Weinberger and Yoav Artzi},
booktitle={International Conference on Learning Representations},
year={2020},
url={https://openreview.net/forum?id=SkeHuCVFDr}
}
"""
_DESCRIPTION = """\
BERTScore leverages the pre-trained contextual embeddings from BERT and matches words in candidate and reference sentences by cosine similarity.
It has been shown to correlate with human judgment on sentence-level and system-level evaluation.
Moreover, BERTScore computes precision, recall, and F1 measure, which can be useful for evaluating different language generation tasks.
See the [README.md] file at https://github.com/Tiiiger/bert_score for more information.
"""
_KWARGS_DESCRIPTION = """
Computes BERTScore, along with the model hashcode, for predictions against one or more references.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str or list of list of str): reference sentences
`lang` (str): language of the sentences; required (e.g. 'en')
`model_type` (str): bert specification, defaults to the suggested
model for the target language; at least one of
`model_type` or `lang` has to be specified
`num_layers` (int): the layer of representation to use,
defaults to the number of layers tuned on WMT16 correlation data
`verbose` (bool): turn on intermediate status update
`idf` (bool or dict): use idf weighting, can also be a precomputed idf_dict
`device` (str): device on which the contextual embedding model will be allocated.
If this argument is None, the model lives on cuda:0 if cuda is available.
`nthreads` (int): number of threads
`batch_size` (int): bert score processing batch size.
At least one of `model_type` or `lang` has to be specified; `lang` needs to be
specified when `rescale_with_baseline` is True.
`rescale_with_baseline` (bool): rescale bertscore with pre-computed baseline
Returns:
'precision': Precision,
'recall': Recall,
'f1': F1 score,
'hashcode': Hashcode of the library,
"""
class BERTScore(nlp.Metric):
def _info(self):
return nlp.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
homepage="https://github.com/Tiiiger/bert_score",
inputs_description=_KWARGS_DESCRIPTION,
features=nlp.Features({
'predictions': nlp.Value('string', id='sequence'),
'references': nlp.Sequence(nlp.Value('string', id='sequence'), id='references'),
}),
codebase_urls=["https://github.com/Tiiiger/bert_score"],
reference_urls=["https://github.com/Tiiiger/bert_score",
"https://arxiv.org/abs/1904.09675"]
)
def _compute(
self,
predictions,
references,
lang=None,
model_type=None,
num_layers=None,
verbose=False,
idf=False,
device=None,
batch_size=64,
nthreads=4,
all_layers=False,
rescale_with_baseline=False,
):
if model_type is None:
assert lang is not None, "either lang or model_type should be specified"
model_type = bert_score.utils.lang2model[lang.lower()]
if num_layers is None:
num_layers = bert_score.utils.model2layers[model_type]
hashcode = bert_score.utils.get_hash(model_type, num_layers, idf, rescale_with_baseline)
if not hasattr(self, 'cached_bertscorer') or self.cached_bertscorer.hash != hashcode:
self.cached_bertscorer = bert_score.BERTScorer(
model_type=model_type,
num_layers=num_layers,
batch_size=batch_size,
nthreads=nthreads,
all_layers=all_layers,
idf=idf,
device=device,
lang=lang,
rescale_with_baseline=rescale_with_baseline,
)
(P, R, F) = self.cached_bertscorer.score(
cands=predictions, refs=references, verbose=verbose, batch_size=batch_size,
)
output_dict = {
'precision': P,
'recall': R,
'f1': F,
'hashcode': hashcode,
}
return output_dict
def add_batch(self, predictions=None, references=None, **kwargs):
""" Add a batch of predictions and references for the metric's stack.
"""
        # References can be strings or lists of strings
# Let's change strings to lists of strings with one element
if references is not None:
references = [[ref] if isinstance(ref, str) else ref for ref in references]
super().add_batch(predictions=predictions, references=references, **kwargs)
def add(self, prediction=None, reference=None, **kwargs):
""" Add one prediction and reference for the metric's stack.
"""
        # References can be strings or lists of strings
# Let's change strings to lists of strings with one element
if isinstance(reference, str):
reference = [reference]
super().add(prediction=prediction, reference=reference, **kwargs)
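# --- Illustrative usage sketch (editorial addition, not part of the original file) ---
# Mirrors what `_compute` above does internally, assuming the `bert_score` package is
# installed and can download the suggested English model; the sentences are invented.
# A caller would normally go through `nlp.load_metric("bertscore")` instead.
example_predictions = ["the cat sat on the mat"]
example_references = [["the cat is sitting on the mat"]]
example_model = bert_score.utils.lang2model["en"]
example_layers = bert_score.utils.model2layers[example_model]
example_scorer = bert_score.BERTScorer(model_type=example_model, num_layers=example_layers, lang="en")
P, R, F = example_scorer.score(cands=example_predictions, refs=example_references)
print(P, R, F)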
|
nlp-master
|
metrics/bertscore/bertscore.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GLUE benchmark metric. """
import nlp
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
_CITATION = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
Note that each GLUE dataset has its own citation. Please see the source to see
the correct citation for each contained dataset."""
_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
"""
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"accuracy": acc,
"f1": f1,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
class Glue(nlp.Metric):
def _info(self):
if self.config_name not in ["sst2", "mnli", "mnli_mismatched", "mnli_matched",
"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]:
raise KeyError('You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]')
return nlp.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=nlp.Features({
'predictions': nlp.Value('int64' if self.config_name != 'stsb' else 'float32'),
'references': nlp.Value('int64' if self.config_name != 'stsb' else 'float32'),
}),
codebase_urls=[],
reference_urls=[],
format='numpy'
)
def _compute(self, predictions, references):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(references, predictions)}
elif self.config_name == "stsb":
return pearson_and_spearman(predictions, references)
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_f1(predictions, references)
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError('You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]')
|
nlp-master
|
metrics/glue/glue.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ROUGE metric. """
import nlp
import numpy
import scipy
from .coval_backend.conll import reader # From: https://github.com/ns-moosavi/coval
from .coval_backend.conll import util
from .coval_backend.eval import evaluator
_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
"""
_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotations for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities This column identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
    predictions: list of predictions to score in the CoNLL format.
        Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
        See the details on the format in the description of the metric.
    references: list of references for scoring in the CoNLL format.
        Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
        See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_spans: By setting 'min_spans', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
"""
def get_coref_infos(key_lines,
sys_lines,
NP_only=False,
remove_nested=False,
keep_singletons=True,
min_span=False,
doc="dummy_doc"):
key_doc_lines = {doc: key_lines}
sys_doc_lines = {doc: sys_lines}
doc_coref_infos = {}
key_nested_coref_num = 0
sys_nested_coref_num = 0
key_removed_nested_clusters = 0
sys_removed_nested_clusters = 0
key_singletons_num = 0
sys_singletons_num = 0
key_clusters, singletons_num = reader.get_doc_mentions(
doc, key_doc_lines[doc], keep_singletons)
key_singletons_num += singletons_num
if NP_only or min_span:
key_clusters = reader.set_annotated_parse_trees(key_clusters,
key_doc_lines[doc],
NP_only, min_span)
sys_clusters, singletons_num = reader.get_doc_mentions(
doc, sys_doc_lines[doc], keep_singletons)
sys_singletons_num += singletons_num
if NP_only or min_span:
sys_clusters = reader.set_annotated_parse_trees(sys_clusters,
key_doc_lines[doc],
NP_only, min_span)
if remove_nested:
nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(
key_clusters, keep_singletons)
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(
sys_clusters, keep_singletons)
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
sys_mention_key_cluster = reader.get_mention_assignments(
sys_clusters, key_clusters)
key_mention_sys_cluster = reader.get_mention_assignments(
key_clusters, sys_clusters)
doc_coref_infos[doc] = (key_clusters, sys_clusters,
key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
print('Number of removed nested coreferring mentions in the key '
'annotation: %s; and system annotation: %s' % (
key_nested_coref_num, sys_nested_coref_num))
print('Number of resulting singleton clusters in the key '
'annotation: %s; and system annotation: %s' % (
key_removed_nested_clusters, sys_removed_nested_clusters))
if not keep_singletons:
print('%d and %d singletons are removed from the key and system '
'files, respectively' % (
key_singletons_num, sys_singletons_num))
return doc_coref_infos
def evaluate(key_lines,
sys_lines, metrics, NP_only, remove_nested,
keep_singletons, min_span):
doc_coref_infos = get_coref_infos(key_lines,
sys_lines, NP_only,
remove_nested, keep_singletons, min_span)
output_scores = {}
conll = 0
conll_subparts_num = 0
for name, metric in metrics:
recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos,
metric,
beta=1)
if name in ["muc", "bcub", "ceafe"]:
conll += f1
conll_subparts_num += 1
output_scores.update({f"{name}/recall": recall,
f"{name}/precision": precision,
f"{name}/f1": f1})
print(name.ljust(10), 'Recall: %.2f' % (recall * 100),
' Precision: %.2f' % (precision * 100),
' F1: %.2f' % (f1 * 100))
if conll_subparts_num == 3:
conll = (conll / 3) * 100
print('CoNLL score: %.2f' % conll)
output_scores.update({f"conll_score": conll})
return output_scores
def check_gold_parse_annotation(key_lines):
has_gold_parse = False
for line in key_lines:
if not line.startswith("#"):
if len(line.split())> 6:
parse_col = line.split()[5]
if not parse_col == "-":
has_gold_parse = True
break
else:
break
return has_gold_parse
class Coval(nlp.Metric):
def __init__(self, **kwargs):
raise NotImplementedError("CoVal is currently under construction.")
def _info(self):
return nlp.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=nlp.Features({
'predictions': nlp.Value('string', id='sequence'),
'references': nlp.Value('string', id='sequence'),
}),
codebase_urls=["https://github.com/ns-moosavi/coval"],
reference_urls=["https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html"]
)
def _compute(self, predictions, references, keep_singletons=True,
NP_only=False, min_spans=False, remove_nested=False):
allmetrics = [('mentions', evaluator.mentions), ('muc', evaluator.muc),
('bcub', evaluator.b_cubed), ('ceafe', evaluator.ceafe),
('lea', evaluator.lea)]
if min_spans:
            has_gold_parse = check_gold_parse_annotation(references)  # helper defined above in this module
if not has_gold_parse:
raise NotImplementedError("References should have gold parse annotation to use 'min_spans'.")
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
score = evaluate(references, predictions, allmetrics, NP_only, remove_nested,
keep_singletons, min_spans)
return score
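# --- Illustrative sketch of the CoNLL line format described above (editorial addition) ---
# The token lines below are invented for demonstration; columns are space-separated and
# column 6 carries the parse bit that `check_gold_parse_annotation` looks for.
example_key_lines = [
    "#begin document (dummy_doc); part 000",
    "dummy_doc 0 0 John NNP (NP*) - - - - * (0)",
    "dummy_doc 0 1 smiled VBD * - - - - * -",
    "#end document",
]
print(check_gold_parse_annotation(example_key_lines))  # True: the parse column is not "-"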
|
nlp-master
|
metrics/coval/coval.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" seqeval metric. """
from collections import defaultdict
import nlp
from seqeval.metrics import accuracy_score, precision_score, recall_score, f1_score
_CITATION = """\
"""
_DESCRIPTION = """\
seqeval is a Python framework for sequence labeling evaluation.
seqeval can evaluate the performance of chunking tasks such as named-entity recognition, part-of-speech tagging, semantic role labeling and so on.
This is well-tested by using the Perl script conlleval, which can be used for
measuring the performance of a system that has processed the CoNLL-2000 shared task data.
seqeval supports following formats:
IOB1
IOB2
IOE1
IOE2
IOBES
See the [README.md] file at https://github.com/chakki-works/seqeval for more information.
"""
_KWARGS_DESCRIPTION = """
Produces labelling scores along with their sufficient statistics
from a source against one or more references.
Args:
predictions: List of List of predicted labels (Estimated targets as returned by a tagger)
references: List of List of reference labels (Ground truth (correct) target values)
    suffix: True if the IOB tag is written as a suffix (e.g. 'PER-B' instead of 'B-PER'), False otherwise. default: False
Returns:
Overall:
'accuracy': accuracy,
'precision': precision,
'recall': recall,
'f1': F1 score, also known as balanced F-score or F-measure,
Per type:
'precision': precision,
'recall': recall,
'f1': F1 score, also known as balanced F-score or F-measure,
"""
def end_of_chunk(prev_tag, tag, prev_type, type_):
"""Checks if a chunk ended between the previous and current word.
Args:
prev_tag: previous chunk tag.
tag: current chunk tag.
prev_type: previous type.
type_: current type.
Returns:
chunk_end: boolean.
"""
chunk_end = False
if (prev_tag in ["B", "I"] and tag in ["B", "S", "O"]) or prev_tag in ["E", "S"]:
chunk_end = True
if prev_tag not in ['O', '.'] and prev_type != type_:
chunk_end = True
return chunk_end
def start_of_chunk(prev_tag, tag, prev_type, type_):
"""Checks if a chunk started between the previous and current word.
Args:
prev_tag: previous chunk tag.
tag: current chunk tag.
prev_type: previous type.
type_: current type.
Returns:
chunk_start: boolean.
"""
chunk_start = False
if (prev_tag in ["E", "S", "O"] and tag in ["E", "I"]) or tag in ["B", "S"]:
chunk_start = True
if tag not in ['O', '.'] and prev_type != type_:
chunk_start = True
return chunk_start
def get_entities(seq, suffix=False):
"""Gets entities from sequence.
Args:
seq (list): sequence of labels.
Returns:
list: list of (chunk_type, chunk_start, chunk_end).
"""
if any(isinstance(s, list) for s in seq):
seq = [item for sublist in seq for item in sublist + ['O']]
prev_tag = 'O'
prev_type = ''
begin_offset = 0
chunks = []
for i, chunk in enumerate(seq + ['O']):
if suffix:
tag = chunk[-1]
type_ = chunk.split('-')[0]
else:
tag = chunk[0]
type_ = chunk.split('-')[-1]
if end_of_chunk(prev_tag, tag, prev_type, type_):
chunks.append((prev_type, begin_offset, i-1))
if start_of_chunk(prev_tag, tag, prev_type, type_):
begin_offset = i
prev_tag = tag
prev_type = type_
return chunks
class Seqeval(nlp.Metric):
def _info(self):
return nlp.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
homepage="https://github.com/chakki-works/seqeval",
inputs_description=_KWARGS_DESCRIPTION,
features=nlp.Features({
'predictions': nlp.Sequence(nlp.Value('string', id='label'), id='sequence'),
'references': nlp.Sequence(nlp.Value('string', id='label'), id='sequence'),
}),
codebase_urls=["https://github.com/chakki-works/seqeval"],
reference_urls=["https://github.com/chakki-works/seqeval"]
)
def _compute(self, predictions, references, suffix=False):
true_entities = set(get_entities(references, suffix))
pred_entities = set(get_entities(predictions, suffix))
d1 = defaultdict(set)
d2 = defaultdict(set)
scores = {}
for e in true_entities:
d1[e[0]].add((e[1], e[2]))
for e in pred_entities:
d2[e[0]].add((e[1], e[2]))
for type_name, true_entities in d1.items():
scores[type_name] = {}
pred_entities = d2[type_name]
nb_correct = len(true_entities & pred_entities)
nb_pred = len(pred_entities)
nb_true = len(true_entities)
p = nb_correct / nb_pred if nb_pred > 0 else 0
r = nb_correct / nb_true if nb_true > 0 else 0
f1 = 2 * p * r / (p + r) if p + r > 0 else 0
scores[type_name]["precision"] = p
scores[type_name]["recall"] = r
scores[type_name]["f1"] = f1
scores[type_name]["number"] = nb_true
scores["overall_precision"] = precision_score(y_true=references, y_pred=predictions, suffix=suffix)
scores["overall_recall"] = recall_score(y_true=references, y_pred=predictions, suffix=suffix)
scores["overall_f1"] = f1_score(y_true=references, y_pred=predictions, suffix=suffix)
scores["overall_accuracy"] = accuracy_score(y_true=references, y_pred=predictions)
return scores
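# --- Illustrative sketch of `get_entities` above (editorial addition) ---
# Toy IOB2 sequence invented for illustration; entities come back as (type, start, end) tuples.
example_tags = ['B-PER', 'I-PER', 'O', 'B-LOC']
print(get_entities(example_tags))                       # [('PER', 0, 1), ('LOC', 3, 3)]
# With suffix=True the chunk tag is expected at the end of the label, e.g. 'PER-B'.
example_suffix_tags = ['PER-B', 'PER-I', 'O', 'LOC-B']
print(get_entities(example_suffix_tags, suffix=True))   # [('PER', 0, 1), ('LOC', 3, 3)]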
|
nlp-master
|
metrics/seqeval/seqeval.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" XNLI benchmark metric. """
import nlp
_CITATION = """\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
"""
_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
"""
def simple_accuracy(preds, labels):
return (preds == labels).mean()
class Xnli(nlp.Metric):
def _info(self):
return nlp.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=nlp.Features({
'predictions': nlp.Value('int64' if self.config_name != 'sts-b' else 'float32'),
'references': nlp.Value('int64' if self.config_name != 'sts-b' else 'float32'),
}),
codebase_urls=[],
reference_urls=[],
format='numpy'
)
def _compute(self, predictions, references):
return {"accuracy": simple_accuracy(predictions, references)}
|
nlp-master
|
metrics/xnli/xnli.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GLEU metric. """
import random
import nlp
import scipy.stats
import numpy as np
from .gec_gleu import GLEU # From: https://github.com/cnap/gec-ranking/blob/master/scripts/gleu.py
_CITATION = """\
@InProceedings{napoles-EtAl:2015:ACL-IJCNLP,
author = {Napoles, Courtney and Sakaguchi, Keisuke and Post, Matt and Tetreault, Joel},
title = {Ground Truth for Grammatical Error Correction Metrics},
booktitle = {Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 2: Short Papers)},
month = {July},
year = {2015},
address = {Beijing, China},
publisher = {Association for Computational Linguistics},
pages = {588--593},
url = {http://www.aclweb.org/anthology/P15-2097}
}
@Article{napoles2016gleu,
author = {Napoles, Courtney and Sakaguchi, Keisuke and Post, Matt and Tetreault, Joel},
title = {{GLEU} Without Tuning},
journal = {eprint arXiv:1605.02592 [cs.CL]},
year = {2016},
url = {http://arxiv.org/abs/1605.02592}
}
"""
_DESCRIPTION = """\
The GLEU metric is a variant of BLEU proposed for evaluating grammatical error corrections
using n-gram overlap with a set of reference sentences, as opposed to precision/recall of specific
annotated errors (Napoles et al., 2015). GLEU hews more closely to human judgments than the rankings produced by
metrics such as MaxMatch and I-measure. The present metric is the second version of GLEU (Napoles et al., 2016)
modified to address problems that arise when using an increasing number of reference sets.
The modified metric does not require tuning and is recommended to be used instead of the original version.
"""
_KWARGS_DESCRIPTION = """
Computes GLEU score.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
"""
def get_gleu_stats(scores) :
mean = np.mean(scores)
std = np.std(scores)
ci = scipy.stats.norm.interval(0.95,loc=mean,scale=std)
return {'mean': mean,
'std': std,
'ci': ci}
class Gleu(nlp.Metric):
def __init__(self, **kwargs):
raise NotImplementedError("Gleu is currently under construction.")
def _info(self):
return nlp.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=nlp.Features({
'predictions': nlp.Sequence(nlp.Value('string', id='token'), id='sequence'),
'references': nlp.Sequence(nlp.Sequence(nlp.Value('string', id='token'), id='sequence'), id='references'),
}),
codebase_urls=["https://github.com/cnap/gec-ranking"],
reference_urls=["https://github.com/cnap/gec-ranking"]
)
def _compute(self, predictions, references, source, num_iterations=500, debug=False):
raise NotImplementedError("To finish")
gleu_calculator = GLEU()
gleu_calculator.load_sources(source)
gleu_calculator.load_references(references)
# first generate a random list of indices, using a different seed
# for each iteration
indices = []
for j in range(num_iterations) :
random.seed(j*101)
indices.append([random.randint(0,len(references)-1)
for i in range(len(predictions))])
if debug :
print('===== Sentence-level scores =====')
print('SID Mean Stdev 95%CI GLEU')
iter_stats = [ [0 for i in range(2*4+2)]
for j in range(num_iterations) ]
for i,h in enumerate(predictions) :
gleu_calculator.load_hypothesis_sentence(h)
# we are going to store the score of this sentence for each ref
# so we don't have to recalculate them 500 times
stats_by_ref = [ None for r in range(len(references)) ]
for j in range(num_iterations) :
ref = indices[j][i]
this_stats = stats_by_ref[ref]
if this_stats is None :
this_stats = [ s for s in gleu_calculator.gleu_stats(
i,r_ind=ref) ]
stats_by_ref[ref] = this_stats
iter_stats[j] = [ sum(scores)
for scores in zip(iter_stats[j], this_stats)]
if debug :
# sentence-level GLEU is the mean GLEU of the hypothesis
# compared to each reference
for r in range(len(references)) :
if stats_by_ref[r] is None :
stats_by_ref[r] = [s for s in gleu_calculator.gleu_stats(
i,r_ind=r) ]
print(i)
print(' '.join(get_gleu_stats([gleu_calculator.gleu(stats,smooth=True)
for stats in stats_by_ref])))
if debug :
print('\n==== Overall score =====')
print('Mean Stdev 95%CI GLEU')
print(' '.join(get_gleu_stats([gleu_calculator.gleu(stats)
for stats in iter_stats ])))
return get_gleu_stats([gleu_calculator.gleu(stats)
for stats in iter_stats ])[0]
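# --- Illustrative sketch of `get_gleu_stats` above (editorial addition) ---
# The metric class is still a stub, so only the aggregation helper is exercised here;
# the per-iteration scores are invented for illustration.
import numpy as np
import scipy.stats

example_scores = [0.62, 0.58, 0.65, 0.60]
example_stats = get_gleu_stats(example_scores)
print(example_stats['mean'], example_stats['std'], example_stats['ci'])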
|
nlp-master
|
metrics/gleu/gleu.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" SACREBLEU metric. """
import nlp
import sacrebleu as scb
_CITATION = """\
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
"""
_DESCRIPTION = """\
SacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores.
Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text.
It also knows all the standard test sets and handles downloading, processing, and tokenization for you.
See the [README.md] file at https://github.com/mjpost/sacreBLEU for more information.
"""
_KWARGS_DESCRIPTION = """
Produces BLEU scores along with its sufficient statistics
from a source against one or more references.
Args:
predictions: The system stream (a sequence of segments)
references: A list of one or more reference streams (each a sequence of segments)
    smooth_method: The smoothing method to use
smooth_value: For 'floor' smoothing, the floor to use
force: Ignore data that looks already tokenized
lowercase: Lowercase the data
tokenize: The tokenizer to use
Returns:
'score': BLEU score,
'counts': Counts,
'totals': Totals,
'precisions': Precisions,
'bp': Brevity penalty,
'sys_len': predictions length,
'ref_len': reference length,
"""
class Sacrebleu(nlp.Metric):
def _info(self):
return nlp.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
homepage="https://github.com/mjpost/sacreBLEU",
inputs_description=_KWARGS_DESCRIPTION,
features=nlp.Features({
'predictions': nlp.Value('string', id='sequence'),
'references': nlp.Sequence(nlp.Value('string', id='sequence'), id='references'),
}),
codebase_urls=["https://github.com/mjpost/sacreBLEU"],
reference_urls=["https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213"]
)
def _compute(self, predictions, references, smooth_method='exp',
smooth_value=None,
force=False,
lowercase=False,
tokenize=scb.sacrebleu.DEFAULT_TOKENIZER,
use_effective_order=False):
output = scb.corpus_bleu(
sys_stream=predictions,
ref_streams=references,
smooth_method=smooth_method,
smooth_value=smooth_value,
force=force,
lowercase=lowercase,
tokenize=tokenize,
use_effective_order=use_effective_order)
output_dict = {
'score': output.score,
'counts': output.counts,
'totals': output.totals,
'precisions': output.precisions,
'bp': output.bp,
'sys_len': output.sys_len,
'ref_len': output.ref_len,
}
return output_dict
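# --- Illustrative usage sketch (editorial addition, not part of the original file) ---
# Calls `sacrebleu.corpus_bleu` directly with the same keyword arguments `_compute`
# forwards; the sentences are invented and exact argument names may vary across
# sacrebleu versions.
example_predictions = ["the cat sat on the mat"]
example_references = [["the cat is on the mat"]]  # one reference stream
example_output = scb.corpus_bleu(
    sys_stream=example_predictions,
    ref_streams=example_references,
    smooth_method='exp',
)
print(example_output.score, example_output.bp, example_output.sys_len, example_output.ref_len)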
|
nlp-master
|
metrics/sacrebleu/sacrebleu.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BLEU metric. """
import nlp
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
"""
_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
"""
class Bleu(nlp.Metric):
def _info(self):
return nlp.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=nlp.Features({
'predictions': nlp.Sequence(nlp.Value('string', id='token'), id='sequence'),
'references': nlp.Sequence(nlp.Sequence(nlp.Value('string', id='token'), id='sequence'), id='references'),
}),
codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
reference_urls=["https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213"]
)
def _compute(self, predictions, references, max_order=4, smooth=False):
score = compute_bleu(reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
(bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {'bleu': bleu,
'precisions': precisions,
'brevity_penalty': bp,
'length_ratio': ratio,
'translation_length': translation_length,
'reference_length': reference_length}
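# --- Illustrative sketch of the input shapes expected above (editorial addition) ---
# Both corpora are tokenized; `compute_bleu` is the function from the TensorFlow NMT
# script imported at the top of this file. The tokens are invented for illustration.
example_predictions = [["the", "cat", "sat", "on", "the", "mat"]]
example_references = [[["the", "cat", "is", "on", "the", "mat"]]]  # one list of references per prediction
(bleu, precisions, bp, ratio, translation_length, reference_length) = compute_bleu(
    reference_corpus=example_references, translation_corpus=example_predictions, max_order=4, smooth=False)
print(bleu, precisions, bp)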
|
nlp-master
|
metrics/bleu/bleu.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" SQuAD v2 metric. """
import nlp
from .evaluate import make_qid_to_has_ans, get_raw_scores, apply_no_ans_threshold, make_eval_dict, merge_eval
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 2 of the Stanford Question
Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions
written adversarially by crowdworkers to look similar to answerable ones.
To do well on SQuAD2.0, systems must not only answer questions when possible, but also
determine when no answer is supported by the paragraph and abstain from answering.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD v2 scores (F1 and EM).
Args:
    predictions: List of triples for question-answers to score with the following elements:
- the question-answer 'id' field as given in the references (see below)
- the text of the answer
- the probability that the question has no answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a list of Dict {'text': text of the answer as a string}
no_answer_threshold: float
Probability threshold to decide that a question has no answer.
Returns:
    'exact': Exact match (the normalized answer exactly matches the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'total': Number of scores considered
    'HasAns_exact': Exact match (the normalized answer exactly matches the gold answer)
    'HasAns_f1': The F-score of predicted tokens versus the gold answer
    'HasAns_total': Number of scores considered
    'NoAns_exact': Exact match (the normalized answer exactly matches the gold answer)
    'NoAns_f1': The F-score of predicted tokens versus the gold answer
    'NoAns_total': Number of scores considered
'best_exact': Best exact match (with varying threshold)
'best_exact_thresh': No-answer probability threshold associated to the best exact match
'best_f1': Best F1 (with varying threshold)
'best_f1_thresh': No-answer probability threshold associated to the best F1
"""
class SquadV2(nlp.Metric):
def _info(self):
return nlp.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=nlp.Features(
{
"predictions": {
"id": nlp.Value("string"),
"prediction_text": nlp.Value("string"),
"no_answer_probability": nlp.Value("float32"),
},
"references": {
"id": nlp.Value("string"),
"answers": nlp.features.Sequence(
{"text": nlp.Value("string"), "answer_start": nlp.Value("int32")}
),
},
}
),
codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
)
def _compute(self, predictions, references, no_answer_threshold=1.0):
        # Build the no-answer probability map before `predictions` is replaced by an id -> text dict.
        no_answer_probabilities = dict((p["id"], p["no_answer_probability"]) for p in predictions)
        predictions = dict((p["id"], p["prediction_text"]) for p in predictions)
        dataset = [{"paragraphs": [{"qas": references}]}]
qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = get_raw_scores(dataset, predictions)
exact_thresh = apply_no_ans_threshold(exact_raw, no_answer_probabilities, qid_to_has_ans, no_answer_threshold)
f1_thresh = apply_no_ans_threshold(f1_raw, no_answer_probabilities, qid_to_has_ans, no_answer_threshold)
out_eval = make_eval_dict(exact_thresh, f1_thresh)
if has_ans_qids:
has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
merge_eval(out_eval, has_ans_eval, "HasAns")
if no_ans_qids:
no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
merge_eval(out_eval, no_ans_eval, "NoAns")
return out_eval
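# --- Illustrative sketch of the expected input shapes (editorial addition) ---
# The ids and texts are invented for illustration; the reshaping below is exactly what
# `_compute` does before handing the data to the official SQuAD v2 scoring functions.
example_predictions = [
    {"id": "q1", "prediction_text": "Normandy", "no_answer_probability": 0.1},
    {"id": "q2", "prediction_text": "", "no_answer_probability": 0.9},
]
example_references = [
    {"id": "q1", "answers": [{"text": "Normandy"}]},
    {"id": "q2", "answers": []},  # unanswerable question
]
no_answer_probs = dict((p["id"], p["no_answer_probability"]) for p in example_predictions)
prediction_texts = dict((p["id"], p["prediction_text"]) for p in example_predictions)
example_dataset = [{"paragraphs": [{"qas": example_references}]}]
print(prediction_texts)   # {'q1': 'Normandy', 'q2': ''}
print(no_answer_probs)    # {'q1': 0.1, 'q2': 0.9}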
|
nlp-master
|
metrics/squad_v2/squad_v2.py
|