# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for tensor-like ojects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# NOTE(ebrevdo): Do not subclass this. If you do, I will break you on purpose.
class _TensorLike(object):
"""Internal cls for grouping Tensor, SparseTensor, ..., for is_instance."""
pass
# ==============================================================================
# End of file: tensorflow/python/framework/tensor_like.py (tensorflow-master)
# ==============================================================================
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import unittest
from absl.testing import parameterized
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True, causing TensorFlow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top
except ImportError:
pass
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
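# Usage sketch for assert_ops_in_graph (kept as a comment to avoid import-time
# side effects; the op name and type below are illustrative):
#
#   g = ops.Graph()
#   with g.as_default():
#     array_ops.placeholder(dtypes.float32, name="x")
#   assert_ops_in_graph({"x": "Placeholder"}, g)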
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEqual(a_value, b_value)
# Having compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Having compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(_TABLE_SHARED_NAME_PATTERN, str(node.attr["shared_name"].s)):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return pywrap_tensorflow.IsBuiltWithROCm()
def GpuSupportsHalfMatMulAndConv():
return pywrap_tensorflow.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
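# Worked example (illustrative): for a 4-D shape the axis order [0, 3, 1, 2]
# applies, so
#   NHWCToNCHW([32, 224, 224, 3]) == [32, 3, 224, 224]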
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
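# Worked example (illustrative): an NHWC shape [2, 4, 4, 8] is first reshaped
# to [2, 4, 4, 2, 4] (channels split into 2 groups of 4), then permuted with
# [0, 3, 1, 2, 4]:
#   NHWCToNCHW_VECT_C([2, 4, 4, 8]) == [2, 2, 4, 4, 4]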
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
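# Worked example (illustrative): this inverts the transformation above, so the
# round trip recovers the original NHWC shape:
#   NCHW_VECT_CToNHWC([2, 2, 4, 4, 4]) == [2, 4, 4, 8]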
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
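# Usage sketch (illustrative; the condition and test name are hypothetical):
#
#   class MyTest(test.TestCase):
#
#     @skip_if(lambda: not is_gpu_available())
#     def testThatNeedsAGpu(self):
#       ...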
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def assert_no_new_pyobjects_executing_eagerly(f):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
"""
def decorator(self, **kwargs):
"""Warms up, gets an object count, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various versions
# of python2.7.x.
for _ in range(2):
f(self, **kwargs)
gc.collect()
previous_count = len(gc.get_objects())
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
new_count = len(gc.get_objects())
# In some cases (specifically on macOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert new_count <= previous_count, (
"new_count(%d) is not less than or equal to previous_count(%d)" %
(new_count, previous_count))
gc.enable()
return decorator
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, blacklist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(blacklist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in blacklist:
if b is obj:
return "<test code>"
if obj is blacklist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, blacklist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
blacklist: same as blacklist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, blacklist):
return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, blacklist):
"""Builds a reference graph as <referrer> -> <list of refferents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
blacklist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
blacklist = blacklist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, blacklist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, blacklist)
reprs[r_id] = describe(r, blacklist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = _combine_named_parameters(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
combinations = [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
return combinations
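# Worked example (illustrative): scalar values are promoted to single-element
# lists and keys are sorted by their first character, so
#   _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
# returns
#   [OrderedDict([("mode", "graph"), ("use_gpu", True)]),
#    OrderedDict([("mode", "eager"), ("use_gpu", True)])]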
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
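# Worked example (illustrative): non-alphanumeric characters are stripped from
# keys and values when building the name, so the first combination in the
# example above gains
#   ("testcase_name", "_test_mode_graph_usegpu_True")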
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith(
unittest.TestLoader.testMethodPrefix) and not (
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or name == "test_session"):
setattr(cls, name, base_decorator(value))
return cls
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
WARNING: This decorator can only be used in test cases that statically check
the generated graph. Attempting to evaluate graph or function results via
session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
raise ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
reset_test=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
reset_test: If True, tearDown and setUp the test case between the two
executions of the test (once with and once without eager execution).
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
if reset_test:
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return decorated
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.get_default_graph()._building_function:
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if tf2.enabled():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Warning: if a non-GPU version of the package is installed, the function will
also return False. Use `tf.test.is_built_with_cuda` to validate if TensorFlow
was built with CUDA support.
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True if a GPU device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(
local_device.physical_device_desc) >=
min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
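# Usage sketch (illustrative; the (7, 0) capability requirement is
# hypothetical):
#
#   if not is_gpu_available(cuda_only=True,
#                           min_cuda_compute_capability=(7, 0)):
#     self.skipTest("Requires a CUDA GPU with compute capability >= 7.0")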
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
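# Usage sketch for the device helpers above (the ops are illustrative):
#
#   with force_cpu():
#     x = array_ops.ones([2, 2])   # pinned to /device:CPU:0
#   with use_gpu():
#     y = array_ops.ones([2, 2])   # on GPU:0 when available, otherwise CPU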
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
Since the feed_dict is empty when not using placeholders, we should be able
to call self.evaluate(); however, this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evalaute `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable the logging for OutOfRangeError, which makes the output
# of tf.data tests hard to read, because OutOfRangeError is used as the
# signal of completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def use_deterministic_cudnn(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CUDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = os.environ.get("TF_CUDNN_DETERMINISTIC", "")
os.environ["TF_CUDNN_DETERMINISTIC"] = "true"
result = f(self, *args, **kwargs)
os.environ["TF_CUDNN_DETERMINISTIC"] = original_var
return result
return decorated
if func is not None:
return decorator(func)
return decorator
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tensorflow.TF_GetXlaConstantFoldingDisabled()
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# The description is just for documentation purposes.
def disable_xla(description):
def disable_xla_impl(func):
"""Execute the test method only if xla is not enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_xla_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given class's test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
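# Usage sketch (illustrative; the bug id is hypothetical):
#
#   @for_all_test_methods(disable_xla, "b/123456")
#   class MyXlaUnfriendlyTest(test.TestCase):
#     ...  # every test* method now skips when XLA is force-enabled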
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
def no_xla_auto_jit_impl(func):
"""This test is not intended to be run with XLA auto jit enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Skip test if using XLA is forced.
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return no_xla_auto_jit_impl
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
pywrap_tensorflow.TF_SetXlaAutoJitMode("2")
pywrap_tensorflow.TF_SetXlaMinClusterSize(1)
pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(False)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(True)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Reset summary writer in case another test used set_as_default() with its
# summary writer.
context.context().summary_writer = None
# Avoiding calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This will ensure that across different runs tests will not be
able to pollute each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then uses assertProtoEqual()
in case of failure, as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
tensor.values.numpy(), tensor.row_splits.numpy())
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
return tensor.numpy()
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
    This method behaves differently from self.session(): for performance
    reasons `cached_session` will by default reuse the same session within the
    same test. The session returned by this function will only be closed at
    the end of the test (in the tearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
          an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
      This method returns True from just before the run() method starts
      until just after it terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
        self._testcase.failureException: If check_termination was called before
          the thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
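    Example (illustrative sketch; `compute` is a hypothetical function under
    test):
    ```python
    def body():
      self.assertEqual(compute(), 42)  # hypothetical function under test
    t = self.checkedThread(target=body)
    t.start()
    t.join()  # Re-raises any assertion failure from the thread.
    ```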
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
    Checks that |f1 - f2| <= err and asserts a test failure
    if not.
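    Example (illustrative):
    ```python
    self.assertNear(0.1 + 0.2, 0.3, err=1e-9)
    ```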
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is a tensor then convert it to ndarray
if isinstance(a, ops.Tensor):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections.Mapping)
if a_is_dict != isinstance(b, collections.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if that fails, traverse
      # through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
      except (ValueError, TypeError):
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
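    Example (illustrative):
    ```python
    expected = {"logits": [1.0, 2.0], "loss": (0.5,)}
    actual = {"logits": [1.0000001, 2.0], "loss": (0.5,)}
    self.assertAllClose(expected, actual, rtol=1e-5)
    ```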
Args:
      a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested structure
        of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested structure
        of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
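    Example (illustrative; passes under the relaxed float16 tolerances but
    would fail under the default float32 tolerances):
    ```python
    a = np.array([1.0], dtype=np.float16)
    b = np.array([1.001], dtype=np.float16)
    self.assertAllCloseAccordingToType(a, b)
    ```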
Args:
      a: the expected numpy ndarray, or anything that can be converted to one.
      b: the actual numpy ndarray, or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
      a: the expected numpy ndarray, or anything that can be converted to one.
      b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %s. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = {}".format(x))
msgs.append("not equal rhs = {}".format(y))
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
    If limit == N, this method will print up to the first N subscripts on
    separate lines. A line of ellipses (...) will be appended at the end if
    the number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
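    Example (illustrative; `probs` is a hypothetical tensor of
    probabilities):
    ```python
    self.assertAllInRange(probs, 0.0, 1.0)
    ```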
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
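    Example (illustrative; `labels` is a hypothetical tensor of binary
    labels):
    ```python
    self.assertAllInSet(labels, (0, 1))
    ```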
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
if not isinstance(target, list):
arrays = [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
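    Example (illustrative sketch; `build_model` and `bad_inputs` are
    hypothetical):
    ```python
    with self.assertRaisesWithPredicateMatch(
        ValueError, lambda e: "rank" in str(e)):
      build_model(bad_inputs)  # hypothetical code expected to raise
    ```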
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
      elif not allow_soft_placement and config.allow_soft_placement:
        config_copy = context.context().config
        config_copy.CopyFrom(config)
        config = config_copy
        config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture

Figure illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
|
tensorflow-master
|
tensorflow/python/framework/test_util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A TensorSpec class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import type_spec
from tensorflow.python.util.tf_export import tf_export
@tf_export("TensorSpec")
class TensorSpec(type_spec.BatchableTypeSpec):
"""Describes a tf.Tensor.
Metadata for describing the `tf.Tensor` objects accepted or returned
by some TensorFlow APIs.
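  Example (illustrative sketch, using this module's own imports):
  ```python
  spec = TensorSpec(shape=[None, 3], dtype=dtypes.float32)
  spec.is_compatible_with(ops.convert_to_tensor([[1.0, 2.0, 3.0]]))  # True
  ```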
"""
__slots__ = ["_shape", "_shape_tuple", "_dtype", "_name"]
def __init__(self, shape, dtype=dtypes.float32, name=None):
"""Creates a TensorSpec.
Args:
shape: Value convertible to `tf.TensorShape`. The shape of the tensor.
dtype: Value convertible to `tf.DType`. The type of the tensor values.
name: Optional name for the Tensor.
Raises:
TypeError: If shape is not convertible to a `tf.TensorShape`, or dtype is
not convertible to a `tf.DType`.
"""
self._shape = tensor_shape.TensorShape(shape)
try:
self._shape_tuple = tuple(self.shape.as_list())
except ValueError:
self._shape_tuple = None
self._dtype = dtypes.as_dtype(dtype)
self._name = name
@classmethod
def from_spec(cls, spec, name=None):
return cls(spec.shape, spec.dtype, name or spec.name)
@classmethod
def from_tensor(cls, tensor, name=None):
if isinstance(tensor, ops.EagerTensor):
return TensorSpec(tensor.shape, tensor.dtype, name)
elif isinstance(tensor, ops.Tensor):
return TensorSpec(tensor.shape, tensor.dtype, name or tensor.op.name)
else:
raise ValueError("`tensor` should be a tf.Tensor")
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of the tensor."""
return self._shape
@property
def dtype(self):
"""Returns the `dtype` of elements in the tensor."""
return self._dtype
@property
def name(self):
"""Returns the (optionally provided) name of the described tensor."""
return self._name
def is_compatible_with(self, spec_or_tensor):
"""Returns True if spec_or_tensor is compatible with this TensorSpec.
Two tensors are considered compatible if they have the same dtype
and their shapes are compatible (see `tf.TensorShape.is_compatible_with`).
Args:
spec_or_tensor: A tf.TensorSpec or a tf.Tensor
Returns:
True if spec_or_tensor is compatible with self.
"""
return (isinstance(spec_or_tensor, (TensorSpec, ops.Tensor)) and
self._dtype.is_compatible_with(spec_or_tensor.dtype) and
self._shape.is_compatible_with(spec_or_tensor.shape))
def __repr__(self):
return "TensorSpec(shape={}, dtype={}, name={})".format(
self.shape, repr(self.dtype), repr(self.name))
def __hash__(self):
return hash((self._shape_tuple, self.dtype))
def __eq__(self, other):
# pylint: disable=protected-access
return (type(self) is type(other) and
self._shape_tuple == other._shape_tuple
and self._dtype == other._dtype
and self._name == other._name)
def __ne__(self, other):
return not self == other
value_type = property(lambda self: ops.Tensor)
def most_specific_compatible_type(self, other):
if (type(self) is not type(other)) or (self._dtype != other.dtype):
raise ValueError("Types are not compatible: %r vs %r" % (self, other))
shape = self._shape.most_specific_compatible_shape(other.shape)
name = self._name if self._name == other.name else None
return TensorSpec(shape, self._dtype, name)
def _serialize(self):
return (self._shape, self._dtype, self._name)
_component_specs = property(lambda self: self)
def _to_components(self, value):
try:
value = ops.convert_to_tensor(value, self._dtype)
except (TypeError, ValueError):
raise ValueError("Value %r is not convertible to a tensor with dtype %s "
"and shape %s." % (value, self._dtype, self._shape))
if not value.shape.is_compatible_with(self._shape):
raise ValueError("Value %r is not convertible to a tensor with dtype %s "
"and shape %s." % (value, self._dtype, self._shape))
return value
def _from_components(self, components):
return components
def _from_compatible_tensor_list(self, tensor_list):
# TODO(b/112266545): It would be cleaner to create a new `ensure_shape()`
# op here and return that, instead of mutating the input's shape using
# `Tensor.set_shape()`. However, that would add extra ops, which could
# impact performance. When this bug is resolved, we should be able to add
# the `ensure_shape()` ops and optimize them away using contextual shape
# information.
assert len(tensor_list) == 1
tensor_list[0].set_shape(self._shape)
return tensor_list[0]
def _to_batchable_tensor_list(self, value, batched=False):
if batched and self._shape.merge_with(value.shape).ndims == 0:
raise ValueError("Unbatching a tensor is only supported for rank >= 1")
return self._to_components(value)
def _batch(self, batch_size):
return TensorSpec(
tensor_shape.TensorShape([batch_size]).concatenate(self._shape),
self._dtype)
def _unbatch(self):
if self._shape.ndims == 0:
raise ValueError("Unbatching a tensor is only supported for rank >= 1")
return TensorSpec(self._shape[1:], self._dtype)
def _to_legacy_output_types(self):
return self._dtype
def _to_legacy_output_shapes(self):
return self._shape
def _to_legacy_output_classes(self):
return ops.Tensor
# TODO(b/133606651): Should is_compatible_with check min/max bounds?
class BoundedTensorSpec(TensorSpec):
"""A `TensorSpec` that specifies minimum and maximum values.
Example usage:
```python
spec = tensor_spec.BoundedTensorSpec((1, 2, 3), tf.float32, 0, (5, 5, 5))
tf_minimum = tf.convert_to_tensor(spec.minimum, dtype=spec.dtype)
tf_maximum = tf.convert_to_tensor(spec.maximum, dtype=spec.dtype)
```
Bounds are meant to be inclusive. This is especially important for
integer types. The following spec will be satisfied by tensors
with values in the set {0, 1, 2}:
```python
spec = tensor_spec.BoundedTensorSpec((3, 5), tf.int32, 0, 2)
```
"""
__slots__ = ("_minimum", "_maximum")
def __init__(self, shape, dtype, minimum, maximum, name=None):
"""Initializes a new `BoundedTensorSpec`.
Args:
shape: Value convertible to `tf.TensorShape`. The shape of the tensor.
dtype: Value convertible to `tf.DType`. The type of the tensor values.
minimum: Number or sequence specifying the minimum element bounds
(inclusive). Must be broadcastable to `shape`.
maximum: Number or sequence specifying the maximum element bounds
(inclusive). Must be broadcastable to `shape`.
name: Optional string containing a semantic name for the corresponding
array. Defaults to `None`.
Raises:
ValueError: If `minimum` or `maximum` are not provided or not
broadcastable to `shape`.
TypeError: If the shape is not an iterable or if the `dtype` is an invalid
numpy dtype.
"""
super(BoundedTensorSpec, self).__init__(shape, dtype, name)
if minimum is None or maximum is None:
raise ValueError("minimum and maximum must be provided; but saw "
"'%s' and '%s'" % (minimum, maximum))
try:
minimum_shape = np.shape(minimum)
common_shapes.broadcast_shape(
tensor_shape.TensorShape(minimum_shape), self.shape)
except ValueError as exception:
raise ValueError("minimum is not compatible with shape. "
"Message: {!r}.".format(exception))
try:
maximum_shape = np.shape(maximum)
common_shapes.broadcast_shape(
tensor_shape.TensorShape(maximum_shape), self.shape)
except ValueError as exception:
raise ValueError("maximum is not compatible with shape. "
"Message: {!r}.".format(exception))
self._minimum = np.array(minimum, dtype=self.dtype.as_numpy_dtype())
self._minimum.setflags(write=False)
self._maximum = np.array(maximum, dtype=self.dtype.as_numpy_dtype())
self._maximum.setflags(write=False)
@classmethod
def from_spec(cls, spec):
dtype = dtypes.as_dtype(spec.dtype)
minimum = getattr(spec, "minimum", dtype.min)
maximum = getattr(spec, "maximum", dtype.max)
return BoundedTensorSpec(spec.shape, dtype, minimum, maximum, spec.name)
@property
def minimum(self):
"""Returns a NumPy array specifying the minimum bounds (inclusive)."""
return self._minimum
@property
def maximum(self):
"""Returns a NumPy array specifying the maximum bounds (inclusive)."""
return self._maximum
def __repr__(self):
s = "BoundedTensorSpec(shape={}, dtype={}, name={}, minimum={}, maximum={})"
return s.format(self.shape, repr(self.dtype), repr(self.name),
repr(self.minimum), repr(self.maximum))
def __eq__(self, other):
tensor_spec_eq = super(BoundedTensorSpec, self).__eq__(other)
return (tensor_spec_eq and np.allclose(self.minimum, other.minimum) and
np.allclose(self.maximum, other.maximum))
def __reduce__(self):
return BoundedTensorSpec, (self._shape, self._dtype, self._minimum,
self._maximum, self._name)
def _serialize(self):
return (self._shape, self._dtype, self._minimum, self._maximum, self._name)
pywrap_tensorflow.RegisterType("TensorSpec", TensorSpec)
# Note: we do not include Tensor names when constructing TypeSpecs.
type_spec.register_type_spec_from_value_converter(
ops.Tensor,
lambda tensor: TensorSpec(tensor.shape, tensor.dtype))
|
tensorflow-master
|
tensorflow/python/framework/tensor_spec.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
def raise_exception():
raise RuntimeError("did not expect to be called")
class SmartCondTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testTrue(self):
with ops.Graph().as_default():
with session.Session():
x = constant_op.constant(2)
y = constant_op.constant(5)
z = smart_cond.smart_cond(True, lambda: math_ops.multiply(x, 16),
lambda: math_ops.multiply(y, 5))
self.assertEqual(z.eval(), 32)
@test_util.run_deprecated_v1
def testFalse(self):
with ops.Graph().as_default():
with session.Session():
x = constant_op.constant(4)
y = constant_op.constant(3)
z = smart_cond.smart_cond(False, lambda: math_ops.multiply(x, 16),
lambda: math_ops.multiply(y, 3))
self.assertEqual(z.eval(), 9)
def testUnknown(self):
with ops.Graph().as_default():
with session.Session():
x = array_ops.placeholder(dtype=dtypes.int32)
y = smart_cond.smart_cond(x > 0, lambda: constant_op.constant(1),
lambda: constant_op.constant(2))
self.assertEqual(y.eval(feed_dict={x: 1}), 1)
self.assertEqual(y.eval(feed_dict={x: -1}), 2)
def testEval(self):
with ops.Graph().as_default():
with session.Session():
x = constant_op.constant(1)
y = constant_op.constant(2)
# x * y > 0 can be evaluated at graph construction time, so the false
# branch shouldn't be evaluated at all.
z = smart_cond.smart_cond(x * y > 0, lambda: constant_op.constant(1),
raise_exception)
self.assertEqual(z.eval(feed_dict={x: 1}), 1)
def testPlaceholderWithDefault(self):
with ops.Graph().as_default():
with session.Session():
x = array_ops.placeholder_with_default(1, shape=())
y = smart_cond.smart_cond(x > 0, lambda: constant_op.constant(1),
lambda: constant_op.constant(2))
self.assertEqual(y.eval(), 1)
self.assertEqual(y.eval(feed_dict={x: -1}), 2)
def testMissingArg1(self):
with ops.Graph().as_default():
with session.Session():
x = constant_op.constant(1)
with self.assertRaises(TypeError):
smart_cond.smart_cond(True, false_fn=lambda: x)
def testMissingArg2(self):
with ops.Graph().as_default():
with session.Session():
x = constant_op.constant(1)
with self.assertRaises(TypeError):
smart_cond.smart_cond(True, lambda: x)
class SmartCaseTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testTrue(self):
x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
conditions = [(True, lambda: constant_op.constant(1)),
(x == 0, raise_exception)]
y = smart_cond.smart_case(conditions, default=raise_exception,
exclusive=False)
z = smart_cond.smart_case(conditions, default=raise_exception,
exclusive=True)
with session.Session() as sess:
# No feed_dict necessary
self.assertEqual(self.evaluate(y), 1)
self.assertEqual(self.evaluate(z), 1)
@test_util.run_deprecated_v1
def testFalse(self):
conditions = [(False, raise_exception)]
y = smart_cond.smart_case(conditions,
default=lambda: constant_op.constant(1),
exclusive=False)
z = smart_cond.smart_case(conditions,
default=lambda: constant_op.constant(1),
exclusive=True)
with session.Session() as sess:
self.assertEqual(self.evaluate(y), 1)
self.assertEqual(self.evaluate(z), 1)
@test_util.run_deprecated_v1
def testMix(self):
x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
y = constant_op.constant(10)
conditions = [(x > 1, lambda: constant_op.constant(1)),
(y < 1, raise_exception),
(False, raise_exception),
(True, lambda: constant_op.constant(3))]
z = smart_cond.smart_case(conditions, default=raise_exception)
with session.Session() as sess:
self.assertEqual(sess.run(z, feed_dict={x: 2}), 1)
self.assertEqual(sess.run(z, feed_dict={x: 0}), 3)
class SmartConstantValueTest(test_util.TensorFlowTestCase):
# TODO(skyewm): this is essentially a regression test for
# TF_TryEvaluateConstant, and is not really a valid smart_constant_value test
# (smart_constant_value is only supposed to return bools). Move the
# TF_TryEvaluateConstant call to tensor_util.constant_value and make this a
# constant_value test instead.
def testCond(self):
with ops.Graph().as_default():
pred = array_ops.placeholder_with_default(True, shape=())
x = control_flow_ops.cond(pred,
lambda: constant_op.constant(1),
lambda: constant_op.constant(2))
self.assertIsNone(smart_cond.smart_constant_value(x))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/smart_cond_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to convert variables to constants in TensorFlow 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import tensor_util
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.saver import export_meta_graph
def _run_inline_graph_optimization(func):
"""Apply function inline optimization to the graph.
Returns the GraphDef after Grappler's function inlining optimization is
applied. This optimization does not work on models with control flow.
Args:
func: ConcreteFunction.
Returns:
GraphDef
"""
meta_graph = export_meta_graph(
graph_def=func.graph.as_graph_def(), graph=func.graph)
  # Clear the initializer_name for the variables collections, since they are
  # not needed after the model is saved.
  for name in [
      "variables", "model_variables", "trainable_variables", "local_variables"
  ]:
    raw_list = []
    for raw in meta_graph.collection_def[name].bytes_list.value:
      variable = variable_pb2.VariableDef()
      variable.ParseFromString(raw)
      variable.ClearField("initializer_name")
      raw_list.append(variable.SerializeToString())
    meta_graph.collection_def[name].bytes_list.value[:] = raw_list
# Add a collection 'train_op' so that Grappler knows the outputs.
fetch_collection = meta_graph_pb2.CollectionDef()
for array in func.inputs + func.outputs:
fetch_collection.node_list.value.append(array.name)
meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
# Initialize RewriterConfig with everything disabled except function inlining.
config = config_pb2.ConfigProto()
rewrite_options = config.graph_options.rewrite_options
rewrite_options.min_graph_nodes = -1 # do not skip small graphs
rewrite_options.optimizers.append("function")
return tf_optimizer.OptimizeGraph(config, meta_graph)
def _get_tensors_from_graph(graph, tensors):
"""Gets the Tensors in `graph` with the name of the tensors in `tensors`.
Args:
graph: TensorFlow Graph.
tensors: List of Tensors.
Returns:
List of Tensors.
"""
new_tensors = []
for orig_tensor in tensors:
new_tensor = graph.get_tensor_by_name(orig_tensor.name)
if new_tensor.shape.rank is None:
new_tensor.set_shape(orig_tensor.shape)
new_tensors.append(new_tensor)
return new_tensors
def _get_tensor_name(name):
"""Returns the name of the input tensor.
Args:
name: str
Returns:
str
"""
return name.split(":")[0]
def _get_tensor_data(func):
"""Gets the tensor data for all Placeholders in the model.
Returns a dictionary that maps the tensor name to a dictionary containing:
data: numpy data
index: int index in func.graph.captures
is_variable: bool indicating whether the tensor is a variable or not
Args:
func: ConcreteFunction.
Returns:
Dict
"""
tensor_data = {}
map_index_to_variable = {
func.captured_inputs.index(var.handle): var
for var in func.graph.variables
}
# Iterates through all captures which are represented as Placeholders.
for idx, (val_tensor, name_tensor) in enumerate(func.graph.captures.items()):
tensor_name = _get_tensor_name(name_tensor.name)
is_variable = idx in map_index_to_variable
if is_variable:
data = map_index_to_variable[idx].numpy()
else:
data = val_tensor.numpy()
tensor_data[tensor_name] = {
"data": data,
"index": idx,
"is_variable": is_variable,
}
return tensor_data
def _populate_const_op(output_node, node_name, dtype, data, data_shape):
"""Creates a Const op.
Args:
output_node: TensorFlow NodeDef.
node_name: str node name.
dtype: AttrValue with a populated .type field.
data: numpy data value.
data_shape: Tuple of integers containing data shape.
"""
output_node.op = "Const"
output_node.name = node_name
output_node.attr["dtype"].CopyFrom(dtype)
tensor = tensor_util.make_tensor_proto(
data, dtype=dtype.type, shape=data_shape)
output_node.attr["value"].tensor.CopyFrom(tensor)
def _construct_concrete_function(func, output_graph_def,
converted_input_indices):
"""Constructs a concrete function from the `output_graph_def`.
Args:
func: ConcreteFunction
output_graph_def: GraphDef proto.
converted_input_indices: Set of integers of input indices that were
converted to constants.
Returns:
ConcreteFunction.
"""
# Create a ConcreteFunction from the new GraphDef.
  input_tensors = list(func.graph.captures.values())
converted_inputs = set(
[input_tensors[index] for index in converted_input_indices])
not_converted_inputs = set(func.inputs).difference(converted_inputs)
not_converted_inputs_map = {
tensor.name: tensor for tensor in not_converted_inputs
}
new_input_names = [tensor.name for tensor in not_converted_inputs]
new_output_names = [tensor.name for tensor in func.outputs]
new_func = wrap_function.function_from_graph_def(output_graph_def,
new_input_names,
new_output_names)
  # Manually propagate shapes for input tensors where the shape is not
  # correctly propagated. Scalar shapes are lost when wrapping the function.
for input_tensor in new_func.inputs:
input_tensor.set_shape(not_converted_inputs_map[input_tensor.name].shape)
return new_func
def convert_variables_to_constants_v2(func):
"""Replaces all the variables in a graph with constants of the same values.
TensorFlow 2.0 function for converting all Variable ops into Const ops holding
the same values. This makes it possible to describe the network fully with a
single GraphDef file, and allows the removal of a lot of ops related to
loading and saving the variables. This function runs Grappler's function
inlining optimization in order to return a single subgraph.
The current implementation only works for graphs that do not contain any
control flow or embedding related ops.
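  Example (illustrative sketch; `model` is a hypothetical `tf.Module` whose
  `__call__` is a `tf.function` that reads variables):
  ```python
  concrete_func = model.__call__.get_concrete_function(
      tf.TensorSpec([None, 3], tf.float32))
  frozen_func = convert_variables_to_constants_v2(concrete_func)
  frozen_graph_def = frozen_func.graph.as_graph_def()
  ```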
Args:
func: ConcreteFunction.
Returns:
ConcreteFunction containing a simplified version of the original.
"""
# TODO(nupurgarg): Replace ResourceGather with Gather.
# TODO(nupurgarg): Change attr for Variables in control flow and functions.
graph_def = _run_inline_graph_optimization(func)
# Get mapping from node name to node.
name_to_node = {_get_tensor_name(node.name): node for node in graph_def.node}
# Get mapping from node name to variable value.
tensor_data = _get_tensor_data(func)
# Get variable data.
reference_variables = {}
resource_identities = {}
placeholders = {}
converted_input_indices = set()
def _save_placeholder(node_name, dtype):
placeholders[node_name] = {
"dtype": dtype,
"data": tensor_data[node_name]["data"],
}
converted_input_indices.add(tensor_data[node_name]["index"])
for node in graph_def.node:
if node.op == "VariableV2":
# Get data for VariableV2 ops (reference variables) that cannot be lifted.
with func.graph.as_default():
identity_node = array_ops.identity(
func.graph.as_graph_element(node.name + ":0"))
reference_variables[node.name] = (
func.prune([], [identity_node.name])()[0])
elif node.name in tensor_data and not tensor_data[node.name]["is_variable"]:
      # Get dtype and data for non-variable Placeholders (e.g. values for 1.X
      # Const ops that are loaded as Placeholders in 2.0).
_save_placeholder(node.name, node.attr["dtype"])
elif node.op == "ReadVariableOp":
# Get dtype and data for Placeholder ops associated with ReadVariableOp.
# There can be an Identity in between the ReadVariableOp and Placeholder.
# Store the dtype for the Identity ops.
input_name = _get_tensor_name(node.input[0])
while name_to_node[input_name].op == "Identity":
resource_identities[input_name] = node.attr["dtype"]
input_name = _get_tensor_name(name_to_node[input_name].input[0])
if name_to_node[input_name].op != "Placeholder":
raise ValueError("Cannot find the Placeholder op that is an input "
"to the ReadVariableOp.")
_save_placeholder(input_name, node.attr["dtype"])
# Reconstruct the graph with constants in place of variables.
output_graph_def = graph_pb2.GraphDef()
how_many_converted = 0
for input_node in graph_def.node:
output_node = output_graph_def.node.add()
# Convert VariableV2 ops to Const ops.
if input_node.name in reference_variables:
data = reference_variables[input_node.name]
dtype = attr_value_pb2.AttrValue(type=data.dtype.as_datatype_enum)
_populate_const_op(output_node, input_node.name, dtype, data.numpy(),
data.shape)
how_many_converted += 1
# Convert Placeholder ops to Const ops.
elif input_node.name in placeholders:
data = placeholders[input_node.name]["data"]
dtype = placeholders[input_node.name]["dtype"]
_populate_const_op(output_node, input_node.name, dtype, data, data.shape)
how_many_converted += 1
# Change the dtype for Identity ops that are inputs to ReadVariableOps.
elif input_node.name in resource_identities:
output_node.CopyFrom(input_node)
output_node.attr["T"].CopyFrom(resource_identities[input_node.name])
# Convert ReadVariableOps to Identity ops.
elif input_node.op == "ReadVariableOp":
output_node.op = "Identity"
output_node.name = input_node.name
output_node.input.extend([input_node.input[0]])
output_node.attr["T"].CopyFrom(input_node.attr["dtype"])
if "_class" in input_node.attr:
output_node.attr["_class"].CopyFrom(input_node.attr["_class"])
else:
output_node.CopyFrom(input_node)
logging.info("Converted %d variables to const ops.", how_many_converted)
return _construct_concrete_function(func, output_graph_def,
converted_input_indices)
|
tensorflow-master
|
tensorflow/python/framework/convert_to_constants.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Including this as a dependency will result in Tensorflow tests using XLA.
This function is defined by default in test_util.py to False. The test_util then
attempts to import this module. If this file is made available through the BUILD
rule, then this function is overridden and will instead cause Tensorflow graphs
to be compiled with XLA.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def is_xla_enabled():
"""Returns true to state XLA should be enabled for Tensorflow tests."""
return True
|
tensorflow-master
|
tensorflow/python/framework/is_xla_test_true.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registry for tensor conversion functions."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
import six
from tensorflow.python.eager import context
from tensorflow.python.util.tf_export import tf_export
_tensor_conversion_func_registry = {}
_tensor_conversion_func_cache = {}
_tensor_conversion_func_lock = threading.Lock()
# TODO(josh11b): Add ctx argument to conversion_func() signature.
@tf_export("register_tensor_conversion_function")
def register_tensor_conversion_function(base_type,
conversion_func,
priority=100):
"""Registers a function for converting objects of `base_type` to `Tensor`.
The conversion function must have the following signature:
```python
def conversion_func(value, dtype=None, name=None, as_ref=False):
# ...
```
It must return a `Tensor` with the given `dtype` if specified. If the
conversion function creates a new `Tensor`, it should use the given
`name` if specified. All exceptions will be propagated to the caller.
The conversion function may return `NotImplemented` for some
inputs. In this case, the conversion process will continue to try
subsequent conversion functions.
If `as_ref` is true, the function must return a `Tensor` reference,
such as a `Variable`.
NOTE: The conversion functions will execute in order of priority,
followed by order of registration. To ensure that a conversion function
`F` runs before another conversion function `G`, ensure that `F` is
registered with a smaller priority than `G`.
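  For example, a minimal sketch that registers a conversion for a
  hypothetical user-defined wrapper class:

  ```python
  class MyWrapper(object):

    def __init__(self, value):
      self.value = value

  def my_wrapper_conversion(value, dtype=None, name=None, as_ref=False):
    return tf.convert_to_tensor(value.value, dtype=dtype, name=name)

  tf.register_tensor_conversion_function(MyWrapper, my_wrapper_conversion)
  t = tf.convert_to_tensor(MyWrapper([1.0, 2.0]))  # Now succeeds.
  ```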
Args:
base_type: The base type or tuple of base types for all objects that
`conversion_func` accepts.
conversion_func: A function that converts instances of `base_type` to
`Tensor`.
priority: Optional integer that indicates the priority for applying this
conversion function. Conversion functions with smaller priority values run
earlier than conversion functions with larger priority values. Defaults to
100.
Raises:
TypeError: If the arguments do not have the appropriate type.
"""
global _tensor_conversion_func_cache
with _tensor_conversion_func_lock:
if not (isinstance(base_type, type) or
(isinstance(base_type, tuple) and
all(isinstance(x, type) for x in base_type))):
raise TypeError("base_type must be a type or a tuple of types.")
if not callable(conversion_func):
raise TypeError("conversion_func must be callable.")
# context._context is checked so that we don't inadvertently create it.
# This is because enable_eager_execution will fail when called from the main
# function if the context._context is already created, and the
# register_tensor_conversion_function calls happen when the module is
# imported.
if context._context is not None and context.executing_eagerly( # pylint: disable=protected-access
) and isinstance(base_type, six.integer_types + (
float,
np.ndarray,
)):
# TODO(nareshmodi): consider setting a context variable which disables the
# fastpath instead.
raise TypeError(
"Cannot register conversions for numpy arrays, python number types "
"when executing eagerly.")
try:
funcs_at_priority = _tensor_conversion_func_registry[priority]
except KeyError:
funcs_at_priority = []
_tensor_conversion_func_registry[priority] = funcs_at_priority
funcs_at_priority.append((base_type, conversion_func))
_tensor_conversion_func_cache = {}
|
tensorflow-master
|
tensorflow/python/framework/tensor_conversion_registry.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import re
import sys
import threading
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import monitoring
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.framework import tensor_like
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import traceable_stack
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lock_util
from tensorflow.python.util import memory
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_stack
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.tf_export import tf_export
# Temporary global switches determining if we should enable the work-in-progress
# calls to the C API. These will be removed once all functionality is supported.
_USE_C_API = True
_USE_C_SHAPES = True
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/ops_eager_execution",
"Whether ops.enable_eager_execution() is called.")
# pylint: disable=protected-access
_TensorLike = tensor_like._TensorLike
_tensor_conversion_func_registry = \
tensor_conversion_registry._tensor_conversion_func_registry
_tensor_conversion_func_cache = \
tensor_conversion_registry._tensor_conversion_func_cache
_tensor_conversion_func_lock = \
tensor_conversion_registry._tensor_conversion_func_lock
# pylint: enable=protected-access
def tensor_id(tensor):
"""Returns a unique identifier for this Tensor."""
return tensor._id # pylint: disable=protected-access
class _UserDeviceSpec(object):
"""Store user-specified device and provide computation of merged device."""
def __init__(self, device_name_or_function):
self._device_name_or_function = device_name_or_function
self.display_name = str(self._device_name_or_function)
self.function = device_name_or_function
self.raw_string = None
if isinstance(device_name_or_function, pydev.MergeDevice):
self.is_null_merge = device_name_or_function.is_null_merge
elif callable(device_name_or_function):
self.is_null_merge = False
dev_func = self._device_name_or_function
func_name = function_utils.get_func_name(dev_func)
func_code = function_utils.get_func_code(dev_func)
if func_code:
fname = func_code.co_filename
lineno = func_code.co_firstlineno
else:
fname = "unknown"
lineno = -1
self.display_name = "%s<%s, %d>" % (func_name, fname, lineno)
elif device_name_or_function is None:
# NOTE(taylorrobie): This MUST be False. None signals a break in the
# device stack, so `is_null_merge` must be False for such a case to
# allow callers to safely skip over null merges without missing a None.
self.is_null_merge = False
else:
self.raw_string = device_name_or_function
self.function = pydev.merge_device(device_name_or_function)
self.is_null_merge = self.function.is_null_merge
# We perform this check in __init__ because it is of non-trivial cost,
# and self.string_merge is typically called many times.
self.fast_string_merge = isinstance(self.function, pydev.MergeDevice)
def string_merge(self, node_def):
if self.fast_string_merge:
return self.function.shortcut_string_merge(node_def)
return compat.as_str(_device_string(self.function(node_def)))
class NullContextmanager(object):
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator has already been overwritten,
or if operator is not allowed to be overwritten.
"""
existing = getattr(clazz_object, operator, None)
if existing is not None:
# Check to see if this is a default method-wrapper or slot wrapper which
# will be true for the comparison operators.
if not isinstance(existing, type(object.__lt__)):
raise ValueError("operator %s cannot be overwritten again on class %s." %
(operator, clazz_object))
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
_TENSOR_LIKE_TYPES = tuple()
def is_dense_tensor_like(t):
"""EXPERIMENTAL: Returns true if `t` implements the tensor interface.
See `register_dense_tensor_like_type()` for the current definition of a
"tensor-like type".
Args:
t: An object.
Returns:
True iff `t` is an instance of one of the registered "tensor-like" types.
"""
return isinstance(t, _TENSOR_LIKE_TYPES)
def register_dense_tensor_like_type(tensor_type):
"""EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.
A "tensor-like type" can represent a single dense tensor, and implements
the `name` and `dtype` properties.
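  For example, a minimal sketch of a conforming type (the class name is
  illustrative):

  ```python
  class MyDenseLike(object):

    @property
    def name(self):
      return "my_dense_like:0"

    @property
    def dtype(self):
      return dtypes.float32

  register_dense_tensor_like_type(MyDenseLike)
  assert is_dense_tensor_like(MyDenseLike())
  ```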
Args:
tensor_type: A type implementing the tensor interface.
Raises:
TypeError: If `tensor_type` does not implement the tensor interface.
"""
try:
if not isinstance(tensor_type.name, property):
raise TypeError("Type %s does not define a `name` property" %
tensor_type.__name__)
except AttributeError:
raise TypeError("Type %s does not define a `name` property" %
tensor_type.__name__)
try:
if not isinstance(tensor_type.dtype, property):
raise TypeError("Type %s does not define a `dtype` property" %
tensor_type.__name__)
except AttributeError:
raise TypeError("Type %s does not define a `dtype` property" %
tensor_type.__name__)
# We expect this list to be small, so choose quadratic complexity
# for registration, so that we have a tuple that can be used for
# more efficient `isinstance` checks later.
global _TENSOR_LIKE_TYPES
_TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])
def uid():
"""A unique (within this program execution) integer."""
return c_api.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
"""Human readable representation of a tensor's numpy value."""
if tensor.dtype.is_numpy_compatible:
text = repr(tensor.numpy()) if is_repr else str(tensor.numpy())
else:
text = "<unprintable>"
if "\n" in text:
text = "\n" + text
return text
@tf_export("Tensor")
class Tensor(_TensorLike):
"""Represents one of the outputs of an `Operation`.
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
TensorFlow `tf.compat.v1.Session`.
This class has two primary purposes:
1. A `Tensor` can be passed as an input to another `Operation`.
This builds a dataflow connection between operations, which
enables TensorFlow to execute an entire `Graph` that represents a
large, multi-step computation.
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
`tf.Session.run`.
`t.eval()` is a shortcut for calling
`tf.compat.v1.get_default_session().run(t)`.
In the following example, `c`, `d`, and `e` are symbolic `Tensor`
objects, whereas `result` is a numpy array that stores a concrete
value:
```python
# Build a dataflow graph.
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
e = tf.matmul(c, d)
# Construct a `Session` to execute the graph.
sess = tf.compat.v1.Session()
# Execute the graph and store the value that `e` represents in `result`.
result = sess.run(e)
```
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
def __init__(self, op, value_index, dtype):
"""Creates a new `Tensor`.
Args:
op: An `Operation`. `Operation` that computes this tensor.
value_index: An `int`. Index of the operation's endpoint that produces
this tensor.
dtype: A `DType`. Type of elements stored in this tensor.
Raises:
TypeError: If the op is not an `Operation`.
"""
if not isinstance(op, Operation):
raise TypeError("op needs to be an Operation: %s" % op)
self._op = op
self._value_index = value_index
self._dtype = dtypes.as_dtype(dtype)
# This will be set by self._as_tf_output().
self._tf_output = None
# This will be set by self.shape().
self._shape_val = None
# List of operations that use this Tensor as input. We maintain this list
# to easily navigate a computation graph.
self._consumers = []
self._id = uid()
self._name = None
@property
def op(self):
"""The `Operation` that produces this tensor as an output."""
return self._op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._dtype
@property
def graph(self):
"""The `Graph` that contains this tensor."""
return self._op.graph
@property
def name(self):
"""The string name of this tensor."""
if self._name is None:
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
self._name = "%s:%d" % (self._op.name, self._value_index)
return self._name
@property
def device(self):
"""The name of the device on which this tensor will be produced, or None."""
return self._op.device
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of this tensor.
The shape is computed using shape inference functions that are
registered in the Op for each `Operation`. See
`tf.TensorShape`
for more details of what a shape represents.
The inferred shape of a tensor is used to provide shape
information without having to launch the graph in a session. This
can be used for debugging, and providing early error messages. For
example:
```python
c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(c.shape)
==> TensorShape([Dimension(2), Dimension(3)])
d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
print(d.shape)
==> TensorShape([Dimension(4), Dimension(2)])
# Raises a ValueError, because `c` and `d` do not have compatible
# inner dimensions.
e = tf.matmul(c, d)
f = tf.matmul(c, d, transpose_a=True, transpose_b=True)
print(f.shape)
==> TensorShape([Dimension(3), Dimension(4)])
```
In some cases, the inferred shape may have unknown dimensions. If
the caller has additional information about the values of these
dimensions, `Tensor.set_shape()` can be used to augment the
inferred shape.
Returns:
A `TensorShape` representing the shape of this tensor.
"""
if self._shape_val is None:
self._shape_val = self._c_api_shape()
return self._shape_val
def _get_input_ops_without_shapes(self, target_op):
"""Returns ops needing shape inference to compute target_op's shape."""
result = []
stack = [self._op]
visited = set()
while stack:
op = stack.pop()
if op in visited:
continue
result.append(op)
stack.extend(t.op for t in op.inputs if t._shape_val is None)
visited.add(op)
return result
def _c_api_shape(self):
"""Returns the TensorShape of this tensor according to the C API."""
c_graph = self._op._graph._c_graph # pylint: disable=protected-access
shape_vector, unknown_shape = c_api.TF_GraphGetTensorShapeHelper(
c_graph, self._as_tf_output())
if unknown_shape:
return tensor_shape.unknown_shape()
else:
shape_vector = [None if d == -1 else d for d in shape_vector]
return tensor_shape.TensorShape(shape_vector)
@property
def _shape(self):
logging.warning("Tensor._shape is private, use Tensor.shape "
"instead. Tensor._shape will eventually be removed.")
return self.shape
@_shape.setter
def _shape(self, value):
raise ValueError(
"Tensor._shape cannot be assigned, use Tensor.set_shape instead.")
def __iter__(self):
if not context.executing_eagerly():
raise TypeError(
"Tensor objects are only iterable when eager execution is "
"enabled. To iterate over this tensor use tf.map_fn.")
shape = self._shape_tuple()
if shape is None:
raise TypeError("Cannot iterate over a tensor with unknown shape.")
if not shape:
raise TypeError("Cannot iterate over a scalar tensor.")
if shape[0] is None:
raise TypeError(
"Cannot iterate over a tensor with unknown first dimension.")
for i in xrange(shape[0]):
yield self[i]
def _shape_as_list(self):
if self.shape.ndims is not None:
return [dim.value for dim in self.shape.dims]
else:
return None
def _shape_tuple(self):
shape = self._shape_as_list()
if shape is None:
return None
return tuple(shape)
def _rank(self):
"""Integer rank of this Tensor, if known, else None.
Returns:
Integer rank or None
"""
return self.shape.ndims
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def set_shape(self, shape):
"""Updates the shape of this tensor.
This method can be called multiple times, and will merge the given
`shape` with the current shape of this tensor. It can be used to
provide additional information about the shape of this tensor that
cannot be inferred from the graph alone. For example, this can be used
to provide additional information about the shapes of images:
```python
_, image_data = tf.compat.v1.TFRecordReader(...).read(...)
image = tf.image.decode_png(image_data, channels=3)
# The height and width dimensions of `image` are data dependent, and
# cannot be computed without executing the op.
print(image.shape)
==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])
# We know that each image in this dataset is 28 x 28 pixels.
image.set_shape([28, 28, 3])
print(image.shape)
==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
```
NOTE: This shape is not enforced at runtime. Setting incorrect shapes can
result in inconsistencies between the statically-known graph and the runtime
value of tensors. For runtime validation of the shape, use `tf.ensure_shape`
instead.
Args:
shape: A `TensorShape` representing the shape of this tensor, a
`TensorShapeProto`, a list, a tuple, or None.
Raises:
ValueError: If `shape` is not compatible with the current shape of
this tensor.
"""
# Reset cached shape.
self._shape_val = None
# We want set_shape to be reflected in the C API graph for when we run it.
if not isinstance(shape, tensor_shape.TensorShape):
shape = tensor_shape.TensorShape(shape)
dim_list = []
if shape.dims is None:
unknown_shape = True
else:
unknown_shape = False
for dim in shape.dims:
if dim.value is None:
dim_list.append(-1)
else:
dim_list.append(dim.value)
try:
c_api.TF_GraphSetTensorShape_wrapper(
self._op._graph._c_graph, # pylint: disable=protected-access
self._as_tf_output(),
dim_list,
unknown_shape)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
@property
def value_index(self):
"""The index of this tensor in the outputs of its `Operation`."""
return self._value_index
def consumers(self):
"""Returns a list of `Operation`s that consume this tensor.
Returns:
A list of `Operation`s.
"""
consumer_names = c_api.TF_OperationOutputConsumers_wrapper(
self._as_tf_output())
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(name)
for name in consumer_names
]
# pylint: enable=protected-access
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
def _as_tf_output(self):
# pylint: disable=protected-access
# NOTE: Beyond preventing unnecessary (re-)allocation, the cached object
# also guarantees that a dictionary of tf_output objects will retain a
# deterministic (yet unsorted) order which prevents memory blowup in the
# cache of executor(s) stored for every session.
if self._tf_output is None:
self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index)
return self._tf_output
# pylint: enable=protected-access
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name,
(", shape=%s" %
self.get_shape()) if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name) if self._dtype else "",
(", device=%s" % self.device) if self.device else "")
def __repr__(self):
return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
self._dtype.name)
def __hash__(self):
# Necessary to support Python's collection membership operators
return id(self)
def __eq__(self, other):
# Necessary to support Python's collection membership operators
# NOTE(taylorrobie): equivalent to: id(self) == id(other)
return self is other
def __copy__(self):
# TODO(b/77597810): get rid of Tensor copies.
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100
@staticmethod
def _override_operator(operator, func):
_override_helper(Tensor, operator, func)
def __bool__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This overload raises a `TypeError` when the user inadvertently
treats a `Tensor` as a boolean (e.g. in an `if` statement). For
example:
```python
if tf.constant(True): # Will raise.
# ...
if tf.constant(5) < tf.constant(7): # Will raise.
# ...
```
This disallows ambiguities between testing the Python value vs testing the
dynamic condition of the `Tensor`.
Raises:
`TypeError`.
"""
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.")
def __nonzero__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This is the Python 2.x counterpart to `__bool__()` above.
Raises:
`TypeError`.
"""
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.")
def eval(self, feed_dict=None, session=None):
"""Evaluates this tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `Tensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
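    For example (a sketch, assuming TF 1.x-style graph mode):

    ```python
    c = tf.constant(5.0)
    with tf.compat.v1.Session() as sess:
      print(c.eval(session=sess))  # ==> 5.0
    ```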
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
Returns:
A numpy array corresponding to the value of this tensor.
"""
return _eval_using_default_session(self, feed_dict, self.graph, session)
# TODO(agarwal): consider getting rid of this.
class _EagerTensorBase(Tensor):
"""Base class for EagerTensor."""
@property
def dtype(self):
# Note: using the intern table directly here as this is
# performance-sensitive in some models.
return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access
def numpy(self):
"""Returns a numpy array or a scalar with the same contents as the Tensor.
TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying
buffer but instead always explicitly copy? Note that currently it may or may
not copy based on whether the numpy data is properly aligned or not.
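    For example (assuming eager execution):

    ```python
    t = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    t.numpy()  # ==> array([[1., 2.], [3., 4.]], dtype=float32)
    ```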
Returns:
A numpy array or a scalar. Numpy array may share memory with the
Tensor object. Any changes to one may be reflected in the other. A scalar
value is returned when self has rank 0.
Raises:
ValueError: if the type of this Tensor is not representable in numpy.
"""
if self.dtype == dtypes.resource:
raise ValueError("Resource handles are not convertible to numpy.")
maybe_arr = self._cpu_nograd()._numpy() # pylint: disable=protected-access
return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr
# __int__, __float__ and __index__ may copy the tensor to CPU and
# only work for scalars; values are cast as per numpy.
# TODO(slebedev): avoid redundant copy in all of the following methods.
def __int__(self):
return int(self.numpy())
def __long__(self):
return long(self.numpy())
def __float__(self):
return float(self.numpy())
def __index__(self):
maybe_arr = self.numpy()
if isinstance(maybe_arr, np.ndarray):
return maybe_arr.__index__()
return int(maybe_arr) # Must be a NumPy scalar.
def __array__(self, dtype=None):
# This is only called if the buffer interface conversion failed.
# Remove once numpy/numpy#13507 is merged and released or py_function
# creates EagerTensors with a non-nullptr context.
return np.asarray(self.numpy(), dtype=dtype)
def __format__(self, format_spec):
return self.numpy().__format__(format_spec)
def __reduce__(self):
return (convert_to_tensor, (self.numpy(),))
def _numpy(self):
raise NotImplementedError()
@property
def backing_device(self):
"""Returns the name of the device holding this tensor's memory.
`.backing_device` is usually the same as `.device`, which returns
the device on which the kernel of the operation that produced this tensor
ran. However, some operations can produce tensors on a different device
(e.g., an operation that executes on the GPU but produces output tensors
in host memory).
"""
raise NotImplementedError()
def __copy__(self):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
return self
def __deepcopy__(self, memo):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
del memo
return self
def _datatype_enum(self):
raise NotImplementedError()
def _shape_tuple(self):
"""The shape of this Tensor, as a tuple.
This is more performant than tuple(shape().as_list()) as it avoids
    two lists and one object creation. Marked private for now since, from an API
perspective, it would be better to have a single performant way of
getting a shape rather than exposing shape() and shape_tuple()
(and heaven forbid, shape_list() etc. as well!). Punting on that for now,
but ideally one would work things out and remove the need for this method.
Returns:
tuple with the shape.
"""
raise NotImplementedError()
def _rank(self):
"""Integer rank of this Tensor.
Unlike regular Tensors, the rank is always known for EagerTensors.
This is more performant than len(self._shape_tuple())
Returns:
Integer rank
"""
raise NotImplementedError()
def _num_elements(self):
"""Number of elements of this Tensor.
Unlike regular Tensors, the number of elements is always known for
EagerTensors.
    This is more performant than `tensor.shape.num_elements()`.
    Returns:
      The number of elements in the tensor, as an integer.
"""
raise NotImplementedError()
def _copy_to_device(self, context, device): # pylint: disable=redefined-outer-name
raise NotImplementedError()
def __str__(self):
return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape,
self.dtype.name)
def __repr__(self):
return "<tf.Tensor: id=%s, shape=%s, dtype=%s, numpy=%s>" % (
self._id, self.shape, self.dtype.name, numpy_text(self, is_repr=True))
@staticmethod
def _override_operator(name, func):
setattr(_EagerTensorBase, name, func)
def _copy_nograd(self, ctx=None, device_name=None):
"""Copies tensor to dest device, but doesn't record the operation."""
# pylint: disable=protected-access
# Creates a new tensor on the dest device.
if ctx is None:
ctx = context.context()
if device_name is None:
device_name = ctx.device_name
# pylint: disable=protected-access
try:
ctx.ensure_initialized()
new_tensor = self._copy_to_device(context=ctx._handle, device=device_name)
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
return new_tensor
def _copy(self, ctx=None, device_name=None):
"""Copies tensor to dest device."""
new_tensor = self._copy_nograd(ctx, device_name)
# Record the copy on tape and define backprop copy as well.
if context.executing_eagerly():
self_device = self.device
def grad_fun(dresult):
return [
dresult._copy(device_name=self_device)
if hasattr(dresult, "_copy") else dresult
]
tape.record_operation("_copy", [new_tensor], [self], grad_fun)
return new_tensor
# pylint: enable=protected-access
@property
def shape(self):
if self._tensor_shape is None: # pylint: disable=access-member-before-definition
# `_tensor_shape` is declared and defined in the definition of
# `EagerTensor`, in C.
self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
return self._tensor_shape
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def _shape_as_list(self):
"""The shape of the tensor as a list."""
return list(self._shape_tuple())
@property
def ndim(self):
"""Returns the number of Tensor dimensions."""
return self.shape.ndims
def __len__(self):
"""Returns the length of the first dimension in the Tensor."""
if not self.shape.ndims:
raise TypeError("Scalar tensor has no `len()`")
return self._shape_tuple()[0]
def _cpu_nograd(self):
"""A copy of this Tensor with contents backed by host memory.
The copy cannot be differentiated through.
Returns:
A CPU-memory backed Tensor object with the same contents as this Tensor.
"""
return self._copy_nograd(context.context(), "CPU:0")
@deprecation.deprecated(None, "Use tf.identity instead.")
def cpu(self):
"""A copy of this Tensor with contents backed by host memory."""
return self._copy(context.context(), "CPU:0")
@deprecation.deprecated(None, "Use tf.identity instead.")
def gpu(self, gpu_index=0):
"""A copy of this Tensor with contents backed by memory on the GPU.
Arguments:
      gpu_index: Identifies the GPU on which to place the contents of the
        returned Tensor.
Returns:
A GPU-memory backed Tensor object initialized with the same contents
as this Tensor.
"""
return self._copy(context.context(), "GPU:" + str(gpu_index))
def __bool__(self):
return bool(self.numpy())
def __nonzero__(self):
return self.__bool__()
def set_shape(self, shape):
if not self.shape.is_compatible_with(shape):
raise ValueError(
"Tensor's shape %s is not compatible with supplied shape %s" %
(self.shape, shape))
# Methods not supported / implemented for Eager Tensors.
@property
def op(self):
raise AttributeError(
"Tensor.op is meaningless when eager execution is enabled.")
@property
def graph(self):
raise AttributeError(
"Tensor.graph is meaningless when eager execution is enabled.")
@property
def name(self):
raise AttributeError(
"Tensor.name is meaningless when eager execution is enabled.")
@property
def value_index(self):
raise AttributeError(
"Tensor.value_index is meaningless when eager execution is enabled.")
def consumers(self):
raise NotImplementedError(
"Tensor.consumers is meaningless when eager execution is enabled.")
def _add_consumer(self, consumer):
raise NotImplementedError(
"_add_consumer not supported when eager execution is enabled.")
def _as_node_def_input(self):
raise NotImplementedError(
"_as_node_def_input not supported when eager execution is enabled.")
def _as_tf_output(self):
raise NotImplementedError(
"_as_tf_output not supported when eager execution is enabled.")
def eval(self, feed_dict=None, session=None):
raise NotImplementedError(
"eval is not supported when eager execution is enabled, "
"is .numpy() what you're looking for?")
# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and
# registers it with the current module.
EagerTensor = c_api.TFE_Py_InitEagerTensor(_EagerTensorBase)
def _TensorTensorConversionFunction(t, dtype=None, name=None, as_ref=False):
_ = name, as_ref
if dtype and not dtype.is_compatible_with(t.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtype.name, t.dtype.name, str(t)))
return t
tensor_conversion_registry.register_tensor_conversion_function(
Tensor, _TensorTensorConversionFunction, 0)
register_dense_tensor_like_type(Tensor)
@tf_export(v1=["convert_to_tensor"])
def convert_to_tensor(value,
dtype=None,
name=None,
preferred_dtype=None,
dtype_hint=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
preferred_dtype: Optional element type for the returned tensor, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
dtype_hint: same meaning as preferred_dtype, and overrides it.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
preferred_dtype = deprecation.deprecated_argument_lookup(
"dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype)
return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
@tf_export("convert_to_tensor", v1=[])
def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
dtype_hint: Optional element type for the returned tensor, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
return internal_convert_to_tensor(
value=value,
dtype=dtype,
name=name,
preferred_dtype=dtype_hint,
as_ref=False)
def _error_prefix(name):
return "" if name is None else "%s: " % name
def internal_convert_to_tensor(value,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None,
accept_symbolic_tensors=True,
accept_composite_tensors=False):
"""Implementation of the public convert_to_tensor."""
if ctx is None:
ctx = context.context()
if isinstance(value, EagerTensor):
if ctx.executing_eagerly():
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
value = _TensorTensorConversionFunction(value, dtype=dtype)
return value
else:
graph = get_default_graph()
if not graph.building_function:
raise RuntimeError("Attempting to capture an EagerTensor without "
"building a function.")
return graph.capture(value, name=name)
elif ((not accept_symbolic_tensors) and isinstance(value, Tensor) and
ctx.executing_eagerly()):
# Found a symbolic tensor in an eager context.
# This happens when we use the Keras functional API (i.e. calling layers
# on the output of `keras.Input()`, which is symbolic) while eager
# execution is enabled.
if _is_keras_symbolic_tensor(value):
# If the graph of the tensor isn't the Keras graph, we should still
# fail, for the time being. TODO(fchollet): consider allowing
# all symbolic tensors to raise this exception in this case.
raise core._SymbolicException( # pylint: disable=protected-access
"Using the symbolic output of a Keras layer during eager execution.")
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
unwrapped_type = type(value)
conversion_func_list = _tensor_conversion_func_cache.get(unwrapped_type, None)
if conversion_func_list is None:
with _tensor_conversion_func_lock:
conversion_func_list = []
for _, funcs_at_priority in sorted(
_tensor_conversion_func_registry.items()):
for base_type, conversion_func in funcs_at_priority:
if isinstance(value, base_type):
conversion_func_list.append((base_type, conversion_func))
_tensor_conversion_func_cache[unwrapped_type] = conversion_func_list
for base_type, conversion_func in conversion_func_list:
# If dtype is None but preferred_dtype is not None, we try to
# cast to preferred_dtype first.
ret = None
if dtype is None and preferred_dtype is not None:
try:
ret = conversion_func(
value, dtype=preferred_dtype, name=name, as_ref=as_ref)
except (TypeError, ValueError, errors.UnimplementedError,
errors.InvalidArgumentError):
# Could not coerce the conversion to use the preferred dtype.
ret = None
if ret is not None and ret is not NotImplemented:
if (ret.dtype.base_dtype !=
dtypes.as_dtype(preferred_dtype).base_dtype):
raise TypeError("convert_to_tensor did not convert to "
"the preferred dtype: %s vs %s " %
(ret.dtype.base_dtype,
dtypes.as_dtype(preferred_dtype).base_dtype))
if ret is None:
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
if ret is NotImplemented:
continue
is_acceptable_type = (
isinstance(ret, Tensor) or
(accept_composite_tensors and
isinstance(ret, composite_tensor.CompositeTensor)))
if not is_acceptable_type:
raise RuntimeError(
"%sConversion function %r for type %s returned non-Tensor: %r" %
(_error_prefix(name), conversion_func, base_type, ret))
if dtype and not dtype.is_compatible_with(ret.dtype):
raise RuntimeError(
"%sConversion function %r for type %s returned incompatible "
"dtype: requested = %s, actual = %s" %
(_error_prefix(name), conversion_func, base_type, dtype.name,
ret.dtype.name))
return ret
raise TypeError("%sCannot convert %r with type %s to Tensor: "
"no conversion function registered." %
(_error_prefix(name), value, unwrapped_type))
def internal_convert_n_to_tensor(values,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
preferred_dtype: Optional element type for the returned tensors, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
ctx: The value of context.context().
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a sequence.")
ret = []
if ctx is None:
ctx = context.context()
for i, value in enumerate(values):
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor(
value,
dtype=dtype,
name=n,
as_ref=as_ref,
preferred_dtype=preferred_dtype,
ctx=ctx))
return ret
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
preferred_dtype: Optional element type for the returned tensors, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor(
values=values,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
def convert_to_tensor_or_composite(value, dtype=None, name=None):
"""Converts the given object to a `Tensor` or `CompositeTensor`.
If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
is converted to a `Tensor` using `convert_to_tensor()`.
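  For example (a sketch; `tf.sparse.SparseTensor` is one kind of
  `CompositeTensor`):

  ```python
  st = tf.sparse.SparseTensor(indices=[[0, 0]], values=[1.0],
                              dense_shape=[2, 2])
  convert_to_tensor_or_composite(st) is st    # ==> True; returned unmodified.
  convert_to_tensor_or_composite([1.0, 2.0])  # ==> a dense `Tensor`.
  ```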
Args:
value: A `CompositeTensor` or an object that can be consumed by
`convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`CompositeTensor`.
name: (Optional.) A name to use if a new `Tensor` is created.
Returns:
A `Tensor` or `CompositeTensor`, based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
return internal_convert_to_tensor_or_composite(
value=value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_composite(value,
dtype=None,
name=None,
as_ref=False):
"""Converts the given object to a `Tensor` or `CompositeTensor`.
If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
is converted to a `Tensor` using `convert_to_tensor()`.
Args:
value: A `CompositeTensor`, or an object that can be consumed by
`convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`CompositeTensor`.
name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A `Tensor` or `CompositeTensor`, based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, composite_tensor.CompositeTensor):
value_dtype = getattr(value, "dtype", None)
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value_dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
return internal_convert_to_tensor(
value,
dtype=dtype,
name=name,
as_ref=as_ref,
accept_composite_tensors=True)
def internal_convert_n_to_tensor_or_composite(values,
dtype=None,
name=None,
as_ref=False):
"""Converts `values` to a list of `Tensor` or `CompositeTensor` objects.
Any `CompositeTensor` objects in `values` are returned unmodified.
Args:
values: A list of `None`, `CompositeTensor`, or objects that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`s or
`CompositeTensor`s.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A list of `Tensor`, `CompositeTensor`, and/or `None` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a sequence.")
ret = []
for i, value in enumerate(values):
if value is None:
ret.append(value)
else:
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor_or_composite(
value, dtype=dtype, name=n, as_ref=as_ref))
return ret
def convert_n_to_tensor_or_composite(values, dtype=None, name=None):
"""Converts `values` to a list of `Output` or `CompositeTensor` objects.
Any `CompositeTensor` objects in `values` are returned unmodified.
Args:
    values: A list of `None`, `CompositeTensor`, or objects that can be
consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`s or
`CompositeTensor`s.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
Returns:
A list of `Tensor` and/or `CompositeTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor_or_composite(
values=values, dtype=dtype, name=name, as_ref=False)
def _device_string(dev_spec):
if pydev.is_device_spec(dev_spec):
return dev_spec.to_string()
else:
return dev_spec
def _NodeDef(op_type, name, device=None, attrs=None): # pylint: disable=redefined-outer-name
"""Create a NodeDef proto.
Args:
op_type: Value for the "op" attribute of the NodeDef proto.
name: Value for the "name" attribute of the NodeDef proto.
device: string, device, or function from NodeDef to string. Value for the
"device" attribute of the NodeDef proto.
attrs: Optional dictionary where the key is the attribute name (a string)
and the value is the respective "attr" attribute of the NodeDef proto (an
AttrValue).
Returns:
A node_def_pb2.NodeDef protocol buffer.
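  For example, a minimal sketch (attrs omitted; the names are illustrative):

  ```python
  node_def = _NodeDef("Identity", "my_identity", device="/device:CPU:0")
  # node_def.op == "Identity", node_def.name == "my_identity"
  ```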
"""
node_def = node_def_pb2.NodeDef()
node_def.op = compat.as_bytes(op_type)
node_def.name = compat.as_bytes(name)
if attrs is not None:
for k, v in six.iteritems(attrs):
node_def.attr[k].CopyFrom(v)
if device is not None:
if callable(device):
node_def.device = device(node_def)
else:
node_def.device = _device_string(device)
return node_def
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$")
def _create_c_op(graph, node_def, inputs, control_inputs):
"""Creates a TF_Operation.
Args:
graph: a `Graph`.
node_def: `node_def_pb2.NodeDef` for the operation to create.
inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs, e.g. "int64 * N",
"list(int64)"). The length of the list should be equal to the number of
inputs specified by this operation's op def.
control_inputs: A list of `Operation`s to set as control dependencies.
Returns:
A wrapped TF_Operation*.
"""
# pylint: disable=protected-access
op_desc = c_api.TF_NewOperation(graph._c_graph, compat.as_str(node_def.op),
compat.as_str(node_def.name))
if node_def.device:
c_api.TF_SetDevice(op_desc, compat.as_str(node_def.device))
# Add inputs
for op_input in inputs:
if isinstance(op_input, (list, tuple)):
c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input])
else:
c_api.TF_AddInput(op_desc, op_input._as_tf_output())
# Add control inputs
for control_input in control_inputs:
c_api.TF_AddControlInput(op_desc, control_input._c_op)
# pylint: enable=protected-access
# Add attrs
for name, attr_value in node_def.attr.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized)
try:
c_op = c_api.TF_FinishOperation(op_desc)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
return c_op
@tf_export("Operation")
class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a TensorFlow `Graph` that takes zero or
more `Tensor` objects as input, and produces zero or more `Tensor`
objects as output. Objects of type `Operation` are created by
calling a Python op constructor (such as
`tf.matmul`)
or `tf.Graph.create_op`.
For example `c = tf.matmul(a, b)` creates an `Operation` of type
"MatMul" that takes tensors `a` and `b` as input, and produces `c`
as output.
After the graph has been launched in a session, an `Operation` can
be executed by passing it to
`tf.Session.run`.
`op.run()` is a shortcut for calling
`tf.compat.v1.get_default_session().run(op)`.
"""
def __init__(self,
node_def,
g,
inputs=None,
output_types=None,
control_inputs=None,
input_types=None,
original_op=None,
op_def=None):
r"""Creates an `Operation`.
NOTE: This constructor validates the name of the `Operation` (passed
as `node_def.name`). Valid `Operation` names match the following
regular expression:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]*
Args:
node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for
attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and
`device`. The `input` attribute is irrelevant here as it will be
computed when generating the model.
g: `Graph`. The parent graph.
inputs: list of `Tensor` objects. The inputs to this `Operation`.
output_types: list of `DType` objects. List of the types of the `Tensors`
computed by this operation. The length of this list indicates the
number of output endpoints of the `Operation`.
control_inputs: list of operations or tensors from which to have a control
dependency.
input_types: List of `DType` objects representing the types of the tensors
accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x
in inputs]`. Operations that expect reference-typed inputs must specify
these explicitly.
original_op: Optional. Used to associate the new `Operation` with an
existing `Operation` (for example, a replica with the op that was
replicated).
op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type
that this `Operation` represents.
Raises:
TypeError: if control inputs are not Operations or Tensors,
or if `node_def` is not a `NodeDef`,
or if `g` is not a `Graph`,
or if `inputs` are not tensors,
or if `inputs` and `input_types` are incompatible.
ValueError: if the `node_def` name is not valid.
"""
# For internal use only: `node_def` can be set to a TF_Operation to create
# an Operation for that op. This is useful for creating Operations for ops
# indirectly created by C API methods, e.g. the ops created by
# TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields
# should be None.
if isinstance(node_def, node_def_pb2.NodeDef):
      if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
        raise ValueError(
            "Cannot create an Operation from a NodeDef larger than 2GB.")
if not _VALID_OP_NAME_REGEX.match(node_def.name):
raise ValueError("'%s' is not a valid node name" % node_def.name)
c_op = None
elif type(node_def).__name__ == "SwigPyObject":
assert inputs is None
assert output_types is None
assert control_inputs is None
assert input_types is None
assert original_op is None
assert op_def is None
c_op = node_def
else:
raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
if not isinstance(g, Graph):
raise TypeError("g needs to be a Graph: %s" % g)
self._graph = g
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
for a in inputs:
if not isinstance(a, Tensor):
raise TypeError("input needs to be a Tensor: %s" % a)
if input_types is None:
input_types = [i.dtype.base_dtype for i in inputs]
else:
if not all(
x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)):
raise TypeError("In op '%s', input types (%s) are not compatible "
"with expected types (%s)" %
(node_def.name, [i.dtype for i in inputs], input_types))
# Build the list of control inputs.
control_input_ops = []
if control_inputs:
for c in control_inputs:
control_op = None
if isinstance(c, Operation):
control_op = c
elif isinstance(c, (Tensor, IndexedSlices)):
control_op = c.op
else:
raise TypeError("Control input must be an Operation, "
"a Tensor, or IndexedSlices: %s" % c)
control_input_ops.append(control_op)
# This will be set by self.inputs.
self._inputs_val = None
# pylint: disable=protected-access
self._id_value = self._graph._next_id()
self._original_op = original_op
self._traceback = tf_stack.extract_stack()
# List of _UserDevSpecs holding code location of device context manager
# invocations and the users original argument to them.
self._device_code_locations = None
# Dict mapping op name to file and line information for op colocation
# context managers.
self._colocation_code_locations = None
self._control_flow_context = self.graph._get_control_flow_context()
# pylint: enable=protected-access
# Initialize self._c_op.
if c_op:
self._c_op = c_op
else:
if op_def is None:
op_def = self._graph._get_op_def(node_def.op)
# TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.
# Refactor so we don't have to do this here.
grouped_inputs = self._reconstruct_sequence_inputs(
op_def, inputs, node_def.attr)
self._c_op = _create_c_op(self._graph, node_def, grouped_inputs,
control_input_ops)
# Initialize self._outputs.
num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
output_types = [
c_api.TF_OperationOutputType(c_api_util.tf_output(self._c_op, i))
for i in range(num_outputs)
]
self._outputs = [
Tensor(self, i, output_type)
for i, output_type in enumerate(output_types)
]
self._graph._add_op(self) # pylint: disable=protected-access
if not c_op:
self._control_flow_post_processing()
def _control_flow_post_processing(self):
"""Add this op to its control flow context.
This may add new ops and change this op's inputs. self.inputs must be
available before calling this method.
"""
for input_tensor in self.inputs:
control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
if self._control_flow_context is not None:
self._control_flow_context.AddOp(self)
def _reconstruct_sequence_inputs(self, op_def, inputs, attrs):
"""Regroups a flat list of input tensors into scalar and sequence inputs.
Args:
op_def: The `op_def_pb2.OpDef` (for knowing the input types)
inputs: a list of input `Tensor`s to the op.
attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
how long each sequence is)
Returns:
A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs).
"""
grouped_inputs = []
i = 0
for input_arg in op_def.input_arg:
if input_arg.number_attr:
input_len = attrs[input_arg.number_attr].i
is_sequence = True
elif input_arg.type_list_attr:
input_len = len(attrs[input_arg.type_list_attr].list.type)
is_sequence = True
else:
input_len = 1
is_sequence = False
if is_sequence:
grouped_inputs.append(inputs[i:i + input_len])
else:
grouped_inputs.append(inputs[i])
i += input_len
assert i == len(inputs)
return grouped_inputs
def colocation_groups(self):
"""Returns the list of colocation groups of the op."""
default_colocation_group = [compat.as_bytes("loc:@%s" % self.name)]
try:
class_attr = self.get_attr("_class")
except ValueError:
# This op has no explicit colocation group, so it is itself its
# own root of a colocation group.
return default_colocation_group
attr_groups = [
class_name for class_name in class_attr
if class_name.startswith(b"loc:@")
]
# If there are no colocation groups in the explicit _class field,
# return the default colocation group.
return attr_groups if attr_groups else default_colocation_group
def values(self):
"""DEPRECATED: Use outputs."""
return tuple(self.outputs)
def _get_control_flow_context(self):
"""Returns the control flow context of this op.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context of this op.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
@property
def name(self):
"""The full name of this operation."""
return c_api.TF_OperationName(self._c_op)
@property
def _id(self):
"""The unique integer id of this operation."""
return self._id_value
@property
def device(self):
"""The name of the device to which this op has been assigned, if any.
Returns:
The string name of the device to which this op has been
assigned, or an empty string if it has not been assigned to a
device.
"""
return c_api.TF_OperationDevice(self._c_op)
@property
def _device_assignments(self):
"""Code locations for device context managers active at op creation.
This property will return a list of traceable_stack.TraceableObject
instances where .obj is a string representing the assigned device
(or information about the function that would be applied to this op
to compute the desired device) and the filename and lineno members
record the location of the relevant device context manager.
For example, suppose file_a contained these lines:
file_a.py:
15: with tf.device('/gpu:0'):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the device context manager
would have these member values:
t_obj.obj -> '/gpu:0'
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._device_assignments would return the list [t_obj].
Returns:
      A list of traceable_stack.TraceableObject instances as per this
      method's description, above.
"""
return self._device_code_locations or []
@property
def _colocation_dict(self):
"""Code locations for colocation context managers active at op creation.
This property will return a dictionary for which the keys are nodes with
which this Operation is colocated, and for which the values are
traceable_stack.TraceableObject instances. The TraceableObject instances
record the location of the relevant colocation context manager but have the
"obj" field set to None to prevent leaking private data.
For example, suppose file_a contained these lines:
file_a.py:
14: node_a = tf.constant(3, name='NODE_A')
15: with tf.compat.v1.colocate_with(node_a):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the colocation context manager
would have these member values:
t_obj.obj -> None
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._colocation_dict would return the dictionary
{ 'NODE_A': t_obj }
Returns:
{str: traceable_stack.TraceableObject} as per this method's description,
above.
"""
locations_dict = self._colocation_code_locations or {}
return locations_dict.copy()
@property
def _output_types(self):
"""List this operation's output types.
Returns:
List of the types of the Tensors computed by this operation.
Each element in the list is an integer whose value is one of
the TF_DataType enums defined in c_api.h
The length of this list indicates the number of output endpoints
of the operation.
"""
num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
output_types = [
c_api.TF_OperationOutputType(self._tf_output(i))
for i in xrange(num_outputs)
]
    # In all the tests we have seen, the output_types passed into
    # Operation.__init__ are a list of ints (which is illegal according
    # to the docstring), while input_types are instances of DType.
# This extra assert is to catch if we ever use DType for output_types.
if output_types:
assert isinstance(output_types[0], int)
return output_types
def _tf_output(self, output_idx):
"""Create and return a new TF_Output for output_idx'th output of this op."""
tf_output = c_api.TF_Output()
tf_output.oper = self._c_op
tf_output.index = output_idx
return tf_output
def _tf_input(self, input_idx):
"""Create and return a new TF_Input for input_idx'th input of this op."""
tf_input = c_api.TF_Input()
tf_input.oper = self._c_op
tf_input.index = input_idx
return tf_input
def _set_device(self, device): # pylint: disable=redefined-outer-name
"""Set the device of this operation.
Args:
      device: string or device. The device to set.
"""
self._set_device_from_string(compat.as_str(_device_string(device)))
def _set_device_from_string(self, device_str):
"""Fast path to set device if the type is known to be a string.
This function is called frequently enough during graph construction that
there are non-trivial performance gains if the caller can guarantee that
the specified device is already a string.
Args:
device_str: A string specifying where to place this op.
"""
c_api.SetRequestedDevice(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
device_str)
def _update_input(self, index, tensor):
"""Update the input to this operation at the given index.
NOTE: This is for TF internal use only. Please don't use it.
Args:
index: the index of the input to update.
tensor: the Tensor to be used as the input at the given index.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
c_api.UpdateEdge(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._tf_input(index))
def _add_while_inputs(self, tensors):
"""See AddWhileInputHack in python_api.h.
NOTE: This is for TF internal use only. Please don't use it.
Args:
tensors: list of Tensors
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
for tensor in tensors:
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
c_api.AddWhileInputHack(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._c_op)
def _add_control_inputs(self, ops):
"""Add a list of new control inputs to this operation.
Args:
ops: the list of Operations to add as control input.
Raises:
TypeError: if ops is not a list of Operations.
ValueError: if any op in ops is from a different graph.
"""
for op in ops:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access
def _add_control_input(self, op):
"""Add a new control input to this operation.
Args:
op: the Operation to add as control input.
Raises:
TypeError: if op is not an Operation.
ValueError: if op is from a different graph.
"""
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access
def _remove_all_control_inputs(self):
"""Removes any control inputs to this operation."""
c_api.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access
def _add_outputs(self, types, shapes):
"""Adds new Tensors to self.outputs.
Note: this is generally unsafe to use. This is used in certain situations in
conjunction with _set_type_list_attr.
Arguments:
types: list of DTypes
shapes: list of TensorShapes
"""
assert len(types) == len(shapes)
orig_num_outputs = len(self.outputs)
for i in range(len(types)):
t = Tensor(self, orig_num_outputs + i, types[i])
self._outputs.append(t)
t.set_shape(shapes[i])
def __str__(self):
return str(self.node_def)
def __repr__(self):
return "<tf.Operation '%s' type=%s>" % (self.name, self.type)
@property
def outputs(self):
"""The list of `Tensor` objects representing the outputs of this op."""
return self._outputs
# pylint: disable=protected-access
class _InputList(object):
"""Immutable input list wrapper."""
def __init__(self, inputs):
self._inputs = inputs
def __iter__(self):
return iter(self._inputs)
def __len__(self):
return len(self._inputs)
def __bool__(self):
return bool(self._inputs)
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __getitem__(self, i):
return self._inputs[i]
# pylint: enable=protected-access
@property
def inputs(self):
"""The list of `Tensor` objects representing the data inputs of this op."""
if self._inputs_val is None:
tf_outputs = c_api.GetOperationInputs(self._c_op)
# pylint: disable=protected-access
retval = [
self.graph._get_tensor_by_tf_output(tf_output)
for tf_output in tf_outputs
]
# pylint: enable=protected-access
self._inputs_val = Operation._InputList(retval)
return self._inputs_val
@property
def _inputs(self):
logging.warning("Operation._inputs is private, use Operation.inputs "
"instead. Operation._inputs will eventually be removed.")
return self.inputs
@_inputs.setter
def _inputs(self, value):
raise ValueError("Cannot assign _inputs")
@property
def _input_types(self):
num_inputs = c_api.TF_OperationNumInputs(self._c_op)
input_types = [
dtypes.as_dtype(c_api.TF_OperationInputType(self._tf_input(i)))
for i in xrange(num_inputs)
]
return input_types
@_input_types.setter
def _input_types(self, value):
raise ValueError("Cannot assign _input_types")
@property
def control_inputs(self):
"""The `Operation` objects on which this op has a control dependency.
Before this op is executed, TensorFlow will ensure that the
operations in `self.control_inputs` have finished executing. This
mechanism can be used to run ops sequentially for performance
reasons, or to ensure that the side effects of an op are observed
in the correct order.
Returns:
A list of `Operation` objects.
"""
control_c_ops = c_api.TF_OperationGetControlInputs_wrapper(self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(c_api.TF_OperationName(c_op))
for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def _control_outputs(self):
"""The `Operation` objects which have a control dependency on this op.
    Before any of the ops in `self._control_outputs` can execute, TensorFlow
    will ensure that `self` has finished executing.
Returns:
A list of `Operation` objects.
"""
control_c_ops = c_api.TF_OperationGetControlOutputs_wrapper(self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(c_api.TF_OperationName(c_op))
for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def _control_inputs(self):
logging.warning("Operation._control_inputs is private, use "
"Operation.control_inputs instead. "
"Operation._control_inputs will eventually be removed.")
return self.control_inputs
@_control_inputs.setter
def _control_inputs(self, value):
logging.warning("Operation._control_inputs is private, use "
"Operation.control_inputs instead. "
"Operation._control_inputs will eventually be removed.")
# Copy value because it may be self._control_inputs_val (in particular if
# this is called from self._control_inputs += ...), and we don't want to
# clear value below.
value = copy.copy(value)
self._remove_all_control_inputs()
self._add_control_inputs(value)
@property
def type(self):
"""The type of the op (e.g. `"MatMul"`)."""
return c_api.TF_OperationOpType(self._c_op)
@property
def graph(self):
"""The `Graph` that contains this operation."""
return self._graph
@property
def node_def(self):
# pylint: disable=line-too-long
"""Returns the `NodeDef` representation of this operation.
Returns:
A
[`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
c_api.TF_OperationToNodeDef(self._c_op, buf)
data = c_api.TF_GetBuffer(buf)
node_def = node_def_pb2.NodeDef()
node_def.ParseFromString(compat.as_bytes(data))
return node_def
@property
def _node_def(self):
logging.warning("Operation._node_def is private, use Operation.node_def "
"instead. Operation._node_def will eventually be removed.")
return self.node_def
@property
def op_def(self):
# pylint: disable=line-too-long
"""Returns the `OpDef` proto that represents the type of this op.
Returns:
An
[`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
return self._graph._get_op_def(self.type)
@property
def _op_def(self):
logging.warning("Operation._op_def is private, use Operation.op_def "
"instead. Operation._op_def will eventually be removed.")
return self.op_def
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
return tf_stack.convert_stack(self._traceback)
@property
def traceback_with_start_lines(self):
"""Same as traceback but includes start line of function definition.
Returns:
A list of 5-tuples (filename, lineno, name, code, func_start_lineno).
"""
return tf_stack.convert_stack(
self._traceback, include_func_start_lineno=True)
def _set_attr(self, attr_name, attr_value):
"""Private method used to set an attribute in the node_def."""
buf = c_api.TF_NewBufferFromString(
compat.as_bytes(attr_value.SerializeToString()))
try:
# pylint: disable=protected-access
c_api.SetAttr(self._graph._c_graph, self._c_op, attr_name, buf)
# pylint: enable=protected-access
finally:
c_api.TF_DeleteBuffer(buf)
def _set_func_attr(self, attr_name, func_name):
"""Private method used to set a function attribute in the node_def."""
func = attr_value_pb2.NameAttrList(name=func_name)
self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))
def _set_func_list_attr(self, attr_name, func_names):
"""Private method used to set a list(function) attribute in the node_def."""
funcs = [attr_value_pb2.NameAttrList(name=func_name)
for func_name in func_names]
funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))
def _set_type_list_attr(self, attr_name, types):
"""Private method used to set a list(type) attribute in the node_def."""
if not types:
return
if isinstance(types[0], dtypes.DType):
types = [dt.as_datatype_enum for dt in types]
types_list = attr_value_pb2.AttrValue.ListValue(type=types)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))
def _set_shape_list_attr(self, attr_name, shapes):
"""Private method used to set a list(shape) attribute in the node_def."""
shapes = [s.as_proto() for s in shapes]
shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))
def _clear_attr(self, attr_name):
"""Private method used to clear an attribute in the node_def."""
# pylint: disable=protected-access
c_api.ClearAttr(self._graph._c_graph, self._c_op, attr_name)
# pylint: enable=protected-access
def get_attr(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ("s", "i", "f", "b", "type", "shape", "tensor", "func")
try:
with c_api_util.tf_buffer() as buf:
c_api.TF_OperationGetAttrValueProto(self._c_op, name, buf)
data = c_api.TF_GetBuffer(buf)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
x = attr_value_pb2.AttrValue()
x.ParseFromString(data)
oneof_value = x.WhichOneof("value")
if oneof_value is None:
return []
if oneof_value == "list":
for f in fields:
if getattr(x.list, f):
if f == "type":
return [dtypes.as_dtype(t) for t in x.list.type]
else:
return list(getattr(x.list, f))
return []
if oneof_value == "type":
return dtypes.as_dtype(x.type)
assert oneof_value in fields, "Unsupported field type in " + str(x)
return getattr(x, oneof_value)
def run(self, feed_dict=None, session=None):
"""Runs this operation in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for this operation.
*N.B.* Before invoking `Operation.run()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run to this operation. If
none, the default session will be used.
"""
_run_using_default_session(self, feed_dict, self.graph, session)
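# Hedged sketch (not part of the upstream module): constructing a single op in
# a fresh Graph via the low-level create_op() and inspecting its public
# properties. The op type, attr, and name are arbitrary examples.
def _example_inspect_operation():
  """Builds one Placeholder op and reads its name, type, and output dtype."""
  g = Graph()
  dtype_attr = attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)
  op = g.create_op("Placeholder", [], [dtypes.float32],
                   attrs={"dtype": dtype_attr}, name="x")
  # ==> ("x", "Placeholder", tf.float32)
  return op.name, op.type, op.outputs[0].dtype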
_gradient_registry = registry.Registry("gradient")
@tf_export("RegisterGradient")
class RegisterGradient(object):
"""A decorator for registering the gradient function for an op type.
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
inputs `x` and `y`, and return a single output `x - y`, the
following gradient function would be registered:
```python
@tf.RegisterGradient("Sub")
def _sub_grad(unused_op, grad):
return grad, tf.negative(grad)
```
The decorator argument `op_type` is the string type of an
operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
"""
def __init__(self, op_type):
"""Creates a new decorator with `op_type` as the Operation type.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers the function `f` as gradient function for `op_type`."""
_gradient_registry.register(f, self._op_type)
return f
@deprecation.deprecated_endpoints("NotDifferentiable", "NoGradient")
@tf_export("no_gradient", v1=["no_gradient", "NotDifferentiable", "NoGradient"])
def no_gradient(op_type):
"""Specifies that ops of type `op_type` is not differentiable.
This function should *not* be used for operations that have a
well-defined gradient that is not yet implemented.
This function is only used when defining a new op type. It may be
used for ops such as `tf.size()` that are not differentiable. For
example:
```python
tf.no_gradient("Size")
```
The gradient computed for 'op_type' will then propagate zeros.
  For ops whose gradient is well-defined but not yet implemented, no
  declaration should be made, and an error *must* be thrown if an attempt
  is made to request their gradient.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not a string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
_gradient_registry.register(None, op_type)
# Aliases for the old names, will be eventually removed.
NoGradient = no_gradient
NotDifferentiable = no_gradient
def get_gradient_function(op):
"""Returns the function that computes gradients for "op"."""
if not op.inputs:
return None
try:
op_type = op.get_attr("_gradient_op_type")
except ValueError:
op_type = op.type
return _gradient_registry.lookup(op_type)
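# Hedged sketch: registering a gradient for a hypothetical op type and looking
# it up again. "ExampleSquare" is an invented op type used only for
# illustration; get_gradient_function() performs the same registry lookup
# using an op's type (or its "_gradient_op_type" attr override).
@RegisterGradient("ExampleSquare")
def _example_square_grad(op, grad):
  # d(x**2)/dx == 2*x; op.inputs[0] is x and grad is the incoming gradient.
  return 2.0 * op.inputs[0] * grad
assert _gradient_registry.lookup("ExampleSquare") is _example_square_grad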
_shape_registry = registry.Registry("shape functions")
_default_shape_function_registry = registry.Registry("default shape functions")
# These are set to common_shapes.call_cpp_shape_fn by op generated code
# (generated by python_op_gen.cc).
# It is set outside ops.py to avoid a circular dependency.
_call_cpp_shape_fn = None
_call_cpp_shape_fn_and_require_op = None
def _set_call_cpp_shape_fn(call_cpp_shape_fn):
"""Sets default shape fns from passed common_shapes.call_cpp_shape_fn."""
global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op
if _call_cpp_shape_fn:
return # already registered
def call_without_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=False)
_call_cpp_shape_fn = call_without_requiring
def call_with_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=True)
_call_cpp_shape_fn_and_require_op = call_with_requiring
class RegisterShape(object):
"""No longer used.
Was: A decorator for registering a shape function.
Shape functions must now be registered via the SetShapeFn on the
original Op specification in C++.
"""
def __init__(self, op_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers "f" as the shape function for "op_type"."""
if f is None:
assert _call_cpp_shape_fn
# None is a special "weak" value that provides a default shape function,
# and can be overridden by a non-None registration.
try:
_default_shape_function_registry.register(_call_cpp_shape_fn,
self._op_type)
except KeyError:
# Ignore duplicate registrations of the weak value. This can
# occur if the op library input to wrapper generation
# inadvertently links in one or more of the standard op
# libraries.
pass
else:
_shape_registry.register(f, self._op_type)
return f
def set_shape_and_handle_data_for_outputs(_):
"""No op. TODO(b/74620627): Remove this."""
pass
class OpStats(object):
"""A holder for statistics about an operator.
This class holds information about the resource requirements for an op,
  including the size of its weight parameters on-disk and how many FLOPs it
  requires to execute forward inference.
If you define a new operation, you can create a function that will return a
set of information about its usage of the CPU and disk space when serialized.
The function itself takes a Graph object that's been set up so you can call
methods like get_tensor_by_name to help calculate the results, and a NodeDef
argument.
"""
def __init__(self, statistic_type, value=None):
"""Sets up the initial placeholders for the statistics."""
self.statistic_type = statistic_type
self.value = value
@property
def statistic_type(self):
return self._statistic_type
@statistic_type.setter
def statistic_type(self, statistic_type):
self._statistic_type = statistic_type
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def __iadd__(self, other):
if other.statistic_type != self.statistic_type:
raise ValueError("Can't add an OpStat of type %s to one of %s." %
(self.statistic_type, other.statistic_type))
if self.value is None:
self.value = other.value
elif other.value is not None:
self._value += other.value
return self
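# Hedged sketch: accumulating OpStats of the same statistic type with
# __iadd__. Values of the same type add together; mixing statistic types
# raises a ValueError. The numbers are arbitrary.
def _example_accumulate_flops():
  """Sums two flops counts into a running total."""
  total = OpStats("flops", 0)
  total += OpStats("flops", 200)
  total += OpStats("flops", 40)
  return total.value  # ==> 240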
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
"""A decorator for registering the statistics function for an op type.
This decorator can be defined for an op type so that it gives a
report on the resources used by an instance of an operator, in the
form of an OpStats object.
Well-known types of statistics include these so far:
- flops: When running a graph, the bulk of the computation happens doing
numerical calculations like matrix multiplications. This type allows a node
to return how many floating-point operations it takes to complete. The
total number of FLOPs for a graph is a good guide to its expected latency.
You can add your own statistics just by picking a new type string, registering
functions for the ops you care about, and then calling get_stats_for_node_def.
If a statistic for an op is registered multiple times, a KeyError will be
raised.
  Since statistics are counted on a per-op basis, this mechanism is not
  suitable for model parameters (capacity), which should be counted only
  once even when shared by multiple ops (e.g. in an RNN).
For example, you can define a new metric called doohickey for a Foo operation
by placing this in your code:
```python
@ops.RegisterStatistics("Foo", "doohickey")
def _calc_foo_bojangles(unused_graph, unused_node_def):
return ops.OpStats("doohickey", 20)
```
Then in client code you can retrieve the value by making this call:
```python
doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
```
If the NodeDef is for an op with a registered doohickey function, you'll get
back the calculated amount in doohickey.value, or None if it's not defined.
"""
def __init__(self, op_type, statistic_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string.")
if "," in op_type:
raise TypeError("op_type must not contain a comma.")
self._op_type = op_type
if not isinstance(statistic_type, six.string_types):
raise TypeError("statistic_type must be a string.")
if "," in statistic_type:
raise TypeError("statistic_type must not contain a comma.")
self._statistic_type = statistic_type
def __call__(self, f):
"""Registers "f" as the statistics function for "op_type"."""
_stats_registry.register(f, self._op_type + "," + self._statistic_type)
return f
def get_stats_for_node_def(graph, node, statistic_type):
"""Looks up the node's statistics function in the registry and calls it.
This function takes a Graph object and a NodeDef from a GraphDef, and if
there's an associated statistics method, calls it and returns a result. If no
function has been registered for the particular node type, it returns an empty
statistics object.
Args:
graph: A Graph object that's been set up with the node's graph.
node: A NodeDef describing the operator.
statistic_type: A string identifying the statistic we're interested in.
Returns:
An OpStats object containing information about resource usage.
"""
try:
stats_func = _stats_registry.lookup(node.op + "," + statistic_type)
result = stats_func(graph, node)
except LookupError:
result = OpStats(statistic_type)
return result
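# Hedged sketch: registering a statistics function for a hypothetical op type
# "ExampleMatMul" and querying it through get_stats_for_node_def(). The flops
# value is a stand-in, not a real cost model.
@RegisterStatistics("ExampleMatMul", "flops")
def _example_matmul_flops(unused_graph, unused_node_def):
  return OpStats("flops", 1000)
def _example_query_flops(graph, node_def):
  """Returns the registered flops estimate for `node_def`, or None."""
  return get_stats_for_node_def(graph, node_def, "flops").value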
def name_from_scope_name(name):
"""Returns the name of an op given the name of its scope.
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash).
"""
return name[:-1] if (name and name[-1] == "/") else name
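# For example (illustrative): name_from_scope_name("layer1/") returns
# "layer1", while name_from_scope_name("layer1") and name_from_scope_name("")
# are returned unchanged.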
_MUTATION_LOCK_GROUP = 0
_SESSION_RUN_LOCK_GROUP = 1
@tf_export("Graph")
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
A `Graph` contains a set of
`tf.Operation` objects,
which represent units of computation; and
`tf.Tensor` objects, which represent
the units of data that flow between operations.
A default `Graph` is always registered, and accessible by calling
`tf.compat.v1.get_default_graph`.
To add an operation to the default graph, simply call one of the functions
that defines a new `Operation`:
```python
c = tf.constant(4.0)
assert c.graph is tf.compat.v1.get_default_graph()
```
Another typical usage involves the
`tf.Graph.as_default`
context manager, which overrides the current default graph for the
lifetime of the context:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` class uses a collection (named
`tf.GraphKeys.GLOBAL_VARIABLES`) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
def __init__(self):
"""Creates a new, empty Graph."""
# Protects core state that can be returned via public accessors.
# Thread-safety is provided on a best-effort basis to support buggy
# programs, and is not guaranteed by the public `tf.Graph` API.
#
# NOTE(mrry): This does not protect the various stacks. A warning will
    # be reported if these are used from multiple threads.
self._lock = threading.RLock()
# The group lock synchronizes Session.run calls with methods that create
# and mutate ops (e.g. Graph.create_op()). This synchronization is
# necessary because it's illegal to modify an operation after it's been run.
# The group lock allows any number of threads to mutate ops at the same time
# but if any modification is going on, all Session.run calls have to wait.
# Similarly, if one or more Session.run calls are going on, all mutate ops
# have to wait until all Session.run calls have finished.
self._group_lock = lock_util.GroupLock(num_groups=2)
self._nodes_by_id = {} # GUARDED_BY(self._lock)
self._next_id_counter = 0 # GUARDED_BY(self._lock)
self._nodes_by_name = {} # GUARDED_BY(self._lock)
self._version = 0 # GUARDED_BY(self._lock)
# Maps a name used in the graph to the next id to use for that name.
self._names_in_use = {}
self._stack_state_is_thread_local = False
self._thread_local = threading.local()
# Functions that will be applied to choose a device if none is specified.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._device_function_stack is used instead.
self._graph_device_function_stack = traceable_stack.TraceableStack()
# Default original_op applied to new ops.
self._default_original_op = None
# Current control flow context. It could be either CondContext or
# WhileContext defined in ops/control_flow_ops.py
self._control_flow_context = None
    # A new node will depend on the union of all of the nodes in the stack.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._control_dependencies_stack is used instead.
self._graph_control_dependencies_stack = []
# Arbitrary collections of objects.
self._collections = {}
# The graph-level random seed
self._seed = None
# A dictionary of attributes that should be applied to all ops.
self._attr_scope_map = {}
# A map from op type to the kernel label that should be used.
self._op_to_kernel_label_map = {}
# A map from op type to an alternative op type that should be used when
# computing gradients.
self._gradient_override_map = {}
# True if the graph is considered "finalized". In that case no
# new operations can be added.
self._finalized = False
# Functions defined in the graph
self._functions = collections.OrderedDict()
# Default GraphDef versions
self._graph_def_versions = versions_pb2.VersionDef(
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
self._building_function = False
# Stack of colocate_with ops. In TF2.x or after switch_to_thread_local(),
# self._thread_local._colocation_stack is used instead.
self._graph_colocation_stack = traceable_stack.TraceableStack()
# Set of tensors that are dangerous to feed!
self._unfeedable_tensors = set()
# Set of operations that are dangerous to fetch!
self._unfetchable_ops = set()
# A map of tensor handle placeholder to tensor dtype.
self._handle_feeders = {}
# A map from tensor handle to its read op.
self._handle_readers = {}
# A map from tensor handle to its move op.
self._handle_movers = {}
# A map from tensor handle to its delete op.
self._handle_deleters = {}
# Allow optimizers and other objects to pseudo-uniquely key graphs (this key
# will be shared when defining function graphs, for example, so optimizers
# being called inside function definitions behave as if they were seeing the
# actual outside graph).
self._graph_key = "grap-key-%d/" % (uid(),)
# A string with the last reduction method passed to
# losses.compute_weighted_loss(), or None. This is required only for
# backward compatibility with Estimator and optimizer V1 use cases.
self._last_loss_reduction = None
# Flag that is used to indicate whether loss has been scaled by optimizer.
    # If this flag has been set, then estimator uses it to scale loss back
# before reporting. This is required only for backward compatibility with
# Estimator and optimizer V1 use cases.
self._is_loss_scaled_by_optimizer = False
self._container = ""
self._registered_ops = op_def_registry.get_registered_ops()
# Set to True if this graph is being built in an
# AutomaticControlDependencies context.
self._add_control_dependencies = False
# TODO(skyewm): fold as much of the above as possible into the C
# implementation
self._scoped_c_graph = c_api_util.ScopedTFGraph()
# The C API requires all ops to have shape functions. Disable this
# requirement (many custom ops do not have shape functions, and we don't
# want to break these existing cases).
c_api.SetRequireShapeInferenceFns(self._c_graph, False)
if tf2.enabled():
self.switch_to_thread_local()
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@tf_contextlib.contextmanager
def _variable_creator_scope(self, creator, priority=100):
"""Scope which defines a variable creation function.
Args:
creator: A callable taking `next_creator` and `kwargs`. See the
`tf.variable_creator_scope` docstring.
priority: Creators with a higher `priority` are called first. Within the
same priority, creators are called inner-to-outer.
Yields:
`_variable_creator_scope` is a context manager with a side effect, but
doesn't return a value.
Raises:
RuntimeError: If variable creator scopes are not properly nested.
"""
# This step keeps a reference to the existing stack, and it also initializes
# self._thread_local._variable_creator_stack if it doesn't exist yet.
old = self._variable_creator_stack
new = list(old)
new.append((priority, creator))
# Sorting is stable, so we'll put higher-priority creators later in the list
# but otherwise maintain registration order.
new.sort(key=lambda item: item[0])
self._thread_local._variable_creator_stack = new # pylint: disable=protected-access
try:
yield
finally:
if self._thread_local._variable_creator_stack is not new: # pylint: disable=protected-access
raise RuntimeError(
"Exiting variable_creator_scope without proper nesting.")
self._thread_local._variable_creator_stack = old # pylint: disable=protected-access
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@property
def _variable_creator_stack(self):
if not hasattr(self._thread_local, "_variable_creator_stack"):
self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access
# This previously returned a copy of the stack instead of the stack itself,
# to guard against accidental mutation. Consider, however, code that wants
# to save and restore the variable creator stack:
# def f():
# original_stack = graph._variable_creator_stack
# graph._variable_creator_stack = new_stack
# ... # Some code
# graph._variable_creator_stack = original_stack
#
    # And let's say you have some code that calls this function with some
# variable_creator:
# def g():
# with variable_scope.variable_creator_scope(creator):
# f()
    # When exiting the variable creator scope, it would see a different stack
    # object than it expected, leading to an "Exiting variable_creator_scope
    # without proper nesting" error.
return self._thread_local._variable_creator_stack # pylint: disable=protected-access
@_variable_creator_stack.setter
def _variable_creator_stack(self, variable_creator_stack):
self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access
def _check_not_finalized(self):
"""Check if the graph is finalized.
Raises:
      RuntimeError: If the graph is finalized.
"""
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op):
"""Adds 'op' to the graph.
Args:
      op: the Operation or Tensor to add.
Raises:
TypeError: if op is not an Operation or Tensor.
ValueError: if the op.name or op._id are already used.
"""
self._check_not_finalized()
if not isinstance(op, (Tensor, Operation)):
raise TypeError("op must be a Tensor or Operation: %s" % op)
with self._lock:
# pylint: disable=protected-access
if op._id in self._nodes_by_id:
raise ValueError("cannot add an op with id %d as it already "
"exists in the graph" % op._id)
if op.name in self._nodes_by_name:
raise ValueError("cannot add op with name %s as that name "
"is already used" % op.name)
self._nodes_by_id[op._id] = op
self._nodes_by_name[op.name] = op
self._version = max(self._version, op._id)
# pylint: enable=protected-access
@property
def _c_graph(self):
if self._scoped_c_graph:
return self._scoped_c_graph.graph
return None
@property
def version(self):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
`tf.Graph.graph_def_versions`.
Returns:
An integer version that increases as ops are added to the graph.
"""
if self._finalized:
return self._version
with self._lock:
return self._version
@property
def graph_def_versions(self):
# pylint: disable=line-too-long
"""The GraphDef version information of this graph.
For details on the meaning of each version, see
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
Returns:
A `VersionDef`.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
c_api.TF_GraphVersions(self._c_graph, buf)
data = c_api.TF_GetBuffer(buf)
version_def = versions_pb2.VersionDef()
version_def.ParseFromString(compat.as_bytes(data))
return version_def
@property
def seed(self):
"""The graph-level random seed of this graph."""
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def finalized(self):
"""True if this graph has been finalized."""
return self._finalized
def finalize(self):
"""Finalizes this graph, making it read-only.
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
when using a `tf.compat.v1.train.QueueRunner`.
"""
self._finalized = True
def _unsafe_unfinalize(self):
"""Opposite of `finalize`.
Internal interface.
NOTE: Unfinalizing a graph could have negative impact on performance,
especially in a multi-threaded environment. Unfinalizing a graph
when it is in use by a Session may lead to undefined behavior. Ensure
that all sessions using a graph are closed before calling this method.
"""
self._finalized = False
def _get_control_flow_context(self):
"""Returns the current control flow context.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):
"""If this graph contains functions, copy them to `graph_def`."""
bytesize = starting_bytesize
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph_def.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph_def.library.gradient.extend([grad_def])
def _as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A tuple containing a
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and the version of the graph to which that
`GraphDef` corresponds.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
with self._lock:
with c_api_util.tf_buffer() as buf:
c_api.TF_GraphToGraphDef(self._c_graph, buf)
data = c_api.TF_GetBuffer(buf)
graph = graph_pb2.GraphDef()
graph.ParseFromString(compat.as_bytes(data))
# Strip the experimental library field iff it's empty.
if not graph.library.function:
graph.ClearField("library")
if add_shapes:
for node in graph.node:
op = self._nodes_by_name[node.name]
if op.outputs:
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
for function_def in graph.library.function:
defined_function = self._functions[function_def.signature.name]
try:
func_graph = defined_function.graph
except AttributeError:
# _DefinedFunction doesn't have a graph, _EagerDefinedFunction
# does. Both rely on ops.py, so we can't really isinstance check
# them.
continue
input_shapes = function_def.attr["_input_shapes"]
try:
func_graph_inputs = func_graph.inputs
except AttributeError:
continue
for input_tensor in func_graph_inputs:
if input_tensor.dtype == dtypes.resource:
# TODO(allenl): Save and restore handle data, then save the
# resource placeholder's shape. Right now some shape functions get
# confused if we set the shape of the resource placeholder (to a
# scalar of course) and there isn't any handle data.
input_shapes.list.shape.add().CopyFrom(
tensor_shape.TensorShape(None).as_proto())
else:
input_shapes.list.shape.add().CopyFrom(
input_tensor.get_shape().as_proto())
for node in function_def.node_def:
try:
op = func_graph.get_operation_by_name(node.name)
except KeyError:
continue
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
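    For example (an illustrative sketch, assuming TF1-style graph building):

    ```python
    g = tf.Graph()
    with g.as_default():
      tf.constant(37.0, name="answer")
    print([n.name for n in g.as_graph_def().node])  # ==> ["answer"]
    ```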
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
result, _ = self._as_graph_def(from_version, add_shapes)
return result
def _is_function(self, name):
"""Tests whether 'name' is registered in this graph's function library.
Args:
name: string op name.
Returns:
bool indicating whether or not 'name' is registered in function library.
"""
return compat.as_str(name) in self._functions
def _get_function(self, name):
"""Returns the function definition for 'name'.
Args:
name: string function name.
Returns:
The function def proto.
"""
return self._functions.get(compat.as_str(name), None)
def _add_function(self, function):
"""Adds a function to the graph.
    After the function has been added, you can call the function by
    passing its name in place of an op name to
`Graph.create_op()`.
Args:
function: A `_DefinedFunction` object.
Raises:
ValueError: if another function is defined with the same name.
"""
name = function.name
# Sanity checks on gradient definition.
if (function.grad_func_name is not None) and (function.python_grad_func is
not None):
raise ValueError("Gradient defined twice for function %s" % name)
# Add function to graph
# pylint: disable=protected-access
gradient = (
function._grad_func._c_func.func if function._grad_func else None)
c_api.TF_GraphCopyFunction(self._c_graph, function._c_func.func, gradient)
# pylint: enable=protected-access
self._functions[compat.as_str(name)] = function
# Need a new-enough consumer to support the functions we add to the graph.
if self._graph_def_versions.min_consumer < 12:
self._graph_def_versions.min_consumer = 12
@property
def building_function(self):
"""Returns True iff this graph represents a function."""
return self._building_function
# Helper functions to create operations.
@deprecated_args(None,
"Shapes are always computed; don't use the compute_shapes "
"as it has no effect.", "compute_shapes")
def create_op(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Creates an `Operation` in this graph.
This is a low-level interface for creating an `Operation`. Most
programs will not call this method directly, and instead use the
Python op constructors, such as `tf.constant()`, which add ops to
the default graph.
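    For example, a minimal sketch using the attr-free `NoOp` op type:

    ```python
    g = tf.Graph()
    op = g.create_op("NoOp", [], [], name="my_no_op")
    assert op.type == "NoOp" and op.name == "my_no_op"
    ```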
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of the
tensors that the operation consumes. By default, uses the base `DType`
of each input in `inputs`. Operations that expect reference-typed inputs
must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Raises:
TypeError: if any of the inputs is not a `Tensor`.
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
del compute_shapes
for idx, a in enumerate(inputs):
if not isinstance(a, Tensor):
raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
return self._create_op_internal(op_type, inputs, dtypes, input_types, name,
attrs, op_def, compute_device)
def _create_op_internal(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
"""Creates an `Operation` in this graph.
Implements `Graph.create_op()` without the overhead of the deprecation
wrapper.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of the
tensors that the operation consumes. By default, uses the base `DType`
of each input in `inputs`. Operations that expect reference-typed inputs
must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Raises:
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
if name is None:
name = op_type
    # If a name ends with a '/', it is a "name scope" and we use it as-is,
# after removing the trailing '/'.
if name and name[-1] == "/":
name = name_from_scope_name(name)
else:
name = self.unique_name(name)
node_def = _NodeDef(op_type, name, device=None, attrs=attrs)
input_ops = set([t.op for t in inputs])
control_inputs = self._control_dependencies_for_inputs(input_ops)
# _create_op_helper mutates the new Operation. `_mutation_lock` ensures a
# Session.run call cannot occur between creating and mutating the op.
with self._mutation_lock():
ret = Operation(
node_def,
self,
inputs=inputs,
output_types=dtypes,
control_inputs=control_inputs,
input_types=input_types,
original_op=self._default_original_op,
op_def=op_def)
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_from_tf_operation(self, c_op, compute_device=True):
"""Creates an `Operation` in this graph from the supplied TF_Operation.
This method is like create_op() except the new Operation is constructed
using `c_op`. The returned Operation will have `c_op` as its _c_op
field. This is used to create Operation objects around TF_Operations created
indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).
This function does not call Operation._control_flow_post_processing or
Graph._control_dependencies_for_inputs (since the inputs may not be
available yet). The caller is responsible for calling these methods.
Args:
c_op: a wrapped TF_Operation
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
ret = Operation(c_op, self)
# If a name_scope was created with ret.name but no nodes were created in it,
# the name will still appear in _names_in_use even though the name hasn't
# been used. This is ok, just leave _names_in_use as-is in this case.
# TODO(skyewm): make the C API guarantee no name conflicts.
name_key = ret.name.lower()
if name_key not in self._names_in_use:
self._names_in_use[name_key] = 1
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_helper(self, op, compute_device=True):
"""Common logic for creating an op in this graph."""
# Apply any additional attributes requested. Do not overwrite any existing
# attributes.
for key, value in self._attr_scope_map.items():
try:
op.get_attr(key)
except ValueError:
if callable(value):
value = value(op.node_def)
if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
raise TypeError(
"Callable for scope map key '%s' must return either None or "
"an AttrValue protocol buffer; but it returned: %s" %
(key, value))
if value:
op._set_attr(key, value) # pylint: disable=protected-access
# Apply a kernel label if one has been specified for this op type.
try:
kernel_label = self._op_to_kernel_label_map[op.type]
op._set_attr("_kernel", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
except KeyError:
pass
# Apply the overriding op type for gradients if one has been specified for
# this op type.
try:
mapped_op_type = self._gradient_override_map[op.type]
op._set_attr("_gradient_op_type", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
except KeyError:
pass
self._record_op_seen_by_control_dependencies(op)
if compute_device:
self._apply_device_functions(op)
# Snapshot the colocation stack metadata before we might generate error
# messages using it. Note that this snapshot depends on the actual stack
# and is independent of the op's _class attribute.
# pylint: disable=protected-access
op._colocation_code_locations = self._snapshot_colocation_stack_metadata()
# pylint: enable=protected-access
if self._colocation_stack:
all_colocation_groups = []
for colocation_op in self._colocation_stack.peek_objs():
all_colocation_groups.extend(colocation_op.colocation_groups())
if colocation_op.device:
# pylint: disable=protected-access
op._set_device(colocation_op.device)
# pylint: enable=protected-access
all_colocation_groups = sorted(set(all_colocation_groups))
# pylint: disable=protected-access
op._set_attr(
"_class",
attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
# pylint: enable=protected-access
# Sets "container" attribute if
# (1) self._container is not None
# (2) "is_stateful" is set in OpDef
# (3) "container" attribute is in OpDef
# (4) "container" attribute is None
if self._container and op.op_def.is_stateful:
try:
container_attr = op.get_attr("container")
except ValueError:
# "container" attribute is not in OpDef
pass
else:
if not container_attr:
op._set_attr("container", attr_value_pb2.AttrValue( # pylint: disable=protected-access
s=compat.as_bytes(self._container)))
def _add_new_tf_operations(self, compute_devices=True):
"""Creates `Operations` in this graph for any new TF_Operations.
This is useful for when TF_Operations are indirectly created by the C API
outside of the Operation constructor (e.g. by TF_ImportGraphDef,
TF_FinishWhile). This ensures there are corresponding Operations for all
TF_Operations in the underlying TF_Graph.
Args:
compute_devices: (Optional.) If True, device functions will be executed to
compute the device properties of each new Operation.
Returns:
A list of the new `Operation` objects.
"""
# Create all Operation objects before accessing their inputs since an op may
# be created before its inputs.
new_ops = [
self._create_op_from_tf_operation(c_op, compute_device=compute_devices)
for c_op in c_api_util.new_tf_operations(self)
]
# pylint: disable=protected-access
for op in new_ops:
new_control_inputs = self._control_dependencies_for_inputs(op.inputs)
op._add_control_inputs(new_control_inputs)
op._control_flow_post_processing()
# pylint: enable=protected-access
return new_ops
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
"""Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
This function is the canonical way to get/validate an object of
one of the allowed types from an external argument reference in the
Session API.
This method may be called concurrently from multiple threads.
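    For example (a minimal sketch of the two lookup forms, assuming TF v1
    graph mode):
    ```python
    with tf.Graph().as_default() as g:
      c = tf.constant(5.0, name="c")
      assert g.as_graph_element("c:0") is c    # Tensor lookup by name.
      assert g.as_graph_element("c") is c.op   # Operation lookup by name.
      assert g.as_graph_element(c) is c        # Tensors pass through.
    ```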
Args:
obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can
also be any object with an `_as_graph_element()` method that returns a
value of one of these types. Note: `_as_graph_element` will be called
inside the graph's lock and so may not modify the graph.
allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
      TypeError: If `obj` is not a type that can be converted to an
        `Operation` or `Tensor`.
ValueError: If `obj` is of an appropriate type but invalid. For
example, an invalid string.
KeyError: If `obj` is not an object in the graph.
"""
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
        try:
          op_name, out_n = name.split(":")
          out_n = int(out_n)
        except ValueError:
          raise ValueError("The name %s looks like a Tensor name, but is "
                           "not a valid one. Tensor names must be of the "
                           "form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
        try:
          return op.outputs[out_n]
        except IndexError:
          raise KeyError("The name %s refers to a Tensor which does not "
                         "exist. The operation, %s, exists but only has "
                         "%d outputs." %
                         (repr(name), repr(op_name), len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
(repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s." %
(repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s." %
(type(obj).__name__, types_str))
def get_operations(self):
"""Return the list of operations in the graph.
    You can modify the operations in place, but modifications
    to the returned list itself, such as inserts or deletes, have no
    effect on the set of operations known to the graph.
This method may be called concurrently from multiple threads.
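    For example (a minimal sketch):
    ```python
    with tf.Graph().as_default() as g:
      c = tf.constant(1.0, name="c")
      assert c.op in g.get_operations()
    ```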
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def _get_operation_by_name_unsafe(self, name):
"""Returns the `Operation` with the given `name`.
    This is an internal, unsafe version of `get_operation_by_name`. It skips
    many checks and does not have user-friendly error messages, but runs
    considerably faster. This method may be called concurrently from multiple
    threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
KeyError: If `name` does not correspond to an operation in this graph.
"""
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
def _get_operation_by_tf_operation(self, tf_oper):
op_name = c_api.TF_OperationName(tf_oper)
return self._get_operation_by_name_unsafe(op_name)
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
def _get_tensor_by_tf_output(self, tf_output):
"""Returns the `Tensor` representing `tf_output`.
Note that there is only one such `Tensor`, i.e. multiple calls to this
function with the same TF_Output value will always return the same `Tensor`
object.
Args:
tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).
Returns:
The `Tensor` that represents `tf_output`.
"""
op = self._get_operation_by_tf_operation(tf_output.oper)
return op.outputs[tf_output.index]
def _next_id(self):
"""Id for next Operation instance. Also increments the internal id."""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
return self._next_id_counter
@property
def _last_id(self):
return self._next_id_counter
def _get_op_def(self, type): # pylint: disable=redefined-builtin
"""Returns the `OpDef` proto for `type`. `type` is a string."""
with c_api_util.tf_buffer() as buf:
# pylint: disable=protected-access
c_api.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type), buf)
# pylint: enable=protected-access
data = c_api.TF_GetBuffer(buf)
op_def = op_def_pb2.OpDef()
op_def.ParseFromString(compat.as_bytes(data))
return op_def
def as_default(self):
"""Returns a context manager that makes this `Graph` the default graph.
This method should be used if you want to create multiple graphs
in the same process. For convenience, a global default graph is
provided, and all ops will be added to this graph if you do not
create a new graph explicitly.
Use this method with the `with` keyword to specify that ops created within
the scope of a block should be added to this graph. In this case, once
the scope of the `with` is exited, the previous default graph is set again
as default. There is a stack, so it's ok to have multiple nested levels
of `as_default` calls.
The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
The following code examples are equivalent:
```python
# 1. Using Graph.as_default():
g = tf.Graph()
with g.as_default():
c = tf.constant(5.0)
assert c.graph is g
# 2. Constructing and making default:
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
assert c.graph is g
```
If eager execution is enabled ops created under this context manager will be
added to the graph instead of executed eagerly.
Returns:
A context manager for using this graph as the default graph.
"""
return _default_graph_stack.get_controller(self)
@property
def collections(self):
"""Returns the names of the collections known to this graph."""
return list(self._collections)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
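    For example (a minimal sketch; note that adding the same value twice
    stores it twice):
    ```python
    g = tf.Graph()
    g.add_to_collection("my_collection", 1)
    g.add_to_collection("my_collection", 1)  # Collections are not sets.
    assert g.get_collection("my_collection") == [1, 1]
    ```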
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collection.
""" # pylint: disable=g-doc-exception
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
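    For example (a minimal sketch of the string vs. iterable distinction):
    ```python
    g = tf.Graph()
    g.add_to_collections(["a", "b", "a"], 42)  # Duplicate "a" is ignored.
    assert g.get_collection("a") == [42]
    g.add_to_collections("abc", 7)  # One collection named "abc", not a/b/c.
    assert g.get_collection("abc") == [7]
    ```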
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
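    For example (a minimal sketch of in-place mutation):
    ```python
    g = tf.Graph()
    ref = g.get_collection_ref("things")  # Creates an empty list.
    ref.append("x")                       # Mutates the collection itself.
    assert g.get_collection("things") == ["x"]
    ```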
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
""" # pylint: disable=g-doc-exception
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
    Unlike `get_collection_ref()`, which returns the actual collection list if
    it exists, this method always returns a new list each time it is called.
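    For example (a sketch of scope filtering; since `re.match` anchors at the
    start of the string, a plain `scope` acts as a prefix filter):
    ```python
    with tf.Graph().as_default() as g:
      with g.name_scope("layer1"):
        v1 = tf.Variable(1.0, name="w")
      with g.name_scope("layer2"):
        v2 = tf.Variable(2.0, name="w")
      g.add_to_collection("weights", v1)
      g.add_to_collection("weights", v2)
      filtered = g.get_collection("weights", scope="layer1")
      assert len(filtered) == 1 and filtered[0] is v1
    ```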
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) A string. If supplied, the resulting list is filtered
to include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a
scope is supplied. The choice of `re.match` means that a `scope` without
special tokens filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
""" # pylint: disable=g-doc-exception
with self._lock:
collection = self._collections.get(name, None)
if collection is None:
return []
if scope is None:
return list(collection)
else:
c = []
regex = re.compile(scope)
for item in collection:
if hasattr(item, "name") and regex.match(item.name):
c.append(item)
return c
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@tf_contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
it was based. For example a replica op is based on the op that was
replicated and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
self._default_original_op = op
try:
yield
finally:
self._default_original_op = old_original_op
@property
def _name_stack(self):
# This may be called from a thread where name_stack doesn't yet exist.
if not hasattr(self._thread_local, "_name_stack"):
self._thread_local._name_stack = ""
return self._thread_local._name_stack
@_name_stack.setter
def _name_stack(self, name_stack):
self._thread_local._name_stack = name_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_contextlib.contextmanager
def name_scope(self, name):
"""Returns a context manager that creates hierarchical names for operations.
A graph maintains a stack of name scopes. A `with name_scope(...):`
statement pushes a new name onto the stack for the lifetime of the context.
The `name` argument will be interpreted as follows:
* A string (not ending with '/') will create a new name scope, in which
`name` is appended to the prefix of all operations created in the
context. If `name` has been used before, it will be made unique by
calling `self.unique_name(name)`.
* A scope previously captured from a `with g.name_scope(...) as
scope:` statement will be treated as an "absolute" name scope, which
makes it possible to re-enter existing scopes.
* A value of `None` or the empty string will reset the current name scope
to the top-level (empty) name scope.
For example:
```python
with tf.Graph().as_default() as g:
c = tf.constant(5.0, name="c")
assert c.op.name == "c"
c_1 = tf.constant(6.0, name="c")
assert c_1.op.name == "c_1"
# Creates a scope called "nested"
with g.name_scope("nested") as scope:
nested_c = tf.constant(10.0, name="c")
assert nested_c.op.name == "nested/c"
# Creates a nested scope called "inner".
with g.name_scope("inner"):
nested_inner_c = tf.constant(20.0, name="c")
assert nested_inner_c.op.name == "nested/inner/c"
# Create a nested scope called "inner_1".
with g.name_scope("inner"):
nested_inner_1_c = tf.constant(30.0, name="c")
assert nested_inner_1_c.op.name == "nested/inner_1/c"
# Treats `scope` as an absolute name scope, and
# switches to the "nested/" scope.
with g.name_scope(scope):
nested_d = tf.constant(40.0, name="d")
assert nested_d.op.name == "nested/d"
with g.name_scope(""):
e = tf.constant(50.0, name="e")
assert e.op.name == "e"
```
The name of the scope itself can be captured by `with
g.name_scope(...) as scope:`, which stores the name of the scope
in the variable `scope`. This value can be used to name an
operation that represents the overall result of executing the ops
in a scope. For example:
```python
inputs = tf.constant(...)
with g.name_scope('my_layer') as scope:
weights = tf.Variable(..., name="weights")
biases = tf.Variable(..., name="biases")
affine = tf.matmul(inputs, weights) + biases
output = tf.nn.relu(affine, name=scope)
```
NOTE: This constructor validates the given `name`. Valid scope
names match one of the following regular expressions:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
[A-Za-z0-9_.\\-/]* (for other scopes)
Args:
name: A name for the scope.
Returns:
A context manager that installs `name` as a new name scope.
Raises:
ValueError: If `name` is not a valid scope name, according to the rules
above.
"""
if name:
if isinstance(name, compat.bytes_or_text_types):
name = compat.as_str(name)
if self._name_stack:
# Scopes created in a nested scope may have initial characters
# that are illegal as the initial character of an op name
# (viz. '-', '\', '/', and '_').
if not _VALID_SCOPE_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
else:
# Scopes created in the root must match the more restrictive
# op name regex, which constrains the initial character.
if not _VALID_OP_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
old_stack = self._name_stack
    if not name:  # Both for name=None and name="" we reset to empty scope.
new_stack = None
elif name[-1] == "/":
new_stack = name_from_scope_name(name)
else:
new_stack = self.unique_name(name)
self._name_stack = new_stack
try:
yield "" if new_stack is None else new_stack + "/"
finally:
self._name_stack = old_stack
# pylint: enable=g-doc-return-or-yield,line-too-long
def unique_name(self, name, mark_as_used=True):
"""Return a unique operation name for `name`.
Note: You rarely need to call `unique_name()` directly. Most of
the time you just need to create `with g.name_scope()` blocks to
generate structured names.
`unique_name` is used to generate structured names, separated by
`"/"`, to help identify operations when debugging a graph.
Operation names are displayed in error messages reported by the
TensorFlow runtime, and in various visualization tools such as
TensorBoard.
If `mark_as_used` is set to `True`, which is the default, a new
unique name is created and marked as in use. If it's set to `False`,
the unique name is returned without actually being marked as used.
This is useful when the caller simply wants to know what the name
to be created will be.
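    For example (a sketch of the uniquification behavior):
    ```python
    with tf.Graph().as_default() as g:
      assert g.unique_name("foo", mark_as_used=False) == "foo"
      tf.constant(1.0, name="foo")  # Marks "foo" as used.
      assert g.unique_name("foo") == "foo_1"
    ```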
Args:
name: The name for an operation.
mark_as_used: Whether to mark this name as being used.
Returns:
A string to be passed to `create_op()` that will be used
to name the operation being created.
"""
if self._name_stack:
name = self._name_stack + "/" + name
# For the sake of checking for names in use, we treat names as case
# insensitive (e.g. foo = Foo).
name_key = name.lower()
i = self._names_in_use.get(name_key, 0)
# Increment the number for "name_key".
if mark_as_used:
self._names_in_use[name_key] = i + 1
if i > 0:
base_name_key = name_key
# Make sure the composed name key is not already used.
while name_key in self._names_in_use:
name_key = "%s_%d" % (base_name_key, i)
i += 1
# Mark the composed name_key as used in case someone wants
# to call unique_name("name_1").
if mark_as_used:
self._names_in_use[name_key] = 1
# Return the new name with the original capitalization of the given name.
name = "%s_%d" % (name, i - 1)
return name
def get_name_scope(self):
"""Returns the current name scope.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.compat.v1.get_default_graph().get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
return self._name_stack
@tf_contextlib.contextmanager
def _colocate_with_for_gradient(self, op, gradient_uid,
ignore_existing=False):
with self.colocate_with(op, ignore_existing):
if gradient_uid is not None and self._control_flow_context is not None:
self._control_flow_context.EnterGradientColocation(op, gradient_uid)
try:
yield
finally:
self._control_flow_context.ExitGradientColocation(op, gradient_uid)
else:
yield
@tf_contextlib.contextmanager
def colocate_with(self, op, ignore_existing=False):
"""Returns a context manager that specifies an op to colocate with.
Note: this function is not for public use, only for internal libraries.
For example:
```python
a = tf.Variable([1.0])
with g.colocate_with(a):
b = tf.constant(1.0)
c = tf.add(a, b)
```
`b` and `c` will always be colocated with `a`, no matter where `a`
is eventually placed.
**NOTE** Using a colocation scope resets any existing device constraints.
If `op` is `None` then `ignore_existing` must be `True` and the new
scope resets all colocation and device constraints.
Args:
op: The op to colocate all created ops with, or `None`.
ignore_existing: If true, only applies colocation of this op within the
context, rather than applying all colocation properties on the stack.
If `op` is `None`, this value must be `True`.
Raises:
ValueError: if op is None but ignore_existing is False.
Yields:
A context manager that specifies the op with which to colocate
newly created ops.
"""
if op is None and not ignore_existing:
raise ValueError("Trying to reset colocation (op is None) but "
"ignore_existing is not True")
op = _op_to_colocate_with(op)
# By default, colocate_with resets the device function stack,
# since colocate_with is typically used in specific internal
# library functions where colocation is intended to be "stronger"
# than device functions.
#
# In the future, a caller may specify that device_functions win
# over colocation, in which case we can add support.
device_fn_tmp = self._device_function_stack
self._device_function_stack = traceable_stack.TraceableStack()
if ignore_existing:
current_stack = self._colocation_stack
self._colocation_stack = traceable_stack.TraceableStack()
if op is not None:
# offset refers to the stack frame used for storing code location.
# We use 4, the sum of 1 to use our caller's stack frame and 3
# to jump over layers of context managers above us.
self._colocation_stack.push_obj(op, offset=4)
try:
yield
finally:
# Restore device function stack
self._device_function_stack = device_fn_tmp
if op is not None:
self._colocation_stack.pop_obj()
# Reset the colocation stack if requested.
if ignore_existing:
self._colocation_stack = current_stack
def _add_device_to_stack(self, device_name_or_function, offset=0):
"""Add device to stack manually, separate from a context manager."""
total_offset = 1 + offset
spec = _UserDeviceSpec(device_name_or_function)
self._device_function_stack.push_obj(spec, offset=total_offset)
return spec
@tf_contextlib.contextmanager
def device(self, device_name_or_function):
# pylint: disable=line-too-long
"""Returns a context manager that specifies the default device to use.
The `device_name_or_function` argument may either be a device name
string, a device function, or None:
* If it is a device name string, all operations constructed in
this context will be assigned to the device with that name, unless
overridden by a nested `device()` context.
* If it is a function, it will be treated as a function from
Operation objects to device name strings, and invoked each time
a new Operation is created. The Operation will be assigned to
the device with the returned name.
* If it is None, all `device()` invocations from the enclosing context
will be ignored.
For information about the valid syntax of device name strings, see
the documentation in
[`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).
For example:
```python
with g.device('/device:GPU:0'):
# All operations constructed in this context will be placed
# on GPU 0.
with g.device(None):
# All operations constructed in this context will have no
# assigned device.
# Defines a function from `Operation` to device string.
def matmul_on_gpu(n):
if n.type == "MatMul":
return "/device:GPU:0"
else:
return "/cpu:0"
with g.device(matmul_on_gpu):
# All operations of type "MatMul" constructed in this context
# will be placed on GPU 0; all other operations will be placed
# on CPU 0.
```
**N.B.** The device scope may be overridden by op wrappers or
other library code. For example, a variable assignment op
`v.assign()` must be colocated with the `tf.Variable` `v`, and
incompatible device scopes will be ignored.
Args:
device_name_or_function: The device name or function to use in the
context.
Yields:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If device scopes are not properly nested.
"""
self._add_device_to_stack(device_name_or_function, offset=2)
old_top_of_stack = self._device_function_stack.peek_top_obj()
try:
yield
finally:
new_top_of_stack = self._device_function_stack.peek_top_obj()
if old_top_of_stack is not new_top_of_stack:
raise RuntimeError("Exiting device scope without proper scope nesting.")
self._device_function_stack.pop_obj()
def _apply_device_functions(self, op):
"""Applies the current device function stack to the given operation."""
# Apply any device functions in LIFO order, so that the most recently
# pushed function has the first chance to apply a device to the op.
# We apply here because the result can depend on the Operation's
# signature, which is computed in the Operation constructor.
# pylint: disable=protected-access
prior_device_string = None
for device_spec in self._device_function_stack.peek_objs():
if device_spec.is_null_merge:
continue
if device_spec.function is None:
break
device_string = device_spec.string_merge(op)
# Take advantage of the fact that None is a singleton and Python interns
# strings, since identity checks are faster than equality checks.
if device_string is not prior_device_string:
op._set_device_from_string(device_string)
prior_device_string = device_string
op._device_code_locations = self._snapshot_device_function_stack_metadata()
# pylint: enable=protected-access
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Stateful operations, such as variables and queues, can maintain their
states on devices so that they can be shared by multiple processes.
A resource container is a string name under which these stateful
operations are tracked. These resources can be released or cleared
with `tf.Session.reset()`.
For example:
```python
with g.container('experiment0'):
# All stateful Operations constructed in this context will be placed
# in resource container "experiment0".
v1 = tf.Variable([1.0])
v2 = tf.Variable([2.0])
with g.container("experiment1"):
# All stateful Operations constructed in this context will be
# placed in resource container "experiment1".
v3 = tf.Variable([3.0])
q1 = tf.queue.FIFOQueue(10, tf.float32)
      # All stateful Operations constructed in this context will be
      # placed in resource container "experiment0".
v4 = tf.Variable([4.0])
q1 = tf.queue.FIFOQueue(20, tf.float32)
with g.container(""):
        # All stateful Operations constructed in this context will be
        # placed in the default resource container.
v5 = tf.Variable([5.0])
q3 = tf.queue.FIFOQueue(30, tf.float32)
# Resets container "experiment0", after which the state of v1, v2, v4, q1
# will become undefined (such as uninitialized).
tf.Session.reset(target, ["experiment0"])
```
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
self._container = container_name
try:
yield self._container
finally:
self._container = original_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
"""Context manager for `control_dependencies()`."""
def __init__(self, graph, control_inputs):
"""Create a new `_ControlDependenciesController`.
A `_ControlDependenciesController` is the context manager for
`with tf.control_dependencies()` blocks. These normally nest,
as described in the documentation for `control_dependencies()`.
      The `control_inputs` argument lists control dependencies that must be
      added to the current set of control dependencies. Because of
uniquification the set can be empty even if the caller passed a list of
ops. The special value `None` indicates that we want to start a new
empty set of control dependencies instead of extending the current set.
In that case we also clear the current control flow context, which is an
additional mechanism to add control dependencies.
Args:
graph: The graph that this controller is managing.
control_inputs: List of ops to use as control inputs in addition to the
current control dependencies. None to indicate that the dependencies
should be cleared.
"""
self._graph = graph
if control_inputs is None:
self._control_inputs_val = []
self._new_stack = True
else:
self._control_inputs_val = control_inputs
self._new_stack = False
self._seen_nodes = set()
self._old_stack = None
self._old_control_flow_context = None
# pylint: disable=protected-access
def __enter__(self):
if self._new_stack:
        # Clear the control_dependencies stack.
self._old_stack = self._graph._control_dependencies_stack
self._graph._control_dependencies_stack = []
# Clear the control_flow_context too.
self._old_control_flow_context = self._graph._get_control_flow_context()
self._graph._set_control_flow_context(None)
self._graph._push_control_dependencies_controller(self)
def __exit__(self, unused_type, unused_value, unused_traceback):
self._graph._pop_control_dependencies_controller(self)
if self._new_stack:
self._graph._control_dependencies_stack = self._old_stack
self._graph._set_control_flow_context(self._old_control_flow_context)
# pylint: enable=protected-access
@property
def control_inputs(self):
return self._control_inputs_val
def add_op(self, op):
self._seen_nodes.add(op)
def op_in_group(self, op):
return op in self._seen_nodes
def _push_control_dependencies_controller(self, controller):
self._control_dependencies_stack.append(controller)
def _pop_control_dependencies_controller(self, controller):
assert self._control_dependencies_stack[-1] is controller
self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_ops):
"""For an op that takes `input_ops` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_ops: The data input ops for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
        # Don't add a control input if we already have a data dependency on it.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend([c for c in controller.control_inputs if c not in input_ops])
return ret
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
def control_dependencies(self, control_inputs):
"""Returns a context manager that specifies control dependencies.
Use with the `with` keyword to specify that all operations constructed
within the context should have control dependencies on
`control_inputs`. For example:
```python
with g.control_dependencies([a, b, c]):
# `d` and `e` will only run after `a`, `b`, and `c` have executed.
d = ...
e = ...
```
Multiple calls to `control_dependencies()` can be nested, and in
that case a new `Operation` will have control dependencies on the union
of `control_inputs` from all active contexts.
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `a`, `b`, `c`, and `d`.
```
You can pass None to clear the control dependencies:
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies(None):
# Ops constructed here run normally, not waiting for either `a` or `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `c` and `d`, also not waiting
# for either `a` or `b`.
```
*N.B.* The control dependencies context applies *only* to ops that
are constructed within the context. Merely using an op or tensor
in the context does not add a control dependency. The following
example illustrates this point:
```python
# WRONG
def my_func(pred, tensor):
t = tf.matmul(tensor, tensor)
with tf.control_dependencies([pred]):
# The matmul op is created outside the context, so no control
# dependency will be added.
return t
# RIGHT
def my_func(pred, tensor):
with tf.control_dependencies([pred]):
# The matmul op is created in the context, so a control dependency
# will be added.
return tf.matmul(tensor, tensor)
```
Also note that though execution of ops created under this scope will trigger
execution of the dependencies, the ops created under this scope might still
be pruned from a normal tensorflow graph. For example, in the following
snippet of code the dependencies are never executed:
```python
loss = model.loss()
with tf.control_dependencies(dependencies):
loss = loss + tf.constant(1) # note: dependencies ignored in the
# backward pass
return tf.gradients(loss, model.variables)
```
This is because evaluating the gradient graph does not require evaluating
the constant(1) op created in the forward pass.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the
context. Can also be `None` to clear the control dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return self._ControlDependenciesController(self, None)
# First convert the inputs to ops, and deduplicate them.
# NOTE(mrry): Other than deduplication, we do not currently track direct
# or indirect dependencies between control_inputs, which may result in
# redundant control inputs.
control_ops = []
current = self._current_control_dependencies()
for c in control_inputs:
      # The hasattr(c, "_handle") check is designed to match ResourceVariables.
      # This is so control dependencies on a variable or on an unread variable
      # don't trigger reads.
if (isinstance(c, IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
c = self.as_graph_element(c)
if isinstance(c, Tensor):
c = c.op
elif not isinstance(c, Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
if c not in current:
control_ops.append(c)
current.add(c)
return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _attr_scope(self, attr_map):
"""EXPERIMENTAL: A context manager for setting attributes on operators.
This context manager can be used to add additional
attributes to operators within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # No extra attributes
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
f_2 = Foo() # Additional attribute _a=False
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
          f_3 = Foo()  # Additional attribute _a=True
with g._attr_scope({"_a": None}):
f_4 = Foo() # No additional attributes.
Args:
attr_map: A dictionary mapping attr name strings to AttrValue protocol
buffers or None.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If attr_map is not a dictionary mapping
strings to AttrValue protobufs.
"""
if not isinstance(attr_map, dict):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers")
# The saved_attrs dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_attrs = {}
# Install the given attribute
for name, attr in attr_map.items():
if not (isinstance(name, six.string_types) and
(isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
callable(attr))):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers or "
"callables that emit AttrValue protocol buffers")
try:
saved_attrs[name] = self._attr_scope_map[name]
except KeyError:
pass
if attr is None:
del self._attr_scope_map[name]
else:
self._attr_scope_map[name] = attr
try:
yield # The code within the context runs here.
finally:
# Remove the attributes set for this context, and restore any saved
# attributes.
for name, attr in attr_map.items():
try:
self._attr_scope_map[name] = saved_attrs[name]
except KeyError:
del self._attr_scope_map[name]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
"""EXPERIMENTAL: A context manager for setting kernel labels.
This context manager can be used to select particular
implementations of kernels within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # Uses the default registered kernel for the Foo op.
with g.kernel_label_map({"Foo": "v_2"}):
f_2 = Foo() # Uses the registered kernel with label "v_2"
# for the Foo op.
with g.kernel_label_map({"Foo": "v_3"}):
f_3 = Foo() # Uses the registered kernel with label "v_3"
# for the Foo op.
with g.kernel_label_map({"Foo": ""}):
f_4 = Foo() # Uses the default registered kernel
# for the Foo op.
Args:
op_to_kernel_label_map: A dictionary mapping op type strings to kernel
label strings.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If op_to_kernel_label_map is not a dictionary mapping
strings to strings.
"""
if not isinstance(op_to_kernel_label_map, dict):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
# The saved_labels dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_labels = {}
# Install the given label
for op_type, label in op_to_kernel_label_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(label, six.string_types)):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
try:
saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
except KeyError:
pass
self._op_to_kernel_label_map[op_type] = label
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, label in op_to_kernel_label_map.items():
try:
self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
except KeyError:
del self._op_to_kernel_label_map[op_type]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def gradient_override_map(self, op_type_map):
"""EXPERIMENTAL: A context manager for overriding gradient functions.
This context manager can be used to override the gradient function
that will be used for ops within the scope of the context.
For example:
```python
@tf.RegisterGradient("CustomSquare")
def _custom_square_grad(op, grad):
# ...
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
s_1 = tf.square(c) # Uses the default gradient for tf.square.
with g.gradient_override_map({"Square": "CustomSquare"}):
      s_2 = tf.square(c)  # Uses _custom_square_grad to compute the
                          # gradient of s_2.
```
Args:
op_type_map: A dictionary mapping op type strings to alternative op type
strings.
Returns:
A context manager that sets the alternative op type to be used for one
or more ops created in that context.
Raises:
TypeError: If `op_type_map` is not a dictionary mapping strings to
strings.
"""
if not isinstance(op_type_map, dict):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
# The saved_mappings dictionary stores any currently-set mappings that
# will be overridden by this context manager.
saved_mappings = {}
    # Install the given mapping
for op_type, mapped_op_type in op_type_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(mapped_op_type, six.string_types)):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
try:
saved_mappings[op_type] = self._gradient_override_map[op_type]
except KeyError:
pass
self._gradient_override_map[op_type] = mapped_op_type
try:
yield # The code within the context runs here.
finally:
      # Remove the mappings set for this context, and restore any saved
      # mappings.
for op_type, mapped_op_type in op_type_map.items():
try:
self._gradient_override_map[op_type] = saved_mappings[op_type]
except KeyError:
del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
"""Marks the given `tensor` as unfeedable in this graph."""
self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
"""Returns `True` if and only if `tensor` is feedable."""
return tensor not in self._unfeedable_tensors
def prevent_fetching(self, op):
"""Marks the given `op` as unfetchable in this graph."""
self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
"""Returns `True` if and only if `tensor_or_op` is fetchable."""
if isinstance(tensor_or_op, Tensor):
return tensor_or_op.op not in self._unfetchable_ops
else:
return tensor_or_op not in self._unfetchable_ops
def switch_to_thread_local(self):
"""Make device, colocation and dependencies stacks thread-local.
    Device, colocation and dependencies stacks are not thread-local by default.
If multiple threads access them, then the state is shared. This means that
one thread may affect the behavior of another thread.
After this method is called, the stacks become thread-local. If multiple
threads access them, then the state is not shared. Each thread uses its own
value; a thread doesn't affect other threads by mutating such a stack.
The initial value for every thread's stack is set to the current value
of the stack when `switch_to_thread_local()` was first called.
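    For example (a minimal sketch; `worker` is a hypothetical thread target):
    ```python
    import threading
    g = tf.Graph()
    g.switch_to_thread_local()
    def worker():
      with g.as_default(), g.device("/cpu:0"):
        # Device stack changes made here are visible only to this thread.
        pass
    threading.Thread(target=worker).start()
    ```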
"""
if not self._stack_state_is_thread_local:
self._stack_state_is_thread_local = True
@property
def _device_function_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where device_function_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_device_function_stack"):
stack_copy_for_this_thread = self._graph_device_function_stack.copy()
self._thread_local._device_function_stack = stack_copy_for_this_thread
return self._thread_local._device_function_stack
# pylint: enable=protected-access
else:
return self._graph_device_function_stack
@property
def _device_functions_outer_to_inner(self):
user_device_specs = self._device_function_stack.peek_objs()
device_functions = [spec.function for spec in user_device_specs]
device_functions_outer_to_inner = list(reversed(device_functions))
return device_functions_outer_to_inner
def _snapshot_device_function_stack_metadata(self):
"""Return device function stack as a list of TraceableObjects.
Returns:
[traceable_stack.TraceableObject, ...] where each TraceableObject's .obj
member is a displayable name for the user's argument to Graph.device, and
the filename and lineno members point to the code location where
Graph.device was called directly or indirectly by the user.
"""
snapshot = []
for obj in self._device_function_stack.peek_traceable_objs():
obj_copy = obj.copy_metadata()
obj_copy.obj = obj.obj.display_name
snapshot.append(obj_copy)
return snapshot
@_device_function_stack.setter
def _device_function_stack(self, device_function_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._device_function_stack = device_function_stack
# pylint: enable=protected-access
else:
self._graph_device_function_stack = device_function_stack
@property
def _colocation_stack(self):
"""Return thread-local copy of colocation stack."""
if self._stack_state_is_thread_local:
# This may be called from a thread where colocation_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_colocation_stack"):
stack_copy_for_this_thread = self._graph_colocation_stack.copy()
self._thread_local._colocation_stack = stack_copy_for_this_thread
return self._thread_local._colocation_stack
# pylint: enable=protected-access
else:
return self._graph_colocation_stack
def _snapshot_colocation_stack_metadata(self):
"""Return colocation stack metadata as a dictionary."""
return {
traceable_obj.obj.name: traceable_obj.copy_metadata()
for traceable_obj in self._colocation_stack.peek_traceable_objs()
}
@_colocation_stack.setter
def _colocation_stack(self, colocation_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._colocation_stack = colocation_stack
# pylint: enable=protected-access
else:
self._graph_colocation_stack = colocation_stack
@property
def _control_dependencies_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where control_dependencies_stack
# doesn't yet exist.
if not hasattr(self._thread_local, "_control_dependencies_stack"):
self._thread_local._control_dependencies_stack = (
self._graph_control_dependencies_stack[:])
return self._thread_local._control_dependencies_stack
else:
return self._graph_control_dependencies_stack
@_control_dependencies_stack.setter
def _control_dependencies_stack(self, control_dependencies):
if self._stack_state_is_thread_local:
self._thread_local._control_dependencies_stack = control_dependencies
else:
self._graph_control_dependencies_stack = control_dependencies
@property
def _distribution_strategy_stack(self):
"""A stack to maintain distribution strategy context for each thread."""
if not hasattr(self._thread_local, "_distribution_strategy_stack"):
self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access
return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access
@_distribution_strategy_stack.setter
def _distribution_strategy_stack(self, _distribution_strategy_stack):
self._thread_local._distribution_strategy_stack = ( # pylint: disable=protected-access
_distribution_strategy_stack)
@property
def _global_distribute_strategy_scope(self):
"""For implementing `tf.distribute.set_strategy()`."""
if not hasattr(self._thread_local, "distribute_strategy_scope"):
self._thread_local.distribute_strategy_scope = None
return self._thread_local.distribute_strategy_scope
@_global_distribute_strategy_scope.setter
def _global_distribute_strategy_scope(self, distribute_strategy_scope):
self._thread_local.distribute_strategy_scope = (distribute_strategy_scope)
@property
def _auto_cast_variable_read_dtype(self):
"""The dtype that instances of `AutoCastVariable` will be casted to.
This is None if `AutoCastVariables` should not be casted.
See `AutoCastVariable` for more information.
Returns:
The dtype that instances of `AutoCastVariable` will be casted to.
"""
if not hasattr(self._thread_local, "_auto_cast_variable_read_dtype"):
self._thread_local._auto_cast_variable_read_dtype = None # pylint: disable=protected-access
return self._thread_local._auto_cast_variable_read_dtype # pylint: disable=protected-access
@_auto_cast_variable_read_dtype.setter
def _auto_cast_variable_read_dtype(self, _auto_cast_variable_read_dtype):
self._thread_local._auto_cast_variable_read_dtype = ( # pylint: disable=protected-access
_auto_cast_variable_read_dtype)
@tf_contextlib.contextmanager
def _enable_auto_casting_variables(self, dtype):
"""Context manager to automatically cast AutoCastVariables.
    If an AutoCastVariable `var` is used under this context manager, it will be
    cast to `dtype` before being used.
See `AutoCastVariable` for more information.
Args:
      dtype: The dtype that AutoCastVariables should be cast to.
Yields:
Nothing.
"""
prev_read_dtype = self._auto_cast_variable_read_dtype
try:
self._auto_cast_variable_read_dtype = dtype
yield
finally:
self._auto_cast_variable_read_dtype = prev_read_dtype
def _mutation_lock(self):
"""Returns a lock to guard code that creates & mutates ops.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_MUTATION_LOCK_GROUP)
def _session_run_lock(self):
"""Returns a lock to guard code for Session.run.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
@tf_export(v1=["device"])
def device(device_name_or_function):
"""Wrapper for `Graph.device()` using the default graph.
See `tf.Graph.device` for more details.
Args:
device_name_or_function: The device name or function to use in the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If eager execution is enabled and a function is passed in.
"""
if context.executing_eagerly():
if callable(device_name_or_function):
raise RuntimeError(
"tf.device does not support functions when eager execution "
"is enabled.")
return context.device(device_name_or_function)
elif executing_eagerly_outside_functions():
@tf_contextlib.contextmanager
def combined(device_name_or_function):
with get_default_graph().device(device_name_or_function):
if not callable(device_name_or_function):
with context.device(device_name_or_function):
yield
else:
yield
return combined(device_name_or_function)
else:
return get_default_graph().device(device_name_or_function)
@tf_export("device", v1=[])
def device_v2(device_name):
"""Specifies the device for ops created/executed in this context.
`device_name` can be fully specified, as in "/job:worker/task:1/device:cpu:0",
or partially specified, containing only a subset of the "/"-separated
fields. Any fields which are specified override device annotations from outer
scopes. For example:
```python
with tf.device('/job:foo'):
# ops created here have devices with /job:foo
with tf.device('/job:bar/task:0/device:gpu:2'):
# ops created here have the fully specified device above
with tf.device('/device:gpu:1'):
# ops created here have the device '/job:foo/device:gpu:1'
```
Args:
device_name: The device name to use in the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If a function is passed in.
"""
if callable(device_name):
raise RuntimeError("tf.device does not support functions.")
return device(device_name)
@tf_export(v1=["container"])
def container(container_name):
"""Wrapper for `Graph.container()` using the default graph.
Args:
container_name: The container string to use in the context.
Returns:
A context manager that specifies the default container to use for newly
created stateful ops.
"""
return get_default_graph().container(container_name)
def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):
if context.executing_eagerly():
if op is not None:
if not hasattr(op, "device"):
op = internal_convert_to_tensor_or_indexed_slices(op)
return device(op.device)
else:
return NullContextmanager()
else:
default_graph = get_default_graph()
if isinstance(op, EagerTensor):
if default_graph.building_function:
return default_graph.device(op.device)
else:
raise ValueError("Encountered an Eager-defined Tensor during graph "
"construction, but a function was not being built.")
return default_graph._colocate_with_for_gradient(
op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)
# Internal interface to colocate_with. colocate_with has been deprecated from
# public API. There are still a few internal uses of colocate_with. Add internal
# only API for those uses to avoid deprecation warning.
def colocate_with(op, ignore_existing=False):
return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing)
@deprecation.deprecated(
date=None, instructions="Colocations handled automatically by placer.")
@tf_export(v1=["colocate_with"])
def _colocate_with(op, ignore_existing=False):
return colocate_with(op, ignore_existing)
@tf_export("control_dependencies")
def control_dependencies(control_inputs):
"""Wrapper for `Graph.control_dependencies()` using the default graph.
See `tf.Graph.control_dependencies`
for more details.
When eager execution is enabled, any callable object in the `control_inputs`
list will be called.
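  For example (a minimal sketch; `a`, `b`, and `x` are assumed to be tensors
  or operations defined elsewhere):
  ```python
  with tf.control_dependencies([a, b]):
    # `c` will only be computed after `a` and `b` have executed.
    c = tf.identity(x)
  ```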
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the context.
Can also be `None` to clear the control dependencies. If eager execution
is enabled, any callable object in the `control_inputs` list will be
called.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
"""
if context.executing_eagerly():
if control_inputs:
      # Execute any pending callables.
for control in control_inputs:
if callable(control):
control()
return NullContextmanager()
else:
return get_default_graph().control_dependencies(control_inputs)
class _DefaultStack(threading.local):
"""A thread-local stack of objects for providing implicit defaults."""
def __init__(self):
super(_DefaultStack, self).__init__()
self._enforce_nesting = True
self.stack = []
def get_default(self):
return self.stack[-1] if len(self.stack) >= 1 else None
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@property
def enforce_nesting(self):
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value):
self._enforce_nesting = value
@tf_contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects" %
type(default))
self.stack.pop()
else:
self.stack.remove(default)
_default_session_stack = _DefaultStack() # pylint: disable=protected-access
def default_session(session):
"""Python "with" handler for defining a default session.
This function provides a means of registering a session for handling
Tensor.eval() and Operation.run() calls. It is primarily intended for use
by session.Session, but can be used with any object that implements
the Session.run() interface.
Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
invocations within the scope of a block should be executed by a particular
session.
The default session applies to the current thread only, so it is always
possible to inspect the call stack and determine the scope of a default
session. If you create a new thread, and wish to use the default session
in that thread, you must explicitly add a "with ops.default_session(sess):"
block in that thread's function.
Example:
The following code examples are equivalent:
# 1. Using the Session object directly:
sess = ...
c = tf.constant(5.0)
sess.run(c)
# 2. Using default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
result = c.eval()
# 3. Overriding default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
with ops.default_session(...):
c.eval(session=sess)
Args:
session: The session to be installed as the default session.
Returns:
A context manager for the default session.
"""
return _default_session_stack.get_controller(session)
@tf_export(v1=["get_default_session"])
def get_default_session():
"""Returns the default session for the current thread.
The returned `Session` will be the innermost session on which a
`Session` or `Session.as_default()` context has been entered.
NOTE: The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
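  For example (illustrative; assumes graph mode):
  ```python
  c = tf.constant(4.0)
  sess = tf.compat.v1.Session()
  with sess.as_default():
    assert tf.compat.v1.get_default_session() is sess
    print(c.eval())  # ==> 4.0
  ```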
Returns:
The default `Session` being used in the current thread.
"""
return _default_session_stack.get_default()
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
"""Uses the default session to evaluate one or more tensors.
Args:
tensors: A single Tensor, or a list of Tensor objects.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which the tensors are defined.
session: (Optional) A different session to use to evaluate "tensors".
Returns:
Either a single numpy ndarray if "tensors" is a single tensor; or a list
of numpy ndarrays that each correspond to the respective element in
"tensors".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot evaluate tensor using `eval()`: No default "
"session is registered. Use `with "
"sess.as_default()` or pass an explicit session to "
"`eval(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph. Pass an explicit session to "
"`eval(session=sess)`.")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph.")
return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
"""Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operation".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot execute operation using `run()`: No default "
"session is registered. Use `with "
"sess.as_default():` or pass an explicit session to "
"`run(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: "
"the operation's graph is different from the "
"session's graph. Pass an explicit session to "
"run(session=sess).")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: "
"the operation's graph is different from the session's "
"graph.")
session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access
"""A thread-local stack of objects for providing an implicit default graph."""
def __init__(self):
super(_DefaultGraphStack, self).__init__()
self._global_default_graph = None
def get_default(self):
"""Override that returns a global default if the stack is empty."""
ret = super(_DefaultGraphStack, self).get_default()
if ret is None:
ret = self._GetGlobalDefaultGraph()
return ret
def _GetGlobalDefaultGraph(self):
if self._global_default_graph is None:
      # TODO(mrry): Perhaps log that the default graph is being used, or
      # provide some other feedback to prevent confusion when a mixture of
# the global default graph and an explicit graph are combined in the
# same process.
self._global_default_graph = Graph()
return self._global_default_graph
def reset(self):
super(_DefaultGraphStack, self).reset()
self._global_default_graph = None
@tf_contextlib.contextmanager
def get_controller(self, default):
context.context().context_switches.push(default.building_function,
default.as_default,
default._device_function_stack)
try:
with super(_DefaultGraphStack,
self).get_controller(default) as g, context.graph_mode():
yield g
finally:
# If an exception is raised here it may be hiding a related exception in
# the try-block (just above).
context.context().context_switches.pop()
_default_graph_stack = _DefaultGraphStack()
# Shared helper used in init_scope and executing_eagerly_outside_functions
# to obtain the outermost context that is not building a function, and the
# innermost non empty device stack.
def _get_outer_context_and_inner_device_stack():
"""Get the outermost context not building a function."""
default_graph = get_default_graph()
outer_context = None
innermost_nonempty_device_stack = default_graph._device_function_stack # pylint: disable=protected-access
if not _default_graph_stack.stack:
# If the default graph stack is empty, then we cannot be building a
# function. Install the global graph (which, in this case, is also the
# default graph) as the outer context.
if default_graph.building_function:
raise RuntimeError("The global graph is building a function.")
outer_context = default_graph.as_default
else:
# Find a context that is not building a function.
for stack_entry in reversed(context.context().context_switches.stack):
if not innermost_nonempty_device_stack:
innermost_nonempty_device_stack = stack_entry.device_stack
if not stack_entry.is_building_function:
outer_context = stack_entry.enter_context_fn
break
if outer_context is None:
# As a last resort, obtain the global default graph; this graph doesn't
# necessarily live on the graph stack (and hence it doesn't necessarily
# live on the context stack), but it is stored in the graph stack's
# encapsulating object.
outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access
if outer_context is None:
# Sanity check; this shouldn't be triggered.
raise RuntimeError("All graphs are building functions, and no "
"eager context was previously active.")
return outer_context, innermost_nonempty_device_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_export("init_scope")
@tf_contextlib.contextmanager
def init_scope():
"""A context manager that lifts ops out of control-flow scopes and function-building graphs.
There is often a need to lift variable initialization ops out of control-flow
scopes, function-building graphs, and gradient tapes. Entering an
`init_scope` is a mechanism for satisfying these desiderata. In particular,
entering an `init_scope` has three effects:
(1) All control dependencies are cleared the moment the scope is entered;
this is equivalent to entering the context manager returned from
`control_dependencies(None)`, which has the side-effect of exiting
control-flow scopes like `tf.cond` and `tf.while_loop`.
(2) All operations that are created while the scope is active are lifted
into the lowest context on the `context_stack` that is not building a
graph function. Here, a context is defined as either a graph or an eager
context. Every context switch, i.e., every installation of a graph as
the default graph and every switch into eager mode, is logged in a
thread-local stack called `context_switches`; the log entry for a
context switch is popped from the stack when the context is exited.
Entering an `init_scope` is equivalent to crawling up
`context_switches`, finding the first context that is not building a
graph function, and entering it. A caveat is that if graph mode is
enabled but the default graph stack is empty, then entering an
`init_scope` will simply install a fresh graph as the default one.
(3) The gradient tape is paused while the scope is active.
When eager execution is enabled, code inside an init_scope block runs with
eager execution enabled even when defining graph functions via
tf.contrib.eager.defun. For example:
```python
tf.compat.v1.enable_eager_execution()
@tf.contrib.eager.defun
def func():
# A defun-decorated function constructs TensorFlow graphs,
# it does not execute eagerly.
assert not tf.executing_eagerly()
with tf.init_scope():
# Initialization runs with eager execution enabled
assert tf.executing_eagerly()
```
Raises:
RuntimeError: if graph state is incompatible with this initialization.
"""
# pylint: enable=g-doc-return-or-yield,line-too-long
if context.executing_eagerly():
# Fastpath.
with tape.stop_recording():
yield
else:
# Retrieve the active name scope: entering an `init_scope` preserves
# the name scope of the current context.
scope = get_default_graph().get_name_scope()
if scope and scope[-1] != "/":
# Names that end with trailing slashes are treated by `name_scope` as
# absolute.
scope = scope + "/"
outer_context, innermost_nonempty_device_stack = (
_get_outer_context_and_inner_device_stack())
outer_graph = None
outer_device_stack = None
try:
with outer_context(), name_scope(scope), control_dependencies(
None), tape.stop_recording():
context_manager = NullContextmanager
context_manager_input = None
if not context.executing_eagerly():
# The device stack is preserved when lifting into a graph. Eager
# execution doesn't implement device stacks and in particular it
# doesn't support device functions, so in general it's not possible
# to do the same when lifting into the eager context.
outer_graph = get_default_graph()
outer_device_stack = outer_graph._device_function_stack # pylint: disable=protected-access
outer_graph._device_function_stack = innermost_nonempty_device_stack # pylint: disable=protected-access
elif innermost_nonempty_device_stack is not None:
for device_spec in innermost_nonempty_device_stack.peek_objs():
if device_spec.function is None:
break
if device_spec.raw_string:
context_manager = context.device
context_manager_input = device_spec.raw_string
break
# It is currently not possible to have a device function in V2,
# but in V1 we are unable to apply device functions in eager mode.
# This means that we will silently skip some of the entries on the
# device stack in V1 + eager mode.
with context_manager(context_manager_input):
yield
finally:
# If an exception is raised here it may be hiding a related exception in
# try-block (just above).
if outer_graph is not None:
outer_graph._device_function_stack = outer_device_stack # pylint: disable=protected-access
def executing_eagerly_outside_functions():
"""Returns True if executing eagerly, even if inside a graph function."""
if context.executing_eagerly():
return True
else:
outer_context, _ = _get_outer_context_and_inner_device_stack()
with outer_context():
return context.executing_eagerly()
def inside_function():
return get_default_graph().building_function
@tf_export(v1=["enable_eager_execution"])
def enable_eager_execution(config=None, device_policy=None,
execution_mode=None):
"""Enables eager execution for the lifetime of this program.
Eager execution provides an imperative interface to TensorFlow. With eager
execution enabled, TensorFlow functions execute operations immediately (as
opposed to adding to a graph to be executed later in a `tf.compat.v1.Session`)
and
return concrete values (as opposed to symbolic references to a node in a
computational graph).
For example:
```python
tf.compat.v1.enable_eager_execution()
# After eager execution is enabled, operations are executed as they are
# defined and Tensor objects hold concrete values, which can be accessed as
# numpy.ndarray`s through the numpy() method.
assert tf.multiply(6, 7).numpy() == 42
```
Eager execution cannot be enabled after TensorFlow APIs have been used to
create or execute graphs. It is typically recommended to invoke this function
at program startup and not in a library (as most libraries should be usable
both with and without eager execution).
Args:
config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the
environment in which operations are executed. Note that
`tf.compat.v1.ConfigProto` is also used to configure graph execution (via
`tf.compat.v1.Session`) and many options within `tf.compat.v1.ConfigProto`
are not implemented (or are irrelevant) when eager execution is enabled.
device_policy: (Optional.) Policy controlling how operations requiring
      inputs on a specific device (e.g., GPU 0) handle inputs on a different
device (e.g. GPU 1 or CPU). When set to None, an appropriate value will
be picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the
placement is not correct.
- tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not
on the right device but logs a warning.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors.
Note that this may hide performance problems as there is no notification
provided when operations are blocked on the tensor being copied between
devices.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies
int32 tensors, raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched are
actually executed. When set to None, an appropriate value will be picked
automatically. The value picked may change between TensorFlow releases.
Valid values:
- tf.contrib.eager.SYNC: executes each operation synchronously.
- tf.contrib.eager.ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
Raises:
ValueError: If eager execution is enabled after creating/executing a
TensorFlow graph, or if options provided conflict with a previous call
to this function.
"""
_api_usage_gauge.get_cell().set(True)
if context.default_execution_mode != context.EAGER_MODE:
return enable_eager_execution_internal(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=None)
@tf_export(v1=["disable_eager_execution"])
def disable_eager_execution():
"""Disables eager execution.
This function can only be called before any Graphs, Ops, or Tensors have been
created. It can be used at the beginning of the program for complex migration
projects from TensorFlow 1.x to 2.x.
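  For example (call once, before any other TensorFlow APIs are used):
  ```python
  import tensorflow as tf
  tf.compat.v1.disable_eager_execution()
  # From this point on, APIs build graphs rather than executing eagerly.
  ```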
"""
_api_usage_gauge.get_cell().set(False)
context.default_execution_mode = context.GRAPH_MODE
c = context.context_safe()
if c is not None:
c._thread_local_data.is_eager = False # pylint: disable=protected-access
def enable_eager_execution_internal(config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Enables eager execution for the lifetime of this program.
Most of the doc string for enable_eager_execution is relevant here as well.
Args:
config: See enable_eager_execution doc string
device_policy: See enable_eager_execution doc string
execution_mode: See enable_eager_execution doc string
server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on
remote devices. GrpcServers need to be started by creating an identical
server_def to this, and setting the appropriate task_indexes, so that the
servers can communicate. It will then be possible to execute operations on
remote devices.
  Raises:
    TypeError: If `config` is not a `tf.compat.v1.ConfigProto`.
    ValueError: If `device_policy` or `execution_mode` is invalid, or if eager
      execution is enabled after a graph has already been created.
"""
if config is not None and not isinstance(config, config_pb2.ConfigProto):
raise TypeError("config must be a tf.ConfigProto, but got %s" %
type(config))
if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,
context.DEVICE_PLACEMENT_WARN,
context.DEVICE_PLACEMENT_SILENT,
context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
raise ValueError(
"device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*"
)
if execution_mode not in (None, context.SYNC, context.ASYNC):
raise ValueError(
"execution_mode must be one of None, tf.contrib.eager.SYNC, "
"tf.contrib.eager.ASYNC")
if context.default_execution_mode == context.GRAPH_MODE:
graph_mode_has_been_used = (
_default_graph_stack._global_default_graph is not None) # pylint: disable=protected-access
if graph_mode_has_been_used:
raise ValueError(
"tf.enable_eager_execution must be called at program startup.")
context.default_execution_mode = context.EAGER_MODE
# pylint: disable=protected-access
if context._context is None:
context._context = context.Context(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=server_def)
elif ((config is not None and config is not context._context._config) or
(device_policy is not None and
device_policy is not context._context._device_policy) or
(execution_mode is not None and
execution_mode is not context._context._execution_mode)):
raise ValueError(
"Trying to change the options of an active eager"
" execution. Context config: %s, specified config:"
" %s. Context device policy: %s, specified device"
" policy: %s. Context execution mode: %s, "
" specified execution mode %s." %
(context._context._config, config, context._context._device_policy,
device_policy, context._context._execution_mode, execution_mode))
else:
# We already created everything, so update the thread local data.
context._context._thread_local_data.is_eager = True
# Monkey patch to get rid of an unnecessary conditional since the context is
# now initialized.
context.context = context.context_safe
def eager_run(main=None, argv=None):
"""Runs the program with an optional main function and argv list.
The program will run with eager execution enabled.
Example:
```python
import tensorflow as tf
# Import subject to future changes:
from tensorflow.contrib.eager.python import tfe
def main(_):
u = tf.constant(6.0)
v = tf.constant(7.0)
print(u * v)
if __name__ == "__main__":
tfe.run()
```
Args:
main: the main function to run.
argv: the arguments to pass to it.
"""
enable_eager_execution()
app.run(main, argv)
@tf_export(v1=["reset_default_graph"])
def reset_default_graph():
"""Clears the default graph stack and resets the global default graph.
NOTE: The default graph is a property of the current thread. This
function applies only to the current thread. Calling this function while
a `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active will
result in undefined
behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
after calling this function will result in undefined behavior.
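  For example (an illustrative sketch; assumes graph mode):
  ```python
  _ = tf.constant(1.0)  # Adds a Const op to the global default graph.
  tf.compat.v1.reset_default_graph()
  assert not tf.compat.v1.get_default_graph().get_operations()
  ```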
Raises:
AssertionError: If this function is called within a nested graph.
"""
if not _default_graph_stack.is_cleared():
raise AssertionError("Do not use tf.reset_default_graph() to clear "
"nested graphs. If you need a cleared graph, "
"exit the nesting and create a new graph.")
_default_graph_stack.reset()
@tf_export(v1=["get_default_graph"])
def get_default_graph():
"""Returns the default graph for the current thread.
The returned graph will be the innermost graph on which a
`Graph.as_default()` context has been entered, or a global default
graph if none has been explicitly created.
NOTE: The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
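  For example (illustrative):
  ```python
  g = tf.Graph()
  with g.as_default():
    assert tf.compat.v1.get_default_graph() is g
  ```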
Returns:
The default `Graph` being used in the current thread.
"""
return _default_graph_stack.get_default()
def has_default_graph():
"""Returns True if there is a default graph."""
return len(_default_graph_stack.stack) >= 1
def get_name_scope():
"""Returns the current name scope in the default_graph.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
if context.executing_eagerly():
return context.context().scope_name.rstrip("/")
return get_default_graph().get_name_scope()
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
if original_item.graph is not item.graph:
raise ValueError("%s must be from the same graph as %s." %
(item, original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
"""Returns the appropriate graph to use for the given inputs.
This library method provides a consistent algorithm for choosing the graph
in which an Operation should be constructed:
1. If the default graph is being used to construct a function, we
use the default graph.
2. If the "graph" is specified explicitly, we validate that all of the inputs
in "op_input_list" are compatible with that graph.
3. Otherwise, we attempt to select a graph from the first Operation-
or Tensor-valued input in "op_input_list", and validate that all other
such inputs are in the same graph.
4. If the graph was not specified and it could not be inferred from
"op_input_list", we attempt to use the default graph.
Args:
op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
Raises:
TypeError: If op_input_list is not a list or tuple, or if graph is not a
Graph.
ValueError: If a graph is explicitly passed and not all inputs are from it,
or if the inputs are from multiple graphs, or we could not find a graph
and there was no default graph.
Returns:
The appropriate graph to use for the given inputs.
"""
if get_default_graph().building_function:
return get_default_graph()
op_input_list = tuple(op_input_list) # Handle generators correctly
if graph and not isinstance(graph, Graph):
raise TypeError("Input graph needs to be a Graph: %s" % graph)
# 1. We validate that all of the inputs are from the same graph. This is
  # either the supplied graph parameter, or the first one selected from the
  # graph-element-valued inputs. In the latter case, we hold onto
# that input in original_graph_element so we can provide a more
# informative error if a mismatch is found.
original_graph_element = None
for op_input in op_input_list:
# Determine if this is a valid graph_element.
# TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this
# up.
graph_element = None
if (isinstance(op_input, (Operation, _TensorLike)) and
((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck
graph_element = op_input
else:
graph_element = _as_graph_element(op_input)
if graph_element is not None:
if not graph:
original_graph_element = graph_element
graph = graph_element.graph
elif original_graph_element is not None:
_assert_same_graph(original_graph_element, graph_element)
elif graph_element.graph is not graph:
raise ValueError("%s is not from the passed-in graph." % graph_element)
# 2. If all else fails, we use the default graph, which is always there.
return graph or get_default_graph()
@tf_export(v1=["GraphKeys"])
class GraphKeys(object):
"""Standard names to use for graph collections.
The standard library uses various well-known names to collect and
retrieve values associated with a graph. For example, the
`tf.Optimizer` subclasses default to optimizing the variables
collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
specified, but it is also possible to pass an explicit list of
variables.
The following standard keys are defined:
* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
across distributed environment (model variables are subset of these). See
`tf.compat.v1.global_variables`
for more details.
Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
* `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
    machine. Usually used for temporary variables, like counters.
Note: use `tf.contrib.framework.local_variable` to add to this collection.
* `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
model for inference (feed forward). Note: use
`tf.contrib.framework.model_variable` to add to this collection.
* `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
be trained by an optimizer. See
`tf.compat.v1.trainable_variables`
for more details.
* `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
`tf.compat.v1.summary.merge_all`
for more details.
* `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
produce input for a computation. See
`tf.compat.v1.train.start_queue_runners`
for more details.
* `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
keep moving averages. See
`tf.compat.v1.moving_average_variables`
for more details.
* `REGULARIZATION_LOSSES`: regularization losses collected during graph
construction.
The following standard keys are _defined_, but their collections are **not**
automatically populated as many of the others are:
* `WEIGHTS`
* `BIASES`
* `ACTIVATIONS`
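  For example (an illustrative sketch; `loss` is assumed to be a scalar
  `Tensor` defined elsewhere):
  ```python
  tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, loss)
  losses = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.LOSSES)
  ```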
"""
# Key to collect Variable objects that are global (shared across machines).
# Default collection for all variables, except local ones.
GLOBAL_VARIABLES = "variables"
# Key to collect local variables that are local to the machine and are not
# saved/restored.
LOCAL_VARIABLES = "local_variables"
  # Key to collect local variables which are used to accumulate internal state
# to be used in tf.metrics.*.
METRIC_VARIABLES = "metric_variables"
# Key to collect model variables defined by layers.
MODEL_VARIABLES = "model_variables"
# Key to collect Variable objects that will be trained by the
# optimizers.
TRAINABLE_VARIABLES = "trainable_variables"
# Key to collect summaries.
SUMMARIES = "summaries"
# Key to collect QueueRunners.
QUEUE_RUNNERS = "queue_runners"
# Key to collect table initializers.
TABLE_INITIALIZERS = "table_initializer"
# Key to collect asset filepaths. An asset represents an external resource
# like a vocabulary file.
ASSET_FILEPATHS = "asset_filepaths"
# Key to collect Variable objects that keep moving averages.
MOVING_AVERAGE_VARIABLES = "moving_average_variables"
# Key to collect regularization losses at graph construction.
REGULARIZATION_LOSSES = "regularization_losses"
# Key to collect concatenated sharded variables.
CONCATENATED_VARIABLES = "concatenated_variables"
# Key to collect savers.
SAVERS = "savers"
# Key to collect weights
WEIGHTS = "weights"
# Key to collect biases
BIASES = "biases"
# Key to collect activations
ACTIVATIONS = "activations"
# Key to collect update_ops
UPDATE_OPS = "update_ops"
# Key to collect losses
LOSSES = "losses"
# Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
SAVEABLE_OBJECTS = "saveable_objects"
# Key to collect all shared resources used by the graph which need to be
# initialized once per cluster.
RESOURCES = "resources"
# Key to collect all shared resources used in this graph which need to be
# initialized once per session.
LOCAL_RESOURCES = "local_resources"
# Trainable resource-style variables.
TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"
# Key to indicate various ops.
INIT_OP = "init_op"
LOCAL_INIT_OP = "local_init_op"
READY_OP = "ready_op"
READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
SUMMARY_OP = "summary_op"
GLOBAL_STEP = "global_step"
# Used to count the number of evaluations performed during a single evaluation
# run.
EVAL_STEP = "eval_step"
TRAIN_OP = "train_op"
# Key for control flow context.
COND_CONTEXT = "cond_context"
WHILE_CONTEXT = "while_context"
# Used to store v2 summary names.
_SUMMARY_COLLECTION = "_SUMMARY_V2"
# List of all collections that keep track of variables.
_VARIABLE_COLLECTIONS = [
GLOBAL_VARIABLES,
LOCAL_VARIABLES,
METRIC_VARIABLES,
MODEL_VARIABLES,
TRAINABLE_VARIABLES,
MOVING_AVERAGE_VARIABLES,
CONCATENATED_VARIABLES,
TRAINABLE_RESOURCE_VARIABLES,
]
# Key for streaming model ports.
# NOTE(yuanbyu): internal and experimental.
_STREAMING_MODEL_PORTS = "streaming_model_ports"
@decorator_utils.classproperty
@deprecation.deprecated(None, "Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.")
def VARIABLES(cls): # pylint: disable=no-self-argument
return cls.GLOBAL_VARIABLES
def dismantle_graph(graph):
"""Cleans up reference cycles from a `Graph`.
Helpful for making sure the garbage collector doesn't need to run after a
temporary `Graph` is no longer needed.
Args:
graph: A `Graph` object to destroy. Neither it nor any of its ops are usable
after this function runs.
"""
memory.dismantle_ordered_dict(graph._functions) # pylint: disable=protected-access
# Now clean up Operation<->Graph reference cycles by clearing all of the
# attributes for the Graph and its ops.
graph_operations = graph.get_operations()
for op in graph_operations:
op.__dict__ = {}
graph.__dict__ = {}
@tf_export(v1=["add_to_collection"])
def add_to_collection(name, value):
"""Wrapper for `Graph.add_to_collection()` using the default graph.
See `tf.Graph.add_to_collection`
for more details.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection. @compatibility(eager)
Collections are only supported in eager when variables are created inside
an EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collection(name, value)
@tf_export(v1=["add_to_collections"])
def add_to_collections(names, value):
"""Wrapper for `Graph.add_to_collections()` using the default graph.
See `tf.Graph.add_to_collections`
for more details.
Args:
names: The key for the collections. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collections. @compatibility(eager)
Collections are only supported in eager when variables are created inside
an EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collections(names, value)
@tf_export(v1=["get_collection_ref"])
def get_collection_ref(key):
"""Wrapper for `Graph.get_collection_ref()` using the default graph.
See `tf.Graph.get_collection_ref`
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class contains
many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection. Note that this returns
the collection list itself, which can be modified in place to change the
collection.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection_ref(key)
@tf_export(v1=["get_collection"])
def get_collection(key, scope=None):
"""Wrapper for `Graph.get_collection()` using the default graph.
See `tf.Graph.get_collection`
for more details.
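  For example (illustrative; filters global variables by scope prefix):
  ```python
  with tf.compat.v1.variable_scope("my_scope"):
    v = tf.compat.v1.get_variable("v", [1])
  scoped_vars = tf.compat.v1.get_collection(
      tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope="my_scope")
  assert v in scoped_vars
  ```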
Args:
key: The key for the collection. For example, the `GraphKeys` class contains
many standard names for collections.
scope: (Optional.) If supplied, the resulting list is filtered to include
only items whose `name` attribute matches using `re.match`. Items without
a `name` attribute are never returned if a scope is supplied and the
      choice of `re.match` means that a `scope` without special tokens filters
by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection(key, scope)
def get_all_collection_keys():
"""Returns a list of collections used in the default graph."""
return get_default_graph().get_all_collection_keys()
name_scope_cache = {}
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["name_scope"])
class name_scope(object): # pylint: disable=invalid-name
"""A context manager for use when defining a Python op.
This context manager validates that the given `values` are from the
same graph, makes that graph the default graph, and pushes a
name scope in that graph (see
`tf.Graph.name_scope`
for more details on that).
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
"""
@property
def name(self):
return self._name
def __init__(self, name, default_name=None, values=None):
"""Initialize the context manager.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
Raises:
TypeError: if `default_name` is passed in but not a string.
"""
if not (default_name is None or isinstance(default_name, six.string_types)):
raise TypeError(
"`default_name` type (%s) is not a string type. You likely meant to "
"pass this into the `values` kwarg." % type(default_name))
self._name = default_name if name is None else name
self._default_name = default_name
self._values = values
self._ctx = context.context()
self._in_eager_mode = self._ctx.executing_eagerly()
self._has_symbolic_input_in_eager = False
if self._values and self._in_eager_mode:
# The presence of a graph tensor in `self._values` overrides the context.
for value in self._values:
if hasattr(value, "graph"):
self._has_symbolic_input_in_eager = True
self._name_scope = value.graph.name_scope(self._name)
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
Raises:
ValueError: if neither `name` nor `default_name` is provided
but `values` are.
"""
if self._has_symbolic_input_in_eager:
return self._name_scope.__enter__()
if self._in_eager_mode:
scope_name, old_name = enter_eager_name_scope(self._ctx, self._name,
self._default_name)
self._old_name = old_name
return scope_name
else:
if self._name is None and self._values is not None:
        # We only raise an error if `values` was provided (is not None)
        # because tf.name_scope(None), i.e. with values=None, is sometimes
        # used as an idiom to reset to the top scope.
raise ValueError(
"At least one of name (%s) and default_name (%s) must be provided."
% (self._name, self._default_name))
if self._values is None:
self._values = []
g = _get_graph_from_inputs(self._values)
self._g_manager = g.as_default()
self._g_manager.__enter__()
try:
self._name_scope = g.name_scope(self._name)
return self._name_scope.__enter__()
except:
self._g_manager.__exit__(*sys.exc_info())
raise
def __exit__(self, type_arg, value_arg, traceback_arg):
if self._has_symbolic_input_in_eager:
self._name_scope.__exit__(type_arg, value_arg, traceback_arg)
elif self._in_eager_mode:
self._ctx.scope_name = self._old_name
else:
self._name_scope.__exit__(type_arg, value_arg, traceback_arg)
self._g_manager.__exit__(type_arg, value_arg, traceback_arg)
return False # False values do not suppress exceptions
def enter_eager_name_scope(ctx, name, default_name=None):
"""Updates the eager context to enter the given name scope."""
old_name = ctx.scope_name
if not name:
scope_name = ""
else:
if name[-1] == "/":
# A trailing slash breaks out of nested name scopes, indicating a
# fully specified scope name, for compatibility with Graph.name_scope.
scope_name = name
else:
# TODO(tomhennigan) Benchmark and consider removing the cache.
cache_key = name, old_name, default_name
scope_name = name_scope_cache.get(cache_key, None)
if scope_name is None:
scope_name = name + "/"
if old_name:
scope_name = old_name + scope_name
name_scope_cache[cache_key] = scope_name
ctx.scope_name = scope_name
return scope_name, old_name
@tf_export("name_scope", v1=[])
class name_scope_v2(name_scope):
"""A context manager for use when defining a Python op.
This context manager pushes a name scope, which will make the name of all
operations added within it have a prefix.
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope("MyOp") as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,
and `MyOp/c`.
If the scope name already exists, the name will be made unique by appending
  `_n`. For example, calling `my_op` a second time will generate `MyOp_1/a`,
etc.
"""
def __init__(self, name):
"""Initialize the context manager.
Args:
name: The prefix to use on all names created within the name scope.
Raises:
ValueError: If name is None, or not a string.
"""
if name is None or not isinstance(name, six.string_types):
raise ValueError("name for name_scope must be a string.")
self._name = name
self._exit_fns = []
@property
def name(self):
return self._name
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
Raises:
ValueError: if neither `name` nor `default_name` is provided
but `values` are.
"""
ctx = context.context()
if ctx.executing_eagerly():
scope_name, old_scope_name = enter_eager_name_scope(ctx, self._name)
self._exit_fns.append(
lambda *a: setattr(ctx, "scope_name", old_scope_name))
else:
scope = get_default_graph().name_scope(self._name)
scope_name = scope.__enter__()
self._exit_fns.append(scope.__exit__)
return scope_name
def __exit__(self, type_arg, value_arg, traceback_arg):
exit_fn = self._exit_fns.pop()
exit_fn(type_arg, value_arg, traceback_arg)
return False # False values do not suppress exceptions
def strip_name_scope(name, export_scope):
"""Removes name scope from a name.
Args:
name: A `string` name.
export_scope: Optional `string`. Name scope to remove.
Returns:
Name with name scope removed, or the original name if export_scope
is None.
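  For example (illustrative, per the regular expression below):
  ```python
  strip_name_scope("scope_a/op", "scope_a")       # ==> "op"
  strip_name_scope("loc:@scope_a/op", "scope_a")  # ==> "loc:@op"
  strip_name_scope("op", None)                    # ==> "op"
  ```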
"""
if export_scope:
if export_scope[-1] == "/":
export_scope = export_scope[:-1]
try:
# Strips export_scope/, export_scope///,
# ^export_scope/, loc:@export_scope/.
str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1)
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
def prepend_name_scope(name, import_scope):
"""Prepends name scope to a name.
Args:
name: A `string` name.
import_scope: Optional `string`. Name scope to add.
Returns:
Name with name scope added, or the original name if import_scope
is None.
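  For example (illustrative):
  ```python
  prepend_name_scope("op", "import_a")   # ==> "import_a/op"
  prepend_name_scope("^op", "import_a")  # ==> "^import_a/op"
  ```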
"""
if import_scope:
if import_scope[-1] == "/":
import_scope = import_scope[:-1]
try:
str_to_replace = r"([\^]|loc:@|^)(.*)"
return re.sub(str_to_replace, r"\1" + import_scope + r"/\2",
compat.as_str(name))
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
# pylint: disable=g-doc-return-or-yield
# pylint: disable=not-context-manager
@tf_export(v1=["op_scope"])
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
"""DEPRECATED. Same as name_scope above, just different argument order."""
logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
" use tf.name_scope(name, default_name, values)")
with name_scope(name, default_name=default_name, values=values) as scope:
yield scope
_proto_function_registry = registry.Registry("proto functions")
def register_proto_function(collection_name,
proto_type=None,
to_proto=None,
from_proto=None):
"""Registers `to_proto` and `from_proto` functions for collection_name.
`to_proto` function converts a Python object to the corresponding protocol
buffer, and returns the protocol buffer.
  `from_proto` function converts a protocol buffer into a Python object, and
  returns the object.
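  For example (a sketch mirroring how variable collections are registered
  elsewhere in TensorFlow; `Variable` and `variable_pb2` come from the
  variables and variable-protobuf modules respectively):
  ```python
  register_proto_function(
      GraphKeys.GLOBAL_VARIABLES,
      proto_type=variable_pb2.VariableDef,
      to_proto=Variable.to_proto,
      from_proto=Variable.from_proto)
  ```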
Args:
collection_name: Name of the collection.
proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
      `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.
to_proto: Function that implements Python object to protobuf conversion.
from_proto: Function that implements protobuf to Python object conversion.
"""
if to_proto and not callable(to_proto):
raise TypeError("to_proto must be callable.")
if from_proto and not callable(from_proto):
raise TypeError("from_proto must be callable.")
_proto_function_registry.register((proto_type, to_proto, from_proto),
collection_name)
def get_collection_proto_type(collection_name):
"""Returns the proto_type for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[0]
except LookupError:
return None
def get_to_proto_function(collection_name):
"""Returns the to_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[1]
except LookupError:
return None
def get_from_proto_function(collection_name):
"""Returns the from_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[2]
except LookupError:
return None
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
"""Produce a nice error if someone converts an Operation to a Tensor."""
raise TypeError(("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") %
(op.name, dtype, name, as_ref))
def _op_to_colocate_with(v):
"""Operation object corresponding to v to use for colocation constraints."""
if v is None:
return None
if isinstance(v, Operation):
return v
# We always want to colocate with the reference op.
# When 'v' is a ResourceVariable, the reference op is the handle creating op.
#
# What this should be is:
# if isinstance(v, ResourceVariable):
# return v.handle.op
# However, that would require a circular import dependency.
# As of October 2018, there were attempts underway to remove
# colocation constraints altogether. Assuming that will
# happen soon, perhaps this hack to work around the circular
# import dependency is acceptable.
if hasattr(v, "handle") and hasattr(v.handle, "op") and isinstance(
v.handle.op, Operation):
return v.handle.op
return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op
def _is_keras_symbolic_tensor(x):
return hasattr(x, "graph") and getattr(x.graph, "name", None) == "keras_graph"
tensor_conversion_registry.register_tensor_conversion_function(
Operation, _operation_conversion_error)
# These symbols were originally defined in this module; import them for
# backwards compatibility until all references have been updated to access
# them from the indexed_slices.py module.
IndexedSlices = indexed_slices.IndexedSlices
IndexedSlicesValue = indexed_slices.IndexedSlicesValue
convert_to_tensor_or_indexed_slices = \
indexed_slices.convert_to_tensor_or_indexed_slices
convert_n_to_tensor_or_indexed_slices = \
indexed_slices.convert_n_to_tensor_or_indexed_slices
internal_convert_to_tensor_or_indexed_slices = \
indexed_slices.internal_convert_to_tensor_or_indexed_slices
internal_convert_n_to_tensor_or_indexed_slices = \
indexed_slices.internal_convert_n_to_tensor_or_indexed_slices
register_tensor_conversion_function = \
tensor_conversion_registry.register_tensor_conversion_function
|
tensorflow-master
|
tensorflow/python/framework/ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations that generate constants.
See the [constants guide](https://tensorflow.org/api_guides/python/constant_op).
"""
# Must be separate from array_ops to avoid a cyclic dependency.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.util.tf_export import tf_export
def _eager_reshape(tensor, shape, ctx):
"""Eager-only version of Reshape op; requires tensor is an eager Tensor."""
attr_t = tensor._datatype_enum() # pylint: disable=protected-access
attr_tshape, (shape,) = execute.args_to_matching_eager(
[shape], ctx, dtypes.int32)
inputs_flat = [tensor, shape]
attrs = ("T", attr_t, "Tshape", attr_tshape)
result, = execute.execute(
b"Reshape", 1, inputs=inputs_flat, attrs=attrs, ctx=ctx)
return result
def _eager_fill(dims, value, ctx):
"""Eager-only version of Fill op; requires value is an eager Tensor."""
attr_t = value.dtype.as_datatype_enum
dims = convert_to_eager_tensor(dims, ctx, dtypes.int32)
inputs_flat = [dims, value]
attrs = ("T", attr_t, "index_type", types_pb2.DT_INT32)
result, = execute.execute(
b"Fill", 1, inputs=inputs_flat, attrs=attrs, ctx=ctx)
return result
def _eager_identity(tensor, ctx):
"""Eager-only version of Identity op; requires tensor is an eager Tensor."""
attrs = ("T", tensor.dtype.as_datatype_enum)
result, = execute.execute(
b"Identity", 1, inputs=[tensor], attrs=attrs, ctx=ctx)
return result
def convert_to_eager_tensor(value, ctx, dtype=None):
"""Converts the given `value` to an `EagerTensor`.
Note that this function could return cached copies of created constants for
performance reasons.
Args:
value: value to convert to EagerTensor.
ctx: value of context.context().
dtype: optional desired dtype of the converted EagerTensor.
Returns:
EagerTensor created from value.
Raises:
    TypeError: if `dtype` is not compatible with the type of `value`.
"""
if isinstance(value, ops.EagerTensor):
if dtype is not None and value.dtype != dtype:
raise TypeError("Expected tensor with type %r not %r" % (
dtype, value.dtype))
return value
if dtype is not None:
try:
dtype = dtype.as_datatype_enum
except AttributeError:
dtype = dtypes.as_dtype(dtype).as_datatype_enum
ctx.ensure_initialized()
device = ctx.device_name
handle = ctx._handle # pylint: disable=protected-access
if isinstance(value, (float,) + six.integer_types):
# Use a scalar cache. This will put each scalar of each type only once on
# each device. Scalars don't use much device memory but copying scalars can
# trigger memcpys which are slow.
cache_key = device, value, dtype, type(value)
scalar_cache = ctx.scalar_cache()
tensor = scalar_cache.get(cache_key, None)
if tensor is not None:
return ops.EagerTensor(
value, handle, device, dtype, tensor)
t = ops.EagerTensor(value, handle, device, dtype)
scalar_cache[cache_key] = t
return t
else:
return ops.EagerTensor(value, handle, device, dtype)
@tf_export(v1=["constant"])
def constant_v1(
value, dtype=None, shape=None, name="Const", verify_shape=False):
"""Creates a constant tensor.
The resulting tensor is populated with values of type `dtype`, as
specified by arguments `value` and (optionally) `shape` (see examples
below).
The argument `value` can be a constant value, or a list of values of type
`dtype`. If `value` is a list, then the length of the list must be less
than or equal to the number of elements implied by the `shape` argument (if
specified). In the case where the list length is less than the number of
elements specified by `shape`, the last element in the list will be used
to fill the remaining entries.
The argument `shape` is optional. If present, it specifies the dimensions of
the resulting tensor. If not present, the shape of `value` is used.
If the argument `dtype` is not specified, then the type is inferred from
the type of `value`.
For example:
```python
# Constant 1-D Tensor populated with value list.
tensor = tf.constant([1, 2, 3, 4, 5, 6, 7]) => [1 2 3 4 5 6 7]
# Constant 2-D tensor populated with scalar value -1.
tensor = tf.constant(-1.0, shape=[2, 3]) => [[-1. -1. -1.]
[-1. -1. -1.]]
```
`tf.constant` differs from `tf.fill` in a few ways:
* `tf.constant` supports arbitrary constants, not just uniform scalar
Tensors like `tf.fill`.
* `tf.constant` creates a `Const` node in the computation graph with the
exact value at graph construction time. On the other hand, `tf.fill`
creates an Op in the graph that is expanded at runtime.
* Because `tf.constant` only embeds constant values in the graph, it does
not support dynamic shapes based on other runtime Tensors, whereas
`tf.fill` does.
Args:
value: A constant value (or list) of output type `dtype`.
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
verify_shape: Boolean that enables verification of a shape of values.
Returns:
A Constant Tensor.
Raises:
TypeError: if shape is incorrectly specified or unsupported.
"""
return _constant_impl(value, dtype, shape, name, verify_shape=verify_shape,
allow_broadcast=False)
@tf_export("constant", v1=[])
def constant(value, dtype=None, shape=None, name="Const"):
"""Creates a constant tensor.
The resulting tensor is populated with values of type `dtype`, as
specified by arguments `value` and (optionally) `shape` (see examples
below).
The argument `value` can be a constant value, or a list of values of type
`dtype`. If `value` is a list, then the length of the list must be less
than or equal to the number of elements implied by the `shape` argument (if
specified). In the case where the list length is less than the number of
elements specified by `shape`, the last element in the list will be used
to fill the remaining entries.
The argument `shape` is optional. If present, it specifies the dimensions of
the resulting tensor. If not present, the shape of `value` is used.
If the argument `dtype` is not specified, then the type is inferred from
the type of `value`.
For example:
```python
# Constant 1-D Tensor populated with value list.
tensor = tf.constant([1, 2, 3, 4, 5, 6]) => [1 2 3 4 5 6]
# Constant 1-D Tensor populated with value list.
tensor = tf.constant([1, 2, 3, 4, 5, 6], shape=(2,3))
=> [[1 2 3], [4 5 6]]
# Constant 2-D tensor populated with scalar value -1.
tensor = tf.constant(-1.0, shape=[2, 3]) => [[-1. -1. -1.]
[-1. -1. -1.]]
```
`tf.constant` differs from `tf.fill` in a few ways:
* `tf.constant` supports arbitrary constants, not just uniform scalar
Tensors like `tf.fill`.
* `tf.constant` creates a `Const` node in the computation graph with the
exact value at graph construction time. On the other hand, `tf.fill`
creates an Op in the graph that is expanded at runtime.
* Because `tf.constant` only embeds constant values in the graph, it does
not support dynamic shapes based on other runtime Tensors, whereas
`tf.fill` does.
Args:
value: A constant value (or list) of output type `dtype`.
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
Returns:
A Constant Tensor.
Raises:
TypeError: if shape is incorrectly specified or unsupported.
"""
return _constant_impl(value, dtype, shape, name, verify_shape=False,
allow_broadcast=True)
def _constant_impl(
value, dtype, shape, name, verify_shape, allow_broadcast):
"""Implementation of constant."""
ctx = context.context()
if ctx.executing_eagerly():
t = convert_to_eager_tensor(value, ctx, dtype)
if shape is None:
return t
shape = tensor_shape.as_shape(shape)
if shape == t.shape:
return t
if verify_shape:
raise TypeError("Expected Tensor's shape: %s, got %s." % (tuple(shape),
tuple(t.shape)))
num_t = t.shape.num_elements()
# TODO(josh11b): Implement shape -> eager tensor conversion.
if num_t == shape.num_elements():
return _eager_reshape(t, shape.as_list(), ctx)
if num_t == 1:
if t.dtype == dtypes.bool:
# We don't have a Fill kernel for bool dtype on GPU. So we first run
# Fill on CPU and then copy to GPU if needed.
with ops.device("/device:CPU:0"):
x = _eager_fill(shape.as_list(), t.cpu(), ctx)
return _eager_identity(x, ctx)
else:
return _eager_fill(shape.as_list(), t, ctx)
raise TypeError("Eager execution of tf.constant with unsupported shape "
"(value has %d elements, shape is %s with %d elements)." %
(num_t, shape, shape.num_elements()))
g = ops.get_default_graph()
tensor_value = attr_value_pb2.AttrValue()
tensor_value.tensor.CopyFrom(
tensor_util.make_tensor_proto(
value, dtype=dtype, shape=shape, verify_shape=verify_shape,
allow_broadcast=allow_broadcast))
dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
const_tensor = g.create_op(
"Const", [], [dtype_value.type],
attrs={"value": tensor_value,
"dtype": dtype_value},
name=name).outputs[0]
return const_tensor
def is_constant(tensor_or_op):
if isinstance(tensor_or_op, ops.Tensor):
op = tensor_or_op.op
else:
op = tensor_or_op
return op.type == "Const"
def _constant_tensor_conversion_function(v, dtype=None, name=None,
                                         as_ref=False):
  """Tensor conversion function that returns `v` as a constant tensor."""
  _ = as_ref
  return constant(v, dtype=dtype, name=name)
ops.register_tensor_conversion_function(
(list, tuple), _constant_tensor_conversion_function, 100)
ops.register_tensor_conversion_function(
np.ndarray, _constant_tensor_conversion_function, 100)
ops.register_tensor_conversion_function(
np.generic, _constant_tensor_conversion_function, 100)
ops.register_tensor_conversion_function(
object, _constant_tensor_conversion_function, 200)
def _tensor_shape_tensor_conversion_function(s,
dtype=None,
name=None,
as_ref=False):
"""Function to convert TensorShape to Tensor."""
_ = as_ref
if not s.is_fully_defined():
raise ValueError(
"Cannot convert a partially known TensorShape to a Tensor: %s" % s)
s_list = s.as_list()
int64_value = 0
for dim in s_list:
if dim >= 2**31:
int64_value = dim
break
if dtype is not None:
if dtype not in (dtypes.int32, dtypes.int64):
raise TypeError("Cannot convert a TensorShape to dtype: %s" % dtype)
if dtype == dtypes.int32 and int64_value:
raise ValueError("Cannot convert a TensorShape to dtype int32; "
"a dimension is too large (%s)" % int64_value)
else:
dtype = dtypes.int64 if int64_value else dtypes.int32
if name is None:
name = "shape_as_tensor"
return constant(s_list, dtype=dtype, name=name)
ops.register_tensor_conversion_function(
tensor_shape.TensorShape, _tensor_shape_tensor_conversion_function, 100)
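# A hedged sketch of the shape conversion above (the helper name
# `_example_shape_to_tensor` is illustrative): fully defined shapes become
# int32 constants unless some dimension is >= 2**31, which forces int64.
def _example_shape_to_tensor():
  small = _tensor_shape_tensor_conversion_function(
      tensor_shape.TensorShape([2, 3]))      # dtype defaults to int32
  large = _tensor_shape_tensor_conversion_function(
      tensor_shape.TensorShape([2**31, 1]))  # promoted to int64
  return small, large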
def _dimension_tensor_conversion_function(d,
dtype=None,
name=None,
as_ref=False):
"""Function to convert Dimension to Tensor."""
_ = as_ref
if d.value is None:
raise ValueError("Cannot convert an unknown Dimension to a Tensor: %s" % d)
if dtype is not None:
if dtype not in (dtypes.int32, dtypes.int64):
raise TypeError("Cannot convert a TensorShape to dtype: %s" % dtype)
else:
dtype = dtypes.int32
if name is None:
name = "shape_as_tensor"
return constant(d.value, dtype=dtype, name=name)
ops.register_tensor_conversion_function(
tensor_shape.Dimension, _dimension_tensor_conversion_function, 100)
|
tensorflow-master
|
tensorflow/python/framework/constant_op.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to manipulate a tensor graph in python.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
_VARIABLE_OPS = {
"Assign",
"AssignAdd",
"AssignSub",
"Queue",
"ScatterAdd",
"ScatterSub",
"ScatterUpdate",
"TruncatedNormal",
"Variable",
"VariableV2",
}
def _is_variable_op(op):
"""Returns true if 'op' refers to a Variable node."""
return op in _VARIABLE_OPS
@deprecation.deprecated(
date=None,
instructions="Use `tf.compat.v1.graph_util.must_run_on_cpu`")
@tf_export(v1=["graph_util.must_run_on_cpu"])
def must_run_on_cpu(node, pin_variables_on_cpu=False):
"""Returns True if the given node_def must run on CPU, otherwise False.
Args:
node: The node to be assigned to a device. Could be either an ops.Operation
or NodeDef.
pin_variables_on_cpu: If True, this function will return False if node_def
represents a variable-related op.
Returns:
True if the given node must run on CPU, otherwise False.
"""
if isinstance(node, ops.Operation):
node_def = node.node_def
else:
assert isinstance(node, node_def_pb2.NodeDef)
node_def = node
# If the op is a variable-related op, should we pin it on CPU?
if pin_variables_on_cpu and _is_variable_op(node_def.op):
return True
# Constant operations producing a string or int32 must run on CPU.
if node_def.op == "Const":
# Get the value of the 'dtype' attr
dtype = node_def.attr["dtype"].type
if dtype == dtypes.string or dtype == dtypes.int32:
return True
if node_def.op in ["DynamicStitch", "ParallelDynamicStitch"]:
dtype = node_def.attr["T"].type
if dtype == dtypes.int32:
# DynamicStitch on GPU only works for int32 values.
return True
if node_def.op in ["Cast"]:
dtype = node_def.attr["SrcT"].type
if dtype == dtypes.int32:
      # Cast on GPU does not work for int32 values.
return True
return False
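# A hedged sketch (the helper name `_example_must_run_on_cpu` is
# illustrative): a string `Const` NodeDef must be pinned to CPU, so the
# function above returns True for it.
def _example_must_run_on_cpu():
  node = node_def_pb2.NodeDef()
  node.op = "Const"
  node.attr["dtype"].type = dtypes.string.as_datatype_enum
  return must_run_on_cpu(node)  # True: string constants run on CPU.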
################################################################################
#
# device functions for use in with g.device(...)
#
################################################################################
def _node_name(n):
if n.startswith("^"):
return n[1:]
else:
return n.split(":")[0]
def _extract_graph_summary(graph_def):
"""Extracts useful information from the graph and returns them."""
name_to_input_name = {} # Keyed by the dest node name.
name_to_node = {} # Keyed by node name.
# Keeps track of node sequences. It is important to still output the
# operations in the original order.
name_to_seq_num = {} # Keyed by node name.
seq = 0
for node in graph_def.node:
n = _node_name(node.name)
name_to_node[n] = node
name_to_input_name[n] = [_node_name(x) for x in node.input]
name_to_seq_num[n] = seq
seq += 1
return name_to_input_name, name_to_node, name_to_seq_num
def _assert_nodes_are_present(name_to_node, nodes):
"""Assert that nodes are present in the graph."""
for d in nodes:
assert d in name_to_node, "%s is not in graph" % d
def _bfs_for_reachable_nodes(target_nodes, name_to_input_name):
"""Breadth first search for reachable nodes from target nodes."""
nodes_to_keep = set()
# Breadth first search to find all the nodes that we should keep.
next_to_visit = target_nodes[:]
while next_to_visit:
node = next_to_visit[0]
del next_to_visit[0]
if node in nodes_to_keep:
# Already visited this node.
continue
nodes_to_keep.add(node)
if node in name_to_input_name:
next_to_visit += name_to_input_name[node]
return nodes_to_keep
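# A hedged toy walk-through of the BFS above (node names are illustrative):
# with edges d -> c and c -> {a, b}, searching from ["d"] keeps all four
# nodes, while searching from ["c"] would drop "d".
def _example_bfs():
  name_to_input_name = {"a": [], "b": [], "c": ["a", "b"], "d": ["c"]}
  return _bfs_for_reachable_nodes(["d"], name_to_input_name)  # {a, b, c, d}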
@deprecation.deprecated(
date=None,
instructions="Use `tf.compat.v1.graph_util.extract_sub_graph`")
@tf_export(v1=["graph_util.extract_sub_graph"])
def extract_sub_graph(graph_def, dest_nodes):
"""Extract the subgraph that can reach any of the nodes in 'dest_nodes'.
Args:
graph_def: A graph_pb2.GraphDef proto.
dest_nodes: A list of strings specifying the destination node names.
Returns:
The GraphDef of the sub-graph.
Raises:
TypeError: If 'graph_def' is not a graph_pb2.GraphDef proto.
"""
if not isinstance(graph_def, graph_pb2.GraphDef):
raise TypeError("graph_def must be a graph_pb2.GraphDef proto.")
if isinstance(dest_nodes, six.string_types):
raise TypeError("dest_nodes must be a list.")
name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(
graph_def)
_assert_nodes_are_present(name_to_node, dest_nodes)
nodes_to_keep = _bfs_for_reachable_nodes(dest_nodes, name_to_input_name)
nodes_to_keep_list = sorted(
list(nodes_to_keep), key=lambda n: name_to_seq_num[n])
# Now construct the output GraphDef
out = graph_pb2.GraphDef()
for n in nodes_to_keep_list:
out.node.extend([copy.deepcopy(name_to_node[n])])
out.library.CopyFrom(graph_def.library)
out.versions.CopyFrom(graph_def.versions)
return out
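# A hedged usage sketch for `extract_sub_graph` (node names are
# illustrative): only nodes reachable from the destination nodes are kept.
def _example_extract_sub_graph():
  graph_def = graph_pb2.GraphDef()
  for name, op, inputs in [("a", "Const", []), ("b", "Const", []),
                           ("mul", "Mul", ["a", "b"]),
                           ("unused", "Const", [])]:
    node = graph_def.node.add()
    node.name = name
    node.op = op
    node.input.extend(inputs)
  return extract_sub_graph(graph_def, ["mul"])  # "unused" is dropped.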
@deprecation.deprecated(
date=None,
instructions="Use `tf.compat.v1.graph_util.tensor_shape_from_node_def_name`"
)
@tf_export(v1=["graph_util.tensor_shape_from_node_def_name"])
def tensor_shape_from_node_def_name(graph, input_name):
"""Convenience function to get a shape from a NodeDef's input string."""
# To get a tensor, the name must be in the form <input>:<port>, for example
# 'Mul:0'. The GraphDef input strings don't always have the port specified
# though, so if there isn't a colon we need to add a default ':0' to the end.
if ":" not in input_name:
canonical_name = input_name + ":0"
else:
canonical_name = input_name
tensor = graph.get_tensor_by_name(canonical_name)
shape = tensor.get_shape()
return shape
@deprecation.deprecated(
date=None,
instructions="Use `tf.compat.v1.graph_util.convert_variables_to_constants`")
@tf_export(v1=["graph_util.convert_variables_to_constants"])
def convert_variables_to_constants(sess,
input_graph_def,
output_node_names,
variable_names_whitelist=None,
variable_names_blacklist=None):
"""Replaces all the variables in a graph with constants of the same values.
If you have a trained graph containing Variable ops, it can be convenient to
convert them all to Const ops holding the same values. This makes it possible
to describe the network fully with a single GraphDef file, and allows the
removal of a lot of ops related to loading and saving the variables.
Args:
sess: Active TensorFlow session containing the variables.
input_graph_def: GraphDef object holding the network.
output_node_names: List of name strings for the result nodes of the graph.
variable_names_whitelist: The set of variable names to convert (by default,
all variables are converted).
variable_names_blacklist: The set of variable names to omit converting
to constants.
Returns:
GraphDef containing a simplified version of the original.
"""
def get_input_name(node):
"""Gets the name of the first input. Errors if suffix is not :0."""
details = node.input[0].split(":")
if len(details) == 1 or int(details[1]) == 0:
return details[0]
# While it is valid for input tensors to have a suffix that is not :0, this
# method is used to find the associated ops, not tensors, and therefore it
# is not valid.
raise ValueError("Tensor name '{0}' is invalid.".format(node.input[0]))
def create_const_op(node_name, dtype, data, data_shape=None):
"""Creates a Const op."""
output_node = node_def_pb2.NodeDef()
output_node.op = "Const"
output_node.name = node_name
output_node.attr["dtype"].CopyFrom(dtype)
output_node.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(
tensor=tensor_util.make_tensor_proto(
data, dtype=dtype.type, shape=data_shape)))
return output_node
# This graph only includes the nodes needed to evaluate the output nodes, and
# removes unneeded nodes like those involved in saving and assignment.
inference_graph = extract_sub_graph(input_graph_def, output_node_names)
# Identify the ops in the graph.
map_name_to_node = {
node.name: node for node in inference_graph.node
}
# Get list of variables.
variable_names = []
variable_dict_names = []
resource_identity_types = {}
for node in inference_graph.node:
if node.op in ["Variable", "VariableV2", "VarHandleOp"]:
variable_name = node.name
if ((variable_names_whitelist is not None and
variable_name not in variable_names_whitelist) or
(variable_names_blacklist is not None and
variable_name in variable_names_blacklist)):
continue
variable_dict_names.append(variable_name)
if node.op == "VarHandleOp":
variable_names.append(variable_name + "/Read/ReadVariableOp:0")
else:
variable_names.append(variable_name + ":0")
elif node.op in ["ReadVariableOp", "ResourceGather"]:
# There can be one or more Identity ops in between the ReadVariableOp and
# VarHandleOp. Store the Identity ops with the associated dtypes.
source_op_name = get_input_name(node)
while map_name_to_node[source_op_name].op == "Identity":
resource_identity_types[source_op_name] = node.attr["dtype"]
source_op_name = get_input_name(map_name_to_node[source_op_name])
if map_name_to_node[source_op_name].op != "VarHandleOp":
raise ValueError("Cannot find the variable that is an input "
"to the ReadVariableOp.")
# Gets map of variables and the associated data.
if variable_names:
returned_variables = sess.run(variable_names)
else:
returned_variables = []
variables_data_map = dict(zip(variable_dict_names, returned_variables))
logging.info("Froze %d variables.", len(returned_variables))
# Reconstruct the graph with constants in place of variables.
output_graph_def = graph_pb2.GraphDef()
how_many_converted = 0
for input_node in inference_graph.node:
output_node = node_def_pb2.NodeDef()
if input_node.name in variables_data_map:
data = variables_data_map[input_node.name]
output_node = create_const_op(input_node.name, input_node.attr["dtype"],
data, data.shape)
how_many_converted += 1
elif input_node.name in resource_identity_types:
# Converts the Identities of type RESOURCE_DT to the appropriate type
# based on the input they are referencing.
output_node.CopyFrom(input_node)
output_node.attr["T"].CopyFrom(resource_identity_types[input_node.name])
elif input_node.op == "ReadVariableOp":
# The first branch converts all VarHandleOps of ResourceVariables to
# constants, so we need to convert the associated ReadVariableOps to
# Identity ops.
output_node.op = "Identity"
output_node.name = input_node.name
output_node.input.extend([input_node.input[0]])
output_node.attr["T"].CopyFrom(input_node.attr["dtype"])
if "_class" in input_node.attr:
output_node.attr["_class"].CopyFrom(input_node.attr["_class"])
elif input_node.op == "ResourceGather":
      # The first branch converts the VarHandleOps that feed ResourceGather
      # ops into constants, so we need to convert the associated
      # ResourceGather ops to GatherV2 ops with a Const axis feeding into
      # them.
if input_node.attr["batch_dims"].i != 0:
raise ValueError("batch_dims != 0 is not supported by freeze_graph.")
axis_data = input_node.attr["batch_dims"].i
axis_node_name = input_node.name + "/axis"
axis_dtype = input_node.attr["Tindices"]
output_axis_node = create_const_op(axis_node_name, axis_dtype, axis_data)
output_graph_def.node.extend([output_axis_node])
output_node.op = "GatherV2"
output_node.name = input_node.name
output_node.input.extend(
[input_node.input[0], input_node.input[1], axis_node_name])
output_node.attr["Tparams"].CopyFrom(input_node.attr["dtype"])
output_node.attr["Tindices"].CopyFrom(input_node.attr["Tindices"])
output_node.attr["Taxis"].CopyFrom(axis_dtype)
if "_class" in input_node.attr:
output_node.attr["_class"].CopyFrom(input_node.attr["_class"])
else:
output_node.CopyFrom(input_node)
output_graph_def.node.extend([output_node])
output_graph_def.library.CopyFrom(inference_graph.library)
logging.info("Converted %d variables to const ops.", how_many_converted)
return output_graph_def
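# A hedged usage sketch for freezing a graph. `sess` and `output_node_names`
# are caller-supplied assumptions (an active TF1-style session and the names
# of the result nodes); the helper name is illustrative.
def _example_freeze(sess, output_node_names):
  input_graph_def = sess.graph.as_graph_def()
  frozen = convert_variables_to_constants(
      sess, input_graph_def, output_node_names)
  # `frozen` now holds Const nodes in place of Variable/VarHandleOp nodes
  # and can be serialized with frozen.SerializeToString().
  return frozen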
@deprecation.deprecated(
date=None,
instructions="Use `tf.compat.v1.graph_util.remove_training_nodes`")
@tf_export(v1=["graph_util.remove_training_nodes"])
def remove_training_nodes(input_graph, protected_nodes=None):
"""Prunes out nodes that aren't needed for inference.
There are nodes like Identity and CheckNumerics that are only useful
during training, and can be removed in graphs that will be used for
nothing but inference. Here we identify and remove them, returning an
equivalent graph. To be specific, CheckNumerics nodes are always removed, and
Identity nodes that aren't involved in control edges are spliced out so that
their input and outputs are directly connected.
Args:
input_graph: Model to analyze and prune.
protected_nodes: An optional list of names of nodes to be kept
unconditionally. This is for example useful to preserve Identity output
nodes.
  Returns:
    A `GraphDef` with the unnecessary nodes removed.
"""
if not protected_nodes:
protected_nodes = []
types_to_remove = {"CheckNumerics": True}
input_nodes = input_graph.node
names_to_remove = {}
for node in input_nodes:
if node.op in types_to_remove and node.name not in protected_nodes:
names_to_remove[node.name] = True
nodes_after_removal = []
for node in input_nodes:
if node.name in names_to_remove:
continue
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(node)
input_before_removal = node.input
del new_node.input[:]
for full_input_name in input_before_removal:
input_name = re.sub(r"^\^", "", full_input_name)
if input_name in names_to_remove:
continue
new_node.input.append(full_input_name)
nodes_after_removal.append(new_node)
types_to_splice = {"Identity": True}
control_input_names = set()
node_names_with_control_input = set()
for node in nodes_after_removal:
for node_input in node.input:
if "^" in node_input:
control_input_names.add(node_input.replace("^", ""))
node_names_with_control_input.add(node.name)
names_to_splice = {}
for node in nodes_after_removal:
if node.op in types_to_splice and node.name not in protected_nodes:
# We don't want to remove nodes that have control edge inputs, because
# they might be involved in subtle dependency issues that removing them
# will jeopardize.
if node.name not in node_names_with_control_input:
names_to_splice[node.name] = node.input[0]
# We also don't want to remove nodes which are used as control edge inputs.
names_to_splice = {name: value for name, value in names_to_splice.items()
if name not in control_input_names}
nodes_after_splicing = []
for node in nodes_after_removal:
if node.name in names_to_splice:
continue
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(node)
input_before_removal = node.input
del new_node.input[:]
for full_input_name in input_before_removal:
input_name = re.sub(r"^\^", "", full_input_name)
while input_name in names_to_splice:
full_input_name = names_to_splice[input_name]
input_name = re.sub(r"^\^", "", full_input_name)
new_node.input.append(full_input_name)
nodes_after_splicing.append(new_node)
output_graph = graph_pb2.GraphDef()
output_graph.node.extend(nodes_after_splicing)
return output_graph
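# A hedged sketch of the Identity splicing above (node names are
# illustrative): "ident" sits between "a" and "consume", carries no control
# edges, and is therefore spliced out.
def _example_remove_training_nodes():
  graph_def = graph_pb2.GraphDef()
  for name, op, inputs in [("a", "Const", []),
                           ("ident", "Identity", ["a"]),
                           ("consume", "Square", ["ident"])]:
    node = graph_def.node.add()
    node.name = name
    node.op = op
    node.input.extend(inputs)
  return remove_training_nodes(graph_def)  # "consume" now reads from "a".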
|
tensorflow-master
|
tensorflow/python/framework/graph_util_impl.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that the contrib module shows up properly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
class ContribTest(test.TestCase):
def testContrib(self):
# pylint: disable=g-import-not-at-top
import tensorflow as tf
_ = tf.contrib.layers # `tf.contrib` is loaded lazily on first use.
assert tf_inspect.ismodule(tf.contrib)
def testLayers(self):
# pylint: disable=g-import-not-at-top
import tensorflow as tf
assert tf_inspect.ismodule(tf.contrib.layers)
def testLinearOptimizer(self):
# pylint: disable=g-import-not-at-top
import tensorflow as tf
assert tf_inspect.ismodule(tf.contrib.linear_optimizer)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/framework/contrib_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exception types for TensorFlow errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import traceback
import warnings
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import error_interpolation
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import tf_stack
from tensorflow.python.util.tf_export import tf_export
def _compact_stack_trace(op):
"""Returns a traceback for `op` with common file prefixes stripped."""
compact_traces = []
common_prefix = error_interpolation.traceback_files_common_prefix([[op]])
for frame in op.traceback:
frame = list(frame)
filename = frame[tf_stack.TB_FILENAME]
if filename.startswith(common_prefix):
filename = filename[len(common_prefix):]
frame[tf_stack.TB_FILENAME] = filename
compact_traces.append(tuple(frame))
return compact_traces
@tf_export("errors.OpError", v1=["errors.OpError", "OpError"])
@deprecation.deprecated_endpoints("OpError")
class OpError(Exception):
"""A generic error that is raised when TensorFlow execution fails.
Whenever possible, the session will raise a more specific subclass
of `OpError` from the `tf.errors` module.
"""
def __init__(self, node_def, op, message, error_code):
"""Creates a new `OpError` indicating that a particular op failed.
Args:
node_def: The `node_def_pb2.NodeDef` proto representing the op that
failed, if known; otherwise None.
op: The `ops.Operation` that failed, if known; otherwise None.
message: The message string describing the failure.
error_code: The `error_codes_pb2.Code` describing the error.
"""
super(OpError, self).__init__()
self._node_def = node_def
self._op = op
self._message = message
self._error_code = error_code
def __reduce__(self):
# Allow the subclasses to accept less arguments in their __init__.
init_argspec = tf_inspect.getargspec(self.__class__.__init__)
args = tuple(getattr(self, arg) for arg in init_argspec.args[1:])
return self.__class__, args
@property
def message(self):
"""The error message that describes the error."""
return self._message
@property
def op(self):
"""The operation that failed, if known.
*N.B.* If the failed op was synthesized at runtime, e.g. a `Send`
or `Recv` op, there will be no corresponding
`tf.Operation`
object. In that case, this will return `None`, and you should
    instead use `tf.errors.OpError.node_def` to
discover information about the op.
Returns:
The `Operation` that failed, or None.
"""
return self._op
@property
def error_code(self):
"""The integer error code that describes the error."""
return self._error_code
@property
def node_def(self):
"""The `NodeDef` proto representing the op that failed."""
return self._node_def
def __str__(self):
if self._op is not None:
output = ["%s\n\nOriginal stack trace for %r:\n" % (self.message,
self._op.name,)]
curr_traceback_list = traceback.format_list(
_compact_stack_trace(self._op))
output.extend(curr_traceback_list)
# pylint: disable=protected-access
original_op = self._op._original_op
# pylint: enable=protected-access
while original_op is not None:
output.append(
"\n...which was originally created as op %r, defined at:\n"
% (original_op.name,))
prev_traceback_list = curr_traceback_list
curr_traceback_list = traceback.format_list(
_compact_stack_trace(original_op))
# Attempt to elide large common subsequences of the subsequent
# stack traces.
#
# TODO(mrry): Consider computing the actual longest common subsequence.
is_eliding = False
elide_count = 0
last_elided_line = None
for line, line_in_prev in zip(curr_traceback_list, prev_traceback_list):
if line == line_in_prev:
if is_eliding:
elide_count += 1
last_elided_line = line
else:
output.append(line)
is_eliding = True
elide_count = 0
else:
if is_eliding:
if elide_count > 0:
output.extend(
["[elided %d identical lines from previous traceback]\n"
% (elide_count - 1,), last_elided_line])
is_eliding = False
            output.append(line)
# pylint: disable=protected-access
original_op = original_op._original_op
# pylint: enable=protected-access
return "".join(output)
else:
return self.message
OK = error_codes_pb2.OK
tf_export("errors.OK").export_constant(__name__, "OK")
CANCELLED = error_codes_pb2.CANCELLED
tf_export("errors.CANCELLED").export_constant(__name__, "CANCELLED")
UNKNOWN = error_codes_pb2.UNKNOWN
tf_export("errors.UNKNOWN").export_constant(__name__, "UNKNOWN")
INVALID_ARGUMENT = error_codes_pb2.INVALID_ARGUMENT
tf_export("errors.INVALID_ARGUMENT").export_constant(__name__,
"INVALID_ARGUMENT")
DEADLINE_EXCEEDED = error_codes_pb2.DEADLINE_EXCEEDED
tf_export("errors.DEADLINE_EXCEEDED").export_constant(__name__,
"DEADLINE_EXCEEDED")
NOT_FOUND = error_codes_pb2.NOT_FOUND
tf_export("errors.NOT_FOUND").export_constant(__name__, "NOT_FOUND")
ALREADY_EXISTS = error_codes_pb2.ALREADY_EXISTS
tf_export("errors.ALREADY_EXISTS").export_constant(__name__, "ALREADY_EXISTS")
PERMISSION_DENIED = error_codes_pb2.PERMISSION_DENIED
tf_export("errors.PERMISSION_DENIED").export_constant(__name__,
"PERMISSION_DENIED")
UNAUTHENTICATED = error_codes_pb2.UNAUTHENTICATED
tf_export("errors.UNAUTHENTICATED").export_constant(__name__, "UNAUTHENTICATED")
RESOURCE_EXHAUSTED = error_codes_pb2.RESOURCE_EXHAUSTED
tf_export("errors.RESOURCE_EXHAUSTED").export_constant(__name__,
"RESOURCE_EXHAUSTED")
FAILED_PRECONDITION = error_codes_pb2.FAILED_PRECONDITION
tf_export("errors.FAILED_PRECONDITION").export_constant(__name__,
"FAILED_PRECONDITION")
ABORTED = error_codes_pb2.ABORTED
tf_export("errors.ABORTED").export_constant(__name__, "ABORTED")
OUT_OF_RANGE = error_codes_pb2.OUT_OF_RANGE
tf_export("errors.OUT_OF_RANGE").export_constant(__name__, "OUT_OF_RANGE")
UNIMPLEMENTED = error_codes_pb2.UNIMPLEMENTED
tf_export("errors.UNIMPLEMENTED").export_constant(__name__, "UNIMPLEMENTED")
INTERNAL = error_codes_pb2.INTERNAL
tf_export("errors.INTERNAL").export_constant(__name__, "INTERNAL")
UNAVAILABLE = error_codes_pb2.UNAVAILABLE
tf_export("errors.UNAVAILABLE").export_constant(__name__, "UNAVAILABLE")
DATA_LOSS = error_codes_pb2.DATA_LOSS
tf_export("errors.DATA_LOSS").export_constant(__name__, "DATA_LOSS")
# pylint: disable=line-too-long
@tf_export("errors.CancelledError")
class CancelledError(OpError):
"""Raised when an operation or step is cancelled.
  For example, a long-running operation (e.g.
  `tf.QueueBase.enqueue`) may be
  cancelled by running another operation (e.g.
  `tf.QueueBase.close`),
  or by `tf.Session.close`.
A step that is running such a long-running operation will fail by raising
`CancelledError`.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `CancelledError`."""
super(CancelledError, self).__init__(node_def, op, message, CANCELLED)
# pylint: enable=line-too-long
@tf_export("errors.UnknownError")
class UnknownError(OpError):
"""Unknown error.
An example of where this error may be returned is if a Status value
received from another address space belongs to an error-space that
is not known to this address space. Also errors raised by APIs that
do not return enough error information may be converted to this
error.
@@__init__
"""
def __init__(self, node_def, op, message, error_code=UNKNOWN):
"""Creates an `UnknownError`."""
super(UnknownError, self).__init__(node_def, op, message, error_code)
@tf_export("errors.InvalidArgumentError")
class InvalidArgumentError(OpError):
"""Raised when an operation receives an invalid argument.
  This may occur, for example, if an operation receives an input
tensor that has an invalid value or shape. For example, the
`tf.matmul` op will raise this
error if it receives an input that is not a matrix, and the
`tf.reshape` op will raise
this error if the new shape does not match the number of elements in the input
tensor.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `InvalidArgumentError`."""
super(InvalidArgumentError, self).__init__(node_def, op, message,
INVALID_ARGUMENT)
@tf_export("errors.DeadlineExceededError")
class DeadlineExceededError(OpError):
"""Raised when a deadline expires before an operation could complete.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `DeadlineExceededError`."""
super(DeadlineExceededError, self).__init__(node_def, op, message,
DEADLINE_EXCEEDED)
@tf_export("errors.NotFoundError")
class NotFoundError(OpError):
"""Raised when a requested entity (e.g., a file or directory) was not found.
For example, running the
`tf.WholeFileReader.read`
operation could raise `NotFoundError` if it receives the name of a file that
does not exist.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `NotFoundError`."""
super(NotFoundError, self).__init__(node_def, op, message, NOT_FOUND)
@tf_export("errors.AlreadyExistsError")
class AlreadyExistsError(OpError):
"""Raised when an entity that we attempted to create already exists.
For example, running an operation that saves a file
(e.g. `tf.train.Saver.save`)
could potentially raise this exception if an explicit filename for an
existing file was passed.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `AlreadyExistsError`."""
super(AlreadyExistsError, self).__init__(node_def, op, message,
ALREADY_EXISTS)
@tf_export("errors.PermissionDeniedError")
class PermissionDeniedError(OpError):
"""Raised when the caller does not have permission to run an operation.
For example, running the
`tf.WholeFileReader.read`
operation could raise `PermissionDeniedError` if it receives the name of a
file for which the user does not have the read file permission.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `PermissionDeniedError`."""
super(PermissionDeniedError, self).__init__(node_def, op, message,
PERMISSION_DENIED)
@tf_export("errors.UnauthenticatedError")
class UnauthenticatedError(OpError):
"""The request does not have valid authentication credentials.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnauthenticatedError`."""
super(UnauthenticatedError, self).__init__(node_def, op, message,
UNAUTHENTICATED)
@tf_export("errors.ResourceExhaustedError")
class ResourceExhaustedError(OpError):
"""Some resource has been exhausted.
For example, this error might be raised if a per-user quota is
exhausted, or perhaps the entire file system is out of space.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `ResourceExhaustedError`."""
super(ResourceExhaustedError, self).__init__(node_def, op, message,
RESOURCE_EXHAUSTED)
@tf_export("errors.FailedPreconditionError")
class FailedPreconditionError(OpError):
"""Operation was rejected because the system is not in a state to execute it.
This exception is most commonly raised when running an operation
that reads a `tf.Variable`
before it has been initialized.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `FailedPreconditionError`."""
super(FailedPreconditionError, self).__init__(node_def, op, message,
FAILED_PRECONDITION)
@tf_export("errors.AbortedError")
class AbortedError(OpError):
"""The operation was aborted, typically due to a concurrent action.
For example, running a
`tf.QueueBase.enqueue`
operation may raise `AbortedError` if a
`tf.QueueBase.close` operation
previously ran.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `AbortedError`."""
super(AbortedError, self).__init__(node_def, op, message, ABORTED)
@tf_export("errors.OutOfRangeError")
class OutOfRangeError(OpError):
"""Raised when an operation iterates past the valid input range.
This exception is raised in "end-of-file" conditions, such as when a
`tf.QueueBase.dequeue`
operation is blocked on an empty queue, and a
`tf.QueueBase.close`
operation executes.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `OutOfRangeError`."""
super(OutOfRangeError, self).__init__(node_def, op, message,
OUT_OF_RANGE)
@tf_export("errors.UnimplementedError")
class UnimplementedError(OpError):
"""Raised when an operation has not been implemented.
  Some operations may raise this error when passed otherwise-valid
  arguments that they do not currently support. For example, running
the `tf.nn.max_pool2d` operation
would raise this error if pooling was requested on the batch dimension,
because this is not yet supported.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnimplementedError`."""
super(UnimplementedError, self).__init__(node_def, op, message,
UNIMPLEMENTED)
@tf_export("errors.InternalError")
class InternalError(OpError):
"""Raised when the system experiences an internal error.
This exception is raised when some invariant expected by the runtime
has been broken. Catching this exception is not recommended.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `InternalError`."""
super(InternalError, self).__init__(node_def, op, message, INTERNAL)
@tf_export("errors.UnavailableError")
class UnavailableError(OpError):
"""Raised when the runtime is currently unavailable.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnavailableError`."""
super(UnavailableError, self).__init__(node_def, op, message,
UNAVAILABLE)
@tf_export("errors.DataLossError")
class DataLossError(OpError):
"""Raised when unrecoverable data loss or corruption is encountered.
For example, this may be raised by running a
`tf.WholeFileReader.read`
operation, if the file is truncated while it is being read.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `DataLossError`."""
super(DataLossError, self).__init__(node_def, op, message, DATA_LOSS)
_CODE_TO_EXCEPTION_CLASS = {
CANCELLED: CancelledError,
UNKNOWN: UnknownError,
INVALID_ARGUMENT: InvalidArgumentError,
DEADLINE_EXCEEDED: DeadlineExceededError,
NOT_FOUND: NotFoundError,
ALREADY_EXISTS: AlreadyExistsError,
PERMISSION_DENIED: PermissionDeniedError,
UNAUTHENTICATED: UnauthenticatedError,
RESOURCE_EXHAUSTED: ResourceExhaustedError,
FAILED_PRECONDITION: FailedPreconditionError,
ABORTED: AbortedError,
OUT_OF_RANGE: OutOfRangeError,
UNIMPLEMENTED: UnimplementedError,
INTERNAL: InternalError,
UNAVAILABLE: UnavailableError,
DATA_LOSS: DataLossError,
}
c_api.PyExceptionRegistry_Init(_CODE_TO_EXCEPTION_CLASS)
_EXCEPTION_CLASS_TO_CODE = {
class_: code for code, class_ in _CODE_TO_EXCEPTION_CLASS.items()}
@tf_export(v1=["errors.exception_type_from_error_code"])
def exception_type_from_error_code(error_code):
return _CODE_TO_EXCEPTION_CLASS[error_code]
@tf_export(v1=["errors.error_code_from_exception_type"])
def error_code_from_exception_type(cls):
try:
return _EXCEPTION_CLASS_TO_CODE[cls]
except KeyError:
warnings.warn("Unknown class exception")
return UnknownError(None, None, "Unknown class exception", None)
def _make_specific_exception(node_def, op, message, error_code):
try:
exc_type = exception_type_from_error_code(error_code)
return exc_type(node_def, op, message)
except KeyError:
warnings.warn("Unknown error code: %d" % error_code)
return UnknownError(node_def, op, message, error_code)
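# A hedged sketch (the helper name is illustrative): the two registries above
# are inverses of each other for every registered code/class pair.
def _example_error_code_round_trip():
  exc_type = exception_type_from_error_code(NOT_FOUND)  # NotFoundError
  code = error_code_from_exception_type(exc_type)       # NOT_FOUND
  return exc_type, code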
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
# TODO(b/77295559): expand use of TF_Status* SWIG typemap and deprecate this.
@tf_export(v1=["errors.raise_exception_on_not_ok_status"]) # pylint: disable=invalid-name
class raise_exception_on_not_ok_status(object):
"""Context manager to check for C API status."""
def __enter__(self):
self.status = c_api_util.ScopedTFStatus()
return self.status.status
def __exit__(self, type_arg, value_arg, traceback_arg):
try:
if c_api.TF_GetCode(self.status.status) != 0:
raise _make_specific_exception(
None, None,
compat.as_text(c_api.TF_Message(self.status.status)),
c_api.TF_GetCode(self.status.status))
      # Delete the underlying status object from memory, otherwise it stays
      # alive because the traceback created by the raise above holds a
      # reference to it.
finally:
del self.status
return False # False values do not suppress exceptions
|
tensorflow-master
|
tensorflow/python/framework/errors_impl.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global registry for OpDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import op_def_pb2
_registered_ops = {}
def register_op_list(op_list):
"""Register all the ops in an op_def_pb2.OpList."""
if not isinstance(op_list, op_def_pb2.OpList):
raise TypeError("%s is %s, not an op_def_pb2.OpList" %
(op_list, type(op_list)))
for op_def in op_list.op:
if op_def.name in _registered_ops:
if _registered_ops[op_def.name] != op_def:
raise ValueError(
"Registered op_def for %s (%s) not equal to op_def to register (%s)"
% (op_def.name, _registered_ops[op_def.name], op_def))
else:
_registered_ops[op_def.name] = op_def
def get_registered_ops():
"""Returns a dictionary mapping names to OpDefs."""
return _registered_ops
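# A hedged usage sketch ("ExampleOp" is an illustrative name, not a real TF
# op): build an OpList proto, register it, and look the OpDef back up.
# Re-registering an identical OpDef is a no-op.
def _example_register():
  op_list = op_def_pb2.OpList()
  op_def = op_list.op.add()
  op_def.name = "ExampleOp"
  register_op_list(op_list)
  return get_registered_ops()["ExampleOp"]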
|
tensorflow-master
|
tensorflow/python/framework/op_def_registry.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import registry
from tensorflow.python.platform import test
def bar():
pass
class RegistryTest(test.TestCase, parameterized.TestCase):
class Foo(object):
pass
# Test the registry basics on both classes (Foo) and functions (bar).
@parameterized.parameters([Foo, bar])
def testRegistryBasics(self, candidate):
myreg = registry.Registry('testRegistry')
with self.assertRaises(LookupError):
myreg.lookup('testKey')
myreg.register(candidate)
self.assertEqual(myreg.lookup(candidate.__name__), candidate)
myreg.register(candidate, 'testKey')
self.assertEqual(myreg.lookup('testKey'), candidate)
self.assertEqual(
sorted(myreg.list()), sorted(['testKey', candidate.__name__]))
def testDuplicate(self):
myreg = registry.Registry('testbar')
myreg.register(bar, 'Bar')
with self.assertRaisesRegexp(
KeyError, r'Registering two testbar with name \'Bar\'! '
r'\(Previous registration was in [^ ]+ .*.py:[0-9]+\)'):
myreg.register(bar, 'Bar')
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/framework/registry_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Type specifications for TensorFlow APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# Use LazyLoader to avoid circular dependencies.
tensor_spec = LazyLoader(
"tensor_spec", globals(),
"tensorflow.python.framework.tensor_spec")
ops = LazyLoader(
"ops", globals(),
"tensorflow.python.framework.ops")
# TODO(b/133606651) Deprecate the tf.data.experimental.Structure endpoint.
@tf_export("TypeSpec", "data.experimental.Structure")
@six.add_metaclass(abc.ABCMeta)
class TypeSpec(object):
"""Specifies a TensorFlow value type.
A `tf.TypeSpec` provides metadata describing an object accepted or returned
by TensorFlow APIs. Concrete subclasses, such as `tf.TensorSpec` and
`tf.RaggedTensorSpec`, are used to describe different value types.
"""
# === Subclassing ===
#
# Each `TypeSpec` subclass must define:
#
# * A "component encoding" for values.
# * A "serialization" for types.
#
# The component encoding for a value is a nested structure of `tf.Tensor`
# or `CompositeTensor` that can be used by the `TypeSpec` to reconstruct
# the value. Each individual `TypeSpec` must use the same nested structure
# for all values -- this structure is defined by the `component_specs`
# attribute. Decomposing values into components, and reconstructing them
# from those components, should be inexpensive. In particular, it should
# *not* require any TensorFlow ops.
#
# The serialization for a `TypeSpec` is a nested tuple of values that can
# be used to reconstruct the `TypeSpec`. See the documentation for
# `_serialize()` for more information.
__slots__ = []
@abc.abstractproperty
def value_type(self):
"""The Python type for values that are compatible with this TypeSpec."""
raise NotImplementedError("%s.value_type" % type(self).__name__)
def is_compatible_with(self, spec_or_value):
"""Returns true if `spec_or_value` is compatible with this TypeSpec."""
# === Subclassing ===
# If not overridden by subclasses, the default behavior is to convert
# `spec_or_value` to a `TypeSpec` (if it isn't already); and then to
# consider two `TypeSpec`s compatible if they have the same type, and
# the values returned by `_serialize` are compatible (where
# `tf.TensorShape`, `tf.TensorSpec`, and `tf.DType` are checked for
# compatibility using their `is_compatible_with` method; and all other
# types are considered compatible if they are equal).
if not isinstance(spec_or_value, TypeSpec):
spec_or_value = type_spec_from_value(spec_or_value)
if type(self) is not type(spec_or_value):
return False
return self.__is_compatible(self._serialize(),
spec_or_value._serialize()) # pylint: disable=protected-access
def most_specific_compatible_type(self, other):
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
"""
# === Subclassing ===
# If not overridden by a subclass, the default behavior is to raise a
# `ValueError` if `self` and `other` have different types, or if their type
# serializations differ by anything other than `TensorShape`s. Otherwise,
# the two type serializations are combined (using
# `most_specific_compatible_shape` to combine `TensorShape`s), and the
# result is used to construct and return a new `TypeSpec`.
if type(self) is not type(other):
raise ValueError("No TypeSpec is compatible with both %s and %s" %
(self, other))
merged = self.__most_specific_compatible_type_serialization(
self._serialize(), other._serialize()) # pylint: disable=protected-access
return self._deserialize(merged)
# === Component encoding for values ===
@abc.abstractmethod
def _to_components(self, value):
"""Encodes `value` as a nested structure of `Tensor` or `CompositeTensor`.
Args:
value: A value compatible with this `TypeSpec`. (Caller is responsible
for ensuring compatibility.)
Returns:
A nested structure of `tf.Tensor` or `tf.CompositeTensor` compatible with
`self._component_specs`, which can be used to reconstruct `value`.
"""
# === Subclassing ===
# This method must be inexpensive (do not call TF ops).
raise NotImplementedError("%s._to_components()" % type(self).__name__)
@abc.abstractmethod
def _from_components(self, components):
"""Reconstructs a value from a nested structure of Tensor/CompositeTensor.
Args:
components: A nested structure of `tf.Tensor` or `tf.CompositeTensor`,
        compatible with `self._component_specs`. (Caller is responsible for
ensuring compatibility.)
Returns:
A value that is compatible with this `TypeSpec`.
"""
# === Subclassing ===
# This method must be inexpensive (do not call TF ops).
raise NotImplementedError("%s._from_components()" % type(self).__name__)
@abc.abstractproperty
def _component_specs(self):
"""A nested structure of TypeSpecs for this type's components.
Returns:
A nested structure describing the component encodings that are returned
by this TypeSpec's `_to_components` method. In particular, for a
TypeSpec `spec` and a compatible value `value`:
```
nest.map_structure(lambda t, c: assert t.is_compatible_with(c),
spec._component_specs, spec._to_components(value))
```
"""
raise NotImplementedError("%s._component_specs()" % type(self).__name__)
# === Tensor list encoding for values ===
def _to_tensor_list(self, value):
"""Encodes `value` as a flat list of `tf.Tensor`.
By default, this just flattens `self._to_components(value)` using
`nest.flatten`. However, subclasses may override this to return a
different tensor encoding for values. In particular, some subclasses
of `BatchableTypeSpec` override this method to return a "boxed" encoding
for values, which then can be batched or unbatched. See
`BatchableTypeSpec` for more details.
Args:
      value: A value compatible with this `TypeSpec`. (Caller is responsible
for ensuring compatibility.)
Returns:
A list of `tf.Tensor`, compatible with `self._flat_tensor_specs`, which
can be used to reconstruct `value`.
"""
return nest.flatten(self._to_components(value), expand_composites=True)
def _from_tensor_list(self, tensor_list):
"""Reconstructs a value from a flat list of `tf.Tensor`.
Args:
tensor_list: A flat list of `tf.Tensor`, compatible with
`self._flat_tensor_specs`.
Returns:
A value that is compatible with this `TypeSpec`.
Raises:
ValueError: If `tensor_list` is not compatible with
`self._flat_tensor_specs`.
"""
self.__check_tensor_list(tensor_list)
return self._from_compatible_tensor_list(tensor_list)
def _from_compatible_tensor_list(self, tensor_list):
"""Reconstructs a value from a compatible flat list of `tf.Tensor`.
Args:
tensor_list: A flat list of `tf.Tensor`, compatible with
`self._flat_tensor_specs`. (Caller is responsible for ensuring
compatibility.)
Returns:
A value that is compatible with this `TypeSpec`.
"""
return self._from_components(nest.pack_sequence_as(
self._component_specs, tensor_list, expand_composites=True))
@property
def _flat_tensor_specs(self):
"""A list of TensorSpecs compatible with self._to_tensor_list(v)."""
return nest.flatten(self._component_specs, expand_composites=True)
# TODO(b/133606651) Remove this attribute once code in tf.data has been
# refactored to use _flat_tensor_specs instead.
@property
def _flat_shapes(self):
"""The `tf.TensorShape`s for the tensor list encoding."""
return [spec.shape for spec in self._flat_tensor_specs]
# TODO(b/133606651) Remove this attribute once code in tf.data has been
# refactored to use _flat_tensor_specs instead.
@property
def _flat_types(self):
"""The `tf.DType`s for the tensor list encoding."""
return [spec.dtype for spec in self._flat_tensor_specs]
# === Serialization for types ===
@abc.abstractmethod
def _serialize(self):
"""Returns a nested tuple containing the state of this TypeSpec.
The serialization may contain the following value types: boolean,
integer, string, float, None, `TensorSpec`, `tf.TensorShape`, `tf.DType`,
`np.ndarray`, `TypeSpec`, and nested tuples of any of the above.
This method is used to provide default definitions for: equality
testing (__eq__, __ne__), hashing (__hash__), pickling (__reduce__),
string representation (__repr__), `self.is_compatible_with()`,
`self.most_specific_compatible_type()`, and protobuf serialization
(e.g. TensorInfo and StructuredValue).
"""
raise NotImplementedError("%s._serialize()" % type(self).__name__)
@classmethod
def _deserialize(cls, serialization):
"""Reconstructs a TypeSpec from a value returned by `serialize`."""
return cls(*serialization)
# === Operators ===
def __eq__(self, other):
# pylint: disable=protected-access
return self.__get_cmp_key() == other.__get_cmp_key()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.__get_cmp_key())
def __reduce__(self):
return type(self), self._serialize()
def __repr__(self):
return "%s%r" % (type(self).__name__, self._serialize())
# === Legacy Output ===
# TODO(b/133606651) Document and/or deprecate the legacy_output methods.
# (These are used by tf.data.)
def _to_legacy_output_types(self):
raise NotImplementedError("%s._to_legacy_output_types()" %
type(self).__name__)
def _to_legacy_output_shapes(self):
raise NotImplementedError("%s._to_legacy_output_shapes()" %
type(self).__name__)
def _to_legacy_output_classes(self):
return self.value_type
# === Private Helper Methods ===
def __check_tensor_list(self, tensor_list):
expected = self._flat_tensor_specs
specs = [type_spec_from_value(t) for t in tensor_list]
if len(specs) != len(expected):
raise ValueError("Incompatible input: wrong number of tensors")
for i, (s1, s2) in enumerate(zip(specs, expected)):
if not s1.is_compatible_with(s2):
raise ValueError("Incompatible input: tensor %d (%s) is incompatible "
"with %s" % (i, tensor_list[i], s2))
def __get_cmp_key(self):
"""Returns a hashable eq-comparable key for `self`."""
# TODO(b/133606651): Decide whether to cache this value.
return (type(self), self.__make_cmp_key(self._serialize()))
def __make_cmp_key(self, value):
"""Converts `value` to a hashable key."""
if isinstance(value, (int, float, bool, dtypes.DType, TypeSpec)):
return value
elif isinstance(value, compat.bytes_or_text_types):
return value
elif value is None:
return value
elif isinstance(value, tuple):
return tuple([self.__make_cmp_key(v) for v in value])
elif isinstance(value, tensor_shape.TensorShape):
if value.ndims is None:
# Note: we include a type object in the tuple, to ensure we can't get
# false-positive matches (since users can't include type objects).
return (tensor_shape.TensorShape, None)
else:
return (tensor_shape.TensorShape, tuple(value.as_list()))
elif isinstance(value, np.ndarray):
return (np.ndarray, value.shape,
TypeSpec.__nested_list_to_tuple(value.tolist()))
else:
raise ValueError("Unsupported value type %s returned by "
"%s._serialize" %
(type(value).__name__, type(self).__name__))
@staticmethod
def __nested_list_to_tuple(value):
"""Converts a nested list to a corresponding nested tuple."""
if isinstance(value, list):
return tuple(TypeSpec.__nested_list_to_tuple(v) for v in value)
else:
return value
@staticmethod
def __is_compatible(a, b):
"""Returns true if the given type serializations compatible."""
if type(a) is not type(b):
return False
if isinstance(a, tuple):
return (len(a) == len(b) and
all(TypeSpec.__is_compatible(x, y) for (x, y) in zip(a, b)))
elif isinstance(a, (TypeSpec, tensor_shape.TensorShape, dtypes.DType)):
return a.is_compatible_with(b)
else:
return a == b
@staticmethod
def __most_specific_compatible_type_serialization(a, b):
"""Helper for most_specific_compatible_type.
Combines two type serializations as follows:
* If they are both tuples of the same length, then recursively combine
the respective tuple elements.
* If they are both TypeSpecs, then combine using
      TypeSpec.most_specific_compatible_type.
* If they are both TensorShapes, then combine using
TensorShape.most_specific_compatible_shape.
* If they are both TensorSpecs with the same dtype, then combine using
TensorShape.most_specific_compatible_shape to combine shapes.
* If they are equal, then return a.
* If none of the above, then raise a ValueError.
Args:
a: A serialized TypeSpec or nested component from a serialized TypeSpec.
b: A serialized TypeSpec or nested component from a serialized TypeSpec.
Returns:
A value with the same type and structure as `a` and `b`.
Raises:
ValueError: If `a` and `b` are incompatible.
"""
if type(a) is not type(b):
raise ValueError("Types are not compatible: %r vs %r" % (a, b))
if isinstance(a, tuple):
if len(a) != len(b):
raise ValueError("Types are not compatible: %r vs %r" % (a, b))
return tuple(TypeSpec.__most_specific_compatible_type_serialization(x, y)
for (x, y) in zip(a, b))
elif isinstance(a, tensor_shape.TensorShape):
return a.most_specific_compatible_shape(b)
elif isinstance(a, list):
raise AssertionError("_serialize() should not return list values.")
elif isinstance(a, TypeSpec):
return a.most_specific_compatible_type(b)
else:
if a != b:
raise ValueError("Types are not compatible: %r vs %r" % (a, b))
return a
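# A minimal subclassing sketch (hedged; `_ExampleWrapper` and
# `_ExampleWrapperSpec` are illustrative, not TF API). It satisfies the two
# contracts described above: a component encoding (a single Tensor) and a
# serialization (shape, dtype) that the default `_deserialize` can pass back
# to `__init__`.
class _ExampleWrapper(object):
  """Toy value type holding a single tensor."""

  def __init__(self, tensor):
    self.tensor = tensor


class _ExampleWrapperSpec(TypeSpec):
  """TypeSpec for `_ExampleWrapper` values."""

  __slots__ = ["_shape", "_dtype"]

  def __init__(self, shape, dtype):
    self._shape = tensor_shape.as_shape(shape)
    self._dtype = dtypes.as_dtype(dtype)

  @property
  def value_type(self):
    return _ExampleWrapper

  def _to_components(self, value):
    return value.tensor  # Inexpensive: no TF ops.

  def _from_components(self, components):
    return _ExampleWrapper(components)

  @property
  def _component_specs(self):
    return tensor_spec.TensorSpec(self._shape, self._dtype)

  def _serialize(self):
    return (self._shape, self._dtype)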
class BatchableTypeSpec(TypeSpec):
"""TypeSpec with a batchable tensor encoding.
The batchable tensor encoding is a list of `tf.Tensor`s that supports
batching and unbatching. In particular, stacking (or unstacking)
values with the same `TypeSpec` must be equivalent to stacking (or
unstacking) each of their tensor lists. Unlike the component encoding
  (returned by `self._to_components()`), the batchable tensor encoding
may require using encoding/decoding ops.
If a subclass's batchable tensor encoding is not simply a flattened version
of the component encoding, then the subclass must override `_to_tensor_list`,
  `_from_tensor_list`, and `_flat_tensor_specs`.
"""
__slots__ = []
@abc.abstractmethod
def _batch(self, batch_size):
"""Returns a TypeSpec representing a batch of objects with this TypeSpec.
Args:
batch_size: An `int` representing the number of elements in a batch,
or `None` if the batch size may vary.
Returns:
A `TypeSpec` representing a batch of objects with this TypeSpec.
"""
raise NotImplementedError("%s._batch" % type(self).__name__)
@abc.abstractmethod
def _unbatch(self):
"""Returns a TypeSpec representing a single element this TypeSpec.
Returns:
A `TypeSpec` representing a single element of objects with this TypeSpec.
"""
raise NotImplementedError("%s._unbatch" % type(self).__name__)
def _to_batched_tensor_list(self, value):
"""Returns a tensor list encoding for value with rank>0."""
tensor_list = self._to_tensor_list(value)
if any(t.shape.ndims == 0 for t in tensor_list):
raise ValueError("Value %s has insufficient rank for batching." % value)
return tensor_list
def type_spec_from_value(value):
"""Returns a `TypeSpec` that represents the given `value`.
Args:
value: A value that can be accepted or returned by TensorFlow APIs.
Returns:
A `TypeSpec` that is compatible with `value`.
Raises:
TypeError: If a TypeSpec cannot be built for `value`, because its type
is not supported.
"""
spec = _type_spec_from_value(value)
if spec is not None:
return spec
# Fallback: try converting value to a tensor.
try:
tensor = ops.convert_to_tensor(value)
spec = _type_spec_from_value(tensor)
if spec is not None:
return spec
except (ValueError, TypeError):
pass
raise TypeError("Could not build a TypeSpec for %r with type %s" %
(value, type(value).__name__))
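# A brief, hypothetical sketch of the function above: dense Tensors map to
# TensorSpecs, while CompositeTensors such as SparseTensor supply their own
# `_type_spec`. `_example_type_spec_from_value` is not part of TensorFlow
# and assumes eager execution.
def _example_type_spec_from_value():
  """Illustrative sketch only; not part of the public API."""
  import tensorflow as tf  # Standalone snippet.
  dense_spec = type_spec_from_value(tf.constant([1.0, 2.0]))
  # ==> TensorSpec(shape=(2,), dtype=tf.float32)
  sparse_spec = type_spec_from_value(
      tf.SparseTensor(indices=[[0]], values=[1.0], dense_shape=[3]))
  # ==> SparseTensorSpec(shape=(3,), dtype=tf.float32)
  return dense_spec, sparse_spec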
def _type_spec_from_value(value):
"""Returns a `TypeSpec` that represents the given `value`."""
if isinstance(value, ops.Tensor):
# Note: we do not include Tensor names when constructing TypeSpecs.
return tensor_spec.TensorSpec(value.shape, value.dtype)
if isinstance(value, composite_tensor.CompositeTensor):
return value._type_spec # pylint: disable=protected-access
# If `value` is a list and all of its elements can be represented by the same
# batchable type spec, then we can represent the entire list using a single
# type spec that captures the type accurately (unlike the `convert_to_tensor`
# fallback).
if isinstance(value, list) and value:
subspecs = [_type_spec_from_value(v) for v in value]
if isinstance(subspecs[0], BatchableTypeSpec):
merged_subspec = subspecs[0]
try:
for subspec in subspecs[1:]:
merged_subspec = merged_subspec.most_specific_compatible_type(subspec)
return merged_subspec._batch(len(subspecs)) # pylint: disable=protected-access
except (ValueError, TypeError):
pass # incompatible subspecs
for entry in reversed(_TYPE_CONVERSION_FUNCTION_REGISTRY):
type_object, converter_fn, allow_subclass = entry
if ((type(value) is type_object) or # pylint: disable=unidiomatic-typecheck
(allow_subclass and isinstance(value, type_object))):
return converter_fn(value)
_TYPE_CONVERSION_FUNCTION_REGISTRY = []
def register_type_spec_from_value_converter(type_object, converter_fn,
allow_subclass=False):
"""Registers a function for converting values with a given type to TypeSpecs.
If multiple registered `type_object`s match a value, then the most recent
registration takes precedence. Custom converters should not be defined for
`CompositeTensor`s; use `CompositeTensor._type_spec` instead.
Args:
type_object: A Python `type` object representing the type of values
accepted by `converter_fn`.
converter_fn: A function that takes one argument (an instance of the
type represented by `type_object`) and returns a `TypeSpec`.
allow_subclass: If true, then use `isinstance(value, type_object)` to
check for matches. If false, then use `type(value) is type_object`.
"""
_, type_object = tf_decorator.unwrap(type_object)
_TYPE_CONVERSION_FUNCTION_REGISTRY.append(
(type_object, converter_fn, allow_subclass))
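# A minimal, hypothetical sketch of registering a converter for a user type.
# `WrappedTensor` and `_example_register_converter` are invented for this
# illustration; real code should prefer `CompositeTensor._type_spec`.
def _example_register_converter():
  """Illustrative sketch only; not part of the public API."""

  class WrappedTensor(object):

    def __init__(self, tensor):
      self.tensor = tensor

  register_type_spec_from_value_converter(
      WrappedTensor,
      lambda value: tensor_spec.TensorSpec(value.tensor.shape,
                                           value.tensor.dtype))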
pywrap_tensorflow.RegisterType("TypeSpec", TypeSpec)
|
tensorflow-master
|
tensorflow/python/framework/type_spec.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests enabling eager execution at process level."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.platform import googletest
class OpsEnableAndDisableEagerTest(googletest.TestCase):
def setUp(self):
    # Test that eager execution can be enabled at process level.
ops.enable_eager_execution()
self.assertTrue(context.executing_eagerly())
# Calling enable eager execution a second time should not cause an error.
ops.enable_eager_execution()
self.assertTrue(context.executing_eagerly())
def tearDown(self):
    # Test that eager execution can be disabled again.
ops.disable_eager_execution()
self.assertFalse(context.executing_eagerly())
# Calling disable eager execution a second time should not cause an error.
ops.disable_eager_execution()
self.assertFalse(context.executing_eagerly())
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/ops_enable_eager_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Protobuf related tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class ProtoTest(test.TestCase):
# TODO(vrv): re-enable this test once we figure out how this can
# pass the pip install test (where the user is expected to have
# protobuf installed).
def _testLargeProto(self):
# create a constant of size > 64MB.
a = constant_op.constant(np.zeros([1024, 1024, 17]))
# Serialize the resulting graph def.
gdef = a.op.graph.as_graph_def()
serialized = gdef.SerializeToString()
unserialized = ops.Graph().as_graph_def()
# Deserialize back. Protobuf python library should support
# protos larger than 64MB.
unserialized.ParseFromString(serialized)
self.assertProtoEquals(unserialized, gdef)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/framework/proto_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to represent a device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.tf_export import tf_export
# ==============================================================================
# == Global Implementation Details =============================================
# ==============================================================================
_STRING_TO_COMPONENTS_CACHE = {}
_COMPONENTS_TO_STRING_CACHE = {}
def _as_str_or_none(inp):
return None if inp is None else str(inp)
def _as_int_or_none(inp):
return None if inp is None else int(inp)
def _as_device_str_or_none(device_type):
# For backwards compatibility only, we support lowercase variants of
# cpu and gpu but turn them into uppercase here.
if device_type in ("cpu", "gpu"):
return device_type.upper()
return _as_str_or_none(device_type)
@tf_export("DeviceSpec", v1=[])
class DeviceSpecV2(object):
"""Represents a (possibly partial) specification for a TensorFlow device.
`DeviceSpec`s are used throughout TensorFlow to describe where state is stored
and computations occur. Using `DeviceSpec` allows you to parse device spec
strings to verify their validity, merge them or compose them programmatically.
Example:
```python
# Place the operations on device "GPU:0" in the "ps" job.
device_spec = DeviceSpec(job="ps", device_type="GPU", device_index=0)
with tf.device(device_spec):
# Both my_var and squared_var will be placed on /job:ps/device:GPU:0.
my_var = tf.Variable(..., name="my_variable")
squared_var = tf.square(my_var)
```
If a `DeviceSpec` is partially specified, it will be merged with other
`DeviceSpec`s according to the scope in which it is defined. `DeviceSpec`
components defined in inner scopes take precedence over those defined in
outer scopes.
  ```python
  with tf.device(DeviceSpec(job="train")):
    with tf.device(DeviceSpec(job="ps", device_type="GPU", device_index=0)):
      # Nodes created here will be assigned to /job:ps/device:GPU:0.
    with tf.device(DeviceSpec(device_type="GPU", device_index=1)):
      # Nodes created here will be assigned to /job:train/device:GPU:1.
  ```
A `DeviceSpec` consists of 5 components -- each of
which is optionally specified:
* Job: The job name.
* Replica: The replica index.
* Task: The task index.
* Device type: The device type string (e.g. "CPU" or "GPU").
* Device index: The device index.
"""
__slots__ = ("_job", "_replica", "_task", "_device_type", "_device_index",
"_as_string", "_hash")
def __init__(self, job=None, replica=None, task=None, device_type=None,
device_index=None):
"""Create a new `DeviceSpec` object.
Args:
job: string. Optional job name.
replica: int. Optional replica index.
task: int. Optional task index.
device_type: Optional device type string (e.g. "CPU" or "GPU")
device_index: int. Optional device index. If left
unspecified, device represents 'any' device_index.
"""
self._job = _as_str_or_none(job)
self._replica = _as_int_or_none(replica)
self._task = _as_int_or_none(task)
self._device_type = _as_device_str_or_none(device_type)
self._device_index = _as_int_or_none(device_index)
self._as_string = self._components_to_string(
job=self._job, replica=self._replica, task=self._task,
device_type=self._device_type, device_index=self._device_index)
self._hash = hash(self.to_string())
def to_string(self):
"""Return a string representation of this `DeviceSpec`.
Returns:
a string of the form
/job:<name>/replica:<id>/task:<id>/device:<device_type>:<id>.
"""
return self._as_string
@classmethod
def from_string(cls, spec):
"""Construct a `DeviceSpec` from a string.
Args:
spec: a string of the form
/job:<name>/replica:<id>/task:<id>/device:CPU:<id>
or
/job:<name>/replica:<id>/task:<id>/device:GPU:<id>
as cpu and gpu are mutually exclusive.
All entries are optional.
Returns:
A DeviceSpec.
"""
return cls(*cls._string_to_components(spec))
def parse_from_string(self, spec):
"""Parse a `DeviceSpec` name into its components.
2.x behavior change:
In TensorFlow 1.x, this function mutates its own state and returns itself.
In 2.x, DeviceSpecs are immutable, and this function will return a
DeviceSpec which contains the spec.
Recommended:
```
# my_spec and my_updated_spec are unrelated.
my_spec = tf.DeviceSpec.from_string("/CPU:0")
my_updated_spec = tf.DeviceSpec.from_string("/GPU:0")
with tf.device(my_updated_spec):
...
```
Will work in 1.x and 2.x (though deprecated in 2.x):
```
my_spec = tf.DeviceSpec.from_string("/CPU:0")
my_updated_spec = my_spec.parse_from_string("/GPU:0")
with tf.device(my_updated_spec):
...
```
Will NOT work in 2.x:
```
my_spec = tf.DeviceSpec.from_string("/CPU:0")
my_spec.parse_from_string("/GPU:0") # <== Will not update my_spec
with tf.device(my_spec):
...
```
In general, `DeviceSpec.from_string` should completely replace
`DeviceSpec.parse_from_string`, and `DeviceSpec.replace` should
completely replace setting attributes directly.
Args:
spec: an optional string of the form
/job:<name>/replica:<id>/task:<id>/device:CPU:<id>
or
/job:<name>/replica:<id>/task:<id>/device:GPU:<id>
as cpu and gpu are mutually exclusive.
All entries are optional.
Returns:
The `DeviceSpec`.
Raises:
ValueError: if the spec was not valid.
"""
return self.from_string(spec)
def make_merged_spec(self, dev):
"""Returns a new DeviceSpec which incorporates `dev`.
    When combining specs, `dev` will take precedence over the current spec.
So for instance:
```
first_spec = tf.DeviceSpec(job=0, device_type="CPU")
second_spec = tf.DeviceSpec(device_type="GPU")
combined_spec = first_spec.make_merged_spec(second_spec)
```
is equivalent to:
```
combined_spec = tf.DeviceSpec(job=0, device_type="GPU")
```
Args:
dev: a `DeviceSpec`
Returns:
A new `DeviceSpec` which combines `self` and `dev`
"""
return self.__class__(*self._get_combined_properties(dev))
def replace(self, **kwargs):
"""Convenience method for making a new DeviceSpec by overriding fields.
For instance:
```
    my_spec = DeviceSpec(job="my_job", device_type="CPU")
    my_updated_spec = my_spec.replace(device_type="GPU")
    my_other_spec = my_spec.replace(device_type=None)
```
Args:
**kwargs: This method takes the same args as the DeviceSpec constructor
Returns:
A DeviceSpec with the fields specified in kwargs overridden.
"""
init_kwargs = dict(
job=self.job, replica=self.replica, task=self.task,
device_type=self.device_type, device_index=self.device_index)
    # Explicitly provided kwargs take precedence.
init_kwargs.update(kwargs)
return self.__class__(**init_kwargs)
@property
def job(self):
return self._job
@property
def replica(self):
return self._replica
@property
def task(self):
return self._task
@property
def device_type(self):
return self._device_type
@property
def device_index(self):
return self._device_index
def _get_combined_properties(self, dev):
"""Combine the current DeviceSpec with another DeviceSpec.
    The combination of DeviceSpecs gives priority to the properties of `dev`.
Args:
dev: a `DeviceSpec`
Returns:
A tuple of (job, replica, task, device_type, device_index) which
represents the combination of self and dev.
"""
return (
dev.job if dev.job is not None else self.job,
dev.replica if dev.replica is not None else self.replica,
dev.task if dev.task is not None else self.task,
dev.device_type if dev.device_type is not None else self.device_type,
dev.device_index if dev.device_index is not None else self.device_index,
)
@staticmethod
def _string_to_components(spec=None):
"""Stateless portion of device spec string parsing.
Args:
spec: An optional string specifying a device specification.
Returns:
The parsed components of `spec`. Note that the result of this function
must go through attribute setters of DeviceSpec, and should therefore NOT
be used directly.
"""
cached_result = _STRING_TO_COMPONENTS_CACHE.get(spec)
if cached_result is not None:
return cached_result
raw_spec = spec # keep a copy of the original to update the cache
job, replica, task, device_type, device_index = None, None, None, None, None
spec = spec or ""
splits = [x.split(":") for x in spec.split("/")]
for y in splits:
ly = len(y)
if y:
# NOTE(taylorrobie): these will go through setters later.
if ly == 2 and y[0] == "job":
job = y[1]
elif ly == 2 and y[0] == "replica":
replica = y[1]
elif ly == 2 and y[0] == "task":
task = y[1]
elif ((ly == 1 or ly == 2) and
((y[0].upper() == "GPU") or (y[0].upper() == "CPU"))):
if device_type is not None:
raise ValueError("Cannot specify multiple device types: %s" % spec)
device_type = y[0].upper()
if ly == 2 and y[1] != "*":
device_index = int(y[1])
elif ly == 3 and y[0] == "device":
if device_type is not None:
raise ValueError("Cannot specify multiple device types: %s" % spec)
device_type = y[1]
if y[2] != "*":
device_index = int(y[2])
elif ly and y[0] != "": # pylint: disable=g-explicit-bool-comparison
raise ValueError("Unknown attribute: '%s' in '%s'" % (y[0], spec))
output = (job, replica, task, device_type, device_index)
_STRING_TO_COMPONENTS_CACHE[raw_spec] = output
return output
@staticmethod
def _components_to_string(job, replica, task, device_type, device_index):
"""Stateless portion of `to_string` (separated to allow caching)."""
key = (job, replica, task, device_type, device_index)
cached_result = _COMPONENTS_TO_STRING_CACHE.get(key)
if cached_result is not None:
return cached_result
output = []
if job is not None:
output.append("/job:" + job)
if replica is not None:
output.append("/replica:" + str(replica))
if task is not None:
output.append("/task:" + str(task))
if device_type is not None:
device_index_string = "*"
if device_index is not None:
# Unlike the others, device_index is stored as an int.
device_index_string = str(device_index)
output.append("/device:%s:%s" % (device_type, device_index_string))
output = "".join(output)
_COMPONENTS_TO_STRING_CACHE[key] = output
return output
def __eq__(self, other):
"""Checks if the `other` DeviceSpec is same as the current instance, eg have
same value for all the internal fields.
Args:
other: Another DeviceSpec
Returns:
      `True` if `other` is also a DeviceSpec instance and has the same value
      as the current instance; `False` otherwise.
"""
return (isinstance(other, self.__class__) and
self.to_string() == other.to_string())
def __hash__(self):
return self._hash
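# A short, hypothetical usage sketch for DeviceSpecV2; the `_example_*`
# helper below is not part of TensorFlow and exists only to demonstrate
# parsing and merging.
def _example_device_spec_usage():
  """Illustrative sketch only; not part of the public API."""
  spec = DeviceSpecV2.from_string("/job:ps/replica:0/task:0/device:GPU:1")
  assert (spec.job, spec.device_type, spec.device_index) == ("ps", "GPU", 1)
  # Merging gives precedence to the fields of the argument:
  merged = spec.make_merged_spec(
      DeviceSpecV2(device_type="CPU", device_index=0))
  assert merged.to_string() == "/job:ps/replica:0/task:0/device:CPU:0"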
@tf_export(v1=["DeviceSpec"]) # pylint: disable=missing-docstring
class DeviceSpecV1(DeviceSpecV2):
__doc__ = DeviceSpecV2.__doc__
__slots__ = DeviceSpecV2.__slots__
@DeviceSpecV2.job.setter
def job(self, job):
self._job = _as_str_or_none(job)
self._as_string, self._hash = None, None
@DeviceSpecV2.replica.setter
def replica(self, replica):
self._replica = _as_int_or_none(replica)
self._as_string, self._hash = None, None
@DeviceSpecV2.task.setter
def task(self, task):
self._task = _as_int_or_none(task)
self._as_string, self._hash = None, None
@DeviceSpecV2.device_type.setter
def device_type(self, device_type):
self._device_type = _as_device_str_or_none(device_type)
self._as_string, self._hash = None, None
@DeviceSpecV2.device_index.setter
def device_index(self, device_index):
self._device_index = _as_int_or_none(device_index)
self._as_string, self._hash = None, None
def __hash__(self):
if self._hash is None:
self._hash = hash(self.to_string())
return self._hash
def to_string(self):
if self._as_string is None:
self._as_string = self._components_to_string(
job=self.job, replica=self.replica, task=self.task,
device_type=self.device_type, device_index=self.device_index)
return self._as_string
def parse_from_string(self, spec):
(self.job, self.replica, self.task, self.device_type, self.device_index
) = self._string_to_components(spec)
return self
def merge_from(self, dev):
"""Merge the properties of "dev" into this `DeviceSpec`.
Note: Will be removed in TensorFlow 2.x since DeviceSpecs will become
immutable.
Args:
dev: a `DeviceSpec`.
"""
(self.job, self.replica, self.task, self.device_type, self.device_index
) = self._get_combined_properties(dev)
# Use parent class docstrings for public methods.
to_string.__doc__ = DeviceSpecV2.to_string.__doc__
parse_from_string.__doc__ = DeviceSpecV2.parse_from_string.__doc__
|
tensorflow-master
|
tensorflow/python/framework/device_spec.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for c_api utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class ApiDefMapTest(test_util.TensorFlowTestCase):
def testApiDefMapOpNames(self):
api_def_map = c_api_util.ApiDefMap()
self.assertIn("Add", api_def_map.op_names())
def testApiDefMapGet(self):
api_def_map = c_api_util.ApiDefMap()
op_def = api_def_map.get_op_def("Add")
self.assertEqual(op_def.name, "Add")
api_def = api_def_map.get_api_def("Add")
self.assertEqual(api_def.graph_op_name, "Add")
def testApiDefMapPutThenGet(self):
api_def_map = c_api_util.ApiDefMap()
api_def_text = """
op {
graph_op_name: "Add"
summary: "Returns x + y element-wise."
description: <<END
*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
END
}
"""
api_def_map.put_api_def(api_def_text)
api_def = api_def_map.get_api_def("Add")
self.assertEqual(api_def.graph_op_name, "Add")
self.assertEqual(api_def.summary, "Returns x + y element-wise.")
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/c_api_util_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to manipulate a tensor graph in python.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.framework.graph_util_impl import convert_variables_to_constants
from tensorflow.python.framework.graph_util_impl import extract_sub_graph
from tensorflow.python.framework.graph_util_impl import must_run_on_cpu
from tensorflow.python.framework.graph_util_impl import remove_training_nodes
from tensorflow.python.framework.graph_util_impl import tensor_shape_from_node_def_name
# pylint: enable=unused-import
|
tensorflow-master
|
tensorflow/python/framework/graph_util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for querying registered kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import kernel_def_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.util import compat
def get_all_registered_kernels():
"""Returns a KernelList proto of all registered kernels.
"""
buf = c_api.TF_GetAllRegisteredKernels()
data = c_api.TF_GetBuffer(buf)
kernel_list = kernel_def_pb2.KernelList()
kernel_list.ParseFromString(compat.as_bytes(data))
return kernel_list
def get_registered_kernels_for_op(name):
"""Returns a KernelList proto of registered kernels for a given op.
  Args:
    name: A string representing the name of the op whose kernels to retrieve.
  Returns:
    A KernelList proto of kernels registered for the given op.
  """
buf = c_api.TF_GetRegisteredKernelsForOp(name)
data = c_api.TF_GetBuffer(buf)
kernel_list = kernel_def_pb2.KernelList()
kernel_list.ParseFromString(compat.as_bytes(data))
return kernel_list
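# A hedged usage sketch: listing the device types with a kernel registered
# for MatMul. `_example_registered_kernels` is hypothetical, and the result
# depends on how TensorFlow was built.
def _example_registered_kernels():
  """Illustrative sketch only; not part of the public API."""
  kernels = get_registered_kernels_for_op("MatMul")
  return sorted({k.device_type for k in kernels.kernel})  # e.g. ["CPU", "GPU"]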
|
tensorflow-master
|
tensorflow/python/framework/kernels.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparse tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_like
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
_TensorLike = tensor_like._TensorLike
_eval_using_default_session = ops._eval_using_default_session
_override_helper = ops._override_helper
# pylint: enable=protected-access
@tf_export("sparse.SparseTensor", "SparseTensor")
class SparseTensor(_TensorLike, composite_tensor.CompositeTensor):
"""Represents a sparse tensor.
TensorFlow represents a sparse tensor as three separate dense tensors:
`indices`, `values`, and `dense_shape`. In Python, the three tensors are
collected into a `SparseTensor` class for ease of use. If you have separate
`indices`, `values`, and `dense_shape` tensors, wrap them in a `SparseTensor`
object before passing to the ops below.
Concretely, the sparse tensor `SparseTensor(indices, values, dense_shape)`
comprises the following components, where `N` and `ndims` are the number
of values and number of dimensions in the `SparseTensor`, respectively:
* `indices`: A 2-D int64 tensor of dense_shape `[N, ndims]`, which specifies
the indices of the elements in the sparse tensor that contain nonzero
values (elements are zero-indexed). For example, `indices=[[1,3], [2,4]]`
specifies that the elements with indexes of [1,3] and [2,4] have
nonzero values.
* `values`: A 1-D tensor of any type and dense_shape `[N]`, which supplies the
values for each element in `indices`. For example, given
`indices=[[1,3], [2,4]]`, the parameter `values=[18, 3.6]` specifies
that element [1,3] of the sparse tensor has a value of 18, and element
[2,4] of the tensor has a value of 3.6.
* `dense_shape`: A 1-D int64 tensor of dense_shape `[ndims]`, which specifies
the dense_shape of the sparse tensor. Takes a list indicating the number of
elements in each dimension. For example, `dense_shape=[3,6]` specifies a
two-dimensional 3x6 tensor, `dense_shape=[2,3,4]` specifies a
three-dimensional 2x3x4 tensor, and `dense_shape=[9]` specifies a
one-dimensional tensor with 9 elements.
The corresponding dense tensor satisfies:
```python
dense.shape = dense_shape
dense[tuple(indices[i])] = values[i]
```
By convention, `indices` should be sorted in row-major order (or equivalently
lexicographic order on the tuples `indices[i]`). This is not enforced when
`SparseTensor` objects are constructed, but most ops assume correct ordering.
If the ordering of sparse tensor `st` is wrong, a fixed version can be
obtained by calling `tf.sparse.reorder(st)`.
Example: The sparse tensor
```python
SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
```
represents the dense tensor
```python
[[1, 0, 0, 0]
[0, 0, 2, 0]
[0, 0, 0, 0]]
```
"""
@classmethod
def from_value(cls, sparse_tensor_value):
if not is_sparse(sparse_tensor_value):
raise TypeError("Neither a SparseTensor nor SparseTensorValue: %s." %
sparse_tensor_value)
return SparseTensor(
indices=sparse_tensor_value.indices,
values=sparse_tensor_value.values,
dense_shape=sparse_tensor_value.dense_shape)
def __init__(self, indices, values, dense_shape):
"""Creates a `SparseTensor`.
Args:
indices: A 2-D int64 tensor of shape `[N, ndims]`.
values: A 1-D tensor of any type and shape `[N]`.
dense_shape: A 1-D int64 tensor of shape `[ndims]`.
"""
with ops.name_scope(None, "SparseTensor", [indices, values, dense_shape]):
indices = ops.convert_to_tensor(
indices, name="indices", dtype=dtypes.int64)
# TODO(touts): Consider adding mutable_values() when 'values'
# is a VariableOp and updating users of SparseTensor.
values = ops.internal_convert_to_tensor(values, name="values")
dense_shape = ops.convert_to_tensor(
dense_shape, name="dense_shape", dtype=dtypes.int64)
self._indices = indices
self._values = values
self._dense_shape = dense_shape
indices_shape = indices.shape.with_rank(2)
values_shape = values.shape.with_rank(1)
dense_shape_shape = dense_shape.shape.with_rank(1)
# Assert number of rows in indices match the number of elements in values.
indices_shape.dims[0].merge_with(values_shape.dims[0])
# Assert number of columns in indices matches the number of elements in
# dense_shape.
indices_shape.dims[1].merge_with(dense_shape_shape.dims[0])
def get_shape(self):
"""Get the `TensorShape` representing the shape of the dense tensor.
Returns:
A `TensorShape` object.
"""
return tensor_util.constant_value_as_shape(self._dense_shape)
@property
def indices(self):
"""The indices of non-zero values in the represented dense tensor.
Returns:
A 2-D Tensor of int64 with dense_shape `[N, ndims]`, where `N` is the
number of non-zero values in the tensor, and `ndims` is the rank.
"""
return self._indices
@property
def values(self):
"""The non-zero values in the represented dense tensor.
Returns:
A 1-D Tensor of any data type.
"""
return self._values
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self._values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._values.dtype
@property
def dense_shape(self):
"""A 1-D Tensor of int64 representing the shape of the dense tensor."""
return self._dense_shape
@property
def shape(self):
"""Get the `TensorShape` representing the shape of the dense tensor.
Returns:
A `TensorShape` object.
"""
return tensor_util.constant_value_as_shape(self._dense_shape)
@property
def graph(self):
"""The `Graph` that contains the index, value, and dense_shape tensors."""
return self._indices.graph
def __str__(self):
return "SparseTensor(indices=%s, values=%s, dense_shape=%s)" % (
self._indices, self._values, self._dense_shape)
def eval(self, feed_dict=None, session=None):
"""Evaluates this sparse tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `SparseTensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this sparse
tensor. If none, the default session will be used.
Returns:
A `SparseTensorValue` object.
"""
indices, values, dense_shape = _eval_using_default_session(
[self.indices, self.values, self.dense_shape], feed_dict, self.graph,
session)
return SparseTensorValue(indices, values, dense_shape)
@staticmethod
def _override_operator(operator, func):
_override_helper(SparseTensor, operator, func)
@property
def _type_spec(self):
return SparseTensorSpec(self.shape, self.dtype)
def _shape_invariant_to_type_spec(self, shape):
# From the tf.while_loop docs: "If a loop variable is a SparseTensor, the
# shape invariant must be TensorShape([r]) where r is the rank of the dense
# tensor represented by the sparse tensor. It means the shapes of the three
# tensors of the SparseTensor are ([None], [None, r], [r]). NOTE: The shape
# invariant here is the shape of the SparseTensor.dense_shape property. It
    # must be the shape of a vector."
if shape.ndims is not None and shape.ndims != 1:
raise ValueError("Expected a shape with 1 dimension")
rank = tensor_shape.dimension_value(shape[0])
return SparseTensorSpec(tensor_shape.unknown_shape(rank), self.dtype)
def consumers(self):
return self._consumers()
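# A minimal eager-mode sketch of the class above; `_example_sparse_tensor`
# is hypothetical and uses only the public tf.sparse API.
def _example_sparse_tensor():
  """Illustrative sketch only; not part of the public API."""
  import tensorflow as tf  # Standalone snippet.
  st = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2],
                       dense_shape=[3, 4])
  # tf.sparse.to_dense materializes the dense tensor described above:
  # [[1 0 0 0]
  #  [0 0 2 0]
  #  [0 0 0 0]]
  return tf.sparse.to_dense(st)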
SparseTensorValue = collections.namedtuple("SparseTensorValue",
["indices", "values", "dense_shape"])
tf_export(v1=["SparseTensorValue"])(SparseTensorValue)
pywrap_tensorflow.RegisterType("SparseTensorValue", SparseTensorValue)
@tf_export("SparseTensorSpec")
class SparseTensorSpec(type_spec.BatchableTypeSpec):
"""Type specification for a `tf.SparseTensor`."""
__slots__ = ["_shape", "_dtype"]
value_type = property(lambda self: SparseTensor)
def __init__(self, shape=None, dtype=dtypes.float32):
"""Constructs a type specification for a `tf.SparseTensor`.
Args:
shape: The dense shape of the `SparseTensor`, or `None` to allow
any dense shape.
dtype: `tf.DType` of values in the `SparseTensor`.
"""
self._shape = tensor_shape.as_shape(shape)
self._dtype = dtypes.as_dtype(dtype)
def _serialize(self):
return (self._shape, self._dtype)
@property
def _component_specs(self):
rank = self._shape.ndims
num_values = None
return [
tensor_spec.TensorSpec([num_values, rank], dtypes.int64),
tensor_spec.TensorSpec([num_values], self._dtype),
tensor_spec.TensorSpec([rank], dtypes.int64)]
def _to_components(self, value):
if isinstance(value, SparseTensorValue):
value = SparseTensor.from_value(value)
return [value.indices, value.values, value.dense_shape]
def _from_components(self, tensor_list):
return SparseTensor(*tensor_list)
# The SparseTensorSpec tensor_list encoding uses (de)serialize_sparse ops
# to (un)box the component tensors in a way that allows for batching &
# unbatching.
@property
def _flat_tensor_specs(self):
# NOTE(mrry): The default flat shape of a boxed `SparseTensor` is `(3,)`,
# but a `SparseTensorSpec` can also represent a batch of boxed
# `SparseTensor` objects with shape `(..., 3)` (and batches of batches,
# etc.), so the flat shape must be unknown.
return [tensor_spec.TensorSpec(None, dtypes.variant)]
def _to_tensor_list(self, value):
value = SparseTensor.from_value(value)
return [gen_sparse_ops.serialize_sparse(
value.indices, value.values, value.dense_shape,
out_type=dtypes.variant)]
def _to_batched_tensor_list(self, value):
dense_shape = tensor_util.constant_value_as_shape(value.dense_shape)
if self._shape.merge_with(dense_shape).ndims == 0:
raise ValueError(
"Unbatching a sparse tensor is only supported for rank >= 1")
return [gen_sparse_ops.serialize_many_sparse(
value.indices, value.values, value.dense_shape,
out_type=dtypes.variant)]
def _from_compatible_tensor_list(self, tensor_list):
tensor_list = gen_sparse_ops.deserialize_sparse(tensor_list[0], self._dtype)
result = SparseTensor(*tensor_list)
rank = self._shape.ndims
result.indices.set_shape([None, rank])
result.dense_shape.set_shape([rank])
return result
def _batch(self, batch_size):
return SparseTensorSpec(
tensor_shape.TensorShape([batch_size]).concatenate(self._shape),
self._dtype)
def _unbatch(self):
if self._shape.ndims == 0:
raise ValueError("Unbatching a tensor is only supported for rank >= 1")
return SparseTensorSpec(self._shape[1:], self._dtype)
def _to_legacy_output_types(self):
return self._dtype
def _to_legacy_output_shapes(self):
return self._shape
def _to_legacy_output_classes(self):
return SparseTensor
@classmethod
def from_value(cls, value):
if isinstance(value, SparseTensor):
return cls(value.shape, value.dtype)
if isinstance(value, SparseTensorValue):
if isinstance(value.values, np.ndarray):
return cls(value.dense_shape, value.values.dtype)
else:
return cls.from_value(SparseTensor.from_value(value))
else:
raise TypeError("Expected SparseTensor or SparseTensorValue")
# TODO(b/133606651) Delete the SparseTensor registration when CompositeTensor
# is updated to define a _type_spec field (since registration will be
# automatic). Do *not* delete the SparseTensorValue registration.
type_spec.register_type_spec_from_value_converter(
SparseTensor, SparseTensorSpec.from_value)
type_spec.register_type_spec_from_value_converter(
SparseTensorValue, SparseTensorSpec.from_value)
@tf_export(v1=["convert_to_tensor_or_sparse_tensor"])
def convert_to_tensor_or_sparse_tensor(value, dtype=None, name=None):
"""Converts value to a `SparseTensor` or `Tensor`.
Args:
value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a
registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `SparseTensor` or `Tensor` based on `value`.
Raises:
RuntimeError: If result type is incompatible with `dtype`.
"""
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, SparseTensorValue):
value = SparseTensor.from_value(value)
if isinstance(value, SparseTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise RuntimeError("Sparse dtype: requested = %s, actual = %s" %
(dtype.name, value.dtype.name))
return value
return ops.internal_convert_to_tensor(value, dtype=dtype, name=name)
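# A brief, hypothetical sketch of the conversion above: SparseTensors pass
# through unchanged; everything else takes the regular tensor conversion
# path. `_example_convert` is not part of TensorFlow.
def _example_convert():
  """Illustrative sketch only; not part of the public API."""
  st = SparseTensor(indices=[[0, 0]], values=[1.0], dense_shape=[2, 2])
  assert convert_to_tensor_or_sparse_tensor(st) is st  # Pass-through.
  return convert_to_tensor_or_sparse_tensor([1.0, 2.0])  # A dense Tensor.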
def is_sparse(x):
"""Check whether `x` is sparse.
Check whether an object is a `tf.SparseTensor` or
`tf.compat.v1.SparseTensorValue`.
Args:
x: A python object to check.
Returns:
`True` iff `x` is a `tf.SparseTensor` or `tf.compat.v1.SparseTensorValue`.
"""
return isinstance(x, (SparseTensor, SparseTensorValue))
|
tensorflow-master
|
tensorflow/python/framework/sparse_tensor.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""smart_cond and related utilties."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
def smart_cond(pred, true_fn=None, false_fn=None, name=None):
"""Return either `true_fn()` if predicate `pred` is true else `false_fn()`.
If `pred` is a bool or has a constant value, we return either `true_fn()`
or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.
Arguments:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`.
Raises:
TypeError: If `true_fn` or `false_fn` is not callable.
"""
if not callable(true_fn):
raise TypeError("`true_fn` must be callable.")
if not callable(false_fn):
raise TypeError("`false_fn` must be callable.")
pred_value = smart_constant_value(pred)
if pred_value is not None:
if pred_value:
return true_fn()
else:
return false_fn()
else:
return control_flow_ops.cond(pred, true_fn=true_fn, false_fn=false_fn,
name=name)
def smart_constant_value(pred):
"""Return the bool value for `pred`, or None if `pred` had a dynamic value.
Arguments:
pred: A scalar, either a Python bool or tensor.
Returns:
True or False if `pred` has a constant boolean value, None otherwise.
Raises:
TypeError: If `pred` is not a Tensor or bool.
"""
if pred in {0, 1}: # Accept 1/0 as valid boolean values
pred_value = bool(pred)
elif isinstance(pred, bool):
pred_value = pred
elif isinstance(pred, ops.Tensor):
pred_value = tensor_util.constant_value(pred)
# TODO(skyewm): consider folding this into tensor_util.constant_value.
# pylint: disable=protected-access
if pred_value is None:
pred_value = c_api.TF_TryEvaluateConstant_wrapper(pred.graph._c_graph,
pred._as_tf_output())
# pylint: enable=protected-access
else:
raise TypeError("`pred` must be a Tensor, or a Python bool, or 1 or 0. "
"Found instead: %s" % pred)
return pred_value
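# A small, hypothetical sketch of `smart_cond` with a statically-known
# predicate: only one branch runs and no `tf.cond` op is built.
# `_example_smart_cond` is not part of TensorFlow.
def _example_smart_cond():
  """Illustrative sketch only; not part of the public API."""
  import tensorflow as tf  # Standalone snippet.
  x = tf.constant(2)
  return smart_cond(True, true_fn=lambda: x * 2, false_fn=lambda: x + 1)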
def smart_case(pred_fn_pairs, default=None, exclusive=False, name="smart_case"):
"""Like tf.case, except attempts to statically evaluate predicates.
If any predicate in `pred_fn_pairs` is a bool or has a constant value, the
associated callable will be called or omitted depending on its value.
Otherwise this functions like tf.case.
Args:
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a
callable which returns a list of tensors.
default: Optional callable that returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
name: A name for this operation (optional).
Returns:
The tensors returned by the first pair whose predicate evaluated to True, or
those returned by `default` if none does.
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
"""
return control_flow_ops._case_helper( # pylint: disable=protected-access
smart_cond, pred_fn_pairs, default, exclusive, name,
allow_python_preds=True)
|
tensorflow-master
|
tensorflow/python/framework/smart_cond.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes for tensor shape inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python import tf2
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import dtypes
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
_TENSORSHAPE_V2_OVERRIDE = None
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/v2_tensorshape",
"Whether tensor_shape.enable_v2_tensorshape() is called.")
@tf_export(v1=["enable_v2_tensorshape"])
def enable_v2_tensorshape():
"""In TensorFlow 2.0, iterating over a TensorShape instance returns values.
This enables the new behavior.
Concretely, `tensor_shape[i]` returned a Dimension instance in V1, but
  in V2 it returns either an integer or None.
Examples:
```
#######################
# If you had this in V1:
value = tensor_shape[i].value
# Do this in V2 instead:
value = tensor_shape[i]
#######################
# If you had this in V1:
for dim in tensor_shape:
value = dim.value
print(value)
# Do this in V2 instead:
for value in tensor_shape:
print(value)
#######################
# If you had this in V1:
dim = tensor_shape[i]
dim.assert_is_compatible_with(other_shape) # or using any other shape method
# Do this in V2 instead:
if tensor_shape.rank is None:
dim = Dimension(None)
else:
dim = tensor_shape.dims[i]
dim.assert_is_compatible_with(other_shape) # or using any other shape method
# The V2 suggestion above is more explicit, which will save you from
# the following trap (present in V1):
# you might do in-place modifications to `dim` and expect them to be reflected
# in `tensor_shape[i]`, but they would not be.
```
"""
global _TENSORSHAPE_V2_OVERRIDE # pylint: disable=invalid-name
_TENSORSHAPE_V2_OVERRIDE = True
_api_usage_gauge.get_cell().set(True)
@tf_export(v1=["disable_v2_tensorshape"])
def disable_v2_tensorshape():
"""Disables the V2 TensorShape behavior and reverts to V1 behavior.
See docstring for `enable_v2_tensorshape` for details about the new behavior.
"""
global _TENSORSHAPE_V2_OVERRIDE # pylint: disable=invalid-name
_TENSORSHAPE_V2_OVERRIDE = False
_api_usage_gauge.get_cell().set(False)
@tf_export(
"compat.dimension_value", v1=["dimension_value", "compat.dimension_value"])
def dimension_value(dimension):
"""Compatibility utility required to allow for both V1 and V2 behavior in TF.
Until the release of TF 2.0, we need the legacy behavior of `TensorShape` to
coexist with the new behavior. This utility is a bridge between the two.
When accessing the value of a TensorShape dimension,
use this utility, like this:
```
# If you had this in your V1 code:
value = tensor_shape[i].value
# Use `dimension_value` as direct replacement compatible with both V1 & V2:
value = dimension_value(tensor_shape[i])
# This would be the V2 equivalent:
value = tensor_shape[i] # Warning: this will return the dim value in V2!
```
Arguments:
dimension: Either a `Dimension` instance, an integer, or None.
Returns:
A plain value, i.e. an integer or None.
"""
if isinstance(dimension, Dimension):
return dimension.value
return dimension
@tf_export(
"compat.dimension_at_index",
v1=["dimension_at_index", "compat.dimension_at_index"])
def dimension_at_index(shape, index):
"""Compatibility utility required to allow for both V1 and V2 behavior in TF.
Until the release of TF 2.0, we need the legacy behavior of `TensorShape` to
coexist with the new behavior. This utility is a bridge between the two.
If you want to retrieve the Dimension instance corresponding to a certain
index in a TensorShape instance, use this utility, like this:
```
# If you had this in your V1 code:
dim = tensor_shape[i]
# Use `dimension_at_index` as direct replacement compatible with both V1 & V2:
dim = dimension_at_index(tensor_shape, i)
# Another possibility would be this, but WARNING: it only works if the
# tensor_shape instance has a defined rank.
dim = tensor_shape.dims[i] # `dims` may be None if the rank is undefined!
# In native V2 code, we recommend instead being more explicit:
if tensor_shape.rank is None:
dim = Dimension(None)
else:
dim = tensor_shape.dims[i]
# Being more explicit will save you from the following trap (present in V1):
# you might do in-place modifications to `dim` and expect them to be reflected
# in `tensor_shape[i]`, but they would not be (as the Dimension object was
  # instantiated on the fly).
```
Arguments:
shape: A TensorShape instance.
index: An integer index.
Returns:
A dimension object.
"""
assert isinstance(shape, TensorShape)
if shape.rank is None:
return Dimension(None)
else:
return shape.dims[index]
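# A compact, hypothetical sketch of the two compatibility helpers above;
# `_example_dimension_helpers` is not part of TensorFlow. The helpers behave
# the same whether `shape[i]` yields a Dimension (V1) or a bare int (V2).
def _example_dimension_helpers():
  """Illustrative sketch only; not part of the public API."""
  shape = TensorShape([None, 28, 28, 3])
  assert dimension_value(shape[1]) == 28
  assert dimension_at_index(shape, 0).value is None  # Dimension(None).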
@tf_export(v1=["Dimension"])
class Dimension(object):
"""Represents the value of one dimension in a TensorShape."""
def __init__(self, value):
"""Creates a new Dimension with the given value."""
if value is None:
self._value = None
elif isinstance(value, Dimension):
self._value = value.value
elif isinstance(value, dtypes.DType):
raise TypeError("Cannot convert %s to Dimension" % value)
else:
self._value = int(value)
if (not isinstance(value, compat.bytes_or_text_types) and
self._value != value):
raise ValueError("Ambiguous dimension: %s" % value)
if self._value < 0:
raise ValueError("Dimension %d must be >= 0" % self._value)
def __repr__(self):
return "Dimension(%s)" % repr(self._value)
def __str__(self):
value = self._value
return "?" if value is None else str(value)
def __eq__(self, other):
"""Returns true if `other` has the same known value as this Dimension."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value == other.value
def __ne__(self, other):
"""Returns true if `other` has a different known value from `self`."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value != other.value
def __int__(self):
return self._value
# This is needed for Windows.
# See https://github.com/tensorflow/tensorflow/pull/9780
def __long__(self):
return self._value
def __index__(self):
# Allow use in Python 3 range
return self._value
@property
def value(self):
"""The value of this dimension, or None if it is unknown."""
return self._value
def is_compatible_with(self, other):
"""Returns true if `other` is compatible with this Dimension.
Two known Dimensions are compatible if they have the same value.
An unknown Dimension is compatible with all other Dimensions.
Args:
other: Another Dimension.
Returns:
True if this Dimension and `other` are compatible.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
return (self._value is None or other.value is None or
self._value == other.value)
def assert_is_compatible_with(self, other):
"""Raises an exception if `other` is not compatible with this Dimension.
Args:
other: Another Dimension.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
if not self.is_compatible_with(other):
raise ValueError("Dimensions %s and %s are not compatible" %
(self, other))
def merge_with(self, other):
"""Returns a Dimension that combines the information in `self` and `other`.
Dimensions are combined as follows:
```python
tf.compat.v1.Dimension(n) .merge_with(tf.compat.v1.Dimension(n)) ==
tf.compat.v1.Dimension(n)
tf.compat.v1.Dimension(n) .merge_with(tf.compat.v1.Dimension(None)) ==
tf.compat.v1.Dimension(n)
tf.compat.v1.Dimension(None).merge_with(tf.compat.v1.Dimension(n)) ==
tf.compat.v1.Dimension(n)
# equivalent to tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None).merge_with(tf.compat.v1.Dimension(None))
# raises ValueError for n != m
tf.compat.v1.Dimension(n) .merge_with(tf.compat.v1.Dimension(m))
```
Args:
other: Another Dimension.
Returns:
A Dimension containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
self.assert_is_compatible_with(other)
if self._value is None:
return Dimension(other.value)
else:
return Dimension(self._value)
def __add__(self, other):
"""Returns the sum of `self` and `other`.
Dimensions are summed as follows:
```python
tf.compat.v1.Dimension(m) + tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m + n)
tf.compat.v1.Dimension(m) + tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) + tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) + tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value + other.value)
def __radd__(self, other):
"""Returns the sum of `other` and `self`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
return self + other
def __sub__(self, other):
"""Returns the subtraction of `other` from `self`.
Dimensions are subtracted as follows:
```python
tf.compat.v1.Dimension(m) - tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m - n)
tf.compat.v1.Dimension(m) - tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) - tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) - tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the subtraction of `other` from `self`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value - other.value)
def __rsub__(self, other):
"""Returns the subtraction of `self` from `other`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the subtraction of `self` from `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(other.value - self._value)
def __mul__(self, other):
"""Returns the product of `self` and `other`.
    Dimensions are multiplied as follows:
```python
tf.compat.v1.Dimension(m) * tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m * n)
tf.compat.v1.Dimension(m) * tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) * tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) * tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the product of `self` and `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value * other.value)
def __rmul__(self, other):
"""Returns the product of `self` and `other`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the product of `self` and `other`.
"""
return self * other
def __floordiv__(self, other):
"""Returns the quotient of `self` and `other` rounded down.
Dimensions are divided as follows:
```python
tf.compat.v1.Dimension(m) // tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m // n)
tf.compat.v1.Dimension(m) // tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) // tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) // tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value // other.value)
def __rfloordiv__(self, other):
"""Returns the quotient of `other` and `self` rounded down.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(other.value // self._value)
def __div__(self, other):
"""DEPRECATED: Use `__floordiv__` via `x // y` instead.
This function exists only for backwards compatibility purposes; new code
should use `__floordiv__` via the syntax `x // y`. Using `x // y`
communicates clearly that the result rounds down, and is forward compatible
to Python 3.
Args:
other: Another `Dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
return self // other
def __rdiv__(self, other):
"""Use `__floordiv__` via `x // y` instead.
This function exists only to have a better error message. Instead of:
`TypeError: unsupported operand type(s) for /: 'int' and 'Dimension'`,
this function will explicitly call for usage of `//` instead.
Args:
other: Another `Dimension`.
Raises:
TypeError.
"""
raise TypeError("unsupported operand type(s) for /: '{}' and 'Dimension', "
"please use // instead".format(type(other).__name__))
def __truediv__(self, other):
"""Use `__floordiv__` via `x // y` instead.
This function exists only to provide a better error message. Instead of the
generic `TypeError: unsupported operand type(s) for /: 'Dimension' and 'int'`,
it explicitly directs the user to use `//`.
Args:
other: Another `Dimension`.
Raises:
TypeError.
"""
raise TypeError("unsupported operand type(s) for /: 'Dimension' and '{}', "
"please use // instead".format(type(other).__name__))
def __rtruediv__(self, other):
"""Use `__floordiv__` via `x // y` instead.
This function exists only to provide a better error message. Instead of the
generic `TypeError: unsupported operand type(s) for /: 'int' and 'Dimension'`,
it explicitly directs the user to use `//`.
Args:
other: Another `Dimension`.
Raises:
TypeError.
"""
raise TypeError("unsupported operand type(s) for /: '{}' and 'Dimension', "
"please use // instead".format(type(other).__name__))
def __mod__(self, other):
"""Returns `self` modulo `other`.
Dimension moduli are computed as follows:
```python
tf.compat.v1.Dimension(m) % tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m % n)
tf.compat.v1.Dimension(m) % tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) % tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) % tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is `self` modulo `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value % other.value)
def __rmod__(self, other):
"""Returns `other` modulo `self`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is `other` modulo `self`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
return other % self
def __lt__(self, other):
"""Returns True if `self` is known to be less than `other`.
Dimensions are compared as follows:
```python
(tf.compat.v1.Dimension(m) < tf.compat.v1.Dimension(n)) == (m < n)
(tf.compat.v1.Dimension(m) < tf.compat.v1.Dimension(None)) == None
(tf.compat.v1.Dimension(None) < tf.compat.v1.Dimension(n)) == None
(tf.compat.v1.Dimension(None) < tf.compat.v1.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value < other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value < other.value
def __le__(self, other):
"""Returns True if `self` is known to be less than or equal to `other`.
Dimensions are compared as follows:
```python
(tf.compat.v1.Dimension(m) <= tf.compat.v1.Dimension(n)) == (m <= n)
(tf.compat.v1.Dimension(m) <= tf.compat.v1.Dimension(None)) == None
(tf.compat.v1.Dimension(None) <= tf.compat.v1.Dimension(n)) == None
(tf.compat.v1.Dimension(None) <= tf.compat.v1.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value <= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value <= other.value
def __gt__(self, other):
"""Returns True if `self` is known to be greater than `other`.
Dimensions are compared as follows:
```python
(tf.compat.v1.Dimension(m) > tf.compat.v1.Dimension(n)) == (m > n)
(tf.compat.v1.Dimension(m) > tf.compat.v1.Dimension(None)) == None
(tf.compat.v1.Dimension(None) > tf.compat.v1.Dimension(n)) == None
(tf.compat.v1.Dimension(None) > tf.compat.v1.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value > other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value > other.value
def __ge__(self, other):
"""Returns True if `self` is known to be greater than or equal to `other`.
Dimensions are compared as follows:
```python
(tf.compat.v1.Dimension(m) >= tf.compat.v1.Dimension(n)) == (m >= n)
(tf.compat.v1.Dimension(m) >= tf.compat.v1.Dimension(None)) == None
(tf.compat.v1.Dimension(None) >= tf.compat.v1.Dimension(n)) == None
(tf.compat.v1.Dimension(None) >= tf.compat.v1.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value >= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value >= other.value
def __reduce__(self):
return Dimension, (self._value,)
def as_dimension(value):
"""Converts the given value to a Dimension.
A Dimension input will be returned unmodified.
An input of `None` will be converted to an unknown Dimension.
An integer input will be converted to a Dimension with that value.
Args:
value: The value to be converted.
Returns:
A Dimension corresponding to the given value.
"""
if isinstance(value, Dimension):
return value
else:
return Dimension(value)
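# Illustrative sketch, not part of the original module: how `as_dimension`
# and `Dimension` arithmetic propagate unknown values. The name
# `_demo_dimension_arithmetic` is hypothetical and exists only for
# illustration; it uses nothing beyond the names defined above.
def _demo_dimension_arithmetic():
  known = as_dimension(8) * as_dimension(2)  # Dimension(16)
  unknown = as_dimension(None) // as_dimension(4)  # Dimension(None)
  # Comparisons return None (not False) when either operand is unknown.
  ordering = Dimension(3) < Dimension(None)  # None
  return known, unknown, ordering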
@tf_export("TensorShape")
class TensorShape(object):
"""Represents the shape of a `Tensor`.
A `TensorShape` represents a possibly-partial shape specification for a
`Tensor`. It may be one of the following:
* *Fully-known shape:* has a known number of dimensions and a known size
for each dimension. e.g. `TensorShape([16, 256])`
* *Partially-known shape:* has a known number of dimensions, and an unknown
size for one or more dimensions. e.g. `TensorShape([None, 256])`
* *Unknown shape:* has an unknown number of dimensions, and an unknown
size in all dimensions. e.g. `TensorShape(None)`
If a tensor is produced by an operation of type `"Foo"`, its shape
may be inferred if there is a registered shape function for
`"Foo"`. See [Shape
functions](https://tensorflow.org/extend/adding_an_op#shape_functions_in_c)
for details of shape functions and how to register them. Alternatively,
the shape may be set explicitly using `tf.Tensor.set_shape`.
"""
def __init__(self, dims):
"""Creates a new TensorShape with the given dimensions.
Args:
dims: A list of Dimensions, or None if the shape is unspecified.
Raises:
TypeError: If dims cannot be converted to a list of dimensions.
"""
if dims is None:
self._dims = None
elif isinstance(dims, compat.bytes_or_text_types):
raise TypeError("A string has ambiguous TensorShape, please wrap in a "
"list or convert to an int: %s" % dims)
elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):
if dims.unknown_rank:
self._dims = None
else:
self._dims = [
# Protos store variable-size dimensions as -1
as_dimension(dim.size if dim.size != -1 else None)
for dim in dims.dim
]
elif isinstance(dims, TensorShape):
self._dims = dims.dims
else:
try:
dims_iter = iter(dims)
except TypeError:
# Treat as a singleton dimension
self._dims = [as_dimension(dims)]
else:
# Got a list of dimensions
self._dims = [as_dimension(d) for d in dims_iter]
@property
def _v2_behavior(self):
if _TENSORSHAPE_V2_OVERRIDE is None:
return tf2.enabled()
return _TENSORSHAPE_V2_OVERRIDE
def __repr__(self):
if self._v2_behavior:
if self._dims is not None:
return "TensorShape(%r)" % [dim.value for dim in self._dims]
else:
return "TensorShape(None)"
else:
return "TensorShape(%r)" % self._dims
def __str__(self):
if self.rank is None:
return "<unknown>"
elif self.rank == 1:
if self._v2_behavior:
return "(%s,)" % self._dims[0].value
else:
return "(%s,)" % self._dims[0]
else:
if self._v2_behavior:
return "(%s)" % ", ".join(str(d.value) for d in self._dims)
else:
return "(%s)" % ", ".join(str(d) for d in self._dims)
@property
def rank(self):
"""Returns the rank of this shape, or None if it is unspecified."""
if self._dims is not None:
return len(self._dims)
return None
@property
def dims(self):
"""Returns a list of Dimensions, or None if the shape is unspecified."""
return self._dims
@property
def ndims(self):
"""Deprecated accessor for `rank`."""
return self.rank
def __len__(self):
"""Returns the rank of this shape, or raises ValueError if unspecified."""
if self._dims is None:
raise ValueError("Cannot take the length of shape with unknown rank.")
return len(self._dims)
def __bool__(self):
"""Returns True if this shape contains non-zero information."""
return self._dims is not None
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
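# Note that a fully-known scalar shape is truthy: bool(TensorShape([])) is
# True, because an empty dims list still carries rank information; only a
# shape of unknown rank (dims is None) is falsy.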
def __iter__(self):
"""Returns `self.dims` if the rank is known, otherwise raises ValueError."""
if self._dims is None:
raise ValueError("Cannot iterate over a shape with unknown rank.")
else:
if self._v2_behavior:
return iter(d.value for d in self._dims)
else:
return iter(d for d in self._dims)
def __getitem__(self, key):
"""Returns the value of a dimension or a shape, depending on the key.
Args:
key: If `key` is an integer, returns the dimension at that index;
otherwise if `key` is a slice, returns a TensorShape whose dimensions
are those selected by the slice from `self`.
Returns:
An integer if `key` is an integer, or a `TensorShape` if `key` is a
slice.
Raises:
ValueError: If `key` is a slice and `self` is completely unknown and
the step is set.
"""
if self._dims is not None:
if isinstance(key, slice):
return TensorShape(self._dims[key])
else:
if self._v2_behavior:
return self._dims[key].value
else:
return self._dims[key]
else:
if isinstance(key, slice):
start = key.start if key.start is not None else 0
stop = key.stop
if key.step is not None:
# TODO(mrry): Handle these maybe.
raise ValueError("Steps are not yet handled")
if stop is None:
# NOTE(mrry): This implies that TensorShape(None) is compatible with
# TensorShape(None)[1:], which is obviously not true. It would be
# possible to track the number of dimensions symbolically,
# and perhaps we should do that.
return unknown_shape()
elif start < 0 or stop < 0:
# TODO(mrry): Handle this better, as it will be useful for handling
# suffixes of otherwise unknown shapes.
return unknown_shape()
else:
return unknown_shape(rank=stop - start)
else:
if self._v2_behavior:
return None
else:
return Dimension(None)
def num_elements(self):
"""Returns the total number of elements, or `None` for incomplete shapes."""
if self.is_fully_defined():
size = 1
for dim in self._dims:
size *= dim.value
return size
else:
return None
def merge_with(self, other):
"""Returns a `TensorShape` combining the information in `self` and `other`.
The dimensions in `self` and `other` are merged elementwise,
according to the rules defined for `Dimension.merge_with()`.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible.
"""
other = as_shape(other)
if self._dims is None:
return other
else:
try:
self.assert_same_rank(other)
new_dims = []
for i, dim in enumerate(self._dims):
new_dims.append(dim.merge_with(other[i]))
return TensorShape(new_dims)
except ValueError:
raise ValueError("Shapes %s and %s are not compatible" % (self, other))
def __add__(self, other):
if not isinstance(other, TensorShape):
other = TensorShape(other)
return self.concatenate(other)
def __radd__(self, other):
if not isinstance(other, TensorShape):
other = TensorShape(other)
return other.concatenate(self)
def concatenate(self, other):
"""Returns the concatenation of the dimensions in `self` and `other`.
*N.B.* If either `self` or `other` is completely unknown,
concatenation will discard information about the other shape. In the
future, we might support concatenation that preserves this
information for use with slicing.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` whose dimensions are the concatenation of the
dimensions in `self` and `other`.
"""
# TODO(mrry): Handle the case where we concatenate a known shape with a
# completely unknown shape, so that we can use the partial information.
other = as_shape(other)
if self._dims is None or other.dims is None:
return unknown_shape()
else:
return TensorShape(self._dims + other.dims)
def assert_same_rank(self, other):
"""Raises an exception if `self` and `other` do not have compatible ranks.
Args:
other: Another `TensorShape`.
Raises:
ValueError: If `self` and `other` do not represent shapes with the
same rank.
"""
other = as_shape(other)
if self.rank is not None and other.rank is not None:
if self.rank != other.rank:
raise ValueError("Shapes %s and %s must have the same rank" %
(self, other))
def assert_has_rank(self, rank):
"""Raises an exception if `self` is not compatible with the given `rank`.
Args:
rank: An integer.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
if self.rank not in (None, rank):
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank(self, rank):
"""Returns a shape based on `self` with the given rank.
This method promotes a completely unknown shape to one with a
known rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with the given rank.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
try:
return self.merge_with(unknown_shape(rank=rank))
except ValueError:
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank_at_least(self, rank):
"""Returns a shape based on `self` with at least the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at least the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at least the given
`rank`.
"""
if self.rank is not None and self.rank < rank:
raise ValueError("Shape %s must have rank at least %d" % (self, rank))
else:
return self
def with_rank_at_most(self, rank):
"""Returns a shape based on `self` with at most the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at most the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at most the given
`rank`.
"""
if self.rank is not None and self.rank > rank:
raise ValueError("Shape %s must have rank at most %d" % (self, rank))
else:
return self
def is_compatible_with(self, other):
"""Returns True iff `self` is compatible with `other`.
Two possibly-partially-defined shapes are compatible if there
exists a fully-defined shape that both shapes can represent. Thus,
compatibility allows the shape inference code to reason about
partially-defined shapes. For example:
* TensorShape(None) is compatible with all shapes.
* TensorShape([None, None]) is compatible with all two-dimensional
shapes, such as TensorShape([32, 784]), and also TensorShape(None). It is
not compatible with, for example, TensorShape([None]) or
TensorShape([None, None, None]).
* TensorShape([32, None]) is compatible with all two-dimensional shapes
with size 32 in the 0th dimension, and also TensorShape([None, None])
and TensorShape(None). It is not compatible with, for example,
TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]).
* TensorShape([32, 784]) is compatible with itself, and also
TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None,
None]) and TensorShape(None). It is not compatible with, for example,
TensorShape([32, 1, 784]) or TensorShape([None]).
The compatibility relation is reflexive and symmetric, but not
transitive. For example, TensorShape([32, 784]) is compatible with
TensorShape(None), and TensorShape(None) is compatible with
TensorShape([4, 4]), but TensorShape([32, 784]) is not compatible with
TensorShape([4, 4]).
Args:
other: Another TensorShape.
Returns:
True iff `self` is compatible with `other`.
"""
other = as_shape(other)
if self._dims is not None and other.dims is not None:
if self.rank != other.rank:
return False
for x_dim, y_dim in zip(self._dims, other.dims):
if not x_dim.is_compatible_with(y_dim):
return False
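# Reached when every paired dimension is compatible, or when at least one
# shape has unknown rank; an unknown shape is compatible with every shape.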
return True
def assert_is_compatible_with(self, other):
"""Raises exception if `self` and `other` do not represent the same shape.
This method can be used to assert that there exists a shape that both
`self` and `other` represent.
Args:
other: Another TensorShape.
Raises:
ValueError: If `self` and `other` do not represent the same shape.
"""
if not self.is_compatible_with(other):
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
def most_specific_compatible_shape(self, other):
"""Returns the most specific TensorShape compatible with `self` and `other`.
* TensorShape([None, 1]) is the most specific TensorShape compatible with
both TensorShape([2, 1]) and TensorShape([5, 1]). Note that
TensorShape(None) is also compatible with the above-mentioned TensorShapes.
* TensorShape([1, 2, 3]) is the most specific TensorShape compatible with
both TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). Less specific
TensorShapes are also compatible with the above, e.g.
TensorShape([1, 2, None]) and TensorShape(None).
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` which is the most specific compatible shape of `self`
and `other`.
"""
other = as_shape(other)
if self._dims is None or other.dims is None or self.rank != other.rank:
return unknown_shape()
dims = [(Dimension(None))] * self.rank
for i, (d1, d2) in enumerate(zip(self._dims, other.dims)):
if d1 is not None and d2 is not None and d1 == d2:
dims[i] = d1
return TensorShape(dims)
def is_fully_defined(self):
"""Returns True iff `self` is fully defined in every dimension."""
return (self._dims is not None and
all(dim.value is not None for dim in self._dims))
def assert_is_fully_defined(self):
"""Raises an exception if `self` is not fully defined in every dimension.
Raises:
ValueError: If `self` does not have a known value for every dimension.
"""
if not self.is_fully_defined():
raise ValueError("Shape %s is not fully defined" % self)
def as_list(self):
"""Returns a list of integers or `None` for each dimension.
Returns:
A list of integers or `None` for each dimension.
Raises:
ValueError: If `self` is an unknown shape with an unknown rank.
"""
if self._dims is None:
raise ValueError("as_list() is not defined on an unknown TensorShape.")
return [dim.value for dim in self._dims]
def as_proto(self):
"""Returns this shape as a `TensorShapeProto`."""
if self._dims is None:
return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
else:
return tensor_shape_pb2.TensorShapeProto(dim=[
tensor_shape_pb2.TensorShapeProto.Dim(
size=-1 if d.value is None else d.value) for d in self._dims
])
def __eq__(self, other):
"""Returns True if `self` is equivalent to `other`."""
try:
other = as_shape(other)
except TypeError:
return NotImplemented
return self._dims == other.dims
def __ne__(self, other):
"""Returns True if `self` is known to be different from `other`.
Raises:
ValueError: If `self` or `other` has unknown rank.
"""
try:
other = as_shape(other)
except TypeError:
return NotImplemented
if self.rank is None or other.rank is None:
raise ValueError("The inequality of unknown TensorShapes is undefined.")
if self.rank != other.rank:
return True
return self._dims != other.dims
def __reduce__(self):
return TensorShape, (self._dims,)
def __concat__(self, other):
return self.concatenate(other)
def as_shape(shape):
"""Converts the given object to a TensorShape."""
if isinstance(shape, TensorShape):
return shape
else:
return TensorShape(shape)
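# Illustrative sketch, not part of the original module: combining partial
# shape information. `_demo_shape_merge` is a hypothetical name; it only
# exercises the `TensorShape` methods defined above.
def _demo_shape_merge():
  partial = TensorShape([None, 256])
  full = TensorShape([16, 256])
  merged = partial.merge_with(full)  # TensorShape([16, 256])
  assert partial.is_compatible_with(full)
  # Dimensions that disagree are generalized to unknown:
  generalized = full.most_specific_compatible_shape(TensorShape([32, 256]))
  return merged, generalized  # generalized -> TensorShape([None, 256])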
def unknown_shape(rank=None, **kwargs):
"""Returns an unknown TensorShape, optionally with a known rank.
Args:
rank: (Optional) If specified, the number of dimensions in the shape.
**kwargs: For backwards compatibility.
Returns:
An unknown TensorShape.
Raises:
TypeError: In case of invalid arguments.
"""
if rank is None and "ndims" in kwargs:
rank = kwargs.pop("ndims")
if kwargs:
raise TypeError("Unknown argument: %s" % kwargs)
if rank is None:
return TensorShape(None)
else:
return TensorShape([Dimension(None)] * rank)
def scalar():
"""Returns a shape representing a scalar."""
return TensorShape([])
def vector(length):
"""Returns a shape representing a vector.
Args:
length: The length of the vector, which may be None if unknown.
Returns:
A TensorShape representing a vector of the given length.
"""
return TensorShape([length])
def matrix(rows, cols):
"""Returns a shape representing a matrix.
Args:
rows: The number of rows in the matrix, which may be None if unknown.
cols: The number of columns in the matrix, which may be None if unknown.
Returns:
A TensorShape representing a matrix of the given size.
"""
return TensorShape([rows, cols])
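# Illustrative sketch, not part of the original module: the factory helpers
# above are shorthand for common TensorShape constructions.
# `_demo_shape_factories` is a hypothetical name.
def _demo_shape_factories():
  s = scalar()  # TensorShape([]); rank 0
  v = vector(None)  # TensorShape([None]); length unknown
  m = matrix(28, 28)  # TensorShape([28, 28])
  u = unknown_shape(rank=3)  # TensorShape([None, None, None])
  return s, v, m, u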
|
tensorflow-master
|
tensorflow/python/framework/tensor_shape.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.op_def_library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
def _unknown_shape(op):
"""Shape function for use with ops whose output shapes are unknown."""
return [tensor_shape.unknown_shape() for _ in op.outputs]
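# Illustrative sketch, not part of the original test file: the core pattern
# these tests exercise -- applying a registered op through the OpDefLibrary
# and letting it infer output dtypes from the OpDef. `_demo_apply_op` is a
# hypothetical name; it relies on the `Simple` op registered in `test_ops`.
def _demo_apply_op():
  with ops.Graph().as_default():
    out = test_ops._op_def_lib.apply_op("Simple", a=3, name="demo")
    return out.dtype  # dtypes.float32, per the 'Simple' OpDef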
class OpDefLibraryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._lib = test_ops._op_def_lib
def _add_op(self, ascii): # pylint: disable=redefined-builtin
op_def = op_def_pb2.OpDef()
text_format.Merge(ascii, op_def)
self._lib.add_op(op_def)
def Tensor(self, t, name="in"):
return self._lib.apply_op("OutT", T=t, name=name)
def testNoRegisteredOpFails(self):
with self.assertRaises(RuntimeError) as cm:
self._lib.apply_op("unknown")
self.assertEqual(str(cm.exception), "Unrecognized Op name unknown")
def testAddOpValidation(self):
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'MissingTypeAttr' "
"input_arg { name: 'a' type_attr: 'T' } ")
self.assertEqual(str(cm.exception),
"Inconsistent OpDef for 'MissingTypeAttr', "
"missing attr 'T'")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'BadTypeAttr' "
"output_arg { name: 'a' type_attr: 'T' } "
"attr { name: 'T' type: 'int' }")
self.assertEqual(
str(cm.exception),
"Attr 'T' of 'BadTypeAttr' used as a type_attr but has type int")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'MissingNumberAttr' "
"input_arg { name: 'a' type: DT_INT32 number_attr: 'N' } ")
self.assertEqual(str(cm.exception),
"Inconsistent OpDef for 'MissingNumberAttr', "
"missing attr 'N'")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'BadNumberAttr' "
"output_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
"attr { name: 'N' type: 'type' }")
self.assertEqual(
str(cm.exception),
"Attr 'N' of 'BadNumberAttr' used as a number_attr but has type type")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'TwoTypesA' "
"input_arg { name: 'a' type: DT_INT32 type_attr: 'T' } "
"attr { name: 'T' type: 'type' }")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'TwoTypesA' must have one type field not 2")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'TwoTypesB' "
"input_arg { name: 'a' type: DT_INT32 type_list_attr: 'T' } "
"attr { name: 'T' type: 'list(type)' }")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'TwoTypesB' must have one type field not 2")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'ThreeTypes' "
"input_arg { name: 'a' type: DT_INT32 type_attr: 'T' "
"type_list_attr: 'U' } "
"attr { name: 'T' type: 'type' } "
"attr { name: 'U' type: 'list(type)' }")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'ThreeTypes' must have one type field not 3")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'NoTypes' output_arg { name: 'a' } ")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'NoTypes' must have one type field not 0")
def testSimple(self):
with ops.Graph().as_default():
out = self._lib.apply_op("Simple", a=3)
self.assertEqual(dtypes.float32, out.dtype)
self.assertProtoEquals("""
name: 'Simple' op: 'Simple' input: 'Simple/a'
""", out.op.node_def)
out = self._lib.apply_op("Simple", a=4)
self.assertProtoEquals("""
name: 'Simple_1' op: 'Simple' input: 'Simple_1/a'
""", out.op.node_def)
out = self._lib.apply_op("Simple", a=5, name="named")
self.assertProtoEquals("""
name: 'named' op: 'Simple' input: 'named/a'
""", out.op.node_def)
out = self._lib.apply_op("Simple", a=[[1, 2, 3], [4, 5, 6]], name="two_d")
self.assertProtoEquals("""
name: 'two_d' op: 'Simple' input: 'two_d/a'
""", out.op.node_def)
def testSimpleFailures(self):
with ops.Graph().as_default():
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a="Bad string")
self.assertTrue(
"Expected int32 passed to parameter 'a' of op 'Simple', "
"got 'Bad string' of type 'str' instead." in str(cm.exception))
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a=self.Tensor(dtypes.string))
self.assertTrue(
"Input 'a' of 'Simple' Op has type string "
"that does not match expected type of int32." in str(cm.exception))
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a=6, extra="bogus")
self.assertTrue(
"apply_op() got unexpected keyword arguments: extra"
in str(cm.exception))
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a=6, extra1="bogus", extra2="also_bogus")
self.assertTrue(
"apply_op() got unexpected keyword arguments: extra1, "
"extra2" in str(cm.exception))
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple")
self.assertTrue(
"No argument for input a" in str(cm.exception))
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", wrong=7)
self.assertTrue(
"No argument for input a" in str(cm.exception))
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a={"label": 1})
self.assertTrue(
"Expected int32 passed to parameter 'a' of op 'Simple', "
"got {'label': 1} of type 'dict' instead." in str(cm.exception))
def testReservedInput(self):
with ops.Graph().as_default():
op = self._lib.apply_op("ReservedInput", input_=7, name="x")
self.assertProtoEquals("""
name: 'x' op: 'ReservedInput' input: 'x/input'
""", op.node_def)
def testPolymorphic(self):
with ops.Graph().as_default():
out = self._lib.apply_op("Polymorphic", a=7, name="p")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'Polymorphic' input: 'p/a'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = self._lib.apply_op("Polymorphic", a="s", name="q")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'Polymorphic' input: 'q/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = self._lib.apply_op("Polymorphic", a=["s", "t", "u"], name="r")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'r' op: 'Polymorphic' input: 'r/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Polymorphic", a="s", T=dtypes.string)
self.assertEqual(str(cm.exception),
"Should not specify value for inferred attr 'T'.")
def testPolymorphicOut(self):
with ops.Graph().as_default():
out = self._lib.apply_op("PolymorphicOut", T=dtypes.int32, name="p")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'PolymorphicOut'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = self._lib.apply_op("PolymorphicOut", T=dtypes.bool, name="q")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'PolymorphicOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("PolymorphicOut")
self.assertEqual(str(cm.exception),
"No argument for attr T")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("PolymorphicOut", T=None)
self.assertEqual(str(cm.exception),
"Expected DataType for argument 'T' not None.")
def testPolymorphicDefaultOut(self):
with ops.Graph().as_default():
out = self._lib.apply_op("PolymorphicDefaultOut", T=None, name="p")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'PolymorphicDefaultOut'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = self._lib.apply_op("PolymorphicDefaultOut", T=dtypes.bool, name="q")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'PolymorphicDefaultOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
def testBinary(self):
with ops.Graph().as_default():
out = self._lib.apply_op("Binary", a=8, b=9, name="b")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'b' op: 'Binary' input: 'b/a' input: 'b/b'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = self._lib.apply_op("Binary", a="left", b="right", name="c")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'c' op: 'Binary' input: 'c/a' input: 'c/b'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
with self.assertRaises(TypeError):
self._lib.apply_op("Binary", a="left", b=12)
with self.assertRaises(TypeError):
self._lib.apply_op("Binary",
a=self.Tensor(dtypes.string),
b=self.Tensor(dtypes.int32))
def testRestrict(self):
with ops.Graph().as_default():
out = self._lib.apply_op("Restrict", a="foo", name="g")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'g' op: 'Restrict' input: 'g/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = self._lib.apply_op("Restrict", a=True, name="h")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'h' op: 'Restrict' input: 'h/a'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Restrict", a=17)
self.assertEqual(str(cm.exception),
"Value passed to parameter 'a' has DataType int32 "
"not in list of allowed values: string, bool")
def testTypeList(self):
with ops.Graph().as_default():
op = self._lib.apply_op("TypeList", a=["foo"], name="z")
self.assertProtoEquals("""
name: 'z' op: 'TypeList' input: 'z/a_0'
attr { key: 'T' value { list { type: DT_STRING } } }
""", op.node_def)
op = self._lib.apply_op("TypeList", a=[True, 12], name="y")
self.assertProtoEquals("""
name: 'y' op: 'TypeList' input: 'y/a_0' input: 'y/a_1'
attr { key: 'T' value { list { type: DT_BOOL type: DT_INT32 } } }
""", op.node_def)
op = self._lib.apply_op("TypeList", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'TypeList' attr { key: 'T' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeList", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' "
"argument to 'TypeList' Op, not ")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeList", a=[self.Tensor(dtypes.int32), None])
self.assertStartsWith(str(cm.exception),
"Tensors in list passed to 'a' of 'TypeList' Op "
"have types [int32, <NOT CONVERTIBLE TO TENSOR>]")
def testTypeListTwice(self):
with ops.Graph().as_default():
op = self._lib.apply_op("TypeListTwice",
a=["foo", True],
b=["bar", False],
name="z")
self.assertProtoEquals("""
name: 'z' op: 'TypeListTwice'
input: 'z/a_0' input: 'z/a_1' input: 'z/b_0' input: 'z/b_1'
attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }
""", op.node_def)
op = self._lib.apply_op("TypeListTwice", a=[], b=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'TypeListTwice' attr { key: 'T' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeListTwice", a=["foo", True], b=["bar", 6])
self.assertEqual(str(cm.exception),
"Input 'b' of 'TypeListTwice' Op has type list of "
"string, int32 that does not match type list "
"string, bool of argument 'a'.")
def testOutTypeList(self):
with ops.Graph().as_default():
out, = self._lib.apply_op("OutTypeList", T=[dtypes.float32], name="x")
self.assertEqual(dtypes.float32, out.dtype)
self.assertProtoEquals("""
name: 'x' op: 'OutTypeList'
attr { key: 'T' value { list { type: DT_FLOAT } } }
""", out.op.node_def)
out1, out2 = self._lib.apply_op("OutTypeList",
T=[dtypes.int32, dtypes.bool],
name="w")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertProtoEquals("""
name: 'w' op: 'OutTypeList'
attr { key: 'T' value { list { type: DT_INT32 type: DT_BOOL } } }
""", out1.op.node_def)
out = self._lib.apply_op("OutTypeList", T=[], name="empty")
self.assertEqual([], out)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("OutTypeList", T=dtypes.int32)
self.assertEqual(str(cm.exception), "Expected list for attr T")
def testTypeListRestrict(self):
with ops.Graph().as_default():
op = self._lib.apply_op("TypeListRestrict", a=["foo", False], name="v")
self.assertProtoEquals("""
name: 'v' op: 'TypeListRestrict' input: 'v/a_0' input: 'v/a_1'
attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeListRestrict", a=[True, 12])
self.assertEqual(str(cm.exception),
"Value passed to parameter 'a' has DataType int32 "
"not in list of allowed values: string, bool")
def testOutTypeListRestrict(self):
with ops.Graph().as_default():
out1, out2 = self._lib.apply_op("OutTypeListRestrict",
t=[dtypes.bool, dtypes.string],
name="u")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.string, out2.dtype)
self.assertProtoEquals("""
name: 'u' op: 'OutTypeListRestrict'
attr { key: 't' value { list { type: DT_BOOL type: DT_STRING } } }
""", out1.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("OutTypeListRestrict",
t=[dtypes.string, dtypes.int32])
self.assertEqual(str(cm.exception),
"Value passed to parameter 't' has DataType int32 "
"not in list of allowed values: string, bool")
def testAttr(self):
with ops.Graph().as_default():
op = self._lib.apply_op("Attr", a=12, name="t")
self.assertProtoEquals("""
name: 't' op: 'Attr' attr { key: 'a' value { i: 12 } }
""", op.node_def)
op = self._lib.apply_op("Attr", a=tensor_shape.Dimension(13), name="u")
self.assertProtoEquals("""
name: 'u' op: 'Attr' attr { key: 'a' value { i: 13 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr", a="bad")
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not 'bad'.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr", a=[12])
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not [12].")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr", a=None)
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not None.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr")
self.assertEqual(str(cm.exception), "No argument for attr a")
def testAttrFloat(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrFloat", a=1.2, name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrFloat' attr { key: 'a' value { f: 1.2 } }
""", op.node_def)
op = self._lib.apply_op("AttrFloat", a=12, name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrFloat' attr { key: 'a' value { f: 12 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrFloat", a="bad")
self.assertEqual(str(cm.exception),
"Expected float for argument 'a' not 'bad'.")
def testAttrFunc(self):
with ops.Graph().as_default():
@function.Defun(dtypes.float32, func_name="MyFn")
def fn(x):
return 2 + x
op = self._lib.apply_op("FuncAttr", f=fn, name="t")
self.assertProtoEquals("""
name: 't' op: 'FuncAttr' attr { key: 'f'
value { func { name: 'MyFn' } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("FuncAttr", f=3)
self.assertEqual(str(cm.exception),
"Don't know how to convert 3 to a func for argument f")
def testAttrFuncList(self):
with ops.Graph().as_default():
@function.Defun(dtypes.float32, func_name="MyFn")
def fn1(x):
return 2 + x
@function.Defun(dtypes.int32, dtypes.float32, func_name="MyFn2")
def fn2(x, y):
return 2 + x, y * 3
@function.Defun(dtypes.int32, func_name="MyFn3")
def fn3(y):
return 2 + y
op = self._lib.apply_op("FuncListAttr", f=[fn1, fn2, fn3], name="t")
self.assertProtoEquals("""
name: 't' op: 'FuncListAttr'
attr { key: 'f' value { list { func { name: 'MyFn' }
func { name: 'MyFn2' }
func { name: 'MyFn3' } } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("FuncListAttr", f=[fn1, 3, fn2])
self.assertEqual(str(cm.exception),
"Don't know how to convert 3 to a func for argument f")
def testAttrBool(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrBool", a=True, name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrBool' attr { key: 'a' value { b: true } }
""", op.node_def)
op = self._lib.apply_op("AttrBool", a=False, name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrBool' attr { key: 'a' value { b: false } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBool", a=0)
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 0.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBool", a=1)
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 1.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBool", a=[])
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not [].")
def testAttrBoolList(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrBoolList", a=[True, False, True], name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrBoolList'
attr { key: 'a' value { list { b: true b: false b:true } } }
""", op.node_def)
op = self._lib.apply_op("AttrBoolList", a=[], name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrBoolList' attr { key: 'a' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBoolList", a=[0])
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 0.")
def testAttrMin(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrMin", a=12, name="s")
self.assertProtoEquals("""
name: 's' op: 'AttrMin' attr { key: 'a' value { i: 12 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrMin", a=2)
self.assertEqual(str(cm.exception),
"Attr 'a' of 'AttrMin' Op passed 2 less than minimum 5.")
def testAttrListMin(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrListMin", a=[1, 2], name="r")
self.assertProtoEquals("""
name: 'r' op: 'AttrListMin'
attr { key: 'a' value { list { i: 1 i: 2 } } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrListMin", a=[17])
self.assertEqual(str(cm.exception),
"Attr 'a' of 'AttrListMin' Op "
"passed list of length 1 less than minimum 2.")
def testAttrEnum(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrEnum", a="oranges", name="e")
self.assertProtoEquals("""
name: 'e' op: 'AttrEnum' attr { key: 'a' value { s: 'oranges' } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrEnum", a="invalid")
self.assertEqual(str(cm.exception),
'Attr \'a\' of \'AttrEnum\' Op '
'passed string \'invalid\' not in: '
'"apples", "oranges".')
def testAttrEnumList(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrEnumList", a=["oranges", "apples"], name="f")
self.assertProtoEquals("""
name: 'f' op: 'AttrEnumList'
attr { key: 'a' value { list { s: 'oranges' s: 'apples' } } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrEnumList", a=["apples", "invalid", "oranges"])
self.assertEqual(str(cm.exception),
'Attr \'a\' of \'AttrEnumList\' Op '
'passed string \'invalid\' not '
'in: "apples", "oranges".')
def testAttrShape(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrShape", a=[5], name="s1")
self.assertProtoEquals("""
name: 's1' op: 'AttrShape'
attr { key: 'a' value { shape { dim { size: 5 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrShape", a=(4, 3, 2), name="s2")
self.assertProtoEquals("""
name: 's2' op: 'AttrShape'
attr { key: 'a' value {
shape { dim { size: 4 } dim { size: 3 } dim { size: 2 } } } }
""", op.node_def)
op = self._lib.apply_op(
"AttrShape", a=tensor_shape.TensorShape([3, 2]), name="s3")
self.assertProtoEquals("""
name: 's3' op: 'AttrShape'
attr { key: 'a' value {
shape { dim { size: 3 } dim { size: 2 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrShape", a=[], name="s4")
self.assertProtoEquals("""
name: 's4' op: 'AttrShape' attr { key: 'a' value { shape { } } }
""", op.node_def)
shape = tensor_shape_pb2.TensorShapeProto()
shape.dim.add().size = 6
shape.dim.add().size = 3
op = self._lib.apply_op("AttrShape", a=shape, name="s5")
self.assertProtoEquals("""
name: 's5' op: 'AttrShape'
attr { key: 'a' value { shape { dim { size: 6 } dim { size: 3 } } } }
""", op.node_def)
# TODO(josh11b): Re-enable this test once we stop promoting scalars to
# shapes.
# with self.assertRaises(TypeError) as cm:
# self._lib.apply_op("AttrShape", a=5)
# self.assertEqual(str(cm.exception),
# "Don't know how to convert 5 to a TensorShapeProto for"
# " argument 'a'")
with self.assertRaises(TypeError):
self._lib.apply_op("AttrShape", a="ABC")
def testAttrShapeList(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrShapeList", a=[[3, 2], [6, 5, 4]], name="sl")
self.assertProtoEquals("""
name: 'sl' op: 'AttrShapeList'
attr { key: 'a' value { list {
shape { dim { size: 3 } dim { size: 2 } }
shape { dim { size: 6 } dim { size: 5 } dim { size: 4 } } } } }
""", op.node_def)
op = self._lib.apply_op("AttrShapeList", a=[], name="esl")
self.assertProtoEquals("""
name: 'esl' op: 'AttrShapeList' attr { key: 'a' value { list { } } }
""", op.node_def)
def testAttrPartialShape(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrPartialShape", a=[5], name="s1")
self.assertProtoEquals("""
name: 's1' op: 'AttrPartialShape'
attr { key: 'a' value { shape { dim { size: 5 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrPartialShape", a=(4, None, 2), name="s2")
self.assertProtoEquals("""
name: 's2' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: 4 } dim { size: -1 } dim { size: 2 } } } }
""", op.node_def)
op = self._lib.apply_op(
"AttrPartialShape", a=tensor_shape.TensorShape([3, None]), name="s3")
self.assertProtoEquals("""
name: 's3' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: 3 } dim { size: -1 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrPartialShape", a=[], name="s4")
self.assertProtoEquals("""
name: 's4' op: 'AttrPartialShape'
attr { key: 'a' value { shape { } } }
""", op.node_def)
shape = tensor_shape_pb2.TensorShapeProto()
shape.dim.add().size = -1
shape.dim.add().size = 3
op = self._lib.apply_op("AttrPartialShape", a=shape, name="s5")
self.assertProtoEquals("""
name: 's5' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: -1 } dim { size: 3 } } } }
""", op.node_def)
# TODO(ebrevdo): Re-enable once we stop promoting scalars to shapes.
# with self.assertRaises(TypeError) as cm:
# self._lib.apply_op("AttrPartialShape", a=5)
# self.assertEqual(str(cm.exception),
# "Don't know how to convert 5 to a TensorShapeProto for"
# " argument 'a'")
with self.assertRaises(TypeError):
self._lib.apply_op("AttrPartialShape", a="ABC")
def testAttrPartialShapeList(self):
with ops.Graph().as_default():
op = self._lib.apply_op(
"AttrPartialShapeList", a=[[3, 2], [6, None, 4]], name="sl")
self.assertProtoEquals("""
name: 'sl' op: 'AttrPartialShapeList'
attr { key: 'a' value { list {
shape { dim { size: 3 } dim { size: 2 } }
shape { dim { size: 6 } dim { size: -1 } dim { size: 4 } } } } }
""", op.node_def)
op = self._lib.apply_op("AttrPartialShapeList", a=[], name="esl")
self.assertProtoEquals("""
name: 'esl' op: 'AttrPartialShapeList' attr {
key: 'a' value { list { } } }
""", op.node_def)
def testAttrDefault(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrDefault", a=None, name="d")
self.assertProtoEquals("""
name: 'd' op: 'AttrDefault' attr { key: 'a' value { s: 'banana' } }
""", op.node_def)
op = self._lib.apply_op("AttrDefault", a="kiwi", name="c")
self.assertProtoEquals("""
name: 'c' op: 'AttrDefault' attr { key: 'a' value { s: 'kiwi' } }
""", op.node_def)
def testAttrListDefault(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrListDefault", a=None, name="b")
self.assertProtoEquals("""
name: 'b' op: 'AttrListDefault'
attr { key: 'a' value { list { i: 5 i: 15 } } }
""", op.node_def)
op = self._lib.apply_op("AttrListDefault", a=[3], name="a")
self.assertProtoEquals("""
name: 'a' op: 'AttrListDefault'
attr { key: 'a' value { list { i: 3 } } }
""", op.node_def)
op = self._lib.apply_op("AttrListDefault", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'AttrListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
def testAttrEmptyListDefault(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrEmptyListDefault", a=None, name="b")
self.assertProtoEquals("""
name: 'b' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
op = self._lib.apply_op("AttrEmptyListDefault", a=[3], name="a")
self.assertProtoEquals("""
name: 'a' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { f: 3 } } }
""", op.node_def)
op = self._lib.apply_op("AttrEmptyListDefault", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
def testReservedAttr(self):
with ops.Graph().as_default():
op = self._lib.apply_op("ReservedAttr", range_=7, name="x")
self.assertProtoEquals("""
name: 'x' op: 'ReservedAttr' attr { key: 'range' value { i: 7 } }
""", op.node_def)
def testDefaultAttrType(self):
with ops.Graph().as_default():
# Give an input whose type has no obvious output type.
op = self._lib.apply_op("AttrTypeDefault", a=[], name="n")
self.assertProtoEquals("""
name: 'n' op: 'AttrTypeDefault' input: 'n/a'
attr { key: 'T' value { type: DT_INT32 } }
""", op.node_def)
# Give an input whose type can be inferred as different
# than the default.
op = self._lib.apply_op("AttrTypeDefault", a=[1.0], name="f")
self.assertProtoEquals("""
name: 'f' op: 'AttrTypeDefault' input: 'f/a'
attr { key: 'T' value { type: DT_FLOAT } }
""", op.node_def)
def testDefaultListAttrType(self):
with ops.Graph().as_default():
# Give an input whose type can be inferred as different
# than the default.
op = self._lib.apply_op("AttrListTypeDefault", a=[1.0], b=[2.0], name="n")
self.assertProtoEquals("""
name: 'n' op: 'AttrListTypeDefault' input: 'n/a_0' input: 'n/b_0'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 1 } }
""", op.node_def)
def testNIntsIn(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NIntsIn", a=[1, 2], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NIntsIn' input: 'n/a_0' input: 'n/a_1'
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NIntsIn", a=[5, 4, 3, 2, 1], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NIntsIn'
input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'
attr { key: 'N' value { i: 5 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn", a=["foo", "bar"])
self.assertEqual(
str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op have types "
"[string, string] that do not match expected type int32.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn",
a=[self.Tensor(dtypes.string),
self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op have "
"types [string, string] that do not match expected type "
"int32.")
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NIntsIn", a=[99])
self.assertEqual(str(cm.exception),
"List argument 'a' to 'NIntsIn' Op "
"with length 1 shorter than "
"minimum length 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn", a=[38, "bar"])
self.assertEqual(
str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op have types "
"[int32, string] that do not match expected type int32.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn",
a=[self.Tensor(dtypes.int32),
self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op "
"have types [int32, string] that do not match expected "
"type int32.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' argument "
"to 'NIntsIn' Op, not ")
def testNPolymorphicIn(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NPolymorphicIn", a=[1, 2], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NPolymorphicIn' input: 'n/a_0' input: 'n/a_1'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn", a=[5, 4, 3, 2, 1], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NPolymorphicIn'
input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 5 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn", a=["foo", "bar"], name="p")
self.assertProtoEquals("""
name: 'p' op: 'NPolymorphicIn' input: 'p/a_0' input: 'p/a_1'
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn",
a=[1, self.Tensor(dtypes.float32, name="x")],
name="q")
self.assertProtoEquals("""
name: 'q' op: 'NPolymorphicIn' input: 'q/a_0' input: 'x'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn",
a=[self.Tensor(dtypes.float32, name="y"),
self.Tensor(dtypes.float32_ref, name="z")],
name="r")
self.assertProtoEquals("""
name: 'r' op: 'NPolymorphicIn' input: 'y' input: 'z'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[99])
self.assertEqual(str(cm.exception),
"List argument 'a' to 'NPolymorphicIn' Op with length 1 "
"shorter than minimum length 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[38, "bar"])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [int32, string] that don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[38, self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [int32, string] that don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[38, None])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [int32, <NOT CONVERTIBLE TO TENSOR>] that "
"don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn",
a=["abcd", self.Tensor(dtypes.int32)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [string, int32] that don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' argument "
"to 'NPolymorphicIn' Op, not ")
def testNPolymorphicRestrictIn(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NPolymorphicRestrictIn", a=["foo", "bar"],
name="p")
self.assertProtoEquals("""
name: 'p' op: 'NPolymorphicRestrictIn' input: 'p/a_0' input: 'p/a_1'
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicRestrictIn",
a=[False, True, False],
name="b")
self.assertProtoEquals("""
name: 'b' op: 'NPolymorphicRestrictIn'
input: 'b/a_0' input: 'b/a_1' input: 'b/a_2'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 3 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicRestrictIn", a=[1, 2])
self.assertEqual(
str(cm.exception),
"Value passed to parameter 'a' has DataType int32 not in "
"list of allowed values: string, bool")
def testNInTwice(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NInTwice", a=[1, 2], b=["one", "two"], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NInTwice'
input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NInTwice", a=[], b=[], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NInTwice' attr { key: 'N' value { i: 0 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NInTwice", a=[1, 2, 3], b=["too short"])
self.assertEqual(str(cm.exception),
"List argument 'b' to 'NInTwice' Op "
"with length 1 must match "
"length 3 of argument 'a'.")
def testNInPolymorphicTwice(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NInPolymorphicTwice", a=[1, 2], b=[3, 4],
name="n")
self.assertProtoEquals("""
name: 'n' op: 'NInPolymorphicTwice'
input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NInPolymorphicTwice", a=[1, 2, 3], b=[5])
self.assertEqual(str(cm.exception),
"List argument 'b' to 'NInPolymorphicTwice' Op "
"with length 1 "
"must match length 3 of argument 'a'.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NInPolymorphicTwice", a=[1, 2], b=["one", "two"])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of 'NInPolymorphicTwice' "
"Op have types [string, string] that do not match type "
"int32 inferred from earlier arguments.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NInPolymorphicTwice",
a=[self.Tensor(dtypes.int32)],
b=[self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of "
"'NInPolymorphicTwice' Op have types [string] that do "
"not match type int32 inferred from earlier arguments.")
def testNInTwoTypeVariables(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NInTwoTypeVariables",
a=[1, 2],
b=[True, False],
name="n")
self.assertProtoEquals("""
name: 'n' op: 'NInTwoTypeVariables'
input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
attr { key: 'S' value { type: DT_INT32 } }
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NInTwoTypeVariables", a=[1, 2], b=[3, 4],
name="o")
self.assertProtoEquals("""
name: 'o' op: 'NInTwoTypeVariables'
input: 'o/a_0' input: 'o/a_1' input: 'o/b_0' input: 'o/b_1'
attr { key: 'S' value { type: DT_INT32 } }
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NInTwoTypeVariables",
a=[self.Tensor(dtypes.int32, name="q")],
b=[self.Tensor(dtypes.string, name="r")],
name="p")
self.assertProtoEquals("""
name: 'p' op: 'NInTwoTypeVariables' input: 'q' input: 'r'
attr { key: 'S' value { type: DT_INT32 } }
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 1 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NInTwoTypeVariables", a=[1, 2, 3], b=["5"])
self.assertEqual(str(cm.exception),
"List argument 'b' to 'NInTwoTypeVariables' Op "
"with length 1 "
"must match length 3 of argument 'a'.")
def testInPolymorphicTwice(self):
with ops.Graph().as_default():
op = self._lib.apply_op("InPolymorphicTwice", a=[8], b=[3, 4, 5],
name="n")
self.assertProtoEquals("""
name: 'n' op: 'InPolymorphicTwice'
input: 'n/a_0' input: 'n/b_0' input: 'n/b_1' input: 'n/b_2'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 1 } }
attr { key: 'M' value { i: 3 } }
""", op.node_def)
op = self._lib.apply_op("InPolymorphicTwice", a=[8], b=[], name="o")
self.assertProtoEquals("""
name: 'o' op: 'InPolymorphicTwice' input: 'o/a_0'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 1 } }
attr { key: 'M' value { i: 0 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("InPolymorphicTwice", a=[], b=[3, 4, 5])
self.assertEqual(str(cm.exception),
"Don't know how to infer type variable from empty input "
"list passed to input 'a' of 'InPolymorphicTwice' Op.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("InPolymorphicTwice", a=[1, 2], b=["one", "two"])
self.assertEqual(
str(cm.exception),
"Tensors in list passed to 'b' of 'InPolymorphicTwice' Op "
"have types [string, string] that do not match type int32 "
"inferred from earlier arguments.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("InPolymorphicTwice",
a=[self.Tensor(dtypes.int32)],
b=[self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of 'InPolymorphicTwice' "
"Op have types [string] that do not match type int32 "
"inferred from earlier arguments.")
def testNIntsOut(self):
with ops.Graph().as_default():
out1, out2 = self._lib.apply_op("NIntsOut", N=2, name="n")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 'n' op: 'NIntsOut' attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3, out4, out5 = self._lib.apply_op(
"NIntsOut", N=5, name="o")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertEqual(dtypes.int32, out3.dtype)
self.assertEqual(dtypes.int32, out4.dtype)
self.assertEqual(dtypes.int32, out5.dtype)
self.assertProtoEquals("""
name: 'o' op: 'NIntsOut' attr { key: 'N' value { i: 5 } }
""", out5.op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NIntsOut", N=1)
self.assertEqual(
str(cm.exception),
"Attr 'N' of 'NIntsOut' Op passed 1 less than minimum 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsOut", N=[3])
self.assertEqual(str(cm.exception),
"Expected int for argument 'N' not [3].")
def testNIntsOutDefault(self):
with ops.Graph().as_default():
out1, out2, out3 = self._lib.apply_op(
"NIntsOutDefault", N=None, name="z")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertEqual(dtypes.int32, out3.dtype)
self.assertProtoEquals("""
name: 'z' op: 'NIntsOutDefault' attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
out1, out2 = self._lib.apply_op("NIntsOutDefault", N=2, name="y")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 'y' op: 'NIntsOutDefault' attr { key: 'N' value { i: 2 } }
""", out2.op.node_def)
def testNPolymorphicOut(self):
with ops.Graph().as_default():
out1, out2 = self._lib.apply_op("NPolymorphicOut",
N=2,
T=dtypes.int32,
name="n")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 'n' op: 'NPolymorphicOut'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicOut", T=dtypes.string, N=3, name="o")
self.assertEqual(dtypes.string, out1.dtype)
self.assertEqual(dtypes.string, out2.dtype)
self.assertEqual(dtypes.string, out3.dtype)
self.assertProtoEquals("""
name: 'o' op: 'NPolymorphicOut'
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 3 } }
""", out3.op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NPolymorphicOut", N=1, T=dtypes.string)
self.assertEqual(str(cm.exception),
"Attr 'N' of 'NPolymorphicOut' Op "
"passed 1 less than minimum 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicOut", N=3, T=[dtypes.string])
self.assertEqual(
str(cm.exception),
"Expected DataType for argument 'T' not [tf.string].")
def testNPolymorphicOutDefault(self):
with ops.Graph().as_default():
out1, out2 = self._lib.apply_op(
"NPolymorphicOutDefault", N=None, T=None, name="r")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertProtoEquals("""
name: 'r' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicOutDefault", N=3, T=None, name="s")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertEqual(dtypes.bool, out3.dtype)
self.assertProtoEquals("""
name: 's' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
out1, out2 = self._lib.apply_op(
"NPolymorphicOutDefault", N=None, T=dtypes.int32, name="t")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 't' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicOutDefault", N=3, T=dtypes.int32, name="u")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertEqual(dtypes.int32, out3.dtype)
self.assertProtoEquals("""
name: 'u' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
def testNPolymorphicRestrictOut(self):
with ops.Graph().as_default():
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicRestrictOut", N=3, T=dtypes.bool, name="u")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertEqual(dtypes.bool, out3.dtype)
self.assertProtoEquals("""
name: 'u' op: 'NPolymorphicRestrictOut'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicRestrictOut", N=2, T=dtypes.int32)
self.assertEqual(str(cm.exception),
"Value passed to parameter 'T' has DataType int32 "
"not in list of allowed values: string, bool")
def testRef(self):
with ops.Graph().as_default():
out = self._lib.apply_op("RefOut", T=dtypes.bool, name="o")
self.assertEqual(dtypes.bool_ref, out.dtype)
self.assertProtoEquals("""
name: 'o' op: 'RefOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
op = self._lib.apply_op("RefIn", a=out, name="i")
self.assertProtoEquals("""
name: 'i' op: 'RefIn' input: 'o'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: "_class" value { list { s: "loc:@o" } } }
""", op.node_def)
# Can pass ref to non-ref input.
out = self._lib.apply_op("RefOut", T=dtypes.int32, name="r")
out = self._lib.apply_op("Simple", a=out, name="s")
self.assertProtoEquals("""
name: 's' op: 'Simple' input: 'r'
""", out.op.node_def)
# Can't pass non-ref to ref input.
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("RefIn", a=2)
self.assertEqual(
str(cm.exception),
"'RefIn' Op requires that input 'a' be a mutable tensor " +
"(e.g.: a tf.Variable)")
input_a = self._lib.apply_op("RefOut", T=dtypes.int32, name="t")
input_b = self._lib.apply_op("RefOut", T=dtypes.int32, name="u")
op = self._lib.apply_op("TwoRefsIn", a=input_a, b=input_b, name="v")
# NOTE(mrry): The order of colocation constraints is an implementation
# detail.
self.assertProtoEquals("""
name: 'v' op: 'TwoRefsIn' input: 't' input: 'u'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: "_class" value { list { s: "loc:@t" s: "loc:@u" } } }
""", op.node_def)
def testSpecifyDevice(self):
graph = ops.Graph()
with graph.as_default():
with graph.device("/job:ADevice"):
self._lib.apply_op("Simple", a=3)
# We look at the whole graph here to make sure the Const op is also given
# the specified device.
graph_def = graph.as_graph_def()
self.assertEqual(len(graph_def.node), 2)
for node in graph_def.node:
self.assertDeviceEqual(node.device, "/job:ADevice")
def testStructuredOutputSingleList(self):
with ops.Graph().as_default():
for n_a in [0, 1, 3]:
a = self._lib.apply_op("SimpleStruct", n_a=n_a)
self.assertTrue(isinstance(a, list))
self.assertEqual(n_a, len(a))
def testStructuredOutputListAndSingle(self):
with ops.Graph().as_default():
for n_a in [0, 1, 3]:
a, b = self._lib.apply_op("MixedStruct", n_a=n_a)
self.assertTrue(isinstance(a, list))
self.assertEqual(n_a, len(a))
self.assertTrue(all(x.dtype == dtypes.int32 for x in a))
self.assertTrue(isinstance(b, ops.Tensor))
self.assertEqual(dtypes.float32, b.dtype)
def testStructuredOutputMultipleLists(self):
with ops.Graph().as_default():
for n_a in [0, 1, 3]:
for n_b in [0, 1, 3]:
for t_c in [[],
[dtypes.int32],
[dtypes.int32, dtypes.float32]]:
a, b, c = self._lib.apply_op("ComplexStruct",
n_a=n_a,
n_b=n_b,
t_c=t_c)
self.assertEqual(n_a, len(a))
self.assertTrue(all(x.dtype == dtypes.int32 for x in a))
self.assertEqual(n_b, len(b))
self.assertTrue(all(x.dtype == dtypes.int64 for x in b))
self.assertEqual(t_c, [x.dtype for x in c])
class OpDefLibraryGraphTest(test_util.TensorFlowTestCase):
def setUp(self):
self._lib = test_ops._op_def_lib
def _add_op(self, ascii): # pylint: disable=redefined-builtin
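# Parses a text-format OpDef and registers it with the library under test.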
op_def = op_def_pb2.OpDef()
text_format.Merge(ascii, op_def)
self._lib.add_op(op_def)
def testNoGraph(self):
out = self._lib.apply_op("Simple", a=3)
self.assertEqual(out.graph, ops.get_default_graph())
def testDefaultGraph(self):
graph = ops.Graph()
with graph.as_default():
out = self._lib.apply_op("Simple", a=3)
self.assertEqual(out.graph, graph)
def testDifferentGraphFails(self):
with ops.Graph().as_default():
a = self._lib.apply_op("Simple", a=3)
with ops.Graph().as_default():
b = self._lib.apply_op("Simple", a=4)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("Binary", a=a, b=b)
self.assertTrue("must be from the same graph" in str(cm.exception))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/op_def_library_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import device
from tensorflow.python.framework import device_spec
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
TEST_V1_AND_V2 = (("v1", device_spec.DeviceSpecV1),
("v2", device_spec.DeviceSpecV2))
class DeviceTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters(*TEST_V1_AND_V2)
def testMerge(self, DeviceSpec): # pylint: disable=invalid-name
d = DeviceSpec.from_string("/job:muu/task:1/device:MyFunnyDevice:2")
self.assertEqual("/job:muu/task:1/device:MyFunnyDevice:2", d.to_string())
if not context.executing_eagerly():
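# merge_device composes partial device specs: fields that a nested spec
# leaves unset are inherited from the enclosing device scope, as the
# assertions below verify.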
with ops.device(device.merge_device("/device:GPU:0")):
var1 = variables.Variable(1.0)
self.assertEqual("/device:GPU:0", var1.device)
with ops.device(device.merge_device("/job:worker")):
var2 = variables.Variable(1.0)
self.assertEqual("/job:worker/device:GPU:0", var2.device)
with ops.device(device.merge_device("/device:CPU:0")):
var3 = variables.Variable(1.0)
self.assertEqual("/job:worker/device:CPU:0", var3.device)
with ops.device(device.merge_device("/job:ps")):
var4 = variables.Variable(1.0)
self.assertEqual("/job:ps/device:CPU:0", var4.device)
def testCanonicalName(self):
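# canonical_name normalizes a device string into the fixed field order
# /job/replica/task/device, regardless of the order given.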
self.assertEqual("/job:foo/replica:0",
device.canonical_name("/job:foo/replica:0"))
self.assertEqual("/job:foo/replica:0",
device.canonical_name("/replica:0/job:foo"))
self.assertEqual("/job:foo/replica:0/task:0",
device.canonical_name("/job:foo/replica:0/task:0"))
self.assertEqual("/job:foo/replica:0/task:0",
device.canonical_name("/job:foo/task:0/replica:0"))
self.assertEqual("/device:CPU:0",
device.canonical_name("/device:CPU:0"))
self.assertEqual("/device:GPU:2",
device.canonical_name("/device:GPU:2"))
self.assertEqual("/job:foo/replica:0/task:0/device:GPU:0",
device.canonical_name(
"/job:foo/replica:0/task:0/device:GPU:0"))
self.assertEqual("/job:foo/replica:0/task:0/device:GPU:0",
device.canonical_name(
"/device:GPU:0/task:0/replica:0/job:foo"))
def testCheckValid(self):
device.check_valid("/job:foo/replica:0")
with self.assertRaisesRegexp(ValueError, "invalid literal for int"):
device.check_valid("/job:j/replica:foo")
with self.assertRaisesRegexp(ValueError, "invalid literal for int"):
device.check_valid("/job:j/task:bar")
with self.assertRaisesRegexp(ValueError, "Unknown attribute: 'bar'"):
device.check_valid("/bar:muu/baz:2")
with self.assertRaisesRegexp(ValueError, "Cannot specify multiple device"):
device.check_valid("/cpu:0/device:GPU:2")
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/device_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that old style division works for Dimension."""
from __future__ import absolute_import
# from __future__ import division # Intentionally skip this import
from __future__ import print_function
import six
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
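# Background: in Python 2, without the __future__ division import, the /
# operator dispatches to __div__/__rdiv__ rather than __truediv__, and
# Dimension's __div__ behaves as floor division, e.g. (a sketch):
#   tensor_shape.Dimension(7) / tensor_shape.Dimension(3)  # -> Dimension(2)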
class DimensionDivTest(test_util.TensorFlowTestCase):
def testDivSucceeds(self):
"""Without from __future__ import division, __div__ should work."""
if six.PY2: # Old division exists only in Python 2
values = [tensor_shape.Dimension(x) for x in (3, 7, 11, None)]
for x in values:
for y in values:
self.assertEqual((x / y).value, (x // y).value)
def testRDivFail(self):
"""Without from __future__ import division, __rdiv__ is used."""
# Note: This test is related to GitHub issue 25790.
if six.PY2: # Old division exists only in Python 2
two = tensor_shape.Dimension(2)
message = (r"unsupported operand type\(s\) for /: "
r"'int' and 'Dimension', please use // instead")
with self.assertRaisesRegexp(TypeError, message):
_ = 6 / two
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/tensor_shape_div_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import os
import threading
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as eager_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.gradients # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
ops._set_call_cpp_shape_fn(common_shapes.call_cpp_shape_fn)
class ResourceTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBuildGraph(self):
with self.cached_session():
pt = test_ops.stub_resource_handle_op(container="a", shared_name="b")
test_ops.resource_create_op(pt).run()
@test_util.run_deprecated_v1
def testInitialize(self):
with self.cached_session():
handle = test_ops.stub_resource_handle_op(container="a", shared_name="b")
resources.register_resource(
handle=handle,
create_op=test_ops.resource_create_op(handle),
is_initialized_op=test_ops.resource_initialized_op(handle))
self.assertEqual(
len(
resources.report_uninitialized_resources(
resources.shared_resources()).eval()), 1)
resources.initialize_resources(resources.shared_resources()).run()
self.assertEqual(
len(
resources.report_uninitialized_resources(
resources.shared_resources()).eval()), 0)
class TensorAndShapeTest(test_util.TensorFlowTestCase):
def testShape(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertEqual(tensor_shape.unknown_shape(), t.get_shape())
t.set_shape([1, 2, 3])
self.assertEqual([1, 2, 3], t.get_shape())
def testIterable(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertTrue(isinstance(t, ops.Tensor))
with self.assertRaisesRegexp(TypeError, "iter"):
for _ in t:
pass
def testAddShape(self):
with self.cached_session():
a = array_ops.zeros([2, 3])
b = array_ops.ones([1, 3])
c = a + b
self.assertEqual([2, 3], c.shape)
@test_util.run_deprecated_v1
def testUnknownDim(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
b = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
c = a + b
self.assertEqual([2, None, 3], c.shape.as_list())
@test_util.run_deprecated_v1
def testUnknownShape(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=None)
b = array_ops.ones([1, 3])
c = a + b
self.assertEqual(tensor_shape.unknown_shape(), c.shape)
@test_util.run_deprecated_v1
def testScalarShape(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=[])
b = array_ops.ones([])
c = a + b
self.assertEqual(tensor_shape.scalar(), c.shape)
@test_util.run_deprecated_v1
def testShapeFunctionError(self):
with self.cached_session():
a = array_ops.ones([1, 2, 3])
b = array_ops.ones([4, 5, 6])
with self.assertRaisesRegexp(
ValueError, r"Dimensions must be equal, but are 2 and 5 for 'add' "
r"\(op: 'Add(V2)?'\) with input shapes: \[1,2,3\], \[4,5,6\]."):
_ = a + b
class IndexedSlicesTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testToTensor(self):
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
dense_shape = constant_op.constant([3, 2])
x = ops.IndexedSlices(values, indices, dense_shape)
tensor = ops.convert_to_tensor(x, name="tensor")
self.assertAllEqual(self.evaluate(tensor), [[2, 3], [0, 0], [5, 7]])
@test_util.run_gpu_only
def testEagerCopy(self):
with context.eager_mode():
var = variables.Variable([[0.0], [0.0], [0.0], [0.0]], name="tensor")
with backprop.GradientTape() as tape:
a = array_ops.gather(array_ops.gather(var, [0, 1]), [0, 1])
b = array_ops.gather(array_ops.gather(var, [2, 3]), [0, 1])
r = special_math_ops.einsum("ij,ij->i", a, b)
g = tape.gradient(r, [var])[0]
values = g.values if isinstance(g, ops.IndexedSlices) else g
self.assertAllEqual(values.get_shape(), [4, 1])
@test_util.run_deprecated_v1
def testNegation(self):
with self.cached_session():
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = -ops.IndexedSlices(values, indices)
self.assertAllEqual(x.values.eval(), [[-2, -3], [-5, -7]])
self.assertAllEqual(x.indices.eval(), [0, 2])
@test_util.run_deprecated_v1
def testScalarMul(self):
with self.cached_session():
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = math_ops.scalar_mul(-2, ops.IndexedSlices(values, indices))
self.assertAllEqual(x.values.eval(), [[-4, -6], [-10, -14]])
self.assertAllEqual(x.indices.eval(), [0, 2])
class NodeDefConstructorTest(test_util.TensorFlowTestCase):
def testNoArgs(self):
nodedef = ops._NodeDef("None", "bar")
self.assertProtoEquals("op: 'None' name: 'bar'", nodedef)
def testArgs(self):
nodedef = ops._NodeDef("foo", "bar", device="/device:baz:*")
self.assertProtoEquals("op:'foo' name:'bar' device:'/device:baz:*'",
nodedef)
nodedef = ops._NodeDef("foo", "bar", device=pydev.DeviceSpec(job="j"))
self.assertProtoEquals("op:'foo' name:'bar' device:'/job:j'", nodedef)
def _apply_op(g, *args, **kwargs):
op = g.create_op(*args, **kwargs)
if len(op.outputs) == 1:
return op.outputs[0]
else:
return op.outputs
class OperationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testNoInputs(self):
op = test_ops.float_output_string_output(name="myop").a.op
self.assertEqual(2, len(op.values()))
self.assertEqual(0, len(op.inputs))
self.assertEqual("myop", op.name)
float_t, label_str_t = op.values()
self.assertEqual(dtypes.float32, float_t.dtype)
self.assertEqual(op, float_t.op)
self.assertEqual(0, float_t._value_index)
self.assertEqual(0, len(float_t.consumers()))
self.assertEqual("myop", float_t._as_node_def_input())
self.assertEqual(dtypes.string, label_str_t.dtype)
self.assertEqual(op, label_str_t.op)
self.assertEqual(1, label_str_t._value_index)
self.assertEqual(0, len(label_str_t.consumers()))
self.assertEqual("myop:1", label_str_t._as_node_def_input())
self.assertProtoEquals("op:'FloatOutputStringOutput' name:'myop'",
op.node_def)
@test_util.run_deprecated_v1
def testNoOutputs(self):
op1 = test_ops.float_output(name="myop1").op
float_t, = op1.values()
op2 = test_ops.float_input(float_t, name="myop2")
self.assertEqual(0, len(op2.values()))
self.assertEqual(1, len(op2.inputs))
self.assertIs(float_t, op2.inputs[0])
self.assertEqual(1, len(float_t.consumers()))
self.assertEqual(op2, float_t.consumers()[0])
self.assertProtoEquals("op:'FloatOutput' name:'myop1'", op1.node_def)
self.assertProtoEquals("op:'FloatInput' name:'myop2' input:'myop1'",
op2.node_def)
@test_util.run_deprecated_v1
def testInputsAndOutputs(self):
op1 = test_ops.float_output(name="myop1").op
self.assertEqual(1, len(op1.values()))
float1_t, = op1.values()
op2 = test_ops.float_output_string_output(name="myop2").a.op
self.assertEqual(2, len(op2.values()))
float2_t, label2_str_t = op2.values()
# Note that we consume label2_str_t twice here.
op3 = test_ops.foo2(float1_t, label2_str_t, label2_str_t, name="myop3").d.op
self.assertEqual(2, len(op3.values()))
self.assertEqual(1, len(float1_t.consumers()))
self.assertEqual(op3, float1_t.consumers()[0])
self.assertEqual(0, len(float2_t.consumers()))
self.assertEqual(2, len(label2_str_t.consumers()))
self.assertEqual(op3, label2_str_t.consumers()[0])
self.assertEqual(op3, label2_str_t.consumers()[1])
self.assertProtoEquals("""
op:'Foo2' name:'myop3'
input:'myop1' input:'myop2:1' input:'myop2:1'
""", op3.node_def)
def testDeviceFromNodeDef(self):
op = ops.Operation(
ops._NodeDef("None", "myop", device="/job:goo/device:GPU:0"),
ops.Graph(), [], [])
self.assertEqual("/job:goo/device:GPU:0", op.device)
def testDeviceObject(self):
op = ops.Operation(ops._NodeDef("None", "myop"), ops.Graph(), [], [])
op._set_device("/job:goo/device:GPU:0")
self.assertProtoEquals(
"op:'None' name:'myop' device:'/job:goo/device:GPU:0' ", op.node_def)
op = ops.Operation(ops._NodeDef("None", "op2"), ops.Graph(), [], [])
op._set_device(
pydev.DeviceSpec(
job="muu", device_type="CPU", device_index=0))
self.assertProtoEquals(
"op:'None' name:'op2' device:'/job:muu/device:CPU:0'", op.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = ops.Operation(
ops._NodeDef("RefOutputFloatOutput", "op1"), g, [],
[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
self.assertEqual([], list(op1.inputs))
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = ops.Operation(
ops._NodeDef("RefInputFloatInput", "op2"),
g, [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals(
"op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
self.assertEqual([ref_t, nonref_t], list(op2.inputs))
op3 = ops.Operation(
ops._NodeDef("TwoFloatInputs", "op3"), g, [ref_t, nonref_t], [])
self.assertProtoEquals(
"op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testInvalidNames(self):
g = ops.Graph()
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", ""), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "_invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "-invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "/invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "invalid:0"), g)
@test_util.run_deprecated_v1
def testNoShapeFunction(self):
op = test_ops.a()
self.assertEqual(tensor_shape.unknown_shape(), op.get_shape())
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedArray(self):
values = [[2], [3], [5], [7]]
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
def testShapeTuple(self):
with self.cached_session():
c = constant_op.constant(1)
self.assertEqual(c._shape_tuple(), ()) # pylint: disable=protected-access
def testConvertToTensorEager(self):
with context.eager_mode():
t = constant_op.constant(1)
self.assertTrue(isinstance(t, ops.EagerTensor))
converted = ops.convert_to_tensor(t)
self.assertTrue(isinstance(converted, ops.EagerTensor))
converted = ops.convert_to_tensor(1)
self.assertTrue(isinstance(converted, ops.EagerTensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedTuple(self):
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(ops.convert_to_tensor(values)))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedTensors(self):
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(
[constant_op.constant(row) for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
tensor = ops.convert_to_tensor(
[[constant_op.constant(v) for v in row] for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedMix(self):
values = ([2], (3,), [constant_op.constant(5)], constant_op.constant([7]))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(((2,), (3,), (5,), (7,)), self.evaluate(tensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorPreferred(self):
values = [2, 3, 5, 7]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.float32)
self.assertEqual(dtypes.float32, tensor.dtype)
# Convert empty tensor to anything.
values = []
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.int64, tensor.dtype)
# Converting [1.23] to the preferred int64 dtype would be a type error,
# so the conversion falls back to float32 instead.
values = [1.23]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.float32, tensor.dtype)
@test_util.run_in_graph_and_eager_modes
def testConvertToInvalidTensorType(self):
with self.assertRaises(TypeError):
# Forcing an invalid dtype should fail with a type error.
values = [1.23]
ops.convert_to_tensor(values, dtype=dtypes.int64)
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorFromInvalidTensor(self):
tensor = constant_op.constant(42.0, dtype=dtypes.float32)
with self.assertRaises(ValueError):
ops.convert_to_tensor(tensor, dtype=dtypes.int32)
@test_util.run_deprecated_v1
def testNoConvert(self):
# Operation cannot be converted to Tensor.
op = control_flow_ops.no_op()
with self.assertRaisesRegexp(TypeError,
r"Can't convert Operation '.*' to Tensor"):
ops.convert_to_tensor(op)
def testStr(self):
node_def = ops._NodeDef("None", "op1")
op = ops.Operation(node_def, ops.Graph(), [], [dtypes.float32])
self.assertEqual(str(node_def), str(op))
def testRepr(self):
op = ops.Operation(
ops._NodeDef("None", "op1"), ops.Graph(), [], [dtypes.float32])
self.assertEqual("<tf.Operation 'op1' type=None>", repr(op))
@test_util.run_deprecated_v1
def testGetAttr(self):
op = test_ops.default_attrs()
self.assertEqual(op.get_attr("string_val"), b"abc")
self.assertEqual(op.get_attr("string_list_val"), [b"abc", b""])
self.assertEqual(op.get_attr("int_val"), 123)
self.assertEqual(op.get_attr("int_list_val"), [1, 2, 3])
self.assertEqual(op.get_attr("float_val"), 10.0)
self.assertEqual(op.get_attr("float_list_val"), [10.0])
self.assertEqual(op.get_attr("bool_val"), True)
self.assertEqual(op.get_attr("bool_list_val"), [True, False])
self.assertEqual(op.get_attr("shape_val"),
tensor_shape.as_shape([2, 1]).as_proto())
self.assertEqual(op.get_attr("shape_list_val"),
[tensor_shape.as_shape([]).as_proto(),
tensor_shape.as_shape([1]).as_proto()])
self.assertEqual(op.get_attr("tensor_val"),
tensor_util.make_tensor_proto(1, dtypes.int32))
self.assertEqual(op.get_attr("tensor_list_val"),
[tensor_util.make_tensor_proto(1, dtypes.int32)])
type_val = op.get_attr("type_val")
# First check that type_val is a DType, because the assertEqual below would
# pass regardless, since DType overrides __eq__.
self.assertIsInstance(type_val, dtypes.DType)
self.assertEqual(type_val, dtypes.int32)
type_list_val = op.get_attr("type_list_val")
self.assertTrue(all(isinstance(x, dtypes.DType) for x in type_list_val))
self.assertEqual(type_list_val, [dtypes.int32, dtypes.float32])
@function.Defun(dtypes.float32, func_name="MyFunc")
def func(x):
return x
op = test_ops.func_attr(func)
self.assertEqual(op.get_attr("f"),
attr_value_pb2.NameAttrList(name="MyFunc"))
# Try fetching missing attr
with self.assertRaisesRegexp(
ValueError, "Operation 'FuncAttr' has no attr named 'FakeAttr'."):
op.get_attr("FakeAttr")
# TODO(b/65162920): remove this test when users who are directly mutating the
# node_def have been updated to proper usage.
@test_util.run_deprecated_v1
def testSetAttr(self):
op = test_ops.int_attr().op
op._set_attr("foo", attr_value_pb2.AttrValue(i=2))
# TODO(skyewm): add node_def check
self.assertEqual(op.get_attr("foo"), 2)
# TODO(nolivia): test all error cases
def testAddControlInput(self):
with ops.Graph().as_default():
x = constant_op.constant(1).op
y = constant_op.constant(2).op
z = constant_op.constant(3).op
z._add_control_input(x) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x])
z._add_control_input(x) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x])
z._add_control_inputs([x, y, y]) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x, y])
self.assertEqual(x._control_outputs, [z])
@test_util.run_deprecated_v1
def testRemoveAllControlInputs(self):
a = constant_op.constant(1)
with ops.control_dependencies([a]):
b = constant_op.constant(2)
c = constant_op.constant(3)
d = constant_op.constant(4)
e = constant_op.constant(5)
with ops.control_dependencies([a, c]):
f = d + e
self.assertEqual(a.op.control_inputs, [])
self.assertEqual(b.op.control_inputs, [a.op])
self.assertEqual(f.op.control_inputs, [a.op, c.op])
a.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(a.op.control_inputs, [])
b.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(b.op.control_inputs, [])
f.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(f.op.control_inputs, [])
self.assertEqual(list(f.op.inputs), [d, e])
@test_util.run_deprecated_v1
def testControlInputCycle(self):
graph = ops.Graph()
with graph.as_default():
z = constant_op.constant(0)
x = constant_op.constant(1)
y = constant_op.constant(2)
y.op._add_control_input(z.op) # pylint: disable=protected-access
y.op._add_control_input(x.op) # pylint: disable=protected-access
x.op._add_control_input(y.op) # pylint: disable=protected-access
with self.session(graph=graph) as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Graph is invalid, contains a cycle with 2 nodes"):
self.evaluate(x)
def testUpdateInput(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = x + y
z.op._update_input(0, y) # pylint: disable=protected-access
self.assertEqual(list(z.op.inputs), [y, y])
self.assertEqual(x.consumers(), [])
self.assertEqual(y.consumers(), [z.op, z.op])
with session.Session(graph=g) as sess:
self.assertEqual(self.evaluate(z), 4)
z.op._update_input(0, x) # pylint: disable=protected-access
self.assertEqual(list(z.op.inputs), [x, y])
self.assertEqual(x.consumers(), [z.op])
self.assertEqual(y.consumers(), [z.op])
with session.Session(graph=g) as sess:
self.assertEqual(self.evaluate(z), 3)
z.op._update_input(1, y) # pylint: disable=protected-access
self.assertEqual(list(z.op.inputs), [x, y])
self.assertEqual(x.consumers(), [z.op])
self.assertEqual(y.consumers(), [z.op])
with session.Session(graph=g) as sess:
self.assertEqual(self.evaluate(z), 3)
def testUpdateInputGraphError(self):
g_0 = ops.Graph()
g_1 = ops.Graph()
with g_0.as_default():
x = constant_op.constant(1)
with g_1.as_default():
y = constant_op.constant(2)
z = y * 2
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
z.op._update_input(0, x) # pylint: disable=protected-access
def testUpdateInputTypeError(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant(0)
x = constant_op.constant("")
y = constant_op.constant(1)
z = y + w
z.op._update_input(0, x) # pylint: disable=protected-access
with session.Session(graph=g) as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Input 0 of node add was passed string from Const_1:0 incompatible "
"with expected int32"):
self.evaluate(z)
def testUpdateInputShapeError(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant(2, shape=[3, 1])
x = constant_op.constant(0, shape=[3, 1])
y = constant_op.constant(1, shape=[2, 2])
z = w + x
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Cannot update edge, incompatible shapes: \[2,2\] and \[3,1\]"):
z.op._update_input(0, y) # pylint: disable=protected-access
def testUpdateInputOutOfRange(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
with self.assertRaisesRegexp(
errors.OutOfRangeError,
r"Cannot update edge. Input index \[1\] is greater than the number of "
r"total inputs \[0\]."
):
x.op._update_input(1, x) # pylint: disable=protected-access
@test_util.enable_control_flow_v2
@test_util.run_v1_only("b/120545219")
def testAddWhileInput(self):
@eager_function.defun
def test():
output = control_flow_ops.while_loop(lambda x: x < 3, lambda x: x + 1,
[1])
while_op = output.op.inputs[0].op
self.assertEqual(while_op.type, "While")
orig_num_inputs = len(while_op.inputs)
# Make sure we can handle the while op having a control input.
while_op._add_control_input(constant_op.constant(0).op)
new_input1 = constant_op.constant(1.0)
new_input2 = constant_op.constant(True)
# Clear output shapes to bypass shape checking.
while_op._set_shape_list_attr("output_shapes", [])
while_op._set_type_list_attr("T",
[t.dtype for t in while_op.inputs] +
[new_input1.dtype, new_input2.dtype])
while_op._add_while_inputs([new_input1, new_input2])
# Can't add an edge beyond what's specified by "T"
with self.assertRaises(errors.OutOfRangeError):
while_op._add_while_inputs([new_input2])
self.assertEqual(len(while_op.inputs), orig_num_inputs + 2) # pylint: disable=g-deprecated-assert
test()
@test_util.run_deprecated_v1
def testOpDef(self):
x = constant_op.constant(0)
y = constant_op.constant(1)
z = x + y
self.assertEqual(x.op.op_def.name, "Const")
self.assertEqual(len(x.op.op_def.input_arg), 0)
self.assertEqual(len(x.op.op_def.output_arg), 1)
self.assertRegexpMatches(z.op.op_def.name, "Add(V2)?")
self.assertEqual(len(z.op.op_def.input_arg), 2)
self.assertEqual(len(z.op.op_def.output_arg), 1)
def testInputFromDifferentGraphError(self):
g_0 = ops.Graph()
g_1 = ops.Graph()
with g_0.as_default():
x = constant_op.constant(1)
with g_1.as_default():
y = constant_op.constant(2)
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
y * x # pylint: disable=pointless-statement
def testInputsAreImmutable(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
op = test_ops.int_input_int_output(x, name="myop").op
with self.assertRaisesRegexp(
AttributeError, "'_InputList' object has no attribute 'append'"):
op.inputs.append(None)
class CreateOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
op1 = g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
with g.device("/device:GPU:0"):
op2 = g.create_op(
"FloatOutputStringOutput", [], [dtypes.float32, dtypes.string], None,
name="myop2")
op3 = g.create_op(
"Foo3",
[list(op1.values())[0], list(op2.values())[1], list(op2.values())[0]],
[dtypes.float32, dtypes.int32],
None,
name="myop3")
self.assertDeviceEqual(None, op1.device)
self.assertDeviceEqual("/device:GPU:0", op2.device)
self.assertDeviceEqual(None, op3.device)
self.assertProtoEquals("name:'myop1' op:'FloatOutput'", op1.node_def)
self.assertProtoEquals(
"name:'myop2' op:'FloatOutputStringOutput' device:'/device:GPU:0'",
op2.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo3'",
op3.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = g.create_op(
"RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
name="op1")
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = g.create_op(
"RefInputFloatInput", [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals(
"op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
op3 = g.create_op("TwoFloatInputs", [ref_t, nonref_t], [], name="op3")
self.assertProtoEquals(
"op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testFinalized(self):
g = ops.Graph()
g.finalize()
with self.assertRaises(RuntimeError):
g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# Test unfinalize.
g._unsafe_unfinalize()
g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# NOTE(skyewm): these cases test the private Graph._create_op_from_tf_operation
# method. Arguably we should only test the public APIs that depend on this
# method. However, this logic is complex and tricky, and it can be difficult to
# ascertain if we have adequate coverage (e.g. a graph may run successfully if
# the control flow context isn't set properly, but a more complicated use case
# that might not be obvious to test will fail). Thus we instead explicitly test
# the low-level behavior.
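# The tests below share a pattern (a sketch of the private calls exercised):
#   c_op = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop"), [], [])
#   op = g._create_op_from_tf_operation(c_op)
# or, when the C op is created inside a control-flow body,
# g._add_new_tf_operations() to pick up ops added behind the graph's back.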
class CreateOpFromTFOperationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
c_op = ops._create_c_op(
g, ops._NodeDef("IntInputIntOutput", "myop"), [x], [])
op = g._create_op_from_tf_operation(c_op)
self.assertEqual(op.name, "myop")
self.assertEqual(op.type, "IntInputIntOutput")
self.assertEqual(len(op.outputs), 1)
self.assertEqual(op.outputs[0].shape, tensor_shape.unknown_shape())
self.assertEqual(list(op.inputs), [x])
self.assertEqual(op.control_inputs, [])
self.assertEqual(op.graph, g)
self.assertEqual(x.consumers(), [op])
self.assertIsNotNone(op.traceback)
self.assertEqual(g.get_operation_by_name("myop"), op)
self.assertEqual(g.get_tensor_by_name("myop:0"), op.outputs[0])
def testShape(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
c_op = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), [x], [])
op = g._create_op_from_tf_operation(c_op)
self.assertEqual(op.name, "myop")
self.assertEqual(op.type, "Identity")
self.assertEqual(len(op.outputs), 1)
self.assertEqual(op.outputs[0].shape, tensor_shape.matrix(2, 3))
def testUniqueName(self):
g = ops.Graph()
with g.as_default():
c_op = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop"), [], [])
c_op2 = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop_1"), [], [])
op = g._create_op_from_tf_operation(c_op)
op2 = g._create_op_from_tf_operation(c_op2)
# Create ops with same names as op1 and op2. We expect the new names to be
# uniquified.
op3 = test_ops.int_output(name="myop").op
op4 = test_ops.int_output(name="myop_1").op
self.assertEqual(op.name, "myop")
self.assertEqual(op2.name, "myop_1")
self.assertEqual(op3.name, "myop_2")
self.assertEqual(op4.name, "myop_1_1")
@test_util.run_v1_only("b/120545219")
def testCond(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def true_fn():
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "cond/myop"), [x], [])
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return x
control_flow_ops.cond(x < 10, true_fn, lambda: x)
op = g.get_operation_by_name("cond/myop")
self.assertIsNotNone(op)
self.assertEqual(op.name, "cond/myop")
self.assertEqual(op.type, "IntInput")
self.assertEqual(op.outputs, [])
op_input = op.inputs[0].op
self.assertEqual(op_input.type, "Switch")
self.assertEqual(op_input.inputs[0], x)
self.assertEqual(op.graph, g)
# pylint: disable=protected-access
self.assertIsNotNone(op._get_control_flow_context())
self.assertEqual(op._get_control_flow_context().name,
"cond/cond_text")
# pylint: enable=protected-access
@test_util.run_v1_only("b/120545219")
def testWhileLoop(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def body(i):
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
self.assertEqual(op.name, "myloop/myop")
self.assertEqual(op.type, "IntInput")
self.assertEqual(op.outputs, [])
op_input = op.inputs[0].op
self.assertEqual(op_input.type, "Enter")
self.assertEqual(list(op_input.inputs), [x])
self.assertEqual(op.graph, g)
# pylint: disable=protected-access
self.assertIsNotNone(op._get_control_flow_context())
self.assertEqual(op._get_control_flow_context().name,
"myloop/while_context")
# pylint: enable=protected-access
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithInternalControlDep(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def body(i):
c = constant_op.constant(1.0, name="c")
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
with ops.control_dependencies([c]):
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
c = g.get_operation_by_name("myloop/c")
self.assertIsNotNone(c)
# Internal control dep is preserved
self.assertEqual(op.control_inputs, [c])
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithExternalControlDep(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
c = constant_op.constant(1.0)
def body(i):
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
with ops.control_dependencies([c]):
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
# External control dep is removed and replaced with internal control dep
self.assertNotEqual(op.control_inputs[0], c.op)
self.assertIsNotNone(op.control_inputs[0]._get_control_flow_context())
class ApplyOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
t1 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop1")
with g.device("/device:GPU:0"):
t2 = _apply_op(
g, "TwoIntOutputs", [], [dtypes.int32, dtypes.int32], name="myop2")
t3 = _apply_op(
g,
"Foo1", [t1, t2[1], t2[0]], [dtypes.float32, dtypes.int32],
name="myop3")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, list))
self.assertTrue(isinstance(t3, list))
self.assertTrue(isinstance(t3[0], ops.Tensor))
self.assertEqual("myop1", t1._as_node_def_input())
self.assertEqual("myop2", t2[0]._as_node_def_input())
self.assertEqual("myop2:1", t2[1]._as_node_def_input())
self.assertEqual("myop3", t3[0]._as_node_def_input())
# Validate that we got the right ops as well
self.assertProtoEquals("name:'myop1' op:'FloatOutput'", t1.op.node_def)
self.assertProtoEquals(
"name:'myop2' op:'TwoIntOutputs' device:'/device:GPU:0'",
t2[0].op.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo1'",
t3[0].op.node_def)
def testReferenceInput(self):
g = ops.Graph()
ref_t, nonref_t = _apply_op(
g, "RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
name="op1")
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'",
ref_t.op.node_def)
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
out_2 = _apply_op(
g,
"RefInputFloatInputIntOutput", [ref_t, nonref_t], [dtypes.int32],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals(
"op:'RefInputFloatInputIntOutput' name:'op2' input:'op1' input:'op1:1'",
out_2.op.node_def)
out_3 = _apply_op(
g, "TwoFloatInputsIntOutput", [ref_t, nonref_t], [dtypes.int32],
name="op3")
self.assertProtoEquals(
"op:'TwoFloatInputsIntOutput' name:'op3' input:'op1' input:'op1:1'",
out_3.op.node_def)
class NameStackTest(test_util.TensorFlowTestCase):
def testBasics(self):
g = ops.Graph()
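# mark_as_used=False only peeks at the next available name without
# reserving it, so repeated peeks return the same candidate:
#   g.unique_name("foo", mark_as_used=False)  # -> "foo" (peek)
#   g.unique_name("foo")                      # -> "foo" (now reserved)
#   g.unique_name("foo")                      # -> "foo_1"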
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_2", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_2", g.unique_name("foo"))
self.assertEqual("foo_1_1", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_1", g.unique_name("foo_1"))
self.assertEqual("foo_1_2", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_2", g.unique_name("foo_1"))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2", mark_as_used=False))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2"))
with g.name_scope("bar"):
self.assertEqual("bar/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo", g.unique_name("foo"))
self.assertEqual("bar/foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo_1", g.unique_name("foo"))
with g.name_scope(None):
self.assertEqual("foo_3", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_3", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual(
"bar/baz/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz/foo", g.unique_name("foo"))
self.assertEqual(
"bar/baz/foo_1", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz/foo_1", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual(
"bar/baz_1/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz_1/foo", g.unique_name("foo"))
self.assertEqual(
"bar/baz_1/foo_1", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz_1/foo_1", g.unique_name("foo"))
with g.name_scope("quux"):
self.assertEqual("quux/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("quux/foo", g.unique_name("foo"))
with g.name_scope("bar"):
with g.name_scope("baz"):
self.assertEqual(
"bar_1/baz/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar_1/baz/foo", g.unique_name("foo"))
self.assertEqual("foo_4", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_4", g.unique_name("foo"))
self.assertEqual("bar_2", g.unique_name("bar", mark_as_used=False))
self.assertEqual("bar_2", g.unique_name("bar"))
@test_util.run_deprecated_v1
def testNameAndVariableScope(self):
with self.cached_session() as sess:
with sess.graph.name_scope("l0"):
with variable_scope.variable_scope("l1"):
with sess.graph.name_scope("l1") as scope:
self.assertEqual("l0/l1/l1/", scope)
self.assertEqual(
"l0/l1/l1/foo",
sess.graph.unique_name(
"foo", mark_as_used=False))
self.assertEqual("l0/l1/l1/foo", sess.graph.unique_name("foo"))
with sess.graph.name_scope("l2") as scope:
self.assertEqual("l0/l1/l2/", scope)
self.assertEqual(
"l0/l1/l2/foo",
sess.graph.unique_name(
"foo", mark_as_used=False))
self.assertEqual("l0/l1/l2/foo", sess.graph.unique_name("foo"))
def testOutOfOrderUniqueName(self):
g = ops.Graph()
self.assertEqual("foo_2", g.unique_name("foo_2"))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_3", g.unique_name("foo"))
def testUniqueNameCaseInsensitivity(self):
g = ops.Graph()
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("Foo_1", g.unique_name("Foo"))
with g.name_scope("bar"):
self.assertEqual("bar/foo", g.unique_name("foo"))
with g.name_scope("Bar"):
self.assertEqual("Bar_1/foo", g.unique_name("foo"))
def testInvalidNameRaisesError(self):
g = ops.Graph()
with g.name_scope(""): # Should not raise
pass
with g.name_scope("foo/"): # Should not raise
with g.name_scope("_bar"): # Should not raise
pass
with self.assertRaises(ValueError):
with g.name_scope("foo:0"):
pass
with self.assertRaises(ValueError):
with g.name_scope("_bar"):
pass
class NameTest(test_util.TensorFlowTestCase):
def testGenerateName(self):
g = ops.Graph()
op0 = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
self.assertEqual("TwoFloatOutputs", op0.name)
self.assertEqual("TwoFloatOutputs:0", op0.outputs[0].name)
self.assertEqual("TwoFloatOutputs:1", op0.outputs[1].name)
op1 = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertEqual("FloatOutput", op1.name)
self.assertEqual("FloatOutput:0", op1.outputs[0].name)
op2 = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertEqual("FloatOutput_1", op2.name)
self.assertEqual("FloatOutput_1:0", op2.outputs[0].name)
op3 = g.create_op("FloatOutput", [], [dtypes.float32], name="my_op")
self.assertEqual("my_op", op3.name)
self.assertEqual("my_op:0", op3.outputs[0].name)
def testNameScope(self):
g = ops.Graph()
with g.name_scope("foo") as foo:
self.assertEqual("foo/", foo)
with g.name_scope("foo2") as foo2:
self.assertEqual("foo/foo2/", foo2)
with g.name_scope(None) as empty1:
self.assertEqual("", empty1)
with g.name_scope("foo3") as foo3:
self.assertEqual("foo3/", foo3)
with g.name_scope("") as empty2:
self.assertEqual("", empty2)
self.assertEqual("FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
with g.name_scope("bar") as scope:
self.assertEqual("bar/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
self.assertEqual("bar/FloatOutput_1",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
# If you use the value from "with .. as", that values is used as-is.
self.assertEqual(
"bar", g.create_op(
"FloatOutput", [], [dtypes.float32], name=scope).name)
with g.name_scope("baz") as scope:
with g.name_scope("quux"):
self.assertEqual("baz/quux/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
# If you use the value from the enclosing "with .. as", nothing is pushed.
with g.name_scope(scope):
self.assertEqual("baz/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
self.assertEqual(
"baz", g.create_op(
"FloatOutput", [], [dtypes.float32], name=scope).name)
self.assertEqual(
"trailing",
g.create_op(
"FloatOutput", [], [dtypes.float32], name="trailing/").name)
with g.name_scope("bar"):
self.assertEqual("bar_1/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
with g.name_scope("bar/"):
self.assertEqual("bar/FloatOutput_2",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
class DeviceTest(test_util.TensorFlowTestCase):
def testNoDevice(self):
g = ops.Graph()
op = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertDeviceEqual(None, op.device)
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput" }
""", gd)
def testEagerBackingDevice(self):
with context.eager_mode():
with ops.device("/device:CPU:0"):
t = constant_op.constant(1.0)
self.assertRegexpMatches(t.device, "/device:CPU:0")
self.assertRegexpMatches(t.backing_device, "/device:CPU:0")
def testDevicePartialString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testDeviceFull(self):
g = ops.Graph()
with g.device(
pydev.DeviceSpec(
job="worker", replica=2, task=0, device_type="CPU",
device_index=3)):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/task:0/device:CPU:3" }
""", gd)
def testNesting(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:3/task:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testNestingString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:3/task:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testNestingOverrideGpuCpu(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:2/device:GPU:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:2/device:GPU:2" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNestingWithMergeDeviceFunction(self):
g = ops.Graph()
with g.device(pydev.merge_device("/device:GPU:0")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:worker")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/device:CPU:0")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device(None)):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/device:GPU:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/device:CPU:0" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStrings(self):
g = ops.Graph()
with g.device("/device:GPU:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:ps"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(""):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/device:GPU:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/device:CPU:0" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStringWildcard(self):
g = ops.Graph()
with g.device("/device:GPU:7"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:GPU:*"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:*"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:5"):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:7" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/device:GPU:7" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/device:CPU:*" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/device:CPU:5" }
""", gd)
def testNestingErrorGraph(self):
g = ops.Graph()
scope = g.device("/device:GPU:8")
scope.__enter__()
with g.device("/device:GPU:9"):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
def testNestingErrorEager(self):
with context.eager_mode():
scope = ops.device("/device:CPU:0")
scope.__enter__()
with ops.device(None):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
def testNoneClearsDefault(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNoneIgnoresOuterDeviceFunction(self):
g = ops.Graph()
with g.device(lambda op: "/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def _overwritingDeviceFunction(self, unused_op):
# This device function unconditionally overwrites the device of ops.
#
# NOTE(mrry): Writing device functions like this is not
# recommended. Instead, in most cases you should use
# `pydev.merge_device("/job:ps")` or simply `"/job:ps"` as the
# argument to `tf.device()` and the device component will be merged in.
return "/job:overwrite"
def testOverwritingBehavior(self):
g = ops.Graph()
with g.device(self._overwritingDeviceFunction):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:ps"): # Will be overwritten.
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")): # Will be overwritten.
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device("/job:ps"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device(pydev.merge_device("/job:ps")):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps" }
""", gd)
class MultithreadedGraphStateTest(test_util.TensorFlowTestCase):
class TestThread(threading.Thread):
def __init__(self, graph, replica_id):
super(MultithreadedGraphStateTest.TestThread, self).__init__()
self._graph = graph
self._replica_id = replica_id
# This thread sets this event when it mutated the graph. The caller can
# wait for that.
self.has_mutated_graph = threading.Event()
# This thread waits for when it should continue. The caller can set this
# event.
self.should_continue = threading.Event()
def run(self):
# Mutate a graph's stack, then set `has_mutated_graph`, then wait for
# `should_continue`, then add an op to the graph affected by the graph's
# stack.
raise NotImplementedError("must be implemented in descendants")
def testDeviceFunctionStack(self):
class DeviceSettingThread(self.TestThread):
def run(self):
with g.device("/job:worker/replica:{}".format(self._replica_id)):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
# If `switch_to_thread_local` isn't called, then device placement of the ops
# below is not deterministic.
g.switch_to_thread_local()
threads = [DeviceSettingThread(g, i) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput_0" op: "FloatOutput"
device: "/job:worker/replica:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:1" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testColocateWith(self):
class ColocatingThread(self.TestThread):
def __init__(self, graph, replica_id, op_to_colocate_with):
super(ColocatingThread, self).__init__(graph, replica_id)
self._op_to_colocate_with = op_to_colocate_with
def run(self):
with g.colocate_with(self._op_to_colocate_with):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
ops_to_colocate_with = []
for i in range(3):
with g.device("/job:worker/replica:{}".format(i)):
ops_to_colocate_with.append(
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="ColocateWithMe_{}".format(i)))
# If `switch_to_thread_local` isn't called, then `device` and `attr` values
# for the ops below are not deterministic.
g.switch_to_thread_local()
threads = [
ColocatingThread(g, i, ops_to_colocate_with[i]) for i in range(3)
]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "ColocateWithMe_0" op: "FloatOutput"
device: "/job:worker/replica:0" }
node { name: "ColocateWithMe_1" op: "FloatOutput"
device: "/job:worker/replica:1" }
node { name: "ColocateWithMe_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_0" op: "FloatOutput"
device: "/job:worker/replica:0"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_0"}}}}
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:1"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_1"}}}}
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_2"}}}}
""", gd)
def testControlDependencies(self):
class DependingThread(self.TestThread):
def __init__(self, graph, replica_id, dependency_op):
super(DependingThread, self).__init__(graph, replica_id)
self._dependency_op = dependency_op
def run(self):
with g.control_dependencies([self._dependency_op]):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
dependency_ops = []
for i in range(3):
dependency_ops.append(
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="ColocateWithMe_{}".format(i)))
# If `switch_to_thread_local` isn't called, then `input` values for the ops
# below are not deterministic.
g.switch_to_thread_local()
threads = [DependingThread(g, i, dependency_ops[i]) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "ColocateWithMe_0" op: "FloatOutput" }
node { name: "ColocateWithMe_1" op: "FloatOutput" }
node { name: "ColocateWithMe_2" op: "FloatOutput" }
node { name: "FloatOutput_0" op: "FloatOutput"
input: "^ColocateWithMe_0" }
node { name: "FloatOutput_1" op: "FloatOutput"
input: "^ColocateWithMe_1" }
node { name: "FloatOutput_2" op: "FloatOutput"
input: "^ColocateWithMe_2" }
""", gd)
def testNameStack(self):
class NameSettingThread(self.TestThread):
def run(self):
with g.name_scope("foo"):
op1 = g.create_op("FloatOutput", [], [dtypes.float32])
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
op2 = g.create_op("FloatOutput", [], [dtypes.float32])
self.result = (op1, op2)
g = ops.Graph()
threads = [NameSettingThread(g, i) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
suffixes = ["", "_1", "_2"]
for t, s in zip(threads, suffixes):
self.assertEquals("foo" + s + "/FloatOutput", t.result[0].name)
self.assertEquals("foo" + s + "/FloatOutput_1", t.result[1].name)
class ObjectWithName(object):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
class CollectionTest(test_util.TensorFlowTestCase):
def test_get_collections(self):
g = ops.Graph()
self.assertSequenceEqual(g.collections, [])
g.add_to_collection("key", 12)
g.add_to_collection("key", 15)
self.assertSequenceEqual(g.collections, ["key"])
g.add_to_collection("other", "foo")
self.assertSequenceEqual(sorted(g.collections), ["key", "other"])
self.assertSequenceEqual(
sorted(g.get_all_collection_keys()), ["key", "other"])
def test_add_to_collection(self):
g = ops.Graph()
g.add_to_collection("key", 12)
g.add_to_collection("other", "foo")
g.add_to_collection("key", 34)
# Note that only blank1 matches the scoped get_collection("blah", scope)
# calls below.
g.add_to_collection("blah", 27)
blank1 = ObjectWithName("prefix/foo")
g.add_to_collection("blah", blank1)
blank2 = ObjectWithName("junk/foo")
g.add_to_collection("blah", blank2)
self.assertEqual([12, 34], g.get_collection("key"))
self.assertEqual([], g.get_collection("nothing"))
self.assertEqual([27, blank1, blank2], g.get_collection("blah"))
self.assertEqual([blank1], g.get_collection("blah", "prefix"))
self.assertEqual([blank1], g.get_collection("blah", ".*x"))
# Make sure that get_collection() returns a first-level
# copy of the collection, while get_collection_ref() returns
# the original list.
other_collection_snapshot = g.get_collection("other")
other_collection_ref = g.get_collection_ref("other")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo"], other_collection_ref)
g.add_to_collection("other", "bar")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo", "bar"], other_collection_ref)
self.assertEqual(["foo", "bar"], g.get_collection("other"))
self.assertTrue(other_collection_ref is g.get_collection_ref("other"))
# Verify that getting an empty collection ref returns a modifiable list.
empty_coll_ref = g.get_collection_ref("empty")
self.assertEqual([], empty_coll_ref)
empty_coll = g.get_collection("empty")
self.assertEqual([], empty_coll)
self.assertFalse(empty_coll is empty_coll_ref)
empty_coll_ref2 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref2 is empty_coll_ref)
# Add to the collection.
empty_coll_ref.append("something")
self.assertEqual(["something"], empty_coll_ref)
self.assertEqual(["something"], empty_coll_ref2)
self.assertEqual([], empty_coll)
self.assertEqual(["something"], g.get_collection("empty"))
empty_coll_ref3 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref3 is empty_coll_ref)
def test_add_to_collections_uniquify(self):
g = ops.Graph()
g.add_to_collections([1, 2, 1], "key")
# Make sure "key" is not added twice
self.assertEqual(["key"], g.get_collection(1))
def test_add_to_collections_from_list(self):
g = ops.Graph()
g.add_to_collections(["abc", "123"], "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_tuple(self):
g = ops.Graph()
g.add_to_collections(("abc", "123"), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_generator(self):
g = ops.Graph()
def generator():
yield "abc"
yield "123"
g.add_to_collections(generator(), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_set(self):
g = ops.Graph()
g.add_to_collections(set(["abc", "123"]), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_string(self):
g = ops.Graph()
g.add_to_collections("abc", "key")
self.assertEqual(["key"], g.get_collection("abc"))
def test_default_graph(self):
with ops.Graph().as_default():
ops.add_to_collection("key", 90)
ops.add_to_collection("key", 100)
# Collections are ordered.
self.assertEqual([90, 100], ops.get_collection("key"))
def test_defun(self):
with context.eager_mode():
@eager_function.defun
def defun():
ops.add_to_collection("int", 1)
ops.add_to_collection("tensor", constant_op.constant(2))
@eager_function.defun
def inner_defun():
self.assertEqual(ops.get_collection("int"), [1])
three = ops.get_collection("tensor")[0] + ops.get_collection("int")[0]
ops.add_to_collection("int", 2)
self.assertEqual(ops.get_collection("int"), [1, 2])
ops.add_to_collection("foo", "bar")
self.assertEqual(ops.get_collection("foo"), ["bar"])
return three
self.assertEqual(ops.get_collection("int"), [1])
three = inner_defun()
self.assertEqual(ops.get_collection("int"), [1])
self.assertEqual(ops.get_collection("foo"), [])
return three
three = defun()
self.assertEqual(three.numpy(), 3)
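# The copy-vs-reference distinction test_add_to_collection exercises,
# restated compactly as a sketch (defined but never called):
# `get_collection` returns a snapshot list, `get_collection_ref` the live
# list stored in the graph.
def _collection_ref_vs_copy_sketch():
  g = ops.Graph()
  g.add_to_collection("k", "foo")
  snapshot = g.get_collection("k")  # a copy, frozen at ["foo"]
  live = g.get_collection_ref("k")  # the graph's own list
  g.add_to_collection("k", "bar")
  return snapshot, live  # (["foo"], ["foo", "bar"])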
ops.NotDifferentiable("FloatOutput")
@ops.RegisterGradient("CopyOp")
def _CopyGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
@ops.RegisterGradient("copy_override")
def _CopyOverrideGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
class RegistrationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testRegisterGradients(self):
x = test_ops.float_output()
y = test_ops.copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyGrad, fn)
def testOverrideGradients(self):
g = ops.Graph()
with g.as_default():
x = test_ops.float_output()
with g.gradient_override_map({"CopyOp": "copy_override"}):
y = test_ops.copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyOverrideGrad, fn)
def testNonExistentOverride(self):
g = ops.Graph()
with g.as_default():
x = test_ops.float_output()
with g.gradient_override_map({"CopyOp": "unknown_override"}):
y = test_ops.copy_op(x)
with self.assertRaisesRegexp(LookupError, "unknown_override"):
ops.get_gradient_function(y.op)
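# How the two gradient-lookup mechanisms above fit together, as a sketch.
# The op-type names "MyOp" and "my_override" are hypothetical, and because
# `RegisterGradient` writes to a global registry (registering the same name
# twice raises), this function is illustrative only and never called.
def _gradient_registration_sketch():
  @ops.RegisterGradient("MyOp")  # global registry, keyed by op type
  def _my_op_grad(op, grad):  # pylint: disable=unused-variable
    del op
    return grad

  g = ops.Graph()
  with g.as_default():
    # Within this context, gradient lookups for ops of type "MyOp" resolve
    # to the function registered under "my_override" instead.
    with g.gradient_override_map({"MyOp": "my_override"}):
      pass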
class ComparisonTest(test_util.TensorFlowTestCase):
def testMembershipAllowed(self):
g = ops.Graph()
t1 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop1")
t2 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop2")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, ops.Tensor))
self.assertTrue(t1 in [t1])
self.assertTrue(t1 not in [t2])
class ControlDependenciesTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
g = ops.Graph()
with g.as_default():
# Creating unregistered ops with _apply_op() doesn't work with the C API
# TODO(skyewm): address this more consistently. Possible solutions are
# to use registered ops in all tests, create a way to register ops in
# Python tests, or conditionally disable the op registration check in
# the C API.
a = constant_op.constant(1.0)
b = constant_op.constant(1.0)
with g.control_dependencies([a]):
c = constant_op.constant(1.0)
d = array_ops.identity(b)
e = array_ops.identity(c)
self.assertEqual(c.op.control_inputs, [a.op])
self.assertEqual(d.op.control_inputs, [a.op])
# e is already dominated by a through its data input c, so the control
# edge is pruned.
self.assertEqual(e.op.control_inputs, [])
@test_util.run_in_graph_and_eager_modes
def testEager(self):
def future():
future.calls += 1
return constant_op.constant(2.0)
future.calls = 0
if context.executing_eagerly():
a = constant_op.constant(1.0)
b = future
with ops.control_dependencies([a, b]):
c = constant_op.constant(3.0)
self.assertEqual(future.calls, 1)
else:
g = ops.Graph()
with g.as_default():
a = constant_op.constant(1.0)
b = future()
with g.control_dependencies([a, b]):
c = constant_op.constant(3.0)
self.assertEqual(c.op.control_inputs, [a.op, b.op])
self.assertEqual(future.calls, 1)
def testBasicWithConversion(self):
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
class ConvertibleObj(object):
def _as_graph_element(self):
return a
with g.control_dependencies([ConvertibleObj()]):
c = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(c.op.control_inputs, [a.op])
def testNested(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1, a_2, a_3, a_4]):
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
b_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_1.op, a_2.op, a_3.op, a_4.op],
b_1.op.control_inputs)
self.assertItemsEqual(b_1.op.control_inputs, b_2.op.control_inputs)
def testClear(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies(None):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
# deps [a_3, a_4]
b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps = [a_3]
b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to None
b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1, a_2]
b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1]
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies(None):
# deps are None again
b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
self.assertItemsEqual([], b_none.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([], b_none2.op.control_inputs)
def testComplex(self):
g = ops.Graph()
# Usage pattern:
# * Nodes a_i are constants defined at the outermost scope, and are used
# as control inputs for the ith nested scope.
# * Nodes b_i are defined as Mul(a_3, a_4) at each scope. (Here "Mul" is
#   shorthand for the two-input test op TwoFloatInputsFloatOutput.)
# * Nodes c_i are defined as Mul(a_1, b_1) at each scope.
# * Nodes d_i are defined as Mul(b_i, c_i) at each scope.
# * Nodes e_i are defined as Mul(e_(i-1), e_(i-1)) at each scope i > 1.
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
b_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_1, c_1],
[dtypes.float32])
e_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_2]):
b_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_2, c_2],
[dtypes.float32])
e_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_1, e_1],
[dtypes.float32])
with g.control_dependencies([a_3]):
b_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_3, c_3],
[dtypes.float32])
e_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_2, e_2],
[dtypes.float32])
with g.control_dependencies([a_4]):
b_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_4, c_4],
[dtypes.float32])
e_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_3, e_3],
[dtypes.float32])
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_2.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_3.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_4.op.control_inputs)
self.assertItemsEqual([], c_1.op.control_inputs)
self.assertItemsEqual([a_2.op], c_2.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op], c_3.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op, a_4.op], c_4.op.control_inputs)
self.assertItemsEqual([], d_1.op.control_inputs)
self.assertItemsEqual([], d_2.op.control_inputs)
self.assertItemsEqual([], d_3.op.control_inputs)
self.assertItemsEqual([], d_4.op.control_inputs)
self.assertItemsEqual([a_1.op], e_1.op.control_inputs)
self.assertItemsEqual([a_2.op], e_2.op.control_inputs)
self.assertItemsEqual([a_3.op], e_3.op.control_inputs)
self.assertItemsEqual([a_4.op], e_4.op.control_inputs)
def testRepeatedDependency(self):
g = ops.Graph()
a = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
a_0, a_1 = a.outputs
with g.control_dependencies([a_0]):
b = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
c = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(b.op.control_inputs, [a])
self.assertEqual(c.op.control_inputs, [a])
def testNoControlDependencyWithDataDependency(self):
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a]):
b = _apply_op(g, "Identity", [a], [dtypes.float32])
self.assertEqual(b.op.control_inputs, [])
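# The two pruning rules ControlDependenciesTest establishes, in one sketch
# (defined but never called): `control_dependencies(None)` clears the
# current control-dependency stack, and a control input that is already
# reachable through a data input is dropped.
def _control_dependency_rules_sketch():
  g = ops.Graph()
  a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
  with g.control_dependencies([a]):
    with g.control_dependencies(None):  # clears the stack
      cleared = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    redundant = _apply_op(g, "Identity", [a], [dtypes.float32])
  # Both ops end up with no control inputs: ([], [])
  return cleared.op.control_inputs, redundant.op.control_inputs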
class OpScopeTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testNames(self):
with ops.name_scope("foo") as foo:
self.assertEqual("foo/", foo)
with ops.name_scope("foo2") as foo2:
self.assertEqual("foo/foo2/", foo2)
with ops.name_scope(None) as empty1:
self.assertEqual("", empty1)
with ops.name_scope("foo3") as foo3:
self.assertEqual("foo3/", foo3)
with ops.name_scope("") as empty2:
self.assertEqual("", empty2)
with ops.name_scope("foo/") as outer_foo:
self.assertEqual("foo/", outer_foo)
with ops.name_scope("") as empty3:
self.assertEqual("", empty3)
with ops.name_scope("foo4") as foo4:
self.assertEqual("foo/foo4/", foo4)
with ops.name_scope("foo5//") as foo5:
self.assertEqual("foo5//", foo5)
with ops.name_scope("foo6") as foo6:
self.assertEqual("foo5//foo6/", foo6)
with ops.name_scope("/") as foo7:
self.assertEqual("/", foo7)
with ops.name_scope("//") as foo8:
self.assertEqual("//", foo8)
with ops.name_scope("a//b/c") as foo9:
self.assertEqual("foo/a//b/c/", foo9)
with ops.name_scope("a//b/c") as foo10:
self.assertEqual("a//b/c/", foo10)
@test_util.run_in_graph_and_eager_modes
def testEagerDefaultScopeName(self):
with ops.name_scope(None, "default") as scope:
self.assertEqual(scope, "default/")
with ops.name_scope(None, "default2") as scope2:
self.assertEqual(scope2, "default/default2/")
@test_util.run_in_graph_and_eager_modes
def testNameScopeV2IsReEntrant(self):
foo = ops.name_scope_v2("foo")
bar = ops.name_scope_v2("bar")
with foo as scope_name:
self.assertEqual("foo/", scope_name)
with foo as scope_name:
self.assertEqual("foo/foo/", scope_name)
with bar as scope_name:
self.assertEqual("foo/bar/", scope_name)
with foo as scope_name:
self.assertEqual("foo/bar/foo/", scope_name)
with bar as scope_name:
self.assertEqual("bar/", scope_name)
@test_util.run_deprecated_v1
def testNoScopeName(self):
g0 = ops.Graph()
values = [
g0.create_op("A", [], [dtypes.float32]),
g0.create_op("B", [], [dtypes.float32])
]
with self.assertRaises(ValueError):
with ops.name_scope(None, values=values):
pass
with self.assertRaises(ValueError):
with ops.name_scope(None, None, values):
pass
@test_util.run_deprecated_v1
def testEmptyScopeName(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
with ops.name_scope("", values=[a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope("", "my_default_scope", [a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
@test_util.run_deprecated_v1
def testDefaultScopeName(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
scope_name = "my_scope"
default_scope_name = "my_default_scope"
with ops.name_scope(scope_name, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope(None, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % default_scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
with self.assertRaises(TypeError):
with ops.name_scope(scope_name, [a, b]):
pass
def _testGraphElements(self, graph_elements):
scope_name = "my_scope"
with ops.name_scope(scope_name, values=graph_elements) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(graph_elements[0].graph, ops.get_default_graph())
g1 = ops.Graph()
a = g1.create_op("A", [], [dtypes.float32])
with self.assertRaises(ValueError):
with ops.name_scope(scope_name, values=graph_elements + [a]):
pass
@test_util.run_deprecated_v1
def testTensor(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
self._testGraphElements([a, b])
@test_util.run_deprecated_v1
def testSparseTensor(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
sparse = sparse_tensor.SparseTensor(
_apply_op(g0, "Int64Output", [], [dtypes.int64]),
_apply_op(g0, "FloatOutput", [], [dtypes.float32]),
_apply_op(g0, "Int64Output", [], [dtypes.int64]))
self._testGraphElements([a, sparse, b])
@test_util.run_deprecated_v1
def testVariable(self):
g0 = ops.Graph()
with g0.as_default():
variable = variables.Variable([1.0])
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
self._testGraphElements([a, variable, b])
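# The three-argument form of `ops.name_scope` exercised by OpScopeTest,
# summarized as a sketch (defined but never called): `name` wins when given,
# `default_name` fills in when `name` is None, and `values` pins the scope
# to the graph that those tensors belong to.
def _name_scope_signature_sketch(tensors):
  with ops.name_scope(None, "fallback", tensors) as scope:
    # scope == "fallback/" (uniquified if needed), opened in tensors' graph.
    return scope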
class InitScopeTest(test_util.TensorFlowTestCase):
def testClearsControlDependencies(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.as_default():
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with ops.init_scope():
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
# deps [a_3, a_4]
b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps = [a_3]
b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to None
b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1, a_2]
b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1]
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with ops.init_scope():
# deps are None again
b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
self.assertItemsEqual([], b_none.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([], b_none2.op.control_inputs)
def testLiftsOpsFromFunctions(self):
g0 = ops.Graph()
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
g2 = ops.Graph()
g2._building_function = True # pylint: disable=protected-access
with g0.as_default():
with g1.as_default():
with g2.as_default():
with ops.init_scope():
_ = constant_op.constant(1.0)
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 1)
def testPreservesDevices(self):
g0 = ops.Graph()
with g0.as_default(), ops.device("CPU:0"):
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
with g1.as_default():
with ops.device("GPU:0"):
with ops.init_scope():
# init_scope should preserve the device stack set under `g1`.
on_gpu = constant_op.constant(1.0)
self.assertEqual(on_gpu.device, "/device:GPU:0")
still_on_gpu = constant_op.constant(1.0)
self.assertEqual(still_on_gpu.device, "/device:GPU:0")
blank = constant_op.constant(1.0)
self.assertEqual(blank.device, "")
with ops.init_scope():
now_on_cpu = constant_op.constant(1.0)
self.assertEqual(now_on_cpu.device, "/device:CPU:0")
on_cpu = constant_op.constant(1.0)
self.assertEqual(on_cpu.device, "/device:CPU:0")
def testComposes(self):
g0 = ops.Graph()
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
g2 = ops.Graph()
g2._building_function = True # pylint: disable=protected-access
g3 = ops.Graph()
g3._building_function = False # pylint: disable=protected-access
with g0.as_default():
with g1.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 1)
with g2.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
with g3.as_default():
with ops.init_scope():
# This op should be lifted into g3, because g3 is not building a
# function.
_ = constant_op.constant(1.0)
self.assertIs(g3, ops.get_default_graph())
self.assertEqual(len(g3.get_operations()), 1)
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 2)
def testEscapesToEagerContext(self):
g = ops.Graph()
g._building_function = True # pylint: disable=protected-access
with context.eager_mode():
with context.graph_mode():
with g.as_default():
with ops.init_scope():
# Because g is building a function, init_scope should
# escape out to the eager context.
self.assertTrue(context.executing_eagerly())
# g should be reinstated as the default graph, and the
# graph context should be re-entered.
self.assertIs(g, ops.get_default_graph())
self.assertFalse(context.executing_eagerly())
def testStaysInEagerWhenOnlyEagerContextActive(self):
with context.eager_mode():
with ops.init_scope():
self.assertTrue(context.executing_eagerly())
self.assertTrue(context.executing_eagerly())
def testEscapesDefunWhenInEagerMode(self):
def function_with_variables():
with ops.init_scope():
self.v = resource_variable_ops.ResourceVariable(3)
return self.v.assign_add(1)
with context.eager_mode():
# Each invocation of function_with_variables recreates a variable.
self.assertEqual(4, int(function_with_variables()))
self.assertEqual(4, int(function_with_variables()))
compiled = eager_function.defun(function_with_variables)
# The init_scope in function_with_variables lifts the variable out
# of the graph function constructed by defun; hence,
# compiled now appears to be stateful.
self.assertEqual(4, int(compiled()))
self.assertEqual(5, int(compiled()))
def testEscapesDefunWhenInGraphMode(self):
def function_with_variables(name):
with ops.init_scope():
_ = variable_scope.get_variable(name, shape=(1,))
g = ops.Graph()
with g.as_default():
with self.cached_session():
# First ensure that graphs that are not building functions are
# not escaped.
function_with_variables("foo")
with self.assertRaisesRegexp(ValueError,
r"Variable foo already exists.*"):
# This will fail because reuse is not set to True.
function_with_variables("foo")
compiled = eager_function.defun(function_with_variables)
compiled("bar")
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)
# The second call to `compiled` should not create variables: the
# init_scope has lifted the variable creation code out of the defun.
compiled("bar")
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)
def testEscapesNestedDefun(self):
def inner_function():
with ops.init_scope():
self.v = resource_variable_ops.ResourceVariable(1)
return self.v.assign_add(2)
def outer_function(inner=None):
with ops.init_scope():
self.v0 = resource_variable_ops.ResourceVariable(0)
return self.v0.assign_add(1) + inner()
with context.eager_mode():
# Each invocation of outer_function recreates variables.
self.assertEqual(4, int(outer_function(inner=inner_function)))
self.assertEqual(4, int(outer_function(inner=inner_function)))
compiled_inner = eager_function.defun(inner_function)
compiled_outer = eager_function.defun(outer_function)
# The init_scope lifts variables out of the graph functions
# constructed by defun; hence, compiled_outer should now appear to be
# stateful.
self.assertEqual(4, int(compiled_outer(inner=compiled_inner)))
self.assertEqual(7, int(compiled_outer(inner=compiled_inner)))
@test_util.run_v1_only("b/120545219")
def testFallsBackToGlobalGraphWhenAllGraphsAreBuildingFunctions(self):
with context.graph_mode():
ops.reset_default_graph()
# This doesn't push anything onto the graph stack, but it does
# set the stack's global graph.
global_graph = ops.get_default_graph()
fn_graph = ops.Graph()
# pylint: disable=protected-access
fn_graph._building_function = True
self.assertEqual(len(ops._default_graph_stack.stack), 0)
with fn_graph.as_default():
self.assertEqual(len(ops._default_graph_stack.stack), 1)
with ops.init_scope():
self.assertGreater(len(ops._default_graph_stack.stack), 1)
dummy = constant_op.constant(1.0)
self.assertEqual(len(ops._default_graph_stack.stack), 1)
# Note that the global graph is _not_ on the graph stack.
self.assertEqual(len(ops._default_graph_stack.stack), 0)
# Ensure that `dummy` was added to the global graph.
self.assertEqual(global_graph, dummy.graph)
# pylint: enable=protected-access
def testInstallsDefaultGraphWhenGraphStackIsEmptyInGraphMode(self):
with context.graph_mode():
# pylint: disable=protected-access
self.assertEqual(len(ops._default_graph_stack.stack), 0)
with ops.init_scope():
self.assertGreater(len(ops._default_graph_stack.stack), 0)
self.assertEqual(len(ops._default_graph_stack.stack), 0)
# pylint: enable=protected-access
def testPreservesNameScopeInGraphConstruction(self):
with ops.Graph().as_default():
function_graph = ops.Graph()
with function_graph.as_default():
with ops.name_scope("inner"), ops.init_scope():
self.assertEqual(ops.get_name_scope(), "inner")
self.assertEqual(ops.get_name_scope(), "")
def testEnteringGraphFromEagerIsSticky(self):
with context.eager_mode():
g = ops.Graph()
with g.as_default():
with ops.init_scope():
self.assertFalse(context.executing_eagerly())
self.assertEqual(g, ops.get_default_graph())
def testMixGraphEager(self):
with context.eager_mode():
c = constant_op.constant(1.0)
with ops.Graph().as_default():
with self.assertRaisesRegexp(
RuntimeError, "Attempting to capture an EagerTensor"):
math_ops.add(c, c)
c2 = constant_op.constant(2.0)
with self.assertRaisesRegexp(
TypeError, "Graph tensors"):
math_ops.add(c2, c2)
def testPreservesNameScopeInEagerExecution(self):
with context.eager_mode():
def foo():
with ops.name_scope("inner"), ops.init_scope():
if context.executing_eagerly():
# A trailing slash is always appended when eager execution is
# enabled.
self.assertEqual(context.context().scope_name, "inner/")
else:
self.assertEqual(ops.get_name_scope(), "inner")
foo()
self.assertEqual(ops.get_name_scope(), "")
foo_compiled = eager_function.defun(foo)
foo_compiled()
self.assertEqual(ops.get_name_scope(), "")
def testExecutingEagerlyOutsideFunctions(self):
@def_function.function
def f():
return ops.executing_eagerly_outside_functions()
with context.graph_mode():
self.assertFalse(ops.executing_eagerly_outside_functions())
with session.Session():
# Need self.evaluate for these as the return type of functions is
# tensors.
self.assertFalse(self.evaluate(f()))
with context.eager_mode():
self.assertTrue(ops.executing_eagerly_outside_functions())
self.assertTrue(f())
with ops.Graph().as_default():
self.assertFalse(ops.executing_eagerly_outside_functions())
with session.Session():
self.assertFalse(self.evaluate(f()))
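# What `init_scope` buys function-building code, as a sketch (defined but
# never called; uses `def_function` and `resource_variable_ops`, both
# imported in this file): inside a traced function, `init_scope` escapes to
# the outermost context, so the variable below is created once, eagerly,
# instead of being rebuilt inside every trace.
def _init_scope_lifting_sketch():
  state = []

  @def_function.function
  def counter():
    if not state:
      with ops.init_scope():  # escape the function graph
        state.append(resource_variable_ops.ResourceVariable(0))
    return state[0].assign_add(1)

  return counter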
class GraphTest(test_util.TensorFlowTestCase):
def setUp(self):
ops.reset_default_graph()
def _AssertDefault(self, expected):
self.assertIs(expected, ops.get_default_graph())
def testResetDefaultGraphNesting(self):
g0 = ops.Graph()
with self.assertRaises(AssertionError):
with g0.as_default():
ops.reset_default_graph()
def testGraphContextManagerCancelsEager(self):
with context.eager_mode():
with ops.Graph().as_default():
self.assertFalse(context.executing_eagerly())
def testGraphContextManager(self):
g0 = ops.Graph()
with g0.as_default() as g1:
self.assertIs(g0, g1)
def testDefaultGraph(self):
orig = ops.get_default_graph()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
g0 = ops.Graph()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
context_manager_0 = g0.as_default()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
with context_manager_0 as g0:
self._AssertDefault(g0)
with ops.Graph().as_default() as g1:
self.assertTrue(ops.has_default_graph())
self._AssertDefault(g1)
self._AssertDefault(g0)
self._AssertDefault(orig)
self.assertFalse(ops.has_default_graph())
def testPreventFeeding(self):
g = ops.Graph()
a = constant_op.constant(2.0)
self.assertTrue(g.is_feedable(a))
g.prevent_feeding(a)
self.assertFalse(g.is_feedable(a))
@test_util.run_deprecated_v1
def testPreventFetching(self):
g = ops.Graph()
a = constant_op.constant(2.0)
self.assertTrue(g.is_fetchable(a))
g.prevent_fetching(a.op)
self.assertFalse(g.is_fetchable(a))
def testAsGraphElementConversions(self):
class ConvertibleObj(object):
def _as_graph_element(self):
return "FloatOutput:0"
class NonConvertibleObj(object):
pass
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(a, g.as_graph_element(ConvertibleObj()))
with self.assertRaises(TypeError):
g.as_graph_element(NonConvertibleObj())
# Regression test against creating custom __del__ functions in classes
# involved in cyclic references, e.g. Graph and Operation. (Python won't gc
# cycles that require calling a __del__ method, because the __del__ method
# can theoretically increase the object's refcount to "save" it from gc, and
# any already-deleted objects in the cycle would have to be restored.)
def testGarbageCollected(self):
# Create a graph we can delete and a weak reference to monitor if it's gc'd
g = ops.Graph()
g_ref = weakref.ref(g)
# Create some ops
with g.as_default():
a = constant_op.constant(2.0)
b = constant_op.constant(3.0)
c = math_ops.add(a, b)
# Create a session we can delete
with session.Session(graph=g) as sess:
self.evaluate(c)
# Delete all references and trigger gc
del g
del a
del b
del c
del sess
gc.collect()
self.assertIsNone(g_ref())
def testRunnableAfterInvalidShape(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError):
math_ops.add([1, 2], [1, 2, 3])
a = constant_op.constant(1)
with session.Session() as sess:
self.evaluate(a)
def testRunnableAfterInvalidShapeWithKernelLabelMap(self):
g = ops.Graph()
with g.as_default():
with g._kernel_label_map({"KernelLabelRequired": "overload_1"}):
with self.assertRaises(ValueError):
test_ops.kernel_label_required(1)
a = constant_op.constant(1)
with session.Session() as sess:
self.evaluate(a)
class AttrScopeTest(test_util.TensorFlowTestCase):
def _get_test_attrs(self):
x = control_flow_ops.no_op()
try:
a = compat.as_text(x.get_attr("_A"))
except ValueError:
a = None
try:
b = compat.as_text(x.get_attr("_B"))
except ValueError:
b = None
return (a, b)
@test_util.run_deprecated_v1
def testNoLabel(self):
with self.cached_session():
self.assertAllEqual((None, None), self._get_test_attrs())
@test_util.run_deprecated_v1
def testLabelMap(self):
with self.cached_session() as sess:
a1 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": attr_value_pb2.AttrValue(s=compat.as_bytes("foo"))
}):
a2 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": None,
"_B": attr_value_pb2.AttrValue(s=compat.as_bytes("bar"))
}):
a3 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": attr_value_pb2.AttrValue(s=compat.as_bytes("baz"))
}):
a4 = self._get_test_attrs()
a5 = self._get_test_attrs()
a6 = self._get_test_attrs()
a7 = self._get_test_attrs()
self.assertAllEqual((None, None), a1)
self.assertAllEqual(("foo", None), a2)
self.assertAllEqual((None, "bar"), a3)
self.assertAllEqual(("baz", "bar"), a4)
self.assertAllEqual((None, "bar"), a5)
self.assertAllEqual(("foo", None), a6)
self.assertAllEqual((None, None), a7)
ops.RegisterShape("KernelLabel")(common_shapes.scalar_shape)
class KernelLabelTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testNoLabel(self):
with self.cached_session():
self.assertAllEqual(b"My label is: default",
test_ops.kernel_label().eval())
@test_util.run_deprecated_v1
def testLabelMap(self):
with self.cached_session() as sess:
default_1 = test_ops.kernel_label()
# pylint: disable=protected-access
with sess.graph._kernel_label_map({"KernelLabel": "overload_1"}):
overload_1_1 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": "overload_2"}):
overload_2 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": ""}):
default_2 = test_ops.kernel_label()
overload_1_2 = test_ops.kernel_label()
# pylint: enable=protected-access
default_3 = test_ops.kernel_label()
self.assertAllEqual(b"My label is: default", self.evaluate(default_1))
self.assertAllEqual(b"My label is: default", self.evaluate(default_2))
self.assertAllEqual(b"My label is: default", self.evaluate(default_3))
self.assertAllEqual(b"My label is: overload_1",
self.evaluate(overload_1_1))
self.assertAllEqual(b"My label is: overload_1",
self.evaluate(overload_1_2))
self.assertAllEqual(b"My label is: overload_2", self.evaluate(overload_2))
class AsGraphDefTest(test_util.TensorFlowTestCase):
def testGraphDefVersion(self):
"""Test that the graphdef version is plumbed through to kernels."""
with ops.Graph().as_default() as g:
version = g.graph_def_versions.producer
with self.session(graph=g):
v = test_ops.graph_def_version().eval()
self.assertEqual(version, v)
def testAddShapes(self):
with ops.Graph().as_default() as g:
t1, t2, t3, t4, t5 = _apply_op(g, "FiveFloatOutputs", [],
[dtypes.float32] * 5)
t1.set_shape(None)
t2.set_shape([])
t3.set_shape([None])
t4.set_shape([43, 37])
t5.set_shape([43, None])
b = constant_op.constant(1.0) # pylint: disable=unused-variable
gd = g.as_graph_def(add_shapes=True)
self.assertProtoEqualsVersion("""
node { name: "FiveFloatOutputs" op: "FiveFloatOutputs"
attr {
key: "_output_shapes"
value {
list {
shape { unknown_rank: true }
shape { }
shape { dim { size: -1 } }
shape { dim { size: 43 } dim { size: 37 } }
shape { dim { size: 43 } dim { size: -1 } }
}
}
}
}
node { name: "Const" op: "Const"
attr {
key: "_output_shapes"
value {
list {
shape { }
}
}
}
attr {
key: "dtype"
value { type: DT_FLOAT }
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape { }
float_val: 1.0 } } } }
""", gd)
@ops.RegisterStatistics("a", "flops")
def _calc_a_forward_flops(unused_graph, unused_node):
return ops.OpStats("flops", 20)
class StatisticsTest(test_util.TensorFlowTestCase):
def testRegisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("a", "an_a")
flops = ops.get_stats_for_node_def(graph, node, "flops")
self.assertEqual(20, flops.value)
missing_stat = ops.get_stats_for_node_def(graph, node, "missing_stat")
self.assertEqual(None, missing_stat.value)
def testUnregisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("b", "a_b")
weight_params = ops.get_stats_for_node_def(graph, node, "weight_params")
self.assertEqual(None, weight_params.value)
def testAccumulateStatistics(self):
flops_total = ops.OpStats("flops")
self.assertEqual(None, flops_total.value)
second_flops = ops.OpStats("flops", 3)
flops_total += second_flops
self.assertEqual(3, flops_total.value)
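# The accumulation semantics StatisticsTest checks, as a sketch (defined but
# never called): an `OpStats` starts with a None value, and `+=` between
# stats of the same statistic type sums their values.
def _op_stats_sketch():
  total = ops.OpStats("flops")  # value starts as None
  total += ops.OpStats("flops", 100)  # -> 100
  total += ops.OpStats("flops", 20)  # -> 120
  return total.value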
class DeviceStackTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasicDeviceAssignmentMetadata(self):
def device_func(unused_op):
return "/cpu:*"
const_zero = constant_op.constant([0.0], name="zero")
with ops.device("/cpu"):
const_one = constant_op.constant([1.0], name="one")
with ops.device("/cpu:0"):
const_two = constant_op.constant([2.0], name="two")
with ops.device(device_func):
const_three = constant_op.constant(3.0, name="three")
self.assertEqual(0, len(const_zero.op._device_assignments))
one_list = const_one.op._device_assignments
self.assertEqual(1, len(one_list))
self.assertEqual("/cpu", one_list[0].obj)
self.assertEqual("ops_test.py", os.path.basename(one_list[0].filename))
two_list = const_two.op._device_assignments
self.assertEqual(2, len(two_list))
devices = [t.obj for t in two_list]
self.assertEqual(set(["/cpu", "/cpu:0"]), set(devices))
three_list = const_three.op._device_assignments
self.assertEqual(1, len(three_list))
func_description = three_list[0].obj
expected_regex = r"device_func<.*ops_test.py, [0-9]+"
self.assertRegexpMatches(func_description, expected_regex)
@test_util.run_deprecated_v1
def testDeviceAssignmentMetadataForGraphDeviceAndTfDeviceFunctions(self):
with ops.device("/cpu"):
const_one = constant_op.constant([1.0], name="one")
with ops.get_default_graph().device("/cpu"):
const_two = constant_op.constant([2.0], name="two")
one_metadata = const_one.op._device_assignments[0]
two_metadata = const_two.op._device_assignments[0]
# Verify both types of device assignment return the right stack info.
self.assertRegexpMatches("ops_test.py",
os.path.basename(one_metadata.filename))
self.assertEqual(one_metadata.filename, two_metadata.filename)
self.assertEqual(one_metadata.lineno + 2, two_metadata.lineno)
class ColocationGroupTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], a.op.colocation_groups())
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
with self.assertRaises(ValueError):
c.op.get_attr("_class")
@test_util.run_deprecated_v1
def testBasicColocationMetadata(self):
const_two = constant_op.constant([2.0], name="two")
with ops.colocate_with(const_two.op):
const_three = constant_op.constant(3.0, name="three")
locations_dict = const_three.op._colocation_dict
self.assertIn("two", locations_dict)
metadata = locations_dict["two"]
self.assertIsNone(metadata.obj)
# Check that this test's filename is recorded as the file containing the
# colocation statement.
self.assertEqual("ops_test.py", os.path.basename(metadata.filename))
@test_util.run_deprecated_v1
def testColocationDeviceInteraction(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
# 'b' is created in the scope of /cpu:0, but it is
# colocated with 'a', which is on '/device:GPU:0'. colocate_with
# overrides devices because it is a stronger constraint.
b = constant_op.constant(3.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual(a.op.device, b.op.device)
@test_util.run_deprecated_v1
def testColocationCanonicalization(self):
with ops.device("/device:GPU:0"):
_ = constant_op.constant(2.0)
with ops.device(lambda op: "/device:GPU:0"):
b = constant_op.constant(3.0)
with ops.get_default_graph().colocate_with(b):
with ops.device("/device:GPU:0"):
c = constant_op.constant(4.0)
# A's device will be /device:GPU:0
# B's device will be /device:GPU:0
# C's device will be /device:GPU:0 because it
# inherits B's device name, after canonicalizing the names.
self.assertEqual(b.op.device, c.op.device)
@test_util.run_deprecated_v1
def testLocationOverrides(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
# Note that this colocation is "redundant", since we are
# within the scope of "/device:GPU:0". However, we would like to
# preserve in the GraphDef that these two ops should be
# colocated in a portable way.
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
d = constant_op.constant(5.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual("/device:GPU:0", a.op.device)
self.assertEqual(a.op.device, b.op.device)
# Test that device function stack is restored.
self.assertEqual("/device:GPU:0", c.op.device)
self.assertEqual("/device:CPU:0", d.op.device)
@test_util.run_deprecated_v1
def testNestedColocateWith(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@a"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testMultiColocationGroups(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@a", b"loc:@b"]), set(c.op.colocation_groups()))
@test_util.run_deprecated_v1
def testColocationIgnoreStack(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op, ignore_existing=True):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups()))
@test_util.run_deprecated_v1
def testColocateWithReset(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(None, ignore_existing=True):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@c"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateWithInitialNoneThenNested(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
with ops.colocate_with(None, ignore_existing=True):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(b.op):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@b"], b.op.colocation_groups())
self.assertEqual([b"loc:@b"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateVariables(self):
a = variables.Variable([2.0], name="a")
with ops.colocate_with(a.op):
b = variables.Variable([3.0], name="b")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
class DeprecatedTest(test_util.TensorFlowTestCase):
def testSuccess(self):
with ops.Graph().as_default() as g:
test_util.set_producer_version(g, 7)
old = test_ops.old()
with self.session(graph=g):
old.run()
def _error(self):
return ((r"Op Old is not available in GraphDef version %d\. "
r"It has been removed in version 8\. For reasons\.") %
versions.GRAPH_DEF_VERSION)
def testGraphConstructionFail(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(NotImplementedError, self._error()):
test_ops.old()
class DenseTensorLikeTypeTest(test_util.TensorFlowTestCase):
def testSuccess(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertTrue(ops.is_dense_tensor_like(t))
v = variables.Variable([17])
self.assertTrue(ops.is_dense_tensor_like(v))
class BadClassNoName(object):
pass
class BadClassBadName(object):
def name(self):
pass
class BadClassNoDtype(object):
@property
def name(self):
pass
class BadClassBadDtype(object):
@property
def name(self):
pass
def dtype(self):
pass
def testBadClass(self):
with self.assertRaisesRegexp(TypeError, "`name`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassNoName)
with self.assertRaisesRegexp(TypeError, "`name`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassBadName)
with self.assertRaisesRegexp(TypeError, "`dtype`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassNoDtype)
with self.assertRaisesRegexp(TypeError, "`dtype`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassBadDtype)
class NameScopeTest(test_util.TensorFlowTestCase):
def testStripAndPrependScope(self):
strs = [
"hidden1/hidden1/weights", # Same prefix. Should strip.
"hidden1///hidden1/weights", # Extra "/". Should strip.
"^hidden1/hidden1/weights", # Same prefix. Should strip.
"loc:@hidden1/hidden1/weights", # Same prefix. Should strip.
"hhidden1/hidden1/weights", # Different prefix. Should keep.
"hidden1"
] # Not a prefix. Should keep.
expected_striped = [
"hidden1/weights", "hidden1/weights", "^hidden1/weights",
"loc:@hidden1/weights", "hhidden1/hidden1/weights", "hidden1"
]
expected_prepended = [
"hidden2/hidden1/weights", "hidden2/hidden1/weights",
"^hidden2/hidden1/weights", "loc:@hidden2/hidden1/weights",
"hidden2/hhidden1/hidden1/weights", "hidden2/hidden1"
]
name_scope_to_strip = "hidden1"
name_scope_to_add = "hidden2"
    for es, ep, s in zip(expected_stripped, expected_prepended, strs):
      stripped = ops.strip_name_scope(s, name_scope_to_strip)
      self.assertEqual(es, stripped)
      self.assertEqual(ep, ops.prepend_name_scope(stripped, name_scope_to_add))
def testGetNameScope(self):
with ops.Graph().as_default() as g:
with ops.name_scope("scope1"):
with ops.name_scope("scope2"):
with ops.name_scope("scope3"):
self.assertEqual("scope1/scope2/scope3", g.get_name_scope())
self.assertEqual("scope1/scope2", g.get_name_scope())
self.assertEqual("scope1", g.get_name_scope())
self.assertEqual("", g.get_name_scope())
def testTwoGraphs(self):
def f():
g1 = ops.Graph()
g2 = ops.Graph()
with g1.as_default():
with g2.as_default():
with ops.name_scope("_"):
pass
self.assertRaisesRegexp(ValueError, "'_' is not a valid scope name", f)
class TracebackTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testTracebackWithStartLines(self):
with self.cached_session() as sess:
a = constant_op.constant(2.0)
sess.run(
a,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(sess.graph.get_operations())
# Tests that traceback_with_start_lines is the same as traceback
# but includes one more element at the end.
for op in sess.graph.get_operations():
        self.assertEqual(len(op.traceback), len(op.traceback_with_start_lines))
for frame, frame_with_start_line in zip(
op.traceback, op.traceback_with_start_lines):
          self.assertEqual(5, len(frame_with_start_line))
          self.assertEqual(frame, frame_with_start_line[:-1])
class EnableEagerExecutionTest(test_util.TensorFlowTestCase):
@test_util.run_v1_only("b/120545219")
def testBadArgumentsToEnableEagerExecution(self):
with self.assertRaisesRegexp(TypeError, "config must be a tf.ConfigProto"):
ops.enable_eager_execution(context.DEVICE_PLACEMENT_SILENT)
with self.assertRaisesRegexp(ValueError, "device_policy must be one of"):
c = config_pb2.ConfigProto()
ops.enable_eager_execution(c, c)
with self.assertRaisesRegexp(ValueError, "execution_mode must be one of"):
c = config_pb2.ConfigProto()
ops.enable_eager_execution(c, execution_mode=c)
class _TupleTensor(composite_tensor.CompositeTensor):
"""`Tensor`-like `tuple`-like for custom `Tensor` conversion masquerading."""
def __init__(self, components):
super(_TupleTensor, self).__init__()
self._components = tuple(ops.convert_to_tensor(c) for c in components)
@property
def _type_spec(self):
return _TupleTensorSpec(type_spec.from_value(c) for c in self._components)
def __getitem__(self, key):
return self._components[key]
def __len__(self):
return len(self._components)
def __iter__(self):
return iter(self._components)
class _TupleTensorSpec(type_spec.TypeSpec):
def __init__(self, specs):
    self._specs = tuple(specs)  # Materialize generators so specs can be reused.
value_type = property(lambda self: _TupleTensor)
_component_specs = property(lambda self: self._specs)
def _to_components(self, value):
return value._components
def _from_components(self, components):
    return _TupleTensor(components)
def _serialize(self):
return (self._specs,)
class _MyTuple(object):
"""Pretend user-side class for `ConvertToCompositeTensorTest ."""
def __init__(self, components):
super(_MyTuple, self).__init__()
self._components = tuple(components)
def __getitem__(self, key):
return self._components[key]
def __len__(self):
return len(self._components)
def __iter__(self):
return iter(self._components)
ops.register_tensor_conversion_function(
_MyTuple, conversion_func=lambda x, *_, **__: _TupleTensor(x))
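# A hedged note on the registration above: conversion functions are invoked
# with the signature (value, dtype=None, name=None, as_ref=False); the lambda
# ignores everything but `value`. A minimal sketch of the conversion this
# enables (illustrative only):
#
#   t = ops.convert_to_tensor_or_composite(_MyTuple((1, [2., 3.])))
#   # t is a _TupleTensor whose elements are ops.Tensor instances.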
class CustomConvertToCompositeTensorTest(test_util.TensorFlowTestCase):
def testCompositeTensorConversion(self):
"""Tests that a user can register a CompositeTensor converter."""
x = _MyTuple((1, [2., 3.], [[4, 5], [6, 7]]))
y = ops.convert_to_tensor_or_composite(x)
self.assertFalse(tensor_util.is_tensor(y))
self.assertIsInstance(y, _TupleTensor)
self.assertLen(y, len(x))
for x_, y_ in zip(x, y):
self.assertIsInstance(y_, ops.Tensor)
self.assertTrue(tensor_util.is_tensor(y_))
self.assertAllEqual(x_, tensor_util.constant_value(y_))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for common shapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class CommonShapesTest(test_util.TensorFlowTestCase):
# Asserts that we get the same result with numpy (for known shapes), and that
# the order of arguments does not matter (i.e., broadcasting is reflexive).
def _assert_incompatible_broadcast(self, shape1, shape2):
if shape1.dims is not None and shape2.dims is not None:
zeros1 = np.zeros(shape1.as_list())
zeros2 = np.zeros(shape2.as_list())
with self.assertRaises(ValueError):
np.broadcast(zeros1, zeros2)
with self.assertRaises(ValueError):
np.broadcast(zeros2, zeros1)
self.assertFalse(common_shapes.is_broadcast_compatible(shape1, shape2))
self.assertFalse(common_shapes.is_broadcast_compatible(shape2, shape1))
with self.assertRaises(ValueError):
common_shapes.broadcast_shape(shape1, shape2)
with self.assertRaises(ValueError):
common_shapes.broadcast_shape(shape2, shape1)
# Asserts that we get the same result with numpy (for known shapes), and that
# the order of arguments does not matter (i.e., broadcasting is reflexive).
def _assert_broadcast(self, expected, shape1, shape2):
if shape1.dims is not None and shape2.dims is not None:
expected_np = expected.as_list()
zeros1 = np.zeros(shape1.as_list())
zeros2 = np.zeros(shape2.as_list())
self.assertAllEqual(expected_np, np.broadcast(zeros1, zeros2).shape)
self.assertAllEqual(expected_np, np.broadcast(zeros2, zeros1).shape)
self.assertEqual(
expected, common_shapes.broadcast_shape(shape1, shape2))
self.assertEqual(
expected, common_shapes.broadcast_shape(shape2, shape1))
else:
self.assertEqual(expected, common_shapes.broadcast_shape(shape1, shape2))
self.assertEqual(expected, common_shapes.broadcast_shape(shape2, shape1))
def testBroadcast_one_dimension(self):
s1 = tensor_shape.vector(5)
s2 = tensor_shape.vector(7)
unknown = tensor_shape.unknown_shape()
scalar = tensor_shape.scalar()
expanded_scalar = tensor_shape.TensorShape([1])
# Tensors with same shape should have the same broadcast result.
for shape in (s1, s2, unknown, scalar, expanded_scalar):
self._assert_broadcast(expected=shape, shape1=shape, shape2=shape)
# [] and [1] act like identity.
self._assert_broadcast(expected=s1, shape1=s1, shape2=scalar)
self._assert_broadcast(expected=s2, shape1=s2, shape2=scalar)
self._assert_broadcast(expected=s1, shape1=s1, shape2=expanded_scalar)
self._assert_broadcast(expected=s2, shape1=s2, shape2=expanded_scalar)
self._assert_broadcast(expected=unknown, shape1=s1, shape2=unknown)
self._assert_broadcast(expected=unknown, shape1=s2, shape2=unknown)
self._assert_broadcast(
expected=expanded_scalar, shape1=scalar, shape2=expanded_scalar)
self._assert_incompatible_broadcast(shape1=s1, shape2=s2)
def testBroadcast_many_dimensions(self):
unknown = tensor_shape.unknown_shape()
shape_0 = tensor_shape.scalar()
shape_1 = tensor_shape.vector(1)
shape_4 = tensor_shape.vector(4)
shape_1x4 = tensor_shape.matrix(1, 4)
shape_4x1 = tensor_shape.matrix(4, 1)
shape_3x4 = tensor_shape.matrix(3, 4)
shape_4x3 = tensor_shape.matrix(4, 3)
# Tensors with same shape should have the same broadcast result.
for shape in (
shape_0, shape_1, shape_4, shape_1x4, shape_4x1, shape_3x4, shape_4x3):
self._assert_broadcast(expected=shape, shape1=shape, shape2=shape)
# [] and [1] act like identity.
for identity in (shape_0, shape_1):
for shape in (shape_4, shape_1x4, shape_4x1, shape_3x4, shape_4x3):
self._assert_broadcast(expected=shape, shape1=identity, shape2=shape)
# Unknown in, unknown out.
for shape in (shape_4, shape_1x4, shape_4x1, shape_3x4, shape_4x3):
self._assert_broadcast(expected=unknown, shape1=shape, shape2=unknown)
self._assert_broadcast(expected=shape_1x4, shape1=shape_4, shape2=shape_1x4)
shape_4x4 = tensor_shape.matrix(4, 4)
self._assert_broadcast(expected=shape_4x4, shape1=shape_4, shape2=shape_4x1)
self._assert_broadcast(expected=shape_3x4, shape1=shape_4, shape2=shape_3x4)
self._assert_incompatible_broadcast(shape1=shape_4, shape2=shape_4x3)
self._assert_broadcast(
expected=shape_4x4, shape1=shape_1x4, shape2=shape_4x1)
self._assert_broadcast(
expected=shape_3x4, shape1=shape_1x4, shape2=shape_3x4)
self._assert_incompatible_broadcast(shape1=shape_1x4, shape2=shape_4x3)
self._assert_incompatible_broadcast(shape1=shape_4x1, shape2=shape_3x4)
self._assert_broadcast(
expected=shape_4x3, shape1=shape_4x1, shape2=shape_4x3)
self._assert_incompatible_broadcast(shape1=shape_3x4, shape2=shape_4x3)
# Asserts that the order of arguments does not matter (i.e., broadcasting is
# reflexive).
def _assert_broadcast_with_unknown_dims(self, expected, shape1, shape2):
actual_dims = common_shapes.broadcast_shape(shape1, shape2).dims
reflexive_actual_dims = common_shapes.broadcast_shape(shape2, shape1).dims
if actual_dims is None:
self.assertIsNone(reflexive_actual_dims)
elif reflexive_actual_dims is None:
self.assertIsNone(actual_dims)
else:
self.assertEqual(len(actual_dims), len(reflexive_actual_dims))
for actual_dim, reflexive_actual_dim in zip(
actual_dims, reflexive_actual_dims):
self.assertEqual(actual_dim.value, reflexive_actual_dim.value)
expected_dims = expected.dims
if expected_dims is None:
self.assertIsNone(actual_dims)
elif actual_dims is None:
self.assertIsNone(expected_dims)
else:
self.assertEqual(len(expected_dims), len(actual_dims))
for expected_dim, actual_dim in zip(expected_dims, actual_dims):
self.assertEqual(expected_dim.value, actual_dim.value)
def testBroadcast_unknown_dims(self):
unknown = tensor_shape.unknown_shape()
shape_0 = tensor_shape.scalar()
shape_1 = tensor_shape.vector(1)
# pylint: disable=invalid-name
shape_U = tensor_shape.vector(None)
shape_1xU = tensor_shape.matrix(1, None)
shape_Ux1 = tensor_shape.matrix(None, 1)
shape_4xU = tensor_shape.matrix(4, None)
shape_Ux4 = tensor_shape.matrix(None, 4)
# pylint: enable=invalid-name
# Tensors with same shape should have the same broadcast result.
for shape in (shape_U, shape_1xU, shape_Ux1, shape_4xU, shape_Ux4):
self._assert_broadcast_with_unknown_dims(
expected=shape, shape1=shape, shape2=shape)
# [] and [1] act like identity.
for identity in (shape_0, shape_1):
for shape in (shape_U, shape_1xU, shape_Ux1, shape_4xU, shape_Ux4):
self._assert_broadcast_with_unknown_dims(
expected=shape, shape1=identity, shape2=shape)
# Unknown in, unknown out.
for shape in (shape_U, shape_1xU, shape_Ux1, shape_4xU, shape_Ux4):
self._assert_broadcast_with_unknown_dims(
expected=unknown, shape1=shape, shape2=unknown)
self._assert_broadcast_with_unknown_dims(
expected=shape_1xU, shape1=shape_U, shape2=shape_1xU)
shape_UxU = tensor_shape.matrix(None, None) # pylint: disable=invalid-name
self._assert_broadcast_with_unknown_dims(
expected=shape_UxU, shape1=shape_U, shape2=shape_Ux1)
self._assert_broadcast_with_unknown_dims(
expected=shape_4xU, shape1=shape_U, shape2=shape_4xU)
self._assert_broadcast_with_unknown_dims(
expected=shape_Ux4, shape1=shape_U, shape2=shape_Ux4)
self._assert_broadcast_with_unknown_dims(
expected=shape_UxU, shape1=shape_1xU, shape2=shape_Ux1)
self._assert_broadcast_with_unknown_dims(
expected=shape_4xU, shape1=shape_1xU, shape2=shape_4xU)
self._assert_broadcast_with_unknown_dims(
expected=shape_Ux4, shape1=shape_1xU, shape2=shape_Ux4)
self._assert_broadcast_with_unknown_dims(
expected=shape_4xU, shape1=shape_Ux1, shape2=shape_4xU)
self._assert_broadcast_with_unknown_dims(
expected=shape_Ux4, shape1=shape_Ux1, shape2=shape_Ux4)
shape_4x4 = tensor_shape.matrix(4, 4)
self._assert_broadcast_with_unknown_dims(
expected=shape_4x4, shape1=shape_4xU, shape2=shape_Ux4)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/common_shapes_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function for interpolating formatted errors from the TensorFlow runtime.
Exposes the function `interpolate` to interpolate messages with tags of the form
{{type name}}.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import os
import re
import six
from tensorflow.python.util import tf_stack
_NAME_REGEX = r"[A-Za-z0-9_.][A-Za-z0-9_.\-/]*?"
_TAG_REGEX = r"{{{{({name}) ({name})}}}}".format(name=_NAME_REGEX)
_INTERPOLATION_REGEX = r"^(.*?)({tag})".format(tag=_TAG_REGEX)
_INTERPOLATION_PATTERN = re.compile(_INTERPOLATION_REGEX, re.DOTALL)
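# The quadruple braces in _TAG_REGEX are str.format escapes; after formatting,
# the pattern matches literal "{{type name}}" tags. An illustrative match
# (a sketch, not executed by this module):
#
#   m = _INTERPOLATION_PATTERN.match("123{{node Foo}}456")
#   # m.group(1) == "123"  (separator text before the tag)
#   # m.group(3) == "node", m.group(4) == "Foo"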
_ParseTag = collections.namedtuple("_ParseTag", ["type", "name"])
_BAD_FILE_SUBSTRINGS = [
os.path.join("tensorflow", "python"),
os.path.join("tensorflow", "contrib"),
os.path.join("tensorflow_estimator", "python"),
os.path.join("tensorflow_estimator", "contrib"),
"<embedded",
]
def parse_message(message):
"""Parses the message.
Splits the message into separators and tags. Tags are named tuples
representing the string {{type name}} and they are separated by
separators. For example, in "123{{node Foo}}456{{node Bar}}789", there are
two tags and three separators. The separators are the numeric characters.
Args:
message: String to parse
Returns:
(list of separator strings, list of _ParseTags).
For example, if message is "123{{node Foo}}456" then this function
returns (["123", "456"], [_ParseTag("node", "Foo")])
"""
seps = []
tags = []
pos = 0
while pos < len(message):
match = re.match(_INTERPOLATION_PATTERN, message[pos:])
if match:
seps.append(match.group(1))
tags.append(_ParseTag(match.group(3), match.group(4)))
pos += match.end()
else:
break
seps.append(message[pos:])
return seps, tags
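# A minimal usage sketch of parse_message (illustrative only):
#
#   seps, tags = parse_message("123{{node Foo}}456{{node Bar}}789")
#   # seps == ["123", "456", "789"]
#   # tags == [_ParseTag(type="node", name="Foo"),
#   #          _ParseTag(type="node", name="Bar")]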
def _compute_device_summary_from_list(name, device_assignment_list, prefix=""):
"""Return a summary of an op's device function stack.
Args:
name: The name of the op.
device_assignment_list: The op._device_assignments list.
prefix: An optional string prefix used before each line of the multi-
line string returned by this function.
Returns:
A multi-line string similar to:
Device assignments active during op 'foo' creation:
with tf.device(/cpu:0): <test_1.py:27>
with tf.device(some_func<foo.py, 123>): <test_2.py:38>
The first line will have no padding to its left by default. Subsequent
lines will have two spaces of left-padding. Use the prefix argument
to increase indentation.
"""
if not device_assignment_list:
message = "No device assignments were active during op '%s' creation."
message %= name
return prefix + message
str_list = []
str_list.append(
"%sDevice assignments active during op '%s' creation:" % (prefix, name))
for traceable_obj in device_assignment_list:
location_summary = "<{file}:{line}>".format(
file=traceable_obj.filename, line=traceable_obj.lineno)
subs = {
"prefix": prefix,
"indent": " ",
"dev_name": traceable_obj.obj,
"loc": location_summary,
}
str_list.append(
"{prefix}{indent}with tf.device({dev_name}): {loc}".format(**subs))
return "\n".join(str_list)
def _compute_device_assignment_summary_from_op(op, prefix=""):
# pylint: disable=protected-access
return _compute_device_summary_from_list(op.name, op._device_assignments,
prefix)
# pylint: enable=protected-access
def _compute_colocation_summary_from_dict(name, colocation_dict, prefix=""):
"""Return a summary of an op's colocation stack.
Args:
name: The op name.
colocation_dict: The op._colocation_dict.
prefix: An optional string prefix used before each line of the multi-
line string returned by this function.
Returns:
A multi-line string similar to:
Node-device colocations active during op creation:
with tf.compat.v1.colocate_with(test_node_1): <test_1.py:27>
with tf.compat.v1.colocate_with(test_node_2): <test_2.py:38>
The first line will have no padding to its left by default. Subsequent
lines will have two spaces of left-padding. Use the prefix argument
to increase indentation.
"""
if not colocation_dict:
message = "No node-device colocations were active during op '%s' creation."
message %= name
return prefix + message
str_list = []
str_list.append("%sNode-device colocations active during op '%s' creation:" %
(prefix, name))
for coloc_name, location in colocation_dict.items():
location_summary = "<{file}:{line}>".format(
file=location.filename, line=location.lineno)
subs = {
"prefix": prefix,
"indent": " ",
"name": coloc_name,
"loc": location_summary,
}
str_list.append(
"{prefix}{indent}with tf.colocate_with({name}): {loc}".format(**subs))
return "\n".join(str_list)
def _compute_colocation_summary_from_op(op, prefix=""):
"""Fetch colocation file, line, and nesting and return a summary string."""
# pylint: disable=protected-access
return _compute_colocation_summary_from_dict(op.name, op._colocation_dict,
prefix)
# pylint: enable=protected-access
def _find_index_of_defining_frame_for_op(op):
"""Return index in op.traceback with first 'useful' frame.
This method reads through the stack stored in op.traceback looking for the
innermost frame which (hopefully) belongs to the caller. It accomplishes this
by rejecting frames whose filename appears to come from TensorFlow (see
error_interpolation._BAD_FILE_SUBSTRINGS for the list of rejected substrings).
Args:
op: the Operation object for which we would like to find the defining
location.
Returns:
Integer index into op.traceback where the first non-TF file was found
(innermost to outermost), or 0 (for the outermost stack frame) if all files
came from TensorFlow.
"""
# Index 0 of tf_traceback is the outermost frame.
tf_traceback = op.traceback
size = len(tf_traceback)
filenames = [frame[tf_stack.TB_FILENAME] for frame in tf_traceback]
# We process the filenames from the innermost frame to outermost.
for idx, filename in enumerate(reversed(filenames)):
contains_bad_substrings = [ss in filename for ss in _BAD_FILE_SUBSTRINGS]
if not any(contains_bad_substrings):
return size - idx - 1
return 0
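# A worked example of the scan above (frames and paths are illustrative):
# with op.traceback filenames, outermost first,
#
#   ["my_model.py",
#    "tensorflow/python/ops/math_ops.py",
#    "tensorflow/python/framework/ops.py"]
#
# the innermost-to-outermost scan rejects the two tensorflow/python frames
# and returns index 0, the frame for "my_model.py".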
def _get_defining_frame_from_op(op):
"""Find and return stack frame where op was defined."""
frame_index = _find_index_of_defining_frame_for_op(op)
return op.traceback[frame_index]
def compute_useful_frames(op, num):
"""Return a list of frames, which form a 'useful' stack.
Starting from the defining frame to the outermost one, this method computes
the contiguous portion of the 'useful' stack trace and returns the selected
frames.
Args:
op: op.Operation object having a _traceback member.
num: total number of frames to return.
Returns:
A list of frames.
"""
defining_frame_index = _find_index_of_defining_frame_for_op(op)
  # The returned frames run from the outermost frame down to two frames past
  # the defining frame, capped at `num` frames in total. The two extra
  # TensorFlow-internal frames are included to give context about which node
  # is being defined.
innermost_excluded = min(defining_frame_index + 2 + 1, len(op.traceback))
outermost_included = max(innermost_excluded - num, 0)
return op.traceback[outermost_included:innermost_excluded]
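# A worked example of the slice above (numbers are illustrative): with a
# traceback of 10 frames, defining_frame_index == 4 and num == 5,
#
#   innermost_excluded = min(4 + 2 + 1, 10) == 7
#   outermost_included = max(7 - 5, 0) == 2
#
# so frames 2..6 are returned: two callers above the defining frame, the
# defining frame itself, and two TensorFlow-internal frames beyond it.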
def compute_field_dict(op, strip_file_prefix=""):
"""Return a dictionary mapping interpolation tokens to values.
Args:
op: op.Operation object having a _traceback member.
strip_file_prefix: The common path in the stacktrace. We remove the prefix
from the file names.
Returns:
A dictionary mapping string tokens to string values. The keys are shown
below along with example values.
{
"file": "tool_utils.py",
"line": "124",
"defined_at": " (defined at tool_utils.py:124)",
"colocations":
'''Node-device colocations active during op creation:
with tf.compat.v1.colocate_with(test_node_1): <test_1.py:27>
with tf.compat.v1.colocate_with(test_node_2): <test_2.py:38>'''
"devices":
'''Device assignments active during op 'foo' creation:
with tf.device(/cpu:0): <test_1.py:27>
with tf.device(some_func<foo.py, 123>): <test_2.py:38>'''
"devs_and_colocs": A concatenation of colocations and devices, e.g.
'''Node-device colocations active during op creation:
with tf.compat.v1.colocate_with(test_node_1): <test_1.py:27>
with tf.compat.v1.colocate_with(test_node_2): <test_2.py:38>'''
Device assignments active during op 'foo' creation:
with tf.device(/cpu:0): <test_1.py:27>
with tf.device(some_func<foo.py, 123>): <test_2.py:38>'''
}
"""
frame = _get_defining_frame_from_op(op)
filename = frame[tf_stack.TB_FILENAME]
if filename.startswith(strip_file_prefix):
filename = filename[len(strip_file_prefix):]
lineno = frame[tf_stack.TB_LINENO]
defined_at = " (defined at %s:%d)" % (filename, lineno)
colocation_summary = _compute_colocation_summary_from_op(op)
device_summary = _compute_device_assignment_summary_from_op(op)
combined_summary = "\n".join([colocation_summary, device_summary])
field_dict = {
"file": filename,
"line": lineno,
"defined_at": defined_at,
"colocations": colocation_summary,
"devices": device_summary,
"devs_and_colocs": combined_summary,
}
return field_dict
def traceback_files_common_prefix(all_ops):
"""Determines the common prefix from the paths of the stacktrace of 'all_ops'.
For example, if the paths are '/foo/bar/baz/' and '/foo/car', this would
return '/foo'.
Args:
all_ops: All the input nodes in the form of a list of lists of ops.
Returns:
The common prefix.
"""
files = set()
for ops in all_ops:
if ops is None:
continue
for op in ops:
for frame in op.traceback:
filename = frame[tf_stack.TB_FILENAME]
if "<embedded" not in filename:
files.add(filename)
return os.path.split(os.path.commonprefix(list(files)))[0]
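# A hedged note on the helper above: os.path.commonprefix is character-based,
# so it can return a partial path component; the trailing os.path.split trims
# it back to a whole directory. For example:
#
#   os.path.commonprefix(["/foo/bar/baz/", "/foo/barrier"])  # -> "/foo/bar"
#   os.path.split("/foo/bar")[0]                             # -> "/foo"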
def _sources_for_node(node, graph):
"""Gets the input op nodes for 'node'.
Args:
node: The node.
graph: The graph containing the node.
Returns:
The unique input nodes.
"""
inputs = set()
for name in node.node_def.input:
if name.startswith("^"):
name = name[1:]
try:
tensor = graph.get_tensor_by_name(name)
op = tensor.op
except (KeyError, ValueError):
try:
op = graph.get_operation_by_name(name)
except KeyError:
continue
inputs.add(op)
return list(inputs)
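# A hedged note on the lookup above: inputs prefixed with "^" are control
# dependencies, so the marker is stripped first. Tensor inputs of the form
# "op_name:k" resolve via get_tensor_by_name; bare op names (including
# stripped control inputs) raise ValueError there and fall back to
# get_operation_by_name.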
def _build_error_message(op, input_ops, common_prefix):
"""Returns the formatted error message for the given op.
Args:
op: The node.
input_ops: The input nodes to the 'op' node
common_prefix: The prefix path common to the stacktrace of inputs.
Returns:
The formatted error message for the given op. The error message also
includes the information about the input sources for the given op.
"""
field_dict = compute_field_dict(op, common_prefix)
msg = "node %s%s " % (op.name, field_dict["defined_at"])
input_debug_info = []
  # This stores the "defined_at" strings that we have already printed.
done = set()
done.add(field_dict["defined_at"])
for op_inp in input_ops:
field_dict_inp = compute_field_dict(op_inp, common_prefix)
if field_dict_inp["defined_at"] not in done:
input_debug_info.append(
" %s%s" % (op_inp.name, field_dict_inp["defined_at"]))
done.add(field_dict_inp["defined_at"])
if input_debug_info:
end_msg = ("\nInput Source operations connected to node %s:\n") % (op.name)
end_msg += "\t\n".join(input_debug_info)
else:
end_msg = ""
return msg, end_msg
def interpolate(error_message, graph):
"""Interpolates an error message.
The error message can contain tags of the form `{{type name}}` which will be
replaced. For example: "{{node <name>}}" would get expanded to:
"node <name>(defined at <path>)".
Args:
error_message: A string to interpolate.
graph: ops.Graph object containing all nodes referenced in the error
message.
Returns:
The string with tags of the form {{type name}} interpolated.
"""
seps, tags = parse_message(error_message)
subs = []
end_msg = collections.defaultdict(list)
tagged_ops = []
for t in tags:
try:
op = graph.get_operation_by_name(t.name)
except KeyError:
op = None
if op is None:
tagged_ops.append(None)
else:
tagged_ops.append([op] + _sources_for_node(op, graph))
common_prefix = traceback_files_common_prefix(tagged_ops)
for tag, ops in zip(tags, tagged_ops):
msg = "{{%s %s}}" % (tag.type, tag.name)
if ops is not None:
if tag.type == "node":
msg, source_msg = _build_error_message(ops[0], ops[1:], common_prefix)
if source_msg:
end_msg["source_nodes"].append(source_msg)
elif tag.type == "colocation_node":
field_dict = compute_field_dict(ops[0], common_prefix)
msg = "node %s%s placed on device %s " % (
ops[0].name, field_dict["defined_at"], field_dict["devices"])
end_msg["colocations"].append(field_dict["devs_and_colocs"])
if tag.type == "function_node":
msg = ""
subs.append(msg)
if "source_nodes" in end_msg:
subs.append("\n\nErrors may have originated from an input operation.")
subs.append("\n".join(end_msg["source_nodes"]))
end_msg.pop("source_nodes", None)
for k, messages in end_msg.items():
subs.append("Additional information about %s:" % k)
subs.append("\n".join(messages))
return "".join(
itertools.chain(*six.moves.zip_longest(seps, subs, fillvalue="")))
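# A minimal usage sketch of interpolate (illustrative; assumes `g` is an
# ops.Graph that contains an op named "Foo"):
#
#   message = "Error seen at {{node Foo}}."
#   print(interpolate(message, g))
#   # -> "Error seen at node Foo (defined at my_model.py:12) ."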
|
tensorflow-master
|
tensorflow/python/framework/error_interpolation.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util.tf_export import tf_export
__version__ = pywrap_tensorflow.__version__
__git_version__ = pywrap_tensorflow.__git_version__
__compiler_version__ = pywrap_tensorflow.__compiler_version__
__cxx11_abi_flag__ = pywrap_tensorflow.__cxx11_abi_flag__
__monolithic_build__ = pywrap_tensorflow.__monolithic_build__
VERSION = __version__
tf_export(
"version.VERSION",
"__version__",
v1=["version.VERSION", "VERSION", "__version__"]).export_constant(
__name__, "VERSION")
GIT_VERSION = __git_version__
tf_export(
"version.GIT_VERSION",
"__git_version__",
v1=["version.GIT_VERSION", "GIT_VERSION",
"__git_version__"]).export_constant(__name__, "GIT_VERSION")
COMPILER_VERSION = __compiler_version__
tf_export(
"version.COMPILER_VERSION",
"__compiler_version__",
v1=["version.COMPILER_VERSION", "COMPILER_VERSION",
"__compiler_version__"]).export_constant(__name__, "COMPILER_VERSION")
CXX11_ABI_FLAG = __cxx11_abi_flag__
tf_export(
"sysconfig.CXX11_ABI_FLAG",
"__cxx11_abi_flag__",
v1=["sysconfig.CXX11_ABI_FLAG", "CXX11_ABI_FLAG",
"__cxx11_abi_flag__"]).export_constant(__name__, "CXX11_ABI_FLAG")
MONOLITHIC_BUILD = __monolithic_build__
tf_export(
"sysconfig.MONOLITHIC_BUILD",
"__monolithic_build__",
v1=[
"sysconfig.MONOLITHIC_BUILD", "MONOLITHIC_BUILD", "__monolithic_build__"
]).export_constant(__name__, "MONOLITHIC_BUILD")
GRAPH_DEF_VERSION = pywrap_tensorflow.GRAPH_DEF_VERSION
tf_export(
"version.GRAPH_DEF_VERSION",
v1=["version.GRAPH_DEF_VERSION", "GRAPH_DEF_VERSION"]).export_constant(
__name__, "GRAPH_DEF_VERSION")
GRAPH_DEF_VERSION_MIN_CONSUMER = (
pywrap_tensorflow.GRAPH_DEF_VERSION_MIN_CONSUMER)
tf_export(
"version.GRAPH_DEF_VERSION_MIN_CONSUMER",
v1=[
"version.GRAPH_DEF_VERSION_MIN_CONSUMER",
"GRAPH_DEF_VERSION_MIN_CONSUMER"
]).export_constant(__name__, "GRAPH_DEF_VERSION_MIN_CONSUMER")
GRAPH_DEF_VERSION_MIN_PRODUCER = (
pywrap_tensorflow.GRAPH_DEF_VERSION_MIN_PRODUCER)
tf_export(
"version.GRAPH_DEF_VERSION_MIN_PRODUCER",
v1=[
"version.GRAPH_DEF_VERSION_MIN_PRODUCER",
"GRAPH_DEF_VERSION_MIN_PRODUCER"
]).export_constant(__name__, "GRAPH_DEF_VERSION_MIN_PRODUCER")
__all__ = [
"__version__",
"__git_version__",
"__compiler_version__",
"__cxx11_abi_flag__",
"__monolithic_build__",
"COMPILER_VERSION",
"CXX11_ABI_FLAG",
"GIT_VERSION",
"GRAPH_DEF_VERSION",
"GRAPH_DEF_VERSION_MIN_CONSUMER",
"GRAPH_DEF_VERSION_MIN_PRODUCER",
"VERSION",
"MONOLITHIC_BUILD",
]
|
tensorflow-master
|
tensorflow/python/framework/versions.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exception types for TensorFlow errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.framework import errors_impl as _impl
# pylint: enable=unused-import
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.framework.errors_impl import *
# pylint: enable=wildcard-import
|
tensorflow-master
|
tensorflow/python/framework/errors.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.composite_tensor_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import composite_tensor_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import googletest
class CompositeTensorTest(test_util.TensorFlowTestCase):
def test_is_composite(self):
# Validate that all composite tensor and value types return true.
self.assertTrue(
composite_tensor_utils.is_composite_or_composite_value(
sparse_tensor.SparseTensor([[0, 0]], [1], [1, 1])))
self.assertTrue(
composite_tensor_utils.is_composite_or_composite_value(
sparse_tensor.SparseTensorValue([[0, 0]], [1], [1, 1])))
self.assertTrue(
composite_tensor_utils.is_composite_or_composite_value(
ragged_tensor.RaggedTensor.from_row_splits(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))))
self.assertTrue(
composite_tensor_utils.is_composite_or_composite_value(
ragged_tensor_value.RaggedTensorValue(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))))
# Test that numpy arrays and tensors return false.
self.assertFalse(
composite_tensor_utils.is_composite_or_composite_value(
np.ndarray([0, 1])))
self.assertFalse(
composite_tensor_utils.is_composite_or_composite_value(
ops.convert_to_tensor([3, 1])))
def test_sparse_concatenation(self):
tensor_1 = sparse_tensor.SparseTensor([[0, 0]], [1], [1, 1])
tensor_2 = sparse_tensor.SparseTensor([[0, 0]], [2], [1, 1])
concatenated_tensor = composite_tensor_utils.append_composite_tensor(
tensor_1, tensor_2)
evaluated_tensor = self.evaluate(concatenated_tensor)
self.assertAllEqual(evaluated_tensor.indices, [[0, 0], [1, 0]])
self.assertAllEqual(evaluated_tensor.values, [1, 2])
self.assertAllEqual(evaluated_tensor.dense_shape, [2, 1])
def test_sparse_value_concatenation(self):
tensor_1 = sparse_tensor.SparseTensorValue([[0, 0]], [1], [1, 1])
tensor_2 = sparse_tensor.SparseTensorValue([[0, 0]], [2], [1, 1])
concatenated_tensor = composite_tensor_utils.append_composite_tensor(
tensor_1, tensor_2)
self.assertAllEqual(concatenated_tensor.indices, [[0, 0], [1, 0]])
self.assertAllEqual(concatenated_tensor.values, [1, 2])
self.assertAllEqual(concatenated_tensor.dense_shape, [2, 1])
def test_ragged_concatenation(self):
tensor_1 = ragged_tensor.RaggedTensor.from_row_splits(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))
tensor_2 = ragged_tensor.RaggedTensor.from_row_splits(
np.array([3, 4, 5]), np.array([0, 2, 3], dtype=np.int64))
concatenated_tensor = composite_tensor_utils.append_composite_tensor(
tensor_1, tensor_2)
evaluated_tensor = self.evaluate(concatenated_tensor)
self.assertAllEqual(evaluated_tensor.values, [0, 1, 2, 3, 4, 5])
self.assertAllEqual(evaluated_tensor.row_splits, [0, 1, 3, 5, 6])
def test_ragged_value_concatenation(self):
tensor_1 = ragged_tensor_value.RaggedTensorValue(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))
tensor_2 = ragged_tensor_value.RaggedTensorValue(
np.array([3, 4, 5]), np.array([0, 2, 3], dtype=np.int64))
concatenated_tensor = composite_tensor_utils.append_composite_tensor(
tensor_1, tensor_2)
self.assertAllEqual(concatenated_tensor.values, [0, 1, 2, 3, 4, 5])
self.assertAllEqual(concatenated_tensor.row_splits, [0, 1, 3, 5, 6])
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/composite_tensor_utils_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import random
import threading
import weakref
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@test_util.run_deprecated_v1
def test_assert_ops_in_graph(self):
with self.test_session():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"bye": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"hello": "Variable"}, ops.get_default_graph())
@test_util.run_deprecated_v1
def test_session_functions(self):
with self.test_session() as sess:
sess_ref = weakref.ref(sess)
with self.cached_session(graph=None, config=None) as sess2:
# We make sure that sess2 is sess.
assert sess2 is sess
# We make sure we raise an exception if we use cached_session with
# different values.
with self.assertRaises(ValueError):
with self.cached_session(graph=ops.Graph()) as sess2:
pass
with self.assertRaises(ValueError):
with self.cached_session(force_gpu=True) as sess2:
pass
# We make sure that test_session will cache the session even after the
# with scope.
assert not sess_ref()._closed
with self.session() as unique_sess:
unique_sess_ref = weakref.ref(unique_sess)
with self.session() as sess2:
assert sess2 is not unique_sess
# We make sure the session is closed when we leave the with statement.
assert unique_sess_ref()._closed
def test_assert_equal_graph_def(self):
with ops.Graph().as_default() as g:
def_empty = g.as_graph_def()
constant_op.constant(5, name="five")
constant_op.constant(7, name="seven")
def_57 = g.as_graph_def()
with ops.Graph().as_default() as g:
constant_op.constant(7, name="seven")
constant_op.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
test_util.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegexp(AssertionError,
r"^Found unexpected node '{{node seven}}"):
test_util.assert_equal_graph_def(def_57, def_empty)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testIsMklEnabled(self):
# This test doesn't assert anything.
# It ensures the py wrapper function is generated correctly.
if test_util.IsMklEnabled():
print("MKL is enabled")
else:
print("MKL is disabled")
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsAny(self):
# Test assertProtoEquals with a protobuf.Any field.
meta_graph_def_str = """
meta_info_def {
meta_graph_version: "outer"
any_info {
[type.googleapis.com/tensorflow.MetaGraphDef] {
meta_info_def {
meta_graph_version: "inner"
}
}
}
}
"""
meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
# Check if the assertion failure message contains the content of
# the inner proto.
with self.assertRaisesRegexp(AssertionError,
r'meta_graph_version: "inner"'):
self.assertProtoEquals("", meta_graph_def_outer)
@test_util.run_in_graph_and_eager_modes
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [
self.checkedThread(
target=err_func, args=(i,)) for i in range(10)
]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
with ops.Graph().as_default():
node_def = ops._NodeDef("IntOutput", "name")
node_def_orig = ops._NodeDef("IntOutput", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(),
original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
@test_util.run_in_graph_and_eager_modes
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
@test_util.run_in_graph_and_eager_modes
def testAllCloseTensors(self):
a_raw_data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a = constant_op.constant(a_raw_data)
b = math_ops.add(1, constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
self.assertAllClose(a, b)
self.assertAllClose(a, a_raw_data)
a_dict = {"key": a}
b_dict = {"key": b}
self.assertAllClose(a_dict, b_dict)
x_list = [a, b]
y_list = [a_raw_data, b]
self.assertAllClose(x_list, y_list)
@test_util.run_in_graph_and_eager_modes
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 7 + 1e-5)
@test_util.run_in_graph_and_eager_modes
def testAllCloseList(self):
with self.assertRaisesRegexp(AssertionError, r"not close dif"):
self.assertAllClose([0], [1])
@test_util.run_in_graph_and_eager_modes
def testAllCloseDictToNonDict(self):
with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose(1, {"a": 1})
with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose({"a": 1}, 1)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNamedtuples(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
my_named_tuple = collections.namedtuple("MyNamedTuple", ["a", "b", "c"])
# Identity.
self.assertAllClose(expected, my_named_tuple(a=a, b=b, c=c))
self.assertAllClose(
my_named_tuple(a=a, b=b, c=c), my_named_tuple(a=a, b=b, c=c))
@test_util.run_in_graph_and_eager_modes
def testAllCloseDicts(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
# Identity.
self.assertAllClose(expected, expected)
self.assertAllClose(expected, dict(expected))
# With each item removed.
for k in expected:
actual = dict(expected)
del actual[k]
with self.assertRaisesRegexp(AssertionError, r"mismatched keys"):
self.assertAllClose(expected, actual)
# With each item changed.
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
with self.assertRaisesRegexp(AssertionError, r"Shape mismatch"):
self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
c_copy = np.array(c)
c_copy[1, 1, 1] += 1e-5
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
@test_util.run_in_graph_and_eager_modes
def testAllCloseListOfNamedtuples(self):
my_named_tuple = collections.namedtuple("MyNamedTuple", ["x", "y"])
l1 = [
my_named_tuple(x=np.array([[2.3, 2.5]]), y=np.array([[0.97, 0.96]])),
my_named_tuple(x=np.array([[3.3, 3.5]]), y=np.array([[0.98, 0.99]]))
]
l2 = [
([[2.3, 2.5]], [[0.97, 0.96]]),
([[3.3, 3.5]], [[0.98, 0.99]]),
]
self.assertAllClose(l1, l2)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNestedStructure(self):
a = {"x": np.ones((3, 2, 4)) * 7, "y": (2, [{"nested": {"m": 3, "n": 4}}])}
self.assertAllClose(a, a)
b = copy.deepcopy(a)
self.assertAllClose(a, b)
# Test mismatched values
b["y"][1][0]["nested"]["n"] = 4.2
with self.assertRaisesRegexp(AssertionError,
r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
self.assertAllClose(a, b)
@test_util.run_in_graph_and_eager_modes
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
@test_util.skip_if(True) # b/117665998
def testForceGPU(self):
with self.assertRaises(errors.InvalidArgumentError):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = constant_op.constant(True)
y = [15]
control_flow_ops.Assert(x, y).run()
@test_util.run_in_graph_and_eager_modes
def testAssertAllCloseAccordingToType(self):
# test plain int
self.assertAllCloseAccordingToType(1, 1, rtol=1e-8, atol=1e-8)
# test float64
self.assertAllCloseAccordingToType(
np.asarray([1e-8], dtype=np.float64),
np.asarray([2e-8], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-8], dtype=dtypes.float64),
constant_op.constant([2e-8], dtype=dtypes.float64),
rtol=1e-8,
atol=1e-8)
    with self.assertRaises(AssertionError):
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float64),
np.asarray([2e-7], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
# test float32
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float32),
np.asarray([2e-7], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-7], dtype=dtypes.float32),
constant_op.constant([2e-7], dtype=dtypes.float32),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7)
    with self.assertRaises(AssertionError):
self.assertAllCloseAccordingToType(
np.asarray([1e-6], dtype=np.float32),
np.asarray([2e-6], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
# test float16
self.assertAllCloseAccordingToType(
np.asarray([1e-4], dtype=np.float16),
np.asarray([2e-4], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-4], dtype=dtypes.float16),
constant_op.constant([2e-4], dtype=dtypes.float16),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7,
half_rtol=1e-4,
half_atol=1e-4)
    with self.assertRaises(AssertionError):
self.assertAllCloseAccordingToType(
np.asarray([1e-3], dtype=np.float16),
np.asarray([2e-3], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
@test_util.run_in_graph_and_eager_modes
def testAssertAllEqual(self):
i = variables.Variable([100] * 3, dtype=dtypes.int32, name="i")
j = constant_op.constant([20] * 3, dtype=dtypes.int32, name="j")
k = math_ops.add(i, j, name="k")
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([120] * 3, k)
self.assertAllEqual([20] * 3, j)
with self.assertRaisesRegexp(AssertionError, r"not equal lhs"):
self.assertAllEqual([0] * 3, k)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllClose(self):
# Test with arrays
self.assertNotAllClose([0.1], [0.2])
with self.assertRaises(AssertionError):
self.assertNotAllClose([-1.0, 2.0], [-1.0, 2.0])
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
self.assertNotAllClose([0.9, 1.0], x)
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.0, 1.0], x)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseRTol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], rtol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, rtol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseATol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], atol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, atol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLess(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllClose([110.0, 120.0, 130.0], z)
self.assertAllGreater(x, 95.0)
self.assertAllLess(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 95.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLessEqual(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllEqual([110.0, 120.0, 130.0], z)
self.assertAllGreaterEqual(x, 95.0)
self.assertAllLessEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 95.0)
@test_util.run_deprecated_v1
def testAssertAllInRangeWithNonNumericValuesFails(self):
s1 = constant_op.constant("Hello, ", name="s1")
c = constant_op.constant([1 + 2j, -3 + 5j], name="c")
b = constant_op.constant([False, True], name="b")
with self.assertRaises(AssertionError):
self.assertAllInRange(s1, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(c, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(b, 0, 1)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRange(self):
x = constant_op.constant([10.0, 15.0], name="x")
self.assertAllInRange(x, 10, 15)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_lower_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_upper_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(
x, 10, 15, open_lower_bound=True, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeErrorMessageEllipses(self):
x_init = np.array([[10.0, 15.0]] * 12)
x = constant_op.constant(x_init, name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 5, 10)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeDetectsNaNs(self):
x = constant_op.constant(
[[np.nan, 0.0], [np.nan, np.inf], [np.inf, np.nan]], name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 0.0, 2.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeWithInfinities(self):
x = constant_op.constant([10.0, np.inf], name="x")
self.assertAllInRange(x, 10, np.inf)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, np.inf, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInSet(self):
b = constant_op.constant([True, False], name="b")
x = constant_op.constant([13, 37], name="x")
self.assertAllInSet(b, [False, True])
self.assertAllInSet(b, (False, True))
self.assertAllInSet(b, {False, True})
self.assertAllInSet(x, [0, 13, 37, 42])
self.assertAllInSet(x, (0, 13, 37, 42))
self.assertAllInSet(x, {0, 13, 37, 42})
with self.assertRaises(AssertionError):
self.assertAllInSet(b, [False])
with self.assertRaises(AssertionError):
self.assertAllInSet(x, (42,))
@test_util.run_deprecated_v1
def testRandomSeed(self):
    # Call setUp again for WithCApi case (since it makes a new default graph
# after setup).
# TODO(skyewm): remove this when C API is permanently enabled.
self.setUp()
a = random.randint(1, 1000)
a_np_rand = np.random.rand(1)
with self.test_session():
a_rand = random_ops.random_normal([1]).eval()
# ensure that randomness in multiple testCases is deterministic.
self.setUp()
b = random.randint(1, 1000)
b_np_rand = np.random.rand(1)
with self.test_session():
b_rand = random_ops.random_normal([1]).eval()
self.assertEqual(a, b)
self.assertEqual(a_np_rand, b_np_rand)
self.assertEqual(a_rand, b_rand)
@test_util.run_in_graph_and_eager_modes
def test_callable_evaluate(self):
def model():
return resource_variable_ops.ResourceVariable(
name="same_name",
initial_value=1) + 1
with context.eager_mode():
self.assertEqual(2, self.evaluate(model))
@test_util.run_in_graph_and_eager_modes
def test_nested_tensors_evaluate(self):
expected = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
nested = {"a": constant_op.constant(1),
"b": constant_op.constant(2),
"nested": {"d": constant_op.constant(3),
"e": constant_op.constant(4)}}
self.assertEqual(expected, self.evaluate(nested))
def test_run_in_graph_and_eager_modes(self):
l = []
def inc(self, with_brackets):
del self # self argument is required by run_in_graph_and_eager_modes.
mode = "eager" if context.executing_eagerly() else "graph"
with_brackets = "with_brackets" if with_brackets else "without_brackets"
l.append((with_brackets, mode))
f = test_util.run_in_graph_and_eager_modes(inc)
f(self, with_brackets=False)
f = test_util.run_in_graph_and_eager_modes()(inc)
f(self, with_brackets=True)
self.assertEqual(len(l), 4)
self.assertEqual(set(l), {
("with_brackets", "graph"),
("with_brackets", "eager"),
("without_brackets", "graph"),
("without_brackets", "eager"),
})
def test_get_node_def_from_graph(self):
graph_def = graph_pb2.GraphDef()
node_foo = graph_def.node.add()
node_foo.name = "foo"
self.assertIs(test_util.get_node_def_from_graph("foo", graph_def), node_foo)
self.assertIsNone(test_util.get_node_def_from_graph("bar", graph_def))
def test_run_in_eager_and_graph_modes_test_class(self):
msg = "`run_in_graph_and_eager_modes` only supports test methods.*"
with self.assertRaisesRegexp(ValueError, msg):
@test_util.run_in_graph_and_eager_modes()
class Foo(object):
pass
del Foo # Make pylint unused happy.
def test_run_in_eager_and_graph_modes_skip_graph_runs_eager(self):
modes = []
def _test(self):
if not context.executing_eagerly():
self.skipTest("Skipping in graph mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["eager"])
def test_run_in_eager_and_graph_modes_skip_eager_runs_graph(self):
modes = []
def _test(self):
if context.executing_eagerly():
self.skipTest("Skipping in eager mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["graph"])
@test_util.run_deprecated_v1
def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
modes = []
mode_name = lambda: "eager" if context.executing_eagerly() else "graph"
class ExampleTest(test_util.TensorFlowTestCase):
def runTest(self):
pass
def setUp(self):
modes.append("setup_" + mode_name())
@test_util.run_in_graph_and_eager_modes
def testBody(self):
modes.append("run_" + mode_name())
e = ExampleTest()
e.setUp()
e.testBody()
self.assertEqual(modes[0:2], ["setup_graph", "run_graph"])
self.assertEqual(modes[2:], ["setup_eager", "run_eager"])
@parameterized.named_parameters(dict(testcase_name="argument",
arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_parameterized_keyword(self, arg):
self.assertEqual(arg, True)
def test_build_as_function_and_v1_graph(self):
class GraphModeAndFunctionTest(parameterized.TestCase):
def __init__(inner_self): # pylint: disable=no-self-argument
super(GraphModeAndFunctionTest, inner_self).__init__()
inner_self.graph_mode_tested = False
inner_self.inside_function_tested = False
def runTest(self):
del self
@test_util.build_as_function_and_v1_graph
def test_modes(inner_self): # pylint: disable=no-self-argument
is_building_function = ops.get_default_graph().building_function
if is_building_function:
self.assertFalse(inner_self.inside_function_tested)
inner_self.inside_function_tested = True
else:
self.assertFalse(inner_self.graph_mode_tested)
inner_self.graph_mode_tested = True
test_object = GraphModeAndFunctionTest()
test_object.test_modes_v1_graph()
test_object.test_modes_function()
self.assertTrue(test_object.graph_mode_tested)
self.assertTrue(test_object.inside_function_tested)
# A separate test case to reproduce variable-sharing issues, which only pop up
# when setUp() is overridden and super() is not called.
class GraphAndEagerNoVariableSharing(test_util.TensorFlowTestCase):
def setUp(self):
pass # Intentionally does not call TensorFlowTestCase's super()
@test_util.run_in_graph_and_eager_modes
def test_no_variable_sharing(self):
variable_scope.get_variable(
name="step_size",
initializer=np.array(1e-5, np.float32),
use_resource=True,
trainable=False)
class GarbageCollectionTest(test_util.TensorFlowTestCase):
def test_no_reference_cycle_decorator(self):
class ReferenceCycleTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_garbage_created
def test_has_cycle(self):
a = []
a.append(a)
@test_util.assert_no_garbage_created
def test_has_no_cycle(self):
pass
with self.assertRaises(AssertionError):
ReferenceCycleTest().test_has_cycle()
ReferenceCycleTest().test_has_no_cycle()
@test_util.run_in_graph_and_eager_modes
def test_no_leaked_tensor_decorator(self):
class LeakedTensorTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_new_tensors
def test_has_leak(self):
self.a = constant_op.constant([3.], name="leak")
@test_util.assert_no_new_tensors
def test_has_no_leak(self):
constant_op.constant([3.], name="no-leak")
with self.assertRaisesRegexp(AssertionError, "Tensors not deallocated"):
LeakedTensorTest().test_has_leak()
LeakedTensorTest().test_has_no_leak()
def test_no_new_objects_decorator(self):
class LeakedObjectTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
inner_self.accumulation = []
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_leak(self):
self.accumulation.append([1.])
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_no_leak(self):
self.not_accumulating = [1.]
with self.assertRaises(AssertionError):
LeakedObjectTest().test_has_leak()
LeakedObjectTest().test_has_no_leak()
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/test_util_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor-like objects that are composed from tf.Tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util import nest
@six.add_metaclass(abc.ABCMeta)
class CompositeTensor(object):
"""Abstract base class for Tensor-like objects that are composed from Tensors.
Each `CompositeTensor` can be decomposed into a structured collection of
component `tf.Tensor`s, and reconstructed from those components.
The `tensorflow.python.util.nest` module has support for treating composite
tensors as structure, which makes it easy to flatten and reconstruct
composite tensors (or larger structures that contain composite tensors).
E.g.:
```python
ct = ... # Create a composite tensor.
flat_list_of_tensors = nest.flatten(ct, expand_composites=True)
transformed_list_of_tensors = ... # do something with the flat tensors.
result = nest.pack_sequence_as(ct, transformed_list_of_tensors,
expand_composites=True)
```
"""
@abc.abstractproperty
def _type_spec(self):
"""A `TypeSpec` describing the type of this value."""
raise NotImplementedError("%s._type_spec()" % type(self).__name__)
# Deprecated -- use self._type_spec._to_components(self) instead.
# TODO(b/133606651) Remove all callers and then delete this method.
def _to_components(self):
"""Decomposes this composite tensor into its component tensors.
Returns:
A nested structure of `tf.Tensor`s and `CompositeTensor`s that can be
used to reconstruct this composite tensor (along with metadata returned
by `_component_metadata`).
"""
return self._type_spec._to_components(self) # pylint: disable=protected-access
# Deprecated -- use self._type_spec instead.
# TODO(b/133606651) Remove all callers and then delete this method.
def _component_metadata(self):
"""Returns any non-tensor metadata needed to reconstruct a composite tensor.
Returns:
A nested structure of metadata that can be used to reconstruct this
composite tensor (along with the tensors returned by `_to_components`).
"""
return self._type_spec
# Deprecated -- use metadata._from_components(components) instead.
# TODO(b/133606651) Remove all callers and then delete this method.
@staticmethod
def _from_components(components, metadata):
"""Creates a composite tensor of type `cls` from components.
Args:
components: A nested structure whose values are `tf.Tensor`s or
`tf.CompositeTensor`s (as returned by `_to_components`).
metadata: A nested structure containing any additional metadata needed to
reconstruct the composite tensor (as returned by `_component_metadata`).
Returns:
A `CompositeTensor` of type `cls`.
"""
return metadata._from_components(components) # pylint: disable=protected-access
def _shape_invariant_to_type_spec(self, shape):
"""Returns a TypeSpec given a shape invariant (used by `tf.while_loop`).
Args:
shape: A `tf.TensorShape` object. The shape invariant for this
`CompositeTensor`, or `None` if a default shape invariant should be
used (based on the value of this `CompositeTensor`).
Returns:
A nested structure whose values are `tf.TensorShape` objects, specifying
the shape invariants for the tensors that comprise this `CompositeTensor`.
"""
# New TypeSpec subclasses generally do not need to implement this --
# this method is used for backwards compatibility. Users of tf.while_loop
# can specify a type by passing in TypeSpec instead.
raise NotImplementedError("%s._shape_invariant_to_type_spec"
% type(self).__name__)
# TODO(b/133606651) Remove this property, since it's not clear what it should
# return if a CompositeTensor has a mix of graph and non-graph components.
# Update users to perform an appropriate check themselves.
@property
def _is_graph_tensor(self):
"""Returns True if this tensor's components belong to a TF graph."""
components = self._type_spec._to_components(self) # pylint: disable=protected-access
tensors = nest.flatten(components, expand_composites=True)
return any(hasattr(t, "graph") for t in tensors)
def _consumers(self):
"""Returns a list of `Operation`s that consume this `CompositeTensor`.
Returns:
A list of `Operation`s.
Raises:
RuntimeError: If this method is called while executing eagerly.
"""
consumers = nest.flatten([
component.consumers()
for component in self._to_components()
if getattr(component, "graph", None) is not None
])
return list(set(consumers))
pywrap_tensorflow.RegisterType("CompositeTensor", CompositeTensor)
def replace_composites_with_components(structure):
"""Recursively replaces CompositeTensors with their components.
Args:
structure: A `nest`-compatible structure, possibly containing composite
tensors.
Returns:
A copy of `structure`, where each composite tensor has been replaced by
its components. The result will contain no composite tensors.
Note that `nest.flatten(replace_composites_with_components(structure))`
returns the same value as `nest.flatten(structure, expand_composites=True)`.
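For example (an illustrative sketch, where `ct` is any composite tensor, such
as a `tf.RaggedTensor`):
```python
structure = {"ct": ct, "x": 3}
result = replace_composites_with_components(structure)
# result["x"] is unchanged; result["ct"] is the nested structure of
# component `tf.Tensor`s that `ct` decomposes into.
```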
"""
if isinstance(structure, CompositeTensor):
return replace_composites_with_components(structure._to_components()) # pylint: disable=protected-access
elif not nest.is_sequence(structure):
return structure
else:
return nest.map_structure(replace_composites_with_components, structure,
expand_composites=False)
# TODO(edloper): Can we replace convert_to_tensor_or_xyz with just
# convert_to_tensor_or_composite? Alternatively, should composite tensors
# register a dispatch override for tf.convert_to_tensor?
|
tensorflow-master
|
tensorflow/python/framework/composite_tensor.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for reading/writing graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
from google.protobuf import text_format
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.tf_export import tf_export
@tf_export('io.write_graph', v1=['io.write_graph', 'train.write_graph'])
def write_graph(graph_or_graph_def, logdir, name, as_text=True):
"""Writes a graph proto to a file.
The graph is written as a text proto unless `as_text` is `False`.
```python
v = tf.Variable(0, name='my_variable')
sess = tf.compat.v1.Session()
tf.io.write_graph(sess.graph_def, '/tmp/my-model', 'train.pbtxt')
```
or
```python
v = tf.Variable(0, name='my_variable')
sess = tf.compat.v1.Session()
tf.io.write_graph(sess.graph, '/tmp/my-model', 'train.pbtxt')
```
Args:
graph_or_graph_def: A `Graph` or a `GraphDef` protocol buffer.
logdir: Directory where to write the graph. This can refer to remote
filesystems, such as Google Cloud Storage (GCS).
name: Filename for the graph.
as_text: If `True`, writes the graph as an ASCII proto.
Returns:
The path of the output proto file.
"""
if isinstance(graph_or_graph_def, ops.Graph):
graph_def = graph_or_graph_def.as_graph_def()
else:
graph_def = graph_or_graph_def
# GCS does not have the concept of a directory at the moment.
if not file_io.file_exists(logdir) and not logdir.startswith('gs:'):
file_io.recursive_create_dir(logdir)
path = os.path.join(logdir, name)
if as_text:
file_io.atomic_write_string_to_file(path,
text_format.MessageToString(
graph_def, float_format=''))
else:
file_io.atomic_write_string_to_file(path, graph_def.SerializeToString())
return path
|
tensorflow-master
|
tensorflow/python/framework/graph_io.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Utility to convert a Graph to a FunctionDef."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import op_def_registry
def _make_argname_from_tensor_name(name):
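  """Converts a tensor name to an arg name, e.g. 'x:0' -> 'x', 'x:1' -> 'x_o1'."""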
return re.sub(":0$", "", name).replace(":", "_o")
def _tensor_to_argdef(t, name=None, used_names=None):
"""Convert tensor t to an argdef, with a specified name or a unique name."""
arg = op_def_pb2.OpDef.ArgDef()
if name is None:
arg.name = _make_argname_from_tensor_name(t.name)
if used_names is not None:
if arg.name in used_names:
i = 0
while True:
new_name = "%s_U%d" % (arg.name, i)
if new_name not in used_names:
arg.name = new_name
break
i += 1
used_names.add(arg.name)
else:
arg.name = name
arg.type = t.dtype.as_datatype_enum
return arg
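# For example (illustrative): with a shared `used_names` set, two tensors that
# are both named "x:0" yield ArgDefs named "x" and "x_U0" respectively.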
def _is_in_placeholders(op, func_arg_placeholders):
"""Checks whether any output of this op is in func_arg_placeholders."""
return op.values() and any(x.name in func_arg_placeholders
for x in op.values())
def _get_node_def(op):
return op.node_def # pylint: disable=protected-access
def _get_op_def(op):
return op.op_def or op_def_registry.get_registered_ops()[op.type]
def _create_input_dict(function_graph,
func_arg_placeholders,
initial_value=None):
"""Create a mapping from graph tensor names to function tensor names."""
if initial_value is None:
input_dict = {}
else:
input_dict = dict(initial_value)
for op in function_graph.get_operations():
if _is_in_placeholders(op, func_arg_placeholders):
input_dict[op.name] = op.name
else:
op_def = _get_op_def(op)
attrs = _get_node_def(op).attr
o = 0
for arg_def in op_def.output_arg:
if arg_def.number_attr:
num = attrs[arg_def.number_attr].i
elif arg_def.type_list_attr:
num = len(attrs[arg_def.type_list_attr].list.type)
else:
num = 1
for i in range(num):
result = "%s:%s:%d" % (op.name, arg_def.name, i)
input_dict[op.values()[o].name] = result
if o == 0:
input_dict[op.name] = result
o += 1
return input_dict
def _add_op_node(op, func, input_dict):
"""Converts an op to a function def node and add it to `func`."""
# Add an entry in func.node_def
# Note that extend() makes a copy in this case, see:
# https://developers.google.com/protocol-buffers/docs/reference/python-generated#repeated-message-fields
func.node_def.extend([_get_node_def(op)])
node_def = func.node_def[-1]
for i in range(len(node_def.input)):
if not node_def.input[i].startswith("^"):
assert node_def.input[i] in input_dict, ("%s missing from %s" %
(node_def.input[i],
input_dict.items()))
node_def.input[i] = input_dict[node_def.input[i]]
# The function is stateful if any of its operations are stateful.
# NOTE(mrry): The "Const" node typically does not have an `OpDef` associated
# with it, so we assume any nodes without an `OpDef` are stateless.
# TODO(skyewm): Remove the `is not None` test after we transition to the C
# API.
if op.op_def is not None and op.op_def.is_stateful:
func.signature.is_stateful = True
def graph_to_function_def(graph, operations, inputs, outputs, out_names=None):
"""Returns `graph` as a `FunctionDef` protocol buffer.
This method creates a [`FunctionDef`](
https://www.tensorflow.org/code/tensorflow/core/framework/function.proto)
protocol buffer that contains all the ops in `operations`. The
operations become the body of the function.
The arguments `inputs` and `outputs` will be listed as the inputs
and outputs tensors of the function. They must be lists of
tensors present in the graph. The lists can optionally be empty.
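For example (an illustrative sketch using the TF1 API):
```python
g = tf.Graph()
with g.as_default():
  x = tf.compat.v1.placeholder(tf.float32, name="x")
  y = tf.square(x, name="y")
fdef = graph_to_function_def(g, g.get_operations(), inputs=[x], outputs=[y])
```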
Args:
graph: Graph.
operations: the operations to put in the function. Must be a subset of
the operations in the graph.
inputs: List of tensors. Inputs to the function.
outputs: List of tensors. Outputs of the function.
out_names: Optional list of string names for the outputs.
Returns:
A FunctionDef protocol buffer.
Raises:
ValueError: if out_names is specified and the wrong length.
"""
func = function_pb2.FunctionDef()
func.signature.name = "_"
used_names = set()
func.signature.input_arg.extend(
[_tensor_to_argdef(i, used_names=used_names) for i in inputs])
# Initializes the input map with all placeholder input tensors.
initial_dict = {}
for o, m in zip(inputs, func.signature.input_arg):
initial_dict[o.name] = m.name
if out_names is None:
used_names = set()
func.signature.output_arg.extend(
[_tensor_to_argdef(o, used_names=used_names) for o in outputs])
elif len(outputs) != len(out_names):
raise errors_impl.InvalidArgumentError(
None, None,
"output names must be either empty or equal in size to outputs. "
"output names size = %d outputs size = %d" %
(len(out_names), len(outputs)))
elif len(out_names) != len(set(out_names)):
raise ValueError(
"Must not have duplicates in out_names: %s" % ", ".join(out_names))
else:
func.signature.output_arg.extend(
[_tensor_to_argdef(o, name=n) for o, n in zip(outputs, out_names)])
func_arg_placeholders = set([i.name for i in inputs])
input_dict = _create_input_dict(graph, func_arg_placeholders,
initial_value=initial_dict)
for op in operations:
if _is_in_placeholders(op, func_arg_placeholders):
continue
_add_op_node(op, func, input_dict)
if out_names is None:
for index, o in enumerate(outputs):
k = func.signature.output_arg[index].name
func.ret[k] = input_dict[o.name]
else:
for o, n in zip(outputs, out_names):
func.ret[n] = input_dict[o.name]
return func
|
tensorflow-master
|
tensorflow/python/framework/graph_to_function_def.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MetaGraph and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from distutils import version as distutils_version # pylint: disable=g-bad-import-order
import os.path
import re
import six
from google.protobuf.any_pb2 import Any
from google.protobuf import text_format
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.protobuf import graph_debug_info_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import versions
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import tf_stack
# Prefix to be added to unbound input names so they are easily identifiable.
_UNBOUND_INPUT_PREFIX = "$unbound_inputs_"
# List of collections that didn't register proto functions; as a result, in a
# previously exported meta_graph the items are of a different data type.
_COMPAT_COLLECTION_LIST = [ops.GraphKeys.LOCAL_VARIABLES,
ops.GraphKeys.MODEL_VARIABLES,
ops.GraphKeys.METRIC_VARIABLES]
def _node_def(from_node_def, export_scope, unbound_inputs, clear_devices=False):
"""Create a `NodeDef` proto with export_scope stripped.
Args:
from_node_def: A `node_def_pb2.NodeDef` protocol buffer.
export_scope: A `string` representing the name scope to remove.
unbound_inputs: An array of unbound input names if they exist.
clear_devices: Boolean which controls whether to clear device information
from node_def. Default false.
Returns:
A `node_def_pb2.NodeDef` protocol buffer.
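For example, with `export_scope="scope"`, an input named `"scope/x"` becomes
`"x"`, while an out-of-scope input `"other/y"` is rewritten to
`"$unbound_inputs_other/y"` and recorded in `unbound_inputs`.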
"""
node_def = copy.deepcopy(from_node_def)
for i, v in enumerate(node_def.input):
if (export_scope and
not node_def.input[i].lstrip("^").startswith(export_scope)):
# Adds "$unbound_inputs_" prefix to the unbound name so they are easily
# identifiable.
node_def.input[i] = re.sub(r"([\^]|^)(.*)",
r"\1" + _UNBOUND_INPUT_PREFIX + r"\2",
compat.as_str(v))
unbound_inputs.append(node_def.input[i])
else:
node_def.input[i] = ops.strip_name_scope(v, export_scope)
node_def.name = compat.as_bytes(
ops.strip_name_scope(from_node_def.name, export_scope))
for k, v in six.iteritems(from_node_def.attr):
if k == "_class":
new_s = [compat.as_bytes(
ops.strip_name_scope(s, export_scope)) for s in v.list.s
if not export_scope or
compat.as_str(s).split("@")[1].startswith(export_scope)]
node_def.attr[k].CopyFrom(attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=new_s)))
elif node_def.op in ("Enter", "RefEnter") and k == "frame_name":
if not export_scope or compat.as_str(v.s).startswith(export_scope):
new_s = compat.as_bytes(ops.strip_name_scope(v.s, export_scope))
node_def.attr[k].CopyFrom(attr_value_pb2.AttrValue(s=new_s))
else:
node_def.attr[k].CopyFrom(v)
if clear_devices:
node_def.device = ""
return node_def
def _read_file(filename):
"""Reads a file containing `GraphDef` and returns the protocol buffer.
Args:
filename: `graph_def` filename including the path.
Returns:
A `GraphDef` protocol buffer.
Raises:
IOError: If the file doesn't exist, or cannot be successfully parsed.
"""
graph_def = graph_pb2.GraphDef()
if not file_io.file_exists(filename):
raise IOError("File %s does not exist." % filename)
# First try to read it as a binary file.
file_content = file_io.FileIO(filename, "rb").read()
try:
graph_def.ParseFromString(file_content)
return graph_def
except Exception: # pylint: disable=broad-except
pass
# Next try to read it as a text file.
try:
text_format.Merge(file_content, graph_def)
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
return graph_def
def ops_used_by_graph_def(graph_def):
"""Collect the list of ops used by a graph.
Does not validate that the ops are all registered.
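For example (illustrative):
```python
graph_def = graph_pb2.GraphDef()
graph_def.node.add(name="c", op="Const")
graph_def.node.add(name="m", op="MatMul")
ops_used_by_graph_def(graph_def)  # ["Const", "MatMul"], in no fixed order.
```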
Args:
graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.
Returns:
A list of strings, each naming an op used by the graph.
"""
# Map function names to definitions
name_to_function = {}
for fun in graph_def.library.function:
name_to_function[fun.signature.name] = fun
# Collect the list of op names. Since functions can reference functions, we
# need a recursive traversal.
used_ops = set() # Includes both primitive ops and functions
functions_to_process = [] # A subset of used_ops
def mark_op_as_used(op):
if op not in used_ops and op in name_to_function:
functions_to_process.append(name_to_function[op])
used_ops.add(op)
for node in graph_def.node:
mark_op_as_used(node.op)
while functions_to_process:
fun = functions_to_process.pop()
for node in fun.node_def:
mark_op_as_used(node.op)
return [op for op in used_ops if op not in name_to_function]
def stripped_op_list_for_graph(graph_def):
"""Collect the stripped OpDefs for ops used by a graph.
This function computes the `stripped_op_list` field of `MetaGraphDef` and
similar protos. The result can be communicated from the producer to the
consumer, which can then use the C++ function
`RemoveNewDefaultAttrsFromGraphDef` to improve forwards compatibility.
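For example (illustrative, given a `tf.Graph` `g`):
```python
op_list = stripped_op_list_for_graph(g.as_graph_def())
```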
Args:
graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.
Returns:
An `OpList` of ops used by the graph.
Raises:
ValueError: If an unregistered op is used.
"""
# This is the Python equivalent of StrippedOpListForGraph in C++.
# Unfortunately, since the Python op registry can differ from that in C++, we
# can't remove the duplication using swig (at least naively).
# TODO(irving): Support taking graphs directly.
used_ops = ops_used_by_graph_def(graph_def)
# Verify that all used ops are registered.
registered_ops = op_def_registry.get_registered_ops()
# These internal ops used by functions are not registered, so we need to
# whitelist them. # TODO(irving): Do something better here.
op_whitelist = ("_Arg", "_Retval", "_ListToArray", "_ArrayToList")
for op in used_ops:
if op not in registered_ops and op not in op_whitelist:
raise ValueError("Op %s is used by the graph, but is not registered" % op)
# Build the stripped op list in sorted order
return op_def_pb2.OpList(op=[registered_ops[op] for op in sorted(used_ops)
if op in registered_ops])
def _get_kind_name(item):
"""Returns the kind name in CollectionDef.
Args:
item: A data item.
Returns:
The string representation of the kind in CollectionDef.
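For example, `_get_kind_name("foo")` is `"bytes_list"`, `_get_kind_name(7)` is
`"int64_list"`, and any value of an unlisted type falls through to
`"node_list"`.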
"""
if isinstance(item, (six.string_types, six.binary_type)):
kind = "bytes_list"
elif isinstance(item, six.integer_types):
kind = "int64_list"
elif isinstance(item, float):
kind = "float_list"
elif isinstance(item, Any):
kind = "any_list"
else:
kind = "node_list"
return kind
SAVE_AND_RESTORE_OPS = ["SaveV2",
"Save", "SaveSlice",
"LegacySave", "LegacySaveSlice",
"RestoreV2",
"Restore", "RestoreSlice",
"LegacyRestore", "LegacyRestoreSlice"]
def _op_name(tensor_name):
"""Extract the Op name from a Tensor name.
The Op name is everything before a colon, if present,
not including any ^ prefix denoting a control dependency.
Args:
tensor_name: the full name of a Tensor in the graph.
Returns:
The name of the Op of which the given Tensor is an output.
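For example, `_op_name("foo/bar:0")` and `_op_name("^foo/bar")` both return
`"foo/bar"`.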
Raises:
ValueError: if tensor_name is None or empty.
"""
if not tensor_name:
raise ValueError("Tensor name cannot be empty or None.")
# Control dependency inputs start with ^.
if tensor_name.startswith("^"):
tensor_name = tensor_name[1:]
if ":" in tensor_name:
op_name, _ = tensor_name.split(":")
return op_name
return tensor_name
def _get_scope(node_name):
"""Extract the scope name from a node name.
The scope name is everything before the final slash,
not including any ^ prefix denoting a control dependency.
Args:
node_name: the full name of an Op or a Tensor in the graph.
Returns:
The deepest named scope containing the node.
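For example, `_get_scope("foo/bar/op")` returns `"foo/bar"`, while a
top-level node name such as `"op"` yields `""`.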
Raises:
ValueError: if node_name is None or empty.
"""
if not node_name:
raise ValueError("Node name cannot be empty or None.")
# Control dependency inputs start with ^.
if node_name.startswith("^"):
node_name = node_name[1:]
if "/" in node_name:
scope, _ = node_name.rsplit("/", 1)
return scope
return ""
def _find_extraneous_saver_nodes(graph_def, saver_def):
"""Identifies any nodes in the graph_def related to unused Savers.
This approach assumes that each Saver is cleanly isolated in its own name
scope, so we need only identify the scopes associated with extraneous Savers
and return all the nodes in those scopes.
Args:
graph_def: a GraphDef proto to evaluate.
saver_def: a SaverDef proto referencing Save/Restore ops to be retained.
Returns:
An iterable of node names that may be safely omitted.
"""
# TODO(soergel): confirm that the assumption of scope isolation is valid.
# If not, we need to walk up the graph from any restore_all nodes, and walk
# down the graph from any Save/Restore nodes. I drafted that approach too,
# but it seems unnecessarily complex given the name scope solution.
# load the graph DAG in minimal form, without initializing a full Graph object
nodes = {node_def.name:
(set([_op_name(x) for x in node_def.input]), node_def.op)
for node_def in graph_def.node}
retain_scope_save = None
retain_scope_restore = None
# It's possible to have no saver if the graph has no Variables.
if saver_def is not None:
save_op_name = _op_name(saver_def.save_tensor_name)
restore_op_name = _op_name(saver_def.restore_op_name)
# The save and restore scopes should always be the same, but if they differ
# for some reason, we retain them both to be safe.
retain_scope_restore = _get_scope(restore_op_name) + "/"
retain_scope_save = _get_scope(save_op_name) + "/"
all_saver_node_names = set([name for name, (_, op) in nodes.items()
if op in SAVE_AND_RESTORE_OPS])
all_saver_scopes = (set([_get_scope(x) for x in all_saver_node_names])
- all_saver_node_names)
all_saver_scopes = set([x + "/" for x in all_saver_scopes])
extraneous_scopes = all_saver_scopes - set([retain_scope_save,
retain_scope_restore])
extraneous_node_names = set()
for name, _ in nodes.items():
for extraneous_scope in extraneous_scopes:
if name.startswith(extraneous_scope):
extraneous_node_names.add(name)
break
return extraneous_node_names
def _should_include_node(node_or_node_name, export_scope, exclude_nodes):
"""Returns `True` if a node should be included.
Args:
node_or_node_name: A node or `string` node name.
export_scope: `string`. Name scope under which to extract the subgraph. The
scope name will be stripped from the node definitions for easy import
later into new name scopes.
exclude_nodes: An iterable of nodes or `string` node names to omit from the
export, or None. Note no sanity-checking is done, so this list must be
carefully constructed to avoid producing an invalid graph.
Returns:
`True` if the node should be included.
"""
if not isinstance(node_or_node_name, six.string_types):
try:
node_name = node_or_node_name.name
except AttributeError:
# Keep the object that we don't know how to process.
return True
else:
node_name = node_or_node_name
if exclude_nodes and (node_or_node_name in exclude_nodes
or node_name in exclude_nodes):
return False
return (node_name.startswith(_UNBOUND_INPUT_PREFIX) or
(not export_scope or node_name.startswith(export_scope)))
def add_collection_def(meta_graph_def, key, graph=None,
export_scope=None, exclude_nodes=None,
override_contents=None):
"""Adds a collection to MetaGraphDef protocol buffer.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
key: One of the GraphKeys or user-defined string.
graph: The `Graph` from which to get collections.
export_scope: Optional `string`. Name scope to remove.
exclude_nodes: An iterable of nodes or `string` node names to omit from the
collection, or None.
override_contents: An iterable of values to place in the collection,
ignoring the current values (if set).
"""
if graph and not isinstance(graph, ops.Graph):
raise TypeError("graph must be of type Graph, not %s", type(graph))
if not isinstance(key, six.string_types) and not isinstance(key, bytes):
logging.warning("Only collections with string type keys will be "
"serialized. This key has %s", type(key))
return
# Sets graph to default graph if it's not passed in.
graph = graph or ops.get_default_graph()
if override_contents:
collection_list = override_contents
else:
collection_list = graph.get_collection(key)
# Remove nodes that should not be exported from the collection list.
collection_list = [x for x in collection_list if
_should_include_node(x, export_scope, exclude_nodes)]
if not collection_list:
return
try:
col_def = meta_graph_def.collection_def[key]
to_proto = ops.get_to_proto_function(key)
proto_type = ops.get_collection_proto_type(key)
if to_proto:
kind = "bytes_list"
for x in collection_list:
# Additional type check to make sure the returned proto is indeed
# what we expect.
proto = to_proto(x, export_scope=export_scope)
if proto:
assert isinstance(proto, proto_type)
getattr(col_def, kind).value.append(proto.SerializeToString())
else:
kind = _get_kind_name(collection_list[0])
if kind == "node_list":
for x in collection_list:
if not export_scope or x.name.startswith(export_scope):
getattr(col_def, kind).value.append(
ops.strip_name_scope(x.name, export_scope))
elif kind == "bytes_list":
# NOTE(opensource): This force conversion is to work around the fact
# that Python3 distinguishes between bytes and strings.
getattr(col_def, kind).value.extend(
[compat.as_bytes(x) for x in collection_list])
else:
getattr(col_def, kind).value.extend([x for x in collection_list])
except Exception as e: # pylint: disable=broad-except
logging.warning("Issue encountered when serializing %s.\n"
"Type is unsupported, or the types of the items don't "
"match field type in CollectionDef. Note this is a warning "
"and probably safe to ignore.\n%s", key, str(e))
if key in meta_graph_def.collection_def:
del meta_graph_def.collection_def[key]
return
def _is_default_attr_value(op_def, attr_name, attr_value):
"""Checks if given attribute matches the default value in the op def."""
for attr_def in op_def.attr:
if attr_def.name == attr_name:
if not attr_def.HasField("default_value"):
return False
# pywrap_tensorflow.EqualAttrValueWrapper returns an empty string
# if both arguments represent an equivalent AttrValue instance.
return not pywrap_tensorflow.EqualAttrValueWrapper(
attr_value.SerializeToString(),
attr_def.default_value.SerializeToString())
return False
def strip_graph_default_valued_attrs(meta_graph_def):
"""Strips default valued attributes for node defs in given MetaGraphDef.
This method also sets `meta_info_def.stripped_default_attrs` in the given
`MetaGraphDef` proto to True.
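For example (an illustrative sketch; `create_meta_graph_def` is defined later
in this module):
```python
meta_graph_def = create_meta_graph_def(graph=ops.get_default_graph())
strip_graph_default_valued_attrs(meta_graph_def)
assert meta_graph_def.meta_info_def.stripped_default_attrs
```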
Args:
meta_graph_def: `MetaGraphDef` protocol buffer
Returns:
None.
"""
# Map function op names to their function definitions.
op_name_to_function = {}
for function_def in meta_graph_def.graph_def.library.function:
op_name_to_function[function_def.signature.name] = function_def
# Get all registered ops.
registered_ops = op_def_registry.get_registered_ops()
def _strip_node_default_valued_attrs(node_def):
"""Removes default valued attributes from a single node def."""
if node_def.op in op_name_to_function or node_def.op not in registered_ops:
return
op_def = registered_ops[node_def.op]
attrs_to_strip = set()
for attr_name, attr_value in node_def.attr.items():
if _is_default_attr_value(op_def, attr_name, attr_value):
attrs_to_strip.add(attr_name)
for attr in attrs_to_strip:
del node_def.attr[attr]
# Process all NodeDef instances in graph_def.
for node_def in meta_graph_def.graph_def.node:
_strip_node_default_valued_attrs(node_def)
# Process all NodeDef instances in graph_def.library.function.
for function_def in meta_graph_def.graph_def.library.function:
for function_node_def in function_def.node_def:
_strip_node_default_valued_attrs(function_node_def)
# Tell consumers of this graph that default valued attrs have been stripped.
meta_graph_def.meta_info_def.stripped_default_attrs = True
def create_graph_debug_info_def(operations):
"""Construct and returns a `GraphDebugInfo` protocol buffer.
Args:
operations: An iterable of op.Operation objects having _traceback members.
Returns:
GraphDebugInfo protocol buffer.
Raises:
TypeError: If the arguments are not of the correct proto buffer type.
"""
# Creates an empty GraphDebugInfoDef proto.
graph_debug_info_def = graph_debug_info_pb2.GraphDebugInfo()
# Gets the file names and line numbers for the exported node names. Also
# collects the unique file names.
all_file_names = set()
node_to_trace = {}
for op in operations:
# Gets the stack trace of the operation and then the file location.
node_name = op.name
node_to_trace[node_name] = error_interpolation.compute_useful_frames(op, 10)
for frame in node_to_trace[node_name]:
all_file_names.add(frame[tf_stack.TB_FILENAME])
# Sets the `files` field in the GraphDebugInfo proto
graph_debug_info_def.files.extend(all_file_names)
# Builds a mapping between file names and index of the `files` field, so we
# only store the indexes for the nodes in the GraphDebugInfo.
file_to_index = dict(
[(y, x) for x, y in enumerate(graph_debug_info_def.files)])
# Creates the FileLineCol proto for each node and sets the value in the
# GraphDebugInfo proto. We only store the file name index for each node to
# save the storage space.
for node_name, frames in node_to_trace.items():
trace_def = graph_debug_info_def.traces[node_name]
for frame in reversed(frames):
trace_def.file_line_cols.add(
file_index=file_to_index[frame[tf_stack.TB_FILENAME]],
line=frame[tf_stack.TB_LINENO],
func=frame[tf_stack.TB_FUNCNAME],
code=frame[tf_stack.TB_CODEDICT])
return graph_debug_info_def
def create_meta_graph_def(meta_info_def=None,
graph_def=None,
saver_def=None,
collection_list=None,
graph=None,
export_scope=None,
exclude_nodes=None,
clear_extraneous_savers=False,
strip_default_attrs=False):
# pylint: disable=line-too-long
"""Construct and returns a `MetaGraphDef` protocol buffer.
Args:
meta_info_def: `MetaInfoDef` protocol buffer.
graph_def: `GraphDef` protocol buffer.
saver_def: `SaverDef` protocol buffer.
collection_list: List of string keys to collect.
graph: The `Graph` to create `MetaGraphDef` out of.
export_scope: Optional `string`. Name scope to remove.
exclude_nodes: An iterable of nodes or `string` node names to omit from all
collections, or None.
clear_extraneous_savers: Remove any preexisting SaverDefs from the SAVERS
collection. Note this method does not alter the graph, so any
extraneous Save/Restore ops should have been removed already, as needed.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
Returns:
MetaGraphDef protocol buffer.
Raises:
TypeError: If the arguments are not of the correct proto buffer type.
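For example (illustrative):
```python
meta_graph_def = create_meta_graph_def(
    graph=ops.get_default_graph(), strip_default_attrs=True)
```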
"""
# pylint: enable=line-too-long
# Type check.
if graph and not isinstance(graph, ops.Graph):
raise TypeError("graph must be of type Graph, not %s", type(graph))
if meta_info_def and not isinstance(meta_info_def,
meta_graph_pb2.MetaGraphDef.MetaInfoDef):
raise TypeError("meta_info_def must be of type MetaInfoDef, not %s",
type(meta_info_def))
if graph_def and not isinstance(graph_def, graph_pb2.GraphDef):
raise TypeError("graph_def must be of type GraphDef, not %s",
type(graph_def))
if saver_def and not isinstance(saver_def, saver_pb2.SaverDef):
raise TypeError("saver_def must be of type SaverDef, not %s",
type(saver_def))
# Sets graph to default graph if it's not passed in.
graph = graph or ops.get_default_graph()
# Creates a MetaGraphDef proto.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
# Adds meta_info_def.
if not meta_info_def:
meta_info_def = meta_graph_pb2.MetaGraphDef.MetaInfoDef()
# Set the tf version strings to the current tf build.
meta_info_def.tensorflow_version = versions.__version__
meta_info_def.tensorflow_git_version = versions.__git_version__
meta_graph_def.meta_info_def.MergeFrom(meta_info_def)
# Adds graph_def or the default.
if not graph_def:
meta_graph_def.graph_def.MergeFrom(graph.as_graph_def(add_shapes=True))
else:
meta_graph_def.graph_def.MergeFrom(graph_def)
# Fills in meta_info_def.stripped_op_list using the ops from graph_def.
# pylint: disable=g-explicit-length-test
if len(meta_graph_def.meta_info_def.stripped_op_list.op) == 0:
meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(
stripped_op_list_for_graph(meta_graph_def.graph_def))
# pylint: enable=g-explicit-length-test
# Strip default valued attributes in graph_def.
if strip_default_attrs:
strip_graph_default_valued_attrs(meta_graph_def)
# Adds saver_def.
if saver_def:
meta_graph_def.saver_def.MergeFrom(saver_def)
# Adds collection_list.
if collection_list is not None:
clist = collection_list
else:
clist = graph.get_all_collection_keys()
for ctype in clist:
if clear_extraneous_savers and ctype == ops.GraphKeys.SAVERS:
# Avoid importing Saver here
from_proto = ops.get_from_proto_function(ctype)
add_collection_def(meta_graph_def, ctype,
graph=graph,
export_scope=export_scope,
exclude_nodes=exclude_nodes,
override_contents=[from_proto(saver_def)])
else:
add_collection_def(meta_graph_def, ctype,
graph=graph,
export_scope=export_scope,
exclude_nodes=exclude_nodes)
return meta_graph_def
def read_meta_graph_file(filename):
"""Reads a file containing `MetaGraphDef` and returns the protocol buffer.
Args:
filename: `meta_graph_def` filename including the path.
Returns:
A `MetaGraphDef` protocol buffer.
Raises:
IOError: If the file doesn't exist, or cannot be successfully parsed.
"""
meta_graph_def = meta_graph_pb2.MetaGraphDef()
if not file_io.file_exists(filename):
raise IOError("File %s does not exist." % filename)
# First try to read it as a binary file.
file_content = file_io.FileIO(filename, "rb").read()
try:
meta_graph_def.ParseFromString(file_content)
return meta_graph_def
except Exception: # pylint: disable=broad-except
pass
# Next try to read it as a text file.
try:
text_format.Merge(file_content.decode("utf-8"), meta_graph_def)
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
return meta_graph_def
def import_scoped_meta_graph(meta_graph_or_file,
clear_devices=False,
graph=None,
import_scope=None,
input_map=None,
unbound_inputs_col_name="unbound_inputs",
restore_collections_predicate=(lambda key: True)):
"""Recreates a `Graph` saved in a `MetaGraphDef` proto.
This function takes a `MetaGraphDef` protocol buffer as input. If
the argument is a file containing a `MetaGraphDef` protocol buffer,
it constructs a protocol buffer from the file content. The function
then adds all the nodes from the `graph_def` field to the
current graph, recreates the desired collections, and returns a dictionary of
all the Variables imported into the name scope.
In combination with `export_scoped_meta_graph()`, this function can be used to
* Serialize a graph along with other Python objects such as `QueueRunner`,
`Variable` into a `MetaGraphDef`.
* Restart training from a saved graph and checkpoints.
* Run inference from a saved graph and checkpoints.
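For example (an illustrative sketch pairing export and import, graph mode
only):
```python
meta_graph_def, _ = export_scoped_meta_graph(export_scope="my_scope")
with ops.Graph().as_default():
  var_list = import_scoped_meta_graph(meta_graph_def, import_scope="copy")
```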
Args:
meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
the path) containing a `MetaGraphDef`.
clear_devices: Boolean which controls whether to clear device information
from graph_def. Default false.
graph: The `Graph` to import into. If `None`, use the default graph.
import_scope: Optional `string`. Name scope into which to import the
subgraph. If `None`, the graph is imported to the root name scope.
input_map: A dictionary mapping input names (as strings) in `graph_def` to
`Tensor` objects. The values of the named input tensors in the imported
graph will be re-mapped to the respective `Tensor` values.
unbound_inputs_col_name: Collection name for looking up unbound inputs.
restore_collections_predicate: a predicate on collection names. A collection
named c (i.e. whose key is c) will be restored iff
1) `restore_collections_predicate(c)` is True, and
2) `c != unbound_inputs_col_name`.
Returns:
A dictionary of all the `Variables` imported into the name scope.
Raises:
ValueError: If the graph_def contains unbound inputs.
"""
return import_scoped_meta_graph_with_return_elements(
meta_graph_or_file, clear_devices, graph, import_scope, input_map,
unbound_inputs_col_name, restore_collections_predicate)[0]
def import_scoped_meta_graph_with_return_elements(
meta_graph_or_file,
clear_devices=False,
graph=None,
import_scope=None,
input_map=None,
unbound_inputs_col_name="unbound_inputs",
restore_collections_predicate=(lambda key: True),
return_elements=None):
"""Imports graph from `MetaGraphDef` and returns vars and return elements.
This function takes a `MetaGraphDef` protocol buffer as input. If
the argument is a file containing a `MetaGraphDef` protocol buffer,
it constructs a protocol buffer from the file content. The function
then adds all the nodes from the `graph_def` field to the
current graph, recreates the desired collections, and returns a dictionary of
all the Variables imported into the name scope.
In combination with `export_scoped_meta_graph()`, this function can be used to
* Serialize a graph along with other Python objects such as `QueueRunner`,
`Variable` into a `MetaGraphDef`.
* Restart training from a saved graph and checkpoints.
* Run inference from a saved graph and checkpoints.
Args:
meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
the path) containing a `MetaGraphDef`.
clear_devices: Boolean which controls whether to clear device information
from graph_def. Default false.
graph: The `Graph` to import into. If `None`, use the default graph.
import_scope: Optional `string`. Name scope into which to import the
subgraph. If `None`, the graph is imported to the root name scope.
input_map: A dictionary mapping input names (as strings) in `graph_def` to
`Tensor` objects. The values of the named input tensors in the imported
graph will be re-mapped to the respective `Tensor` values.
unbound_inputs_col_name: Collection name for looking up unbound inputs.
restore_collections_predicate: a predicate on collection names. A collection
named c (i.e. whose key is c) will be restored iff
1) `restore_collections_predicate(c)` is True, and
2) `c != unbound_inputs_col_name`.
return_elements: A list of strings containing operation names in the
`MetaGraphDef` that will be returned as `Operation` objects; and/or
tensor names in `MetaGraphDef` that will be returned as `Tensor` objects.
Returns:
A tuple of (
dictionary of all the `Variables` imported into the name scope,
list of `Operation` or `Tensor` objects from the `return_elements` list).
Raises:
ValueError: If the graph_def contains unbound inputs.
"""
if context.executing_eagerly():
raise ValueError("Exporting/importing meta graphs is not supported when "
"eager execution is enabled.")
if isinstance(meta_graph_or_file, meta_graph_pb2.MetaGraphDef):
meta_graph_def = meta_graph_or_file
else:
meta_graph_def = read_meta_graph_file(meta_graph_or_file)
if unbound_inputs_col_name:
for key, col_def in meta_graph_def.collection_def.items():
if key == unbound_inputs_col_name:
kind = col_def.WhichOneof("kind")
field = getattr(col_def, kind)
if field.value and (
not input_map or
sorted([compat.as_str(v) for v in field.value]) !=
sorted(input_map)):
raise ValueError("Graph contains unbound inputs: %s. Must "
"provide these inputs through input_map." %
",".join([compat.as_str(v) for v in field.value
if not input_map or v not in input_map]))
break
# Sets graph to default graph if it's not passed in.
graph = graph or ops.get_default_graph()
# Gathers the list of nodes we are interested in.
with graph.as_default():
producer_op_list = None
if meta_graph_def.meta_info_def.HasField("stripped_op_list"):
producer_op_list = meta_graph_def.meta_info_def.stripped_op_list
input_graph_def = meta_graph_def.graph_def
# Remove all the explicit device specifications for this node. This helps to
# make the graph more portable.
if clear_devices:
for node in input_graph_def.node:
node.device = ""
scope_to_prepend_to_names = graph.unique_name(
import_scope or "", mark_as_used=False)
imported_return_elements = importer.import_graph_def(
input_graph_def,
name=(import_scope or scope_to_prepend_to_names),
input_map=input_map,
producer_op_list=producer_op_list,
return_elements=return_elements)
# TensorFlow versions before 1.9 exported SavedModels
# without a VariableDef.trainable field set.
tf_version = meta_graph_def.meta_info_def.tensorflow_version
if not tf_version:
variables_have_trainable = True
else:
variables_have_trainable = (
distutils_version.LooseVersion(tf_version)
>= distutils_version.LooseVersion("1.9"))
# Sort collections so we see TRAINABLE_VARIABLES first and can default these
# variables to trainable if the value is not set in their VariableDef.
sorted_collections = []
if ops.GraphKeys.TRAINABLE_VARIABLES in meta_graph_def.collection_def:
sorted_collections.append(
(ops.GraphKeys.TRAINABLE_VARIABLES,
meta_graph_def.collection_def[ops.GraphKeys.TRAINABLE_VARIABLES]))
for key, value in sorted(meta_graph_def.collection_def.items()):
if key != ops.GraphKeys.TRAINABLE_VARIABLES:
sorted_collections.append((key, value))
# Restores all the other collections.
variable_objects = {}
for key, col_def in sorted_collections:
# Don't add unbound_inputs to the new graph.
if key == unbound_inputs_col_name:
continue
if not restore_collections_predicate(key):
continue
kind = col_def.WhichOneof("kind")
if kind is None:
logging.error("Cannot identify data type for collection %s. Skipping.",
key)
continue
from_proto = ops.get_from_proto_function(key)
# Temporary change to allow the TFMA evaluator to read metric variables
# saved as a bytes list.
# TODO(kathywu): Remove this hack once cl/248406059 has been submitted.
if key == ops.GraphKeys.METRIC_VARIABLES:
# Metric variables will use the same proto functions as GLOBAL_VARIABLES
from_proto = ops.get_from_proto_function(ops.GraphKeys.GLOBAL_VARIABLES)
if from_proto and kind == "bytes_list":
proto_type = ops.get_collection_proto_type(key)
if key in ops.GraphKeys._VARIABLE_COLLECTIONS: # pylint: disable=protected-access
for value in col_def.bytes_list.value:
variable = variable_objects.get(value, None)
if variable is None:
proto = proto_type()
proto.ParseFromString(value)
if not variables_have_trainable:
# If the VariableDef proto does not contain a "trainable"
# property because it was exported before that property was
# added, we default it to whether the variable is in the
# TRAINABLE_VARIABLES collection. We've sorted
# TRAINABLE_VARIABLES to be first, so trainable variables will
# be created from that collection.
proto.trainable = (key == ops.GraphKeys.TRAINABLE_VARIABLES)
variable = from_proto(
proto, import_scope=scope_to_prepend_to_names)
variable_objects[value] = variable
graph.add_to_collection(key, variable)
else:
for value in col_def.bytes_list.value:
proto = proto_type()
proto.ParseFromString(value)
graph.add_to_collection(
key, from_proto(
proto, import_scope=scope_to_prepend_to_names))
else:
field = getattr(col_def, kind)
if key in _COMPAT_COLLECTION_LIST:
logging.warning(
"The saved meta_graph is possibly from an older release:\n"
"'%s' collection should be of type 'byte_list', but instead "
"is of type '%s'.", key, kind)
if kind == "node_list":
for value in field.value:
col_op = graph.as_graph_element(
ops.prepend_name_scope(value, scope_to_prepend_to_names))
graph.add_to_collection(key, col_op)
elif kind == "int64_list":
# NOTE(opensource): This force conversion is to work around the fact
# that Python2 distinguishes between int and long, while Python3 has
# only int.
for value in field.value:
graph.add_to_collection(key, int(value))
else:
for value in field.value:
graph.add_to_collection(
key, ops.prepend_name_scope(value, scope_to_prepend_to_names))
var_list = {}
variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
scope=scope_to_prepend_to_names)
for v in variables:
var_list[ops.strip_name_scope(v.name, scope_to_prepend_to_names)] = v
return var_list, imported_return_elements
def export_scoped_meta_graph(filename=None,
graph_def=None,
graph=None,
export_scope=None,
as_text=False,
unbound_inputs_col_name="unbound_inputs",
clear_devices=False,
saver_def=None,
clear_extraneous_savers=False,
strip_default_attrs=False,
save_debug_info=False,
**kwargs):
"""Returns `MetaGraphDef` proto. Optionally writes it to filename.
This function exports the graph, saver, and collection objects into
`MetaGraphDef` protocol buffer with the intention of it being imported
at a later time or location to restart training, run inference, or be
a subgraph.
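For example (an illustrative sketch, graph mode only):
```python
meta_graph_def, var_list = export_scoped_meta_graph(
    filename="/tmp/my_model.meta", export_scope="my_scope", as_text=True)
```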
Args:
filename: Optional filename including the path for writing the
generated `MetaGraphDef` protocol buffer.
graph_def: `GraphDef` protocol buffer.
graph: The `Graph` to export. If `None`, use the default graph.
export_scope: Optional `string`. Name scope under which to extract
the subgraph. The scope name will be stripped from the node definitions
for easy import later into new name scopes. If `None`, the whole graph
is exported.
as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
unbound_inputs_col_name: Optional `string`. If provided, a string collection
with the given name will be added to the returned `MetaGraphDef`,
containing the names of tensors that must be remapped when importing the
`MetaGraphDef`.
clear_devices: Boolean which controls whether to clear device information
before exporting the graph.
saver_def: `SaverDef` protocol buffer.
clear_extraneous_savers: Remove any Saver-related information from the
graph (both Save/Restore ops and SaverDefs) that are not associated
with the provided SaverDef.
strip_default_attrs: Set to true if default valued attributes must be
removed while exporting the GraphDef.
save_debug_info: If `True`, save the GraphDebugInfo to a separate file, which
  is written to the same directory as `filename`, with the original file
  extension replaced by `.debug`.
**kwargs: Optional keyed arguments, including meta_info_def and
collection_list.
Returns:
A `MetaGraphDef` proto and dictionary of `Variables` in the exported
name scope.
Raises:
ValueError: When the `GraphDef` is larger than 2GB.
ValueError: When executing in Eager mode and either `graph_def` or `graph`
is undefined.
"""
if context.executing_eagerly() and not (graph_def is not None and
graph is not None):
raise ValueError("Exporting/importing meta graphs is not supported when "
"Eager Execution is enabled.")
graph = graph or ops.get_default_graph()
exclude_nodes = None
unbound_inputs = []
if export_scope or clear_extraneous_savers or clear_devices:
if graph_def:
new_graph_def = graph_pb2.GraphDef()
new_graph_def.versions.CopyFrom(graph_def.versions)
new_graph_def.library.CopyFrom(graph_def.library)
if clear_extraneous_savers:
exclude_nodes = _find_extraneous_saver_nodes(graph_def, saver_def)
for node_def in graph_def.node:
if _should_include_node(node_def.name, export_scope, exclude_nodes):
new_node_def = _node_def(node_def, export_scope, unbound_inputs,
clear_devices=clear_devices)
new_graph_def.node.extend([new_node_def])
graph_def = new_graph_def
else:
# Only do this complicated work if we want to remove a name scope.
graph_def = graph_pb2.GraphDef()
# pylint: disable=protected-access
graph_def.versions.CopyFrom(graph.graph_def_versions)
bytesize = 0
if clear_extraneous_savers:
exclude_nodes = _find_extraneous_saver_nodes(graph.as_graph_def(),
saver_def)
for key in sorted(graph._nodes_by_id):
if _should_include_node(graph._nodes_by_id[key].name,
export_scope,
exclude_nodes):
value = graph._nodes_by_id[key]
# pylint: enable=protected-access
node_def = _node_def(value.node_def, export_scope, unbound_inputs,
clear_devices=clear_devices)
graph_def.node.extend([node_def])
if value.outputs:
assert "_output_shapes" not in graph_def.node[-1].attr
graph_def.node[-1].attr["_output_shapes"].list.shape.extend([
output.get_shape().as_proto() for output in value.outputs])
bytesize += value.node_def.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph._copy_functions_to_graph_def(graph_def, bytesize) # pylint: disable=protected-access
# It's possible that not all the inputs are in the export_scope.
# If we would like such information included in the exported meta_graph,
# add them to a special unbound_inputs collection.
if unbound_inputs_col_name:
# Clears the unbound_inputs collections.
graph.clear_collection(unbound_inputs_col_name)
for k in unbound_inputs:
graph.add_to_collection(unbound_inputs_col_name, k)
var_list = {}
variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
scope=export_scope)
for v in variables:
if _should_include_node(v, export_scope, exclude_nodes):
var_list[ops.strip_name_scope(v.name, export_scope)] = v
scoped_meta_graph_def = create_meta_graph_def(
graph_def=graph_def,
graph=graph,
export_scope=export_scope,
exclude_nodes=exclude_nodes,
clear_extraneous_savers=clear_extraneous_savers,
saver_def=saver_def,
strip_default_attrs=strip_default_attrs,
**kwargs)
if filename:
graph_io.write_graph(
scoped_meta_graph_def,
os.path.dirname(filename),
os.path.basename(filename),
as_text=as_text)
if save_debug_info:
name, _ = os.path.splitext(filename)
debug_filename = "{name}{ext}".format(name=name, ext=".debug")
      # Gets the operations from the graph by name. Excludes variable nodes,
      # so that only the nodes in the frozen model are included.
ops_to_export = []
for node in scoped_meta_graph_def.graph_def.node:
scoped_op_name = ops.prepend_name_scope(node.name, export_scope)
ops_to_export.append(graph.get_operation_by_name(scoped_op_name))
graph_debug_info = create_graph_debug_info_def(ops_to_export)
graph_io.write_graph(
graph_debug_info,
os.path.dirname(debug_filename),
os.path.basename(debug_filename),
as_text=as_text)
return scoped_meta_graph_def, var_list
def copy_scoped_meta_graph(from_scope, to_scope,
from_graph=None, to_graph=None):
"""Copies a sub-meta_graph from one scope to another.
Args:
from_scope: `String` name scope containing the subgraph to be copied.
to_scope: `String` name scope under which the copied subgraph will reside.
    from_graph: Optional `Graph` from which to copy the subgraph. If `None`, the
      default graph is used.
to_graph: Optional `Graph` to which to copy the subgraph. If `None`, the
default graph is used.
Returns:
A dictionary of `Variables` that has been copied into `to_scope`.
Raises:
ValueError: If `from_scope` and `to_scope` are the same while
`from_graph` and `to_graph` are also the same.
"""
from_graph = from_graph or ops.get_default_graph()
to_graph = to_graph or ops.get_default_graph()
if from_graph == to_graph and from_scope == to_scope:
raise ValueError("'from_scope' and 'to_scope' need to be different "
"when performing copy in the same graph.")
orig_meta_graph, var_list = export_scoped_meta_graph(
export_scope=from_scope, graph=from_graph)
var_list = import_scoped_meta_graph(orig_meta_graph,
graph=to_graph,
import_scope=to_scope)
return var_list
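# Illustrative usage sketch for `copy_scoped_meta_graph` (not part of the
# original module; assumes TF 1.x graph mode and that `variable_scope` is
# importable here):
#
#   with ops.Graph().as_default() as g:
#     with variable_scope.variable_scope("train"):
#       _ = variable_scope.get_variable("v", shape=[])
#     copied = copy_scoped_meta_graph(from_scope="train", to_scope="eval",
#                                     from_graph=g, to_graph=g)
#   # `copied` maps "v" to the new variable created under the "eval" scope.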
|
tensorflow-master
|
tensorflow/python/framework/meta_graph.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FuncGraph and related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as py_collections
import itertools
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework.auto_control_deps import AutomaticControlDependencies
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.lazy_loader import LazyLoader
# This is to avoid a circular dependency:
# function -> func_graph
function = LazyLoader("function", globals(),
"tensorflow.python.eager.function")
def_function = LazyLoader(
"def_function", globals(),
"tensorflow.python.eager.def_function")
WHITELIST_COLLECTIONS = [
ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES,
ops.GraphKeys.TRAINABLE_VARIABLES,
variable_scope._VARSTORE_KEY, # pylint: disable=protected-access
variable_scope._VARSCOPESTORE_KEY # pylint: disable=protected-access
]
class UnknownArgument(object):
"""Signifies an argument which is not currently handled."""
pass
def convert_structure_to_signature(structure, arg_names=None):
"""Convert a potentially nested structure to a signature.
Args:
structure: Structure to convert, where top level collection is a list or a
tuple.
    arg_names: Optional list of argument names, with the same number of
      elements as the top level of `structure`; used for naming the
      corresponding TensorSpecs.
  Returns:
    Identical structure that has TensorSpec objects instead of Tensors and
    UnknownArgument instead of any unsupported types.
"""
def encode_arg(arg, path):
"""A representation for this argument, for converting into signatures."""
if isinstance(arg, ops.Tensor):
user_specified_name = None
try:
user_specified_name = compat.as_str(
arg.op.get_attr("_user_specified_name"))
except ValueError:
pass
if path and user_specified_name and user_specified_name != path[0]:
# The user has explicitly named the argument differently than the name
# of the function argument.
name = user_specified_name
else:
name = "/".join([str(p) for p in path])
return tensor_spec.TensorSpec(arg.shape, arg.dtype, name)
if isinstance(arg, composite_tensor.CompositeTensor):
# TODO(b/133606651) Do we need to inject arg_name?
return arg._type_spec # pylint: disable=protected-access
if isinstance(arg, (
int,
float,
bool,
type(None),
dtypes.DType,
tensor_spec.TensorSpec,
)):
return arg
return UnknownArgument()
# We are using the flattened paths to name the TensorSpecs. We need an
# explicit name for them downstream.
flattened = nest.flatten_with_tuple_paths(structure)
if arg_names:
if len(arg_names) != len(structure):
raise ValueError(
"Passed in arg_names don't match actual signature (%s)." % arg_names)
# Replace all top-level names with their actual arg_names. If a path before
# was "(2,'a',1)", it will become "(arg_names[2],'a',1)".
flattened = [
((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened
]
mapped = [encode_arg(arg, path) for path, arg in flattened]
return nest.pack_sequence_as(structure, mapped)
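# Quick sketch of the conversion above: Tensors become TensorSpecs named by
# their flattened path (or by `arg_names` at the top level), while TensorSpecs,
# ints, floats, bools, Nones and DTypes pass through unchanged:
#
#   convert_structure_to_signature(
#       (tensor_spec.TensorSpec([2], dtypes.float32), 3))
#   # -> (TensorSpec(shape=(2,), dtype=tf.float32), 3)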
class FuncGraph(ops.Graph):
"""Graph representing a function body.
Attributes:
name: The name of the function.
inputs: Placeholder tensors representing the inputs to this function. The
tensors are in this FuncGraph. This represents "regular" inputs as well as
captured inputs (i.e. the values of self.captures), with the regular
inputs coming first.
outputs: Tensors that will be returned by this function. The tensors are in
this FuncGraph.
control_outputs: Operations that must be executed before the function
represented by this graph can be said to have been executed.
structured_input_signature: A tuple of (args, kwargs), which are both
possibly-nested python objects that were received by this function. Note
that these structures might contain Python `None`s.
structured_outputs: A possibly-nested python object which will be returned
by this function. The Tensors in this structure are the same as those of
self.outputs. Note that this structure might contain Python `None`s.
variables: Variables that should be watched during function execution.
outer_graph: The graph this function is defined in. May be another FuncGraph
or the global default Graph.
captures: Maps external tensor -> internal tensor (i.e. input placeholder).
The entries are in the order they were captured.
deferred_captures: Maps arbitrary key -> (closure, placeholder), where at
function call time the value of closure() will be used to feed the
placeholder.
control_captures: Set of external ops on which this graph has a control
dependency.
seed: The graph-level random seed.
capture_by_value: If True, the func graph will capture Variables by value
instead of reference.
"""
def __init__(self, name, collections=None, capture_by_value=None):
"""Construct a new FuncGraph.
The graph will inherit its graph key, collections, seed, and distribution
strategy stack from the current context or graph.
Args:
name: the name of the function.
collections: a dictionary of collections this FuncGraph should start
with. If not specified (None), the FuncGraph will read (but not write
to) the outer graph's collections that are not whitelisted, and both
read and write to the outer graph's collections that are whitelisted.
The current whitelisted collections are the global variables, the
local variables, and the trainable variables.
Defaults to None.
      capture_by_value: An optional boolean. If True, the func graph will
        capture Variables by value instead of reference. By default, this is
        inherited from outer graphs; failing that, it defaults to False.
"""
super(FuncGraph, self).__init__()
self.name = name
self.inputs = []
self.outputs = []
self.control_outputs = []
self.control_captures = set()
self.structured_input_signature = None
self.structured_outputs = None
self._weak_variables = []
self._watched_variables = weakref.WeakSet()
self.outer_graph = ops.get_default_graph()
self.captures = py_collections.OrderedDict()
self.deferred_captures = py_collections.OrderedDict()
# Inherit capture-by-value from outer graph.
if capture_by_value is not None:
self.capture_by_value = capture_by_value
elif self.outer_graph is not None and isinstance(
self.outer_graph, FuncGraph):
self.capture_by_value = self.outer_graph.capture_by_value
else:
self.capture_by_value = False
self._building_function = True
# Map from resource tensor name to last op (in program order) which uses
# this tensor. Used to enforce that execution order matches program order
# for resource tensors.
self._last_op_using_resource_tensor = {}
graph = self.outer_graph
if context.executing_eagerly():
self.seed = context.global_seed()
      # [For tf.data user migration from TF 1.x to 2.0] _seed_used keeps track
      # of whether any random op in the function had a None op_seed, in which
      # case we end up using the function seed, which could be unintended
      # behavior for the op.
self._seed_used = False
else:
self.seed = graph.seed
self._seed_used = False
# TODO(allenl): Figure out if we can remove colocation stack
# specialization (currently used in cond_v2), here and in the cache key.
self._colocation_stack = graph._colocation_stack.copy() # pylint: disable=protected-access
if collections is None:
for collection_name in graph.get_all_collection_keys():
if collection_name not in WHITELIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection(
collection_name)
for collection_name in WHITELIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection_ref(
collection_name)
else:
self._collections = collections
def __str__(self):
return "FuncGraph(name=%s, id=%s)" % (self.name, id(self))
def watch_variable(self, v):
"""Marks the variable v as accessed while building this graph."""
while self is not None and isinstance(self, FuncGraph):
self._watched_variables.add(v)
self = self.outer_graph
def capture_call_time_value(self, closure, spec, key=None):
"""Creates a placeholder which at call time has the value closure().
Useful, for example, to respect TensorFlow context managers, which are often
dynamically scoped.
Args:
closure: function which takes no arguments, to be evaluated at function
call time, returning a tensor of compatible `shape` and `dtype`
spec: TypeSpec for the value to capture.
      key: optional. If not None, multiple calls to this method with the same
        key in the same graph will return the same placeholder, and the
        first closure will be used at function call time.
Returns:
placeholder which, at function call time, will be fed with the result
of calling closure().
Raises:
ValueError: at function call time, if the return value of closure() is
not compatible with shape and dtype.
TypeError: if spec is not a supported type (currently only TensorSpec
is supported).
"""
if key is None:
key = object()
if not isinstance(spec, tensor_spec.TensorSpec):
      raise TypeError("Only TensorSpec is supported so far, not %r." % spec)
dtype = spec.dtype
shape = spec.shape
if key not in self.deferred_captures:
placeholder = array_ops.placeholder(dtype=dtype, shape=shape)
def wrapped_closure():
tensor = ops.convert_to_tensor(closure(), dtype=dtype)
if not tensor.shape.is_compatible_with(shape):
          raise ValueError(
              "Return value of closure %r is not compatible with shape %r "
              "passed to capture_call_time_value." % (tensor, shape))
return tensor
self.deferred_captures[key] = (wrapped_closure, placeholder)
return self.deferred_captures[key][1]
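  # Usage sketch for `capture_call_time_value` (the closure and its provider
  # are hypothetical; the placeholder is fed with the closure's result at
  # each function call):
  #
  #   ph = func_graph.capture_call_time_value(
  #       lambda: read_current_state(),  # hypothetical value provider
  #       tensor_spec.TensorSpec([], dtypes.float32))
  #   # `ph` behaves like any other tensor inside `func_graph`.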
def control_dependencies(self, control_inputs):
"""Handles control dependencies.
FuncGraph wraps Graph's control_dependencies logic by first filtering out
any external tensors / operations and storing them in the graph's
control_captures member. Any consumers of this function graph must then
decide how to handle the control captures.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return super(FuncGraph, self).control_dependencies(control_inputs)
filtered_control_inputs = []
for c in control_inputs:
# Check for _UnreadVariable
if (isinstance(c, ops.IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
graph_element = ops._as_graph_element(c) # pylint: disable=protected-access
if graph_element is None:
graph_element = c
if graph_element is not None and getattr(
graph_element, "graph", None) is not self:
self.control_captures.add(graph_element)
else:
filtered_control_inputs.append(graph_element)
return super(FuncGraph, self).control_dependencies(filtered_control_inputs)
def as_default(self):
outer_cm = super(FuncGraph, self).as_default()
@tf_contextlib.contextmanager
def inner_cm():
"""Context manager for copying distribute.Strategy scope information."""
graph = ops.get_default_graph()
# pylint: disable=protected-access
# TODO(b/112906995, nareshmodi): distribution strategy depends on
# inheriting this stack from the default graph even in eager mode. Maybe
# it should be part of the eager context? This would also allow us to
# remove a get_default_graph() call from the function cache lookup.
old_strategy_stack = self._distribution_strategy_stack
self._distribution_strategy_stack = list(
graph._distribution_strategy_stack)
# We ignore device placements from any outer scopes while tracing the
# function when possible, to avoid hard-coding them in the function
# graph. "Default" placements come from the PartitionedCallOp's placement,
# so that the same trace of the Python function may be placed on several
# different devices and saved functions may be placed on new devices when
# restored.
old_device_stack = self._device_function_stack
if context.executing_eagerly():
if self._distribution_strategy_stack:
self._device_function_stack = self._device_function_stack.copy()
self._add_device_to_stack(context.context().device_name)
else:
if (self._distribution_strategy_stack
or device_stack_has_callable(graph._device_function_stack)):
# Hard-code devices from device functions in the function body
self._device_function_stack = graph._device_function_stack.copy()
old_creator_stack = self._variable_creator_stack
self._variable_creator_stack = graph._variable_creator_stack
# Inherit the graph key, since this is used for matching variables in
# optimizers.
old_graph_key = self._graph_key
self._graph_key = graph._graph_key
# pylint: enable=protected-access
with outer_cm as g:
try:
yield g
finally:
self._distribution_strategy_stack = old_strategy_stack
self._device_function_stack = old_device_stack
self._variable_creator_stack = old_creator_stack
self._graph_key = old_graph_key
return inner_cm()
@property
def output_types(self):
return [t.dtype for t in self.outputs]
@property
def output_shapes(self):
return [t.shape for t in self.outputs]
@property
def variables(self):
"""A list of variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Yields:
Strong references to variables accessed by this FuncGraph.
"""
for weak_v in self._weak_variables:
v = weak_v()
if v is None:
raise AssertionError(
"Called a function referencing variables which have been deleted. "
"This likely means that function-local variables were created and "
"not referenced elsewhere in the program. This is generally a "
"mistake; consider storing variables in an object attribute on "
"first call.")
yield v
@variables.setter
def variables(self, var_list):
self._weak_variables = [weakref.ref(v) for v in var_list]
def _capture_by_value(
self,
op_type,
inputs,
dtypes, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
# When capturing by value, do the read outside
reverse_captures = dict((v, k) for k, v in self.captures.items())
uncaptured_inputs = [reverse_captures.get(t, t) for t in inputs]
with ops.init_scope():
if context.executing_eagerly():
attr_list = ("dtype", int(attrs["dtype"].type))
value, = execute.execute(
compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,
context.context())
else:
op = ops.get_default_graph()._create_op_internal( # pylint: disable=protected-access
op_type,
uncaptured_inputs,
dtypes,
input_types,
name,
attrs,
op_def,
compute_device)
value = op.outputs[0]
captured_value = self.capture(value)
return captured_value.op
def create_op(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Like Graph.create_op, except handles external input tensors.
This overload adds functionality to create_op to "capture" any external
input tensors, i.e. tensors from the eager context or outer function graphs
if this is a nested function. See `capture` for more information.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Returns:
An `Operation` object.
"""
del compute_shapes
if self.capture_by_value and op_type in ["ReadVariableOp",
"ResourceGather"]:
return self._capture_by_value(op_type, inputs, dtypes, input_types, name,
attrs, op_def, compute_device)
# This capturing logic interacts poorly with control flow contexts which
# want to replace inputs of ops far too late in the process. This can lead
# the context to get confused and try to create an Enter for an Enter. We
# can detect this here and skip the additional Enter which can confuse loop
# validation logic.
if op_type == "Enter" and inputs[0].op.type == "Enter":
if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
return inputs[0].op
# Calling AddValue on the control flow contexts to force creation of the
# backward accumulators in the original graph before we create placeholders
# to capture the inputs.
ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access
for i, inp in enumerate(inputs):
# TPU Estimator defines a control flow context with no AddValue method.
if ctxt is not None and hasattr(ctxt, "AddValue"):
inp = ctxt.AddValue(inp)
inp = self.capture(inp)
inputs[i] = inp
return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access
op_type, inputs, dtypes, input_types, name, attrs, op_def,
compute_device)
def capture(self, tensor, name=None):
"""Captures `tensor` if it's external to this graph.
If `tensor` is from a different graph, returns a placeholder for it.
`tensor` and the placeholder will appear in self.captures, and the
placeholder will appear in self.inputs. Multiple calls to this method with
the same `tensor` argument will return the same placeholder. If `tensor` is
from this graph, returns `tensor`.
Args:
tensor: Tensor. May be from this FuncGraph or a different graph.
name: Optional name if a placeholder is created.
Returns:
Tensor from this FuncGraph.
"""
    # Note: _forward_func_graph is currently only set when building the
    # gradient graph of a defun call. If the backwards graph tries to capture
    # tensors, those will be captured first in the forward graph. This
    # makes sure that any tensor needed by a custom_gradient is correctly
    # captured.
# TODO(b/134097853): figure out a better way to check distributed variables
if hasattr(tensor, "_distribute_strategy") and hasattr(tensor, "_values"):
# This checks if the 'tensor' is a DistributedVariable. When it is a
# DistributedVariable, we do not want to check its "graph" attr as the
# following if branch does, because "graph" is not an attr for the
# container DistributedVariable object, and the underlying components may
# not have been initialized yet.
# The reason we do not use isinstance() is due to cyclic dependency issue.
      if name is None:
        name = "distributed_variable"
return self._capture_helper(tensor, name)
if (getattr(tensor, "graph", None) is not self and
hasattr(self, "_forward_func_graph") and
isinstance(self._forward_func_graph, FuncGraph)):
tensor = self._forward_func_graph.capture(tensor)
if isinstance(tensor, ops.EagerTensor):
if name is None:
name = str(ops.uid())
return self._capture_helper(tensor, name)
if tensor.graph is not self:
if name is None:
name = tensor.op.name
inner_graph = tensor.graph
while inner_graph is not None and isinstance(inner_graph, FuncGraph):
if inner_graph is self:
raise ValueError(
"Trying to capture a tensor from an inner function. This can be "
"caused by accessing a tensor defined inside a loop or "
"conditional body, or a subfunction, from a calling function, "
"without going through the proper return value mechanism. "
"Consider using TensorFlow mechanisms such as TensorArrays "
"to return tensors from inner functions or loop / conditional "
"bodies. Tensor: %s; tensor graph: %s; this graph: %s"
% (tensor, tensor.graph, self))
inner_graph = inner_graph.outer_graph
return self._capture_helper(tensor, name)
return tensor
def _capture_helper(self, tensor, name):
captured_tensor = self.captures.get(tensor, None)
if captured_tensor is None:
captured_tensor = _create_substitute_placeholder(tensor, name=name,
dtype=tensor.dtype)
self.captures[tensor] = captured_tensor
self.inputs.append(captured_tensor)
tape.record_operation("captured_value", [captured_tensor], [tensor],
lambda x: [x])
return captured_tensor
@property
def external_captures(self):
"""External tensors captured by this function."""
return list(self.captures.keys())
@property
  def internal_captures(self):
    """Placeholders in this function corresponding to captured tensors."""
return list(self.captures.values())
def func_graph_from_py_func(name,
python_func,
args,
kwargs,
signature=None,
func_graph=None,
autograph=False,
autograph_options=None,
add_control_dependencies=True,
arg_names=None,
op_return_value=None,
collections=None,
capture_by_value=None,
override_flat_arg_shapes=None):
"""Returns a `FuncGraph` generated from `python_func`.
Args:
name: an identifier for the function.
python_func: the Python function to trace.
args: the positional args with which the Python function should be called;
ignored if a signature is provided.
kwargs: the keyword args with which the Python function should be called;
ignored if a signature is provided.
signature: a possibly nested sequence of `TensorSpecs` specifying the shapes
and dtypes of the arguments. When a signature is provided, `args` and
`kwargs` are ignored, and `python_func` is traced with Tensors conforming
to `signature`. If `None`, the shapes and dtypes are inferred from the
inputs.
func_graph: Optional. An instance of FuncGraph. If provided, we will use
this graph else a new one is built and returned.
autograph: whether to use autograph to compile `python_func`.
See https://www.tensorflow.org/guide/autograph for more information.
autograph_options: additional knobs to control when `autograph=True`.
See https://www.tensorflow.org/guide/autograph for more information.
add_control_dependencies: If True, automatically adds control dependencies
to ensure program order matches execution order and stateful ops always
execute.
arg_names: Optional list of argument names, used to give input placeholders
recognizable names.
op_return_value: Optional. A Tensor. If set and `python_func` returns
Operations, those return values will be replaced with this value. If not
set, returning an Operation triggers an error.
collections: a dictionary of collections this FuncGraph should start
with. If not specified (None), the FuncGraph will read (but not write to)
the outer graph's collections that are not whitelisted, and both
read and write to the outer graph's collections that are whitelisted.
The current whitelisted collections are the global variables, the
local variables, and the trainable variables.
Defaults to None.
    capture_by_value: An optional boolean. If True, the func graph will capture
      Variables by value instead of reference. By default, this is inherited
      from outer graphs; failing that, it defaults to False.
override_flat_arg_shapes: An optional list of instances that are either
`None` or `TensorShape`. The length must match that of
`nest.flatten((args, kwargs), expand_composites=True)`. The entries
containing value `None` must match entries in flattened arguments
containing non-tensors, while entries containing a `TensorShape` must
match entries in the flattened arguments containing tensors.
Returns:
A FuncGraph.
Raises:
TypeError: If any of `python_func`'s return values is neither `None` nor a
`Tensor`.
ValueError: If both `signature` and `override_flat_arg_shapes` are
passed in.
"""
if op_return_value is not None:
assert isinstance(op_return_value, ops.Tensor), op_return_value
if func_graph is None:
func_graph = FuncGraph(name, collections=collections,
capture_by_value=capture_by_value)
assert isinstance(func_graph, FuncGraph)
if add_control_dependencies:
control_manager = AutomaticControlDependencies()
else:
control_manager = ops.NullContextmanager()
with func_graph.as_default(), control_manager as a:
current_scope = variable_scope.get_variable_scope()
    default_use_resource = current_scope.use_resource
current_scope.set_use_resource(True)
if signature is not None and override_flat_arg_shapes is not None:
raise ValueError(
"Passed both signature and override_flat_arg_shapes: %s and %s."
% (signature, override_flat_arg_shapes))
if signature is not None:
args = signature
kwargs = {}
# Creates and names placeholders for all arguments.
if override_flat_arg_shapes is not None:
flat_args = nest.flatten(args, expand_composites=True)
arg_shapes = override_flat_arg_shapes[:len(flat_args)]
kwarg_shapes = override_flat_arg_shapes[len(flat_args):]
else:
arg_shapes = None
kwarg_shapes = None
func_args = _get_defun_inputs_from_args(
args, arg_names, flat_shapes=arg_shapes)
func_kwargs = _get_defun_inputs_from_kwargs(
kwargs, flat_shapes=kwarg_shapes)
# Convert all Tensors into TensorSpecs before saving the structured inputs.
# If storing pure concrete functions that are not called through polymorphic
    # functions, we don't have access to FunctionSpec, so we need to name the
    # TensorSpecs by their `arg_names` for later binding.
func_graph.structured_input_signature = (
convert_structure_to_signature(func_args, arg_names),
convert_structure_to_signature(func_kwargs))
flat_func_args = nest.flatten(func_args, expand_composites=True)
flat_func_kwargs = nest.flatten(func_kwargs, expand_composites=True)
# Temporarily set inputs to allow graph building code to inspect
# them. Reassigned below.
func_graph.inputs = [arg for arg in flat_func_args + flat_func_kwargs
if isinstance(arg, ops.Tensor)]
# Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
# Variables to help check whether mutation happens in calling the function
# Copy the recursive list, tuple and map structure, but not base objects
func_args_before = nest.pack_sequence_as(func_args, flat_func_args,
expand_composites=True)
func_kwargs_before = nest.pack_sequence_as(
func_kwargs, flat_func_kwargs, expand_composites=True)
def convert(x):
"""Converts a function output to a Tensor."""
if x is None:
return None
if op_return_value is not None and isinstance(x, ops.Operation):
# TODO(b/79881896): we currently can't capture external control deps, so
# this won't work if x needs to be captured (i.e. if python_func returns
# captured Operations).
with ops.control_dependencies([x]):
x = array_ops.identity(op_return_value)
elif not isinstance(x, tensor_array_ops.TensorArray):
try:
x = ops.convert_to_tensor_or_composite(x)
except (ValueError, TypeError):
raise TypeError(
"To be compatible with tf.contrib.eager.defun, Python functions "
"must return zero or more Tensors; in compilation of %s, found "
"return value of type %s, which is not a Tensor." %
(str(python_func), type(x)))
if add_control_dependencies:
x = a.mark_as_return(x)
return x
try:
if autograph:
from tensorflow.python import autograph # pylint: disable=g-import-not-at-top
_, original_func = tf_decorator.unwrap(python_func)
def wrapper(*args, **kwargs):
"""Calls a converted version of original_func."""
# TODO(mdan): Push this block higher in tf.function's call stack.
try:
return autograph.converted_call(
original_func, None,
autograph.ConversionOptions(
recursive=True,
optional_features=autograph_options,
force_conversion=True,
), args, kwargs)
except Exception as e: # pylint:disable=broad-except
if hasattr(e, "ag_error_metadata"):
raise e.ag_error_metadata.to_exception(type(e))
else:
raise
# Wrapping around a decorator allows checks like tf_inspect.getargspec
# to be accurate.
converted_func = tf_decorator.make_decorator(original_func, wrapper)
python_func = tf_decorator.rewrap(python_func, original_func,
converted_func)
func_outputs = python_func(*func_args, **func_kwargs)
# invariant: `func_outputs` contains only Tensors, CompositeTensors,
# TensorArrays and `None`s.
func_outputs = nest.map_structure(convert, func_outputs,
expand_composites=True)
check_mutation(func_args_before, func_args)
check_mutation(func_kwargs_before, func_kwargs)
finally:
      current_scope.set_use_resource(default_use_resource)
# Variables in `func_args`, `func_kwargs` should be explicit inputs
# to the function, not captured inputs.
graph_variables = list(func_graph._watched_variables) # pylint: disable=protected-access
arg_variables = set()
inputs = []
for arg in (nest.flatten(func_args, expand_composites=True) +
nest.flatten(func_kwargs, expand_composites=True)):
if isinstance(arg, resource_variable_ops.BaseResourceVariable):
# Even if an argument variable was not used in the function, we've
# already manually captured the resource Tensor when creating argument
# placeholders.
resource_placeholder = func_graph.captures.pop(arg.handle, None)
if resource_placeholder is None:
continue
arg_variables.add(arg)
inputs.append(resource_placeholder)
elif isinstance(arg, ops.Tensor):
inputs.append(arg)
variables = [v for v in graph_variables if v not in arg_variables]
func_graph.inputs = inputs + list(
func_graph.captures.values()) + [
x[1] for x in func_graph.deferred_captures.values()]
func_graph.structured_outputs = func_outputs
# Returning a closed-over tensor does not trigger convert_to_tensor.
func_graph.outputs.extend(
func_graph.capture(x)
for x in flatten(func_graph.structured_outputs)
if x is not None)
func_graph.variables = variables
if add_control_dependencies:
func_graph.control_outputs.extend(control_manager.ops_which_must_run)
return func_graph
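# Minimal tracing sketch (assumes eager mode and a `constant_op` import,
# which this module does not pull in itself):
#
#   def f(x):
#     return x * 2.0
#
#   fg = func_graph_from_py_func(
#       "f", f, args=(constant_op.constant(3.0),), kwargs={})
#   # fg.inputs holds the placeholder traced for `x`; fg.outputs holds the
#   # tensor for `x * 2.0`.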
def maybe_captured(tensor):
  """If `tensor` is a capture placeholder, returns the original captured value.
Args:
tensor: Tensor.
Returns:
A tensor, potentially from a different Graph/FuncGraph.
"""
if (not isinstance(tensor, ops.EagerTensor) and
tensor.op.graph.building_function and tensor.op.type == "Placeholder"):
for input_t, placeholder_t in tensor.op.graph.captures.items():
if tensor == placeholder_t:
return maybe_captured(input_t)
return tensor
def device_stack_has_callable(device_stack):
"""Checks whether a device stack contains a callable."""
return any(callable(spec._device_name_or_function) # pylint: disable=protected-access
for spec in device_stack.peek_objs())
def check_mutation(n1, n2):
"""Check if two list of arguments are exactly the same."""
  errmsg = ("Function to be traced should not modify structure of input "
            "arguments. Check if your function has list and dictionary "
            "operations that alter input arguments, "
            "such as `list.pop` or `list.append`.")
try:
nest.assert_same_structure(n1, n2, expand_composites=True)
except ValueError:
raise ValueError(errmsg)
for arg1, arg2 in zip(nest.flatten(n1, expand_composites=True),
nest.flatten(n2, expand_composites=True)):
if arg1 is not arg2:
raise ValueError(errmsg)
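# For example (sketch), appending to a list argument during tracing changes
# the flattened structure and is rejected:
#
#   before = ([1],)
#   after = ([1],)
#   after[0].append(2)             # `after` no longer matches `before`
#   check_mutation(before, after)  # raises ValueError with `errmsg`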
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def flatten(sequence):
"""Like nest.flatten w/ expand_composites, but returns flow for TensorArrays.
Args:
sequence: A nested structure of Tensors, CompositeTensors, and
TensorArrays.
Returns:
A list of tensors.
"""
flat_sequence = nest.flatten(sequence, expand_composites=True)
return [
item.flow if isinstance(item, tensor_array_ops.TensorArray) else item
for item in flat_sequence]
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def pack_sequence_as(structure, flat_sequence):
"""Like `nest.pack_sequence_as` but also builds TensorArrays from flows.
Args:
structure: The structure to pack into. May contain Tensors,
CompositeTensors, or TensorArrays.
flat_sequence: An iterable containing tensors.
Returns:
A nested structure.
Raises:
    ValueError: If `structure` and `flat_sequence` are not compatible.
"""
flat_sequence = list(flat_sequence)
flattened_structure = nest.flatten(structure, expand_composites=True)
if len(flattened_structure) != len(flat_sequence):
raise ValueError("Mismatch in element count")
for i in range(len(flat_sequence)):
if isinstance(flattened_structure[i], tensor_array_ops.TensorArray):
flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(
old_ta=flattened_structure[i], flow=flat_sequence[i])
return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)
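# Round-trip sketch: `flatten` swaps a TensorArray for its flow tensor, and
# `pack_sequence_as` rebuilds a TensorArray from that flow:
#
#   ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
#   flat = flatten({"ta": ta})                # -> [ta.flow]
#   rebuilt = pack_sequence_as({"ta": ta}, flat)
#   # rebuilt["ta"] is a TensorArray built around the same flow tensor.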
def _create_substitute_placeholder(value, name=None, dtype=None):
"""Creates a placeholder for `value` and propagates shape info to it."""
# Note: setting ops.control_dependencies(None) ensures we always put
# capturing placeholders outside of any control flow context.
with ops.control_dependencies(None):
placeholder = graph_placeholder(
dtype=dtype or value.dtype, shape=value.shape, name=name)
custom_gradient.copy_handle_data(value, placeholder)
return placeholder
def _get_defun_inputs_from_args(args, names, flat_shapes=None):
"""Maps Python function positional args to graph-construction inputs."""
return _get_defun_inputs(
args, names, structure=args, flat_shapes=flat_shapes)
def _get_defun_inputs(args, names, structure, flat_shapes=None):
"""Maps python function args to graph-construction inputs.
Args:
args: A flat list of user-specified arguments.
names: A list of strings with user-specified argument names, same length as
`args`. May be `None`, in which case a generic name is used.
structure: The original argument list or dictionary.
flat_shapes: A flat list of values that are either `None` or
instances of `TensorShape`. If provided, then length must match
that of `nest.flatten(args, expand_composites=True)`; and locations where
`args` are instances of `Tensor` must have a corresponding `TensorShape`
in `flat_shapes`. May be `None`, in which case exact shapes are read
directly from the args.
Returns:
Placeholders with the same structure as `structure`.
Raises:
RuntimeError: if `flat_shapes` is provided, but
`len(flat_shapes) != len(nest.flatten(args, expand_composites=True))`.
RuntimeError: if a shape from `flat_shapes` is not None
for an argument that is not a `Tensor`, `TensorSpec`,
or `ResourceVariable`.
"""
func_graph = ops.get_default_graph()
function_inputs = []
if names is None:
names = [None] * len(args)
if flat_shapes is None:
shapes_iter = itertools.repeat(None)
else:
len_flat_args = len(nest.flatten(args, expand_composites=True))
if len_flat_args != len(flat_shapes):
raise RuntimeError(
"Length of fully flat shapes (%d) must match that of "
"flatten(args) (%d). args: %s, flat_shapes: %s"
% (len(flat_shapes),
len_flat_args,
args,
flat_shapes))
shapes_iter = iter(flat_shapes)
for arg_value, name in zip(args, names):
flattened = nest.flatten(arg_value, expand_composites=True)
tensor_specs = [
arg for arg in flattened if isinstance(arg, tensor_spec.TensorSpec)
]
specified_names = [arg.name for arg in tensor_specs if arg.name]
if specified_names and len(specified_names) < len(tensor_specs):
raise ValueError("If specifying TensorSpec names for nested structures, "
"either zero or all names have to be specified.")
for arg in flattened:
      # We have a shape entry for each arg, regardless of whether it's a real
      # Tensor or not. For non-tensor entries it should be None.
shape = next(shapes_iter)
if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):
if isinstance(arg, tensor_spec.TensorSpec) and arg.name:
requested_name = arg.name
else:
requested_name = name
placeholder_shape = shape if shape is not None else arg.shape
try:
placeholder = graph_placeholder(
arg.dtype, placeholder_shape,
name=requested_name)
except ValueError:
# Sometimes parameter names are not valid op names, so fall back to
# unnamed placeholders.
placeholder = graph_placeholder(arg.dtype, placeholder_shape)
if name is not None:
        # Record the requested/user-specified name in case it's different from
        # the uniquified name, for validation when exporting signatures.
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))
function_inputs.append(placeholder)
elif isinstance(arg, resource_variable_ops.BaseResourceVariable):
# Capture arg variables to create placeholders for them. These will be
# removed as captures after the function is traced (since otherwise we'd
# just add it back with a new placeholder when the variable was
# referenced).
placeholder = func_graph.capture(arg.handle, name=name)
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(name)))
function_inputs.append(arg)
else:
if shape is not None:
raise RuntimeError(
"Expected provided shape override to be None for arg that isn't "
"a Tensor, but saw arg: '%s', shape: '%s'. args: %s"
% (arg, shape, args))
function_inputs.append(arg)
return nest.pack_sequence_as(structure, function_inputs,
expand_composites=True)
def _get_defun_inputs_from_kwargs(kwargs, flat_shapes):
"""Maps Python function keyword args to graph-construction inputs."""
if kwargs:
names, args = zip(*sorted(kwargs.items()))
else:
names = []
args = []
return _get_defun_inputs(
args, names, structure=kwargs, flat_shapes=flat_shapes)
def dismantle_func_graph(func_graph):
"""Removes reference cycles in `func_graph` FuncGraph.
Helpful for making sure the garbage collector doesn't need to run when
the FuncGraph goes out of scope, e.g. in tests using defun with
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).
Args:
func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable
after this function.
"""
# TODO(b/115366440): Delete this method when a custom OrderedDict is added.
# Clearing captures using clear() leaves some cycles around.
while func_graph.captures:
func_graph.captures.popitem()
memory.dismantle_ordered_dict(func_graph.captures)
while func_graph.deferred_captures:
func_graph.deferred_captures.popitem()
memory.dismantle_ordered_dict(func_graph.deferred_captures)
ops.dismantle_graph(func_graph)
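# Teardown sketch: a caller that owns a traced FuncGraph can break its
# reference cycles explicitly rather than waiting for the garbage collector
# (reusing the hypothetical `fg` traced in the sketch above):
#
#   dismantle_func_graph(fg)  # `fg` must not be used after this call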
|
tensorflow-master
|
tensorflow/python/framework/func_graph.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.device_spec."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import device_spec
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
TEST_V1_AND_V2 = (("v1", device_spec.DeviceSpecV1),
("v2", device_spec.DeviceSpecV2))
class DeviceSpecTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters(*TEST_V1_AND_V2)
def test_empty(self, device_spec_type):
d = device_spec_type()
self.assertEqual("", d.to_string())
d.parse_from_string("")
self.assertEqual("", d.to_string())
@parameterized.named_parameters(*TEST_V1_AND_V2)
def test_constructor(self, device_spec_type):
d = device_spec_type(job="j", replica=0, task=1,
device_type="CPU", device_index=2)
self.assertEqual("j", d.job)
self.assertEqual(0, d.replica)
self.assertEqual(1, d.task)
self.assertEqual("CPU", d.device_type)
self.assertEqual(2, d.device_index)
self.assertEqual("/job:j/replica:0/task:1/device:CPU:2", d.to_string())
d = device_spec_type(device_type="GPU", device_index=0)
self.assertEqual("/device:GPU:0", d.to_string())
  def test_to_string_legacy(self):
"""DeviceSpecV1 allows direct mutation."""
d = device_spec.DeviceSpecV1()
d.job = "foo"
self.assertEqual("/job:foo", d.to_string())
d.task = 3
self.assertEqual("/job:foo/task:3", d.to_string())
d.device_type = "CPU"
d.device_index = 0
self.assertEqual("/job:foo/task:3/device:CPU:0", d.to_string())
d.task = None
d.replica = 12
self.assertEqual("/job:foo/replica:12/device:CPU:0", d.to_string())
d.device_type = "GPU"
d.device_index = 2
self.assertEqual("/job:foo/replica:12/device:GPU:2", d.to_string())
d.device_type = "CPU"
d.device_index = 1
self.assertEqual("/job:foo/replica:12/device:CPU:1", d.to_string())
d.device_type = None
d.device_index = None
self.assertEqual("/job:foo/replica:12", d.to_string())
# Test wildcard
d = device_spec.DeviceSpecV1(job="foo", replica=12, task=3,
device_type="GPU")
self.assertEqual("/job:foo/replica:12/task:3/device:GPU:*", d.to_string())
@parameterized.named_parameters(*TEST_V1_AND_V2)
def test_replace(self, device_spec_type):
d = device_spec_type()
d = d.replace(job="foo")
self.assertEqual("/job:foo", d.to_string())
d = d.replace(task=3)
self.assertEqual("/job:foo/task:3", d.to_string())
d = d.replace(device_type="CPU", device_index=0)
self.assertEqual("/job:foo/task:3/device:CPU:0", d.to_string())
d = d.replace(task=None, replica=12)
self.assertEqual("/job:foo/replica:12/device:CPU:0", d.to_string())
d = d.replace(device_type="GPU", device_index=2)
self.assertEqual("/job:foo/replica:12/device:GPU:2", d.to_string())
d = d.replace(device_type="CPU", device_index=1)
self.assertEqual("/job:foo/replica:12/device:CPU:1", d.to_string())
d = d.replace(device_type=None, device_index=None)
self.assertEqual("/job:foo/replica:12", d.to_string())
# Test wildcard
d = device_spec.DeviceSpecV1(job="foo", replica=12, task=3,
device_type="GPU")
self.assertEqual("/job:foo/replica:12/task:3/device:GPU:*", d.to_string())
@parameterized.named_parameters(*TEST_V1_AND_V2)
  def test_to_string(self, device_spec_type):
d = device_spec_type(job="foo")
self.assertEqual("/job:foo", d.to_string())
d = device_spec_type(job="foo", task=3)
self.assertEqual("/job:foo/task:3", d.to_string())
d = device_spec_type(job="foo", task=3, device_type="cpu", device_index=0)
self.assertEqual("/job:foo/task:3/device:CPU:0", d.to_string())
d = device_spec_type(job="foo", replica=12, device_type="cpu",
device_index=0)
self.assertEqual("/job:foo/replica:12/device:CPU:0", d.to_string())
d = device_spec_type(job="foo", replica=12, device_type="gpu",
device_index=2)
self.assertEqual("/job:foo/replica:12/device:GPU:2", d.to_string())
d = device_spec_type(job="foo", replica=12)
self.assertEqual("/job:foo/replica:12", d.to_string())
# Test wildcard
d = device_spec_type(job="foo", replica=12, task=3, device_type="GPU")
self.assertEqual("/job:foo/replica:12/task:3/device:GPU:*", d.to_string())
def test_parse_legacy(self):
d = device_spec.DeviceSpecV1()
d.parse_from_string("/job:foo/replica:0")
self.assertEqual("/job:foo/replica:0", d.to_string())
d.parse_from_string("/replica:1/task:0/cpu:0")
self.assertEqual("/replica:1/task:0/device:CPU:0", d.to_string())
d.parse_from_string("/replica:1/task:0/device:CPU:0")
self.assertEqual("/replica:1/task:0/device:CPU:0", d.to_string())
d.parse_from_string("/job:muu/device:GPU:2")
self.assertEqual("/job:muu/device:GPU:2", d.to_string())
with self.assertRaisesRegexp(ValueError, "Cannot specify multiple"):
d.parse_from_string("/job:muu/device:GPU:2/cpu:0")
@parameterized.named_parameters(*TEST_V1_AND_V2)
def test_to_from_string(self, device_spec_type):
d = device_spec_type.from_string("/job:foo/replica:0")
self.assertEqual("/job:foo/replica:0", d.to_string())
self.assertEqual(0, d.replica)
d = device_spec_type.from_string("/replica:1/task:0/cpu:0")
self.assertEqual("/replica:1/task:0/device:CPU:0", d.to_string())
self.assertAllEqual([1, 0, "CPU", 0],
[d.replica, d.task, d.device_type, d.device_index])
d = device_spec_type.from_string("/replica:1/task:0/device:CPU:0")
self.assertEqual("/replica:1/task:0/device:CPU:0", d.to_string())
self.assertAllEqual([1, 0, "CPU", 0],
[d.replica, d.task, d.device_type, d.device_index])
d = device_spec_type.from_string("/job:muu/device:GPU:2")
self.assertEqual("/job:muu/device:GPU:2", d.to_string())
self.assertAllEqual(["muu", "GPU", 2],
[d.job, d.device_type, d.device_index])
with self.assertRaisesRegexp(ValueError, "Cannot specify multiple"):
d.parse_from_string("/job:muu/device:GPU:2/cpu:0")
def test_merge_legacy(self):
d = device_spec.DeviceSpecV1.from_string("/job:foo/replica:0")
self.assertEqual("/job:foo/replica:0", d.to_string())
d.merge_from(device_spec.DeviceSpecV1.from_string("/task:1/device:GPU:2"))
self.assertEqual("/job:foo/replica:0/task:1/device:GPU:2", d.to_string())
d = device_spec.DeviceSpecV1()
d.merge_from(device_spec.DeviceSpecV1.from_string("/task:1/cpu:0"))
self.assertEqual("/task:1/device:CPU:0", d.to_string())
d.merge_from(device_spec.DeviceSpecV1.from_string("/job:boo/device:GPU:0"))
self.assertEqual("/job:boo/task:1/device:GPU:0", d.to_string())
d.merge_from(device_spec.DeviceSpecV1.from_string("/job:muu/cpu:2"))
self.assertEqual("/job:muu/task:1/device:CPU:2", d.to_string())
d.merge_from(device_spec.DeviceSpecV1.from_string(
"/job:muu/device:MyFunnyDevice:2"))
self.assertEqual("/job:muu/task:1/device:MyFunnyDevice:2", d.to_string())
def test_merge_removed(self):
with self.assertRaises(AttributeError):
d = device_spec.DeviceSpecV2()
d.merge_from(device_spec.DeviceSpecV2.from_string("/task:1/cpu:0"))
@parameterized.named_parameters(*TEST_V1_AND_V2)
def test_combine(self, device_spec_type):
d = device_spec_type.from_string("/job:foo/replica:0")
self.assertEqual("/job:foo/replica:0", d.to_string())
d = d.make_merged_spec(
device_spec_type.from_string("/task:1/device:GPU:2"))
self.assertEqual("/job:foo/replica:0/task:1/device:GPU:2", d.to_string())
d = device_spec_type()
d = d.make_merged_spec(device_spec_type.from_string("/task:1/cpu:0"))
self.assertEqual("/task:1/device:CPU:0", d.to_string())
d = d.make_merged_spec(
device_spec_type.from_string("/job:boo/device:GPU:0"))
self.assertEqual("/job:boo/task:1/device:GPU:0", d.to_string())
d = d.make_merged_spec(device_spec_type.from_string("/job:muu/cpu:2"))
self.assertEqual("/job:muu/task:1/device:CPU:2", d.to_string())
d = d.make_merged_spec(device_spec_type.from_string(
"/job:muu/device:MyFunnyDevice:2"))
self.assertEqual("/job:muu/task:1/device:MyFunnyDevice:2", d.to_string())
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/framework/device_spec_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Classes and functions for building TensorFlow graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Classes used when building a Graph.
from tensorflow.python.framework.device import DeviceSpec
from tensorflow.python.framework.ops import Graph
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.framework.ops import IndexedSlices
from tensorflow.python.framework.sparse_tensor import SparseTensor
from tensorflow.python.framework.sparse_tensor import SparseTensorValue
# Utilities used when building a Graph.
from tensorflow.python.framework.ops import device
from tensorflow.python.framework.ops import container
from tensorflow.python.framework.ops import name_scope
from tensorflow.python.framework.ops import op_scope
from tensorflow.python.framework.ops import colocate_with
from tensorflow.python.framework.ops import control_dependencies
from tensorflow.python.framework.ops import get_default_graph
from tensorflow.python.framework.ops import reset_default_graph
from tensorflow.python.framework.ops import GraphKeys
from tensorflow.python.framework.ops import add_to_collection
from tensorflow.python.framework.ops import add_to_collections
from tensorflow.python.framework.ops import get_collection
from tensorflow.python.framework.ops import get_collection_ref
from tensorflow.python.framework.ops import convert_to_tensor
from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices
from tensorflow.python.framework.random_seed import get_seed
from tensorflow.python.framework.random_seed import set_random_seed
from tensorflow.python.framework.sparse_tensor import convert_to_tensor_or_sparse_tensor
from tensorflow.python.framework.importer import import_graph_def
# Utilities for working with Tensors
from tensorflow.python.framework.tensor_util import make_tensor_proto
from tensorflow.python.framework.tensor_util import MakeNdarray as make_ndarray
# Needed when you defined a new Op in C++.
from tensorflow.python.framework.ops import RegisterGradient
from tensorflow.python.framework.ops import NotDifferentiable
from tensorflow.python.framework.ops import NoGradient
from tensorflow.python.framework.ops import RegisterShape
from tensorflow.python.framework.tensor_shape import Dimension
from tensorflow.python.framework.tensor_shape import TensorShape
# Needed when interfacing tensorflow to new array libraries
from tensorflow.python.framework.ops import register_tensor_conversion_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.framework.dtypes import * # pylint: disable=redefined-builtin
# Load a TensorFlow plugin
from tensorflow.python.framework.load_library import *
# pylint: enable=wildcard-import
|
tensorflow-master
|
tensorflow/python/framework/framework_lib.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of common shape functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six.moves
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
def has_fully_defined_shape(tensor):
"""Returns true if tensor has a fully defined shape."""
return isinstance(tensor, ops.EagerTensor) or tensor.shape.is_fully_defined()
def rank(tensor):
  """Returns the rank if `tensor` is a Tensor, else None."""
if isinstance(tensor, ops.Tensor):
return tensor._rank() # pylint: disable=protected-access
return None
def scalar_shape(unused_op):
"""Shape function for ops that output a scalar value."""
return [tensor_shape.scalar()]
def unchanged_shape(op):
"""Shape function for ops that output a tensor like their first input."""
return [op.inputs[0].get_shape()]
def unchanged_shape_with_rank(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: The exact rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank(rank)]
return _ShapeFunction
def unchanged_shape_with_rank_at_least(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: A lower bound on the rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank_at_least(rank)]
return _ShapeFunction
def unchanged_shape_with_rank_at_most(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: An upper bound on the rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank_at_most(rank)]
return _ShapeFunction
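# Sketch: these factories produce shape functions for ops whose output
# matches their (rank-constrained) first input. For a hypothetical
# rank-2-only op:
#
#   shape_fn = unchanged_shape_with_rank(2)
#   # shape_fn(op) == [op.inputs[0].get_shape().with_rank(2)], where
#   # with_rank raises ValueError if the input's known rank is not 2.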
def matmul_shape(op):
"""Shape function for a MatMul op."""
a_shape = op.inputs[0].get_shape().with_rank(2)
transpose_a = op.get_attr("transpose_a")
b_shape = op.inputs[1].get_shape().with_rank(2)
transpose_b = op.get_attr("transpose_b")
output_rows = a_shape[1] if transpose_a else a_shape[0]
output_cols = b_shape[0] if transpose_b else b_shape[1]
inner_a = a_shape[0] if transpose_a else a_shape[1]
inner_b = b_shape[1] if transpose_b else b_shape[0]
inner_a.assert_is_compatible_with(inner_b)
return [tensor_shape.TensorShape([output_rows, output_cols])]
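# Worked example for matmul_shape (values illustrative): with a_shape [3, 4],
# b_shape [4, 5], transpose_a=False and transpose_b=False:
#   output_rows = 3, output_cols = 5, inner_a = inner_b = 4 (compatible)
#   -> [TensorShape([3, 5])]
# With transpose_a=True the inner dimension becomes a_shape[0] instead.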
def get_conv_output_size(input_size, filter_size, strides, padding_type):
"""Returns the spatial size of a n-d convolution/pooling output."""
input_size = tuple([tensor_shape.as_dimension(x).value for x in input_size])
filter_size = tuple([tensor_shape.as_dimension(x).value for x in filter_size])
strides = [int(x) for x in strides]
if all(x == 1 for x in input_size) and all(x == 1 for x in filter_size):
return input_size
if any(x is not None and y is not None and x > y for x, y in
zip(filter_size, input_size)):
raise ValueError("Filter must not be larger than the input: "
"Filter: %r Input: %r" % (filter_size, input_size))
if padding_type == b"VALID":
def _valid(in_dim, k_dim, s_dim):
if in_dim is not None and k_dim is not None:
return (in_dim - k_dim + s_dim) // s_dim
else:
return None
output_size = [
_valid(in_dim, k_dim, s_dim)
for in_dim, k_dim, s_dim in zip(input_size, filter_size, strides)
]
elif padding_type == b"SAME":
def _same(in_dim, s_dim):
if in_dim is not None:
return (in_dim + s_dim - 1) // s_dim
else:
return None
output_size = [_same(in_dim, s_dim)
for in_dim, s_dim in zip(input_size, strides)]
else:
raise ValueError("Invalid padding: %r" % padding_type)
return tuple(output_size)
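# Worked example for get_conv_output_size (values illustrative):
# input_size=(10, 10), filter_size=(3, 3), strides=(2, 2):
#   b"VALID": (10 - 3 + 2) // 2 = 4  -> (4, 4)
#   b"SAME":  (10 + 2 - 1) // 2 = 5  -> (5, 5)
# Unknown (None) dimensions propagate as None in the output tuple.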
def get2d_conv_output_size(input_height, input_width, filter_height,
filter_width, row_stride, col_stride, padding_type):
"""Returns the number of rows and columns in a convolution/pooling output."""
return get_conv_output_size((input_height, input_width),
(filter_height, filter_width),
(row_stride, col_stride), padding_type)
def conv2d_shape(op):
"""Shape function for a Conv2D op.
This op has two inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* filter, a 4D tensor with shape = [filter_rows, filter_cols,
depth_in, depth_out]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "padding" and "strides" attrs.
Args:
op: A Conv2D Operation.
Returns:
A list containing the Shape of the Conv2D output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
filter_shape = op.inputs[1].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = filter_shape[0]
filter_cols = filter_shape[1]
depth_out = filter_shape[3]
# Check that the input depths are compatible.
input_shape[3].assert_is_compatible_with(filter_shape[2])
if data_format == b"NCHW":
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth_out]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
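# Worked example for conv2d_shape (values illustrative): an NHWC input of
# shape [2, 8, 8, 3], a filter of shape [3, 3, 3, 16], strides [1, 1, 1, 1]
# and b"SAME" padding yield out_rows = out_cols = (8 + 1 - 1) // 1 = 8, so
# the result is [TensorShape([2, 8, 8, 16])]. For b"NCHW" the input is
# transposed to NHWC for inference and the output transposed back.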
def depthwise_conv2d_native_shape(op):
"""Shape function for a DepthwiseConv2D op.
This op has two inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* filter, a 4D tensor with shape = [filter_rows, filter_cols,
depth_in, depthwise_multiplier]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_in*depthwise_multiplier], where out_rows and out_cols depend
on the value of the op's "padding" and "strides" attrs.
Args:
op: A DepthwiseConv2dNative Operation.
Returns:
A list containing the Shape of the DepthwiseConv2DNative output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
filter_shape = op.inputs[1].get_shape().with_rank(4)
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = filter_shape[0]
filter_cols = filter_shape[1]
depth_out = filter_shape[3] * filter_shape[2]
# Check that the input depths are compatible.
input_shape[3].assert_is_compatible_with(filter_shape[2])
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
if stride_r != stride_c:
# TODO(shlens): Add support for this.
raise ValueError("Current implementation only supports equal length "
"strides in the row and column dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
stride = stride_r
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride, stride,
padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
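# Worked example for depthwise_conv2d_native_shape (values illustrative):
# input [1, 5, 5, 4] and filter [3, 3, 4, 2] give depth_out = 2 * 4 = 8;
# with stride 1 and b"VALID" padding, out_rows = out_cols = 5 - 3 + 1 = 3,
# so the result is [TensorShape([1, 3, 3, 8])].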
def separable_conv2d_shape(op):
"""Shape function for a SeparableConv2D op.
This op has three inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* depthwise_filter, a 4D tensor with shape = [filter_rows,
filter_cols, depth_in, depth_multiplier]
* pointwise_filter, a 4D tensor with shape = [1, 1, depth_in *
depth_multiplier, depth_out]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "padding" and "strides" attrs.
Args:
op: A SeparableConv2D Operation.
Returns:
A list containing the Shape of the SeparableConv2D output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
depthwise_filter_shape = op.inputs[1].get_shape().merge_with(
tensor_shape.TensorShape([None, None, input_shape[3], None]))
pointwise_depth_in = depthwise_filter_shape[2] * depthwise_filter_shape[3]
pointwise_filter_shape = op.inputs[2].get_shape().merge_with(
tensor_shape.TensorShape([1, 1, pointwise_depth_in, None]))
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = depthwise_filter_shape[0]
filter_cols = depthwise_filter_shape[1]
depth_out = pointwise_filter_shape[3]
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
if stride_r != stride_c:
# TODO(shlens): Add support for this.
raise ValueError("Current implementation only supports equal length "
"strides in the row and column dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
stride = stride_r
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride, stride,
padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
def avg_pool_shape(op):
"""Shape function for an AvgPool op.
This op has one input:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "ksize", "strides", and "padding" attrs.
Args:
op: An AvgPool Operation.
Returns:
A single-element list containing the Shape of the AvgPool output.
Raises:
ValueError: If the shape of the input is invalid or incompatible with
the values of the attrs.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
if data_format == b"NCHW":
ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
depth = input_shape[3]
if ksize_b != 1 or ksize_d != 1:
raise ValueError("Current implementation does not support pooling "
"in the batch and depth dimensions.")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not support strides "
"in the batch and depth dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
ksize_c, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
def max_pool_shape(op):
"""Shape function for a MaxPool op.
This op has one input:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows, out_cols, and depth_out depend
on the value of the op's "ksize", "strides", and "padding" attrs.
Args:
op: A MaxPool Operation.
Returns:
A single-element list containing the Shape of the MaxPool output.
Raises:
ValueError: If the shape of the input is invalid or incompatible with
the values of the attrs.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
if data_format == b"NCHW":
ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
depth = input_shape[3]
if ksize_b != 1:
raise ValueError("Current implementation does not support pooling "
"in the batch dimension.")
if stride_b != 1:
raise ValueError("Current implementation does not support strides "
"in the batch dimension.")
if not ((ksize_r == 1 and ksize_c == 1) or ksize_d == 1):
raise ValueError("MaxPooling supports exactly one of pooling across depth "
"or pooling across width/height.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
if ksize_d == 1:
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
ksize_c, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth]
else:
if depth % ksize_d > 0:
raise ValueError("Depthwise max pooling requires the depth window "
"to evenly divide the input depth.")
if stride_d != ksize_d:
raise ValueError("Depthwise max pooling requires the depth window "
"to equal the depth stride.")
output_shape = [batch_size, in_rows, in_cols, depth // ksize_d]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
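# Worked example for the depthwise branch of max_pool_shape (illustrative):
# an NHWC input of shape [1, 4, 4, 8] with ksize [1, 1, 1, 4] and strides
# [1, 1, 1, 4] pools across depth only, yielding [TensorShape([1, 4, 4, 2])].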
def no_outputs(unused_op):
"""Shape function for use with ops that have no outputs."""
return []
def unknown_shape(op):
"""Shape function for use with ops whose output shapes are unknown."""
return [tensor_shape.unknown_shape() for _ in op.outputs]
def _broadcast_shape_helper(shape_x, shape_y):
"""Helper functions for is_broadcast_compatible and broadcast_shape.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
Returns None if the shapes are not broadcast compatible,
a list of the broadcast dimensions otherwise.
"""
# To compute the broadcasted dimensions, we zip together shape_x and shape_y,
# and pad with 1 to make them the same length.
broadcasted_dims = reversed(list(six.moves.zip_longest(
reversed(shape_x.dims),
reversed(shape_y.dims),
fillvalue=tensor_shape.Dimension(1))))
# Next we combine the dimensions according to the numpy broadcasting rules.
# http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
return_dims = []
for (dim_x, dim_y) in broadcasted_dims:
if dim_x.value is None or dim_y.value is None:
# One or both dimensions are unknown. If either dimension is greater than
# 1, we assume that the program is correct, and the other dimension will
# be broadcast to match it.
# TODO(mrry): If we eliminate the shape checks in C++, we must still
# assert that the unknown dim is either 1 or the same as the known dim.
if dim_x.value is not None and dim_x.value > 1:
return_dims.append(dim_x)
elif dim_y.value is not None and dim_y.value > 1:
return_dims.append(dim_y)
else:
return_dims.append(None)
elif dim_x.value == 1:
# We will broadcast dim_x to dim_y.
return_dims.append(dim_y)
elif dim_y.value == 1:
# We will broadcast dim_y to dim_x.
return_dims.append(dim_x)
elif dim_x.value == dim_y.value:
# The dimensions are compatible, so output is the same size in that
# dimension.
return_dims.append(dim_x.merge_with(dim_y))
else:
return None
return return_dims
def is_broadcast_compatible(shape_x, shape_y):
"""Returns True if `shape_x` and `shape_y` are broadcast compatible.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
True if a shape exists that both `shape_x` and `shape_y` can be broadcasted
to. False otherwise.
"""
if shape_x.ndims is None or shape_y.ndims is None:
return False
return _broadcast_shape_helper(shape_x, shape_y) is not None
def broadcast_shape(shape_x, shape_y):
"""Returns the broadcasted shape between `shape_x` and `shape_y`.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
ValueError: If the two shapes can not be broadcasted.
"""
if shape_x.ndims is None or shape_y.ndims is None:
return tensor_shape.unknown_shape()
return_dims = _broadcast_shape_helper(shape_x, shape_y)
if return_dims is None:
raise ValueError("Incompatible shapes for broadcasting: %s and %s"
% (shape_x, shape_y))
return tensor_shape.TensorShape(return_dims)
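# Usage sketch for broadcast_shape (shapes illustrative):
#
#   x = tensor_shape.TensorShape([5, 1, 3])
#   y = tensor_shape.TensorShape([4, 3])
#   broadcast_shape(x, y)  # -> TensorShape([5, 4, 3])
#
# Incompatible shapes such as [2, 3] and [4, 3] raise ValueError, and a
# shape of unknown rank yields tensor_shape.unknown_shape().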
def call_cpp_shape_fn(op, require_shape_fn=True):
"""A shape function that delegates to the registered C++ shape function.
Args:
op: the node in the graph for which to compute output shapes.
require_shape_fn: If true, and the C++ shape function is not registered
in the current binary then an exception is raised; otherwise, if the
C++ shape function is not registered then unknown_shape is used.
Returns:
A dictionary with the following keys:
shapes: A TensorShape list of the output shapes of the op, as computed
using the C++ shape inference function registered for the op.
handle_shapes: A TensorShape list of the shapes for handle outputs, if
any.
handle_dtypes: A list of DataType enums for the handle outputs, if any.
Raises:
ValueError: If the C++ shape function returned an error (e.g. because the
shapes of the inputs are of the wrong rank or otherwise incompatible
according to the shape function).
RuntimeError: If the C++ shape function is not registered and
<require_shape_fn> is True.
"""
if op.type == "Const":
# To avoid serializing large constants, we special-case "Const" ops
# here, even though they have a C++ shape function. When Python
# calls the C API directly, we should be able to remove this.
return {
"shapes": [tensor_shape.TensorShape(op.get_attr("value").tensor_shape)],
"handle_data": [None]
}
input_tensors_needed = []
input_tensors_as_shapes_needed = []
while True:
res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
input_tensors_as_shapes_needed,
require_shape_fn)
if not isinstance(res, dict):
# Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).
return res
# See if we need to evaluate some inputs.
if not res["inputs_needed"]:
return res
p = cpp_shape_inference_pb2.CppShapeInferenceInputsNeeded()
p = p.FromString(res["inputs_needed"])
changed = False
for idx in p.input_tensors_needed:
if idx not in input_tensors_needed:
input_tensors_needed.append(idx)
changed = True
for idx in p.input_tensors_as_shapes_needed:
if idx not in input_tensors_as_shapes_needed:
input_tensors_as_shapes_needed.append(idx)
changed = True
if not changed:
return res
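# Note on the retry loop in call_cpp_shape_fn above: it terminates because the
# two "needed" lists only ever grow. If a pass through the C++ shape function
# requests no new input tensors or shapes, `changed` stays False and the
# current result is returned.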
def _call_cpp_shape_fn_impl(
op, input_tensors_needed, input_tensors_as_shapes_needed, require_shape_fn):
"""Core implementation of call_cpp_shape_fn."""
graph_def_version = op.graph.graph_def_versions.producer
node_def_str = op.node_def.SerializeToString()
def tensor_to_inference_result(t):
r = cpp_shape_inference_pb2.CppShapeInferenceResult()
r.shape.CopyFrom(t.get_shape().as_proto())
# pylint: disable=protected-access
if t._handle_data is not None:
r.handle_data.CopyFrom(t._handle_data)
# pylint: enable=protected-access
return r.SerializeToString()
input_shapes = [tensor_to_inference_result(i) for i in op.inputs]
input_tensors = [None for i in input_shapes]
for idx in input_tensors_needed:
v = tensor_util.constant_value(op.inputs[idx])
if v is not None:
input_tensors[idx] = np.asarray(v)
serialized_unknown_shape = (
tensor_shape.TensorShape(None).as_proto().SerializeToString())
arr = [serialized_unknown_shape for i in input_shapes]
for idx in input_tensors_as_shapes_needed:
s = tensor_util.constant_value_as_shape(op.inputs[idx])
if s is not None:
arr[idx] = s.as_proto().SerializeToString()
input_tensors_as_shapes = arr
missing_shape_fn = False
try:
output = pywrap_tensorflow.RunCppShapeInference(
graph_def_version, node_def_str, input_shapes, input_tensors,
input_tensors_as_shapes)
except errors.InvalidArgumentError as err:
if err.message.startswith("No shape inference function exists for op"):
missing_shape_fn = True
else:
raise ValueError(err.message)
if missing_shape_fn:
if require_shape_fn:
raise RuntimeError(
"No C++ shape function registered for standard op: %s" % op.type)
return unknown_shape(op)
output_shapes = output[:-1]
# Convert TensorShapeProto values in output_shapes.
result_protos = [
cpp_shape_inference_pb2.CppShapeInferenceResult().FromString(s)
for s in output_shapes
]
result = [r.shape for r in result_protos]
result_handle_data = [
r.handle_data if r.handle_data.is_set else None for r in result_protos
]
return {
"shapes": result,
"handle_data": result_handle_data,
"inputs_needed": output[-1]
}
# pylint: disable=protected-access
ops._set_call_cpp_shape_fn(call_cpp_shape_fn)
# pylint: enable=protected-access
|
tensorflow-master
|
tensorflow/python/framework/common_shapes.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""For seeding individual ops based on a graph-level seed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
DEFAULT_GRAPH_SEED = 87654321
_MAXINT32 = 2**31 - 1
def _truncate_seed(seed):
return seed % _MAXINT32 # Truncate to fit into 32-bit integer
@tf_export(v1=['random.get_seed', 'get_seed'])
@deprecation.deprecated_endpoints('get_seed')
def get_seed(op_seed):
"""Returns the local seeds an operation should use given an op-specific seed.
Given an operation-specific seed, `op_seed`, this helper function returns two
seeds derived from graph-level and op-level seeds. Many random operations
internally use the two seeds to allow the user to change the seed globally for a
graph, or for only specific operations.
For details on how the graph-level seed interacts with op seeds, see
`tf.compat.v1.random.set_random_seed`.
Args:
op_seed: integer.
Returns:
A tuple of two integers that should be used for the local seed of this
operation.
"""
eager = context.executing_eagerly()
if eager:
global_seed = context.global_seed()
else:
global_seed = ops.get_default_graph().seed
if global_seed is not None:
if op_seed is None:
# pylint: disable=protected-access
if hasattr(ops.get_default_graph(), '_seed_used'):
ops.get_default_graph()._seed_used = True
if eager:
op_seed = context.internal_operation_seed()
else:
op_seed = ops.get_default_graph()._last_id
seeds = _truncate_seed(global_seed), _truncate_seed(op_seed)
else:
if op_seed is not None:
seeds = DEFAULT_GRAPH_SEED, _truncate_seed(op_seed)
else:
seeds = None, None
# Avoid (0, 0) as the C++ ops interpret it as nondeterminism, which would
# be unexpected since Python docs say nondeterminism is (None, None).
if seeds == (0, 0):
return (0, _MAXINT32)
return seeds
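# Behavior sketch for get_seed (illustrative): with a graph-level seed set and
# op_seed=None, the result is (truncated graph seed, truncated per-op id);
# with only op_seed set it is (DEFAULT_GRAPH_SEED, truncated op_seed); with
# neither it is (None, None). A (0, 0) pair is remapped to (0, _MAXINT32)
# because the C++ kernels would interpret (0, 0) as nondeterminism.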
@tf_export(v1=['random.set_random_seed', 'set_random_seed'])
def set_random_seed(seed):
"""Sets the graph-level random seed for the default graph.
Operations that rely on a random seed actually derive it from two seeds:
the graph-level and operation-level seeds. This sets the graph-level seed.
Its interaction with operation-level seeds is as follows:
1. If neither the graph-level nor the operation seed is set:
A random seed is used for this op.
2. If the graph-level seed is set, but the operation seed is not:
The system deterministically picks an operation seed in conjunction
with the graph-level seed so that it gets a unique random sequence.
3. If the graph-level seed is not set, but the operation seed is set:
A default graph-level seed and the specified operation seed are used to
determine the random sequence.
4. If both the graph-level and the operation seed are set:
Both seeds are used in conjunction to determine the random sequence.
To illustrate the user-visible effects, consider these examples:
To generate different sequences across sessions, set neither
graph-level nor op-level seeds:
```python
a = tf.random.uniform([1])
b = tf.random.normal([1])
print("Session 1")
with tf.compat.v1.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.compat.v1.Session() as sess2:
print(sess2.run(a)) # generates 'A3'
print(sess2.run(a)) # generates 'A4'
print(sess2.run(b)) # generates 'B3'
print(sess2.run(b)) # generates 'B4'
```
To generate the same repeatable sequence for an op across sessions, set the
seed for the op:
```python
a = tf.random.uniform([1], seed=1)
b = tf.random.normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequence of values for 'a', but different sequences of values for 'b'.
print("Session 1")
with tf.compat.v1.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.compat.v1.Session() as sess2:
print(sess2.run(a)) # generates 'A1'
print(sess2.run(a)) # generates 'A2'
print(sess2.run(b)) # generates 'B3'
print(sess2.run(b)) # generates 'B4'
```
To make the random sequences generated by all ops be repeatable across
sessions, set a graph-level seed:
```python
tf.compat.v1.random.set_random_seed(1234)
a = tf.random.uniform([1])
b = tf.random.normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequences of 'a' and 'b'.
print("Session 1")
with tf.compat.v1.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.compat.v1.Session() as sess2:
print(sess2.run(a)) # generates 'A1'
print(sess2.run(a)) # generates 'A2'
print(sess2.run(b)) # generates 'B1'
print(sess2.run(b)) # generates 'B2'
```
Args:
seed: integer.
"""
if context.executing_eagerly():
context.set_global_seed(seed)
else:
ops.get_default_graph().seed = seed
@tf_export('random.set_seed', v1=[])
def set_seed(seed):
"""Sets the graph-level random seed.
Operations that rely on a random seed actually derive it from two seeds:
the graph-level and operation-level seeds. This sets the graph-level seed.
Its interaction with operation-level seeds is as follows:
1. If neither the graph-level nor the operation seed is set:
A random seed is used for this op.
2. If the graph-level seed is set, but the operation seed is not:
The system deterministically picks an operation seed in conjunction
with the graph-level seed so that it gets a unique random sequence.
3. If the graph-level seed is not set, but the operation seed is set:
A default graph-level seed and the specified operation seed are used to
determine the random sequence.
4. If both the graph-level and the operation seed are set:
Both seeds are used in conjunction to determine the random sequence.
To illustrate the user-visible effects, consider these examples:
To generate different sequences across sessions, set neither
graph-level nor op-level seeds:
```python
a = tf.random.uniform([1])
b = tf.random.normal([1])
print("Session 1")
with tf.compat.v1.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.compat.v1.Session() as sess2:
print(sess2.run(a)) # generates 'A3'
print(sess2.run(a)) # generates 'A4'
print(sess2.run(b)) # generates 'B3'
print(sess2.run(b)) # generates 'B4'
```
To generate the same repeatable sequence for an op across sessions, set the
seed for the op:
```python
a = tf.random.uniform([1], seed=1)
b = tf.random.normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequence of values for 'a', but different sequences of values for 'b'.
print("Session 1")
with tf.compat.v1.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.compat.v1.Session() as sess2:
print(sess2.run(a)) # generates 'A1'
print(sess2.run(a)) # generates 'A2'
print(sess2.run(b)) # generates 'B3'
print(sess2.run(b)) # generates 'B4'
```
To make the random sequences generated by all ops be repeatable across
sessions, set a graph-level seed:
```python
tf.random.set_seed(1234)
a = tf.random.uniform([1])
b = tf.random.normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequences of 'a' and 'b'.
print("Session 1")
with tf.compat.v1.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.compat.v1.Session() as sess2:
print(sess2.run(a)) # generates 'A1'
print(sess2.run(a)) # generates 'A2'
print(sess2.run(b)) # generates 'B1'
print(sess2.run(b)) # generates 'B2'
```
Args:
seed: integer.
"""
# TODO(go/tf2-random): change doc, update to match design doc
set_random_seed(seed)
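# Minimal usage sketch (illustrative, TF2-style):
#
#   import tensorflow as tf
#   tf.random.set_seed(1234)
#   a = tf.random.uniform([1])  # repeatable given the same program order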
|
tensorflow-master
|
tensorflow/python/framework/random_seed.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the system configuration methods work properly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
def reset_eager(fn):
"""Decorator that resets the eager context once the wrapped test finishes."""
def wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
finally:
del context._context
context._context = context.Context()
ops.enable_eager_execution()
return wrapper
class ConfigTest(test.TestCase, parameterized.TestCase):
@test_util.run_gpu_only
@reset_eager
def testDevicePolicy(self):
self.assertEqual(context.DEVICE_PLACEMENT_SILENT,
context.context().device_policy)
# If no op has been executed we should be able to set the device policy as
# well as any init-time configs.
config.set_intra_op_parallelism_threads(1)
config.set_device_policy('silent')
config.set_intra_op_parallelism_threads(2)
context.ensure_initialized()
def copy_tensor(dtype=dtypes.int32):
cpu_tensor = constant_op.constant(1, dtype=dtype)
gpu_tensor = cpu_tensor.gpu()
self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)
config.set_device_policy('silent')
self.assertEqual(config.get_device_policy(), 'silent')
self.assertEqual(context.DEVICE_PLACEMENT_SILENT,
context.context().device_policy)
copy_tensor()
config.set_device_policy('silent_for_int32')
self.assertEqual(config.get_device_policy(), 'silent_for_int32')
self.assertEqual(context.DEVICE_PLACEMENT_SILENT_FOR_INT32,
context.context().device_policy)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'Tensors on conflicting devices'):
copy_tensor(dtypes.float32)
copy_tensor()
config.set_device_policy('warn')
self.assertEqual(config.get_device_policy(), 'warn')
self.assertEqual(context.DEVICE_PLACEMENT_WARN,
context.context().device_policy)
copy_tensor()
config.set_device_policy('explicit')
self.assertEqual(config.get_device_policy(), 'explicit')
self.assertEqual(context.DEVICE_PLACEMENT_EXPLICIT,
context.context().device_policy)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'Tensors on conflicting devices'):
copy_tensor()
config.set_device_policy(None)
self.assertEqual(config.get_device_policy(), 'silent')
@reset_eager
def testExecutionMode(self):
self.assertTrue(config.get_synchronous_execution())
self.assertEqual(context.SYNC, context.context().execution_mode)
# If no op has been executed we should be able to set the execution mode as
# well as any init-time configs.
config.set_intra_op_parallelism_threads(1)
config.set_synchronous_execution(False)
config.set_intra_op_parallelism_threads(2)
config.set_synchronous_execution(True)
self.assertTrue(config.get_synchronous_execution())
self.assertEqual(context.SYNC, context.context().execution_mode)
config.set_synchronous_execution(False)
self.assertFalse(config.get_synchronous_execution())
self.assertEqual(context.ASYNC, context.context().execution_mode)
@reset_eager
def testIntraOpParallelismThreads(self):
config.set_intra_op_parallelism_threads(10)
self.assertEqual(
config.get_intra_op_parallelism_threads(),
context.context().intra_op_parallelism_threads)
context.ensure_initialized()
with self.assertRaises(RuntimeError):
config.set_intra_op_parallelism_threads(1)
config.set_intra_op_parallelism_threads(10)
@reset_eager
def testInterOpParallelismThreads(self):
config.set_inter_op_parallelism_threads(10)
self.assertEqual(
config.get_inter_op_parallelism_threads(),
context.context().inter_op_parallelism_threads)
context.ensure_initialized()
with self.assertRaises(RuntimeError):
config.set_inter_op_parallelism_threads(1)
config.set_inter_op_parallelism_threads(10)
@test_util.run_gpu_only
@reset_eager
def testSoftPlacement(self):
if context.executing_eagerly():
self.assertTrue(config.get_soft_device_placement())
else:
self.assertFalse(config.get_soft_device_placement())
@def_function.function
def mod():
with ops.device('/device:GPU:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(1.0)
return math_ops.mod(a, b)
config.set_soft_device_placement(True)
self.assertEqual(config.get_soft_device_placement(), True)
self.assertEqual(
config.get_soft_device_placement(),
context.context().soft_device_placement)
# Since soft placement is enabled, the mod operation should work with CPU
mod()
config.set_soft_device_placement(False)
self.assertEqual(config.get_soft_device_placement(), False)
self.assertEqual(
config.get_soft_device_placement(),
context.context().soft_device_placement)
# Since soft placement is disabled, the mod operation should fail on GPU
with self.assertRaises(errors.InvalidArgumentError):
mod()
@reset_eager
def testLogDevicePlacement(self):
self.assertFalse(context.get_log_device_placement())
context.set_log_device_placement(True)
self.assertEqual(context.get_log_device_placement(), True)
self.assertEqual(
context.get_log_device_placement(),
context.context().log_device_placement)
context.set_log_device_placement(False)
self.assertEqual(context.get_log_device_placement(), False)
self.assertEqual(
context.get_log_device_placement(),
context.context().log_device_placement)
context.ensure_initialized()
with self.assertRaises(RuntimeError):
context.set_log_device_placement(True)
# If setting the log device placement is a no-op, do not throw a runtime
# exception.
context.set_log_device_placement(False)
@test_util.run_gpu_only
@reset_eager
def testJit(self):
self.assertEqual(config.get_optimizer_jit(), False)
# The following function should cause op fusion to occur. However, there is
# unfortunately no straightforward way to ensure this. We will just have to
# settle for creating a test that can trigger JIT.
@def_function.function
def fun(a, b):
c = a * b
d = c + a
return d
a = constant_op.constant([2., 2.])
b = constant_op.constant([2., 2.])
self.evaluate(fun(a, b))
config.set_optimizer_jit(True)
self.assertEqual(config.get_optimizer_jit(), True)
self.assertEqual(config.get_optimizer_jit(),
context.context().optimizer_jit)
self.evaluate(fun(a, b))
config.set_optimizer_jit(False)
self.assertEqual(config.get_optimizer_jit(), False)
self.assertEqual(config.get_optimizer_jit(),
context.context().optimizer_jit)
self.evaluate(fun(a, b))
@parameterized.named_parameters(
('LayoutOptimizer', 'layout_optimizer'),
('ConstantFolding', 'constant_folding'),
('ShapeOptimization', 'shape_optimization'),
('Remapping', 'remapping'),
('ArithmeticOptimization', 'arithmetic_optimization'),
('DependencyOptimization', 'dependency_optimization'),
('LoopOptimization', 'loop_optimization'),
('FunctionOptimization', 'function_optimization'),
('DebugStripper', 'debug_stripper'),
('ScopedAllocatorOptimization', 'scoped_allocator_optimization'),
('ImplementationSelector', 'implementation_selector'),
('AutoMixedPrecision', 'auto_mixed_precision'))
@reset_eager
def testOptimizerToggleOption(self, field):
# TODO(b/128531235): Improve testing of option
options = config.get_optimizer_experimental_options()
self.assertIsNone(options.get(field))
config.set_optimizer_experimental_options({field: True})
options[field] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
config.set_optimizer_experimental_options({field: False})
options[field] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
@parameterized.named_parameters(
('DisableModelPruning', 'disable_model_pruning'),
('DisableMetaOptimizer', 'disable_meta_optimizer'))
@reset_eager
def testOptimizerBoolOption(self, field):
# TODO(b/128531235): Improve testing of option
options = config.get_optimizer_experimental_options()
self.assertFalse(options.get(field))
config.set_optimizer_experimental_options({field: True})
options[field] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
config.set_optimizer_experimental_options({field: False})
options[field] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
@test_util.run_gpu_only
@reset_eager
def testOptimizerToggleOptionPinToHost(self):
options = config.get_optimizer_experimental_options()
self.assertIsNone(options.get('pin_to_host_optimization'))
@def_function.function
def fun():
op = test_ops.device_placement_op()
return op
# Force optimizer to run for all graphs
config.set_optimizer_experimental_options({'min_graph_nodes': -1})
options['min_graph_nodes'] = -1
# Since pin to host is disabled, the operation should go on GPU
gpu = self.evaluate(fun())
self.assertIn(compat.as_bytes('GPU'), gpu)
config.set_optimizer_experimental_options(
{'pin_to_host_optimization': True})
options['pin_to_host_optimization'] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
# Since pin to host is enabled, the operation should go on CPU
cpu = self.evaluate(fun())
self.assertIn(compat.as_bytes('CPU'), cpu)
config.set_optimizer_experimental_options(
{'pin_to_host_optimization': False})
options['pin_to_host_optimization'] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
# Since pin to host is disabled again, the operation should go on GPU
gpu2 = self.evaluate(fun())
self.assertIn(compat.as_bytes('GPU'), gpu2)
class DeviceTest(test.TestCase):
@reset_eager
def testPhysicalDevices(self):
cpus = config.list_physical_devices('CPU')
self.assertGreater(len(cpus), 0)
if test_util.is_gpu_available():
gpus = config.list_physical_devices('GPU')
self.assertGreater(len(gpus), 0)
@reset_eager
def testCpuMultiple(self):
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
config.set_virtual_device_configuration(cpus[0], [
context.VirtualDeviceConfiguration(),
context.VirtualDeviceConfiguration()
])
context.ensure_initialized()
vcpus = config.list_logical_devices('CPU')
self.assertEqual(len(vcpus), 2)
with ops.device('/device:CPU:0'):
a = constant_op.constant(1.0)
self.evaluate(a)
with ops.device('/device:CPU:1'):
b = constant_op.constant(1.0)
self.evaluate(b)
with self.assertRaisesRegexp(RuntimeError, 'unknown device'):
with ops.device('/device:CPU:2'):
c = constant_op.constant(1.0)
self.evaluate(c)
# Ensure we can place ops on each of the device names
for vcpu in vcpus:
with ops.device(vcpu.name):
d = constant_op.constant(1.0)
self.evaluate(d)
# Modifying the CPU configuration is not supported
with self.assertRaisesRegexp(RuntimeError, 'cannot be modified'):
config.set_virtual_device_configuration(cpus[0], [
context.VirtualDeviceConfiguration(),
context.VirtualDeviceConfiguration(),
context.VirtualDeviceConfiguration()
])
# Setting the same CPU configuration is fine
config.set_virtual_device_configuration(cpus[0], [
context.VirtualDeviceConfiguration(),
context.VirtualDeviceConfiguration()
])
@test_util.run_gpu_only
@reset_eager
def testGpuNone(self):
gpus = config.list_physical_devices('GPU')
self.assertGreater(len(gpus), 0)
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
self.assertEqual(len(config.get_visible_devices('CPU')), 1)
self.assertGreater(len(config.get_visible_devices('GPU')), 0)
config.set_visible_devices(cpus[0])
self.assertEqual(len(config.get_visible_devices('CPU')), 1)
self.assertEqual(len(config.get_visible_devices('GPU')), 0)
with self.assertRaisesRegexp(RuntimeError, 'unknown device'):
with ops.device('/device:GPU:0'):
a = constant_op.constant(1.0)
self.evaluate(a)
# Modifying the visible devices is not supported
with self.assertRaisesRegexp(RuntimeError, 'cannot be modified'):
config.set_visible_devices(gpus)
# Setting the same visible devices is fine
config.set_visible_devices(cpus[0])
@reset_eager
def testGpuMultiple(self):
gpus = config.list_physical_devices('GPU')
if len(gpus) < 2:
self.skipTest('Need at least 2 GPUs')
context.ensure_initialized()
for i in range(0, len(gpus)):
with ops.device('/device:GPU:' + str(i)):
a = constant_op.constant(1.0)
self.evaluate(a)
with self.assertRaisesRegexp(RuntimeError, 'unknown device'):
with ops.device('/device:GPU:' + str(len(gpus))):
a = constant_op.constant(1.0)
self.evaluate(a)
@test_util.run_gpu_only
@reset_eager
def testVirtualGpu(self):
gpus = config.list_physical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
self.assertIsNone(config.get_virtual_device_configuration(gpus[-1]))
config.set_virtual_device_configuration(gpus[-1], [
context.VirtualDeviceConfiguration(memory_limit=10),
context.VirtualDeviceConfiguration(memory_limit=10)
])
self.assertEqual(len(config.get_virtual_device_configuration(gpus[-1])), 2)
logical_gpus = config.list_logical_devices('GPU')
self.assertEqual(len(logical_gpus), len(gpus) + 1)
for i in range(0, len(logical_gpus)):
with ops.device('/device:GPU:' + str(i)):
a = constant_op.constant(1.0)
self.evaluate(a)
with self.assertRaisesRegexp(RuntimeError, 'unknown device'):
with ops.device('/device:GPU:' + str(len(logical_gpus))):
a = constant_op.constant(1.0)
self.evaluate(a)
# Modifying the GPU configuration is not supported
with self.assertRaisesRegexp(RuntimeError, 'cannot be modified'):
config.set_virtual_device_configuration(gpus[-1], [
context.VirtualDeviceConfiguration(memory_limit=20),
context.VirtualDeviceConfiguration(memory_limit=20)
])
with self.assertRaisesRegexp(RuntimeError, 'cannot be modified'):
config.set_virtual_device_configuration(gpus[-1], [
context.VirtualDeviceConfiguration(memory_limit=10),
context.VirtualDeviceConfiguration(memory_limit=10),
context.VirtualDeviceConfiguration(memory_limit=10)
])
# Setting the same GPU configuration is fine
config.set_virtual_device_configuration(gpus[-1], [
context.VirtualDeviceConfiguration(memory_limit=10),
context.VirtualDeviceConfiguration(memory_limit=10)
])
@test_util.run_gpu_only
@reset_eager
def testGpuGrowth(self):
gpus = config.list_physical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
self.assertIsNone(config.get_memory_growth(gpus[-1]))
for gpu in gpus:
config.set_memory_growth(gpu, True)
c = context.context().config
self.assertTrue(c.gpu_options.allow_growth)
logical_gpus = config.list_logical_devices('GPU')
self.assertEqual(len(logical_gpus), len(gpus))
# Modifying the GPU configuration is not supported
with self.assertRaisesRegexp(RuntimeError, 'cannot be modified'):
for gpu in gpus:
config.set_memory_growth(gpu, False)
# Setting the same GPU configuration is fine
for gpu in gpus:
config.set_memory_growth(gpu, True)
@test_util.run_gpu_only
@reset_eager
def testGpuInvalidConfig(self):
gpus = config.list_physical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
if len(gpus) > 1:
# Assert if other GPUs were not configured
config.set_memory_growth(gpus[0], True)
with self.assertRaisesRegexp(ValueError, 'cannot differ'):
c = context.context().config
# If we limit visibility to GPU 0, growth is fine
config.set_visible_devices(gpus[0], 'GPU')
c = context.context().config
self.assertTrue(c.gpu_options.allow_growth)
# Default setting for second GPU is False and works if we set visibility
config.set_visible_devices(gpus[1], 'GPU')
c = context.context().config
self.assertFalse(c.gpu_options.allow_growth)
# Growth now fails because all the GPUs are visible and not the same
config.set_visible_devices(gpus, 'GPU')
with self.assertRaisesRegexp(ValueError, 'cannot differ'):
c = context.context().config
for gpu in gpus:
config.set_memory_growth(gpu, True)
c = context.context().config
self.assertTrue(c.gpu_options.allow_growth)
with self.assertRaisesRegexp(ValueError, 'memory limit'):
config.set_virtual_device_configuration(gpus[-1], [
context.VirtualDeviceConfiguration(),
context.VirtualDeviceConfiguration()
])
self.assertIsNone(config.get_virtual_device_configuration(gpus[-1]))
config.set_virtual_device_configuration(gpus[-1], [
context.VirtualDeviceConfiguration(memory_limit=10),
context.VirtualDeviceConfiguration(memory_limit=10)
])
c = context.context().config
self.assertFalse(c.gpu_options.allow_growth)
with self.assertRaisesRegexp(ValueError, 'virtual devices'):
config.set_memory_growth(gpus[-1], False)
@test_util.run_gpu_only
@reset_eager
def testRemote(self):
gpus = config.list_logical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
context.ensure_initialized()
gpus = config.list_logical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
for gpu in gpus:
self.assertIsNotNone(gpu.name)
context.ensure_initialized()
job_name = 'test'
cluster_def = cluster_pb2.ClusterDef()
job_def = cluster_def.job.add()
job_def.name = job_name
job_def.tasks[0] = 'localhost:0'
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def, job_name=job_name, task_index=0, protocol='grpc')
context.set_server_def(server_def)
gpus = config.list_logical_devices('GPU')
for gpu in gpus:
self.assertIsNotNone(gpu.name)
@reset_eager
def testV1CompatibilityDummyInvisibleDeviceList(self):
gpus = config.list_physical_devices('GPU')
if gpus:
self.skipTest('Test requires no GPUs')
# Ensure GPU options are left untouched in CPU-only environments
context.context()._physical_devices = None
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(visible_device_list='0'))
new_config = context.context().config
self.assertEqual(new_config.gpu_options.visible_device_list, '0')
@test_util.run_gpu_only
@reset_eager
def testV1Compatibility(self):
# Ensure we set 1 CPU by default
context.context()._config = config_pb2.ConfigProto()
new_config = context.context().config
self.assertEqual(new_config.device_count['CPU'], 1)
context.context()._physical_devices = None
# Ensure CPU is split
context.context()._config = config_pb2.ConfigProto(device_count={'CPU': 2})
new_config = context.context().config
self.assertEqual(new_config.device_count['CPU'], 2)
context.context()._physical_devices = None
# Handle empty visible device list
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(visible_device_list=''))
gpus = config.list_physical_devices('GPU')
gpu_count = len(gpus)
new_config = context.context().config
self.assertEqual(new_config.gpu_options.visible_device_list,
','.join(str(i) for i in range(len(gpus))))
context.context()._physical_devices = None
# Handle invalid visible device list
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(visible_device_list=str(gpu_count)))
with self.assertRaisesRegexp(ValueError, 'Invalid visible device index'):
gpus = config.list_physical_devices('GPU')
new_config = context.context().config
context.context()._physical_devices = None
# Handle single visible device list
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(visible_device_list=str(gpu_count-1)))
gpus = config.list_physical_devices('GPU')
new_config = context.context().config
self.assertEqual(new_config.gpu_options.visible_device_list,
str(gpu_count-1))
context.context()._physical_devices = None
def testConfigureCollectiveOps(self):
context.context().configure_collective_ops(
collective_leader='/job:worker/replica:0/task:0',
scoped_allocator_enabled_ops=('CollectiveReduce',),
use_nccl_communication=False,
device_filters=['/job:worker/task:1'])
new_config = context.context().config
# Verify group leader
self.assertEqual('/job:worker/replica:0/task:0',
new_config.experimental.collective_group_leader)
# Verify device filters.
self.assertEqual(['/job:worker/task:1'], new_config.device_filters)
# Verify rewrite options.
new_rewrite_options = new_config.graph_options.rewrite_options
self.assertEqual(rewriter_config_pb2.RewriterConfig.ON,
new_rewrite_options.scoped_allocator_optimization)
self.assertEqual(['CollectiveReduce'],
new_rewrite_options.scoped_allocator_opts.enable_op)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/python/framework/config_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for file_system."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.framework import load_library
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class FileSystemTest(test.TestCase):
def setUp(self):
file_system_library = os.path.join(resource_loader.get_data_files_path(),
"test_file_system.so")
load_library.load_file_system_library(file_system_library)
@test_util.run_deprecated_v1
def testBasic(self):
with self.cached_session() as sess:
reader = io_ops.WholeFileReader("test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queue.enqueue_many([["test://foo"]]).run()
queue.close().run()
key, value = self.evaluate(reader.read(queue))
self.assertEqual(key, compat.as_bytes("test://foo"))
self.assertEqual(value, compat.as_bytes("AAAAAAAAAA"))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/framework/file_system_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple stack that associates filename and line numbers with each object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util import tf_stack
class TraceableObject(object):
"""Wrap an object together with its the code definition location."""
# Return codes for the set_filename_and_line_from_caller() method.
SUCCESS, HEURISTIC_USED, FAILURE = (0, 1, 2)
def __init__(self, obj, filename=None, lineno=None):
self.obj = obj
self.filename = filename
self.lineno = lineno
def set_filename_and_line_from_caller(self, offset=0):
"""Set filename and line using the caller's stack frame.
If the requested stack information is not available, a heuristic may
be applied and self.HEURISTIC_USED will be returned. If the heuristic
fails then no change will be made to the filename and lineno members
(None by default) and self.FAILURE will be returned.
Args:
offset: Integer. If 0, the caller's stack frame is used. If 1,
the caller's caller's stack frame is used. Larger values are
permissible but if out-of-range (larger than the number of stack
frames available) the outermost stack frame will be used.
Returns:
TraceableObject.SUCCESS if appropriate stack information was found,
TraceableObject.HEURISTIC_USED if the offset was larger than the stack,
and TraceableObject.FAILURE if the stack was empty.
"""
# Offset is defined in "Args" as relative to the caller. We are one frame
# beyond the caller.
local_offset = offset + 1
frame_records = tf_stack.extract_stack_file_and_line(
max_length=local_offset + 1)
if not frame_records:
return self.FAILURE
if len(frame_records) > local_offset:
# Negative indexing is one-based (index -1 is the last element), hence the +1.
negative_offset = -(local_offset + 1)
self.filename, self.lineno = frame_records[negative_offset]
return self.SUCCESS
else:
# If the offset is too large then we use the largest offset possible,
# meaning we use the outermost stack frame at index 0.
self.filename, self.lineno = frame_records[0]
return self.HEURISTIC_USED
def copy_metadata(self):
"""Return a TraceableObject like this one, but without the object."""
return self.__class__(None, filename=self.filename, lineno=self.lineno)
class TraceableStack(object):
"""A stack of TraceableObjects."""
def __init__(self, existing_stack=None):
"""Constructor.
Args:
existing_stack: [TraceableObject, ...] If provided, this object will
set its new stack to a SHALLOW COPY of existing_stack.
"""
self._stack = existing_stack[:] if existing_stack else []
def push_obj(self, obj, offset=0):
"""Add object to the stack and record its filename and line information.
Args:
obj: An object to store on the stack.
offset: Integer. If 0, the caller's stack frame is used. If 1,
the caller's caller's stack frame is used.
Returns:
TraceableObject.SUCCESS if appropriate stack information was found,
TraceableObject.HEURISTIC_USED if the stack was smaller than expected,
and TraceableObject.FAILURE if the stack was empty.
"""
traceable_obj = TraceableObject(obj)
self._stack.append(traceable_obj)
# Offset is defined in "Args" as relative to the caller. We are 1 frame
# beyond the caller and need to compensate.
return traceable_obj.set_filename_and_line_from_caller(offset + 1)
def pop_obj(self):
"""Remove last-inserted object and return it, without filename/line info."""
return self._stack.pop().obj
def peek_top_obj(self):
"""Return the most recent stored object."""
return self._stack[-1].obj
def peek_objs(self):
"""Return iterator over stored objects ordered newest to oldest."""
return (t_obj.obj for t_obj in reversed(self._stack))
def peek_traceable_objs(self):
"""Return iterator over stored TraceableObjects ordered newest to oldest."""
return reversed(self._stack)
def __len__(self):
"""Return number of items on the stack, and used for truth-value testing."""
return len(self._stack)
def copy(self):
"""Return a copy of self referencing the same objects but in a new list.
This method is implemented to support thread-local stacks.
Returns:
TraceableStack with a new list that holds existing objects.
"""
return TraceableStack(self._stack)
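# Editor-added usage sketch (hedged; not part of the original module): basic
# TraceableStack operations. push_obj records each object together with the
# file/line of the push site; peek_objs iterates newest to oldest, matching
# the docstrings above.
def _traceable_stack_usage_sketch():
  stack = TraceableStack()
  stack.push_obj("outer")  # filename/lineno recorded for this line
  stack.push_obj("inner")
  assert stack.peek_top_obj() == "inner"
  assert list(stack.peek_objs()) == ["inner", "outer"]
  assert len(stack) == 2
  assert stack.pop_obj() == "inner"  # pop_obj drops the trace metadata
  return stack.copy()  # shallow copy, as used for thread-local stacks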
|
tensorflow-master
|
tensorflow/python/framework/traceable_stack.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for tensor_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class TensorUtilTest(test.TestCase):
def testFloat(self):
value = 10.0
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape {}
float_val: %.1f
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array(value, dtype=np.float32), a)
def testFloatN(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0])
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTyped(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=dtypes.float32)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTypeCoerce(self):
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtypes.float32)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTypeCoerceNdarray(self):
arr = np.asarray([10, 20, 30], dtype="int")
t = tensor_util.make_tensor_proto(arr, dtype=dtypes.float32)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatSizes(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[1, 3])
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([[10.0, 20.0, 30.0]], dtype=np.float32), a)
def testFloatSizes2(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[3, 1])
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([[10.0], [20.0], [30.0]], dtype=np.float32), a)
def testFloatSizesLessValues(self):
t = tensor_util.make_tensor_proto(10.0, shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
float_val: 10.0
""", t)
# No conversion to Ndarray for this one: not enough values.
def testFloatNpArrayFloat64(self):
t = tensor_util.make_tensor_proto(
np.array([[10.0, 20.0, 30.0]], dtype=np.float64))
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_DOUBLE
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "@$\000\000\000\000\000\000@4\000\000\000\000\000\000@>\000\000\000\000\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_DOUBLE
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000$@\000\000\000\000\000\0004@\000\000\000\000\000\000>@"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float64, a.dtype)
self.assertAllClose(
np.array([[10.0, 20.0, 30.0]], dtype=np.float64),
tensor_util.MakeNdarray(t))
def testFloatTypesWithImplicitRepeat(self):
for dtype, nptype in [(dtypes.float32, np.float32),
(dtypes.float64, np.float64)]:
t = tensor_util.make_tensor_proto([10.0], shape=[3, 4], dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(
np.array(
[[10.0, 10.0, 10.0, 10.0],
[10.0, 10.0, 10.0, 10.0],
[10.0, 10.0, 10.0, 10.0]],
dtype=nptype),
a)
def testFloatMutateArray(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=dtypes.float32)
a = tensor_util.MakeNdarray(t)
a[0] = 5.0
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([5.0, 20.0, 30.0], dtype=np.float32), a)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
def testHalf(self):
t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=np.float16))
self.assertProtoEquals("""
dtype: DT_HALF
tensor_shape {
dim {
size: 2
}
}
half_val: 18688
half_val: 19712
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float16, a.dtype)
self.assertAllClose(np.array([10.0, 20.0], dtype=np.float16), a)
def testBfloat16(self):
test_type = dtypes.bfloat16.as_numpy_dtype
t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=test_type))
    # 10.0: 16672 = 0 10000010 (130) 0100000: (1 + 0/2 + 1/4) * 2^(130-127)
    # 20.0: 16800 = 0 10000011 (131) 0100000: (1 + 0/2 + 1/4) * 2^(131-127)
self.assertProtoEquals("""
dtype: DT_BFLOAT16
tensor_shape {
dim {
size: 2
}
}
half_val: 16672
half_val: 16800
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(test_type, a.dtype)
self.assertAllClose(np.array([10.0, 20.0], dtype=test_type), a)
def testInt(self):
t = tensor_util.make_tensor_proto(10)
self.assertProtoEquals("""
dtype: DT_INT32
tensor_shape {}
int_val: 10
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int32, a.dtype)
self.assertAllClose(np.array(10, dtype=np.int32), a)
def testLargeInt(self):
value = np.iinfo(np.int64).max
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: %d
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(value, dtype=np.int64), a)
def testLargeNegativeInt(self):
# We don't use the min np.int64 value here
# because it breaks np.abs().
#
# np.iinfo(np.int64).min = -9223372036854775808
# np.iinfo(np.int64).max = 9223372036854775807
# np.abs(-9223372036854775808) = -9223372036854775808
value = np.iinfo(np.int64).min + 1
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: %d
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(value, dtype=np.int64), a)
def testIntNDefaultType(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_INT32
tensor_shape { dim { size: 2 } dim { size: 2 } }
tensor_content: "\000\000\000\n\000\000\000\024\000\000\000\036\000\000\000("
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_INT32
tensor_shape { dim { size: 2 } dim { size: 2 } }
tensor_content: "\n\000\000\000\024\000\000\000\036\000\000\000(\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int32, a.dtype)
self.assertAllClose(np.array([[10, 20], [30, 40]], dtype=np.int32), a)
def testIntTypes(self):
for dtype, nptype in [(dtypes.int32, np.int32),
(dtypes.uint8, np.uint8),
(dtypes.uint16, np.uint16),
(dtypes.int16, np.int16),
(dtypes.int8, np.int8)]:
# Test with array.
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtype)
self.assertEquals(dtype, t.dtype)
self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
a = tensor_util.MakeNdarray(t)
self.assertEquals(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
# Test with ndarray.
t = tensor_util.make_tensor_proto(np.array([10, 20, 30], dtype=nptype))
self.assertEquals(dtype, t.dtype)
self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
a = tensor_util.MakeNdarray(t)
self.assertEquals(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
def testIntTypesWithImplicitRepeat(self):
for dtype, nptype in [(dtypes.int64, np.int64), (dtypes.int32, np.int32),
(dtypes.uint8, np.uint8), (dtypes.uint16, np.uint16),
(dtypes.int16, np.int16), (dtypes.int8, np.int8)]:
self.assertAllEqual(
np.array([[10, 11, 12, 12], [12, 12, 12, 12], [12, 12, 12, 12]],
dtype=nptype),
tensor_util.MakeNdarray(
tensor_util.make_tensor_proto([10, 11, 12],
shape=[3, 4],
dtype=dtype)))
def testIntMixedWithDimension(self):
# Github issue: 11974
dtype = dtypes.int32
nptype = np.int32
t = tensor_util.make_tensor_proto(
[10, tensor_shape.Dimension(20), 30], dtype=dtype)
self.assertEquals(dtype, t.dtype)
a = tensor_util.MakeNdarray(t)
self.assertEquals(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
def testLong(self):
t = tensor_util.make_tensor_proto(10, dtype=dtypes.int64)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: 10
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(10, dtype=np.int64), a)
def testLongN(self):
t = tensor_util.make_tensor_proto(
[10, 20, 30], shape=[1, 3], dtype=dtypes.int64)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_INT64
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000\000\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_INT64
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array([[10, 20, 30]], dtype=np.int64), a)
def testLongNpArray(self):
t = tensor_util.make_tensor_proto(np.array([10, 20, 30]))
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_INT64
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000\000\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_INT64
tensor_shape { dim { size: 3 } }
tensor_content: "\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=np.int64), a)
def testQuantizedTypes(self):
# Test with array.
data = [(21,), (22,), (23,)]
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint32)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_QINT32
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000\000\025\000\000\000\026\000\000\000\027"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_QINT32
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\000\000\026\000\000\000\027\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.qint32.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.quint8)
self.assertProtoEquals(r"""
dtype: DT_QUINT8
tensor_shape { dim { size: 3 } }
tensor_content: "\025\026\027"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.quint8.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint8)
self.assertProtoEquals(r"""
dtype: DT_QINT8
tensor_shape { dim { size: 3 } }
tensor_content: "\025\026\027"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.qint8.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.quint16)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_QUINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\000\025\000\026\000\027"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_QUINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\026\000\027\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.quint16.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint16)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_QINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\000\025\000\026\000\027"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_QINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\026\000\027\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.qint16.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
def testString(self):
t = tensor_util.make_tensor_proto("foo")
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape {}
string_val: "foo"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertEquals([b"foo"], a)
def testStringWithImplicitRepeat(self):
t = tensor_util.make_tensor_proto(["f", "g"], shape=[3, 4])
a = tensor_util.MakeNdarray(t)
self.assertAllEqual(
np.array([[b"f", b"g", b"g", b"g"], [b"g", b"g", b"g", b"g"],
[b"g", b"g", b"g", b"g"]],
dtype=np.object), a)
def testStringN(self):
t = tensor_util.make_tensor_proto([b"foo", b"bar", b"baz"], shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 1 } dim { size: 3 } }
string_val: "foo"
string_val: "bar"
string_val: "baz"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
def testStringNpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[b"a", b"ab"], [b"abc", b"abcd"]]))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 2 } dim { size: 2 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"a", b"ab"], [b"abc", b"abcd"]]), a)
def testArrayMethod(self):
class Wrapper(object):
def __array__(self):
return np.array([b"foo", b"bar", b"baz"])
t = tensor_util.make_tensor_proto(Wrapper(), shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 1 } dim { size: 3 } }
string_val: "foo"
string_val: "bar"
string_val: "baz"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
def testArrayInterface(self):
class Wrapper(object):
@property
def __array_interface__(self):
return np.array([b"foo", b"bar", b"baz"]).__array_interface__
t = tensor_util.make_tensor_proto(Wrapper(), shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 1 } dim { size: 3 } }
string_val: "foo"
string_val: "bar"
string_val: "baz"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
def testStringTuple(self):
t = tensor_util.make_tensor_proto((b"a", b"ab", b"abc", b"abcd"))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 4 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array((b"a", b"ab", b"abc", b"abcd")), a)
def testStringNestedTuple(self):
t = tensor_util.make_tensor_proto(((b"a", b"ab"), (b"abc", b"abcd")))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 2 } dim { size: 2 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array(((b"a", b"ab"), (b"abc", b"abcd"))), a)
def testComplex64(self):
t = tensor_util.make_tensor_proto((1 + 2j), dtype=dtypes.complex64)
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape {}
scomplex_val: 1
scomplex_val: 2
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplex128(self):
t = tensor_util.make_tensor_proto((1 + 2j), dtype=dtypes.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape {}
dcomplex_val: 1
dcomplex_val: 2
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplexWithImplicitRepeat(self):
for dtype, np_dtype in [(dtypes.complex64, np.complex64),
(dtypes.complex128, np.complex128)]:
t = tensor_util.make_tensor_proto((1 + 1j), shape=[3, 4], dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(
np.array(
[[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)]],
dtype=np_dtype),
a)
def testComplex64N(self):
t = tensor_util.make_tensor_proto(
[(1 + 2j), (3 + 4j), (5 + 6j)], shape=[1, 3], dtype=dtypes.complex64)
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape { dim { size: 1 } dim { size: 3 } }
scomplex_val: 1
scomplex_val: 2
scomplex_val: 3
scomplex_val: 4
scomplex_val: 5
scomplex_val: 6
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array([[(1 + 2j), (3 + 4j), (5 + 6j)]]), a)
def testComplex128N(self):
t = tensor_util.make_tensor_proto(
[(1 + 2j), (3 + 4j), (5 + 6j)], shape=[1, 3], dtype=dtypes.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 1 } dim { size: 3 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array([[(1 + 2j), (3 + 4j), (5 + 6j)]]), a)
def testComplex64NpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]),
dtype=dtypes.complex64)
# scomplex_val are real_0, imag_0, real_1, imag_1, ...
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape { dim { size: 2 } dim { size: 2 } }
scomplex_val: 1
scomplex_val: 2
scomplex_val: 3
scomplex_val: 4
scomplex_val: 5
scomplex_val: 6
scomplex_val: 7
scomplex_val: 8
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]), a)
def testComplex128NpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]),
dtype=dtypes.complex128)
# scomplex_val are real_0, imag_0, real_1, imag_1, ...
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 2 } dim { size: 2 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
dcomplex_val: 7
dcomplex_val: 8
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]), a)
def testUnsupportedDTypes(self):
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(np.array([1]), 0)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(3, dtype=dtypes.qint8)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto([3], dtype=dtypes.qint8)
# Validate the helpful error message when trying to convert an
    # unconvertible list to strings.
with self.assertRaisesRegexp(TypeError, "Failed to convert object"):
tensor_util.make_tensor_proto([tensor_shape.Dimension(1)])
def testTensorShapeVerification(self):
array = np.array([[1], [2]])
correct_shape = (2, 1)
incorrect_shape = (1, 2)
tensor_util.make_tensor_proto(array, shape=correct_shape, verify_shape=True)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(
array, shape=incorrect_shape, verify_shape=True)
def testShapeTooLarge(self):
with self.assertRaises(ValueError):
tensor_util.make_tensor_proto(np.array([1, 2]), shape=[1])
def testLowRankSupported(self):
t = tensor_util.make_tensor_proto(np.array(7))
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: 7
""", t)
def testShapeEquals(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
self.assertTrue(tensor_util.ShapeEquals(t, [2, 2]))
self.assertTrue(tensor_util.ShapeEquals(t, (2, 2)))
self.assertTrue(
tensor_util.ShapeEquals(t, tensor_shape.as_shape([2, 2]).as_proto()))
self.assertFalse(tensor_util.ShapeEquals(t, [5, 3]))
self.assertFalse(tensor_util.ShapeEquals(t, [1, 4]))
self.assertFalse(tensor_util.ShapeEquals(t, [4]))
class IsTensorTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testConstantTensor(self):
np_val = np.random.rand(3).astype(np.int32)
tf_val = constant_op.constant(np_val)
self.assertFalse(tensor_util.is_tensor(np_val))
self.assertTrue(tensor_util.is_tensor(tf_val))
class ConstantValueTest(test.TestCase):
def testConstant(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = constant_op.constant(np_val)
self.assertAllClose(np_val, tensor_util.constant_value(tf_val))
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = constant_op.constant(np_val)
self.assertAllClose(np_val, tensor_util.constant_value(tf_val))
@test_util.run_deprecated_v1
def testUnknown(self):
tf_val = gen_state_ops.variable(
shape=[3, 4, 7],
dtype=dtypes.float32,
name="tf_val",
container="",
shared_name="")
self.assertIs(None, tensor_util.constant_value(tf_val))
def testShape(self):
np_val = np.array([1, 2, 3], dtype=np.int32)
tf_val = array_ops.shape(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(np_val, c_val)
self.assertEqual(np.int32, c_val.dtype)
def testFill(self):
np_val = np.array([-1, -1, -1], dtype=np.float32)
tf_val = array_ops.fill([3], constant_op.constant(-1.0))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(np_val, c_val)
self.assertEqual(np.float32, c_val.dtype)
def testSize(self):
tf_val = array_ops.size(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(6, c_val)
@test_util.run_deprecated_v1
def testSizeOfScalar(self):
tf_val = array_ops.size(constant_op.constant(0.0))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(1, c_val)
self.assertEqual(np.ndarray, type(c_val))
@test_util.run_deprecated_v1
def testRank(self):
tf_val = array_ops.rank(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(np.ndarray, type(c_val))
self.assertEqual((), c_val.shape)
self.assertEqual(3, c_val)
# Repeat test using array_ops.rank_internal to avoid the optimization that
# happens in the rank function.
tf_val = array_ops.rank_internal(
constant_op.constant(
0.0, shape=[1, 2, 3]), optimize=False)
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(np.ndarray, type(c_val))
self.assertEqual((), c_val.shape)
self.assertEqual(3, c_val)
self.assertEqual([3], c_val)
def testCast(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = math_ops.cast(constant_op.constant(np_val), dtypes.float64)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = math_ops.cast(constant_op.constant(np_val), dtypes.float64)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
@test_util.run_deprecated_v1
def testConcat(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = array_ops.concat(
[np_val[0:1, :, :], np_val[1:2, :, :], np_val[2:3, :, :]], 0)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
tf_val = array_ops.concat(
[np_val[0, :, :], np_val[1, :, :], np_val[2, :, :]],
array_ops.placeholder(dtypes.int32))
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
tf_val = array_ops.concat([
np_val[0, :, :], array_ops.placeholder(dtypes.float32), np_val[2, :, :]
], 1)
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
@test_util.run_deprecated_v1
def testPack_Axis0(self):
inputs = [np.random.rand(4, 7) for _ in range(3)]
np_val = np.array(inputs)
tf_val = array_ops.stack(inputs)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
tf_val = array_ops.stack(
[inputs[0], array_ops.placeholder(dtypes.float32), inputs[2]])
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
@test_util.run_deprecated_v1
def testPack_Axis1(self):
inputs = [np.random.rand(4, 7) for _ in range(3)]
tf_val = array_ops.stack(inputs, axis=1)
c_val = tensor_util.constant_value(tf_val)
self.assertIsNone(c_val)
tf_val = array_ops.stack(
[inputs[0], array_ops.placeholder(dtypes.float32), inputs[2]], axis=1)
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
@test_util.run_deprecated_v1
def testPack_Partial_Axis0(self):
input_ = np.random.rand(4, 7)
tf_val = array_ops.stack([input_, array_ops.placeholder(dtypes.float32)])
c_val = tensor_util.constant_value(tf_val, partial=True)
self.assertAllClose(input_, c_val[0])
self.assertIsNone(c_val[1])
@test_util.run_deprecated_v1
def testPack_Partial_Axis1(self):
input_ = np.random.rand(4, 7)
tf_val = array_ops.stack([input_, array_ops.placeholder(dtypes.float32)],
axis=1)
c_val = tensor_util.constant_value(tf_val, partial=True)
self.assertIsNone(c_val)
def testEqual(self):
# Scalar inputs.
tf_val = math_ops.equal(constant_op.constant(1), constant_op.constant(1))
self.assertEqual(tensor_util.constant_value(tf_val), True)
tf_val = math_ops.equal(constant_op.constant(1), constant_op.constant(0))
self.assertEqual(tensor_util.constant_value(tf_val), False)
# Shaped inputs with broadcast semantics.
tf_val = math_ops.equal(constant_op.constant([[0, 1]]),
constant_op.constant([[0], [1]]))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(c_val, [[True, False], [False, True]])
def testNotEqual(self):
# Scalar inputs.
tf_val = math_ops.not_equal(constant_op.constant(1),
constant_op.constant(1))
self.assertEqual(tensor_util.constant_value(tf_val), False)
tf_val = math_ops.not_equal(constant_op.constant(1),
constant_op.constant(0))
self.assertEqual(tensor_util.constant_value(tf_val), True)
# Shaped inputs with broadcast semantics.
tf_val = math_ops.not_equal(constant_op.constant([[0, 1]]),
constant_op.constant([[0], [1]]))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(c_val, [[False, True], [True, False]])
def testLiteral(self):
x = "hi"
self.assertIs(x, tensor_util.constant_value(x))
def testNumpyNdarray(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
self.assertIs(np_val, tensor_util.constant_value(np_val))
def testVariable(self):
var = variables.Variable(1.0, name="variable_node")
self.assertIsNone(tensor_util.constant_value(var))
def testVariableV1(self):
var = variables.VariableV1(1.0, name="variable_node")
self.assertIsNone(tensor_util.constant_value(var))
class ConstantValueAsShapeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testConstant(self):
np_val = np.random.rand(3).astype(np.int32)
tf_val = constant_op.constant(np_val)
self.assertEqual(
tensor_shape.TensorShape(np_val),
tensor_util.constant_value_as_shape(tf_val))
tf_val = constant_op.constant([], dtype=dtypes.int32)
self.assertEqual(
tensor_shape.TensorShape([]),
tensor_util.constant_value_as_shape(tf_val))
@test_util.run_in_graph_and_eager_modes
def testShape(self):
tf_val = array_ops.shape(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual(tensor_shape.TensorShape([1, 2, 3]), c_val)
@test_util.run_in_graph_and_eager_modes
def testMinusOneBecomesNone(self):
tf_val = constant_op.constant([-1, 1, -1], shape=[3])
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([None, 1, None], c_val.as_list())
@test_util.run_deprecated_v1
def testPack(self):
tf_val = array_ops.stack(
[constant_op.constant(16), 37, array_ops.placeholder(dtypes.int32)])
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None], c_val.as_list())
@test_util.run_deprecated_v1
def testConcat(self):
tf_val = array_ops.concat(
[[16, 37], array_ops.placeholder(
dtypes.int32, shape=(2,))], 0)
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None, None], c_val.as_list())
tf_val = array_ops.concat(
[[16, 37], array_ops.placeholder(
dtypes.int32, shape=(1,)), [48]], 0)
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None, 48], c_val.as_list())
@test_util.run_deprecated_v1
def testSlice(self):
tf_val = array_ops.placeholder(dtypes.int32, shape=(4,))[0:2]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([None, None], c_val.as_list())
# begin:end
tf_val = constant_op.constant([10, 20, 30])[1:3]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([20, 30], c_val.as_list())
# begin:end:stride
tf_val = array_ops.strided_slice(
constant_op.constant([10, 20, 30]), [1], [3], strides=[2])
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([20], c_val.as_list())
# [1, 2, 16, 37, None, 48]
tf_val_orig = array_ops.concat(
[[1, 2, 16, 37], array_ops.placeholder(
dtypes.int32, shape=(1,)), [48]], 0)
# begin: no end
tf_val = tf_val_orig[2:]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None, 48], c_val.as_list())
# begin::negative slice
tf_val = tf_val_orig[2::-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 2, 1], c_val.as_list())
# :end:negative slice
tf_val = tf_val_orig[:1:-2]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([48, 37], c_val.as_list())
# begin:end:negative slice
tf_val = tf_val_orig[3:1:-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, 16], c_val.as_list())
# begin:negative end:slice
tf_val = tf_val_orig[1:-3:1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([2, 16], c_val.as_list())
# negative begin::slice
tf_val = tf_val_orig[-3::1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, None, 48], c_val.as_list())
# negative begin::negative slice
tf_val = tf_val_orig[-3::-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, 16, 2, 1], c_val.as_list())
# negative begin:negative end:negative slice
tf_val = tf_val_orig[-3:-5:-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, 16], c_val.as_list())
# Do not support shape inference for additional arguments
tf_val = constant_op.constant([10, 20, 30])[...]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([None, None, None], c_val.as_list())
# Do not support shape inference for tensor slices.
tf_val = constant_op.constant([10, 20, 30])[
array_ops.placeholder(dtypes.int32, shape=()):]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual(tensor_shape.unknown_shape(), c_val)
# Do not support shape inference for higher rank
with self.assertRaises(ValueError):
tf_val = constant_op.constant([[10], [20], [30]])[:, 0:]
c_val = tensor_util.constant_value_as_shape(tf_val)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/framework/tensor_util_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Python front-end supports for functions.
NOTE: At this time, functions are experimental and subject to change!. Proceed
with caution.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.eager import context
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import compat
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
class Defun(object):
"""Decorator used to define TensorFlow functions.
Use this decorator to make a Python function usable directly as a TensorFlow
function.
The decorated function must add ops to the default graph and return zero or
more `Tensor` objects. Call the decorator with named arguments, one for each
argument of the function to decorate, with the expected type of the argument
as value.
For example if the function to decorate accepts two `tf.float32` arguments
named `x` and `y`, call the decorator with:
@Defun(tf.float32, tf.float32)
def foo(x, y):
...
When you call the decorated function, it adds the `call` ops to the
default graph. In addition, it adds the definition of the function into the
default graph. Because the addition of the function into the graph
is deferred, the decorator can be used anywhere in the program.
Any variables created inside of the function are hoisted into the outer graph.
Note that the variables are created in the variable scope that was active
during the first call to the function. Subsequent function calls will refer to
the same set of variables.
Definitions of functions in a graph are frozen as soon as the graph is used to
create a session. However, new functions and new calls to existing functions
may be added to the graph, with the new functions themselves becoming
immediately frozen.
  Example (see also the [How To on functions](link_needed)):
```python
# Defining the function.
@tf.Defun(tf.float32, tf.float32)
def MyFunc(x, y):
return x + y, x - y
# Building the graph.
a = tf.constant([1.0])
b = tf.constant([2.0])
c, d = MyFunc(a, b, name='mycall')
```
"""
def __init__(self, *input_types, **kwargs):
"""Create a `Defun` decorator.
Args:
*input_types: A list of `tf.DType`
**kwargs: Optional keyword arguments, including
func_name - (optional). A python string, the name to use to
declare this `Function` in the graph.
grad_func - (optional). A function implementing the gradient
        of the function-to-register. This must be a
`_DefinedFunction` object. The gradient
function must satisfy the criterion defined in
function.proto:GradientDef.
python_grad_func - (optional). A function implementing the
gradient of the function python-side. This function must
take the current op and the gradients w.r.t. its outputs,
        and return the gradients w.r.t. the inputs. That is, it must
        implement the interface expected by `tf.RegisterGradient`.
This will be called by tf.gradients to add the gradient ops
to the graph. At most one of grad_func and python_grad_func
can be specified.
      out_names - (optional). A list of strings, one per output
tensor.
shape_func - (optional). A function taking the op and returning a list
of static shapes to set for the function's outputs.
"""
self._input_types = input_types
self._func_name = kwargs.pop("func_name", None)
self._grad_func = kwargs.pop("grad_func", None)
self._python_grad_func = kwargs.pop("python_grad_func", None)
self._out_names = kwargs.pop("out_names", None)
self._extra_kwargs = kwargs
def __call__(self, func):
# Various sanity checks on the callable func.
if not callable(func):
raise ValueError("function %s must be callable" % func)
# Func should not use kwargs and defaults.
argspec = tf_inspect.getargspec(func)
if argspec.keywords or argspec.defaults:
raise ValueError(
"function with argument defaults or keywords arguments are not"
" supported. {} has defaults {} and keywords {}.".format(
func, argspec.defaults, argspec.keywords))
# Computes how many arguments 'func' has.
min_args = len(argspec.args)
max_args = min_args
if argspec.varargs:
max_args = 1000000
argnames = argspec.args
if tf_inspect.ismethod(func):
# 1st argument is the "class" type.
min_args -= 1
argnames = argnames[1:]
if self._input_types:
# If Defun is given a list of types for the inputs, the number
# of input types should be compatible with 'func'.
num = len(self._input_types)
if num < min_args or num > max_args:
raise ValueError(
"The function has fewer arguments than the number of specified "
"input types.")
return _DefinedFunction(
func,
argnames,
self._input_types,
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
# 'func' expects no arguments and input types is an empty list.
if min_args == 0 and max_args == 0:
return _DefinedFunction(
func, [], [],
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
# Input types are unknown. It's an overloaded function and hence
# its definition needs to be deferred until it's called.
return _OverloadedFunction(
func,
argnames,
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
class _DefinedFunctionDeleter(object):
"""Unregister function from eager context."""
def __init__(self, name):
self.name = name
def __del__(self):
try:
context.remove_function(self.name)
except TypeError:
# Suppress some exceptions, mainly for the case when we're running on
# module deletion. Things that can go wrong include the context module
# already being unloaded, self._handle._handle_data no longer being
# valid, and so on. Printing warnings in these cases is silly
# (exceptions raised from __del__ are printed as warnings to stderr).
pass # 'NoneType' object is not callable when the handle has been
# partially unloaded.
except AttributeError:
pass # 'NoneType' object has no attribute 'eager_mode' when context has
# been unloaded. Will catch other module unloads as well.
class _DefinedFunction(object):
"""_DefinedFunction encapsulates a function definition and its properties.
Attributes:
name: The function name.
definition: The definition of this function. A FunctionDef proto.
grad_func_name: If not None, the name of this function's gradient function.
python_grad_func: A python callable implementing the gradient of
the function python-side.
"""
def __init__(self,
func,
argnames,
input_types,
func_name=None,
grad_func=None,
python_grad_func=None,
out_names=None,
shape_func=None,
capture_by_value=False,
whitelisted_stateful_ops=None,
capture_resource_var_by_value=True,
**kwargs):
"""Creates _DefinedFunction.
Args:
func: A python callable which constructs a tf function body.
argnames: A list of strings for function argument names.
      input_types: The function's argument types. Can be a tuple or list of
tf data types.
      func_name: The function name. Defaults to None, in which case the name
        is derived from 'func'.
grad_func: This function's gradient function, if not None. Defaults
to None.
python_grad_func: A python callable implementing the gradient of
the function python-side.
out_names: An optional list of strings for the function return value
names.
shape_func: An optional function mapping an op to a list of static
output shapes.
capture_by_value: Boolean (defaults to False). If True, captured values
will be copied into the function body.
whitelisted_stateful_ops: A set of ops that if stateful we ignore and
copy into the function body, when `capture_by_value` is True.
capture_resource_var_by_value: Boolean (defaults to True). If False,
captured resource variable returns the handle instead of value.
**kwargs: The keyword arguments. **kwargs is passed to every call
site of this function.
Raises:
ValueError: The function definition is invalid.
"""
self._func = func
self._input_types = input_types
self._func_name = func_name
self._grad_func = grad_func
self._python_grad_func = python_grad_func
self._out_names = out_names
self._shape_func = shape_func
self._capture_by_value = capture_by_value
self._whitelisted_stateful_ops = whitelisted_stateful_ops
if self._whitelisted_stateful_ops is None:
self._whitelisted_stateful_ops = set()
self._capture_resource_var_by_value = capture_resource_var_by_value
self._extra_kwargs = kwargs
# Constructed only when C API is disabled, lazily
self._definition = None
# Constructed only when C API is enabled, lazily
self._c_func = None
self._function_deleter = None
self._sub_functions = {} # Constructed with _definition or _c_func
# pylint: disable=protected-access
device_funcs = ops.get_default_graph()._device_functions_outer_to_inner
# pylint: enable=protected-access
    # Get the innermost device if possible.
self._caller_device = device_funcs[-1] if device_funcs else None
# Cached OpDef for this function. When C API is enabled, this is
# the only part of FunctionDef that we cache in Python. When C API
# is disabled the whole _definition is available and this is simply
# another reference to _definition.signature
self._op_def = None
assert isinstance(input_types, (list, tuple))
self._arg_types = input_types
self._arg_names = [argnames[i] if i < len(argnames) else ("arg%d" % i)
for i in range(len(input_types))]
@property
def name(self):
"""Function name."""
self._create_definition_if_needed()
return self._func_name
@property
def definition(self):
"""Function definition proto."""
self._create_definition_if_needed()
if self._c_func:
with c_api_util.tf_buffer() as buf:
c_api.TF_FunctionToFunctionDef(self._c_func.func, buf)
fdef = function_pb2.FunctionDef()
proto_data = c_api.TF_GetBuffer(buf)
fdef.ParseFromString(compat.as_bytes(proto_data))
with ops.init_scope():
if context.executing_eagerly():
context.add_function(self._c_func.func)
self._function_deleter = _DefinedFunctionDeleter(
fdef.signature.name)
return fdef
return self._definition
@property
def _signature(self):
self._create_definition_if_needed()
return self._op_def
def set_grad_func(self, grad_func):
"""Specifies the gradient function of this function."""
assert not self._grad_func
assert isinstance(grad_func, _DefinedFunction)
self._grad_func = grad_func
@property
def grad_func_name(self):
"""Returns the name of the gradient function."""
return self._grad_func.name if self._grad_func else None
@property
def python_grad_func(self):
"""Python gradient function callable."""
return self._python_grad_func
@property
def declared_input_types(self):
"""Returns the list of data types of explicit declared inputs."""
return self._input_types
@property
def captured_inputs(self):
"""Returns the list of implicitly captured inputs."""
self._create_definition_if_needed()
return self._extra_inputs
@property
def stateful_ops(self):
"""Returns the list of stateful ops in function definition.
Returns:
A list of (op.name, op.type) pairs.
"""
self._create_definition_if_needed()
return self._stateful_ops
def _create_definition_if_needed(self):
"""Creates the function definition if it's not created yet."""
with context.graph_mode():
self._create_definition_if_needed_impl()
def _create_definition_if_needed_impl(self):
"""This is not what you want, see _create_definition_if_needed."""
if self._definition is not None or self._c_func is not None:
return
# Copy variable collections (by reference) from the parent graph such that
# name based variable sharing (e.g. via tf.make_template) works between the
# func graph and parent graph.
variable_keys = []
variable_keys.extend(ops.GraphKeys._VARIABLE_COLLECTIONS) # pylint: disable=protected-access
variable_keys.append(vs._VARSTORE_KEY) # pylint: disable=protected-access
collections_ref = {}
parent_collections_ref = ops.get_default_graph()._collections # pylint: disable=protected-access
for key in variable_keys:
if key not in parent_collections_ref:
parent_collections_ref[key] = collections_ref[key] = []
else:
collections_ref[key] = parent_collections_ref[key]
temp_graph = func_graph_from_py_func(
self._func,
self._arg_names,
self._arg_types,
self._func_name,
self._capture_by_value,
self._caller_device,
collections_ref=collections_ref,
whitelisted_stateful_ops=self._whitelisted_stateful_ops,
capture_resource_var_by_value=self._capture_resource_var_by_value)
self._extra_inputs = temp_graph.extra_inputs
# pylint: disable=protected-access
self._sub_functions = temp_graph._functions
# pylint: enable=protected-access
# Extra kwargs are treated as attrs on the function def.
if self._func_name:
base_func_name = self._func_name
else:
base_func_name = function_utils.get_func_name(self._func)
if self._grad_func:
base_func_name += ("_%s" % self._grad_func.name)
kwargs_attr = _parse_kwargs_as_attrs(base_func_name, **self._extra_kwargs)
if not temp_graph._c_graph: # pylint: disable=protected-access
# Build the FunctionDef
self._definition = graph_to_function_def.graph_to_function_def(
temp_graph,
temp_graph.get_operations(),
temp_graph.inputs,
temp_graph.outputs,
out_names=self._out_names)
for k in kwargs_attr:
self._definition.attr[k].CopyFrom(kwargs_attr[k])
# Hash the definition and its dependencies.
self._hash_str = self._create_hash_str(
self._definition.signature.input_arg,
self._definition.signature.output_arg, self._definition.node_def)
# Finally, we decide the function name to use. If not specified,
# make up something which is almost certainly unique (but deterministic).
if not self._func_name:
self._func_name = "_".join([base_func_name, self._hash_str])
self._definition.signature.name = self._func_name
if self._func.__doc__:
self._definition.signature.description = self._func.__doc__
self._op_def = self._definition.signature
else: # C API is enabled
output_names = ([compat.as_bytes(x) for x in self._out_names]
if self._out_names else [])
description = self._func.__doc__ or None
# pylint: disable=protected-access
c_func = c_api.TF_GraphToFunction_wrapper(
temp_graph._c_graph,
base_func_name,
self._func_name is None, # append_hash_to_fn_name
None, # opers
[t._as_tf_output() for t in temp_graph.inputs],
[t._as_tf_output() for t in temp_graph.outputs],
output_names,
[], # control_outputs
[], # control_output_names
None, # opts
description)
self._c_func = c_api_util.ScopedTFFunction(c_func)
# pylint: enable=protected-access
self._set_c_attrs(kwargs_attr)
# Set cached fields: _op_def and _func_name (if not already set)
self._op_def = self.definition.signature
if self._func_name:
assert self._func_name == self._op_def.name
else:
self._func_name = compat.as_str(self._op_def.name)
self._stateful_ops = [(op.name, op.type)
for op in temp_graph.get_operations()
if op.op_def.is_stateful]
def _set_c_attrs(self, attrs):
"""Sets `attrs` as attributes of self._c_func.
Requires that self._c_func is not None.
Args:
attrs: a dictionary from attribute name to attribute proto value
"""
for name, attr_value in attrs.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
c_api.TF_FunctionSetAttrValueProto(self._c_func.func, compat.as_str(name),
serialized)
def _create_hash_str(self, input_arg, output_arg, node_def):
"""Creates an 8-character string unique to this input.
Args:
input_arg: the input_arg field of an OpDef
(e.g. self._definition.signature.input_arg)
output_arg: the output_arg field of an OpDef
(e.g. self._definition.signature.output_arg)
node_def: the node_def field of a FunctionDef
(e.g. self._definition.node_def)
Returns:
The unique string for this input
"""
hasher = hashlib.sha1()
def update_num(n):
hasher.update(compat.as_bytes("%x" % n))
def update_str(s):
update_num(len(s))
hasher.update(compat.as_bytes(s))
def update_strs(slist):
update_num(len(slist))
for s in slist:
update_str(s)
for adef in input_arg:
update_str(adef.SerializeToString())
for adef in output_arg:
update_str(adef.SerializeToString())
for n in sorted(node_def, key=lambda n: n.name):
update_str(n.name)
update_str(n.op)
update_strs(n.input)
update_num(len(n.attr))
# NOTE: protobuf map serialization does not guarantee ordering.
for k in sorted(n.attr):
update_str(k)
update_str(n.attr[k].SerializeToString())
return hasher.hexdigest()[:8]
def add_to_graph(self, g):
"""Adds this function into the graph g."""
self._create_definition_if_needed()
# Adds this function into 'g'.
# pylint: disable=protected-access
if context.executing_eagerly():
context.context().add_function_def(self.definition)
else:
g._add_function(self)
# pylint: enable=protected-access
# Ensures related sub-routines are defined in 'g', too.
for f in self._sub_functions.values():
f.add_to_graph(g)
# Adds its gradient function, too.
if self._grad_func:
self._grad_func.add_to_graph(g)
def __call__(self, *args, **kwargs):
self.add_to_graph(ops.get_default_graph())
args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs
ret, op = _call(self._signature, *args, **kwargs)
# Set a hidden attr in 'op' so that gradients_impl can refer back
# to this _DefinedFunction instance to access python_grad_func.
assert isinstance(op, ops.Operation)
setattr(op, "__defun", self)
if self._shape_func is not None:
shapes = self._shape_func(op)
if len(shapes) != len(op.outputs):
raise ValueError("shape_func produced %d shapes for %d outputs" %
(len(shapes), len(op.outputs)))
for (t, shape) in zip(op.outputs, shapes):
t.set_shape(shape)
return ret
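# Editor-added usage sketch (hedged; not part of the original module): how a
# Defun-decorated callable becomes a _DefinedFunction. Reading `.name` (or
# `.definition`) triggers the lazy _create_definition_if_needed path above;
# calling the function emits a call op into the default graph. The names
# below are hypothetical.
def _defined_function_usage_sketch():
  @Defun(dtypes.float32, dtypes.float32)
  def _AddSub(x, y):
    return x + y, x - y
  name = _AddSub.name  # e.g. "_AddSub_<8-hex-hash>" when func_name is unset
  a = ops.convert_to_tensor(1.0)
  b = ops.convert_to_tensor(2.0)
  s, d = _AddSub(a, b)  # registers the FunctionDef and adds the call op
  return name, s, d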
class _OverloadedFunction(object):
"""_OverloadedFunction encapsulates an overloaded function.
_OverloadedFunction maintains a mapping from input types to
instantiated _DefinedFunction in self._overload.
"""
def __init__(self,
func,
argnames,
func_name=None,
grad_func=None,
python_grad_func=None,
out_names=None,
**kwargs):
"""Creates _DefinedFunction.
Args:
func: A python callable which constructs a tf function body.
argnames: A list of strings for function argument names.
      func_name: The function name. Defaults to None, in which case the name
        is derived from 'func'.
grad_func: This function's gradient function, if not None. Defaults
to None.
python_grad_func: A python callable implementing the gradient of
the function python-side.
out_names: A list of strings for the function return value names.
**kwargs: The keyword arguments. **kwargs is passed to every call
site of this function.
Raises:
ValueError: The function definition is invalid.
"""
self._func = func
self._argnames = argnames
self._func_name = func_name
assert grad_func is None or isinstance(grad_func, _OverloadedFunction)
self._grad_func = grad_func
self._python_grad_func = python_grad_func
self._out_names = out_names
self._extra_kwargs = kwargs
self._overload = {}
def instantiate(self, input_types):
"""Instantiate this function given input argument types.
Args:
input_types: A list of data types for the inputs.
Returns:
_DefinedFunction for the given input types.
"""
# Stringify the type list.
key = _type_list_to_str(input_types)
defined = self._overload.get(key)
if not defined:
# If not defined yet, define the function given the input types.
name = self._func_name
if name is not None:
name = "_".join([name, key])
defined = _DefinedFunction(
self._func,
self._argnames,
input_types,
name,
None,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
_ = defined.name # Fully instantiate the function definition.
if self._grad_func:
# If _grad_func is given, it is another
# _OverloadedFunction. We need to instantiate it with the
# right input types.
output_types = [
dtypes.DType(_.type) for _ in defined._signature.output_arg # pylint: disable=protected-access
]
# pylint: disable=protected-access
defined._grad_func = self._grad_func.instantiate(input_types +
output_types)
# pylint: enable=protected-access
self._overload[key] = defined
return defined
def __call__(self, *args, **kwargs):
input_types = []
args = list(args)
for (i, x) in enumerate(args):
x = ops.convert_to_tensor(x)
if not isinstance(x, ops.Tensor):
raise ValueError("Expect a Tensor but get ", x)
input_types.append(x.dtype)
args[i] = x
return self.instantiate(input_types)(*args, **kwargs)
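# Editor-added usage sketch (hedged; not part of the original module): with no
# declared input types, Defun yields an _OverloadedFunction; each distinct
# input-dtype signature instantiates (and caches) its own _DefinedFunction.
def _overloaded_function_usage_sketch():
  @Defun()  # no input types -> overloaded, instantiated per call signature
  def _Identity(x):
    return x
  f = _Identity(ops.convert_to_tensor(1.0))  # float32 overload
  i = _Identity(ops.convert_to_tensor(1))    # int32 overload
  return f, i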
class _FuncGraph(ops.Graph):
"""A helper for constructing a function.
_FuncGraph overrides ops.Graph's create_op() so that we can keep
track of all inputs into every op created inside the function. If
  any input is from another graph, we keep track of it in self._captured
  and substitute the input with a placeholder.
  Each captured input's corresponding placeholder is converted into a
  function argument and the caller passes in the captured tensor.
"""
def __init__(self, name, capture_by_value, whitelisted_stateful_ops,
capture_resource_var_by_value, *args, **kwargs):
super(_FuncGraph, self).__init__(*args, **kwargs)
self._capture_by_value = capture_by_value
self._whitelisted_stateful_ops = whitelisted_stateful_ops
self._capture_resource_var_by_value = capture_resource_var_by_value
self._building_function = True
self._outer_graph = ops.get_default_graph()
self._vscope = vs.get_variable_scope()
self._old_custom_getter = self._vscope.custom_getter
# The name of the function.
self.name = name
# Placeholder tensors representing the inputs to this function. The tensors
# are in this _FuncGraph.
self.inputs = []
    # Tensors that will be returned by this function. The tensors are in this
# _FuncGraph.
self.outputs = []
# Maps external tensor -> internal tensor (e.g. input placeholder).
self._captured = {}
# The external tensors that have been captured as inputs and must be passed
# to this function (empty if capturing by value, otherwise these are the
# keys of _captured).
self.extra_inputs = []
    # Input placeholders that have been added for captured values (empty if
    # capturing by value).
self.extra_args = []
# Captured variables.
# TODO(skyewm): is this needed?
self.extra_vars = []
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Overridden from `tf.Graph` to update both the init_scope container
and the present inner container. This is necessary to make sure setting
containers applies correctly both to created variables and to stateful
ops.
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
# pylint: disable=protected-access
with ops.init_scope():
original_init_container = ops.get_default_graph()._container
try:
self._container = container_name
with ops.init_scope():
ops.get_default_graph()._container = container_name
yield self._container
finally:
self._container = original_container
with ops.init_scope():
ops.get_default_graph()._container = original_init_container
# pylint: enable=protected-access
# pylint: enable=g-doc-return-or-yield
def getvar(
self,
getter,
name,
shape=None,
dtype=None,
initializer=None,
reuse=None,
trainable=True,
collections=None, # pylint: disable=redefined-outer-name
use_resource=None,
**kwargs):
"""A custom variable getter."""
# Here, we switch the default graph to the outer graph and ask the
# variable scope in which the function is defined to give us the
# variable. The variable is stashed in extra_vars and returned to
# the caller.
#
# We capture these variables so that the variable definition is
    # hoisted upward to the outermost graph.
with self._outer_graph.as_default():
# pylint: disable=protected-access
var = self._vscope.get_variable(
vs._get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
reuse=reuse,
trainable=trainable,
collections=collections,
use_resource=use_resource)
self.extra_vars.append(var)
if (isinstance(var, resource_variable_ops.BaseResourceVariable) and
self._capture_resource_var_by_value):
# For resource-based variables read the variable outside the function
# and pass in the value. This ensures that the function is pure and
# differentiable. TODO(apassos) this may have performance problems if
# the function will only do embedding lookups on the variable.
return var.value()
return var
def create_op(self, op_type, inputs, dtypes=None, **kwargs): # pylint: disable=redefined-outer-name
for i, x in enumerate(inputs):
if isinstance(x, ops.EagerTensor) or x.graph is not self:
inputs[i] = self.capture(x)
return super(_FuncGraph, self).create_op(op_type, inputs,
dtypes=dtypes, **kwargs)
def capture(self, tensor, name=None):
"""Adds the given tensor to this graph and returns the captured tensor."""
if tensor in self._captured:
# Captured already.
return self._captured[tensor]
elif self._capture_by_value:
return self._add_tensor_and_parents(tensor)
else:
return self._capture_tensor_as_extra_input(tensor, name)
def _capture_tensor_as_extra_input(self, tensor, name=None):
# Substitute with a placeholder.
self.extra_inputs.append(tensor)
# Hoist the new input placeholder out of any control flow context
# we're currently in.
with ops.control_dependencies(None):
ph = array_ops.placeholder(
tensor.dtype, shape=tensor.get_shape(), name=name)
# pylint: disable=protected-access
if isinstance(tensor, ops.EagerTensor):
handle_data = tensor._handle_data
if handle_data:
handle_data = handle_data.SerializeToString()
else:
handle_data = c_api.GetHandleShapeAndType(tensor.graph._c_graph,
tensor._as_tf_output())
if handle_data:
c_api.SetHandleShapeAndType(ph.graph._c_graph, ph._as_tf_output(),
compat.as_bytes(handle_data))
# pylint: enable=protected-access
self.inputs.append(ph)
self._captured[tensor] = ph
self.extra_args.append(ph)
if _is_guaranteed_const(tensor):
with ops.control_dependencies(None):
return array_ops.guarantee_const(ph)
else:
return ph
def _add_tensor_and_parents(self, tensor):
op = self._add_op_and_parents(tensor.op)
return op.outputs[tensor.value_index]
def _add_op_and_parents(self, op):
# pylint: disable=protected-access
op_def = graph_to_function_def._get_op_def(op)
# pylint: enable=protected-access
if op_def.is_stateful and op not in self._whitelisted_stateful_ops:
raise ValueError("Cannot capture a stateful node (name:%s, type:%s) "
"by value." % (op.name, op.type))
elif op.type in ("Placeholder", "PlaceholderV2"):
raise ValueError("Cannot capture a placeholder (name:%s, type:%s) "
"by value." % (op.name, op.type))
captured_inputs = [self._add_tensor_and_parents(x) for x in op.inputs]
captured_op = self.create_op(
op.type,
captured_inputs, [o.dtype for o in op.outputs],
name=op.name,
attrs=op.node_def.attr,
op_def=op_def)
for t, captured_t in zip(op.outputs, captured_op.outputs):
self._captured[t] = captured_t
return captured_op
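# A hedged capture sketch (illustrative): a tensor created outside the
# function body that is referenced inside it becomes an extra input; the body
# sees the placeholder created by _capture_tensor_as_extra_input, and the
# call site passes the captured tensor automatically. Assuming `constant_op`
# is imported:
#
#   c = constant_op.constant(10.0)       # lives in the outer graph
#
#   @function.Defun(dtypes.float32)
#   def AddC(x):
#     return x + c                       # `c` is captured here
#
#   # AddC's _FuncGraph records `c` in extra_inputs and its placeholder in
#   # extra_args; the generated FunctionDef gains an extra input argument.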
def func_graph_from_py_func(func,
arg_names,
arg_types,
name=None,
capture_by_value=False,
device=None,
colocation_stack=None,
container=None,
collections_ref=None,
arg_shapes=None,
whitelisted_stateful_ops=None,
capture_resource_var_by_value=True):
"""Returns a _FuncGraph generated from `func`.
Args:
func: A Python callable which constructs a TF function body. The arguments
must correspond to `arg_types`. Returns a value or list/tuple of values.
No returned value can be None.
arg_names: A sequence of strings for the function argument names.
arg_types: A sequence of the function's argument types.
name: The function name. If None, the name is derived from `func`.
capture_by_value: boolean. If True, captured values will be copied into the
function body.
device: device name or function.
colocation_stack: A colocation stack (list) the _FuncGraph should use.
container: A container name the _FuncGraph should start with.
collections_ref: A reference to a collections dict the _FuncGraph should
use internally.
arg_shapes: A sequence of the function's argument shapes.
    whitelisted_stateful_ops: A set of stateful ops that may nevertheless be
      captured by value; they are re-created inside the function body instead
      of raising an error.
    capture_resource_var_by_value: Boolean (defaults to True). If False,
      captured resource variables return their handle instead of their value.
Returns:
A _FuncGraph.
Raises:
ValueError: if func returns None.
"""
if not name:
name = function_utils.get_func_name(func)
func_graph = _FuncGraph(name, capture_by_value, whitelisted_stateful_ops,
capture_resource_var_by_value)
with func_graph.as_default(), ops.device(device):
# pylint: disable=protected-access
if collections_ref is not None:
func_graph._collections = collections_ref
if container is not None:
func_graph._container = container
if colocation_stack is not None:
func_graph._colocation_stack = colocation_stack
# pylint: enable=protected-access
if arg_shapes is None:
arg_shapes = [None] * len(arg_types)
# Create placeholders for the function arguments.
for (argname, argtype, argshape) in zip(arg_names, arg_types, arg_shapes):
argholder = array_ops.placeholder(argtype, shape=argshape, name=argname)
func_graph.inputs.append(argholder)
# Call func and gather the output tensors.
with vs.variable_scope("", custom_getter=func_graph.getvar):
outputs = func(*func_graph.inputs)
# There is no way of distinguishing between a function not returning
# anything and a function returning None in Python.
# We need to allow the former and ideally want to forbid the latter as
# it is most likely user error.
# TODO(iga): Consider adding a @NoOutput decorator on top of @Defun to
# allow users to explicitly mark the function as not returning anything.
# For now, we allow a single None return and interpret it as a function
# with no output.
if outputs is None:
outputs = []
else:
# If func only returned one value, make it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
if any(_ is None for _ in outputs):
raise ValueError("Function %s can not return None." % name)
# Ensures each output is a Tensor in the function graph.
outputs = [ops.convert_to_tensor(t) for t in outputs]
outputs = [func_graph.capture(t) if t.graph is not func_graph else t
for t in outputs]
func_graph.outputs = outputs
return func_graph
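# A hedged sketch of driving func_graph_from_py_func directly (the Defun
# machinery normally does this); `math_ops` is assumed to be imported:
#
#   def body(x, y):
#     return math_ops.add(x, y)
#
#   fg = func_graph_from_py_func(
#       body,
#       arg_names=["x", "y"],
#       arg_types=[dtypes.float32, dtypes.float32],
#       name="add_xy")
#   # fg.inputs holds two float32 placeholders; fg.outputs holds their sum.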
def _is_guaranteed_const(tensor):
"""Determines whether `tensor` is guaranteed to be a constant.
A tensor is guaranteed to be a constant if either it was produced by
a `GuaranteeConst` op or if all of its children are guaranteed to be
constants.
Args:
tensor: The tensor for which to determine const-ness.
Returns:
True if `tensor` is guaranteed to be a constant, False otherwise.
"""
if isinstance(tensor, ops.EagerTensor):
return False
class Work(object):
def __init__(self, op, leaving):
self.op = op
self.leaving = leaving
is_guaranteed_const = lambda op: op.node_def.op == "GuaranteeConst"
constants = set([])
def all_inputs_const(op):
# If all inputs of an op are guaranteed constants, then we can infer that
# the op produces a constant as well.
return op.inputs and all(inp.op in constants for inp in op.inputs)
visited = set([])
stack = [Work(tensor.op, leaving=False)]
while stack:
work = stack.pop()
if work.leaving:
if all_inputs_const(work.op):
constants.add(work.op)
continue
visited.add(work.op)
if is_guaranteed_const(work.op):
constants.add(work.op)
continue
# This op will be revisited after all its inputs are checked for const-ness.
stack.append(Work(work.op, leaving=True))
for inp in work.op.inputs:
if inp.op not in visited:
stack.append(Work(inp.op, leaving=False))
return tensor.op in constants
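# A hedged sketch of the const-ness propagation above (illustrative):
#
#   c = array_ops.guarantee_const(constant_op.constant(1.0))
#   d = c + c        # every input op is marked constant -> d's op is too
#   e = d + some_placeholder                 # not guaranteed const
#
#   # _is_guaranteed_const(d) -> True; _is_guaranteed_const(e) -> False.
#   # Note a plain Const op is *not* guaranteed const by itself: the DFS only
#   # marks an op when it is a GuaranteeConst node or all of its inputs are
#   # already marked.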
def _call(sig, *inputs, **kwargs):
"""Adds a node calling a function.
This adds a `call` op to the default graph that calls the function
of signature `sig`, passing the tensors in `inputs` as arguments.
It returns the outputs of the call, which are one or more tensors.
  `sig` is an `OpDef` protocol buffer describing the function's signature.
You can pass an optional keyword parameter `name=string` to name the
added operation.
You can pass an optional keyword parameter `noinline=True|False` to
instruct the runtime not to inline the function body into the call
site.
Args:
    sig: OpDef proto. The signature of the function.
*inputs: arguments to the function.
**kwargs: Optional keyword arguments. Can only contain 'name' or
'noinline'.
Returns:
    A 2-element tuple. First element: a Tensor if the function returns a single
    value; a tuple of Tensors if the function returns multiple values; the
    Operation if the function returns no values. Second element: the Operation.
Raises:
ValueError: if the arguments are invalid.
"""
if len(inputs) != len(sig.input_arg):
raise ValueError("Expected number of arguments: %d, received: %d" % (len(
sig.input_arg), len(inputs)))
name = kwargs.pop("name", None)
g = ops.get_default_graph()
func_name = sig.name
if name is None:
name = func_name
attrs = _parse_kwargs_as_attrs(func_name, **kwargs)
output_types = [dtypes.DType(x.type) for x in sig.output_arg]
op = g.create_op(
func_name, list(inputs), output_types, name=name, attrs=attrs, op_def=sig)
if op.outputs:
if len(op.outputs) == 1:
ret = op.outputs[0]
else:
ret = tuple(op.outputs)
else:
ret = op
return ret, op
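# A hedged call-site sketch (illustrative): _call is the low-level helper
# used by _DefinedFunction.__call__. Given a defined function `f`, something
# along these lines adds a call op and suppresses inlining:
#
#   out, call_op = _call(f._signature, x, y, name="f_call", noinline=True)
#
# `f._signature` stands for the function's OpDef-style signature; it is a
# private attribute and is shown here purely for illustration.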
def _from_definition(fdef, grad_func=None):
"""Creates a _DefinedFunction initialized from a FunctionDef proto.
Args:
fdef: a FunctionDef
grad_func: a _DefinedFunction or None
Returns:
A _DefinedFunction representing fdef
"""
# TODO(iga): This method does major surgery on _DefinedFunction.
# Make it a named constructor using @classmethod of _DefinedFunction.
# The Python callable is only needed to create a FunctionDef. Since we have
# the FunctionDef here, we don't need to set _DefinedFunction._func (nor do we
# have access to such a callable here).
func = None
argnames = [arg.name for arg in fdef.signature.input_arg]
input_types = tuple(
dtypes.as_dtype(arg.type) for arg in fdef.signature.input_arg)
func_name = fdef.signature.name
# Note: FunctionDefs do not include python gradient functions, so if the
# original _DefinedFunction included one it will not be reflected here.
python_grad_func = None
out_names = [arg.name for arg in fdef.signature.output_arg]
result = _DefinedFunction(func, argnames, input_types, func_name, grad_func,
python_grad_func, out_names)
# pylint: disable=protected-access
serialized = fdef.SerializeToString()
c_func = c_api.TF_FunctionImportFunctionDef(serialized)
result._c_func = c_api_util.ScopedTFFunction(c_func)
result._extra_inputs = []
result._op_def = fdef.signature
# pylint: enable=protected-access
return result
def from_library(lib):
"""Creates _DefinedFunctions initialized from a FunctionDefLibrary proto.
This method handles assigning the correct gradient functions to each
function.
Args:
lib: a FunctionDefLibrary
Returns:
A list of _DefinedFunctions
Raises:
ValueError: `lib` is invalid
"""
if not lib.function and not lib.gradient:
return []
# function name -> FunctionDef proto
funcs = {fdef.signature.name: fdef for fdef in lib.function}
  # Validate that all referenced function names have function defs
for g in lib.gradient:
if g.function_name not in funcs:
raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
(g.function_name, str(lib)))
if g.gradient_func not in funcs:
raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
(g.gradient_func, str(lib)))
# function name -> gradient function name
func_to_grad = collections.defaultdict(lambda: None)
# gradient function name -> names of functions having that grad function
grad_to_funcs = collections.defaultdict(list)
for gdef in lib.gradient:
func_to_grad[gdef.function_name] = gdef.gradient_func
grad_to_funcs[gdef.gradient_func].append(gdef.function_name)
# Start with functions without gradients
ready = [
fdef for fdef in lib.function if func_to_grad[fdef.signature.name] is None
]
if not ready:
raise ValueError(
"FunctionDefLibrary contains cyclic gradient functions!\n" + str(lib))
# function name -> _DefinedFunction
initialized = {}
while ready:
fdef = ready.pop()
name = fdef.signature.name
grad = initialized.get(func_to_grad[name])
if func_to_grad[name]:
assert grad
defined_func = _from_definition(fdef, grad_func=grad)
initialized[name] = defined_func
ready.extend(funcs[f] for f in grad_to_funcs[name])
return initialized.values()
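# A hedged round-trip sketch (illustrative): the function library embedded in
# a GraphDef can be turned back into _DefinedFunctions, with gradient links
# restored, and re-registered elsewhere:
#
#   graph_def = some_graph.as_graph_def()
#   for fn in from_library(graph_def.library):
#     fn.add_to_graph(other_graph)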
def _get_experimental_kwarg_as_attr(attr_name, value):
"""Creates an AttrValue for a python object."""
if isinstance(value, bool):
return attr_value_pb2.AttrValue(b=value)
elif isinstance(value, int):
return attr_value_pb2.AttrValue(i=value)
elif isinstance(value, float):
return attr_value_pb2.AttrValue(f=value)
elif isinstance(value, str):
return attr_value_pb2.AttrValue(s=compat.as_bytes(value))
else:
raise ValueError("Unsupported attribute type for %s with type %s" %
(attr_name, type(value)))
def _parse_kwargs_as_attrs(func_name, **kwargs):
"""Parses **kwargs into a node's attributes."""
attrs = {}
noinline = kwargs.pop("noinline", None)
if noinline is not None:
attrs["_noinline"] = attr_value_pb2.AttrValue(b=bool(noinline))
# For compatibility with previous behavior, Defun does not perform shape
# inference through its function call operations.
attrs["_disable_call_shape_inference"] = attr_value_pb2.AttrValue(b=True)
compiled = kwargs.pop("compiled", None)
separate_compiled_gradients = kwargs.pop("separate_compiled_gradients", None)
if compiled is not None:
attrs["_XlaCompile"] = attr_value_pb2.AttrValue(b=bool(compiled))
attrs["_XlaSeparateCompiledGradients"] = attr_value_pb2.AttrValue(
b=bool(separate_compiled_gradients))
# Forward _XlaScope from enclosing context (if set), otherwise create new.
# pylint: disable=protected-access
if "_XlaScope" in ops.get_default_graph()._attr_scope_map:
attrs["_XlaScope"] = ops.get_default_graph()._attr_scope_map["_XlaScope"]
else:
attrs["_XlaScope"] = attr_value_pb2.AttrValue(
s=("function_%s" % func_name).encode())
# pylint: enable=protected-access
kwargs_keys = list(kwargs.keys())
for key in kwargs_keys:
if key.startswith("experimental_"):
attrs[key] = _get_experimental_kwarg_as_attr(key, kwargs[key])
del kwargs[key]
if kwargs:
raise ValueError("Unknown keyword arguments: %s" % kwargs.keys())
return attrs
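# A hedged sketch of the attr translation above (illustrative values):
#
#   attrs = _parse_kwargs_as_attrs(
#       "MyFunc", noinline=True, compiled=True, experimental_tag=7)
#   # attrs["_noinline"].b                      -> True
#   # attrs["_disable_call_shape_inference"].b  -> True
#   # attrs["_XlaCompile"].b                    -> True
#   # attrs["experimental_tag"].i               -> 7
#   # attrs["_XlaScope"].s                      -> b"function_MyFunc"
#   #   (assuming no enclosing _XlaScope attr scope is active)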
def get_extra_vars():
"""Returns the captured variables by the function.
Returns:
If the default graph is being used to define a function, the
returned list of variables are those created inside the function
body so far. Otherwise, returns an empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_vars
else:
return []
def get_extra_inputs():
"""Returns the captured input tensors by the function.
Returns:
If the default graph is being used to define a function, the
returned list of tensors are those accessed inside the function body
but defined outside the function body so far. Otherwise, returns an
empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_inputs
else:
return []
def get_extra_args():
"""Returns the corresponding function arguments for the captured inputs.
Returns:
    If the default graph is being used to define a function, the
    returned list contains the placeholders used inside the function
    body corresponding to those returned by get_extra_inputs(). Otherwise,
    returns an empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_args
else:
return []
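# A hedged sketch of the three getters above, called from inside a function
# body (illustrative; assumes `constant_op` is imported):
#
#   c = constant_op.constant(1.0)        # outer-graph tensor
#
#   @function.Defun(dtypes.float32)
#   def F(x):
#     y = x + c                          # captures `c`
#     # get_extra_inputs() -> [c]        (outer tensors captured so far)
#     # get_extra_args()   -> [placeholder standing in for `c`]
#     # get_extra_vars()   -> []         (no variables created in the body)
#     return y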
def _type_list_to_str(types):
if any(_ not in _DTYPE_TO_STR for _ in types):
raise ValueError("Unsupported dtypes: %s" % types)
return "".join([_DTYPE_TO_STR[_] for _ in types])
# NOTE: The list needs to be extended when more data types are added.
_DTYPE_TO_STR = {
dtypes.float16: "f16",
dtypes.float32: "f32",
dtypes.float64: "f64",
dtypes.int32: "i32",
dtypes.uint8: "i8",
dtypes.uint16: "u16",
dtypes.uint32: "u32",
dtypes.uint64: "u64",
dtypes.int16: "i16",
dtypes.int8: "i8",
dtypes.string: "s",
dtypes.complex64: "c64",
dtypes.complex128: "c128",
dtypes.int64: "i64",
dtypes.bool: "b",
dtypes.qint8: "qi8",
dtypes.quint8: "qu8",
dtypes.qint16: "qi16",
dtypes.quint16: "qu16",
dtypes.qint32: "qi32",
dtypes.bfloat16: "b16"
}
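# A hedged sketch: _type_list_to_str builds the overload key used by
# _OverloadedFunction.instantiate, e.g.
#
#   _type_list_to_str([dtypes.float32, dtypes.int64])   # -> "f32i64"
#   _type_list_to_str([dtypes.bool, dtypes.string])     # -> "bs"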
def function_def_from_tf_function(c_func):
"""Converts a SWIG-wrapped TF_Function* to a FunctionDef proto."""
with c_api_util.tf_buffer() as buf:
c_api.TF_FunctionToFunctionDef(c_func, buf)
data = c_api.TF_GetBuffer(buf)
fdef = function_pb2.FunctionDef()
fdef.ParseFromString(compat.as_bytes(data))
return fdef
|
tensorflow-master
|
tensorflow/python/framework/function.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.meta_graph.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os.path
import random
import shutil
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner_impl
# pylint: disable=invalid-name
def _TestDir(test_name):
test_dir = os.path.join(test.get_temp_dir(), test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
gfile.MakeDirs(test_dir)
return test_dir
# pylint: enable=invalid-name
class SimpleMetaGraphTest(test.TestCase):
@test_util.run_deprecated_v1
def testNoVariables(self):
test_dir = _TestDir("no_variables")
filename = os.path.join(test_dir, "metafile")
input_feed_value = -10 # Arbitrary input value for feed_dict.
orig_graph = ops.Graph()
with self.session(graph=orig_graph) as sess:
# Create a minimal graph with zero variables.
input_tensor = array_ops.placeholder(
dtypes.float32, shape=[], name="input")
offset = constant_op.constant(42, dtype=dtypes.float32, name="offset")
output_tensor = math_ops.add(input_tensor, offset, name="add_offset")
# Add input and output tensors to graph collections.
ops.add_to_collection("input_tensor", input_tensor)
ops.add_to_collection("output_tensor", output_tensor)
output_value = sess.run(output_tensor, {input_tensor: input_feed_value})
self.assertEqual(output_value, 32)
# Generates MetaGraphDef.
meta_graph_def, var_list = meta_graph.export_scoped_meta_graph(
filename=filename,
graph_def=ops.get_default_graph().as_graph_def(add_shapes=True),
collection_list=["input_tensor", "output_tensor"],
saver_def=None)
self.assertTrue(meta_graph_def.HasField("meta_info_def"))
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_version, "")
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_git_version,
"")
self.assertEqual({}, var_list)
# Create a clean graph and import the MetaGraphDef nodes.
new_graph = ops.Graph()
with self.session(graph=new_graph) as sess:
      # Import the previously exported meta graph.
meta_graph.import_scoped_meta_graph(filename)
# Re-exports the current graph state for comparison to the original.
new_meta_graph_def, _ = meta_graph.export_scoped_meta_graph(filename +
"_new")
test_util.assert_meta_graph_protos_equal(self, meta_graph_def,
new_meta_graph_def)
# Ensures that we can still get a reference to our graph collections.
new_input_tensor = ops.get_collection("input_tensor")[0]
new_output_tensor = ops.get_collection("output_tensor")[0]
# Verifies that the new graph computes the same result as the original.
new_output_value = sess.run(new_output_tensor,
{new_input_tensor: input_feed_value})
self.assertEqual(new_output_value, output_value)
@test_util.run_deprecated_v1
def testStrippedOpListNestedFunctions(self):
with self.cached_session():
# Square two levels deep
@function.Defun(dtypes.int32)
def f0(x):
return math_ops.square(x)
@function.Defun(dtypes.int32)
def f1(x):
return f0(x)
# At this point we've defined two functions but haven't called them, so
# there should be no used ops.
op_list = meta_graph.stripped_op_list_for_graph(ops.get_default_graph()
.as_graph_def())
self.assertEqual(len(op_list.op), 0)
# If we call the function on a constant, there should be two ops
_ = f1(constant_op.constant(7))
op_list = meta_graph.stripped_op_list_for_graph(ops.get_default_graph()
.as_graph_def())
self.assertEqual(["Const", "Square"], [op.name for op in op_list.op])
def testStrippedOpListRecursiveFunctions(self):
    # The function module doesn't support recursive functions, so we construct
    # a recursive case manually: A calls B, and B calls A and Const.
graph = graph_pb2.GraphDef()
a = graph.library.function.add()
b = graph.library.function.add()
a.signature.name = "A"
b.signature.name = "B"
a.node_def.add().op = "B"
b.node_def.add().op = "Const"
b.node_def.add().op = "A"
# Use A in the graph
graph.node.add().op = "A"
# The stripped op list should contain just Const.
op_list = meta_graph.stripped_op_list_for_graph(graph)
self.assertEqual(["Const"], [op.name for op in op_list.op])
@test_util.run_deprecated_v1
def testDefaultAttrStripping(self):
"""Verifies that default attributes are stripped from a graph def."""
# Complex Op has 2 attributes with defaults:
# o "T" : float32.
# o "Tout" : complex64.
# When inputs to the Complex Op are float32 instances, "T" maps to float32
# and "Tout" maps to complex64. Since these attr values map to their
# defaults, they must be stripped unless stripping of default attrs is
# disabled.
with self.cached_session():
real_num = constant_op.constant(1.0, dtype=dtypes.float32, name="real")
imag_num = constant_op.constant(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
# strip_default_attrs is enabled.
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
graph_def=ops.get_default_graph().as_graph_def(),
strip_default_attrs=True)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertNotIn("T", node_def.attr)
self.assertNotIn("Tout", node_def.attr)
self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)
# strip_default_attrs is disabled.
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
graph_def=ops.get_default_graph().as_graph_def(),
strip_default_attrs=False)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertIn("T", node_def.attr)
self.assertIn("Tout", node_def.attr)
self.assertFalse(meta_graph_def.meta_info_def.stripped_default_attrs)
# When inputs to the Complex Op are float64 instances, "T" maps to float64
# and "Tout" maps to complex128. Since these attr values don't map to their
# defaults, they must not be stripped.
with self.session(graph=ops.Graph()):
real_num = constant_op.constant(1.0, dtype=dtypes.float64, name="real")
imag_num = constant_op.constant(2.0, dtype=dtypes.float64, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
graph_def=ops.get_default_graph().as_graph_def(),
strip_default_attrs=True)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertEqual(node_def.attr["T"].type, dtypes.float64)
self.assertEqual(node_def.attr["Tout"].type, dtypes.complex128)
self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)
@test_util.run_deprecated_v1
def testDefaultAttrStrippingNestedFunctions(self):
"""Verifies that default attributes are stripped from function node defs."""
with self.cached_session():
@function.Defun(dtypes.float32, dtypes.float32)
def f0(i, j):
return math_ops.complex(i, j, name="double_nested_complex")
@function.Defun(dtypes.float32, dtypes.float32)
def f1(i, j):
return f0(i, j)
_ = f1(constant_op.constant(1.0), constant_op.constant(2.0))
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
graph_def=ops.get_default_graph().as_graph_def(),
strip_default_attrs=True)
double_nested_complex_node_def = None
for function_def in meta_graph_def.graph_def.library.function:
for node_def in function_def.node_def:
if node_def.name.startswith("double_nested_complex"):
double_nested_complex_node_def = node_def
break
if double_nested_complex_node_def:
break
self.assertIsNotNone(double_nested_complex_node_def)
self.assertNotIn("T", double_nested_complex_node_def.attr)
self.assertNotIn("Tout", double_nested_complex_node_def.attr)
self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)
def testDefaultAttrStrippingUnregisteredOps(self):
"""Verifies that nodes with un-registered ops are not stripped."""
graph_def = graph_pb2.GraphDef()
node = graph_def.node.add()
node.name = "node_with_unreg_op"
node.op = "unreg_op"
node.attr["attr_1"].i = 1
meta_info_def = meta_graph_pb2.MetaGraphDef.MetaInfoDef()
meta_info_def.stripped_op_list.op.add()
with self.cached_session():
meta_graph_def = meta_graph.create_meta_graph_def(
meta_info_def=meta_info_def, graph_def=graph_def,
strip_default_attrs=True)
node_def = test_util.get_node_def_from_graph("node_with_unreg_op",
meta_graph_def.graph_def)
self.assertEqual(node_def.attr["attr_1"].i, 1)
self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)
@test_util.run_deprecated_v1
def testVariableObjectsAreSharedAmongCollections(self):
with ops.Graph().as_default() as graph1:
v = variables.Variable(3.0)
# A single instance of Variable is shared among the collections:
global_vars = graph1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
trainable_vars = graph1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual(len(global_vars), 1)
self.assertEqual(len(trainable_vars), 1)
self.assertIs(global_vars[0], trainable_vars[0])
self.assertIs(v, global_vars[0])
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(graph=graph1)
del graph1 # To avoid accidental references in code involving graph2.
with ops.Graph().as_default() as graph2:
meta_graph.import_scoped_meta_graph(orig_meta_graph)
global_vars = graph2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
trainable_vars = graph2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual(len(global_vars), 1)
self.assertEqual(len(trainable_vars), 1)
# A single instance of Variable is shared among the collections:
self.assertIs(global_vars[0], trainable_vars[0])
@test_util.run_deprecated_v1
def testMetricVariablesCollectionLoadsBytesList(self):
with ops.Graph().as_default() as graph1:
v1 = variables.Variable(
[1, 2, 3], shape=[3], dtype=dtypes.float64, name="v")
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(graph=graph1)
# Copy bytes list from global variables collection to metric variables.
orig_meta_graph.collection_def[ops.GraphKeys.METRIC_VARIABLES].CopyFrom(
orig_meta_graph.collection_def["variables"])
with ops.Graph().as_default() as graph2:
meta_graph.import_scoped_meta_graph(orig_meta_graph)
var_list = graph2.get_collection(ops.GraphKeys.METRIC_VARIABLES)
self.assertEqual(len(var_list), 1)
v2 = var_list[0]
self.assertIsInstance(v2, variables.Variable)
self.assertEqual(v1.name, v2.name)
self.assertEqual(v1.dtype, v2.dtype)
self.assertEqual(v1.shape, v2.shape)
class ScopedMetaGraphTest(test.TestCase):
def _testScopedExport(self, test_dir, exported_filenames):
graph = ops.Graph()
with graph.as_default():
# Creates an inference graph.
# Hidden 1
colocate_constraint = constant_op.constant(1.2, name="constraint")
images = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
with ops.name_scope("hidden1"):
with graph.colocate_with(colocate_constraint.op):
weights1 = variables.Variable(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
        # The use of control_flow_ops.cond here is purely to add test coverage
        # for saving and restoring control flow contexts (which makes no sense
        # here from a machine learning perspective). Typical biases would be a
        # simple Variable without the condition.
biases1 = variables.Variable(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights1) + biases1)
# Hidden 2
with ops.name_scope("hidden2"):
weights2 = variables.Variable(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
        # The use of control_flow_ops.while_loop here is purely to add test
        # coverage for saving and restoring control flow contexts (which makes
        # no sense here from a machine learning perspective). Typical biases
        # would be a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases2):
biases2 += constant_op.constant(0.1, shape=[32])
return it + 1, biases2
_, biases2 = control_flow_ops.while_loop(
loop_cond,
loop_body, [
constant_op.constant(0), variables.Variable(
array_ops.zeros([32]), name="biases")
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights2) + biases2)
# Linear
with ops.name_scope("softmax_linear"):
weights3 = variables.Variable(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases3 = variables.Variable(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights3) + biases3
ops.add_to_collection("logits", logits)
# Exports each sub-graph.
# Exports the first one with unbound_inputs_col_name set to default.
orig_meta_graph1, var_list = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filenames[0]),
graph=ops.get_default_graph(),
export_scope="hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
var_names = [v.name for _, v in var_list.items()]
self.assertEqual(["hidden1/biases:0", "hidden1/weights:0"],
sorted(var_names))
# Exports the rest with no unbound_inputs_col_name.
orig_meta_graph2, _ = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filenames[1]),
graph=ops.get_default_graph(),
export_scope="hidden2",
unbound_inputs_col_name=None)
orig_meta_graph3, _ = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filenames[2]),
graph=ops.get_default_graph(),
export_scope="softmax_linear",
unbound_inputs_col_name=None)
return [orig_meta_graph1, orig_meta_graph2, orig_meta_graph3]
def _testScopedImport(self, test_dir, exported_filenames):
graph = ops.Graph()
# Create all the missing inputs.
with graph.as_default():
new_image = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filenames[0]),
graph=graph,
import_scope="new_hidden1")
with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filenames[0]),
graph=graph,
input_map={"image:0": new_image},
import_scope="new_hidden1")
# Verifies we can import the original "hidden1" into "new_hidden1".
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filenames[0]),
graph=graph,
input_map={"$unbound_inputs_images": new_image},
import_scope="new_hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
new_var_names = [v.name for _, v in var_list.items()]
self.assertEqual(["new_hidden1/biases:0", "new_hidden1/weights:0"],
sorted(new_var_names))
# Verifies we can import the original "hidden2" into "new_hidden2".
hidden1 = array_ops.identity(
graph.as_graph_element("new_hidden1/Relu:0"), name="hidden1/Relu")
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filenames[1]),
graph=graph,
input_map={"$unbound_inputs_hidden1/Relu": hidden1},
import_scope="new_hidden2",
unbound_inputs_col_name=None)
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
new_var_names = [v.name for _, v in var_list.items()]
self.assertEqual(["new_hidden2/biases:0", "new_hidden2/weights:0"],
sorted(new_var_names))
# Verifies we can import the original "softmax_linear" into
# "new_softmax_linear".
hidden2 = array_ops.identity(
graph.as_graph_element("new_hidden2/Relu:0"), name="hidden2/Relu")
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filenames[2]),
graph=graph,
input_map={"$unbound_inputs_hidden2/Relu": hidden2},
import_scope="new_softmax_linear",
unbound_inputs_col_name=None)
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
new_var_names = [v.name for _, v in var_list.items()]
self.assertEqual(
["new_softmax_linear/biases:0", "new_softmax_linear/weights:0"],
sorted(new_var_names))
# Exports the scoped meta graphs again.
new_meta_graph1, var_list = meta_graph.export_scoped_meta_graph(
graph=graph, export_scope="new_hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
new_meta_graph2, var_list = meta_graph.export_scoped_meta_graph(
graph=graph, export_scope="new_hidden2", unbound_inputs_col_name=None)
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
new_meta_graph3, var_list = meta_graph.export_scoped_meta_graph(
graph=graph,
export_scope="new_softmax_linear",
unbound_inputs_col_name=None)
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
return [new_meta_graph1, new_meta_graph2, new_meta_graph3]
# Verifies that we can export the subgraph under each layer and import
# them into new layers in a new graph.
@test_util.run_deprecated_v1
def testScopedExportAndImport(self):
test_dir = _TestDir("scoped_export_import")
filenames = [
"exported_hidden1.pbtxt", "exported_hidden2.pbtxt",
"exported_softmax_linear.pbtxt"
]
orig_meta_graphs = self._testScopedExport(test_dir, filenames)
new_meta_graphs = self._testScopedImport(test_dir, filenames)
for a, b in zip(orig_meta_graphs, new_meta_graphs):
# The unbound input strings are slightly different with the C API enabled
# ("images" vs "images:0") due to the original import_graph_def code
# vs. ImportGraphDef in C++.
# TODO(skyewm): update the pbtxts once _USE_C_API is removed.
del a.collection_def["unbound_inputs"]
del b.collection_def["unbound_inputs"]
test_util.assert_meta_graph_protos_equal(self, a, b)
def testWhileLoopGradients(self):
# Create a simple while loop.
with ops.Graph().as_default():
with ops.name_scope("export"):
var = variables.Variable(0.)
var_name = var.name
_, output = control_flow_ops.while_loop(
lambda i, x: i < 5,
lambda i, x: (i + 1, x + math_ops.cast(i, dtypes.float32)),
[0, var])
output_name = output.name
# Generate a MetaGraphDef containing the while loop with an export scope.
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
export_scope="export")
# Build and run the gradients of the while loop. We use this below to
# verify that the gradients are correct with the imported MetaGraphDef.
init_op = variables.global_variables_initializer()
grad = gradients_impl.gradients([output], [var])
with session.Session() as sess:
self.evaluate(init_op)
expected_grad_value = self.evaluate(grad)
# Restore the MetaGraphDef into a new Graph with an import scope.
with ops.Graph().as_default():
meta_graph.import_scoped_meta_graph(meta_graph_def, import_scope="import")
# Re-export and make sure we get the same MetaGraphDef.
new_meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
export_scope="import")
test_util.assert_meta_graph_protos_equal(
self, meta_graph_def, new_meta_graph_def)
# Make sure we can still build gradients and get the same result.
def new_name(tensor_name):
base_tensor_name = tensor_name.replace("export/", "")
return "import/" + base_tensor_name
var = ops.get_default_graph().get_tensor_by_name(new_name(var_name))
output = ops.get_default_graph().get_tensor_by_name(new_name(output_name))
grad = gradients_impl.gradients([output], [var])
init_op = variables.global_variables_initializer()
with session.Session() as sess:
self.evaluate(init_op)
actual_grad_value = self.evaluate(grad)
self.assertEqual(expected_grad_value, actual_grad_value)
@test_util.run_v1_only("b/120545219")
def testImportWhileLoopInWhileLoop(self):
# Create a simple while loop.
with ops.Graph().as_default():
var = variables.Variable(0.0)
_, output = control_flow_ops.while_loop(lambda i, x: i < 5,
lambda i, x: (i + 1, x * 2.0),
[0, var])
output_name = output.name
# Generate a MetaGraphDef containing the while loop with an export scope.
meta_graph_def, _ = meta_graph.export_scoped_meta_graph()
# Restore the MetaGraphDef in a while loop in a new graph.
with ops.Graph().as_default():
def body(i, _):
meta_graph.import_scoped_meta_graph(meta_graph_def)
return i + 1, ops.get_default_graph().get_tensor_by_name(output_name)
_, x = control_flow_ops.while_loop(lambda i, x: i < 2, body, [0, 0.0],
name="")
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
self.evaluate(x)
@test_util.run_deprecated_v1
def testScopedImportUnderNameScope(self):
graph = ops.Graph()
with graph.as_default():
variables.Variable(initial_value=1.0, trainable=True, name="myvar")
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(graph=graph)
graph = ops.Graph()
with graph.as_default():
with ops.name_scope("foo"):
imported_variables = meta_graph.import_scoped_meta_graph(
meta_graph_def, import_scope="bar")
self.assertEqual(len(imported_variables), 1)
self.assertEqual(list(imported_variables.values())[0].name,
"foo/bar/myvar:0")
@test_util.run_deprecated_v1
def testScopedImportUnderNameScopeNoVarScope(self):
graph = ops.Graph()
with graph.as_default():
variables.Variable(initial_value=1.0, trainable=True, name="myvar")
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(graph=graph)
graph = ops.Graph()
with graph.as_default():
with ops.name_scope("foo"):
imported_variables = meta_graph.import_scoped_meta_graph(
meta_graph_def)
self.assertEqual(len(imported_variables), 1)
self.assertEqual(list(imported_variables.values())[0].name,
"foo/myvar:0")
def testImportsUsingSameScopeName(self):
with ops.Graph().as_default():
variables.Variable(0, name="v")
meta_graph_def, _ = meta_graph.export_scoped_meta_graph()
with ops.Graph().as_default():
for suffix in ["", "_1"]:
imported_variables = meta_graph.import_scoped_meta_graph(
meta_graph_def, import_scope="s")
self.assertEqual(len(imported_variables), 1)
self.assertEqual(list(imported_variables.keys())[0], "v:0")
self.assertEqual(list(imported_variables.values())[0].name,
"s" + suffix + "/v:0")
@test_util.run_deprecated_v1
def testScopedImportWithSelectedCollections(self):
meta_graph_filename = os.path.join(
_TestDir("selected_collections_import"), "meta_graph.pb")
graph = ops.Graph()
# Add a variable to populate two collections. The functionality tested is
# not specific to variables, but using variables in the test is convenient.
with graph.as_default():
variables.Variable(initial_value=1.0, trainable=True)
self.assertTrue(
all(
graph.get_collection(key)
for key in
[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES]
))
meta_graph.export_scoped_meta_graph(
filename=meta_graph_filename, graph=graph)
def _test_import(include_collection_keys, omit_collection_keys):
assert set(include_collection_keys).isdisjoint(omit_collection_keys)
newgraph = ops.Graph()
import_scope = "some_scope_name"
def _restore_collections_predicate(collection_key):
return (collection_key in include_collection_keys and
collection_key not in omit_collection_keys)
meta_graph.import_scoped_meta_graph(
meta_graph_filename,
graph=newgraph,
import_scope=import_scope,
restore_collections_predicate=_restore_collections_predicate)
collection_values = [
newgraph.get_collection(name=key, scope=import_scope)
for key in include_collection_keys
]
self.assertTrue(all(collection_values))
collection_values = [
newgraph.get_collection(name=key, scope=import_scope)
for key in omit_collection_keys
]
self.assertFalse(any(collection_values))
_test_import(
include_collection_keys=[
ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES
],
omit_collection_keys=[])
_test_import(
include_collection_keys=[ops.GraphKeys.GLOBAL_VARIABLES],
omit_collection_keys=[ops.GraphKeys.TRAINABLE_VARIABLES])
_test_import(
include_collection_keys=[ops.GraphKeys.TRAINABLE_VARIABLES],
omit_collection_keys=[ops.GraphKeys.GLOBAL_VARIABLES])
_test_import(
include_collection_keys=[],
omit_collection_keys=[
ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES
])
def _testScopedExportWithQueue(self, test_dir, exported_filename):
graph = ops.Graph()
with graph.as_default():
with ops.name_scope("queue1"):
input_queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
enqueue = input_queue.enqueue((9876), name="enqueue")
close = input_queue.close(name="close")
qr = queue_runner_impl.QueueRunner(input_queue, [enqueue], close)
queue_runner_impl.add_queue_runner(qr)
input_queue.dequeue(name="dequeue")
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filename),
graph=ops.get_default_graph(),
export_scope="queue1")
return orig_meta_graph
def _testScopedImportWithQueue(self, test_dir, exported_filename,
new_exported_filename):
graph = ops.Graph()
meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filename),
graph=graph,
import_scope="new_queue1")
graph.as_graph_element("new_queue1/dequeue:0")
graph.as_graph_element("new_queue1/close")
with graph.as_default():
new_meta_graph, _ = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, new_exported_filename),
graph=graph,
export_scope="new_queue1")
return new_meta_graph
# Verifies that we can export the subgraph containing a FIFOQueue under
# "queue1" and import it into "new_queue1" in a new graph.
@test_util.run_deprecated_v1
def testScopedWithQueue(self):
test_dir = _TestDir("scoped_with_queue")
orig_meta_graph = self._testScopedExportWithQueue(test_dir,
"exported_queue1.pbtxt")
new_meta_graph = self._testScopedImportWithQueue(
test_dir, "exported_queue1.pbtxt", "exported_new_queue1.pbtxt")
test_util.assert_meta_graph_protos_equal(self, orig_meta_graph,
new_meta_graph)
def testExportDebugInfo(self):
graph1 = ops.Graph()
with graph1.as_default():
with ops.name_scope("hidden1/hidden2/hidden3"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
name="weights")
biases1 = resource_variable_ops.ResourceVariable(
[0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
debug_info_def = meta_graph.create_graph_debug_info_def(
operations=graph1.get_operations())
    # There should be at least one unique file name across all the stack
    # traces.
self.assertTrue(len(debug_info_def.files) >= 1)
# All the nodes from the exported graphdef are included.
self.assertEqual(len(debug_info_def.traces), len(graph1.get_operations()))
# Verifies that we can export a subgraph in a nested name scope containing a
# "hidden1/hidden2" and import it into "new_hidden1/new_hidden2" in a new
# graph.
def doTestExportNestedNames(self, use_resource=False):
graph1 = ops.Graph()
with graph1.as_default():
with ops.name_scope("hidden1/hidden2/hidden3"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
if use_resource:
weights1 = variables.Variable(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = resource_variable_ops.ResourceVariable(
[0.1] * 3, name="biases")
else:
biases1 = variables.Variable([0.1] * 3, name="biases")
weights1 = variables.Variable(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
orig_meta_graph, var_list = meta_graph.export_scoped_meta_graph(
export_scope="hidden1/hidden2", graph=graph1)
var_names = [v.name for _, v in var_list.items()]
self.assertEqual(["hidden3/biases:0", "hidden3/weights:0"],
sorted(var_list.keys()))
self.assertEqual([
"hidden1/hidden2/hidden3/biases:0", "hidden1/hidden2/hidden3/weights:0"
], sorted(var_names))
for node in orig_meta_graph.graph_def.node:
self.assertTrue(node.name.startswith("hidden3"))
graph2 = ops.Graph()
new_var_list = meta_graph.import_scoped_meta_graph(
orig_meta_graph, import_scope="new_hidden1/new_hidden2", graph=graph2)
self.assertEqual(["hidden3/biases:0", "hidden3/weights:0"],
sorted(new_var_list.keys()))
new_var_names = [v.name for _, v in new_var_list.items()]
self.assertEqual([
"new_hidden1/new_hidden2/hidden3/biases:0",
"new_hidden1/new_hidden2/hidden3/weights:0"
], sorted(new_var_names))
nodes = [
"new_hidden1/new_hidden2/hidden3/biases/Assign",
"new_hidden1/new_hidden2/hidden3/weights/Assign"
]
expected = [
b"loc:@new_hidden1/new_hidden2/hidden3/biases",
b"loc:@new_hidden1/new_hidden2/hidden3/weights"
]
for n, e in zip(nodes, expected):
self.assertEqual([e], graph2.get_operation_by_name(n).get_attr("_class"))
@test_util.run_deprecated_v1
def testExportNestedNames(self):
self.doTestExportNestedNames(use_resource=False)
@test_util.run_deprecated_v1
def testExportNestedNamesResource(self):
self.doTestExportNestedNames(use_resource=True)
@test_util.run_deprecated_v1
def testPotentialCycle(self):
graph1 = ops.Graph()
with graph1.as_default():
a = constant_op.constant(1.0, shape=[2, 2])
b = constant_op.constant(2.0, shape=[2, 2])
matmul = math_ops.matmul(a, b)
with ops.name_scope("hidden1"):
c = nn_ops.relu(matmul)
d = constant_op.constant(3.0, shape=[2, 2])
matmul = math_ops.matmul(c, d)
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
export_scope="hidden1", graph=graph1)
graph2 = ops.Graph()
with graph2.as_default():
with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
meta_graph.import_scoped_meta_graph(
orig_meta_graph, import_scope="new_hidden1")
meta_graph.import_scoped_meta_graph(
orig_meta_graph,
import_scope="new_hidden1",
input_map={
"$unbound_inputs_MatMul": constant_op.constant(
4.0, shape=[2, 2])
})
@test_util.run_deprecated_v1
def testClearDevices(self):
graph1 = ops.Graph()
with graph1.as_default():
with ops.device("/device:CPU:0"):
a = variables.Variable(
constant_op.constant(
1.0, shape=[2, 2]), name="a")
with ops.device("/job:ps/replica:0/task:0/device:GPU:0"):
b = variables.Variable(
constant_op.constant(
2.0, shape=[2, 2]), name="b")
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
math_ops.matmul(a, b, name="matmul")
self.assertEqual("/device:CPU:0", str(graph1.as_graph_element("a").device))
self.assertEqual("/job:ps/replica:0/task:0/device:GPU:0",
str(graph1.as_graph_element("b").device))
self.assertEqual("/job:localhost/replica:0/task:0/device:CPU:0",
str(graph1.as_graph_element("matmul").device))
# Verifies that devices are cleared on export.
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
graph=graph1, clear_devices=True)
graph2 = ops.Graph()
with graph2.as_default():
meta_graph.import_scoped_meta_graph(orig_meta_graph, clear_devices=False)
self.assertEqual("", str(graph2.as_graph_element("a").device))
self.assertEqual("", str(graph2.as_graph_element("b").device))
self.assertEqual("", str(graph2.as_graph_element("matmul").device))
# Verifies that devices are cleared on export when passing in graph_def.
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
graph_def=graph1.as_graph_def(), clear_devices=True)
graph2 = ops.Graph()
with graph2.as_default():
meta_graph.import_scoped_meta_graph(orig_meta_graph, clear_devices=False)
self.assertEqual("", str(graph2.as_graph_element("a").device))
self.assertEqual("", str(graph2.as_graph_element("b").device))
self.assertEqual("", str(graph2.as_graph_element("matmul").device))
# Verifies that devices are cleared on import.
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
graph=graph1, clear_devices=False)
graph2 = ops.Graph()
with graph2.as_default():
meta_graph.import_scoped_meta_graph(orig_meta_graph, clear_devices=True)
self.assertEqual("", str(graph2.as_graph_element("a").device))
self.assertEqual("", str(graph2.as_graph_element("b").device))
self.assertEqual("", str(graph2.as_graph_element("matmul").device))
class MetaGraphWithVariableScopeTest(test.TestCase):
@test_util.run_deprecated_v1
def testMetricsCollection(self):
def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(
values, dtype=dtype, shape=shape)))
meta_graph_filename = os.path.join(
_TestDir("metrics_export"), "meta_graph.pb")
graph = ops.Graph()
with self.session(graph=graph) as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
_, update_op = metrics.mean(values)
initializer = variables.local_variables_initializer()
self.evaluate(initializer)
self.evaluate(update_op)
meta_graph.export_scoped_meta_graph(
filename=meta_graph_filename, graph=graph)
# Verifies that importing a meta_graph with LOCAL_VARIABLES collection
# works correctly.
graph = ops.Graph()
with self.session(graph=graph) as sess:
meta_graph.import_scoped_meta_graph(meta_graph_filename)
initializer = variables.local_variables_initializer()
self.evaluate(initializer)
    # Verifies that importing an old meta_graph where the "local_variables"
    # collection is of node_list type works, but an initializer cannot be
    # built from that collection.
graph = ops.Graph()
with self.session(graph=graph) as sess:
meta_graph.import_scoped_meta_graph(
test.test_src_dir_path(
"python/framework/testdata/metrics_export_meta_graph.pb"))
self.assertEqual(len(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)),
2)
with self.assertRaisesRegexp(
AttributeError, "'Tensor' object has no attribute 'initializer'"):
initializer = variables.local_variables_initializer()
class ExportImportAcrossScopesTest(test.TestCase):
@test_util.run_deprecated_v1
def testPartitionedVariables(self):
def make_graph_with_partitioned_variables(use_resource):
variable_scope.get_variable(
name="weights",
partitioner=partitioned_variables.fixed_size_partitioner(3, axis=0),
initializer=random_ops.truncated_normal([100, 10]),
use_resource=use_resource)
# The next variable illustrates the necessity of restoring collections
# in a deterministic fashion when using ResourceVariables.
variable_scope.get_variable(
name="another",
shape=[],
collections=["a", "b", "z", "f", "e", "d", "g"],
use_resource=use_resource)
self._testExportImportAcrossScopes(
make_graph_with_partitioned_variables, use_resource=False)
self._testExportImportAcrossScopes(
make_graph_with_partitioned_variables, use_resource=True)
def _testExportImportAcrossScopes(self, graph_fn, use_resource):
"""Tests export and importing a graph across scopes.
Args:
graph_fn: A closure that creates a graph on the current scope.
use_resource: A bool indicating whether or not to use ResourceVariables.
"""
with ops.Graph().as_default() as original_graph:
with variable_scope.variable_scope("dropA/dropB/keepA"):
graph_fn(use_resource=use_resource)
exported_meta_graph_def = meta_graph.export_scoped_meta_graph(
graph=original_graph,
export_scope="dropA/dropB")[0]
with ops.Graph().as_default() as imported_graph:
meta_graph.import_scoped_meta_graph(
exported_meta_graph_def,
import_scope="importA")
with ops.Graph().as_default() as expected_graph:
with variable_scope.variable_scope("importA/keepA"):
graph_fn(use_resource=use_resource)
result = meta_graph.export_scoped_meta_graph(graph=imported_graph)[0]
expected = meta_graph.export_scoped_meta_graph(graph=expected_graph)[0]
if use_resource:
# Clear all shared_name attributes before comparing, since they are
# orthogonal to scopes and are not updated on export/import.
for meta_graph_def in [result, expected]:
for node in meta_graph_def.graph_def.node:
shared_name_attr = "shared_name"
shared_name_value = node.attr.get(shared_name_attr, None)
if shared_name_value and shared_name_value.HasField("s"):
if shared_name_value.s:
node.attr[shared_name_attr].s = b""
test_util.assert_meta_graph_protos_equal(self, expected, result)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/framework/meta_graph_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
import numpy as np
from google.protobuf import json_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Helpers for creating Example objects
example = example_pb2.Example
feature = feature_pb2.Feature
features = lambda d: feature_pb2.Features(feature=d)
bytes_feature = lambda v: feature(bytes_list=feature_pb2.BytesList(value=v))
int64_feature = lambda v: feature(int64_list=feature_pb2.Int64List(value=v))
float_feature = lambda v: feature(float_list=feature_pb2.FloatList(value=v))
# Helpers for creating SequenceExample objects
feature_list = lambda l: feature_pb2.FeatureList(feature=l)
feature_lists = lambda d: feature_pb2.FeatureLists(feature_list=d)
sequence_example = example_pb2.SequenceExample
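# Illustrative composition of the helpers above (a minimal sketch, not part
# of the original test suite; the feature names here are hypothetical):
_doc_example = example(
    features=features({
        "tokens": bytes_feature([b"hello", b"world"]),
        "count": int64_feature([2]),
        "score": float_feature([0.5]),
    }))
_doc_example.SerializeToString()  # Smoke check: the proto serializes.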
def flatten(list_of_lists):
"""Flatten one level of nesting."""
return itertools.chain.from_iterable(list_of_lists)
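# E.g. list(flatten([[1, 2], [3]])) == [1, 2, 3].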
def flatten_values_tensors_or_sparse(tensors_list):
"""Flatten each SparseTensor object into 3 Tensors for session.run()."""
return list(
flatten([[v.indices, v.values, v.dense_shape]
if isinstance(v, sparse_tensor.SparseTensor) else [v]
for v in tensors_list]))
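# E.g. (sketch): flatten_values_tensors_or_sparse([dense_t, sparse_t]) ==
# [dense_t, sparse_t.indices, sparse_t.values, sparse_t.dense_shape].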
def _compare_output_to_expected(tester, dict_tensors, expected_tensors,
flat_output):
tester.assertEqual(set(dict_tensors.keys()), set(expected_tensors.keys()))
i = 0 # Index into the flattened output of session.run()
for k, v in dict_tensors.items():
expected_v = expected_tensors[k]
tf_logging.info("Comparing key: %s", k)
if isinstance(v, sparse_tensor.SparseTensor):
# Three outputs for SparseTensor: indices, values, shape.
tester.assertEqual([k, len(expected_v)], [k, 3])
tester.assertAllEqual(expected_v[0], flat_output[i])
tester.assertAllEqual(expected_v[1], flat_output[i + 1])
tester.assertAllEqual(expected_v[2], flat_output[i + 2])
i += 3
else:
# One output for standard Tensor.
tester.assertAllEqual(expected_v, flat_output[i])
i += 1
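# Example (sketch): with dict_tensors = {"s": <SparseTensor>, "d": <Tensor>},
# flat_output must hold [s.indices, s.values, s.dense_shape, d] in key
# iteration order: each sparse key consumes three slots, each dense key one.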
class ParseExampleTest(test.TestCase):
def _test(self, kwargs, expected_values=None, expected_err=None):
with self.cached_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
out = parsing_ops.parse_example(**kwargs)
sess.run(flatten_values_tensors_or_sparse(out.values()))
return
else:
# Returns dict w/ Tensors and SparseTensors.
out = parsing_ops.parse_example(**kwargs)
result = flatten_values_tensors_or_sparse(out.values())
# Check values.
tf_result = self.evaluate(result)
_compare_output_to_expected(self, out, expected_values, tf_result)
# Check shapes; if serialized is a Tensor we need its size to
# properly check.
serialized = kwargs["serialized"]
batch_size = (
self.evaluate(serialized).size if isinstance(serialized, ops.Tensor)
else np.asarray(serialized).size)
for k, f in kwargs["features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(
tuple(out[k].get_shape().as_list()), (batch_size,) + f.shape)
elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
tuple(out[k].indices.get_shape().as_list()), (None, 2))
self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(out[k].dense_shape.get_shape().as_list()), (2,))
@test_util.run_deprecated_v1
def testEmptySerializedWithAllDefaults(self):
sparse_name = "st_a"
a_name = "a"
b_name = "b"
c_name = "c:has_a_tricky_name"
a_default = [0, 42, 0]
b_default = np.random.rand(3, 3).astype(bytes)
c_default = np.random.rand(2).astype(np.float32)
expected_st_a = ( # indices, values, shape
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # st_a is DT_INT64
np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0
expected_output = {
sparse_name: expected_st_a,
a_name: np.array(2 * [[a_default]]),
b_name: np.array(2 * [b_default]),
c_name: np.array(2 * [c_default]),
}
self._test({
"example_names": np.empty((0,), dtype=bytes),
"serialized": ops.convert_to_tensor(["", ""]),
"features": {
sparse_name:
parsing_ops.VarLenFeature(dtypes.int64),
a_name:
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
b_name:
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
c_name:
parsing_ops.FixedLenFeature(
(2,), dtypes.float32, default_value=c_default),
}
}, expected_output)
def testEmptySerializedWithoutDefaultsShouldFail(self):
input_features = {
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=[0, 42, 0]),
"b":
parsing_ops.FixedLenFeature(
(3, 3),
dtypes.string,
default_value=np.random.rand(3, 3).astype(bytes)),
# Feature "c" is missing a default, this gap will cause failure.
"c":
parsing_ops.FixedLenFeature((2,), dtype=dtypes.float32),
}
# Edge case where the key is there but the feature value is empty
original = example(features=features({"c": feature()}))
self._test(
{
"example_names": ["in1"],
"serialized": [original.SerializeToString()],
"features": input_features,
},
expected_err=(
errors_impl.OpError,
"Name: in1, Feature: c \\(data type: float\\) is required"))
# Standard case of missing key and value.
self._test(
{
"example_names": ["in1", "in2"],
"serialized": ["", ""],
"features": input_features,
},
expected_err=(
errors_impl.OpError,
"Name: in1, Feature: c \\(data type: float\\) is required"))
def testDenseNotMatchingShapeShouldFail(self):
original = [
example(features=features({
"a": float_feature([1, 1, 3]),
})),
example(features=features({
"a": float_feature([-1, -1]),
}))
]
names = ["passing", "failing"]
serialized = [m.SerializeToString() for m in original]
self._test(
{
"example_names": names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a": parsing_ops.FixedLenFeature((1, 3), dtypes.float32)
}
},
expected_err=(errors_impl.OpError,
"Name: failing, Key: a, Index: 1. Number of float val"))
def testDenseDefaultNoShapeShouldFail(self):
original = [
example(features=features({
"a": float_feature([1, 1, 3]),
})),
]
serialized = [m.SerializeToString() for m in original]
self._test(
{
"example_names": ["failing"],
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a": parsing_ops.FixedLenFeature(None, dtypes.float32)
}
},
expected_err=(ValueError, "Missing shape for feature a"))
@test_util.run_deprecated_v1
def testSerializedContainingSparse(self):
original = [
example(features=features({
"st_c": float_feature([3, 4])
})),
example(
features=features({
"st_c": float_feature([]), # empty float list
})),
example(
features=features({
"st_d": feature(), # feature with nothing in it
})),
example(
features=features({
"st_c": float_feature([1, 2, -1]),
"st_d": bytes_feature([b"hi"])
}))
]
serialized = [m.SerializeToString() for m in original]
expected_st_c = ( # indices, values, shape
np.array([[0, 0], [0, 1], [3, 0], [3, 1], [3, 2]], dtype=np.int64),
np.array([3.0, 4.0, 1.0, 2.0, -1.0], dtype=np.float32),
np.array([4, 3], dtype=np.int64)) # batch == 4, max_elems = 3
expected_st_d = ( # indices, values, shape
np.array([[3, 0]], dtype=np.int64), np.array(["hi"], dtype=bytes),
np.array([4, 1], dtype=np.int64)) # batch == 4, max_elems = 1
expected_output = {
"st_c": expected_st_c,
"st_d": expected_st_d,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"st_c": parsing_ops.VarLenFeature(dtypes.float32),
"st_d": parsing_ops.VarLenFeature(dtypes.string)
}
}, expected_output)
def testSerializedContainingSparseFeature(self):
original = [
example(
features=features({
"val": float_feature([3, 4]),
"idx": int64_feature([5, 10])
})),
example(
features=features({
"val": float_feature([]), # empty float list
"idx": int64_feature([])
})),
example(
features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(
features=features({
"val": float_feature([1, 2, -1]),
"idx":
int64_feature([0, 9, 3]) # unsorted
}))
]
serialized = [m.SerializeToString() for m in original]
expected_sp = ( # indices, values, shape
np.array([[0, 5], [0, 10], [3, 0], [3, 3], [3, 9]], dtype=np.int64),
np.array([3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32),
np.array([4, 13], dtype=np.int64)) # batch == 4, max_elems = 13
expected_output = {
"sp": expected_sp,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.float32, [13])
}
}, expected_output)
def testSerializedContainingSparseFeatureReuse(self):
original = [
example(
features=features({
"val1": float_feature([3, 4]),
"val2": float_feature([5, 6]),
"idx": int64_feature([5, 10])
})),
example(
features=features({
"val1": float_feature([]), # empty float list
"idx": int64_feature([])
})),
]
serialized = [m.SerializeToString() for m in original]
expected_sp1 = ( # indices, values, shape
np.array([[0, 5], [0, 10]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32), np.array(
[2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
expected_sp2 = ( # indices, values, shape
np.array([[0, 5], [0, 10]], dtype=np.int64),
np.array([5.0, 6.0], dtype=np.float32), np.array(
[2, 7], dtype=np.int64)) # batch == 2, max_elems = 7
expected_output = {
"sp1": expected_sp1,
"sp2": expected_sp2,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"sp1":
parsing_ops.SparseFeature("idx", "val1", dtypes.float32, 13),
"sp2":
parsing_ops.SparseFeature(
"idx", "val2", dtypes.float32, size=7, already_sorted=True)
}
}, expected_output)
def testSerializedContaining3DSparseFeature(self):
original = [
example(
features=features({
"val": float_feature([3, 4]),
"idx0": int64_feature([5, 10]),
"idx1": int64_feature([0, 2]),
})),
example(
features=features({
"val": float_feature([]), # empty float list
"idx0": int64_feature([]),
"idx1": int64_feature([]),
})),
example(
features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(
features=features({
"val": float_feature([1, 2, -1]),
"idx0": int64_feature([0, 9, 3]), # unsorted
"idx1": int64_feature([1, 0, 2]),
}))
]
serialized = [m.SerializeToString() for m in original]
expected_sp = (
# indices
np.array(
[[0, 5, 0], [0, 10, 2], [3, 0, 1], [3, 3, 2], [3, 9, 0]],
dtype=np.int64),
# values
np.array([3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32),
# shape batch == 4, max_elems = 13
np.array([4, 13, 3], dtype=np.int64))
expected_output = {
"sp": expected_sp,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"sp":
parsing_ops.SparseFeature(["idx0", "idx1"], "val",
dtypes.float32, [13, 3])
}
}, expected_output)
def testSerializedContainingDense(self):
aname = "a"
bname = "b*has+a:tricky_name"
original = [
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
})),
example(
features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b""]),
}))
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
aname:
np.array([[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
bname:
np.array(["b0_str", ""], dtype=bytes).reshape(2, 1, 1, 1, 1),
}
# No defaults, values required
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenFeature((1, 1, 1, 1), dtype=dtypes.string),
}
}, expected_output)
# This test is identical to the previous one except
# for the creation of 'serialized'.
def testSerializedContainingDenseWithConcat(self):
aname = "a"
bname = "b*has+a:tricky_name"
# TODO(lew): Feature appearing twice should be an error in the future.
original = [
(example(features=features({
aname: float_feature([10, 10]),
})),
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
}))),
(
example(features=features({
bname: bytes_feature([b"b100"]),
})),
example(
features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b"b1"]),
})),
),
]
serialized = [
m.SerializeToString() + n.SerializeToString() for (m, n) in original
]
expected_output = {
aname:
np.array([[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
bname:
np.array(["b0_str", "b1"], dtype=bytes).reshape(2, 1, 1, 1, 1),
}
# No defaults, values required
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenFeature((1, 1, 1, 1), dtype=dtypes.string),
}
}, expected_output)
def testSerializedContainingDenseScalar(self):
original = [
example(features=features({
"a": float_feature([1]),
})),
example(features=features({}))
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"a":
np.array([[1], [-1]], dtype=np.float32) # 2x1 (column vector)
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a":
parsing_ops.FixedLenFeature(
(1,), dtype=dtypes.float32, default_value=-1),
}
}, expected_output)
def testSerializedContainingDenseWithDefaults(self):
original = [
example(features=features({
"a": float_feature([1, 1]),
})),
example(features=features({
"b": bytes_feature([b"b1"]),
})),
example(features=features({
"b": feature()
})),
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"a":
np.array([[1, 1], [3, -3], [3, -3]], dtype=np.float32).reshape(
3, 1, 2, 1),
"b":
np.array(["tmp_str", "b1", "tmp_str"], dtype=bytes).reshape(
3, 1, 1, 1, 1),
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a":
parsing_ops.FixedLenFeature(
(1, 2, 1), dtype=dtypes.float32, default_value=[3.0, -3.0]),
"b":
parsing_ops.FixedLenFeature(
(1, 1, 1, 1), dtype=dtypes.string, default_value="tmp_str"),
}
}, expected_output)
@test_util.run_deprecated_v1
def testSerializedContainingSparseAndSparseFeatureAndDenseWithNoDefault(self):
expected_st_a = ( # indices, values, shape
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # st_a is DT_INT64
np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0
expected_sp = ( # indices, values, shape
np.array([[0, 0], [0, 3], [1, 7]], dtype=np.int64),
np.array(["a", "b", "c"], dtype="|S"), np.array(
[2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
original = [
example(
features=features({
"c": float_feature([3, 4]),
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3])
})),
example(
features=features({
"c": float_feature([1, 2]),
"val": bytes_feature([b"c"]),
"idx": int64_feature([7])
}))
]
names = ["in1", "in2"]
serialized = [m.SerializeToString() for m in original]
a_default = [1, 2, 3]
b_default = np.random.rand(3, 3).astype(bytes)
expected_output = {
"st_a": expected_st_a,
"sp": expected_sp,
"a": np.array(2 * [[a_default]]),
"b": np.array(2 * [b_default]),
"c": np.array([[3, 4], [1, 2]], dtype=np.float32),
}
self._test(
{
"example_names": names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"sp":
parsing_ops.SparseFeature("idx", "val", dtypes.string, 13),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature((2,), dtypes.float32),
}
},
expected_output)
@test_util.run_deprecated_v1
def testSerializedContainingSparseAndSparseFeatureWithReuse(self):
expected_idx = ( # indices, values, shape
np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.int64),
np.array([0, 3, 7, 1]),
np.array([2, 2], dtype=np.int64)) # batch == 2, max_elems = 2
expected_sp = ( # indices, values, shape
np.array([[0, 0], [0, 3], [1, 1], [1, 7]], dtype=np.int64),
np.array(["a", "b", "d", "c"], dtype="|S"),
np.array([2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
original = [
example(
features=features({
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3])
})),
example(
features=features({
"val": bytes_feature([b"c", b"d"]),
"idx": int64_feature([7, 1])
}))
]
names = ["in1", "in2"]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"idx": expected_idx,
"sp": expected_sp,
}
self._test({
"example_names": names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
"idx":
parsing_ops.VarLenFeature(dtypes.int64),
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.string, [13]),
}
}, expected_output)
def _testSerializedContainingVarLenDenseLargerBatch(self, batch_size):
# During parsing, data read from the serialized proto is stored in buffers.
# For small batch sizes, a buffer will contain one minibatch entry.
# For larger batch sizes, a buffer may contain several minibatch
# entries. This test identified a bug where the code that copied
# data out of the buffers and into the output tensors assumed each
# buffer only contained one minibatch entry. The bug has since been fixed.
truth_int = [i for i in range(batch_size)]
truth_str = [[("foo%d" % i).encode(), ("bar%d" % i).encode()]
for i in range(batch_size)]
expected_str = copy.deepcopy(truth_str)
# Randomly delete trailing entries; the parser should emit the default
# value in their place.
for i in range(batch_size):
col = 1
if np.random.rand() < 0.25:
# w.p. 25%, drop out the second entry
expected_str[i][col] = b"default"
col -= 1
truth_str[i].pop()
if np.random.rand() < 0.25:
# w.p. 25%, also drop whichever entry is now last
expected_str[i][col] = b"default"
truth_str[i].pop()
expected_output = {
# Batch size batch_size, 1 time step.
"a": np.array(truth_int, dtype=np.int64).reshape(batch_size, 1),
# Batch size batch_size, 2 time steps.
"b": np.array(expected_str, dtype="|S").reshape(batch_size, 2),
}
original = [
example(
features=features({
"a": int64_feature([truth_int[i]]),
"b": bytes_feature(truth_str[i])
})) for i in range(batch_size)
]
serialized = [m.SerializeToString() for m in original]
self._test({
"serialized": ops.convert_to_tensor(serialized, dtype=dtypes.string),
"features": {
"a":
parsing_ops.FixedLenSequenceFeature(
shape=(),
dtype=dtypes.int64,
allow_missing=True,
default_value=-1),
"b":
parsing_ops.FixedLenSequenceFeature(
shape=[],
dtype=dtypes.string,
allow_missing=True,
default_value="default"),
}
}, expected_output)
def testSerializedContainingVarLenDenseLargerBatch(self):
np.random.seed(3456)
for batch_size in (1, 10, 20, 100, 256):
self._testSerializedContainingVarLenDenseLargerBatch(batch_size)
@test_util.run_deprecated_v1
def testSerializedContainingVarLenDense(self):
aname = "a"
bname = "b"
cname = "c"
dname = "d"
example_names = ["in1", "in2", "in3", "in4"]
original = [
example(features=features({
cname: int64_feature([2]),
})),
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str", b"b1_str"]),
})),
example(
features=features({
aname: float_feature([-1, -1, 2, 2]),
bname: bytes_feature([b"b1"]),
})),
example(
features=features({
aname: float_feature([]),
cname: int64_feature([3]),
})),
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
aname:
np.array(
[
[0, 0, 0, 0],
[1, 1, 0, 0],
[-1, -1, 2, 2],
[0, 0, 0, 0],
],
dtype=np.float32).reshape(4, 2, 2, 1),
bname:
np.array(
[["", ""], ["b0_str", "b1_str"], ["b1", ""], ["", ""]],
dtype=bytes).reshape(4, 2, 1, 1, 1),
cname:
np.array([2, 0, 0, 3], dtype=np.int64).reshape(4, 1),
dname:
np.empty(shape=(4, 0), dtype=bytes),
}
self._test({
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=True),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}
}, expected_output)
# Test with padding values.
expected_output_custom_padding = dict(expected_output)
expected_output_custom_padding[aname] = np.array(
[
[-2, -2, -2, -2],
[1, 1, -2, -2],
[-1, -1, 2, 2],
[-2, -2, -2, -2],
],
dtype=np.float32).reshape(4, 2, 2, 1)
self._test({
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1),
dtype=dtypes.float32,
allow_missing=True,
default_value=-2.0),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=True),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}
}, expected_output_custom_padding)
# Change number of required values so the inputs are not a
# multiple of this size.
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(
errors_impl.OpError, "Name: in3, Key: b, Index: 2. "
"Number of bytes values is not a multiple of stride length."))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1),
dtype=dtypes.float32,
allow_missing=True,
default_value=[]),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(ValueError,
"Cannot reshape a tensor with 0 elements to shape"))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenFeature(
(None, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(ValueError,
"First dimension of shape for feature a unknown. "
"Consider using FixedLenSequenceFeature."))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
cname:
parsing_ops.FixedLenFeature(
(1, None), dtype=dtypes.int64, default_value=[[1]]),
}
},
expected_err=(ValueError,
"All dimensions of shape for feature c need to be known "
r"but received \(1, None\)."))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=False),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(ValueError,
"Unsupported: FixedLenSequenceFeature requires "
"allow_missing to be True."))
class ParseSingleExampleTest(test.TestCase):
def _test(self, kwargs, expected_values=None, expected_err=None):
with self.cached_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
out = parsing_ops.parse_single_example(**kwargs)
sess.run(flatten_values_tensors_or_sparse(out.values()))
else:
# Returns dict w/ Tensors and SparseTensors.
out = parsing_ops.parse_single_example(**kwargs)
# Check values.
tf_result = sess.run(flatten_values_tensors_or_sparse(out.values()))
_compare_output_to_expected(self, out, expected_values, tf_result)
# Check shapes.
for k, f in kwargs["features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(
tuple(out[k].get_shape()), tensor_shape.as_shape(f.shape))
elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
tuple(out[k].indices.get_shape().as_list()), (None, 1))
self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(out[k].dense_shape.get_shape().as_list()), (1,))
@test_util.run_deprecated_v1
def testSingleExampleWithSparseAndSparseFeatureAndDense(self):
original = example(
features=features({
"c": float_feature([3, 4]),
"d": float_feature([0.0, 1.0]),
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3]),
"st_a": float_feature([3.0, 4.0])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0], [1]], dtype=np.int64), # indices
np.array([3.0, 4.0], dtype=np.float32), # values
np.array([2], dtype=np.int64)) # shape: max_values = 2
expected_sp = ( # indices, values, shape
np.array([[0], [3]], dtype=np.int64), np.array(["a", "b"], dtype="|S"),
np.array([13], dtype=np.int64)) # max_values = 13
a_default = [1, 2, 3]
b_default = np.random.rand(3, 3).astype(bytes)
expected_output = {
"st_a": expected_st_a,
"sp": expected_sp,
"a": [a_default],
"b": b_default,
"c": np.array([3, 4], dtype=np.float32),
"d": np.array([0.0, 1.0], dtype=np.float32),
}
self._test(
{
"example_names": ops.convert_to_tensor("in1"),
"serialized": ops.convert_to_tensor(serialized),
"features": {
"st_a":
parsing_ops.VarLenFeature(dtypes.float32),
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.string,
[13]),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature(2, dtypes.float32),
"d":
parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True)
}
},
expected_output)
class ParseSequenceExampleTest(test.TestCase):
def testCreateSequenceExample(self):
value = sequence_example(
context=features({
"global_feature": float_feature([1, 2, 3]),
}),
feature_lists=feature_lists({
"repeated_feature_2_frames":
feature_list([
bytes_feature([b"a", b"b", b"c"]),
bytes_feature([b"a", b"d", b"e"])
]),
"repeated_feature_3_frames":
feature_list([
int64_feature([3, 4, 5, 6, 7]),
int64_feature([-1, 0, 0, 0, 0]),
int64_feature([1, 2, 3, 4, 5])
])
}))
value.SerializeToString() # Smoke test
def _test(self,
kwargs,
expected_context_values=None,
expected_feat_list_values=None,
expected_length_values=None,
expected_err=None,
batch=False):
expected_context_values = expected_context_values or {}
expected_feat_list_values = expected_feat_list_values or {}
expected_length_values = expected_length_values or {}
with self.cached_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
if batch:
c_out, fl_out, _ = parsing_ops.parse_sequence_example(**kwargs)
else:
c_out, fl_out = parsing_ops.parse_single_sequence_example(**kwargs)
if c_out:
sess.run(flatten_values_tensors_or_sparse(c_out.values()))
if fl_out:
sess.run(flatten_values_tensors_or_sparse(fl_out.values()))
else:
# Returns dicts w/ Tensors and SparseTensors.
if batch:
(context_out, feat_list_out,
lengths_out) = parsing_ops.parse_sequence_example(**kwargs)
else:
(context_out,
feat_list_out) = parsing_ops.parse_single_sequence_example(**kwargs)
lengths_out = {}
context_result = sess.run(
flatten_values_tensors_or_sparse(
context_out.values())) if context_out else []
feat_list_result = sess.run(
flatten_values_tensors_or_sparse(
feat_list_out.values())) if feat_list_out else []
lengths_result = sess.run(
flatten_values_tensors_or_sparse(
lengths_out.values())) if lengths_out else []
# Check values.
_compare_output_to_expected(self, context_out, expected_context_values,
context_result)
_compare_output_to_expected(self, feat_list_out,
expected_feat_list_values, feat_list_result)
_compare_output_to_expected(self, lengths_out, expected_length_values,
lengths_result)
# Check shapes; if serialized is a Tensor we need its size to
# properly check.
if "context_features" in kwargs:
for k, f in kwargs["context_features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
if batch:
self.assertEqual(
tuple(context_out[k].get_shape().as_list()[1:]), f.shape)
else:
self.assertEqual(
tuple(context_out[k].get_shape().as_list()), f.shape)
elif isinstance(f, parsing_ops.VarLenFeature) and batch:
self.assertEqual(
tuple(context_out[k].indices.get_shape().as_list()), (None, 2))
self.assertEqual(
tuple(context_out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(context_out[k].dense_shape.get_shape().as_list()), (2,))
elif isinstance(f, parsing_ops.VarLenFeature) and not batch:
self.assertEqual(
tuple(context_out[k].indices.get_shape().as_list()), (None, 1))
self.assertEqual(
tuple(context_out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(context_out[k].dense_shape.get_shape().as_list()), (1,))
def _testBoth(self,
kwargs,
expected_context_values=None,
expected_feat_list_values=None,
expected_err=None):
# Test using tf.io.parse_single_sequence_example
self._test(
kwargs,
expected_context_values=expected_context_values,
expected_feat_list_values=expected_feat_list_values,
expected_err=expected_err,
batch=False)
# Convert the input to a batch of size 1, and test using
# tf.parse_sequence_example.
# Some replacements are needed for the batch version.
kwargs["serialized"] = [kwargs.pop("serialized")]
kwargs["example_names"] = [kwargs.pop("example_name")
] if "example_name" in kwargs else None
# Disable error string matching; it's not consistent for batch mode.
if expected_err:
expected_err = (expected_err[0], "")
# Add a batch dimension to expected output
if expected_context_values:
new_values = {}
for k in expected_context_values:
v = expected_context_values[k]
if isinstance(kwargs["context_features"][k],
parsing_ops.FixedLenFeature):
new_values[k] = np.expand_dims(v, axis=0)
else:
# Sparse tensor.
new_values[k] = (np.insert(v[0], 0, 0, axis=1), v[1],
np.insert(v[2], 0, 1))
expected_context_values = new_values
expected_length_values = {}
if expected_feat_list_values:
new_values = {}
for k in expected_feat_list_values:
v = expected_feat_list_values[k]
if isinstance(kwargs["sequence_features"][k],
parsing_ops.FixedLenSequenceFeature):
expected_length_values[k] = [np.shape(v)[0]]
new_values[k] = np.expand_dims(v, axis=0)
else:
# Sparse tensor.
new_values[k] = (np.insert(v[0], 0, 0, axis=1), v[1],
np.insert(v[2], 0, 1))
expected_feat_list_values = new_values
self._test(
kwargs,
expected_context_values=expected_context_values,
expected_feat_list_values=expected_feat_list_values,
expected_length_values=expected_length_values,
expected_err=expected_err,
batch=True)
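# Sketch of the sparse batch conversion above: for a single-example value
# with indices [[0], [3]], np.insert(v[0], 0, 0, axis=1) yields
# [[0, 0], [0, 3]] (every entry placed in batch row 0), and
# np.insert(v[2], 0, 1) prepends batch size 1 to the dense_shape.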
@test_util.run_deprecated_v1
def testSequenceExampleWithSparseAndDenseContext(self):
original = sequence_example(
context=features({
"c": float_feature([3, 4]),
"st_a": float_feature([3.0, 4.0])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0], [1]], dtype=np.int64), # indices
np.array([3.0, 4.0], dtype=np.float32), # values
np.array([2], dtype=np.int64)) # shape: num_features = 2
a_default = [[1, 2, 3]]
b_default = np.random.rand(3, 3).astype(bytes)
expected_context_output = {
"st_a": expected_st_a,
"a": a_default,
"b": b_default,
"c": np.array([3, 4], dtype=np.float32),
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"context_features": {
"st_a":
parsing_ops.VarLenFeature(dtypes.float32),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature((2,), dtypes.float32),
}
},
expected_context_values=expected_context_output)
@test_util.run_deprecated_v1
def testSequenceExampleWithMultipleSizeFeatureLists(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([
int64_feature([-1, 0, 1]),
int64_feature([2, 3, 4]),
int64_feature([5, 6, 7]),
int64_feature([8, 9, 10]),
]),
"b":
feature_list([bytes_feature([b"r00", b"r01", b"r10", b"r11"])]),
"c":
feature_list([float_feature([3, 4]),
float_feature([-1, 2])]),
}))
serialized = original.SerializeToString()
expected_feature_list_output = {
"a":
np.array(
[ # outer dimension is time.
[[-1, 0, 1]], # inside are 1x3 matrices
[[2, 3, 4]],
[[5, 6, 7]],
[[8, 9, 10]]
],
dtype=np.int64),
"b":
np.array(
[ # outer dimension is time, inside are 2x2 matrices
[[b"r00", b"r01"], [b"r10", b"r11"]]
],
dtype=bytes),
"c":
np.array(
[ # outer dimension is time, inside are 2-vectors
[3, 4], [-1, 2]
],
dtype=np.float32),
"d":
np.empty(shape=(0, 5), dtype=np.float32), # empty_allowed_missing
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a":
parsing_ops.FixedLenSequenceFeature((1, 3), dtypes.int64),
"b":
parsing_ops.FixedLenSequenceFeature((2, 2), dtypes.string),
"c":
parsing_ops.FixedLenSequenceFeature(2, dtypes.float32),
"d":
parsing_ops.FixedLenSequenceFeature(
(5,), dtypes.float32, allow_missing=True),
}
},
expected_feat_list_values=expected_feature_list_output)
@test_util.run_deprecated_v1
def testSequenceExampleWithoutDebugName(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([int64_feature([3, 4]),
int64_feature([1, 0])]),
"st_a":
feature_list([
float_feature([3.0, 4.0]),
float_feature([5.0]),
float_feature([])
]),
"st_b":
feature_list([
bytes_feature([b"a"]),
bytes_feature([]),
bytes_feature([]),
bytes_feature([b"b", b"c"])
])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0, 0], [0, 1], [1, 0]], dtype=np.int64), # indices
np.array([3.0, 4.0, 5.0], dtype=np.float32), # values
np.array([3, 2], dtype=np.int64)) # shape: num_time = 3, max_feat = 2
expected_st_b = (
np.array([[0, 0], [3, 0], [3, 1]], dtype=np.int64), # indices
np.array(["a", "b", "c"], dtype="|S"), # values
np.array([4, 2], dtype=np.int64)) # shape: num_time = 4, max_feat = 2
expected_st_c = (
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # values
np.array([0, 0], dtype=np.int64)) # shape: num_time = 0, max_feat = 0
expected_feature_list_output = {
"a": np.array([[3, 4], [1, 0]], dtype=np.int64),
"st_a": expected_st_a,
"st_b": expected_st_b,
"st_c": expected_st_c,
}
self._testBoth(
{
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"st_a": parsing_ops.VarLenFeature(dtypes.float32),
"st_b": parsing_ops.VarLenFeature(dtypes.string),
"st_c": parsing_ops.VarLenFeature(dtypes.int64),
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64),
}
},
expected_feat_list_values=expected_feature_list_output)
@test_util.run_deprecated_v1
def testSequenceExampleWithSparseAndDenseFeatureLists(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([int64_feature([3, 4]),
int64_feature([1, 0])]),
"st_a":
feature_list([
float_feature([3.0, 4.0]),
float_feature([5.0]),
float_feature([])
]),
"st_b":
feature_list([
bytes_feature([b"a"]),
bytes_feature([]),
bytes_feature([]),
bytes_feature([b"b", b"c"])
])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0, 0], [0, 1], [1, 0]], dtype=np.int64), # indices
np.array([3.0, 4.0, 5.0], dtype=np.float32), # values
np.array([3, 2], dtype=np.int64)) # shape: num_time = 3, max_feat = 2
expected_st_b = (
np.array([[0, 0], [3, 0], [3, 1]], dtype=np.int64), # indices
np.array(["a", "b", "c"], dtype="|S"), # values
np.array([4, 2], dtype=np.int64)) # shape: num_time = 4, max_feat = 2
expected_st_c = (
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # values
np.array([0, 0], dtype=np.int64)) # shape: num_time = 0, max_feat = 0
expected_feature_list_output = {
"a": np.array([[3, 4], [1, 0]], dtype=np.int64),
"st_a": expected_st_a,
"st_b": expected_st_b,
"st_c": expected_st_c,
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"st_a": parsing_ops.VarLenFeature(dtypes.float32),
"st_b": parsing_ops.VarLenFeature(dtypes.string),
"st_c": parsing_ops.VarLenFeature(dtypes.int64),
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64),
}
},
expected_feat_list_values=expected_feature_list_output)
@test_util.run_deprecated_v1
def testSequenceExampleWithEmptyFeatureInFeatureLists(self):
original = sequence_example(
feature_lists=feature_lists({
"st_a":
feature_list([
float_feature([3.0, 4.0]),
feature(),
float_feature([5.0]),
]),
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64), # indices
np.array([3.0, 4.0, 5.0], dtype=np.float32), # values
np.array([3, 2], dtype=np.int64)) # shape: num_time = 3, max_feat = 2
expected_feature_list_output = {
"st_a": expected_st_a,
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"st_a": parsing_ops.VarLenFeature(dtypes.float32),
}
},
expected_feat_list_values=expected_feature_list_output)
def testSequenceExampleListWithInconsistentDataFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a": feature_list([int64_feature([-1, 0]),
float_feature([2, 3])])
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError, "Feature list: a, Index: 1."
" Data types don't match. Expected type: int64"))
def testSequenceExampleListWithWrongDataTypeFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a": feature_list([float_feature([2, 3])])
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError,
"Feature list: a, Index: 0. Data types don't match."
" Expected type: int64"))
def testSequenceExampleListWithWrongSparseDataTypeFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([
int64_feature([3, 4]),
int64_feature([1, 2]),
float_feature([2.0, 3.0])
])
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError,
"Name: in1, Feature list: a, Index: 2."
" Data types don't match. Expected type: int64"
" Feature is: float_list"))
def testSequenceExampleListWithWrongShapeFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([int64_feature([2, 3]),
int64_feature([2, 3, 4])]),
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError, r"Name: in1, Key: a, Index: 1."
r" Number of int64 values != expected."
r" values size: 3 but output shape: \[2\]"))
def testSequenceExampleWithMissingFeatureListFails(self):
original = sequence_example(feature_lists=feature_lists({}))
# Test fails because we didn't add:
# feature_list_dense_defaults = {"a": None}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(original.SerializeToString()),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(
errors_impl.OpError,
"Name: in1, Feature list 'a' is required but could not be found."
" Did you mean to include it in"
" feature_list_dense_missing_assumed_empty or"
" feature_list_dense_defaults?"))
@test_util.run_deprecated_v1
def testSequenceExampleBatch(self):
first = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([
int64_feature([-1, 0, 1]),
int64_feature([2, 3, 4]),
int64_feature([5, 6, 7]),
int64_feature([8, 9, 10]),
])
}))
second = sequence_example(
feature_lists=feature_lists({
"a": feature_list([
int64_feature([21, 2, 11]),
])
}))
serialized = [first.SerializeToString(), second.SerializeToString()]
expected_feature_list_output = {
"a":
np.array(
[ # outermost dimension is example id
[ # middle dimension is time.
[[-1, 0, 1]], # inside are 1x3 matrices
[[2, 3, 4]],
[[5, 6, 7]],
[[8, 9, 10]]
],
[ # middle dimension is time.
[[21, 2, 11]], # inside are 1x3 matrices
[[0, 0, 0]], # additional entries are padded with 0
[[0, 0, 0]],
[[0, 0, 0]]
]
],
dtype=np.int64),
"d":
np.empty(shape=(2, 0, 5), dtype=np.float32), # allowed_missing
}
self._test(
{
"example_names": ops.convert_to_tensor(["in1", "in2"]),
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a":
parsing_ops.FixedLenSequenceFeature((1, 3), dtypes.int64),
"d":
parsing_ops.FixedLenSequenceFeature(
(5,), dtypes.float32, allow_missing=True),
}
},
expected_feat_list_values=expected_feature_list_output,
expected_length_values={
"a": [4, 1],
"d": [0, 0]
},
batch=True)
class DecodeRawTest(test.TestCase):
def _decode_v1(self, words):
with self.cached_session():
examples = np.array(words)
example_tensor = constant_op.constant(
examples, shape=examples.shape, dtype=dtypes.string)
byte_tensor = parsing_ops.decode_raw_v1(example_tensor, dtypes.uint8)
return self.evaluate(byte_tensor)
def _decode_v2(self, words, fixed_length=None):
with self.cached_session():
examples = np.array(words)
byte_tensor = parsing_ops.decode_raw(
examples, dtypes.uint8, fixed_length=fixed_length)
return self.evaluate(byte_tensor)
def _ordinalize(self, words, fixed_length=None):
outputs = []
if fixed_length is None:
fixed_length = len(words[0])
for word in words:
output = []
for i in range(fixed_length):
if i < len(word):
output.append(ord(word[i]))
else:
output.append(0)
outputs.append(output)
return np.array(outputs)
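# For example: self._ordinalize(["ab", "a"], fixed_length=2) returns
# [[97, 98], [97, 0]], i.e. ord() values zero-padded to fixed_length.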
def testDecodeRawV1EqualLength(self):
words = ["string1", "string2"]
observed = self._decode_v1(words)
expected = self._ordinalize(words)
self.assertAllEqual(expected.shape, observed.shape)
self.assertAllEqual(expected, observed)
def testDecodeRawV2FallbackEqualLength(self):
words = ["string1", "string2"]
observed = self._decode_v2(words)
expected = self._ordinalize(words)
self.assertAllEqual(expected.shape, observed.shape)
self.assertAllEqual(expected, observed)
def testDecodeRawV1VariableLength(self):
words = ["string", "longer_string"]
with self.assertRaises(errors_impl.InvalidArgumentError):
self._decode_v1(words)
def testDecodeRawV2FallbackVariableLength(self):
words = ["string", "longer_string"]
with self.assertRaises(errors_impl.InvalidArgumentError):
self._decode_v2(words)
def testDecodeRawV2VariableLength(self):
words = ["string", "longer_string"]
observed = self._decode_v2(words, fixed_length=8)
expected = self._ordinalize(words, fixed_length=8)
self.assertAllEqual(expected.shape, observed.shape)
self.assertAllEqual(expected, observed)
class DecodeJSONExampleTest(test.TestCase):
def _testRoundTrip(self, examples):
with self.cached_session() as sess:
examples = np.array(examples, dtype=np.object)
json_tensor = constant_op.constant(
[json_format.MessageToJson(m) for m in examples.flatten()],
shape=examples.shape,
dtype=dtypes.string)
binary_tensor = parsing_ops.decode_json_example(json_tensor)
binary_val = self.evaluate(binary_tensor)
if examples.shape:
self.assertShapeEqual(binary_val, json_tensor)
for input_example, output_binary in zip(
np.array(examples).flatten(), binary_val.flatten()):
output_example = example_pb2.Example()
output_example.ParseFromString(output_binary)
self.assertProtoEquals(input_example, output_example)
else:
output_example = example_pb2.Example()
output_example.ParseFromString(binary_val)
self.assertProtoEquals(examples.item(), output_example)
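# Round-trip sketch of the op under test: Example proto -> MessageToJson ->
# decode_json_example -> serialized binary proto -> ParseFromString, which
# must reproduce the original proto.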
def testEmptyTensor(self):
self._testRoundTrip([])
self._testRoundTrip([[], [], []])
def testEmptyExamples(self):
self._testRoundTrip([example(), example(), example()])
def testDenseFeaturesScalar(self):
self._testRoundTrip(
example(features=features({
"a": float_feature([1, 1, 3])
})))
def testDenseFeaturesVector(self):
self._testRoundTrip([
example(features=features({
"a": float_feature([1, 1, 3])
})),
example(features=features({
"a": float_feature([-1, -1, 2])
})),
])
def testDenseFeaturesMatrix(self):
self._testRoundTrip([
[example(features=features({
"a": float_feature([1, 1, 3])
}))],
[example(features=features({
"a": float_feature([-1, -1, 2])
}))],
])
def testSparseFeatures(self):
self._testRoundTrip([
example(features=features({
"st_c": float_feature([3, 4])
})),
example(features=features({
"st_c": float_feature([])
})),
example(features=features({
"st_d": feature()
})),
example(
features=features({
"st_c": float_feature([1, 2, -1]),
"st_d": bytes_feature([b"hi"])
})),
])
def testSerializedContainingBytes(self):
aname = "a"
bname = "b*has+a:tricky_name"
self._testRoundTrip([
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"])
})),
example(
features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b"b1"])
})),
])
@test_util.run_deprecated_v1
def testInvalidSyntax(self):
with self.cached_session() as sess:
json_tensor = constant_op.constant(["{]"])
binary_tensor = parsing_ops.decode_json_example(json_tensor)
with self.assertRaisesOpError("Error while parsing JSON"):
self.evaluate(binary_tensor)
class ParseTensorOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testToFloat32(self):
with self.cached_session():
expected = np.random.rand(3, 4, 5).astype(np.float32)
tensor_proto = tensor_util.make_tensor_proto(expected)
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.float32)
result = tensor.eval(
feed_dict={serialized: tensor_proto.SerializeToString()})
self.assertAllEqual(expected, result)
@test_util.run_deprecated_v1
def testToUint8(self):
with self.cached_session():
expected = np.random.rand(3, 4, 5).astype(np.uint8)
tensor_proto = tensor_util.make_tensor_proto(expected)
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.uint8)
result = tensor.eval(
feed_dict={serialized: tensor_proto.SerializeToString()})
self.assertAllEqual(expected, result)
@test_util.run_deprecated_v1
def testTypeMismatch(self):
with self.cached_session():
expected = np.random.rand(3, 4, 5).astype(np.uint8)
tensor_proto = tensor_util.make_tensor_proto(expected)
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.uint16)
with self.assertRaisesOpError(
r"Type mismatch between parsed tensor \(uint8\) and dtype "
r"\(uint16\)"):
tensor.eval(feed_dict={serialized: tensor_proto.SerializeToString()})
@test_util.run_deprecated_v1
def testInvalidInput(self):
with self.cached_session():
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.uint16)
with self.assertRaisesOpError(
"Could not parse `serialized` as TensorProto: 'bogus'"):
tensor.eval(feed_dict={serialized: "bogus"})
with self.assertRaisesOpError(
r"Expected `serialized` to be a scalar, got shape: \[1\]"):
tensor.eval(feed_dict={serialized: ["bogus"]})
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/parsing_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseTensorsMap."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
# pylint: disable=protected-access
add_sparse_to_tensors_map = sparse_ops._add_sparse_to_tensors_map
add_many_sparse_to_tensors_map = sparse_ops._add_many_sparse_to_tensors_map
take_many_sparse_from_tensors_map = (
sparse_ops._take_many_sparse_from_tensors_map)
# pylint: enable=protected-access
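# Round trip exercised by the tests below (sketch):
#   handle = add_sparse_to_tensors_map(sp)  # scalar int64 handle
#   sp_batched = take_many_sparse_from_tensors_map(
#       sparse_map_op=handle.op, sparse_handles=[handle0_val, handle1_val])
# The recovered SparseTensor gains a leading minibatch dimension.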
class SparseTensorsMapTest(test.TestCase):
def _SparseTensorPlaceholder(self, dtype=None):
if dtype is None:
dtype = dtypes.int32
return sparse_tensor_lib.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_3x4(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
[2, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([3, 4]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
@test_util.run_deprecated_v1
def testAddTakeMany(self):
with self.session(graph=ops.Graph(), use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
handle0 = add_sparse_to_tensors_map(sp_input0, shared_name="a")
handle1 = add_sparse_to_tensors_map(sp_input1, shared_name="a")
self.assertEqual(handle0.get_shape(), ())
handles_concat = array_ops.stack([handle0, handle1])
sp_out = take_many_sparse_from_tensors_map(
sparse_map_op=handle0.op, sparse_handles=handles_concat)
combined_indices, combined_values, combined_shape = self.evaluate(sp_out)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0])
self.assertAllEqual(combined_values[:6], sp_input0[1])
self.assertAllEqual(combined_values[6:], sp_input1[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
@test_util.run_deprecated_v1
def testFeedAddTakeMany(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
sparse_handles = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=sparse_handles)
combined_indices, combined_values, combined_shape = self.evaluate(
sp_roundtrip)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], input1_val[0])
self.assertAllEqual(combined_values[:6], input0_val[1])
self.assertAllEqual(combined_values[6:], input1_val[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
@test_util.run_deprecated_v1
def testAddManyTakeManyRoundTrip(self):
with self.session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
handles = add_many_sparse_to_tensors_map(sparse_tensor)
roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handles.op, sparse_handles=handles)
handles_value, roundtrip_value = sess.run(
[handles, roundtrip],
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertEqual(handles_value.shape, (4,))
self.assertAllEqual(roundtrip_value.indices, indices_value)
self.assertAllEqual(roundtrip_value.values, values_value)
self.assertAllEqual(roundtrip_value.dense_shape, shape_value)
@test_util.run_deprecated_v1
def testDeserializeFailsInconsistentRank(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_1x1x1()
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
handle_concat = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=handle_concat)
with self.assertRaisesOpError(
r"Inconsistent rank across SparseTensors: rank prior to "
r"SparseTensor\[1\] was: 3 but rank of SparseTensor\[1\] is: 4"):
self.evaluate(sp_roundtrip)
@test_util.run_deprecated_v1
def testTakeManyFailsWrongInputOp(self):
with self.session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6(np.arange(6))
handle = add_sparse_to_tensors_map(input_val)
handle_value = self.evaluate(handle)
bad_handle = handle_value + 10
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=[handle_value, bad_handle])
with self.assertRaisesOpError(r"Unable to find SparseTensor: 10"):
self.evaluate(sp_roundtrip)
class BenchmarkSparseTensorsMapVsSerialization(test.Benchmark):
def benchmarkVeryLarge2DFloatSparseTensor(self):
np.random.seed(127)
num_elements = 10000
batch_size = 64
indices_batch = np.random.randint(
batch_size, size=num_elements, dtype=np.int64)
indices_value = np.arange(num_elements, dtype=np.int64)
indices = np.asarray(
sorted(zip(indices_batch, indices_value)), dtype=np.int64)
values = ["feature_value_for_embedding_lookup"] * num_elements
shape = np.asarray([batch_size, num_elements], dtype=np.int64)
with session.Session(config=benchmark.benchmark_config()) as sess:
with ops.device("/cpu:0"):
indices = variables.Variable(indices)
values = variables.Variable(values)
shape = variables.Variable(shape)
st = sparse_tensor_lib.SparseTensor(indices, values, shape)
st_handles = add_many_sparse_to_tensors_map(st)
st_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=st_handles.op, sparse_handles=st_handles)
st_roundtrip_op = st_roundtrip.values.op
st_serialized = sparse_ops.serialize_many_sparse(st)
st_deserialized = sparse_ops.deserialize_many_sparse(
st_serialized, dtype=values.dtype)
st_deserialized_op = st_deserialized.values.op
variables.global_variables_initializer().run()
st_roundtrip_values = self.evaluate(st_roundtrip)
st_deserialized_values = self.evaluate(st_deserialized)
np.testing.assert_equal(st_roundtrip_values.values,
st_deserialized_values.values)
np.testing.assert_equal(st_roundtrip_values.indices,
st_deserialized_values.indices)
np.testing.assert_equal(st_roundtrip_values.dense_shape,
st_deserialized_values.dense_shape)
self.run_op_benchmark(
sess,
st_roundtrip_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_tensor_maps")
self.run_op_benchmark(
sess,
st_deserialized_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_serialization")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import convolutional
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def GetShrunkInceptionShapes(shrink=10):
"""Iterator for smaller versions of convolution shapes in 2015 Inception.
  Relative to Inception, each depth value is `depth // shrink`.
Args:
shrink: Factor to shrink each depth value by relative to Inception.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the convolution
parameters of Inception layers.
"""
input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048],
[4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760],
[4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248],
[4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216],
[4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],
[4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152],
[4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152],
[4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],
[4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],
[4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128],
[4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96],
[4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288],
[4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],
[4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192],
[4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64],
[4, 147, 147, 24]]
filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384],
[1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],
[1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],
[1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],
[3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],
[3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224],
[3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192],
[1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224],
[1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],
[3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],
[1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160],
[3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128],
[1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128],
[1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],
[3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],
[1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48],
[3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],
[1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64],
[1, 1, 24, 64]]
out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320],
[4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],
[4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192],
[4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224],
[4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192],
[4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160],
[4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160],
[4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],
[4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128],
[4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96],
[4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64],
[4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],
[4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],
[4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],
[4, 147, 147, 64]]
strides = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1
]
# Shrink sizes to make the test faster
for i in input_sizes:
i[3] //= shrink
for f in filter_sizes:
f[2] //= shrink
f[3] //= shrink
for o in out_sizes:
o[3] //= shrink
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, VALID, VALID, VALID
]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
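# A short usage sketch of the generator above (illustrative only, not part of
# the original tests): each yielded tuple is one shrunken Inception layer.
def _SketchIterateInceptionShapes():
  for input_size, filter_size, out_size, stride, padding in (
      GetShrunkInceptionShapes()):
    tf_logging.debug("conv: %s * %s -> %s (stride=%s, padding=%s)",
                     input_size, filter_size, out_size, stride, padding)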
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NHWC", False), ("NHWC", True)]
if test.is_gpu_available(cuda_only=True):
# "NCHW" format is only supported on CUDA.
test_configs += [("NCHW", True)]
return test_configs
class Conv2DTest(test.TestCase):
def _DtypesToTest(self, use_gpu):
if use_gpu and not test_util.GpuSupportsHalfMatMulAndConv():
return [dtypes.float32, dtypes.float64]
else:
# It is important that float32 comes before float16 here,
# as we will be using its gradients as reference for fp16 gradients.
return [dtypes.float32, dtypes.float16, dtypes.float64]
def _CreateNumpyTensor(self, shape):
total_size = 1
for s in shape:
total_size *= s
return np.arange(1, total_size + 1, dtype=np.float32).reshape(shape)
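  # Worked example (a sketch, not a test): for shape=[1, 2, 2, 1] the helper
  # above returns float32 values 1..4 laid out as [[[[1.], [2.]], [[3.],
  # [4.]]]], a deterministic ramp that keeps expected convolution outputs
  # easy to derive by hand.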
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations,
strides, padding, data_format, dtype, use_gpu):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
      dilations: Dilation rates: [row_dilation, col_dilation]
      strides: Strides: [row_stride, col_stride]
padding: Padding type.
data_format: Format of the data tensors.
dtype: Data type for inputs and outputs.
use_gpu: True if the operations should be run on GPU
Returns:
Symbolic tensor value that can be used to execute the computation
"""
x1 = self._CreateNumpyTensor(tensor_in_sizes)
x2 = self._CreateNumpyTensor(filter_in_sizes)
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if isinstance(padding, (list, tuple)):
padding = [(0, 0)] + padding + [(0, 0)]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
dilations = test_util.NHWCToNCHW(dilations)
if isinstance(padding, (list, tuple)):
padding = test_util.NHWCToNCHW(padding)
conv = nn_ops.conv2d(
t1,
t2,
dilations=dilations,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
return conv
def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,
padding):
"""Verifies that CPU and GPU produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
      conv_strides: [row_stride, col_stride] for the convolution.
padding: Padding type.
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
def _SetupVal(data_format, use_gpu):
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d(
t1, t2, strides=strides, padding=padding, data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
return conv
tensors = []
for (data_format, use_gpu) in GetTestConfigs():
tensors.append(_SetupVal(data_format, use_gpu))
values = self.evaluate(tensors)
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-3, atol=1e-3)
def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes,
stride, dilation, padding, data_format,
use_gpu):
x1 = self._CreateNumpyTensor(tensor_in_sizes)
x2 = self._CreateNumpyTensor(filter_in_sizes)
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
      if isinstance(stride, (list, tuple)):
strides = list(stride)
else:
strides = [stride, stride]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
full_strides = [1, 1] + strides
full_dilation = [1, 1] + dilation
else:
full_strides = [1] + strides + [1]
full_dilation = [1] + dilation + [1]
expected = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilation,
data_format=data_format)
computed = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilation,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
expected = test_util.NCHWToNHWC(expected)
computed = test_util.NCHWToNHWC(computed)
return expected, computed
def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, strides,
padding, dilations, rtol=1e-4):
expected_results = []
computed_results = []
for data_format, use_gpu in GetTestConfigs():
expected, computed = self._ComputeReferenceDilatedConv(
tensor_in_sizes, filter_in_sizes, strides, dilations, padding,
data_format, use_gpu)
expected_results.append(expected)
computed_results.append(computed)
tolerance = 1e-2 if use_gpu else 1e-5
expected_values = self.evaluate(expected_results)
computed_values = self.evaluate(computed_results)
for e_value, c_value in zip(expected_values, computed_values):
tf_logging.debug("expected = %s", e_value)
tf_logging.debug("actual = %s", c_value)
self.assertAllClose(
e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=rtol)
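  # A sketch (not an original test) of why the comparison above holds: a
  # kernel with dilation 2 behaves like a zero-inflated ("a trous") kernel
  # under an ordinary convolution. Shapes here are chosen arbitrarily.
  def _SketchDilationAsZeroInflatedKernel(self):
    kernel = self._CreateNumpyTensor([2, 2, 1, 1])
    inflated = np.zeros([3, 3, 1, 1], dtype=np.float32)
    inflated[::2, ::2] = kernel  # copy the taps onto the dilated grid
    image = self._CreateNumpyTensor([1, 4, 4, 1])
    dilated = nn_ops.conv2d(
        image, kernel, strides=[1, 1, 1, 1], padding="VALID",
        dilations=[1, 2, 2, 1])
    inflated_conv = nn_ops.conv2d(
        image, inflated, strides=[1, 1, 1, 1], padding="VALID")
    self.assertAllClose(self.evaluate(dilated), self.evaluate(inflated_conv))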
def _VerifyValues(self,
tensor_in_sizes,
filter_in_sizes,
strides,
padding,
expected,
dilations=(1, 1),
gpu_only=False,
test_grappler_layout_optimizer=False,
tol=1e-5,
fp16_tol=1e-3):
if gpu_only and not test.is_gpu_available(cuda_only=True):
return
tensors = []
dilations = list(dilations)
for (data_format, use_gpu) in GetTestConfigs():
if gpu_only and not use_gpu:
continue
for dtype in self._DtypesToTest(use_gpu):
result = self._SetupValuesForDevice(
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
dtype,
use_gpu=use_gpu)
if test_grappler_layout_optimizer and data_format == "NHWC" and use_gpu:
# Grappler's layout optimizer will not optimize a fetch node, so
# this identity allows Grappler to optimize the Conv2D node.
result = array_ops.identity(result)
tensors.append(result)
values = self.evaluate(tensors)
for i in range(len(tensors)):
conv = tensors[i]
value = values[i]
tf_logging.debug("expected = %s", expected)
tf_logging.debug("actual = %s", value)
tol_to_use = fp16_tol if value.dtype == np.float16 else tol
self.assertAllClose(expected, np.ravel(value), atol=tol_to_use,
rtol=tol_to_use)
self.assertShapeEqual(value, conv)
def _VerifyExplicitPaddings(self,
tensor_in_sizes,
filter_in_sizes,
strides,
padding,
dilations=(1, 1),
test_grappler_layout_optimizer=False,
tol=1e-5,
fp16_tol=1e-3):
"""Verifies Conv2D with explicit padding generates correct values.
It does this by comparing with Conv2D without explicit padding. This
function assumes Conv2D without explicit padding works correctly.
Args:
tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
input_depth, output_depth].
      strides: [row_stride, col_stride] for the convolution.
      padding: Explicit padding amounts.
      dilations: Dilation values.
test_grappler_layout_optimizer: If True, allow the Grappler layout
optimizer to run, which turns NHWC Conv2Ds on the GPU to NCHW Conv2Ds.
tol: The absolute and relative tolerance for non-fp16 dtypes.
fp16_tol: The absolute and relative tolerance for fp16.
"""
input_tensor = self._CreateNumpyTensor(tensor_in_sizes)
filter_tensor = self._CreateNumpyTensor(filter_in_sizes)
input_tensor = array_ops.pad(input_tensor, [(0, 0)] + padding + [(0, 0)])
dilations = list(dilations)
conv2d_result = nn_ops.conv2d(
input_tensor,
filter_tensor, [1] + list(strides) + [1],
"VALID",
dilations=[1] + dilations + [1])
expected = list(self.evaluate(array_ops.reshape(conv2d_result, [-1])))
self._VerifyValues(
tensor_in_sizes,
filter_in_sizes,
strides,
padding,
expected,
dilations,
test_grappler_layout_optimizer=test_grappler_layout_optimizer,
tol=tol,
fp16_tol=fp16_tol)
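  # A minimal sketch (not an original test) of the identity the helper above
  # relies on: conv2d with explicit padding equals array_ops.pad followed by
  # a VALID convolution. Shapes and padding amounts are arbitrary.
  def _SketchExplicitPaddingEquivalence(self):
    image = self._CreateNumpyTensor([1, 2, 3, 1])
    kernel = self._CreateNumpyTensor([2, 2, 1, 1])
    pad_amounts = [[0, 0], [1, 1], [2, 0], [0, 0]]
    explicit = nn_ops.conv2d(
        image, kernel, strides=[1, 1, 1, 1], padding=pad_amounts)
    padded = array_ops.pad(image, pad_amounts)
    valid = nn_ops.conv2d(
        padded, kernel, strides=[1, 1, 1, 1], padding="VALID")
    self.assertAllClose(self.evaluate(explicit), self.evaluate(valid))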
@test_util.run_in_graph_and_eager_modes
def testConv2D1x1Filter(self):
expected_output = [
30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Filter2x1Dilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2DEmpty(self):
expected_output = []
self._VerifyValues(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
dilations=[1, 2],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2D1x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [
231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
936.0, 1029.0
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D1x2FilterDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride2(self):
expected_output = [2271.0, 2367.0, 2463.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride2Same(self):
expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="SAME",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride1x2(self):
expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0]
self._VerifyValues(
tensor_in_sizes=[1, 3, 6, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSmallerThanStrideValid(self):
expected_output = [65, 95, 275, 305]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSmallerThanStrideSame(self):
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 7, 9])
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 9, 11])
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3],
padding="SAME",
expected=[44, 28, 41, 16])
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSize(self):
self._VerifyValues(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[2, 2, 1, 2],
strides=[1, 1],
padding="VALID",
expected=[50, 60])
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[2, 2, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID")
@test_util.run_in_graph_and_eager_modes()
def testConv2D0x0Padding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding=[[0, 0], [0, 0]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[3, 4, 3, 2],
filter_in_sizes=[1, 1, 2, 1],
strides=[2, 2],
padding=[[0, 0], [0, 0]])
@test_util.run_in_graph_and_eager_modes()
def testConv2D1x1Padding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]])
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Padding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 1, 2],
filter_in_sizes=[2, 1, 2, 1],
strides=[1, 1],
padding=[[2, 2], [2, 2]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 1, 2],
filter_in_sizes=[1, 1, 2, 1],
strides=[2, 1],
padding=[[2, 2], [2, 2]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DOnlyBottomPadding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 2],
strides=[1, 1],
padding=[[0, 3], [0, 0]], tol=2e-5)
self._VerifyExplicitPaddings(
tensor_in_sizes=[2, 2, 4, 3],
filter_in_sizes=[1, 2, 3, 2],
strides=[2, 2],
padding=[[0, 3], [0, 0]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DOnlyTopRightPadding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 2],
strides=[1, 1],
padding=[[1, 0], [0, 2]],
tol=5e-5)
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 4, 2],
filter_in_sizes=[2, 2, 2, 2],
strides=[1, 3],
padding=[[1, 0], [0, 2]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DLotsPadding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 1, 1, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding=[[3, 4], [4, 2]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 1, 1],
filter_in_sizes=[2, 2, 1, 3],
strides=[2, 1],
padding=[[3, 4], [4, 2]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DExplicitPaddingWithDilations(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 3, 2, 1],
filter_in_sizes=[1, 2, 1, 2],
strides=[1, 1],
padding=[[1, 0], [0, 1]],
dilations=[2, 1])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[3, 2, 2, 1],
strides=[1, 1],
padding=[[2, 1], [1, 2]],
dilations=[2, 3])
def testConv2DExplicitPaddingWithLayoutOptimizer(self):
# Test with Grappler's layout optimizer, to ensure the layout optimizer
# handles explicit padding correctly.
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 3, 2, 1],
filter_in_sizes=[1, 2, 1, 2],
strides=[1, 1],
padding=[[1, 0], [0, 1]],
dilations=[2, 1],
test_grappler_layout_optimizer=True)
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[3, 2, 2, 1],
strides=[1, 1],
padding=[[2, 1], [1, 2]],
dilations=[2, 3],
test_grappler_layout_optimizer=True)
def _VerifyGroupConvFwd(self, tensor_in_sizes, filter_in_sizes, dilations,
strides, padding, data_format, dtype):
"""Verify the output of group convolution is equal to a for-loop implementation.
Args:
tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
input_depth, output_depth].
      dilations: Dilation rates: [row_dilation, col_dilation]
      strides: Strides: [row_stride, col_stride]
padding: Padding type.
data_format: Format of the data tensors.
dtype: Data type for inputs and outputs.
"""
tensor_in = self._CreateNumpyTensor(tensor_in_sizes)
filter_in = self._CreateNumpyTensor(filter_in_sizes)
num_groups = tensor_in_sizes[3] // filter_in_sizes[2]
assert num_groups > 1 and \
filter_in_sizes[2] * num_groups == tensor_in_sizes[3]
with test_util.device(True):
t1 = constant_op.constant(tensor_in, dtype=dtype)
t2 = constant_op.constant(filter_in, dtype=dtype)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
dilations = test_util.NHWCToNCHW(dilations)
t1_splits = array_ops.split(t1, num_groups, axis=1)
else:
t1_splits = array_ops.split(t1, num_groups, axis=3)
t2_splits = array_ops.split(t2, num_groups, axis=3)
def MakeConv2d(inputs, filters):
return nn_ops.conv2d(
inputs,
filters,
strides,
padding,
dilations=dilations,
data_format=data_format)
group_conv = MakeConv2d(t1, t2)
group_conv_loop = array_ops.concat(
[MakeConv2d(t1s, t2s) for t1s, t2s in zip(t1_splits, t2_splits)],
axis=1 if data_format == "NCHW" else 3)
results = self.evaluate([group_conv, group_conv_loop])
tol_to_use = 1e-5
self.assertAllClose(
results[0], results[1], atol=tol_to_use, rtol=tol_to_use)
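  # A sketch (not an original test) of the reference semantics used above:
  # with `num_groups = in_depth // filter_in_depth`, a grouped convolution is
  # the channel-wise concat of independent per-group convolutions. The fused
  # form, nn_ops.conv2d on the unsplit tensors, requires a CUDA GPU in this
  # TF version; the split/concat form below runs anywhere.
  def _SketchGroupConvAsSplitConcat(self):
    inputs = self._CreateNumpyTensor([1, 5, 5, 4])   # 4 input channels
    filters = self._CreateNumpyTensor([3, 3, 2, 6])  # depth 2 -> 2 groups
    out = array_ops.concat(
        [nn_ops.conv2d(x, f, strides=[1, 1, 1, 1], padding="SAME")
         for x, f in zip(array_ops.split(inputs, 2, axis=3),
                         array_ops.split(filters, 2, axis=3))],
        axis=3)
    # Each group maps 2 input channels to 3 of the 6 output channels.
    self.assertEqual([1, 5, 5, 6], out.shape.as_list())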
@test_util.run_in_graph_and_eager_modes
@test_util.run_cuda_only
def testConv2DGroupConvFwd(self):
for data_format in ["NHWC", "NCHW"]:
for dilation in [1, 2]:
for stride in [1, 2]:
self._VerifyGroupConvFwd([10, 32, 32, 16], [3, 3, 4, 8],
dilations=[dilation, dilation],
strides=[stride, stride],
padding="SAME",
data_format=data_format,
dtype=dtypes.float32)
@test_util.deprecated_graph_mode_only
@test_util.run_cuda_only
def testInputGradientGroupConv(self):
for data_format in ["NCHW", "NHWC"]:
for test_input in [True, False]:
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
num_groups=2,
padding="VALID",
in_depth=4,
out_depth=6,
stride_rows=1,
stride_cols=1,
test_input=test_input,
data_format=data_format,
use_gpu=True,
max_err=0.005)
@test_util.deprecated_graph_mode_only
@test_util.run_cuda_only
def testFilterGradientGroupConv(self):
for data_format in ["NCHW", "NHWC"]:
for test_input in [True, False]:
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
num_groups=2,
padding="VALID",
in_depth=4,
out_depth=6,
stride_rows=1,
stride_cols=1,
test_input=test_input,
data_format=data_format,
use_gpu=True,
max_err=0.005)
# TODO(yzhwang): this currently fails.
# self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1],
# filter_in_sizes=[2, 2, 1, 1],
# strides=[4, 4], padding="SAME",
# expected=[72, 112, 392, 432])
# Testing for backprops
def _RunAndVerifyBackpropInput(self,
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
expected,
data_format,
use_gpu,
err,
dilations=(1, 1)):
if use_gpu and not test.is_gpu_available(cuda_only=True):
return
x1 = self._CreateNumpyTensor(filter_sizes)
x2 = self._CreateNumpyTensor(output_sizes)
dilations = list(dilations)
with test_util.device(use_gpu):
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if isinstance(padding, (list, tuple)):
padding = [(0, 0)] + padding + [(0, 0)]
if data_format == "NCHW":
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
dilations = test_util.NHWCToNCHW(dilations)
if isinstance(padding, (list, tuple)):
          padding = test_util.NHWCToNCHW(padding)
conv = nn_ops.conv2d_backprop_input(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
# "values" consists of two tensors for two backprops
value = self.evaluate(conv)
self.assertShapeEqual(value, conv)
tf_logging.debug("expected = %s", expected)
tf_logging.debug("actual = %s", value)
self.assertArrayNear(expected, value.flatten(), err)
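  # A sketch (not an original test, graph mode only) of what the helper above
  # exercises: conv2d_backprop_input fed an upstream gradient dy reproduces
  # tf.gradients of the forward convolution w.r.t. the input.
  def _SketchBackpropInputIsConvGradient(self):
    x = constant_op.constant(self._CreateNumpyTensor([1, 3, 3, 1]))
    f = constant_op.constant(self._CreateNumpyTensor([2, 2, 1, 1]))
    y = nn_ops.conv2d(x, f, strides=[1, 1, 1, 1], padding="VALID")
    dy = array_ops.ones_like(y)
    via_gradients = gradients_impl.gradients(y, x, grad_ys=dy)[0]
    via_backprop = nn_ops.conv2d_backprop_input(
        array_ops.shape(x), f, dy, strides=[1, 1, 1, 1], padding="VALID")
    self.assertAllClose(
        self.evaluate(via_gradients), self.evaluate(via_backprop))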
def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
conv_strides, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(data_format, use_gpu):
with test_util.device(use_gpu):
if data_format == "NCHW":
new_input_sizes = test_util.NHWCToNCHW(input_sizes)
else:
new_input_sizes = input_sizes
t0 = constant_op.constant(new_input_sizes, shape=[len(new_input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d_backprop_input(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
ret = self.evaluate(conv)
self.assertShapeEqual(ret, conv)
return ret
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-2, atol=1e-2)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropInput(self):
expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropInput(self):
expected_output = []
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInput(self):
expected_output = [
14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0,
140.0, 158.0, 478.0, 541.0, 604.0, 437.0, 482.0, 527.0
]
for (data_format, use_gpu) in GetTestConfigs():
      # The GPU version of this test is not very stable, so the error
      # threshold is raised to 1e-4.
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInputStride1x2(self):
expected_output = [
1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0,
16.0, 15.0, 20.0, 18.0, 24.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropInput(self):
expected_output = [
1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropInput(self):
expected_output = [5.0, 11.0, 17.0, 23.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
# Testing for backprops
def _RunAndVerifyBackpropFilter(self,
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
expected,
data_format,
use_gpu,
dilations=(1, 1),
err=1e-5):
x0 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(output_sizes)
dilations = list(dilations)
explicit_strides = [1] + strides + [1]
new_padding = padding
new_dilations = [1] + dilations + [1]
if isinstance(new_padding, (list, tuple)):
new_padding = [(0, 0)] + new_padding + [(0, 0)]
if data_format == "NCHW":
explicit_strides = test_util.NHWCToNCHW(explicit_strides)
new_dilations = test_util.NHWCToNCHW(new_dilations)
if isinstance(padding, (list, tuple)):
new_padding = test_util.NHWCToNCHW(new_padding)
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with test_util.device(use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype)
if data_format == "NCHW":
t0 = test_util.NHWCToNCHW(t0)
t2 = test_util.NHWCToNCHW(t2)
conv = nn_ops.conv2d_backprop_filter(
t0,
t1,
t2,
strides=explicit_strides,
padding=new_padding,
dilations=new_dilations,
data_format=data_format)
value = self.evaluate(conv)
self.assertShapeEqual(value, conv)
tf_logging.debug("expected = %s", expected)
tf_logging.debug("actual = %s", value)
self.assertArrayNear(expected, value.flatten(), err)
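  # The filter-side counterpart of the sketch above (not an original test,
  # graph mode only): conv2d_backprop_filter reproduces tf.gradients of the
  # forward convolution w.r.t. the filter.
  def _SketchBackpropFilterIsConvGradient(self):
    x = constant_op.constant(self._CreateNumpyTensor([1, 3, 3, 1]))
    f = constant_op.constant(self._CreateNumpyTensor([2, 2, 1, 1]))
    y = nn_ops.conv2d(x, f, strides=[1, 1, 1, 1], padding="VALID")
    dy = array_ops.ones_like(y)
    via_gradients = gradients_impl.gradients(y, f, grad_ys=dy)[0]
    via_backprop = nn_ops.conv2d_backprop_filter(
        x, array_ops.shape(f), dy, strides=[1, 1, 1, 1], padding="VALID")
    self.assertAllClose(
        self.evaluate(via_gradients), self.evaluate(via_backprop))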
def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes,
conv_strides, padding):
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(data_format, use_gpu):
with test_util.device(use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t0 = test_util.NHWCToNCHW(t0)
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d_backprop_filter(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
ret = self.evaluate(conv)
self.assertShapeEqual(ret, conv)
return ret
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropFilter(self):
expected = [5.0, 8.0, 14.0, 17.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropFilter(self):
expected = []
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
output_sizes=[1, 1, 2, 0],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DBackpropFilterWithEmptyInput(self):
expected = [0, 0, 0, 0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilter(self):
expected = [
17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0,
37.0, 50.0, 63.0, 42.0, 57.0, 72.0, 62.0, 85.0, 108.0, 67.0, 92.0,
117.0, 72.0, 99.0, 126.0, 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0,
120.0, 153.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self):
expected = [161.0, 182.0, 287.0, 308.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropFilter(self):
expected_output = [78.]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropFilter(self):
expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 4.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
# Testing for backprops
def _RunAndVerifyBackpropInputDilation(self, input_sizes, filter_sizes,
output_sizes, strides, dilations,
padding, data_format, use_gpu, err):
x1 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(filter_sizes)
default_dilations = (dilations[0] == 1 and dilations[1] == 1)
if default_dilations or use_gpu:
with self.cached_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t1 = constant_op.constant(x1, shape=input_sizes)
t2 = constant_op.constant(x2, shape=filter_sizes)
full_strides = [1] + strides + [1]
full_dilations = [1] + dilations + [1]
if data_format == "NCHW":
full_strides = test_util.NHWCToNCHW(full_strides)
full_dilations = test_util.NHWCToNCHW(full_dilations)
conv_forward = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilations,
padding=padding,
data_format=data_format)
conv_forward_2 = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilations,
data_format=data_format)
if data_format == "NCHW":
conv_forward = test_util.NCHWToNHWC(conv_forward)
conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
conv = gradients_impl.gradients(conv_forward, t1)[0]
conv_2 = gradients_impl.gradients(conv_forward_2, t1)[0]
# "values" consists of two tensors for two backprops
value = self.evaluate(conv)
value_2 = self.evaluate(conv_2)
self.assertShapeEqual(value, conv)
self.assertShapeEqual(value_2, conv_2)
tf_logging.debug("expected = %s", value_2)
tf_logging.debug("actual = %s", value)
self.assertArrayNear(value_2.flatten(), value.flatten(), err)
# Testing for backprops
def _RunAndVerifyBackpropFilterDilation(self, input_sizes, filter_sizes,
output_sizes, strides, dilations,
padding, data_format, use_gpu, err):
x1 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(filter_sizes)
default_dilations = (dilations[0] == 1 and dilations[1] == 1)
if default_dilations or use_gpu:
with self.cached_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t1 = constant_op.constant(x1, shape=input_sizes)
t2 = constant_op.constant(x2, shape=filter_sizes)
full_strides = [1] + strides + [1]
full_dilations = [1] + dilations + [1]
if data_format == "NCHW":
full_strides = test_util.NHWCToNCHW(full_strides)
full_dilations = test_util.NHWCToNCHW(full_dilations)
conv_forward = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilations,
padding=padding,
data_format=data_format)
conv_forward_2 = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilations,
data_format=data_format)
if data_format == "NCHW":
conv_forward = test_util.NCHWToNHWC(conv_forward)
conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
conv = gradients_impl.gradients(conv_forward, t2)[0]
        conv_2 = gradients_impl.gradients(conv_forward_2, t2)[0]
value = self.evaluate(conv)
value_2 = self.evaluate(conv_2)
self.assertShapeEqual(value, conv)
self.assertShapeEqual(value_2, conv_2)
tf_logging.debug("expected = %s", value_2)
tf_logging.debug("actual = %s", value)
self.assertArrayNear(value_2.flatten(), value.flatten(), err)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2DEmptyBackpropFilterDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
output_sizes=[1, 1, 2, 0],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 4, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2DEmptyBackpropInputDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
        # The GPU version of this test is not very stable, so the error
        # threshold is raised to 1e-4.
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 2, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
@test_util.deprecated_graph_mode_only
def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def _RunAndVerifyBackpropInputExplicitPadding(self,
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
data_format,
use_gpu,
dilations=(1, 1),
err=2e-5):
if use_gpu and not test.is_gpu_available(cuda_only=True):
return
if not use_gpu and dilations != (1, 1):
      return  # Non-default dilations are currently not supported on the CPU.
x1 = self._CreateNumpyTensor(filter_sizes)
x2 = self._CreateNumpyTensor(output_sizes)
dilations = list(dilations)
padded_input_sizes = input_sizes[:]
padded_input_sizes[1] += padding[0][0] + padding[0][1]
padded_input_sizes[2] += padding[1][0] + padding[1][1]
c = nn_ops.conv2d_backprop_input(
padded_input_sizes,
x1,
x2,
strides=[1] + strides + [1],
padding="VALID",
dilations=[1] + dilations + [1])
c = c[:, padding[0][0]:(c.shape[1] - padding[0][1]), padding[1][0]:(
c.shape[2] - padding[1][1]), :]
expected = list(self.evaluate(array_ops.reshape(c, [-1])))
self._RunAndVerifyBackpropInput(
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
expected,
data_format,
use_gpu=use_gpu,
err=err,
dilations=dilations)
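  # The helper above derives its expected values from first principles: the
  # input gradient of a conv with explicit padding equals the input gradient
  # of a VALID conv over the zero-padded shape, with the padded border then
  # cropped away, since gradient landing on padded cells never flows back to
  # the real input.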
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding0x0BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding=[[0, 0], [0, 0]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 3, 4, 2],
filter_sizes=[2, 2, 2, 3],
output_sizes=[1, 1, 2, 3],
strides=[2, 2],
padding=[[0, 0], [0, 0]],
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding1x1BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 3, 4, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 2],
filter_sizes=[1, 1, 2, 1],
output_sizes=[1, 4, 3, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 4, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 4, 2, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
data_format=data_format,
dilations=[2, 2], use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding2x2BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[2, 3, 1, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[2, 2, 5, 1],
strides=[3, 1],
padding=[[2, 2], [2, 2]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 3, 6, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 3, 4, 1],
strides=[1, 2],
padding=[[2, 2], [2, 2]],
data_format=data_format,
dilations=[2, 3],
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_1_8_4_1_BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 10, 8, 1],
strides=[1, 1],
padding=[[1, 8], [4, 2]],
data_format=data_format,
use_gpu=use_gpu,
err=5e-5)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 5, 3, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 4, 8, 1],
strides=[3, 1],
padding=[[1, 8], [4, 2]],
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_5_0_2_2_BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[1, 7, 7, 1],
strides=[1, 1],
padding=[[5, 0], [2, 2]],
data_format=data_format,
err=5e-5,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 4, 2, 1],
filter_sizes=[3, 3, 1, 1],
output_sizes=[1, 5, 2, 1],
strides=[1, 2],
padding=[[5, 0], [2, 2]],
data_format=data_format,
dilations=[2, 1],
use_gpu=use_gpu)
def _RunAndVerifyBackpropFilterExplicitPadding(self,
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
data_format,
use_gpu,
dilations=(1, 1),
err=1e-5):
if use_gpu and not test.is_gpu_available(cuda_only=True):
return
if not use_gpu and dilations != (1, 1):
      return  # Non-default dilations are currently not supported on the CPU.
x0 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(output_sizes)
dilations = list(dilations)
x0 = np.pad(x0, [(0, 0)] + padding + [(0, 0)], "constant")
c = nn_ops.conv2d_backprop_filter(
x0,
filter_sizes,
x2,
strides=[1] + strides + [1],
padding="VALID",
dilations=[1] + dilations + [1])
expected = list(self.evaluate(array_ops.reshape(c, [-1])))
self._RunAndVerifyBackpropFilter(
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
expected,
data_format,
use_gpu=use_gpu,
dilations=dilations,
err=err)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding0x0BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding=[[0, 0], [0, 0]],
data_format=data_format, use_gpu=use_gpu)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 3, 4, 2],
filter_sizes=[2, 2, 2, 3],
output_sizes=[1, 1, 2, 3],
strides=[2, 2],
padding=[[0, 0], [0, 0]],
data_format=data_format, use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding1x1BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 3, 4, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu,
err=5e-5)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 2],
filter_sizes=[1, 1, 2, 1],
output_sizes=[1, 4, 3, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
use_gpu=use_gpu,
data_format=data_format)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 4, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 4, 2, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu,
dilations=[2, 2])
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding2x2BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[2, 3, 1, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[2, 2, 5, 1],
strides=[3, 1],
padding=[[2, 2], [2, 2]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 3, 6, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 3, 4, 1],
strides=[1, 2],
padding=[[2, 2], [2, 2]],
data_format=data_format,
use_gpu=use_gpu,
dilations=[2, 3])
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_1_8_4_1_BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 10, 8, 1],
strides=[1, 1],
padding=[[1, 8], [4, 2]],
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 5, 3, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 4, 8, 1],
strides=[3, 1],
padding=[[1, 8], [4, 2]],
use_gpu=use_gpu,
data_format=data_format)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_5_0_2_2_BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[1, 7, 7, 1],
strides=[1, 1],
padding=[[5, 0], [2, 2]],
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 4, 2, 1],
filter_sizes=[3, 3, 1, 1],
output_sizes=[1, 5, 2, 1],
strides=[1, 2],
padding=[[5, 0], [2, 2]],
data_format=data_format,
use_gpu=use_gpu,
dilations=[2, 1])
# Gradient checkers
def ConstructAndTestGradient(self,
batch,
input_rows,
input_cols,
filter_rows,
filter_cols,
in_depth,
out_depth,
stride_rows,
stride_cols,
padding,
test_input,
data_format,
use_gpu,
num_groups=1,
max_err=0.002):
assert in_depth % num_groups == 0 and out_depth % num_groups == 0
input_shape = [batch, input_rows, input_cols, in_depth]
filter_shape = [filter_rows, filter_cols, in_depth // num_groups, out_depth]
# TODO(yangke): re-factor the computation of output shape.
if padding == "VALID":
output_rows = (input_rows - filter_rows + stride_rows) // stride_rows
output_cols = (input_cols - filter_cols + stride_cols) // stride_cols
elif padding == "SAME":
output_rows = (input_rows + stride_rows - 1) // stride_rows
output_cols = (input_cols + stride_cols - 1) // stride_cols
else:
self.assertIsInstance(padding, (list, tuple))
output_rows = (input_rows + padding[1][0] + padding[1][1] - filter_rows +
stride_rows) // stride_rows
output_cols = (input_cols + padding[2][0] + padding[2][1] - filter_cols +
stride_cols) // stride_cols
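    # Equivalently: VALID output size is ceil((in - filter + 1) / stride) and
    # SAME output size is ceil(in / stride). For example, input_rows=5,
    # filter_rows=3, stride_rows=2 gives VALID (5 - 3 + 2) // 2 = 2 and
    # SAME (5 + 2 - 1) // 2 = 3.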
output_shape = [batch, output_rows, output_cols, out_depth]
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
# Conv2DGrad functions are not compiled for double due to
# a problem in the way Eigen's Conv2DGrad works for double.
# So we disable the DOUBLE path. We should re-enable this
# when double support returns for CPU and/or GPU.
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with self.cached_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(
input_data, shape=input_shape, dtype=dtype, name="input")
filter_tensor = constant_op.constant(
filter_data, shape=filter_shape, dtype=dtype, name="filter")
strides = [1, stride_rows, stride_cols, 1]
new_padding = padding
if data_format == "NCHW":
new_input_tensor = test_util.NHWCToNCHW(input_tensor)
strides = test_util.NHWCToNCHW(strides)
if isinstance(padding, (list, tuple)):
new_padding = test_util.NHWCToNCHW(padding)
else:
new_input_tensor = input_tensor
conv = nn_ops.conv2d(
new_input_tensor,
filter_tensor,
strides,
new_padding,
data_format=data_format,
name="conv")
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
self.assertEqual(output_shape, conv.get_shape())
if test_input:
jacob_t, jacob_n = gradient_checker.compute_gradient(input_tensor,
input_shape,
conv,
output_shape)
else:
jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor,
filter_shape,
conv,
output_shape)
if dtype == dtypes.float32:
reference_jacob_t = jacob_t
err = np.fabs(jacob_t - jacob_n).max()
else:
# Compare fp16 theoretical gradients to fp32 theoretical gradients,
# since fp16 numerical gradients are too imprecise.
err = np.fabs(jacob_t - reference_jacob_t).max()
tf_logging.debug("conv_2d gradient error = %s", err)
self.assertLess(err, max_err)
@test_util.deprecated_graph_mode_only
def testInputGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=3,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient1x1PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.0025)
@test_util.deprecated_graph_mode_only
def testFilterGradient1x1PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient1x1PaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient1x1PaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient2x2PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [2, 2], [2, 2], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.003)
@test_util.deprecated_graph_mode_only
def testFilterGradient2x2PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [2, 2], [2, 2], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.003)
@test_util.deprecated_graph_mode_only
def testInputGradient1_2_3_4PaddingStride3x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=5,
filter_rows=4,
filter_cols=2,
in_depth=3,
out_depth=2,
stride_rows=3,
stride_cols=2,
padding=[[0, 0], [1, 2], [3, 4], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient1_2_3_4PaddingStride3x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=5,
filter_rows=4,
filter_cols=2,
in_depth=3,
out_depth=2,
stride_rows=3,
stride_cols=2,
padding=[[0, 0], [1, 2], [3, 4], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient4_3_2_1PaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=3,
input_rows=5,
input_cols=7,
filter_rows=3,
filter_cols=2,
in_depth=1,
out_depth=2,
stride_rows=2,
stride_cols=1,
padding=[[0, 0], [4, 3], [2, 1], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient4_3_2_1PaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=3,
input_rows=5,
input_cols=7,
filter_rows=3,
filter_cols=2,
in_depth=1,
out_depth=2,
stride_rows=2,
stride_cols=1,
padding=[[0, 0], [4, 3], [2, 1], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient0_0_0_5PaddingStride1x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=6,
input_cols=7,
filter_rows=3,
filter_cols=4,
in_depth=3,
out_depth=2,
stride_rows=1,
stride_cols=2,
padding=[[0, 0], [0, 0], [0, 5], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient0_0_0_5PaddingStride1x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=6,
input_cols=7,
filter_rows=3,
filter_cols=4,
in_depth=3,
out_depth=2,
stride_rows=1,
stride_cols=2,
padding=[[0, 0], [0, 0], [0, 5], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
c1 = nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], c1.get_shape().as_list())
# Incorrect input shape.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[1, 3]),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME")
# Incorrect filter shape.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(
dtypes.float32, shape=[1, 3]),
strides=[1, 1, 1, 1],
padding="SAME")
# Depth mismatch.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[4, 4, 2, 2]),
strides=[1, 1, 1, 1],
padding="SAME")
# Input depth divisible by filter depth (group convolution).
# No exceptions should appear.
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 8]),
array_ops.placeholder(dtypes.float32, shape=[4, 4, 2, 16]),
strides=[1, 1, 1, 1],
padding="SAME")
# Negative padding.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[[0, 0], [0, -1], [1, 2], [0, 0]])
# Nonzero padding in nonspatial dimension.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[[1, 0], [0, 0], [0, 0], [0, 0]])
# Nonzero NCHW padding in nonspatial dimension.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[[0, 0], [0, 1], [0, 0], [0, 0]],
data_format="NCHW")
# Wrong amount of padding.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[[0, 0], [0, 0], [0, 0]])
# Only specify one padding amount per dimension.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[[0], [0], [0], [0]])
# Explicit padding elements are not lists.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[0, 0, 0, 0])
@test_util.deprecated_graph_mode_only
@test_util.disable_xla("b/123337890") # Error messages differ
def testOpEdgeCases(self):
with self.cached_session() as sess:
# Illegal strides.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"strides in the batch and depth"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[2, 1, 1, 1],
padding="SAME"))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"strides in the batch and depth"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 2],
padding="SAME"))
# Filter larger than input.
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[20, 21, 3, 2]),
strides=[1, 1, 1, 1],
padding="VALID"))
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[21, 20, 3, 2]),
strides=[1, 1, 1, 1],
padding="VALID"))
# Filter larger than input + padding.
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(dtypes.float32, shape=[24, 25, 3, 2]),
strides=[1, 1, 1, 1],
padding=[[0, 0], [2, 2], [2, 2], [0, 0]]))
# Negative padding during backprop.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"nonnegative"):
sess.run(
nn_ops.conv2d_backprop_input([32, 20, 20, 3],
array_ops.placeholder(
dtypes.float32,
shape=[18, 18, 3, 2]),
array_ops.placeholder(
dtypes.float32,
shape=[32, 3, 2, 2]),
strides=[1, 1, 1, 1],
padding=[[0, 0], [-1, 0], [0, 0],
[0, 0]]))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"nonnegative"):
sess.run(
nn_ops.conv2d_backprop_filter(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
[18, 18, 3, 2],
array_ops.placeholder(dtypes.float32, shape=[32, 3, 2, 2]),
strides=[1, 1, 1, 1],
padding=[[0, 0], [-1, 0], [0, 0], [0, 0]]))
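# The explicit-padding edge cases above reject malformed specs; a well-formed
# spec pads each dimension independently as [pad_before, pad_after]. A small
# sketch of the resulting spatial output size (illustrative only; the helper
# below is not part of the original test suite):
def _explicit_padding_output_size(input_size, filter_size, stride, pad_before,
                                  pad_after):
  # Identical to a VALID convolution applied to the padded input.
  return (input_size + pad_before + pad_after - filter_size) // stride + 1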
class DepthwiseConv2DTest(test.TestCase):
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with an array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.cached_session() as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
conv = nn_impl.depthwise_conv2d(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = self.evaluate(conv)
tf_logging.debug("value = %s", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output)
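# The hand-computed outputs above can be cross-checked with a short NumPy
# reference. This is an illustrative sketch only (the helper below is not
# part of the original test suite): for VALID padding and stride 1, output
# channel c * depth_multiplier + m at spatial position (i, j) is the sum over
# the filter window of input[i + di, j + dj, c] * filter[di, dj, c, m].
def _depthwise_conv2d_reference_check():
  x = np.arange(1.0, 13.0).reshape(1, 2, 3, 2)  # [batch, rows, cols, in_depth]
  f = np.arange(1.0, 17.0).reshape(2, 2, 2, 2)  # [rows, cols, in_depth, mult]
  out = np.zeros((1, 1, 2, 4))  # VALID output: 1 x 2 spatial, depth 2 * 2.
  for j in range(2):  # Output column.
    for c in range(2):  # Input channel.
      for m in range(2):  # Depth multiplier.
        patch = x[0, 0:2, j:j + 2, c]
        out[0, 0, j, c * 2 + m] = np.sum(patch * f[:, :, c, m])
  # Matches the expected_output above:
  # [196, 216, 272, 296, 252, 280, 344, 376].
  return out.ravel()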
class SeparableConv2DTest(test.TestCase):
def _InitValues(self, sizes):
"""Initializes values for input tensors.
Args:
sizes: Tensor dimensions.
Returns:
Tensor initialized to values.
"""
total_size = 1
for s in sizes:
total_size *= s
x = [f * 0.5 for f in range(1, total_size + 1)]
return constant_op.constant(x, shape=sizes)
def _VerifyValues(self,
tensor_in_sizes,
depthwise_filter_in_sizes,
pointwise_filter_in_sizes,
stride,
padding,
expected,
data_format="NHWC"):
"""Verifies the output values of the separable convolution function.
Args:
tensor_in_sizes: Input tensor dimensions.
depthwise_filter_in_sizes: Depthwise filter tensor dimensions.
pointwise_filter_in_sizes: Pointwise filter tensor dimensions.
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
data_format: string data format for input tensor.
"""
with self.cached_session(use_gpu=True) as sess:
t1 = self._InitValues(tensor_in_sizes)
f1 = self._InitValues(depthwise_filter_in_sizes)
f1.set_shape(depthwise_filter_in_sizes)
f2 = self._InitValues(pointwise_filter_in_sizes)
real_t1 = t1
strides = [1, stride, stride, 1]
if data_format == "NCHW":
real_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
conv = nn_impl.separable_conv2d(
real_t1,
f1,
f2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = array_ops.transpose(conv, [0, 2, 3, 1])
value = self.evaluate(conv)
tf_logging.debug("value = %s", value)
self.assertArrayNear(expected, np.ravel(value), 1e-3)
self.assertShapeEqual(value, conv)
def _testSeparableConv2D(self, data_format):
# The output is the result of two convolutions:
# First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 2, 3].
# Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 7].
# Complexity is O(2*3*2*2 + 6*7*1*1) as opposed to O(2*7*2*2).
expected_output = [
6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5,
8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5,
11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5,
4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5,
15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5,
18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5,
6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5,
19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5,
22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5,
24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5,
10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75,
7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25,
7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75,
2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 7],
stride=1,
padding="SAME",
expected=expected_output,
data_format=data_format)
def testSeparableConv2D(self):
self._testSeparableConv2D("NHWC")
def disabledtestSeparableConv2DNCHW(self):
if not test.is_gpu_available():
return
self._testSeparableConv2D("NCHW")
def _testSeparableConv2DEqualInputOutputDepth(self, data_format):
# The output is the result of two convolutions:
# First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 3, 3].
# Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 6].
# Complexity is O(2*3*2*2 + 6*6*1*1) as opposed to O(2*6*2*2).
expected_output = [
5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0, 7047.0, 7449.0, 7851.0,
8253.0, 8655.0, 9057.0, 8352.0, 8829.0, 9306.0, 9783.0, 10260.0,
10737.0, 3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0, 10962.0,
11589.0, 12216.0, 12843.0, 13470.0, 14097.0, 12267.0, 12969.0, 13671.0,
14373.0, 15075.0, 15777.0, 13572.0, 14349.0, 15126.0, 15903.0, 16680.0,
17457.0, 5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0, 16182.0,
17109.0, 18036.0, 18963.0, 19890.0, 20817.0, 17487.0, 18489.0, 19491.0,
20493.0, 21495.0, 22497.0, 18792.0, 19869.0, 20946.0, 22023.0, 23100.0,
24177.0, 7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0, 4963.5, 5227.5,
5491.5, 5755.5, 6019.5, 6283.5, 5328.0, 5611.5, 5895.0, 6178.5, 6462.0,
6745.5, 5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5, 1757.25, 1840.5,
1923.75, 2007.0, 2090.25, 2173.5
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 6],
stride=1,
padding="SAME",
expected=expected_output,
data_format=data_format)
@test_util.deprecated_graph_mode_only
def testSeparableConv2DEqualInputOutputDepth(self):
self._testSeparableConv2DEqualInputOutputDepth("NHWC")
def testSeparableConv2DEqualInputOutputDepthNCHW(self):
if not test.is_gpu_available():
return
self._testSeparableConv2DEqualInputOutputDepth("NCHW")
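# As the comments in _testSeparableConv2D describe, a separable convolution
# factors into a depthwise step followed by a 1x1 pointwise step. A minimal
# sketch of that decomposition (illustrative only; the helper below is an
# assumption for exposition and is not used by the tests in this file):
def _separable_conv2d_decomposed(x, depthwise_filter, pointwise_filter,
                                 strides, padding):
  # Depthwise step: each input channel gets its own depth_multiplier filters,
  # expanding the channel count to in_depth * depth_multiplier.
  depthwise_out = nn_ops.depthwise_conv2d_native(
      x, depthwise_filter, strides=strides, padding=padding)
  # Pointwise step: a 1x1 convolution mixes the expanded channels.
  return nn_ops.conv2d(
      depthwise_out, pointwise_filter, strides=[1, 1, 1, 1], padding="SAME")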
class DeepConv2DTest(test.TestCase):
def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides,
padding):
"""Verifies that DeepConv2D and Conv2D produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
conv_strides: [row_stride, col_stride] for the convolution.
padding: Padding type.
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
with self.cached_session(use_gpu=False) as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding)
os.environ["TF_USE_DEEP_CONV2D"] = "0"
values_expect = self.evaluate([conv])
os.environ["TF_USE_DEEP_CONV2D"] = "1"
values_test = self.evaluate([conv])
self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5)
def _RunTestCases(self, conv_strides, padding):
input_sizes = [[5, 5, 5, 1248], [3, 17, 17, 192], [2, 35, 35, 288],
[2, 6, 8, 517], [2, 7, 4, 81], [3, 11, 3, 77]]
filter_sizes = [[3, 3, 1248, 128], [3, 3, 192, 192], [3, 3, 288, 384],
[3, 3, 517, 64], [3, 3, 81, 77], [3, 3, 77, 181]]
for input_shape, filter_shape in zip(input_sizes, filter_sizes):
self._CompareFwdConv2D(input_shape, filter_shape, conv_strides, padding)
def testConv2D3x3FilterStride1x1Valid(self):
self._RunTestCases([1, 1], "VALID")
def testConv2D3x3FilterStride1x1Same(self):
self._RunTestCases([1, 1], "SAME")
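# The comparison above toggles the TF_USE_DEEP_CONV2D environment variable
# between runs of the same graph. A hedged sketch of that toggle pattern
# (illustrative only; assumes, as the test above does, that the variable is
# consulted when the kernel executes):
def _run_with_deep_conv2d(run_fn, enabled):
  os.environ["TF_USE_DEEP_CONV2D"] = "1" if enabled else "0"
  try:
    return run_fn()
  finally:
    del os.environ["TF_USE_DEEP_CONV2D"]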
class Conv2DBenchmark(test.Benchmark):
def benchmarkGPUConvStackFirst(self):
# Benchmark the first iteration of a conv-net with many identical conv
# operations.
if not test.is_gpu_available():
return
with ops.Graph().as_default(), session_lib.Session() as session:
batch_size = 1
timesteps = 600
features = 1
inputs = random_ops.random_uniform(
[batch_size, 1, timesteps, features], seed=1234)
num_outputs_list = [512] * 40 + [1]
kernel_w = 3
x = inputs
for num_outputs in num_outputs_list:
x = convolutional.conv2d(x, num_outputs, [1, kernel_w])
outputs = x
variables.global_variables_initializer().run()
num_iterations = 4
for iter_index in xrange(num_iterations):
start = time.time()
session.run(outputs)
wall_time = time.time() - start
self.report_benchmark(
name="conv_stack_iter_%d" % iter_index, wall_time=wall_time)
tf_logging.info("conv_stack_iter_%d: %.4f" % (iter_index, wall_time))
def _bench_op(self, name, op, burn_iters, num_iters):
config = config_pb2.ConfigProto()
# Prevent Grappler from optimizing away the entire graph.
config.graph_options.rewrite_options.dependency_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
with session_lib.Session(config=config) as session:
variables.global_variables_initializer().run()
self.run_op_benchmark(
session, op, burn_iters=burn_iters, min_iters=num_iters, name=name)
def benchmarkExplicitVsManualPadding(self):
"""Compare performance of EXPLICIT padding and calling tf.pad.
A Conv2D op with EXPLICIT padding is benchmarked, and a tf.pad with the same
padding followed by an equivalent Conv2D op is benchmarked.
"""
if not test.is_gpu_available():
return
with ops.Graph().as_default():
burn_iters = 15
num_iters = 300
batch_size = 64
# The input and filter correspond to the first layer of Resnet50.
input = variables.Variable( # pylint: disable=redefined-builtin
random_ops.random_uniform([
batch_size,
3,
224,
224
]))
filter = variables.Variable(random_ops.random_uniform([7, 7, 3, 64])) # pylint: disable=redefined-builtin
strides = [1, 1, 2, 2]
padding = [(0, 0), (0, 0), (3, 3), (3, 3)]
output_explicit_pad = nn_ops.conv2d(
input, filter, strides, padding=padding, data_format="NCHW")
input_padded = array_ops.pad(input, padding)
output_manual_pad = nn_ops.conv2d(
input_padded, filter, strides, padding="VALID", data_format="NCHW")
# Benchmark just the forward pass.
self._bench_op("explicit_pad_forward", output_explicit_pad.op, burn_iters,
num_iters)
self._bench_op("manual_pad_forward", output_manual_pad.op, burn_iters,
num_iters)
# Benchmark both the forward and backwards passes.
input_grad_explicit_pad, filter_grad_explicit_pad = (
gradients_impl.gradients(output_explicit_pad, [input, filter]))
self._bench_op(
"explicit_pad_backward",
control_flow_ops.group(input_grad_explicit_pad,
filter_grad_explicit_pad), burn_iters,
num_iters)
input_grad_manual_pad, filter_grad_manual_pad = gradients_impl.gradients(
output_manual_pad, [input, filter])
self._bench_op(
"manual_pad_backward",
control_flow_ops.group(input_grad_manual_pad, filter_grad_manual_pad),
burn_iters, num_iters)
def benchmarkExplicitVsSamePaddingGraph(self):
"""Compare performance of EXPLICIT and SAME padding in graph mode.
A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op
with explicit padding is benchmarked, where the padding is the same as in
the SAME case. The purpose is to ensure EXPLICIT padding is just as
efficient as the SAME case.
"""
if not test.is_gpu_available():
return
with ops.Graph().as_default():
burn_iters = 15
num_convs = 20
num_iters = 50
batch_size = 64
# The input and filter correspond to a middle layer of Resnet50.
input = variables.Variable( # pylint: disable=redefined-builtin
random_ops.random_uniform([
batch_size,
256,
14,
14
]))
filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256])) # pylint: disable=redefined-builtin
strides = [1, 1, 1, 1]
padding = [(0, 0), (0, 0), (1, 1), (1, 1)]
output_explicit_pad = input
output_same_pad = input
for _ in range(num_convs):
output_explicit_pad = nn_ops.conv2d(
output_explicit_pad,
filter,
strides,
padding=padding,
data_format="NCHW")
output_same_pad = nn_ops.conv2d(
output_same_pad,
filter,
strides,
padding="SAME",
data_format="NCHW")
grad_explicit_pad, = gradients_impl.gradients(output_explicit_pad, filter)
grad_same_pad, = gradients_impl.gradients(output_same_pad, filter)
self._bench_op("graph_explicit_pad", grad_explicit_pad.op, burn_iters,
num_iters)
self._bench_op("graph_same_pad", grad_same_pad.op, burn_iters, num_iters)
def benchmarkExplicitVsSamePaddingEager(self):
"""Compare performance of EXPLICIT and SAME padding in eager mode.
A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op
with explicit padding is benchmarked, where the padding is the same as in
the SAME case. Currently, EXPLICIT padding is slightly slower because the
Python padding list must be checked and processed before the Conv2D op can
run.
"""
# TODO(reedwm): Make EXPLICIT padding as fast as SAME padding.
if not test.is_gpu_available():
return
with context.eager_mode():
burn_iters = 15
num_convs = 20
num_iters = 50
batch_size = 64
# The input and filter correspond to a middle layer of Resnet50.
input = variables.Variable( # pylint: disable=redefined-builtin
random_ops.random_uniform([
batch_size,
256,
14,
14
]))
filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256])) # pylint: disable=redefined-builtin
strides = [1, 1, 1, 1]
padding = [(0, 0), (0, 0), (1, 1), (1, 1)]
output_explicit_pad = input
output_same_pad = input
for _ in range(burn_iters):
output_explicit_pad = nn_ops.conv2d(
output_explicit_pad,
filter,
strides,
padding=padding,
data_format="NCHW")
output_same_pad = nn_ops.conv2d(
output_same_pad,
filter,
strides,
padding="SAME",
data_format="NCHW")
start = time.time()
for _ in range(num_iters):
with backprop.GradientTape() as tape:
for _ in range(num_convs):
output_explicit_pad = nn_ops.conv2d(
output_explicit_pad,
filter,
strides,
padding=padding,
data_format="NCHW")
tape.gradient(output_explicit_pad, filter)
end = time.time()
self.report_benchmark(
name="eager_explicit_pad",
wall_time=(end - start) / num_iters,
iters=num_iters)
start = time.time()
for _ in range(num_iters):
with backprop.GradientTape() as tape:
for _ in range(num_convs):
output_same_pad = nn_ops.conv2d(
output_same_pad,
filter,
strides,
padding="SAME",
data_format="NCHW")
tape.gradient(output_same_pad, filter)
end = time.time()
self.report_benchmark(
name="eager_same_pad",
wall_time=(end - start) / num_iters,
iters=num_iters)
def GetInceptionFwdTest(input_size, filter_size, stride, padding,
gpu_only=False):
def Test(self):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping InceptionFwd %s", (input_size, filter_size,
stride, padding))
return
tf_logging.info("Testing InceptionFwd %s", (input_size, filter_size, stride,
padding))
self._CompareFwdValues(input_size, filter_size, [stride, stride], padding)
return Test
def GetInceptionFwdDilatedConvTest(input_size, filter_size, stride, padding):
def Test(self):
if stride == 1:
tf_logging.info("Testing InceptionFwd with dilations %s",
(input_size, filter_size, stride, padding))
self._VerifyDilatedConvValues(
tensor_in_sizes=input_size,
filter_in_sizes=filter_size,
strides=[stride, stride],
dilations=[2, 2],
padding=padding,
rtol=5e-4)
return Test
def GetInceptionBackInputTest(input_size, filter_size, output_size, stride,
padding,
gpu_only=False):
def Test(self):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping InceptionBackInput %s",
(input_size, filter_size, output_size, stride, padding))
return
tf_logging.info("Testing InceptionBackInput %s",
(input_size, filter_size, output_size, stride, padding))
self._CompareBackpropInput(input_size, filter_size, output_size,
[stride, stride], padding)
return Test
def GetInceptionBackFilterTest(input_size, filter_size, output_size, strides,
padding, gpu_only=False):
def Test(self):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping InceptionBackFilter %s",
(input_size, filter_size, output_size, strides, padding))
return
tf_logging.info("Testing InceptionBackFilter %s",
(input_size, filter_size, output_size, strides, padding))
self._CompareBackFilter(input_size, filter_size, output_size, strides,
padding)
return Test
if __name__ == "__main__":
for index, (input_size_, filter_size_, output_size_, stride_,
padding_) in enumerate(GetShrunkInceptionShapes()):
setattr(Conv2DTest, "testInceptionFwd_" + str(index),
test_util.run_in_graph_and_eager_modes(
GetInceptionFwdTest(input_size_, filter_size_, stride_,
padding_)))
setattr(
Conv2DTest, "testInceptionFwdDilatedConv_" + str(index),
test_util.run_in_graph_and_eager_modes(GetInceptionFwdDilatedConvTest(
input_size_, filter_size_, stride_, padding_)))
setattr(Conv2DTest, "testInceptionBackInput_" + str(index),
test_util.run_in_graph_and_eager_modes(
GetInceptionBackInputTest(input_size_, filter_size_,
output_size_, stride_, padding_)))
setattr(Conv2DTest, "testInceptionBackFilter_" + str(index),
test_util.run_in_graph_and_eager_modes(
GetInceptionBackFilterTest(input_size_, filter_size_,
output_size_, [stride_, stride_],
padding_)))
# TODO(b/35359731)
# Fwd, BckInput, and BackFilter to test that for certain input parameter
# set, winograd nonfused algorithm will be excluded from conv autotune. If
# in such case, winograd nonfused algorithm is added as one option of the
# conv autotune, and cuDNN version is smaller than 7, the following tests
# will fail.
ishape = [1, 400, 400, 1]
fshape = [1, 1, 1, 256]
oshape = [1, 400, 400, 256]
setattr(Conv2DTest, "testInceptionFwd_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionFwdTest(ishape, fshape, 1, "SAME", gpu_only=True)))
setattr(Conv2DTest, "testInceptionFwdDilatedConv_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionFwdDilatedConvTest(ishape, fshape, 1, "SAME")))
setattr(Conv2DTest, "testInceptionBackInput_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionBackInputTest(ishape, fshape, oshape, 1, "SAME",
gpu_only=True)))
setattr(Conv2DTest, "testInceptionBackFilter_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionBackFilterTest(ishape, fshape, oshape, [1, 1], "SAME",
gpu_only=True)))
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/conv_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reshape_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class ReshapeTest(test.TestCase):
def _testReshape(self, x, y, use_gpu=False):
with self.cached_session(use_gpu=use_gpu):
np_ans = x.reshape(y)
tf_ans = array_ops.reshape(x, y)
out = self.evaluate(tf_ans)
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertShapeEqual(np_ans, tf_ans)
# Repeat with an int64 shape tensor.
y64 = constant_op.constant(y, dtype=dtypes.int64)
tf_ans = array_ops.reshape(x, y64)
out = self.evaluate(tf_ans)
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertShapeEqual(np_ans, tf_ans)
def _testZeroDimReshape(self, x, shape, expected, use_gpu=False):
with self.cached_session(use_gpu=use_gpu):
y = array_ops.reshape(x, shape)
out = self.evaluate(y)
self.assertEqual(expected, out.shape)
# Repeat with an int64 shape tensor.
shape64 = constant_op.constant(shape, dtype=dtypes.int64)
y = array_ops.reshape(x, shape64)
out = self.evaluate(y)
self.assertEqual(expected, out.shape)
def _testBothReshape(self, x, y):
self._testReshape(x, y, False)
self._testReshape(x, y, True)
def testBoolBasic(self):
x = np.arange(1., 7.).reshape([1, 6]) > 3
self._testBothReshape(x, [2, 3])
def testFloatBasic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.float32)
self._testBothReshape(x, [2, 3])
def testDoubleBasic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.float64)
self._testBothReshape(x, [2, 3])
def testInt32Basic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.int32)
self._testBothReshape(x, [2, 3])
def testComplex64Basic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.complex64)
self._testBothReshape(x, [2, 3])
def testComplex128Basic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.complex128)
self._testBothReshape(x, [2, 3])
def testFloatReshapeThreeDimensions(self):
x = np.arange(1., 28.).reshape([1, 27]).astype(np.float32)
self._testBothReshape(x, [3, 3, 3])
def testFloatUnspecifiedDimOnly(self):
x = np.arange(1., 7.).reshape([6]).astype(np.float32)
self._testBothReshape(x, [-1])
def testFloatUnspecifiedDimBegin(self):
x = np.arange(1., 7.).reshape([6]).astype(np.float32)
self._testBothReshape(x, [-1, 2])
def testFloatUnspecifiedDimEnd(self):
x = np.arange(1., 7.).reshape([6]).astype(np.float32)
self._testBothReshape(x, [3, -1])
def testZeroDimBasic(self):
x = np.zeros([0, 6]).astype(np.float32)
self._testBothReshape(x, [0, 2, 3])
def testZeroDimReshapeR1(self):
x = np.zeros([0, 6]).astype(np.float32)
self._testBothReshape(x, [-1])
def testZeroDimReshapeR3(self):
x = np.zeros([0, 6]).astype(np.float32)
self._testBothReshape(x, [-1, 2, 3])
# TODO(vrv): Add tests for failure conditions once python test_util
# reports errors.
@test_util.run_deprecated_v1
def testFloatReshapeGradThreeDimensions(self):
x = np.arange(1., 25.).reshape([2, 3, 4]).astype(np.float32)
s = list(np.shape(x))
with self.cached_session():
input_tensor = constant_op.constant(x)
reshape_out = array_ops.reshape(input_tensor, [1, 8, 3])
err = gradient_checker.compute_gradient_error(
input_tensor, s, reshape_out, s, x_init_value=x)
print("Reshape gradient error = " % err)
self.assertLess(err, 1e-3)
def testFloatEmpty(self):
x = np.empty((0, 0, 0, 0), dtype=np.float32)
self._testBothReshape(x, [1, 2, 3, 0])
self._testBothReshape(x, [1, 0, 0, 4])
self._testBothReshape(x, [0, 0, 0, 0])
self._testBothReshape(x, [1, 2, 0])
self._testBothReshape(x, [0, 0, 0])
self._testBothReshape(x, [1, -1, 5])
def testZeroDimWithUnspecifiedDim(self):
for use_gpu in (True, False):
self._testZeroDimReshape(x=np.zeros([0, 6]).astype(np.float32),
shape=[0, -1, 3],
expected=(0, 2, 3),
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testErrors(self):
y = constant_op.constant(0.0, shape=[23, 29, 31])
with self.assertRaisesRegexp(ValueError, "must be evenly divisible by 17"):
array_ops.reshape(y, [17, -1])
z = constant_op.constant(0.0, shape=[32, 128])
with self.assertRaisesRegexp(ValueError,
"Cannot reshape a tensor with 4096 elements"):
array_ops.reshape(z, [4095])
@test_util.run_deprecated_v1
def testPartialShapes(self):
x = array_ops.placeholder(dtypes.float32)
# Unknown input shape, partial new shape.
y = array_ops.reshape(x, [1, 1, -1, 1])
self.assertEqual([1, 1, None, 1], y.get_shape().as_list())
# Unknown input shape, unknown new shape.
y = array_ops.reshape(x, array_ops.placeholder(dtypes.int32))
self.assertEqual(None, y.get_shape().ndims)
# Unknown input shape, known rank for new shape.
y = array_ops.reshape(x, array_ops.placeholder(dtypes.int32, shape=(3,)))
self.assertEqual([None, None, None], y.get_shape().as_list())
# Unknown input shape, partial new shape using `tf.stack()`.
y = array_ops.reshape(x, [array_ops.placeholder(dtypes.int32), 37])
self.assertEqual([None, 37], y.get_shape().as_list())
# Unknown input shape, partial new shape using `tf.concat()`.
y = array_ops.reshape(
x,
array_ops.concat(
[array_ops.placeholder(
dtypes.int32, shape=(2,)), [37, 42]], 0))
self.assertEqual([None, None, 37, 42], y.get_shape().as_list())
# Unknown input shape, partial new shape using `tf.shape()`.
y = array_ops.reshape(
x,
array_ops.shape(
array_ops.placeholder(
dtypes.float32, shape=[None, 37, None])))
self.assertEqual([None, 37, None], y.get_shape().as_list())
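# Shape inference for a single -1 entry, exercised above, fills in the
# unknown dimension so that the total element count is preserved. A small
# sketch of that rule (illustrative only; assumes the element count divides
# evenly):
def _infer_unspecified_dim(num_elements, shape):
  known = 1
  for s in shape:
    if s != -1:
      known *= s
  return [num_elements // known if s == -1 else s for s in shape]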
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/reshape_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.linalg.linalg_impl.matrix_exponential."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
def np_expm(x): # pylint: disable=invalid-name
"""Slow but accurate Taylor series matrix exponential."""
y = np.zeros(x.shape, dtype=x.dtype)
xn = np.eye(x.shape[0], dtype=x.dtype)
for n in range(40):
if n > 0:
xn /= float(n)
y += xn
xn = np.dot(xn, x)
return y
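# A quick sanity check for the Taylor reference above (a sketch, not part of
# the test suite): for a diagonal matrix the exponential is elementwise exp
# of the diagonal, which forty Taylor terms reproduce to high accuracy.
def _check_np_expm_on_diagonal():
  d = np.diag(np.array([0.5, -1.0, 2.0]))
  expected = np.diag(np.exp(np.array([0.5, -1.0, 2.0])))
  assert np.allclose(np_expm(d), expected, rtol=1e-6, atol=1e-6)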
class ExponentialOpTest(test.TestCase):
def _verifyExponential(self, x, np_type):
inp = x.astype(np_type)
with test_util.use_gpu():
tf_ans = linalg_impl.matrix_exponential(inp)
if x.size == 0:
np_ans = np.empty(x.shape, dtype=np_type)
else:
if x.ndim > 2:
np_ans = np.zeros(inp.shape, dtype=np_type)
for i in itertools.product(*[range(x) for x in inp.shape[:-2]]):
np_ans[i] = np_expm(inp[i])
else:
np_ans = np_expm(inp)
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-3)
def _verifyExponentialReal(self, x):
for np_type in [np.float32, np.float64]:
self._verifyExponential(x, np_type)
def _verifyExponentialComplex(self, x):
for np_type in [np.complex64, np.complex128]:
self._verifyExponential(x, np_type)
def _makeBatch(self, matrix1, matrix2):
matrix_batch = np.concatenate(
[np.expand_dims(matrix1, 0),
np.expand_dims(matrix2, 0)])
matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
return matrix_batch
def testNonsymmetricReal(self):
# 2x2 matrices
matrix1 = np.array([[1., 2.], [3., 4.]])
matrix2 = np.array([[1., 3.], [3., 5.]])
self._verifyExponentialReal(matrix1)
self._verifyExponentialReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyExponentialReal(self._makeBatch(matrix1, matrix2))
@test_util.run_deprecated_v1
def testNonsymmetricComplex(self):
matrix1 = np.array([[1., 2.], [3., 4.]])
matrix2 = np.array([[1., 3.], [3., 5.]])
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyExponentialComplex(matrix1)
self._verifyExponentialComplex(matrix2)
# Complex batch
self._verifyExponentialComplex(self._makeBatch(matrix1, matrix2))
def testSymmetricPositiveDefiniteReal(self):
# 2x2 matrices
matrix1 = np.array([[2., 1.], [1., 2.]])
matrix2 = np.array([[3., -1.], [-1., 3.]])
self._verifyExponentialReal(matrix1)
self._verifyExponentialReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyExponentialReal(self._makeBatch(matrix1, matrix2))
def testSymmetricPositiveDefiniteComplex(self):
matrix1 = np.array([[2., 1.], [1., 2.]])
matrix2 = np.array([[3., -1.], [-1., 3.]])
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyExponentialComplex(matrix1)
self._verifyExponentialComplex(matrix2)
# Complex batch
self._verifyExponentialComplex(self._makeBatch(matrix1, matrix2))
@test_util.run_deprecated_v1
def testNonSquareMatrix(self):
# When the exponential of a non-square matrix is attempted, we should
# return an error.
with self.assertRaises(ValueError):
linalg_impl.matrix_exponential(np.array([[1., 2., 3.], [3., 4., 5.]]))
@test_util.run_deprecated_v1
def testWrongDimensions(self):
# The input to the exponential should be at least a 2-dimensional tensor.
tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_impl.matrix_exponential(tensor3)
def testEmpty(self):
self._verifyExponentialReal(np.empty([0, 2, 2]))
self._verifyExponentialReal(np.empty([2, 0, 0]))
@test_util.run_deprecated_v1
def testDynamic(self):
with self.session(use_gpu=True) as sess:
inp = array_ops.placeholder(ops.dtypes.float32)
expm = linalg_impl.matrix_exponential(inp)
matrix = np.array([[1., 2.], [3., 4.]])
sess.run(expm, feed_dict={inp: matrix})
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
expm1 = linalg_impl.matrix_exponential(matrix1)
expm2 = linalg_impl.matrix_exponential(matrix2)
expm = self.evaluate([expm1, expm2])
self.assertAllEqual(expm[0], expm[1])
class MatrixExponentialBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (
2.0 * n) + np.diag(np.ones(n).astype(np.float32))
return variables.Variable(np.tile(matrix, batch_shape + (1, 1)))
def benchmarkMatrixExponentialOp(self):
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = self._GenerateMatrix(shape)
expm = linalg_impl.matrix_exponential(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(expm),
min_iters=25,
name="matrix_exponential_cpu_{shape}".format(
shape=shape))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/gpu:0"):
matrix = self._GenerateMatrix(shape)
expm = linalg_impl.matrix_exponential(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(expm),
min_iters=25,
name="matrix_exponential_gpu_{shape}".format(
shape=shape))
def _TestRandomSmall(dtype, batch_dims, size):
def Test(self):
np.random.seed(42)
shape = batch_dims + (size, size)
matrix = np.random.uniform(
low=-1.0, high=1.0,
size=shape).astype(dtype)
self._verifyExponentialReal(matrix)
return Test
def _TestL1Norms(dtype, shape, scale):
def Test(self):
np.random.seed(42)
matrix = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape)).reshape(shape).astype(dtype)
print(dtype, shape, scale, matrix)
l1_norm = np.max(np.sum(np.abs(matrix), axis=matrix.ndim-2))
matrix /= l1_norm
self._verifyExponentialReal(scale * matrix)
return Test
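# The normalization inside _TestL1Norms pins the input's L1 operator norm
# (the maximum absolute column sum) to exactly `scale`, which is what lets
# the loop below probe matrix_exponential across different norm regimes. A
# one-step sketch of that pinning (illustrative only):
def _pin_l1_norm(matrix, target_norm):
  l1_norm = np.max(np.sum(np.abs(matrix), axis=matrix.ndim - 2))
  return matrix * (target_norm / l1_norm)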
if __name__ == "__main__":
for dtype_ in [np.float32, np.float64, np.complex64, np.complex128]:
for batch_ in [(), (2,), (2, 2)]:
for size_ in [4, 7]:
name = "%s_%d_%d" % (dtype_.__name__, len(batch_), size_)
setattr(ExponentialOpTest, "testExponentialRandomSmall_" + name,
_TestRandomSmall(dtype_, batch_, size_))
for shape_ in [(3, 3), (2, 3, 3)]:
for dtype_ in [np.float32, np.complex64]:
for scale_ in [0.1, 1.5, 5.0, 20.0]:
name = "%s_%d_%d" % (dtype_.__name__, len(shape_), int(scale_*10))
setattr(ExponentialOpTest, "testL1Norms_" + name,
_TestL1Norms(dtype_, shape_, scale_))
for dtype_ in [np.float64, np.complex128]:
for scale_ in [0.01, 0.2, 0.5, 1.5, 6.0, 25.0]:
name = "%s_%d_%d" % (dtype_.__name__, len(shape_), int(scale_*100))
setattr(ExponentialOpTest, "testL1Norms_" + name,
_TestL1Norms(dtype_, shape_, scale_))
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/matrix_exponential_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib
# TODO(yangzihao): Currently matmul autotuning is disabled by default. Use
# os.environ["TF_MATMUL_AUTOTUNE_ENABLE"] = "1" to enable it.
class MatVecTest(test_lib.TestCase):
"""Simple test for matvec, which is sugar on top of matmul."""
def testTwoByTwoCase(self):
a = np.array([[1, 2], [3, 4]])
b = np.array([5, 6])
c = math_ops.matvec(a, b)
self.assertAllEqual((2,), c.shape)
self.assertAllEqual([5 + 2 * 6, 3 * 5 + 4 * 6], c)
def _AddTest(test, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, test_util.deprecated_graph_mode_only(fn))
def _GetTransposedMatrices(x, x_name, kwargs):
if kwargs["transpose_" + x_name] is True:
return x.T
elif kwargs["adjoint_" + x_name] is True:
return np.conj(x.T)
else:
return x
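# A small illustration of the helper above (a sketch, not used by the tests):
# with transpose_a=True the left operand is pre-transposed, so that
# math_ops.matmul(effective_a, b, transpose_a=True) computes the same product
# as np.dot(a, b).
def _transposed_matrices_example():
  a = np.array([[1., 2.], [3., 4.]])
  effective_a = _GetTransposedMatrices(
      a, "a", {"transpose_a": True, "adjoint_a": False})
  return effective_a  # array([[1., 3.], [2., 4.]]); effective_a.T equals a.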
class MatMulTest(test_lib.TestCase):
pass # Filled in below
def _GetMatMulTest(a_np_, b_np_, use_static_shape_, **kwargs_):
def Test(self):
np_val = np.matrix(a_np_) * np.matrix(b_np_)
use_gpu = True
if a_np_.dtype is np.float16 and (
not test_util.GpuSupportsHalfMatMulAndConv()):
use_gpu = False
print("Built without fp16 matmul support for Cuda, running test on CPU.")
# Transpose and possibly conjugate a_np_ and b_np_ according to the
# attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
# results in a valid matrix multiplication and produces the same result as
# np.matrix(a_np_) * np.matrix(b_np_)
effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
with self.cached_session() as sess, test_util.device(use_gpu):
if use_static_shape_:
a = constant_op.constant(effective_a_np)
b = constant_op.constant(effective_b_np)
res = math_ops.matmul(a, b, **kwargs_)
tf_val = self.evaluate(res)
else:
a = array_ops.placeholder(a_np_.dtype)
b = array_ops.placeholder(b_np_.dtype)
res = math_ops.matmul(a, b, **kwargs_)
tf_val = sess.run(res, feed_dict={a: effective_a_np, b: effective_b_np})
self.assertAllCloseAccordingToType(
tf_val,
np_val,
float_rtol=3e-5,
float_atol=3e-5,
half_rtol=0.2,
half_atol=0.2)
return Test
class MatMulGradientTest(test_lib.TestCase):
pass # Will be filled in below.
def _GetMatMulGradientTest(a_np_, b_np_, use_static_shape_, **kwargs_):
def Test(self):
if not use_static_shape_ or a_np_.dtype in (np.int32, np.int64, np.float16):
self.skipTest("Skipping infeasible gradient test.")
# Transpose and possibly conjugate a_np_ and b_np_ according to the
# attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
# results in a valid matrix multiplication and produces the same result as
# np.matrix(a_np_) * np.matrix(b_np_)
effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
epsilon = np.finfo(a_np_.dtype).eps
delta = epsilon**(1.0 / 3.0)
tol = 20 * delta
with self.session():
theoretical, numerical = gradient_checker_v2.compute_gradient(
lambda x: math_ops.matmul(x, effective_b_np, **kwargs_),
[effective_a_np],
delta=delta)
self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
theoretical, numerical = gradient_checker_v2.compute_gradient(
lambda x: math_ops.matmul(effective_a_np, x, **kwargs_),
[effective_b_np],
delta=delta)
self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
return Test
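# The step size above, delta = eps ** (1/3), is the usual balance for a
# central difference: truncation error grows like delta ** 2 while roundoff
# error grows like eps / delta, and equating the two gives
# delta ~ eps ** (1/3). A tiny sketch of the two error terms (illustrative
# only):
def _central_difference_error_terms(delta, eps):
  truncation = delta ** 2  # From the Taylor remainder of the stencil.
  roundoff = eps / delta   # From cancellation in the finite difference.
  return truncation, roundoff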
class MatMulStatsTest(test_lib.TestCase):
@test_util.run_v1_only("Test requires a Graph and NodeDef inspection")
def testSimpleStatistics(self):
a = variables.Variable(random_ops.random_normal([25, 16]))
b = variables.Variable(random_ops.random_normal([16, 9]))
math_ops.matmul(a, b)
g = ops.get_default_graph()
for op in g.get_operations():
flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
if op.name == "MatMul":
self.assertEqual(7200, flops)
@test_util.run_v1_only("Test requires a Graph and NodeDef inspection")
def testTransposedStatistics(self):
a = variables.Variable(random_ops.random_normal([16, 25]))
b = variables.Variable(random_ops.random_normal([16, 9]))
math_ops.matmul(a, b, transpose_a=True)
g = ops.get_default_graph()
for op in g.get_operations():
flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
if op.name == "MatMul":
self.assertEqual(7200, flops)
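# The 7200-flop figure asserted above follows the standard 2 * m * k * n
# count for a dense matrix product: a [25, 16] by [16, 9] product performs
# 25 * 16 * 9 multiplies and as many adds. A one-line sketch:
def _matmul_flops(m, k, n):
  return 2 * m * k * n  # _matmul_flops(25, 16, 9) == 7200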
try:
# @ operator supported since python 3.5.
infix_matmul = operator.matmul
except AttributeError:
# For earlier versions of python, emulate regular behavior.
# Useful to build and test for 3.5+ on earlier versions.
def infix_matmul(x, y): # pylint: disable=invalid-name
try:
r = type(x).__matmul__(x, y)
except AttributeError:
r = NotImplemented
if r is NotImplemented and type(x) is not type(y):
try:
r = type(y).__rmatmul__(y, x)
except AttributeError:
r = NotImplemented
if r is NotImplemented:
raise TypeError("unsupported operand type(s) for @: '{}' and '{}'"
.format(type(x).__name__, type(y).__name__))
return r
class MatMulInfixOperatorTest(test_lib.TestCase):
def testMismatchedShape(self):
with self.assertRaisesRegexp(
Exception, "(Shape must be rank 2 but is rank 1|is not a matrix)"):
infix_matmul(
ops.convert_to_tensor([10.0, 20.0, 30.0]),
ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
def testMismatchedDimensions(self):
with self.assertRaisesRegexp(
Exception, "(Dimensions must be equal|Matrix size-incompatible)"):
infix_matmul(
ops.convert_to_tensor([[10.0, 20.0, 30.0]]),
ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
@test_util.run_v1_only("Tensor.op is generally not applicable in TF 2")
def testInfixMatmulIsTfMatmul(self):
a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
c = infix_matmul(a, b)
self.assertEqual(c.op.type, "MatMul")
def testInfixMatmulDoesDotProduct(self):
a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
c = infix_matmul(a, b)
d = math_ops.matmul(a, b)
self.assertAllEqual(c, d)
if __name__ == "__main__":
sizes = [1, 3, 5]
trans_options = [[False, False], [True, False], [False, True]]
# TF2 does not support placeholders under eager so we skip it
for use_static_shape in set([True, tf2.enabled()]):
for dtype in (np.int32, np.int64, np.float16, np.float32, np.float64,
np.complex64, np.complex128):
if not use_static_shape and (dtype == np.int32 or dtype == np.int64):
# TODO(rmlarsen): Re-enable this test when we have fixed the underlying
# bug in Windows (b/35935459).
continue
for m in sizes:
for n in sizes:
for k in sizes:
# Construct compatible random matrices a_np of size [m, k] and b_np
# of size [k, n].
a_np = np.random.normal(-5, 5, m * k).astype(dtype).reshape([m, k])
if dtype in (np.complex64, np.complex128):
a_np.imag = np.random.normal(-5, 5,
m * k).astype(dtype).reshape([m, k])
b_np = np.random.normal(-5, 5, k * n).astype(dtype).reshape([k, n])
if dtype in (np.complex64, np.complex128):
b_np.imag = np.random.normal(-5, 5,
k * n).astype(dtype).reshape([k, n])
for adjoint_a, transpose_a in trans_options:
for adjoint_b, transpose_b in trans_options:
name = "%s_%s_%s_%s_%s_%s_%s_%s_%s" % (
use_static_shape, dtype.__name__, m, n, k, adjoint_a,
transpose_a, adjoint_b, transpose_b)
_AddTest(MatMulTest, "MatMulTest", name,
_GetMatMulTest(
a_np,
b_np,
use_static_shape,
adjoint_a=adjoint_a,
transpose_a=transpose_a,
adjoint_b=adjoint_b,
transpose_b=transpose_b))
_AddTest(MatMulGradientTest, "MatMulGradientTest", name,
_GetMatMulGradientTest(
a_np,
b_np,
use_static_shape,
adjoint_a=adjoint_a,
transpose_a=transpose_a,
adjoint_b=adjoint_b,
transpose_b=transpose_b))
test_lib.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/matmul_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.session_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import session_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class SessionOpsTest(test.TestCase):
@test_util.run_deprecated_v1
def testHandleBasic(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = self.evaluate(h)
# Feed a tensor handle.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
y = math_ops.multiply(x, 10)
self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))
@test_util.run_deprecated_v1
def testHandleEval(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = self.evaluate(h)
# Get the tensor from its handle.
self.assertEqual(50, h.eval())
@test_util.run_deprecated_v1
def testHandleAndValue(self):
with self.cached_session() as sess:
# Return a handle and a value.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
v = math_ops.multiply(a, c)
h, v = self.evaluate([h, v])
self.assertEqual(50, h.eval())
self.assertEqual(500, v)
@test_util.run_deprecated_v1
def testHandleCond(self):
with self.cached_session() as sess:
      # Return a handle and a value.
a = constant_op.constant(10)
b = constant_op.constant(5)
p = math_ops.less(a, b)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
p, h = self.evaluate([p, h])
# Run by feeding a tensor handle.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
if p:
y = math_ops.multiply(x, 10)
else:
y = math_ops.multiply(x, 100)
result = sess.run(y, feed_dict={f: h.handle})
self.assertEqual(5000, result)
@test_util.run_deprecated_v1
def testHandleForLoop(self):
with self.cached_session() as sess:
# Initialize a handle.
a = constant_op.constant(0)
h = session_ops.get_session_handle(a)
h = self.evaluate(h)
# Do some computation.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
# Must define the loop body outside the loop.
h_x = session_ops.get_session_handle(math_ops.add(x, 1))
for _ in range(100):
# This exercises garbage collection.
h = sess.run(h_x, feed_dict={f: h.handle})
self.assertEqual(100, h.eval())
@test_util.run_deprecated_v1
def testHandleWhileLoop(self):
with self.cached_session() as sess:
# Initialize a handle.
a = constant_op.constant(0)
h = session_ops.get_session_handle(a)
h = self.evaluate(h)
# Do some computation.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
b = constant_op.constant(100)
p = math_ops.less(x, b)
# Must define the loop body outside the loop.
h_x = session_ops.get_session_handle(math_ops.add(x, 1))
while True:
rp, h = sess.run([p, h_x], feed_dict={f: h.handle})
if not rp:
break
self.assertEqual(101, h.eval())
@test_util.run_deprecated_v1
def testHandleMover(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = self.evaluate(h)
# Feed a tensor handle.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
y = math_ops.multiply(x, 10)
self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))
# Feed another tensor handle.
with ops.device(test.gpu_device_name()):
a = constant_op.constant(10)
h = session_ops.get_session_handle(a)
h = self.evaluate(h)
self.assertEqual(100, sess.run(y, feed_dict={f: h.handle}))
@test_util.run_deprecated_v1
def testHandleDelete(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
self.evaluate(h).delete()
@test_util.run_deprecated_v1
def testHandleDeleteRaw(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = self.evaluate(h)
# Delete using a raw tensor handle.
raw_h = h.get_raw_handle()
f, x = session_ops.delete_session_tensor(raw_h)
sess.run(x, feed_dict={f: raw_h})
@test_util.run_deprecated_v1
def testMultiDevices(self):
with self.cached_session() as sess:
with ops.device(test.gpu_device_name()):
a = constant_op.constant(1.0)
a_handle = self.evaluate(session_ops.get_session_handle(a))
with ops.device("/cpu:0"):
b = constant_op.constant(2.0)
b_handle = self.evaluate(session_ops.get_session_handle(b))
a_p, a_t = session_ops.get_session_tensor(a_handle.handle, dtypes.float32)
b_p, b_t = session_ops.get_session_tensor(b_handle.handle, dtypes.float32)
c = math_ops.add(a_t, b_t)
c_handle = sess.run(
session_ops.get_session_handle(c),
feed_dict={a_p: a_handle.handle,
b_p: b_handle.handle})
self.assertEqual(3.0, c_handle.eval())
@test_util.run_deprecated_v1
def testHandleGC(self):
with self.cached_session() as sess:
# initial values live on CPU
with ops.device("/cpu:0"):
one = constant_op.constant(1, dtype=dtypes.float32)
one_handle = self.evaluate(session_ops.get_session_handle(one))
x_handle = self.evaluate(session_ops.get_session_handle(one))
# addition lives on GPU
with ops.device(test.gpu_device_name()):
add_h1, add_t1 = session_ops.get_session_tensor(one_handle.handle,
dtypes.float32)
add_h2, add_t2 = session_ops.get_session_tensor(x_handle.handle,
dtypes.float32)
add_op = math_ops.add(add_t1, add_t2)
add_output = session_ops.get_session_handle(add_op)
# add 1 to tensor 20 times
for _ in range(20):
x_handle = sess.run(
add_output,
feed_dict={add_h1: one_handle.handle,
add_h2: x_handle.handle})
@test_util.run_deprecated_v1
def testHandlePlacement(self):
with self.cached_session() as sess:
a = constant_op.constant(1.0)
a_handle_op = session_ops.get_session_handle(a)
b = constant_op.constant(2.0)
b_handle_op = session_ops.get_session_handle(b)
a_handle = self.evaluate(a_handle_op)
b_handle = self.evaluate(b_handle_op)
a_p, a_t = session_ops.get_session_tensor(a_handle.handle, dtypes.float32)
b_p, b_t = session_ops.get_session_tensor(b_handle.handle, dtypes.float32)
c = math_ops.add(a_t, b_t)
c_handle = sess.run(
session_ops.get_session_handle(c),
feed_dict={a_p: a_handle.handle,
b_p: b_handle.handle})
self.assertEqual(3.0, c_handle.eval())
@test_util.run_deprecated_v1
def testFeedOneHandleDirectly(self):
with self.cached_session() as sess:
a = constant_op.constant(10.0)
b = constant_op.constant(5.0)
c = math_ops.multiply(a, b)
d = math_ops.multiply(c, c)
h_c = self.evaluate(session_ops.get_session_handle(c))
self.assertAllClose(2500.0, sess.run(d, feed_dict={c: h_c}))
@test_util.run_deprecated_v1
def testDirectHandleFeedOverlappingWithFetches(self):
with self.cached_session() as sess:
a = constant_op.constant(10.0)
b = constant_op.constant(5.0)
c = math_ops.multiply(a, b)
h_c = self.evaluate(session_ops.get_session_handle(c))
d = array_ops.identity(c)
c_val = sess.run(c, feed_dict={c: h_c})
self.assertAllClose(50.0, c_val)
d_val = sess.run(d, feed_dict={c: h_c})
self.assertAllClose(50.0, d_val)
c_val, d_val = sess.run([c, d], feed_dict={c: h_c, d: 60.0})
self.assertAllClose(50.0, c_val)
self.assertAllClose(60.0, d_val)
c_val, d_val = sess.run([c, d], feed_dict={c: 60.0, d: h_c})
self.assertAllClose(60.0, c_val)
self.assertAllClose(50.0, d_val)
c_val, d_val = sess.run([c, d], feed_dict={c: h_c, d: h_c})
self.assertAllClose(50.0, c_val)
self.assertAllClose(50.0, d_val)
@test_util.run_deprecated_v1
def testFeedTwoHandlesDirectly(self):
with self.cached_session() as sess:
a = constant_op.constant(10.0)
b = constant_op.constant(5.0)
c = math_ops.multiply(a, b)
d = math_ops.div(a, b)
e = math_ops.subtract(c, d)
h_c = self.evaluate(session_ops.get_session_handle(c))
h_d = self.evaluate(session_ops.get_session_handle(d))
self.assertAllClose(48.0, sess.run(e, feed_dict={c: h_c, d: h_d}))
self.assertAllClose(-48.0, sess.run(e, feed_dict={c: h_d, d: h_c}))
@test_util.run_deprecated_v1
def testFeedHandleToVariableDirectly(self):
with self.cached_session() as sess:
a = variables.Variable(12.0)
inc_a = state_ops.assign_add(a, 2.0)
b = math_ops.add(a, 5.0)
self.evaluate(a.initializer)
h_a_read = sess.run(session_ops.get_session_handle(a.read_value()))
self.assertAllClose(12.0, self.evaluate(a))
self.assertAllClose(17.0, sess.run(b, feed_dict={a: h_a_read}))
self.evaluate(inc_a)
self.assertAllClose(19.0, sess.run(b, feed_dict={a: h_a_read}))
if __name__ == "__main__":
test.main()
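For reference, a minimal sketch of the handle round trip these tests exercise: get_session_handle keeps a tensor's value alive inside the session, and get_session_tensor yields a feedable placeholder plus a tensor through which later runs consume the stored value. This sketch assumes a TF 1.x-style graph session reached through tf.compat.v1 and is not part of the original file:
import tensorflow as tf

with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
  c = tf.constant(10) * tf.constant(5)
  # Persist the value of c in the session and fetch its handle.
  h = sess.run(tf.compat.v1.get_session_handle(c))
  # Build a consumer: a feedable placeholder plus the dereferenced tensor.
  f, x = tf.compat.v1.get_session_tensor(h.handle, tf.int32)
  y = x * 10
  print(sess.run(y, feed_dict={f: h.handle}))  # 500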
|
tensorflow-master
|
tensorflow/python/kernel_tests/session_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for DepthToSpace op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class DepthToSpaceTest(test.TestCase):
def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):
input_nhwc = math_ops.cast(inputs, dtype)
with self.cached_session(use_gpu=False):
# test NHWC (default) on CPU
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
self.assertAllEqual(x_tf.eval(), outputs)
if test.is_gpu_available():
with self.cached_session(use_gpu=True):
# test NHWC (default) on GPU
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
self.assertAllEqual(x_tf.eval(), outputs)
# test NCHW on GPU
input_nchw = test_util.NHWCToNCHW(input_nhwc)
output_nchw = array_ops.depth_to_space(
input_nchw, block_size, data_format="NCHW")
output_nhwc = test_util.NCHWToNHWC(output_nchw)
self.assertAllEqual(output_nhwc.eval(), outputs)
@test_util.run_deprecated_v1
def testBasic(self):
x_np = [[[[1, 2, 3, 4]]]]
block_size = 2
x_out = [[[[1], [2]], [[3], [4]]]]
self._testOne(x_np, block_size, x_out)
@test_util.run_deprecated_v1
def testBasicFloat16(self):
x_np = [[[[1, 2, 3, 4]]]]
block_size = 2
x_out = [[[[1], [2]], [[3], [4]]]]
self._testOne(x_np, block_size, x_out, dtype=dtypes.float16)
  # Tests for larger input dimensions, to make sure elements are
  # correctly ordered spatially.
@test_util.run_deprecated_v1
def testBlockSize2(self):
x_np = [[[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]]]
block_size = 2
x_out = [[[[1], [2], [5], [6]],
[[3], [4], [7], [8]],
[[9], [10], [13], [14]],
[[11], [12], [15], [16]]]]
self._testOne(x_np, block_size, x_out)
@test_util.run_deprecated_v1
def testBlockSize2Batch10(self):
block_size = 2
def batch_input_elt(i):
return [[[1 * i, 2 * i, 3 * i, 4 * i],
[5 * i, 6 * i, 7 * i, 8 * i]],
[[9 * i, 10 * i, 11 * i, 12 * i],
[13 * i, 14 * i, 15 * i, 16 * i]]]
def batch_output_elt(i):
return [[[1 * i], [2 * i], [5 * i], [6 * i]],
[[3 * i], [4 * i], [7 * i], [8 * i]],
[[9 * i], [10 * i], [13 * i], [14 * i]],
[[11 * i], [12 * i], [15 * i], [16 * i]]]
batch_size = 10
x_np = [batch_input_elt(i) for i in range(batch_size)]
x_out = [batch_output_elt(i) for i in range(batch_size)]
self._testOne(x_np, block_size, x_out)
def testBatchSize0(self):
block_size = 2
batch_size = 0
input_nhwc = array_ops.ones([batch_size, 2, 3, 12])
x_out = array_ops.ones([batch_size, 4, 6, 3])
with self.cached_session(use_gpu=False):
# test NHWC (default) on CPU
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
self.assertAllEqual(x_tf.shape, x_out.shape)
self.evaluate(x_tf)
if test.is_gpu_available():
with self.cached_session(use_gpu=True):
# test NHWC (default) on GPU
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
self.assertAllEqual(x_tf.shape, x_out.shape)
self.evaluate(x_tf)
# Tests for different width and height.
@test_util.run_deprecated_v1
def testNonSquare(self):
x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40]],
[[5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120]]]]
block_size = 2
x_out = [[[[1, 10], [2, 20]],
[[3, 30], [4, 40]],
[[5, 50], [6, 60]],
[[7, 70], [8, 80]],
[[9, 90], [10, 100]],
[[11, 110], [12, 120]]]]
self._testOne(x_np, block_size, x_out)
  # Tests for larger input dimensions, to make sure elements are
  # correctly ordered spatially.
@test_util.run_deprecated_v1
def testBlockSize4FlatInput(self):
x_np = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
block_size = 4
x_out = [[[[1], [2], [5], [6]],
[[3], [4], [7], [8]],
[[9], [10], [13], [14]],
[[11], [12], [15], [16]]]]
self._testOne(x_np, block_size, x_out)
  # Tests for larger input depths, to make sure elements are properly
  # interleaved in depth.
@test_util.run_deprecated_v1
def testDepthInterleaved(self):
x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
block_size = 2
x_out = [[[[1, 10], [2, 20]],
[[3, 30], [4, 40]]]]
self._testOne(x_np, block_size, x_out)
  # Tests for larger input depths, here an odd depth, to make sure
  # elements are properly interleaved in depth.
@test_util.run_deprecated_v1
def testDepthInterleavedDepth3(self):
x_np = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
block_size = 2
x_out = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
self._testOne(x_np, block_size, x_out)
  # Tests for larger input depths, to make sure elements are properly
  # interleaved in depth.
@test_util.run_deprecated_v1
def testDepthInterleavedLarger(self):
x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40],
[5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120],
[13, 130, 14, 140, 15, 150, 16, 160]]]]
block_size = 2
x_out = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
[[3, 30], [4, 40], [7, 70], [8, 80]],
[[9, 90], [10, 100], [13, 130], [14, 140]],
[[11, 110], [12, 120], [15, 150], [16, 160]]]]
self._testOne(x_np, block_size, x_out)
# Error handling:
  # Tests for a block size larger than the depth; this case should raise an
  # exception.
@test_util.run_deprecated_v1
def testBlockSizeTooLarge(self):
x_np = [[[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]]]
block_size = 4
    # Raises an exception, since the depth is only 4 and needs to be
    # divisible by 16 (the square of the block size).
with self.assertRaises(ValueError):
out_tf = array_ops.depth_to_space(x_np, block_size)
self.evaluate(out_tf)
# Test when the block size is 0.
@test_util.run_deprecated_v1
def testBlockSize0(self):
x_np = [[[[1], [2]],
[[3], [4]]]]
block_size = 0
with self.assertRaises(ValueError):
out_tf = array_ops.depth_to_space(x_np, block_size)
self.evaluate(out_tf)
# Test when the block size is 1. The block size should be > 1.
@test_util.run_deprecated_v1
def testBlockSizeOne(self):
x_np = [[[[1, 1, 1, 1],
[2, 2, 2, 2]],
[[3, 3, 3, 3],
[4, 4, 4, 4]]]]
block_size = 1
with self.assertRaises(ValueError):
out_tf = array_ops.depth_to_space(x_np, block_size)
self.evaluate(out_tf)
@test_util.run_deprecated_v1
def testBlockSizeLargerThanInput(self):
# The block size is too large for this input.
x_np = [[[[1], [2]],
[[3], [4]]]]
block_size = 10
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
self.evaluate(out_tf)
@test_util.run_deprecated_v1
def testBlockSizeNotDivisibleDepth(self):
# The depth is not divisible by the square of the block size.
x_np = [[[[1, 1, 1, 1],
[2, 2, 2, 2]],
[[3, 3, 3, 3],
[4, 4, 4, 4]]]]
block_size = 3
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
@test_util.run_deprecated_v1
def testUnknownShape(self):
t = array_ops.depth_to_space(
array_ops.placeholder(dtypes.float32), block_size=4)
self.assertEqual(4, t.get_shape().ndims)
def depthToSpaceUsingTranspose(self, tensor, block_size, data_format):
block_size_sq = block_size * block_size
if data_format == "NHWC":
b, ih, iw, ic = tensor.shape.as_list()
assert ic % block_size_sq == 0, (ic, block_size_sq)
ow, oh, oc = iw * block_size, ih * block_size, ic // block_size_sq
tensor = array_ops.reshape(tensor,
[b, ih, iw, block_size, block_size, oc])
tensor = array_ops.transpose(tensor, [0, 1, 3, 2, 4, 5])
tensor = array_ops.reshape(tensor, [b, oh, ow, oc])
elif data_format == "NCHW":
b, ic, ih, iw = tensor.shape.as_list()
assert ic % block_size_sq == 0, (ic, block_size_sq)
ow, oh, oc = iw * block_size, ih * block_size, ic // block_size_sq
tensor = array_ops.reshape(tensor,
[b, block_size, block_size, oc, ih, iw])
tensor = array_ops.transpose(tensor, [0, 3, 4, 1, 5, 2])
tensor = array_ops.reshape(tensor, [b, oc, oh, ow])
return tensor
def compareToTranspose(self, batch_size, in_height, in_width, out_channels,
block_size, data_format, use_gpu):
in_channels = out_channels * block_size * block_size
nhwc_input_shape = [batch_size, in_height, in_width, in_channels]
nchw_input_shape = [batch_size, in_channels, in_height, in_width]
total_size = np.prod(nhwc_input_shape)
if data_format == "NCHW_VECT_C":
# Initialize the input tensor with qint8 values that circle -127..127.
x = [((f + 128) % 255) - 127 for f in range(total_size)]
t = constant_op.constant(x, shape=nhwc_input_shape, dtype=dtypes.float32)
expected = self.depthToSpaceUsingTranspose(t, block_size, "NHWC")
t = test_util.NHWCToNCHW_VECT_C(t)
t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
t = array_ops.depth_to_space(t, block_size, data_format="NCHW_VECT_C")
t = gen_array_ops.dequantize(t, -128, 127)
actual = test_util.NCHW_VECT_CToNHWC(t)
else:
# Initialize the input tensor with ascending whole numbers as floats.
x = [f * 1.0 for f in range(total_size)]
shape = nchw_input_shape if data_format == "NCHW" else nhwc_input_shape
t = constant_op.constant(x, shape=shape, dtype=dtypes.float32)
expected = self.depthToSpaceUsingTranspose(t, block_size, data_format)
actual = array_ops.depth_to_space(t, block_size, data_format=data_format)
with self.session(use_gpu=use_gpu) as sess:
actual_vals, expected_vals = self.evaluate([actual, expected])
self.assertTrue(np.array_equal(actual_vals, expected_vals))
def testAgainstTranspose(self):
self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", False)
self.compareToTranspose(3, 2, 3, 2, 2, "NHWC", False)
self.compareToTranspose(1, 2, 3, 2, 3, "NHWC", False)
if not test.is_gpu_available():
tf_logging.info("skipping gpu tests since gpu not available")
return
self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", True)
self.compareToTranspose(3, 2, 3, 2, 2, "NHWC", True)
self.compareToTranspose(3, 2, 3, 1, 2, "NCHW", True)
self.compareToTranspose(3, 2, 3, 2, 2, "NCHW", True)
self.compareToTranspose(3, 2, 3, 1, 3, "NCHW", True)
self.compareToTranspose(3, 2, 3, 2, 3, "NCHW", True)
self.compareToTranspose(5, 7, 11, 3, 2, "NCHW", True)
self.compareToTranspose(3, 200, 300, 32, 2, "NCHW", True)
self.compareToTranspose(3, 2, 3, 8, 2, "NCHW_VECT_C", True)
self.compareToTranspose(3, 2, 3, 4, 3, "NCHW_VECT_C", True)
self.compareToTranspose(3, 2, 3, 8, 3, "NCHW_VECT_C", True)
self.compareToTranspose(5, 7, 11, 12, 2, "NCHW_VECT_C", True)
self.compareToTranspose(3, 200, 300, 32, 2, "NCHW_VECT_C", True)
class DepthToSpaceGradientTest(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_size, data_format):
    # NCHW is implemented only for GPU.
if data_format == "NCHW" and not test.is_gpu_available():
return
assert 4 == x.ndim
with self.cached_session(use_gpu=True):
tf_x = ops.convert_to_tensor(x)
tf_y = array_ops.depth_to_space(tf_x, block_size, data_format=data_format)
epsilon = 1e-2
      (x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
  # Tests the gradient of depth_to_space for x, a four-dimensional tensor
  # of shape [b, h, w, d * block_size * block_size].
def _compare(self, b, h, w, d, block_size, data_format):
block_size_sq = block_size * block_size
data = np.random.normal(0, 1, b * h * w * d * block_size_sq).astype(
np.float32)
if data_format == "NHWC":
x = data.reshape([b, h, w, d * block_size_sq])
else:
x = data.reshape([b, d * block_size_sq, h, w])
self._checkGrad(x, block_size, data_format)
  # Don't use very large numbers as dimensions here, as the result is a
  # tensor whose size is the Cartesian product of the dimensions.
@test_util.run_deprecated_v1
def testSmall(self):
block_size = 2
self._compare(3, 2, 5, 3, block_size, "NHWC")
self._compare(3, 2, 5, 3, block_size, "NCHW")
@test_util.run_deprecated_v1
def testSmall2(self):
block_size = 3
self._compare(1, 2, 3, 2, block_size, "NHWC")
self._compare(1, 2, 3, 2, block_size, "NCHW")
if __name__ == "__main__":
test.main()
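The depthToSpaceUsingTranspose helper above expresses depth_to_space as a reshape, a transpose, and a second reshape. A standalone NumPy sketch of the same NHWC rearrangement (depth_to_space_nhwc is a hypothetical name used for illustration, not a TensorFlow API):
import numpy as np

def depth_to_space_nhwc(x, block_size):
  # Split the channel axis into (block, block, out_channels), interleave the
  # block axes with height and width, then merge them spatially.
  b, h, w, c = x.shape
  oc = c // (block_size * block_size)
  x = x.reshape(b, h, w, block_size, block_size, oc)
  x = x.transpose(0, 1, 3, 2, 4, 5)
  return x.reshape(b, h * block_size, w * block_size, oc)

x = np.arange(1, 5).reshape(1, 1, 1, 4)
print(depth_to_space_nhwc(x, 2)[0, :, :, 0])  # [[1 2] [3 4]], as in testBasic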
|
tensorflow-master
|
tensorflow/python/kernel_tests/depthtospace_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.BatchMatMul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
def GetRandomNormalInput(shape, dtype):
  # float16 has a limited range, so we reduce the magnitude and spread of
  # the random values.
scale = 10.0 if dtype != np.float16 else 0.1
loc = -10.0 if dtype != np.float16 else 0.1
vals = np.array(np.random.normal(loc, scale, np.prod(shape)), dtype=dtype)
if dtype in (np.complex64, np.complex128):
imag = np.array(np.random.normal(loc, scale, np.prod(shape)), dtype=dtype)
vals += 1j * imag
return vals.reshape(shape)
class BatchMatmulOpTest(test.TestCase):
# Uses numpy to compute batch_matmul(x, y, adjoint_a, adjoint_b).
def _npBatchMatmul(self, x, y, adjoint_a, adjoint_b):
    # The output's shape depends on adjoint_a and adjoint_b.
if adjoint_a:
x = np.conjugate(np.swapaxes(x, -1, -2))
if adjoint_b:
y = np.conjugate(np.swapaxes(y, -1, -2))
return np.matmul(x, y)
# Compares TensorFlow BatchMatmul with NumPy's matmul.
def _compare(self, x_in, y_in, adjoint_a, adjoint_b, static_shape):
x_t_shape = x_in.shape[:-2] + (x_in.shape[-1], x_in.shape[-2])
y_t_shape = y_in.shape[:-2] + (y_in.shape[-1], y_in.shape[-2])
x = x_in if not adjoint_a else x_in.reshape(x_t_shape)
y = y_in if not adjoint_b else y_in.reshape(y_t_shape)
is_floating = x.dtype != np.int32
tol = 100 * np.finfo(x.dtype).eps if is_floating else 0
with self.cached_session(use_gpu=is_floating) as sess:
if static_shape:
z0 = math_ops.matmul(x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
z0_val = self.evaluate(z0)
else:
x_ph = array_ops.placeholder(x.dtype)
y_ph = array_ops.placeholder(y.dtype)
z0 = math_ops.matmul(
x_ph, y_ph, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
z0_val = sess.run(z0, feed_dict={x_ph: x, y_ph: y})
z1 = self._npBatchMatmul(x, y, adjoint_a, adjoint_b)
self.assertAllClose(z0_val, z1, rtol=tol, atol=tol)
def _testNonEmpty(self, dtype, adjoint_a, adjoint_b, use_static_shape):
def CompareNonEmpty(self, a_shape, b_shape):
self._compare(
GetRandomNormalInput(a_shape, dtype),
GetRandomNormalInput(b_shape, dtype),
adjoint_a,
adjoint_b,
static_shape=use_static_shape)
CompareNonEmpty(self, [1, 2, 3], [1, 3, 5])
CompareNonEmpty(self, [1, 2, 3], [1, 3, 1])
CompareNonEmpty(self, [1, 1, 3], [1, 3, 5])
CompareNonEmpty(self, [1, 2, 3], [1, 3, 5])
CompareNonEmpty(self, [7, 1, 3], [7, 3, 5])
CompareNonEmpty(self, [7, 2, 3], [7, 3, 1])
CompareNonEmpty(self, [7, 2, 3], [7, 3, 5])
CompareNonEmpty(self, [10, 64, 75], [10, 75, 30])
CompareNonEmpty(self, [5, 7, 2, 3], [5, 7, 3, 5])
def _testBroadcasting(self, dtype, adjoint_a, adjoint_b, use_static_shape):
def CompareNonEmpty(self, a_shape, b_shape):
self._compare(
GetRandomNormalInput(a_shape, dtype),
GetRandomNormalInput(b_shape, dtype),
adjoint_a,
adjoint_b,
static_shape=use_static_shape)
CompareNonEmpty(self, [2, 3], [1, 3, 5])
CompareNonEmpty(self, [1, 2, 3], [3, 5])
CompareNonEmpty(self, [5, 1, 2, 3], [1, 7, 3, 5])
CompareNonEmpty(self, [5, 2, 2, 3], [3, 5])
CompareNonEmpty(self, [2, 3], [5, 2, 3, 5])
CompareNonEmpty(self, [4, 5, 1, 2, 3], [1, 1, 3, 5])
CompareNonEmpty(self, [1, 2, 1, 4, 2, 1, 3, 4], [3, 2, 1, 1, 1, 2, 4, 2])
def _testEmpty(self, dtype, adjoint_a, adjoint_b, use_static_shape):
def CompareEmpty(self, a_shape, b_shape):
self._compare(
np.zeros(a_shape).astype(dtype),
np.zeros(b_shape).astype(dtype),
adjoint_a,
adjoint_b,
static_shape=use_static_shape)
CompareEmpty(self, [0, 3, 2], [0, 2, 4])
CompareEmpty(self, [3, 0, 2], [3, 2, 5])
CompareEmpty(self, [3, 3, 2], [3, 2, 0])
def _GetBatchMatmulOpTest(dtype, adjoint_a, adjoint_b, use_static_shape):
def Test(self):
np.random.seed(42)
self._testNonEmpty(dtype, adjoint_a, adjoint_b, use_static_shape)
self._testEmpty(dtype, adjoint_a, adjoint_b, use_static_shape)
return Test
def _GetBatchMatmulOpBroadcastingTest(dtype, adjoint_a, adjoint_b,
use_static_shape):
def Test(self):
with compat.forward_compatibility_horizon(2019, 4, 26):
np.random.seed(42)
self._testBroadcasting(dtype, adjoint_a, adjoint_b, use_static_shape)
return Test
class BatchMatmulGradientTest(test.TestCase):
# loss = sum(batch_matmul(x, y)). Verify dl/dx and dl/dy via the
# gradient checker.
def _checkGrad(self, x_in, y_in, adjoint_a, adjoint_b):
x_t_shape = x_in.shape[:-2] + (x_in.shape[-1], x_in.shape[-2])
y_t_shape = y_in.shape[:-2] + (y_in.shape[-1], y_in.shape[-2])
x = x_in if not adjoint_a else x_in.reshape(x_t_shape)
y = y_in if not adjoint_b else y_in.reshape(y_t_shape)
epsilon = np.finfo(x.dtype).eps
# Since our gradient is linear, a larger delta decreases the error.
delta = 10 * epsilon**(1.0 / 3.0)
def Loss(x, y):
return math_ops.reduce_sum(math_ops.matmul(x, y, adjoint_a, adjoint_b))
with self.cached_session(use_gpu=True):
((x_jacob_t, y_jacob_t),
(x_jacob_n, y_jacob_n)) = gradient_checker_v2.compute_gradient(
Loss, [x, y], delta=delta)
tol = 10 * delta
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=tol, atol=tol)
self.assertAllClose(y_jacob_t, y_jacob_n, rtol=tol, atol=tol)
  # Tests gradients of a batched matmul of x and y.
def _compare(self, a_shape, b_shape, dtype, adjoint_a, adjoint_b):
np.random.seed(42)
x = GetRandomNormalInput(a_shape, dtype)
y = GetRandomNormalInput(b_shape, dtype)
self._checkGrad(x, y, adjoint_a, adjoint_b)
def _GetBatchMatmulGradientTest(dtype, adjoint_a, adjoint_b):
def Test(self):
def CheckGradients(self, a_shape, b_shape):
self._compare(a_shape, b_shape, dtype, adjoint_a, adjoint_b)
CheckGradients(self, [1, 2, 3], [1, 3, 5])
CheckGradients(self, [3, 4, 7], [3, 7, 10])
return Test
def _GetBatchMatmulGradientWithBroadcastingTest(dtype, adjoint_a, adjoint_b):
def Test(self):
def CheckGradients(self, a_shape, b_shape):
self._compare(a_shape, b_shape, dtype, adjoint_a, adjoint_b)
with compat.forward_compatibility_horizon(2019, 4, 26):
CheckGradients(self, [1, 5, 2, 3], [7, 1, 3, 2])
CheckGradients(self, [2, 3], [1, 3, 5])
CheckGradients(self, [2, 3], [5, 3, 5])
CheckGradients(self, [5, 2, 5], [5, 3])
CheckGradients(self, [5, 2, 2, 3], [3, 5])
CheckGradients(self, [4, 5, 1, 2, 3], [1, 1, 3, 5])
CheckGradients(self, [1, 2, 1, 4, 2, 1, 3, 4], [3, 2, 1, 1, 1, 2, 4, 2])
return Test
class BatchMatMulBenchmark(test.Benchmark):
  # Each shape pair broadcasts to a batch shape of (4, 8, 4, 2), i.e. 256
  # matrix multiplies.
shape_pairs = [
# Typical fully connected layer.
((4, 8, 4, 2, 1, 1024), (1024, 1024)),
((4, 1, 4, 1, 1, 1024), (1, 8, 1, 2, 1024, 1024)),
# Square matmul.
((4, 8, 4, 2, 512, 512), (512, 512)),
((4, 1, 4, 1, 512, 512), (1, 8, 1, 2, 512, 512)),
# Matrix-vector multiplies.
((4, 8, 4, 2, 10000, 200), (200, 1)),
((4, 1, 4, 1, 10000, 200), (1, 8, 1, 2, 200, 1)),
# Vector-matrix multiplies.
((4, 8, 4, 2, 1, 200), (200, 10000)),
((4, 1, 4, 1, 1, 200), (1, 8, 1, 2, 200, 10000)),
]
def benchmarkBatchMatMulBroadcast(self):
for (a_shape, b_shape) in self.shape_pairs:
with compat.forward_compatibility_horizon(2019, 4, 26):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix_a = variables.Variable(
GetRandomNormalInput(a_shape, np.float32))
matrix_b = variables.Variable(
GetRandomNormalInput(b_shape, np.float32))
variables.global_variables_initializer().run()
# Use batch matmul op's internal broadcasting.
self.run_op_benchmark(
sess,
math_ops.matmul(matrix_a, matrix_b),
min_iters=50,
name="batch_matmul_cpu_{}_{}".format(a_shape, b_shape))
# Manually broadcast the input matrices using the broadcast_to op.
broadcasted_batch_shape = array_ops.broadcast_static_shape(
matrix_a.shape[:-2], matrix_b.shape[:-2])
broadcasted_a_shape = broadcasted_batch_shape.concatenate(
matrix_a.shape[-2:])
broadcasted_b_shape = broadcasted_batch_shape.concatenate(
matrix_b.shape[-2:])
self.run_op_benchmark(
sess,
math_ops.matmul(
array_ops.broadcast_to(matrix_a, broadcasted_a_shape),
array_ops.broadcast_to(matrix_b, broadcasted_b_shape)),
min_iters=50,
name="batch_matmul_manual_broadcast_cpu_{}_{}".format(
a_shape, b_shape))
if __name__ == "__main__":
for dtype_ in [
np.float16, np.float32, np.float64, np.complex64, np.complex128, np.int32
]:
for adjoint_a_ in False, True:
for adjoint_b_ in False, True:
name = "%s_%s_%s" % (dtype_.__name__, adjoint_a_, adjoint_b_)
# TF2 does not support placeholders under eager so we skip it.
for use_static_shape_ in set([True, tf2.enabled()]):
setattr(
BatchMatmulOpTest,
"testBatchMatmulOp_" + name + "_{}".format(use_static_shape_),
_GetBatchMatmulOpTest(dtype_, adjoint_a_, adjoint_b_,
use_static_shape_))
# Broadcasting is supported only in v2.
setattr(
BatchMatmulOpTest, "testBatchMatmulBroadcasting_" + name +
("_%s" % use_static_shape_),
_GetBatchMatmulOpBroadcastingTest(dtype_, adjoint_a_, adjoint_b_,
use_static_shape_))
if dtype_ == np.int32:
continue
setattr(BatchMatmulGradientTest, "testBatchMatmulGradient_" + name,
_GetBatchMatmulGradientTest(dtype_, adjoint_a_, adjoint_b_))
# Broadcasting is supported only in v2.
setattr(
BatchMatmulGradientTest,
"testBatchMatmulGradientWithBroadcasting_" + name,
_GetBatchMatmulGradientWithBroadcastingTest(dtype_, adjoint_a_,
adjoint_b_))
test.main()
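The reference _npBatchMatmul above fixes the semantics the kernels are checked against: conjugate-transpose the two innermost dimensions of an adjointed operand, then apply a broadcasting matmul. A self-contained sketch of that reference (illustrative, with a hypothetical function name):
import numpy as np

def np_batch_matmul(x, y, adjoint_a=False, adjoint_b=False):
  # Adjoint = conjugate transpose of the innermost two dimensions; the
  # leading (batch) dimensions broadcast under np.matmul's rules.
  if adjoint_a:
    x = np.conjugate(np.swapaxes(x, -1, -2))
  if adjoint_b:
    y = np.conjugate(np.swapaxes(y, -1, -2))
  return np.matmul(x, y)

a = np.random.normal(size=(7, 3, 2))  # batch of 7 matrices, each 3 x 2
b = np.random.normal(size=(7, 3, 5))
print(np_batch_matmul(a, b, adjoint_a=True).shape)  # (7, 2, 5)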
|
tensorflow-master
|
tensorflow/python/kernel_tests/batch_matmul_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for segment reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class SegmentReductionHelper(test.TestCase):
def _input(self, input_shape, dtype=dtypes_lib.int32):
num_elem = 1
for x in input_shape:
num_elem *= x
values = np.arange(1, num_elem + 1)
np_values = values.reshape(input_shape).astype(dtype.as_numpy_dtype)
# Add a non-zero imaginary component to complex types.
if dtype.is_complex:
np_values -= 1j * np_values
return constant_op.constant(
np_values, shape=input_shape, dtype=dtype), np_values
def _segmentReduce(self, indices, x, op1, op2=None, num_segments=None,
initial_value=0):
if not x.size:
return np.array([])
indices = np.asarray(indices)
if num_segments is None:
num_segments = indices[-1] + 1
output = [None] * num_segments
slice_shape = x.shape[indices.ndim:]
x_flat = x.reshape((indices.size,) + slice_shape)
for i, index in enumerate(indices.ravel()):
if (output[index] is not None) and op1 == np.max:
for j in range(0, output[index].shape[0]):
output[index][j] = op1([output[index][j], x_flat[i][j]])
elif output[index] is not None:
output[index] = op1(output[index], x_flat[i])
else:
output[index] = x_flat[i]
    # Fill output slots that were never assigned with the initial value.
initial_value_slice = np.ones(slice_shape) * initial_value
output = [o if o is not None else initial_value_slice for o in output]
if op2 is not None:
output = [op2(o) for o in output]
output = [o.reshape(slice_shape) for o in output]
return np.array(output)
def _mean_cum_op(self, x, y):
return (x[0] + y, x[1] + 1) if isinstance(x, tuple) else (x + y, 2)
def _mean_reduce_op(self, x):
return x[0] / x[1] if isinstance(x, tuple) else x
def _sqrt_n_reduce_op(self, x):
return x[0] / np.sqrt(x[1]) if isinstance(x, tuple) else x
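# Editor's sketch (not part of the original test file): the sorted-segment-sum
# semantics that _segmentReduce reproduces above, in a few lines of NumPy.
# Rows of `x` that share a segment id are reduced into one output row; empty
# segments stay at the initial value (zero for sums). Uses the file's
# existing `import numpy as np`.
def _demo_segment_sum(x, segment_ids):
  num_segments = segment_ids[-1] + 1
  out = np.zeros((num_segments,) + x.shape[1:], dtype=x.dtype)
  for row, sid in zip(x, segment_ids):
    out[sid] += row
  return out
# _demo_segment_sum(np.arange(8.).reshape(4, 2), [0, 0, 1, 1])
# -> [[2., 4.], [10., 12.]]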
class SegmentReductionOpTest(SegmentReductionHelper):
def testValues(self):
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
dtypes_lib.int32, dtypes_lib.complex64, dtypes_lib.complex128
]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, math_ops.segment_sum),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.segment_mean),
(np.ndarray.__mul__, None, math_ops.segment_prod),
(np.minimum, None, math_ops.segment_min),
(np.maximum, None, math_ops.segment_max)]
# A subset of ops has been enabled for complex numbers
complex_ops_list = [(np.add, None, math_ops.segment_sum),
(np.ndarray.__mul__, None, math_ops.segment_prod),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.segment_mean)]
n = 10
shape = [n, 2]
indices = [i // 3 for i in range(n)]
for dtype in dtypes:
if dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
curr_ops_list = complex_ops_list
else:
curr_ops_list = ops_list
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtype)
for np_op1, np_op2, tf_op in curr_ops_list:
np_ans = self._segmentReduce(indices, np_x, np_op1, np_op2)
s = tf_op(data=tf_x, segment_ids=indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
            # NOTE(mrry): The static shape inference that computes
            # `tf_ans.shape` can only infer the sizes from dimension 1
            # onwards, because the size of dimension 0 is data-dependent
            # and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
@test_util.run_deprecated_v1
def testSegmentIdsShape(self):
shape = [4, 4]
tf_x, _ = self._input(shape)
indices = constant_op.constant([0, 1, 2, 2], shape=[2, 2])
with self.assertRaises(ValueError):
math_ops.segment_sum(data=tf_x, segment_ids=indices)
@test_util.run_deprecated_v1
def testSegmentIdsSize(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape)
indices = [0, 1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment_ids should be the same size"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentIdsValid(self):
# This is a baseline for the following SegmentIdsInvalid* tests.
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, 1]
result = math_ops.segment_sum(data=tf_x, segment_ids=indices).eval()
self.assertAllEqual([[15, 18, 21, 24], [13, 14, 15, 16]], result)
def testSegmentIdsGreaterThanZero(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float32)
indices = [1, 1, 2, 2]
np_ans = self._segmentReduce(indices, np_x, np.add)
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
def testSegmentIdsHole(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 3, 3]
np_ans = self._segmentReduce(indices, np_x, np.add)
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
@test_util.run_deprecated_v1
def testSegmentIdsInvalid1(self):
shape = [4, 4]
with self.cached_session():
tf_x, _ = self._input(shape)
indices = [-1, -1, 0, 0]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id -1 out of range \[0, 1\), possibly because "
"'segment_ids' input is not sorted."):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentIdsInvalid2(self):
shape = [4, 4]
with self.cached_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 0, 1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids are not increasing"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentIdsInvalid3(self):
shape = [4, 4]
with self.cached_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 2, 0]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), possibly "
"because 'segment_ids' input is not sorted."):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentIdsInvalid4(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, -1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentIdsInvalid5(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, -2]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradient(self):
shape = [4, 4]
indices = [0, 1, 2, 2]
for tf_op in [
math_ops.segment_sum, math_ops.segment_mean, math_ops.segment_min,
math_ops.segment_max
]:
with self.cached_session():
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)
s = tf_op(data=tf_x, segment_ids=indices)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n)
class UnsortedSegmentTest(SegmentReductionHelper):
def __init__(self, methodName='runTest'):
# Each item is np_op1, np_op2, tf_op, initial_value functor
self.ops_list = [(np.add, None,
math_ops.unsorted_segment_sum, lambda t: 0),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.unsorted_segment_mean, lambda t: 0),
(self._mean_cum_op, self._sqrt_n_reduce_op,
math_ops.unsorted_segment_sqrt_n, lambda t: 0),
(np.ndarray.__mul__, None,
math_ops.unsorted_segment_prod, lambda t: 1),
(np.minimum, None,
math_ops.unsorted_segment_min, lambda t: t.max),
(np.maximum, None,
math_ops.unsorted_segment_max, lambda t: t.min)]
# A subset of ops has been enabled for complex numbers
self.complex_ops_list = [(np.add, None,
math_ops.unsorted_segment_sum, lambda t: 0),
(np.ndarray.__mul__, None,
math_ops.unsorted_segment_prod, lambda t: 1)]
self.differentiable_dtypes = [dtypes_lib.float16, dtypes_lib.float32,
dtypes_lib.float64]
self.all_dtypes = (self.differentiable_dtypes +
[dtypes_lib.bfloat16,
dtypes_lib.int64, dtypes_lib.int32,
dtypes_lib.complex64, dtypes_lib.complex128])
super(UnsortedSegmentTest, self).__init__(methodName=methodName)
def testValues(self):
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in self.all_dtypes:
ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
tf_x, np_x = self._input(shape, dtype=dtype)
for use_gpu in [True, False]:
          with self.cached_session(use_gpu=use_gpu):
for np_op1, np_op2, tf_op, init_op in ops_list:
# sqrt_n doesn't support integers
if (np_op2 == self._sqrt_n_reduce_op and dtype.is_integer):
continue
              # TODO(philjd): enable this test once real_div supports bfloat16.
if (np_op2 in [self._sqrt_n_reduce_op, self._mean_reduce_op] and
dtype == dtypes_lib.bfloat16):
continue
np_ans = self._segmentReduce(
indices, np_x, np_op1, np_op2, num_segments=num_segments,
initial_value=init_op(dtype))
s = tf_op(tf_x, segment_ids=indices, num_segments=num_segments)
tf_ans = self.evaluate(s)
if dtype is dtypes_lib.bfloat16:
tf_ans = tf_ans.astype(np.float32)
self.assertAllCloseAccordingToType(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
def testNumSegmentsTypes(self):
dtypes = [dtypes_lib.int32, dtypes_lib.int64]
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in dtypes:
with self.cached_session(use_gpu=True):
tf_x, np_x = self._input(shape)
num_segments_constant = constant_op.constant(
num_segments, dtype=dtype)
np_ans = self._segmentReduce(
indices, np_x, np.add, op2=None, num_segments=num_segments)
s = math_ops.unsorted_segment_sum(
data=tf_x,
segment_ids=indices,
num_segments=num_segments_constant)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
@test_util.run_deprecated_v1
def testGradients(self):
num_cols = 2
indices_flat = np.array([0, 4, 0, -1, 3, -1, 4, 7, 7, 3])
num_segments = max(indices_flat) + 3
for dtype in self.differentiable_dtypes:
ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (num_cols,)
# test CPU and GPU as tf.gather behaves differently on each device
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
for _, _, tf_op, _ in ops_list:
tf_x, np_x = self._input(shape, dtype=dtype)
s = tf_op(tf_x, indices, num_segments)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [num_segments, num_cols],
x_init_value=np_x,
delta=1)
self.assertAllClose(jacob_t, jacob_n)
@test_util.run_deprecated_v1
def testProdGrad(self):
# additional test for the prod gradient to ensure correct handling of zeros
values = np.array([0, 0, 1, 0, 2, 2, 3, 3, 3], dtype=np.float32)
indices = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int32)
indices_neg = np.array([-1, 0, 0, -1, 1, 1, -1, 2, 2], dtype=np.int32)
values_tf = constant_op.constant(values)
# ground truth partial derivatives
gradients_indices = np.zeros((9, 3), dtype=np.float32)
gradients_indices_neg = np.zeros((9, 3), dtype=np.float32)
    # The derivative w.r.t. the other segments is zero, so here we only
    # explicitly set the grad values for the corresponding segment.
gradients_indices[range(9), indices] = [0, 0, 0, 4, 0, 0, 9, 9, 9]
gradients_indices_neg[range(9), indices_neg] = [0, 1, 0, 0, 2, 2, 0, 3, 3]
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
for ind, grad_gt in [(indices, gradients_indices),
(indices_neg, gradients_indices_neg)]:
s = math_ops.unsorted_segment_prod(values_tf,
constant_op.constant(ind), 3)
jacob_t, jacob_n = gradient_checker.compute_gradient(
values_tf, (9,), s, (3,), x_init_value=values, delta=1)
self.assertAllClose(jacob_t, jacob_n)
self.assertAllClose(jacob_t, grad_gt)
@test_util.run_deprecated_v1
def testGradientMatchesSegmentSum(self):
# Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
# and compare the outputs, which should be identical.
    # NB: for this test to work, the indices must be valid for SegmentSum:
    # they must be sorted and contiguous, and num_segments must equal
    # max(indices) + 1.
indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
n = len(indices)
num_cols = 2
shape = [n, num_cols]
num_segments = max(indices) + 1
for dtype in self.differentiable_dtypes:
with self.cached_session(use_gpu=True):
tf_x, np_x = self._input(shape, dtype=dtype)
# Results from UnsortedSegmentSum
unsorted_s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
unsorted_jacob_t, unsorted_jacob_n = (
gradient_checker.compute_gradient(tf_x, shape, unsorted_s,
[num_segments, num_cols],
x_init_value=np_x, delta=1))
# Results from SegmentSum
sorted_s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
sorted_jacob_t, sorted_jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
sorted_s, [num_segments, num_cols],
x_init_value=np_x,
delta=1)
self.assertAllClose(unsorted_jacob_t, sorted_jacob_t)
self.assertAllClose(unsorted_jacob_n, sorted_jacob_n)
@test_util.run_deprecated_v1
def testBadIndices(self):
# Note: GPU kernel does not return the out-of-range error needed for this
# test, so this test is marked as cpu-only.
# Note: With PR #13055 a negative index will be ignored silently.
with self.session(use_gpu=False):
for bad in [[2]], [[7]]:
unsorted = math_ops.unsorted_segment_sum([[17]], bad, num_segments=2)
with self.assertRaisesOpError(
r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]):
self.evaluate(unsorted)
@test_util.run_deprecated_v1
def testEmptySecondDimension(self):
dtypes = [np.float16, np.float32, np.float64, np.int64, np.int32,
np.complex64, np.complex128]
with self.session(use_gpu=True):
for dtype in dtypes:
for itype in (np.int32, np.int64):
data = np.zeros((2, 0), dtype=dtype)
segment_ids = np.array([0, 1], dtype=itype)
unsorted = math_ops.unsorted_segment_sum(data, segment_ids, 2)
self.assertAllEqual(unsorted.eval(), np.zeros((2, 0), dtype=dtype))
def testDropNegatives(self):
    # Note: the test works by replacing segment ids equal to 8 with -1 and
    # zeroing out the corresponding rows of the numpy reference result.
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in self.all_dtypes:
with self.session(use_gpu=True):
tf_x, np_x = self._input(shape, dtype=dtype)
np_ans = self._segmentReduce(
indices, np_x, np.add, op2=None, num_segments=num_segments)
          # Zero out the rows for segment 8 and the trailing empty segments.
np_ans[8:] = 0
# Replace 8 with -1 in indices
np.place(indices, indices == 8, [-1])
s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
class SparseSegmentReductionHelper(SegmentReductionHelper):
def _sparse_input(self, input_shape, num_indices, dtype=dtypes_lib.int32):
a, b = super(SparseSegmentReductionHelper, self)._input(input_shape, dtype)
indices = np.random.randint(0, input_shape[0], num_indices).astype(np.int32)
return (constant_op.constant(
indices, dtype=dtypes_lib.int32), indices, a, b)
def _sparseSegmentReduce(self,
x,
indices,
segment_indices,
op1,
op2=None,
num_segments=None):
return self._segmentReduce(
segment_indices, x[indices], op1, op2, num_segments=num_segments)
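# Editor's sketch (not part of the original test file): the sparse variant is
# a gather along axis 0 followed by the dense reduction sketched earlier,
# mirroring how _sparseSegmentReduce delegates to _segmentReduce above.
def _demo_sparse_segment_sum(x, indices, segment_ids):
  return _demo_segment_sum(x[np.asarray(indices)], segment_ids)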
class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
def testValues(self):
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
dtypes_lib.int32
]
mean_dtypes = [dtypes_lib.float32, dtypes_lib.float64]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, math_ops.sparse_segment_sum),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.sparse_segment_mean)]
n = 400
shape = [n, 2]
segment_indices = []
for i in range(20):
for _ in range(i + 1):
segment_indices.append(i)
num_indices = len(segment_indices)
for dtype in dtypes:
with self.cached_session(use_gpu=False):
tf_indices, np_indices, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtype)
for np_op1, np_op2, tf_op in ops_list:
if tf_op == math_ops.sparse_segment_mean and dtype not in mean_dtypes:
continue
np_ans = self._sparseSegmentReduce(np_x, np_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
          # NOTE(mrry): The static shape inference that computes
          # `tf_ans.shape` can only infer the sizes from dimension 1
          # onwards, because the size of dimension 0 is data-dependent
          # and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
def testSegmentIdsHole(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum), (
self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean)]
segment_indices = [0, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
def testWithNumSegments(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum_with_num_segments),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.sparse_segment_mean_with_num_segments)]
segment_indices = [0, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
num_segments = 5
with self.session(use_gpu=False):
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(
np_x,
tf_indices,
segment_indices,
np_op1,
np_op2,
num_segments=num_segments)
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
def testWithEmptySegments(self):
tf_x = constant_op.constant([], shape=[0, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments
]
segment_indices = []
tf_indices = []
num_segments = 5
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
tf_ans = self.evaluate(s)
self.assertAllClose(np.zeros([5, 4]), tf_ans)
def testSegmentIdsGreaterThanZero(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum), (
self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean)]
segment_indices = [1, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
def testValid(self):
# Baseline for the test*Invalid* methods below.
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
self.evaluate(s)
@test_util.run_deprecated_v1
def testIndicesInvalid1(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, -1, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"indices\[1\] == -1 out of range \[0, 10\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testIndicesInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"indices\[3\] == 10 out of range \[0, 10\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 0, 1]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids are not increasing"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid3(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), possibly because "
"'segment_ids' input is not sorted"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid4(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"Segment id -1 out of range \[0, 2\), possibly because "
"'segment_ids' input is not sorted"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid6(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 0, 0, -1]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid7(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 0, 0, -2]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
self.evaluate(s)
def testSegmentWithNumSegmentsValid(self):
# Baseline for the test*WithNumSegmentsInvalid* methods below.
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]
num_segments = 5
segment_indices = [0, 1, 3, 3]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentWithNumSegmentsInvalid1(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]
num_segments = 5
segment_indices = [0, 1, 3, 5]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
with self.assertRaisesOpError("segment ids must be < num_segments"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentWithNumSegmentsInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]
num_segments = -2
segment_indices = [0, 1, 3, 3]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
with self.assertRaisesRegexp(
ValueError, "Cannot specify a negative value for num_segments"):
tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
@test_util.run_deprecated_v1
def testGradient(self):
shape = [10, 4]
segment_indices = [0, 1, 2, 2]
num_indices = len(segment_indices)
for tf_op in [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]:
with self.cached_session():
tf_indices, _, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtypes_lib.float64)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n)
@test_util.run_deprecated_v1
def testGradientWithEmptySegmentsAtEnd(self):
shape = [10, 4]
num_segments = 5
segment_indices = [0, 1, 2, 2]
num_indices = len(segment_indices)
for tf_op in [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]:
with self.cached_session():
tf_indices, _, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtypes_lib.float64)
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [5, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n)
def testGradientValid(self):
# Baseline for the testGradient*Invalid* methods below.
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientIndicesInvalid1(self):
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Index 10 out of range \[0, 10\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientIndicesInvalid2(self):
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, -1, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Index -1 out of range \[0, 10\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientSegmentsInvalid1(self):
tf_x, _ = self._input(
[3, 4], dtype=dtypes_lib.float32) # expecting 3 segments
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 1, 4] # 5 segments
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError("Invalid number of segments"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientSegmentsInvalid2(self):
tf_x, _ = self._input([1, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id 1 out of range \[0, 1\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientSegmentsInvalid3(self):
tf_x, _ = self._input([2, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id -1 out of range \[0, 2\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientSegmentsInvalid4(self):
tf_x, _ = self._input([0, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, -1]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id 0 out of range \[0, 0\)"):
self.evaluate(s)
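# A short sketch (not part of the test classes here) of the operation pair the
# benchmark below compares: math_ops.segment_sum requires sorted segment ids
# and infers the output size from them, while math_ops.unsorted_segment_sum
# takes num_segments explicitly and accepts ids in any order. The NumPy helper
# below is hypothetical and models both, assuming non-negative ids.
import numpy as np

def np_segment_sum(data, segment_ids, num_segments):
  out = np.zeros((num_segments,) + data.shape[1:], dtype=data.dtype)
  for row, seg in zip(data, segment_ids):
    out[seg] += row  # accumulate each row into its segment
  return out

assert np.array_equal(
    np_segment_sum(np.ones((4, 2), np.float32), [0, 0, 1, 1], 2),
    np.full((2, 2), 2.0, np.float32))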
class SegmentReductionOpBenchmark(test.Benchmark):
outer_dim_options = [2**x for x in range(9, 14, 2)]
ratio_options = [2**x for x in range(1, 6, 2)]
inner_dim_options = [2**x for x in range(9, 14, 2)]
# Randomly generated sizes with less favorable alignment.
inner_dim_options += [
1120, 1215, 1856, 1302, 1329, 1531, 1313, 1672, 1851, 1584
]
dtype_options = [np.float32, np.float64]
options = (outer_dim_options, ratio_options, inner_dim_options, dtype_options)
# pylint: disable=g-long-lambda
op_functors = [lambda vc, vs, seg_ids:
("sorted", math_ops.segment_sum(vc, vs)),
lambda vc, vs, seg_ids:
("unsorted",
math_ops.unsorted_segment_sum(vc, vs, seg_ids[-1]+1))]
# pylint: enable=g-long-lambda
repeat = 10
def _npTypeToStr(self, t):
if t == np.float32:
return "fp32"
if t == np.float64:
return "fp64"
def _runGraph(self, op_functor, outer_dim, ratio, inner_dim, dtype):
output_outer_dim = int(outer_dim / ratio)
const = np.random.randint(5, size=(outer_dim, inner_dim))
seg_ids = np.sort(np.random.randint(output_outer_dim, size=outer_dim))
vs = variables.Variable(seg_ids.astype(np.int32))
with ops.device("/gpu:0"):
vc = variables.Variable(const.astype(dtype))
name, op = op_functor(vc, vs, seg_ids)
with session.Session() as sess:
variables.global_variables_initializer().run()
r = self.run_op_benchmark(
sess,
op,
min_iters=self.repeat,
name="_".join(
map(str,
[name, outer_dim, ratio, inner_dim,
self._npTypeToStr(dtype)])))
return name, r["wall_time"]
def benchmarkSegmentSumGPU(self):
if not test.is_gpu_available(cuda_only=True):
return
for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):
op_functor = self.op_functors[0]
with ops.Graph().as_default():
self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)
def benchmarkUnsortedSegmentSumGPU(self):
if not test.is_gpu_available(cuda_only=True):
return
for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):
op_functor = self.op_functors[1]
with ops.Graph().as_default():
self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)
if __name__ == "__main__":
test.main()
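# A minimal NumPy reference (not part of the test file above) for the
# sparse_segment_sum semantics exercised by these tests: gather rows of `data`
# at `indices`, then sum gathered rows that share a segment id. The helper is
# hypothetical and assumes sorted, non-negative segment ids.
import numpy as np

def np_sparse_segment_sum(data, indices, segment_ids):
  num_segments = int(max(segment_ids)) + 1
  out = np.zeros((num_segments,) + data.shape[1:], dtype=data.dtype)
  for idx, seg in zip(indices, segment_ids):
    out[seg] += data[idx]  # row `idx` of data contributes to segment `seg`
  return out

# Rows 8 and 3 fall in segment 0; rows 0 and 9 fall in segment 1.
_data = np.arange(40, dtype=np.float32).reshape(10, 4)
assert np_sparse_segment_sum(_data, [8, 3, 0, 9], [0, 0, 1, 1]).shape == (2, 4)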
|
tensorflow-master
|
tensorflow/python/kernel_tests/segment_reduction_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for broadcast rules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.platform import test
def _test_values(shape):
return np.reshape(np.cumsum(np.ones(shape), dtype=np.int32), newshape=shape)
class AssertBroadcastableTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def _test_valid(self, weights, values):
static_op = weights_broadcast_ops.assert_broadcastable(
weights=weights, values=values)
weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
values_placeholder = array_ops.placeholder(dtypes_lib.float32)
dynamic_op = weights_broadcast_ops.assert_broadcastable(
weights=weights_placeholder, values=values_placeholder)
with self.cached_session():
static_op.run()
dynamic_op.run(feed_dict={
weights_placeholder: weights,
values_placeholder: values,
})
@test_util.run_deprecated_v1
def testScalar(self):
self._test_valid(weights=5, values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def test1x1x1(self):
self._test_valid(
weights=np.asarray((5,)).reshape((1, 1, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def test1x1xN(self):
self._test_valid(
weights=np.asarray((5, 7, 11, 3)).reshape((1, 1, 4)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def test1xNx1(self):
self._test_valid(
weights=np.asarray((5, 11)).reshape((1, 2, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def test1xNxN(self):
self._test_valid(
weights=np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testNx1x1(self):
self._test_valid(
weights=np.asarray((5, 7, 11)).reshape((3, 1, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testNx1xN(self):
self._test_valid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3)).reshape((3, 1, 4)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testNxNxN(self):
self._test_valid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4)),
values=_test_values((3, 2, 4)))
def _test_invalid(self, weights, values):
error_msg = 'weights can not be broadcast to values'
with self.assertRaisesRegexp(ValueError, error_msg):
weights_broadcast_ops.assert_broadcastable(weights=weights, values=values)
weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
values_placeholder = array_ops.placeholder(dtypes_lib.float32)
dynamic_op = weights_broadcast_ops.assert_broadcastable(
weights=weights_placeholder, values=values_placeholder)
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.OpError, error_msg):
dynamic_op.run(feed_dict={
weights_placeholder: weights,
values_placeholder: values,
})
@test_util.run_deprecated_v1
def testInvalid1(self):
self._test_invalid(weights=np.asarray((5,)), values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalid1x1(self):
self._test_invalid(
weights=np.asarray((5,)).reshape((1, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidPrefixMatch(self):
self._test_invalid(
weights=np.asarray((5, 7, 11, 3, 2, 12)).reshape((3, 2)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidSuffixMatch(self):
self._test_invalid(
weights=np.asarray((5, 7, 11, 3, 2, 12, 7, 5)).reshape((2, 4)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidOnesExtraDim(self):
self._test_invalid(
weights=np.asarray((5,)).reshape((1, 1, 1, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidPrefixMatchExtraDim(self):
self._test_invalid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidSuffixMatchExtraDim(self):
self._test_invalid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((1, 3, 2, 4)),
values=_test_values((3, 2, 4)))
class BroadcastWeightsTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def _test_valid(self, weights, values, expected):
static_op = weights_broadcast_ops.broadcast_weights(
weights=weights, values=values)
weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
values_placeholder = array_ops.placeholder(dtypes_lib.float32)
dynamic_op = weights_broadcast_ops.broadcast_weights(
weights=weights_placeholder, values=values_placeholder)
with self.cached_session():
self.assertAllEqual(expected, self.evaluate(static_op))
self.assertAllEqual(expected, dynamic_op.eval(feed_dict={
weights_placeholder: weights,
values_placeholder: values,
}))
@test_util.run_deprecated_v1
def testScalar(self):
self._test_valid(
weights=5,
values=_test_values((3, 2, 4)),
expected=5 * np.ones((3, 2, 4)))
@test_util.run_deprecated_v1
def test1x1x1(self):
self._test_valid(
weights=np.asarray((5,)).reshape((1, 1, 1)),
values=_test_values((3, 2, 4)),
expected=5 * np.ones((3, 2, 4)))
@test_util.run_deprecated_v1
def test1x1xN(self):
weights = np.asarray((5, 7, 11, 3)).reshape((1, 1, 4))
self._test_valid(
weights=weights,
values=_test_values((3, 2, 4)),
expected=np.tile(weights, reps=(3, 2, 1)))
@test_util.run_deprecated_v1
def test1xNx1(self):
weights = np.asarray((5, 11)).reshape((1, 2, 1))
self._test_valid(
weights=weights,
values=_test_values((3, 2, 4)),
expected=np.tile(weights, reps=(3, 1, 4)))
@test_util.run_deprecated_v1
def test1xNxN(self):
weights = np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4))
self._test_valid(
weights=weights,
values=_test_values((3, 2, 4)),
expected=np.tile(weights, reps=(3, 1, 1)))
@test_util.run_deprecated_v1
def testNx1x1(self):
weights = np.asarray((5, 7, 11)).reshape((3, 1, 1))
self._test_valid(
weights=weights,
values=_test_values((3, 2, 4)),
expected=np.tile(weights, reps=(1, 2, 4)))
@test_util.run_deprecated_v1
def testNx1xN(self):
weights = np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3)).reshape((3, 1, 4))
self._test_valid(
weights=weights,
values=_test_values((3, 2, 4)),
expected=np.tile(weights, reps=(1, 2, 1)))
@test_util.run_deprecated_v1
def testNxNxN(self):
weights = np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4))
self._test_valid(
weights=weights, values=_test_values((3, 2, 4)), expected=weights)
def _test_invalid(self, weights, values):
error_msg = 'weights can not be broadcast to values'
with self.assertRaisesRegexp(ValueError, error_msg):
weights_broadcast_ops.broadcast_weights(weights=weights, values=values)
weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
values_placeholder = array_ops.placeholder(dtypes_lib.float32)
dynamic_op = weights_broadcast_ops.broadcast_weights(
weights=weights_placeholder, values=values_placeholder)
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.OpError, error_msg):
dynamic_op.eval(feed_dict={
weights_placeholder: weights,
values_placeholder: values,
})
@test_util.run_deprecated_v1
def testInvalid1(self):
self._test_invalid(weights=np.asarray((5,)), values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalid1x1(self):
self._test_invalid(
weights=np.asarray((5,)).reshape((1, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidPrefixMatch(self):
self._test_invalid(
weights=np.asarray((5, 7, 11, 3, 2, 12)).reshape((3, 2)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidSuffixMatch(self):
self._test_invalid(
weights=np.asarray((5, 7, 11, 3, 2, 12, 7, 5)).reshape((2, 4)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidOnesExtraDim(self):
self._test_invalid(
weights=np.asarray((5,)).reshape((1, 1, 1, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidPrefixMatchExtraDim(self):
self._test_invalid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidSuffixMatchExtraDim(self):
self._test_invalid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((1, 3, 2, 4)),
values=_test_values((3, 2, 4)))
if __name__ == '__main__':
test.main()
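# A minimal sketch (not part of the test file above) of the rule these tests
# pin down: unlike general NumPy broadcasting, assert_broadcastable accepts
# `weights` only as a scalar or as a tensor of the same rank as `values` whose
# every dimension is 1 or equal to the matching values dimension. The helper
# name is hypothetical.
def np_weights_broadcastable(weights_shape, values_shape):
  if weights_shape == ():  # scalar weights always broadcast
    return True
  if len(weights_shape) != len(values_shape):  # no implicit rank expansion
    return False
  return all(w in (1, v) for w, v in zip(weights_shape, values_shape))

assert np_weights_broadcastable((1, 2, 1), (3, 2, 4))         # test1xNx1 above
assert not np_weights_broadcastable((2, 4), (3, 2, 4))        # suffix match
assert not np_weights_broadcastable((1, 3, 2, 4), (3, 2, 4))  # extra leading 1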
|
tensorflow-master
|
tensorflow/python/kernel_tests/weights_broadcast_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for BatchToSpace op.
Additional tests are included in spacetobatch_op_test.py, where the BatchToSpace
op is tested in tandem with its reverse SpaceToBatch op.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class PythonOpImpl(object):
@staticmethod
def batch_to_space(*args, **kwargs):
return array_ops.batch_to_space(*args, **kwargs)
class CppOpImpl(object):
@staticmethod
def batch_to_space(*args, **kwargs):
return gen_array_ops.batch_to_space(*args, **kwargs)
class BatchToSpaceDepthToSpace(test.TestCase, PythonOpImpl):
# Verifies that: batch_to_space(x) = transpose(depth_to_space(transpose(x)))
@test_util.run_deprecated_v1
def testDepthToSpaceTranspose(self):
x = np.arange(20 * 5 * 8 * 7, dtype=np.float32).reshape([20, 5, 8, 7])
block_size = 2
for crops_dtype in [dtypes.int64, dtypes.int32]:
crops = array_ops.zeros((2, 2), dtype=crops_dtype)
y1 = self.batch_to_space(x, crops, block_size=block_size)
y2 = array_ops.transpose(
array_ops.depth_to_space(
array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size),
[3, 1, 2, 0])
with self.cached_session():
self.assertAllEqual(y1.eval(), y2.eval())
class BatchToSpaceDepthToSpaceCpp(BatchToSpaceDepthToSpace, CppOpImpl):
pass
class BatchToSpaceErrorHandlingTest(test.TestCase, PythonOpImpl):
@test_util.run_deprecated_v1
def testInputWrongDimMissingBatch(self):
# The input is missing the first dimension ("batch")
x_np = [[[1], [2]], [[3], [4]]]
crops = np.zeros((2, 2), dtype=np.int32)
block_size = 2
with self.assertRaises(ValueError):
_ = self.batch_to_space(x_np, crops, block_size)
@test_util.run_deprecated_v1
def testBlockSize0(self):
# The block size is 0.
x_np = [[[[1], [2]], [[3], [4]]]]
crops = np.zeros((2, 2), dtype=np.int32)
block_size = 0
with self.assertRaises(ValueError):
out_tf = self.batch_to_space(x_np, crops, block_size)
out_tf.eval()
@test_util.run_deprecated_v1
def testBlockSizeOne(self):
# The block size is 1. The block size needs to be > 1.
x_np = [[[[1], [2]], [[3], [4]]]]
crops = np.zeros((2, 2), dtype=np.int32)
block_size = 1
with self.assertRaises(ValueError):
out_tf = self.batch_to_space(x_np, crops, block_size)
out_tf.eval()
@test_util.run_deprecated_v1
def testBlockSizeLarger(self):
# The block size is too large for this input.
x_np = [[[[1], [2]], [[3], [4]]]]
crops = np.zeros((2, 2), dtype=np.int32)
block_size = 10
with self.assertRaises(ValueError):
out_tf = self.batch_to_space(x_np, crops, block_size)
out_tf.eval()
@test_util.run_deprecated_v1
def testBlockSizeSquaredNotDivisibleBatch(self):
# The block size squared does not divide the batch.
x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
crops = np.zeros((2, 2), dtype=np.int32)
block_size = 3
with self.assertRaises(ValueError):
_ = self.batch_to_space(x_np, crops, block_size)
@test_util.run_deprecated_v1
def testUnknownShape(self):
t = self.batch_to_space(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32),
block_size=4)
self.assertEqual(4, t.get_shape().ndims)
class BatchToSpaceErrorHandlingCppTest(BatchToSpaceErrorHandlingTest,
CppOpImpl):
pass
class BatchToSpaceNDErrorHandlingTest(test.TestCase):
def _testStaticShape(self, input_shape, block_shape, paddings, error):
block_shape = np.array(block_shape)
paddings = np.array(paddings)
# Try with sizes known at graph construction time.
with self.assertRaises(error):
_ = array_ops.batch_to_space_nd(
np.zeros(input_shape, np.float32), block_shape, paddings)
def _testDynamicShape(self, input_shape, block_shape, paddings):
block_shape = np.array(block_shape)
paddings = np.array(paddings)
# Try with sizes unknown at graph construction time.
input_placeholder = array_ops.placeholder(dtypes.float32)
block_shape_placeholder = array_ops.placeholder(
dtypes.int32, shape=block_shape.shape)
paddings_placeholder = array_ops.placeholder(dtypes.int32)
t = array_ops.batch_to_space_nd(input_placeholder, block_shape_placeholder,
paddings_placeholder)
with self.assertRaises(ValueError):
_ = t.eval({
input_placeholder: np.zeros(input_shape, np.float32),
block_shape_placeholder: block_shape,
paddings_placeholder: paddings
})
def _testShape(self, input_shape, block_shape, paddings, error):
self._testStaticShape(input_shape, block_shape, paddings, error)
self._testDynamicShape(input_shape, block_shape, paddings)
@test_util.run_deprecated_v1
def testInputWrongDimMissingBatch(self):
self._testShape([2, 2], [2, 2], [[0, 0], [0, 0]], ValueError)
self._testShape([2, 2, 3], [2, 2, 3], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testBlockSize0(self):
# The block size is 0.
self._testShape([1, 2, 2, 1], [0, 1], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testBlockSizeNegative(self):
self._testShape([1, 2, 2, 1], [-1, 1], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testNegativePadding(self):
self._testShape([1, 2, 2], [1, 1], [[0, -1], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testCropTooLarge(self):
# The amount to crop exceeds the padded size.
self._testShape([1 * 2 * 2, 2, 3, 1], [2, 2], [[3, 2], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testBlockSizeSquaredNotDivisibleBatch(self):
# The batch dimension is not divisible by the product of the block_shape.
self._testShape([3, 1, 1, 1], [2, 3], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testUnknownShape(self):
# Verify that input shape and paddings shape can be unknown.
_ = array_ops.batch_to_space_nd(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
# Only number of input dimensions is known.
t = array_ops.batch_to_space_nd(
array_ops.placeholder(
dtypes.float32, shape=(None, None, None, None)),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
self.assertEqual(4, t.get_shape().ndims)
# Dimensions are partially known.
t = array_ops.batch_to_space_nd(
array_ops.placeholder(
dtypes.float32, shape=(None, None, None, 2)),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
t = array_ops.batch_to_space_nd(
array_ops.placeholder(
dtypes.float32, shape=(3 * 2 * 3, None, None, 2)), [2, 3],
array_ops.placeholder(dtypes.int32))
self.assertEqual([3, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
t = array_ops.batch_to_space_nd(
array_ops.placeholder(
dtypes.float32, shape=(3 * 2 * 3, None, 2, 2)), [2, 3],
[[1, 1], [0, 1]])
self.assertEqual([3, None, 5, 2], t.get_shape().as_list())
# Dimensions are fully known.
t = array_ops.batch_to_space_nd(
array_ops.placeholder(
dtypes.float32, shape=(3 * 2 * 3, 2, 1, 2)), [2, 3],
[[1, 1], [0, 0]])
self.assertEqual([3, 2, 3, 2], t.get_shape().as_list())
class BatchToSpaceGradientTest(test.TestCase, PythonOpImpl):
# Check the gradients.
def _checkGrad(self, x, crops, block_size):
assert 4 == x.ndim
with self.cached_session():
tf_x = ops.convert_to_tensor(x)
tf_y = self.batch_to_space(tf_x, crops, block_size)
epsilon = 1e-5
(x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
# Tests a gradient for batch_to_space of x which is a four dimensional
# tensor of shape [b * block_size * block_size, h, w, d].
def _compare(self, b, h, w, d, block_size, crop_beg, crop_end):
block_size_sq = block_size * block_size
x = np.random.normal(0, 1, b * h * w * d *
block_size_sq).astype(np.float32).reshape(
[b * block_size * block_size, h, w, d])
crops = np.array(
[[crop_beg, crop_end], [crop_beg, crop_end]], dtype=np.int32)
self._checkGrad(x, crops, block_size)
# Don't use very large numbers as dimensions here, as the result is a tensor
# whose size is the Cartesian product of the dimensions.
@test_util.run_deprecated_v1
def testSmall(self):
block_size = 2
crop_beg = 0
crop_end = 0
self._compare(1, 2, 3, 5, block_size, crop_beg, crop_end)
@test_util.run_deprecated_v1
def testSmall2(self):
block_size = 2
crop_beg = 0
crop_end = 0
self._compare(2, 4, 3, 2, block_size, crop_beg, crop_end)
@test_util.run_deprecated_v1
def testSmallCrop1x1(self):
block_size = 2
crop_beg = 1
crop_end = 1
self._compare(1, 2, 3, 5, block_size, crop_beg, crop_end)
class BatchToSpaceGradientCppTest(BatchToSpaceGradientTest, CppOpImpl):
pass
class BatchToSpaceNDGradientTest(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_shape, crops, crops_dtype):
block_shape = np.array(block_shape)
crops = constant_op.constant(
np.array(crops).reshape((len(block_shape), 2)), crops_dtype)
with self.cached_session():
tf_x = ops.convert_to_tensor(x)
tf_y = array_ops.batch_to_space_nd(tf_x, block_shape, crops)
epsilon = 1e-5
(x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
def _compare(self, input_shape, block_shape, crops, crops_dtype):
input_shape = list(input_shape)
input_shape[0] *= np.prod(block_shape)
x = np.random.normal(
0, 1, np.prod(input_shape)).astype(np.float32).reshape(input_shape)
self._checkGrad(x, block_shape, crops, crops_dtype)
# Don't use very large numbers as dimensions here, as the result is a tensor
# whose size is the Cartesian product of the dimensions.
@test_util.run_deprecated_v1
def testSmall(self):
for dtype in [dtypes.int64, dtypes.int32]:
self._compare([1, 2, 3, 5], [2, 2], [[0, 0], [0, 0]], dtype)
@test_util.run_deprecated_v1
def testSmall2(self):
for dtype in [dtypes.int64, dtypes.int32]:
self._compare([2, 4, 3, 2], [2, 2], [[0, 0], [0, 0]], dtype)
@test_util.run_deprecated_v1
def testSmallCrop1x1(self):
for dtype in [dtypes.int64, dtypes.int32]:
self._compare([1, 2, 3, 5], [2, 2], [[1, 1], [1, 1]], dtype)
if __name__ == "__main__":
test.main()
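# A minimal NumPy reference (not part of the test file above) for the 4-D
# batch_to_space transformation tested here, assuming input shape
# [b * block_size**2, h, w, d]: fold the block factors out of the batch
# dimension, interleave them into the spatial dimensions, then crop. The
# helper name is hypothetical.
import numpy as np

def np_batch_to_space(x, crops, block_size):
  b4, h, w, d = x.shape
  b = b4 // (block_size * block_size)
  y = x.reshape(block_size, block_size, b, h, w, d)
  y = y.transpose(2, 3, 0, 4, 1, 5)                    # -> [b, h, bs, w, bs, d]
  y = y.reshape(b, h * block_size, w * block_size, d)  # merge blocks into h, w
  (top, bottom), (left, right) = crops
  return y[:, top:y.shape[1] - bottom, left:y.shape[2] - right, :]

x = np.arange(16, dtype=np.float32).reshape(4, 1, 2, 2)
assert np_batch_to_space(x, [[0, 0], [0, 0]], block_size=2).shape == (1, 2, 4, 2)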
|
tensorflow-master
|
tensorflow/python/kernel_tests/batchtospace_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Softplus and SoftplusGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SoftplusTest(test.TestCase):
def _npSoftplus(self, np_features):
np_features = np.asarray(np_features)
zero = np.asarray(0).astype(np_features.dtype)
return np.logaddexp(zero, np_features)
def _testSoftplus(self, np_features, use_gpu=False):
np_softplus = self._npSoftplus(np_features)
with self.cached_session(use_gpu=use_gpu):
softplus = nn_ops.softplus(np_features)
tf_softplus = self.evaluate(softplus)
self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
self.assertTrue(np.all(tf_softplus > 0))
self.assertShapeEqual(np_softplus, softplus)
def testNumbers(self):
for t in [np.float16, np.float32, np.float64]:
self._testSoftplus(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=False)
self._testSoftplus(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
log_eps = np.log(np.finfo(t).eps)
one = t(1)
ten = t(10)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=False)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=True)
@test_util.run_deprecated_v1
def testGradient(self):
with self.cached_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("softplus (float) gradient err = ", err)
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testGradGrad(self):
with self.cached_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
(grad,) = gradients_impl.gradients(y, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], grad, [2, 5], x_init_value=x_init)
print("softplus (float) gradient of gradient err = ", err)
self.assertLess(err, 5e-5)
@test_util.run_deprecated_v1
def testGradGradGrad(self):
with self.cached_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
(grad,) = gradients_impl.gradients(y, x)
(grad_grad,) = gradients_impl.gradients(grad, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], grad_grad, [2, 5], x_init_value=x_init)
print("softplus (float) third-order gradient err = ", err)
self.assertLess(err, 5e-5)
@test_util.run_deprecated_v1
def testNoInts(self):
with self.cached_session():
with self.assertRaisesRegexp(
TypeError,
"'features' has DataType int32 not in list of allowed values"):
nn_ops.softplus(constant_op.constant(42)).eval()
if __name__ == "__main__":
test.main()
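# A minimal sketch (not part of the test file above) of the numerically stable
# softplus evaluation the tests compare against via np.logaddexp(0, x):
# softplus(x) = log(1 + exp(x)) = max(x, 0) + log1p(exp(-|x|)), which avoids
# overflow for large positive x and underflow for large negative x.
import numpy as np

def np_stable_softplus(x):
  x = np.asarray(x, dtype=np.float64)
  return np.maximum(x, 0.0) + np.log1p(np.exp(-np.abs(x)))

x = np.array([-1000.0, -1.0, 0.0, 1.0, 1000.0])
assert np.allclose(np_stable_softplus(x), np.logaddexp(0.0, x))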
|
tensorflow-master
|
tensorflow/python/kernel_tests/softplus_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numbers
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# The maximum input rank to test.
_MAX_RANK = 5
def _powerset(iterable):
"""Helper for generating all possible reduction_axes arguments.
Example:
powerset([0,1,2]): () (0,) (1,) (2,) (0,1) (0,2) (1,2) (0,1,2)
Args:
iterable: An iterable of items to generate the powerset of.
Returns:
The powerset of all items in iterable.
"""
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1))
class ReducedShapeTest(test.TestCase):
def _check(self, shape, axes, result):
output = math_ops.reduced_shape(shape, axes=axes)
self.assertAllEqual(output.eval(), result)
@test_util.run_deprecated_v1
def testSimple(self):
with self.cached_session():
self._check([3], [], [3])
self._check([3], [0], [1])
self._check([5, 3], [], [5, 3])
self._check([5, 3], [0], [1, 3])
self._check([5, 3], [1], [5, 1])
self._check([5, 3], [0, 1], [1, 1])
@test_util.run_deprecated_v1
def testZeros(self):
"""Check that reduced_shape does the right thing with zero dimensions."""
with self.cached_session():
self._check([0], [], [0])
self._check([0], [0], [1])
self._check([0, 3], [], [0, 3])
self._check([0, 3], [0], [1, 3])
self._check([0, 3], [1], [0, 1])
self._check([0, 3], [0, 1], [1, 1])
self._check([3, 0], [], [3, 0])
self._check([3, 0], [0], [1, 0])
self._check([3, 0], [1], [3, 1])
self._check([3, 0], [0, 1], [1, 1])
@test_util.run_deprecated_v1
def testNegAxes(self):
with self.cached_session():
self._check([10, 10, 10], [-1], [10, 10, 1])
self._check([10, 10, 10], [-1, 2], [10, 10, 1])
self._check([10, 10, 10], [-1, -1], [10, 10, 1])
self._check([10, 10, 10], [-1, 0], [1, 10, 1])
self._check([10, 10, 10], [-3], [1, 10, 10])
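# A minimal sketch (not part of the test class above) of what reduced_shape is
# checked to compute: normalize negative axes, deduplicate, and replace each
# reduced dimension with 1. The helper name is hypothetical.
def np_reduced_shape(shape, axes):
  rank = len(shape)
  reduced = {ax % rank for ax in axes}  # -1 and rank - 1 collapse together
  return [1 if i in reduced else dim for i, dim in enumerate(shape)]

assert np_reduced_shape([5, 3], [0]) == [1, 3]
assert np_reduced_shape([10, 10, 10], [-1, 0]) == [1, 10, 1]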
class ReductionUnknownShape(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
with self.cached_session():
for dtype, reductions in [(dtypes.float32,
(math_ops.reduce_sum, math_ops.reduce_mean,
math_ops.reduce_prod, math_ops.reduce_max,
math_ops.reduce_min,
math_ops.reduce_euclidean_norm)),
(dtypes.bool, (math_ops.reduce_all,
math_ops.reduce_any))]:
for reduction in reductions:
x = array_ops.placeholder(
dtype=dtype, shape=None) # Some tensor w/ unknown shape.
y = reduction(x)
self.assertEqual(y.shape, ())
class BaseReductionTest(test.TestCase):
def _tf_reduce(self, x, reduction_axes, keepdims):
raise NotImplementedError()
def _np_reduce(self, x, reduction_axes, keepdims):
raise NotImplementedError()
def _makeIncremental(self, shape, dtype):
data = np.arange(np.prod(shape)).reshape(shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data -= 2j * data
return data
def _makeRandom(self, shape, dtype):
data = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data -= 2j * data
return data
def _compare(self, x, reduction_axes, keepdims, feed_dict=None):
np_ans = self._np_reduce(x, reduction_axes, keepdims)
with self.cached_session(use_gpu=True) as sess:
tf_ans = self._tf_reduce(x, reduction_axes, keepdims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes, feed_dict=None):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, keepdims=False, feed_dict=feed_dict)
self._compare(x, reduction_axes, keepdims=True, feed_dict=feed_dict)
def _compareAllAxes(self, x, feed_dict=None):
self._compareAll(x, None)
for axes in _powerset(range(x.ndim)):
self._compareAll(x, axes, feed_dict)
def _compareGradient(self, x, reduction_axes, rtol=1e-8, atol=1e-8):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareGradient(x, reduction_axes[0], rtol=rtol, atol=atol)
with self.cached_session(use_gpu=True):
t = ops.convert_to_tensor(x)
su = self._tf_reduce(t, reduction_axes, False)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, x.shape, su, su.get_shape().as_list(), x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=rtol, atol=atol)
def _compareGradientAxes(self, x, rtol=1e-8, atol=1e-8):
self._compareGradient(x, None, rtol=rtol, atol=atol)
self._compareGradient(x, [], rtol=rtol, atol=atol)
self._compareGradient(x, 0, rtol=rtol, atol=atol)
self._compareGradient(x, [1], rtol=rtol, atol=atol)
self._compareGradient(x, [2], rtol=rtol, atol=atol)
self._compareGradient(x, [1, 2], rtol=rtol, atol=atol)
self._compareGradient(x, [0, 1, 2, 3], rtol=rtol, atol=atol)
class SumReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_sum(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
return np.sum(x, axis=reduction_axes, keepdims=keepdims)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_sum([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat16(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float16)
self._compareAllAxes(np_arr)
# Test that the mean doesn't overflow in float16; run only on GPU, since the
# GPU kernel has the more accurate implementation.
if not test.is_gpu_available():
return
arr = np.ones([68000], dtype=np.float16)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_arr = variables.Variable(arr)
variables.global_variables_initializer().run()
tf_mean = math_ops.reduce_mean(tf_arr, 0, False)
tf_out_mean = self.evaluate(tf_mean)
self.assertAllClose(tf_out_mean, 1.)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
for _ in range(10):
size_x = int(2**np.random.uniform(0, 15))
size_y = int(2**np.random.uniform(0, 15))
if size_x * size_y > 1e7:
size_y = int(1e7 / size_x)
arr = np.ones([size_x, size_y], dtype=np.float32)
col_sum = np.sum(arr, axis=0)
row_sum = np.sum(arr, axis=1)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_row_sum = self._tf_reduce(arr, 1, False)
tf_col_sum = self._tf_reduce(arr, 0, False)
tf_out_row, tf_out_col = self.evaluate([tf_row_sum, tf_col_sum])
self.assertAllClose(col_sum, tf_out_col)
self.assertAllClose(row_sum, tf_out_row)
for size_x in [1, 3, 16, 33]:
for size_y in [1, 3, 16, 33]:
for size_z in [1, 3, 16, 33]:
arr = np.ones([size_x, size_y, size_z], dtype=np.float32)
sum_y = np.sum(arr, axis=1)
sum_xz = np.sum(arr, axis=(0, 2))
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_sum_xz = self._tf_reduce(arr, [0, 2], False)
tf_sum_y = self._tf_reduce(arr, 1, False)
tf_out_sum_xz, tf_out_sum_y = self.evaluate([tf_sum_xz, tf_sum_y])
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testInvalidIndex(self):
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = ops.convert_to_tensor(np_arr)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [-3])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [2])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [0, 2])
@test_util.run_deprecated_v1
def testPartialShapes(self):
np.random.seed(1618)
# Input shape is unknown.
reduction_axes = [1, 2]
c_unknown = array_ops.placeholder(dtypes.float32)
s_unknown = math_ops.reduce_sum(c_unknown, reduction_axes)
self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_unknown: np_input})
# Input shape only has known rank.
c_known_rank = array_ops.placeholder(dtypes.float32)
c_known_rank.set_shape(tensor_shape.unknown_shape(rank=3))
s_known_rank = math_ops.reduce_sum(
c_known_rank, reduction_axes, keepdims=True)
self.assertEqual(3, s_known_rank.get_shape().rank)
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_known_rank: np_input})
# Reduction indices are unknown.
unknown_indices = array_ops.placeholder(dtypes.int32)
c_unknown_indices = constant_op.constant([[10.0], [20.0]])
s_unknown_indices = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keepdims=False)
self.assertEqual(tensor_shape.unknown_shape(),
s_unknown_indices.get_shape())
s_unknown_indices_keep = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keepdims=True)
self.assertEqual(2, s_unknown_indices_keep.get_shape().rank)
@test_util.run_deprecated_v1
def testWrongShapeForReductionIndices(self):
reduction_axes = [[1], [2]]
c_unknown = array_ops.placeholder(dtypes.float32)
with self.assertRaisesWithPredicateMatch(ValueError,
".*must be at most rank 1.*"):
math_ops.reduce_sum(c_unknown, reduction_axes)
# TODO: also cover int64 inputs here.
@test_util.run_deprecated_v1
def testGradient(self):
for dtype in [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]:
x = self._makeIncremental([2, 3, 4, 2], dtype)
self._compareGradientAxes(x)
@test_util.run_deprecated_v1
def testHighRank(self):
# Do a bunch of random high dimensional reductions
np.random.seed(42)
for _ in range(20):
rank = np.random.randint(4, 10 + 1)
axes, = np.nonzero(np.random.randint(2, size=rank))
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
self._compareAll(data, axes)
# Check some particular axis patterns
for rank in 4, 7, 10:
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
for axes in ([], np.arange(rank), np.arange(0, rank, 2),
np.arange(1, rank, 2)):
self._compareAll(data, axes)
@test_util.run_deprecated_v1
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session(use_gpu=True):
x = array_ops.zeros([0, 3])
y = math_ops.reduce_sum(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_sum(x, [0])
self.assertAllEqual(y.eval(), np.zeros(9938))
class MeanReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_mean(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
elif isinstance(reduction_axes, numbers.Integral):
reduction_axes = (reduction_axes,)
if reduction_axes is None:
count = np.prod(x.shape)
else:
count = np.prod([x.shape[ax] for ax in reduction_axes])
# np.mean automatically converts integer inputs to float, while TensorFlow's
# reduce_mean does not. For integer inputs, we emulate TensorFlow's behavior
# using np.sum and truncating division.
np_sum = np.sum(x, axis=reduction_axes, keepdims=keepdims)
if np.issubdtype(x.dtype, np.integer):
return np_sum // count
return np_sum / count
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testGradient(self):
s = [2, 3, 4, 2]
for dtype in [dtypes.float32, dtypes.float64]:
x = self._makeIncremental(s, dtype)
self._compareGradientAxes(x, rtol=1e-3, atol=1e-3)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session(use_gpu=True):
x = array_ops.zeros([0, 3])
y = math_ops.reduce_mean(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_mean(x, [0]).eval()
self.assertEqual(y.shape, (9938,))
self.assertTrue(np.all(np.isnan(y)))
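# A short sketch (not part of the test class above) of the integer-mean
# behavior _np_reduce emulates: tf.reduce_mean keeps the integer dtype and
# truncates, while np.mean promotes to float.
import numpy as np

_x = np.array([1, 2, 2], dtype=np.int32)
assert np.isclose(np.mean(_x), 5.0 / 3.0)  # NumPy promotes to float
assert np.sum(_x) // _x.size == 1          # truncating mean, as emulated above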
class EuclideanNormReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_euclidean_norm(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
if reduction_axes is None or reduction_axes != tuple():
np_fro = np.sqrt(
np.sum(x * np.conj(x), axis=reduction_axes, keepdims=keepdims))
else:
np_fro = x
if np.issubdtype(x.dtype, np.integer):
np_fro = np.floor(np_fro)
return np_fro
@test_util.run_deprecated_v1
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True):
v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_euclidean_norm(x, [0]).eval()
self.assertEqual(y.shape, (9938,))
self.assertAllEqual(y, np.zeros(9938))
class ProdReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_prod(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
return np.prod(x, axis=reduction_axes, keepdims=keepdims)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_prod([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
# Numpy automatically upgrades the type of np.prod from int32 to int64, so
# Numpy does not overflow an int32 np.prod while TensorFlow does. To avoid
# overflow, divide the incremental int32 array by 2.
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32) / 2
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testGradientWithZeros(self):
s = [2, 3, 4, 2]
x = self._makeIncremental(s, dtypes.float32) / 20.
# No zeros in input
self._compareGradientAxes(x, rtol=1e-3, atol=1e-3)
# Zero at beginning
x1 = x.copy()
x1[:, :, 0, :] = 0
self._compareGradientAxes(x1, rtol=1e-3, atol=1e-3)
# Zero at end
x2 = x.copy()
x2[:, :, -1, :] = 0
self._compareGradientAxes(x2, rtol=1e-3, atol=1e-3)
# Zero in middle
x3 = x.copy()
x3[:, :, 2, :] = 0
self._compareGradientAxes(x3, rtol=1e-3, atol=1e-3)
# All zeros
x4 = x.copy()
x4[:, :, :, :] = 0
self._compareGradientAxes(x4, rtol=1e-3, atol=1e-3)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session(use_gpu=True):
x = array_ops.zeros([0, 3])
y = math_ops.reduce_prod(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_prod(x, [0])
self.assertAllEqual(y.eval(), np.ones(9938))
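# A short sketch (not part of the test class above) of why the zero cases in
# testGradientWithZeros matter: the naive product gradient prod(x) / x[i]
# divides by zero, while the true gradient is the product of all *other*
# entries, computable from exclusive cumulative products. The helper is a
# hypothetical 1-D illustration, not TensorFlow's implementation.
import numpy as np

def np_prod_grad(x):
  left = np.concatenate(([1.0], np.cumprod(x[:-1])))            # prod(x[:i])
  right = np.concatenate((np.cumprod(x[::-1])[-2::-1], [1.0]))  # prod(x[i+1:])
  return left * right

assert np.array_equal(np_prod_grad(np.array([2.0, 0.0, 3.0])), [0.0, 6.0, 0.0])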
class MinReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amin(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amin(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_min(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_min([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(1, 31).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(1, 31).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
@test_util.run_deprecated_v1
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [1])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.cached_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_min(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class MaxReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amax(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amax(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_max(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_max([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testInt64Reduce3D(self):
# Create a 3D array of int64s and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.int64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
@test_util.run_deprecated_v1
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [1])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.cached_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_max(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
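# Editor's note: a small sketch, not part of the original file, of the
# identity the testInfinity cases above rely on: numpy's max/min over a mix
# of +/-inf is well defined, and the TF kernels are compared against it.
def _infinity_reduction_sketch():
  arr = np.array([-np.inf, np.inf], dtype=np.float32)
  assert np.amax(arr) == np.inf
  assert np.amin(arr) == -np.inf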
class AllReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.all(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.all(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_all(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.session(use_gpu=True) as sess:
v = math_ops.reduce_all([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, True)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.1).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
class AnyReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.any(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.any(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_any(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.session(use_gpu=True) as sess:
v = math_ops.reduce_any([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, True)
  def testAny3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.9).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
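# Editor's note: an illustrative sketch, not part of the original suite. The
# testEmpty cases above reduce a shape-[0] input; reduce_all/reduce_any are
# compared against numpy, which follows the vacuous-truth conventions:
def _empty_logical_reduction_sketch():
  empty = np.array([], dtype=np.bool_)
  assert np.all(empty)      # "for all" over nothing is vacuously true
  assert not np.any(empty)  # "there exists" over nothing is false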
class CountNonzeroReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False, zero=0,
feed_dict=None):
np_ans = (x != zero).astype(np.int32)
if reduction_axes is None:
np_ans = np.sum(np_ans, keepdims=keepdims)
else:
reduction_axes = np.array(reduction_axes).astype(np.int32)
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu) as sess:
tf_ans = math_ops.count_nonzero(x, reduction_axes, keepdims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes, feed_dict=None):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, False, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, False, use_gpu=False, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=False, feed_dict=feed_dict)
@test_util.run_deprecated_v1
def testBoolReduce1D(self):
    # Create a 1D array of booleans
np_arr = np.asarray([False, False, True, False, False, True])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
@test_util.run_deprecated_v1
def testFloatReduce1D(self):
# Create a 1D array of floats
np_arr = np.asarray([0.0, 1.0, -1.0, 0.0, 0.0, 3.0]).astype(np.float32)
self._compareAll(np_arr, [0])
@test_util.run_deprecated_v1
def testFloatReduce4D(self):
# Create a 4D array of floats and reduce across some
# dimensions
    np_arr = np.floor(np.arange(0.0, 210.0) / 100.0).reshape(
        [2, 3, 5, 7]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
# Need specialization for reduce(4D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
@test_util.run_deprecated_v1
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
@test_util.run_deprecated_v1
def testDegenerate(self):
for use_gpu in False, True:
with self.cached_session(use_gpu=use_gpu):
for dtype in (dtypes.bool,):
          # A large size is needed to make a buggy Eigen kernel fail.
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.count_nonzero(x, [0])
self.assertAllEqual(y.eval(), np.zeros(9938))
def testStringReduce(self):
# Test case for GitHub issue 18712
with self.cached_session() as sess:
v = math_ops.count_nonzero(constant_op.constant(["test"]))
self.assertAllClose(self.evaluate(v), 1)
@test_util.run_deprecated_v1
def testStringReduce1D(self):
# Create a 1D array of strings
x = np.asarray(["", "", "a", "", "", "b"])
self._compare(x, None, keepdims=False, zero=np.str(""))
self._compare(x, [], keepdims=False, zero=np.str(""))
self._compare(x, [0], keepdims=False, zero=np.str(""))
self._compare(x, None, keepdims=True, zero=np.str(""))
self._compare(x, [], keepdims=True, zero=np.str(""))
self._compare(x, [0], keepdims=True, zero=np.str(""))
@test_util.run_deprecated_v1
def testStringReduce2D(self):
# Create a 2D array of strings
x = np.asarray([["", "", "a", "", "", "b"],
["", "c", "", "d", "", ""],
["e", "", "f", "", "", ""]])
self._compare(x, None, keepdims=False, zero=np.str(""))
self._compare(x, [], keepdims=False, zero=np.str(""))
self._compare(x, [0], keepdims=False, zero=np.str(""))
self._compare(x, [1], keepdims=False, zero=np.str(""))
self._compare(x, [0, 1], keepdims=False, zero=np.str(""))
self._compare(x, None, keepdims=True, zero=np.str(""))
self._compare(x, [], keepdims=True, zero=np.str(""))
self._compare(x, [0], keepdims=True, zero=np.str(""))
self._compare(x, [0, 1], keepdims=True, zero=np.str(""))
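# Editor's note: a sketch, not part of the original suite, of the reference
# computation _compare builds above: count_nonzero with a custom `zero` is an
# inequality test followed by a sum, which also covers the string cases.
def _count_nonzero_reference_sketch():
  x = np.asarray(["", "", "a", "", "", "b"])
  counts = (x != "").astype(np.int32)
  assert np.sum(counts) == 2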
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/reduction_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.gather_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class GatherNdTest(test.TestCase):
def _testSimpleDtype(self, dtype):
with self.cached_session(use_gpu=True):
params = constant_op.constant(np.array([8, 1, 2, 3, 7, 5], dtype=dtype))
indices = constant_op.constant([[4], [4], [0]])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertAllEqual(np.array([7, 7, 8], dtype=dtype), gather_nd_val)
self.assertEqual([3], gather_nd_t.get_shape())
def testSimpleDtype(self):
self._testSimpleDtype(np.float32)
self._testSimpleDtype(np.float64)
self._testSimpleDtype(np.int32)
self._testSimpleDtype(np.int64)
self._testSimpleDtype(np.complex64)
self._testSimpleDtype(np.complex128)
    self._testSimpleDtype("|S")  # byte strings in Python 2 and 3
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123337890") # Error messages differ
def testEmptyIndicesAndParamsOKButJustEmptyParamsFails(self):
with self.session(use_gpu=True):
params = np.ones((3, 3), dtype=np.float32)
indices_empty = np.empty((0, 2), dtype=np.int32)
gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)
gather_nd_ok_val = self.evaluate(gather_nd_ok_t)
self.assertEqual([0], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
indices_empty = np.empty((0, 1), dtype=np.int32)
gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)
gather_nd_ok_val = self.evaluate(gather_nd_ok_t)
self.assertEqual([0, 3], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0, 3), dtype=np.float32), gather_nd_ok_val)
params_empty = np.empty((0, 3), dtype=np.float32)
indices_empty = np.empty((0, 2), dtype=np.int32)
gather_nd_ok_t = array_ops.gather_nd(params_empty, indices_empty)
gather_nd_ok_val = self.evaluate(gather_nd_ok_t)
self.assertEqual([0], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
params_empty = np.empty((0, 3), dtype=np.float32)
indices_nonempty = np.zeros((1, 2), dtype=np.int32)
gather_nd_break_t = array_ops.gather_nd(params_empty, indices_nonempty)
with self.assertRaisesOpError(
r"Requested more than 0 entries, but params is empty."):
self.evaluate(gather_nd_break_t)
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
def testIndexScalar(self):
with self.session(use_gpu=True):
params = np.array(
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([4, 1])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([], gather_nd_t.get_shape())
self.assertAllEqual(np.array(7), gather_nd_val)
def testParamsRankLargerThanIndexIndexScalarSlices(self):
with self.session(use_gpu=True):
params = np.array(
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([4])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([2], gather_nd_t.get_shape())
self.assertAllEqual(np.array([-7, 7]), gather_nd_val)
def testParamsRankLargerThanIndexSlices(self):
with self.session(use_gpu=True):
params = np.array(
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([[4], [4], [0]])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([3, 2], gather_nd_t.get_shape())
self.assertAllEqual(np.array([[-7, 7], [-7, 7], [-8, 8]]), gather_nd_val)
def testHigherRankParamsLargerThanIndexSlices(self):
with self.session(use_gpu=True):
params = np.array(
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
dtype=np.float32).T
params_t = constant_op.constant(params)
indices = constant_op.constant([[4], [4], [0]])
gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([3, 2, 2], gather_nd_t.get_shape())
self.assertAllEqual(params[[4, 4, 0]], gather_nd_val)
def testEmptyIndicesLastRankMeansCopyEntireTensor(self):
with self.session(use_gpu=True):
params = np.array(
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
dtype=np.float32).T
params_t = constant_op.constant(params)
indices = constant_op.constant(
[[], []], dtype=dtypes.int32) # Size (2, 0)
gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([2, 6, 2, 2], gather_nd_t.get_shape())
self.assertAllEqual(
np.vstack((params[np.newaxis, :], params[np.newaxis, :])),
gather_nd_val)
def testHigherRankParamsAndIndicesLargerThanIndexSlices(self):
with self.session(use_gpu=True):
params = np.array(
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
dtype=np.float32).T
params_t = constant_op.constant(params)
indices = constant_op.constant([[[3], [2], [1]], [[4], [4], [0]]])
gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([2, 3, 2, 2], gather_nd_t.get_shape())
self.assertAllEqual(params[[3, 2, 1, 4, 4, 0]].reshape(2, 3, 2, 2),
gather_nd_val)
def testHigherRankParams(self):
with self.session(use_gpu=True):
shape = (10, 20, 5, 1, 17)
params = np.random.rand(*shape)
indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = self.evaluate(gather_nd_t)
expected = params[tuple(indices.T)]
self.assertAllEqual(expected, gather_nd_val)
self.assertEqual([2000], gather_nd_t.get_shape())
def testHigherRankParamsAndIndices(self):
with self.session(use_gpu=True):
shape = (10, 20, 5, 1, 17)
params = np.random.rand(*shape)
indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
indices_reshaped = indices.reshape([10, 10, 20, 5])
gather_nd_t = array_ops.gather_nd(params, indices_reshaped)
gather_nd_val = self.evaluate(gather_nd_t)
expected = params[tuple(indices.T)]
self.assertAllEqual(expected.reshape([10, 10, 20]), gather_nd_val)
self.assertEqual([10, 10, 20], gather_nd_t.get_shape())
def assertIndexedSlices(self, t):
self.assertIsInstance(t, ops.IndexedSlices)
@test_util.run_deprecated_v1
def testUnknownIndices(self):
params = constant_op.constant([[0, 1, 2]])
indices = array_ops.placeholder(dtypes.int32)
gather_nd_t = array_ops.gather_nd(params, indices)
shape = gather_nd_t.get_shape()
self.assertEqual(None, shape.ndims)
self.assertEqual(None, tensor_shape.dimension_value(shape[0]))
@test_util.run_deprecated_v1
def testBadIndicesCPU(self):
with self.session(use_gpu=False):
params = [0, 1, 2]
indices = [[[0], [7]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,1\] = \[7\] does not index into param shape \[3\]"):
self.evaluate(gather_nd)
def _disabledTestBadIndicesGPU(self):
    # TODO: disabled because the behavior differs between GPU and CPU.
    # On GPU, the bad indices do not raise an error; they fetch 0 values.
if not test.is_gpu_available():
return
with self.session(use_gpu=True):
params = [0, 1, 2]
indices = [[[0], [7]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,1\] = \[7\] does not index into param shape \[3\]"):
self.evaluate(gather_nd)
@test_util.run_deprecated_v1
def testBadIndicesWithSlicesCPU(self):
with self.session(use_gpu=False):
params = [[0, 1, 2]]
indices = [[[0], [0], [1]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,2\] = \[1\] does not index into param shape \[1,3\]"):
self.evaluate(gather_nd)
def _disabledTestBadIndicesWithSlicesGPU(self):
    # TODO: disabled because the behavior differs between GPU and CPU.
    # On GPU, the bad indices do not raise an error; they fetch 0 values.
if not test.is_gpu_available():
return
with self.session(use_gpu=True):
params = [[0, 1, 2]]
indices = [[[0], [0], [1]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,2\] = \[1\] does not index into param shape \[1,3\]"):
self.evaluate(gather_nd)
@test_util.run_deprecated_v1
def testGradientsRank2Elements(self):
indices = constant_op.constant([[0, 0], [1, 1]], dtype=dtypes.int32)
inputs = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant([1, 2], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array([[1, 0], [0, 2]], dtype=np.float64)
with self.session(use_gpu=True):
assert np.array_equal(expected_grads, self.evaluate(grads))
@test_util.run_deprecated_v1
def testGradientsRank2Slices(self):
indices = constant_op.constant([[1], [0]], dtype=dtypes.int32)
inputs = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array([[3, 4], [1, 2]], dtype=np.float64)
with self.session(use_gpu=True):
self.assertIndexedSlices(grads)
self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads).eval())
@test_util.run_deprecated_v1
def testGradientsRank3Elements(self):
indices = constant_op.constant(
[[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=dtypes.int32)
inputs = constant_op.constant(
[[[1, 3], [5, 7]], [[2, 4], [6, 8]]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
with self.session(use_gpu=True):
self.assertAllEqual(expected_grads, self.evaluate(grads))
@test_util.run_deprecated_v1
def testGradientsRank7Elements(self):
# Shape [1,1,2,1,1,2,2]
indices = constant_op.constant(
[[[
[[[[0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0]]]],
[[[[0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1]]]]
]]],
dtype=dtypes.int32)
inputs = constant_op.constant(
[[[
[[[[1, 3], [5, 7]]]],
[[[[2, 4], [6, 8]]]]
]]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[[
[[[[1, 2], [3, 4]]]],
[[[[5, 6], [7, 8]]]]
]]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[[
[[[[5, 6], [1, 2]]]],
[[[[3, 4], [7, 8]]]]
]]], dtype=np.float64)
with self.session(use_gpu=True):
self.assertAllEqual(expected_grads, self.evaluate(grads))
@test_util.run_deprecated_v1
def testGradientsInt64Indices(self):
indices = constant_op.constant(
[[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=dtypes.int64)
inputs = constant_op.constant(
[[[1, 3], [5, 7]], [[2, 4], [6, 8]]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
with self.session(use_gpu=True):
self.assertAllEqual(expected_grads, self.evaluate(grads))
@test_util.run_deprecated_v1
def testGradientsRank2SlicesWithEmptySpace(self):
indices = constant_op.constant([[2], [0], [5]], dtype=dtypes.int32)
inputs = constant_op.constant(
[[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9]],
dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[1, 1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2, 2, 2, 2],
[3, 3, 3, 3, 3, 3, 3, 3, 3]],
dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[2, 2, 2, 2, 2, 2, 2, 2, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0], [3, 3, 3, 3, 3, 3, 3, 3, 3]],
dtype=np.float64)
with self.session(use_gpu=True):
self.assertIndexedSlices(grads)
self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads).eval())
@test_util.run_v1_only("RefVariable is not supported in v2")
def testGatherNdRefVariable(self):
with self.cached_session():
v = variables.RefVariable(constant_op.constant([[1, 2], [3, 4], [5, 6]]))
self.evaluate(variables.global_variables_initializer())
gather = array_ops.gather_nd(v, [[0, 1], [2, 0]])
if not context.executing_eagerly(): # .op doesn't make sense in Eager
self.assertEqual("GatherNd", gather.op.name)
self.assertAllEqual([2, 5], gather)
@test_util.run_in_graph_and_eager_modes
def testGatherNdResourceVariable(self):
with compat.forward_compatibility_horizon(2019, 4, 30):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(
constant_op.constant([[1, 2], [3, 4], [5, 6]]))
self.evaluate(variables.global_variables_initializer())
gather = array_ops.gather_nd(v, [[0, 1], [2, 0]])
if not context.executing_eagerly(): # .op doesn't make sense in Eager
self.assertEqual("ResourceGatherNd", gather.op.inputs[0].op.type)
self.assertAllEqual([2, 5], gather)
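# Editor's note: an illustrative sketch, not part of the original suite. When
# the last dimension of `indices` equals the rank of `params`, gather_nd is
# equivalent to numpy fancy indexing with the transposed index matrix -- the
# same params[tuple(indices.T)] pattern the higher-rank tests above use.
def _gather_nd_numpy_equivalent_sketch():
  params = np.arange(24).reshape(2, 3, 4)
  indices = np.array([[0, 1, 2], [1, 2, 3]])  # two full-depth indices
  gathered = params[tuple(indices.T)]
  assert gathered.tolist() == [params[0, 1, 2], params[1, 2, 3]]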
class GatherNdOpBenchmark(test.Benchmark):
def benchmark_gather_nd_op(self):
shape = (100, 47, 18, 170, 13)
np.random.seed(127)
params = np.random.rand(*shape)
indices = np.vstack([np.random.randint(0, s, size=10000) for s in shape]).T
with session.Session():
t_params = variables.Variable(params)
t_indices = variables.Variable(indices)
gather_op = array_ops.gather_nd(t_params, t_indices)
variables.global_variables_initializer().run()
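      # Warm-up runs so one-time setup cost is excluded from the timing below.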
for _ in range(10):
self.evaluate(gather_op)
t1 = time.time()
for _ in range(1000):
self.evaluate(gather_op)
t2 = time.time()
self.report_benchmark(iters=1000, wall_time=(t2 - t1) / 1000.0)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/gather_nd_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bucketize_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class BucketizationOpTest(test.TestCase):
def testInt(self):
op = math_ops._bucketize(
constant_op.constant([-5, 0, 2, 3, 5, 8, 10, 11, 12]),
boundaries=[0, 3, 8, 11])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
with self.session(use_gpu=True) as sess:
self.assertAllEqual(expected_out, self.evaluate(op))
def testFloat(self):
op = math_ops._bucketize(
constant_op.constant([-5., 0., 2., 3., 5., 8., 10., 11., 12.]),
boundaries=[0., 3., 8., 11.])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
with self.session(use_gpu=True) as sess:
self.assertAllEqual(expected_out, self.evaluate(op))
def test2DInput(self):
op = math_ops._bucketize(
constant_op.constant([[-5, 0, 2, 3, 5], [8, 10, 11, 12, 0]]),
boundaries=[0, 3, 8, 11])
expected_out = [[0, 1, 1, 2, 2], [3, 3, 4, 4, 1]]
with self.session(use_gpu=True) as sess:
self.assertAllEqual(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def testInvalidBoundariesOrder(self):
op = math_ops._bucketize(
constant_op.constant([-5, 0]), boundaries=[0, 8, 3, 11])
with self.session(use_gpu=True) as sess:
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError, "Expected sorted boundaries"):
self.evaluate(op)
def testBoundariesNotList(self):
with self.assertRaisesRegexp(
TypeError, "Expected list.*"):
math_ops._bucketize(constant_op.constant([-5, 0]), boundaries=0)
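# Editor's note: a sketch, not part of the original suite. The bucket ids the
# tests above expect follow the same rule as numpy's searchsorted with
# side="right": a value sitting on a boundary falls into the bucket to its
# right. numpy is imported locally because this file does not depend on it.
def _bucketize_reference_sketch():
  import numpy as np
  values = np.array([-5, 0, 2, 3, 5, 8, 10, 11, 12])
  boundaries = np.array([0, 3, 8, 11])
  buckets = np.searchsorted(boundaries, values, side="right")
  assert buckets.tolist() == [0, 1, 1, 2, 2, 3, 3, 4, 4]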
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/bucketize_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for inplace_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import inplace_ops
from tensorflow.python.platform import test as test_lib
class InplaceOpsTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasicUpdate(self):
for dtype in [dtypes.float32, dtypes.int32, dtypes.int64]:
with self.session(use_gpu=True):
x = array_ops.ones([7, 3], dtype)
y = np.ones([7, 3], dtype.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, [3], array_ops.ones([1, 3], dtype))
y[3, :] = 1
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, [-1],
array_ops.ones([1, 3], dtype) * 2)
y[-1, :] = 2
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, 5, array_ops.ones([3], dtype) * 7)
y[5, :] = 7
self.assertAllClose(x.eval(), y)
@test_util.run_deprecated_v1
def testBasicUpdateBool(self):
with self.session(use_gpu=True):
x = array_ops.ones([7, 3], dtypes.bool)
y = np.ones([7, 3], dtypes.bool.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, [3], array_ops.ones([1, 3],
dtypes.bool))
y[3, :] = True
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, [-1],
array_ops.zeros([1, 3], dtypes.bool))
y[-1, :] = False
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, 5, array_ops.zeros([3], dtypes.bool))
y[5, :] = False
self.assertAllClose(x.eval(), y)
@test_util.run_deprecated_v1
def testBasicAdd(self):
for dtype in [dtypes.float32, dtypes.int32, dtypes.int64]:
with self.cached_session(use_gpu=True):
x = array_ops.ones([7, 3], dtype)
y = np.ones([7, 3], dtype.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
        x = inplace_ops.inplace_add(x, [3], array_ops.ones([1, 3], dtype))
y[3, :] += 1
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_add(x, [-1], array_ops.ones([1, 3], dtype) * 2)
y[-1, :] += 2
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_add(x, 5, array_ops.ones([3], dtype) * 7)
y[5, :] += 7
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_add(x, None, array_ops.ones([7, 3], dtype) * 99)
y[:, :] += 99
self.assertAllClose(x.eval(), y)
@test_util.run_deprecated_v1
def testBasicSub(self):
for dtype in [dtypes.float32, dtypes.int32, dtypes.int64]:
with self.cached_session(use_gpu=True):
x = array_ops.ones([7, 3], dtype)
y = np.ones([7, 3], dtype.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_sub(x, [3], array_ops.ones([1, 3], dtype))
y[3, :] -= 1
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_sub(x, [-1], array_ops.ones([1, 3], dtype) * 2)
y[-1, :] -= 2
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_sub(x, 5, array_ops.ones([3], dtype) * 7)
y[5, :] -= 7
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_sub(x, None, array_ops.ones([7, 3], dtype) * 99)
y[:, :] -= 99
self.assertAllClose(x.eval(), y)
@test_util.run_deprecated_v1
def testRandom(self):
with self.session(use_gpu=True):
d0, d1, d2 = 100, 3, 5
x = array_ops.zeros([d0, d1, d2])
y = np.zeros([d0, d1, d2])
for _ in xrange(20):
idx = np.random.choice(d0, d0 // 10, replace=False)
val = np.random.randint(10, size=(d0 // 10, d1, d2))
op = np.random.randint(3)
if op == 0:
x = inplace_ops.inplace_update(x, idx, val)
y[idx, :] = val
elif op == 1:
x = inplace_ops.inplace_add(x, idx, val)
y[idx, :] += val
elif op == 2:
x = inplace_ops.inplace_sub(x, idx, val)
y[idx, :] -= val
self.assertAllClose(x.eval(), y)
@test_util.run_deprecated_v1
def testRandom1D(self):
with self.session(use_gpu=True):
d0 = 100
x = array_ops.zeros([d0])
y = np.zeros([d0])
for _ in xrange(20):
idx = np.random.choice(d0, d0 // 10, replace=False)
val = np.random.randint(10, size=(d0 // 10))
op = np.random.randint(3)
if op == 0:
x = inplace_ops.inplace_update(x, idx, val)
y[idx] = val
elif op == 1:
x = inplace_ops.inplace_add(x, idx, val)
y[idx] += val
elif op == 2:
x = inplace_ops.inplace_sub(x, idx, val)
y[idx] -= val
self.assertAllClose(x.eval(), y)
def testAlias(self):
with self.session(use_gpu=True) as sess:
x = array_ops.ones([2, 3])
y = inplace_ops.alias_inplace_add(x, [0], [[1, 2, 3]])
with ops.control_dependencies([y]):
z = array_ops.identity(x)
_, vy, vz = self.evaluate([x, y, z])
self.assertAllClose(vy, vz)
def testError(self):
with self.cached_session():
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"must be a vector"):
_ = inplace_ops.inplace_update([[1.]], [[0]], [[10]]).eval()
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"x and v shape doesn't match"):
_ = inplace_ops.inplace_update([[1.]], [0], [10]).eval()
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"i and x shape doesn't match"):
_ = inplace_ops.inplace_update([[1.]], [0, 1], [[10]]).eval()
@test_util.run_deprecated_v1
def testEmpty(self):
for dtype in [
dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64, dtypes.bool,
dtypes.uint8
]:
with self.cached_session(use_gpu=True):
test_shapes = [(), (1,), (2, 3), (0, 2), (2, 3, 5), (2, 0, 5)]
for shape in test_shapes:
val = inplace_ops.empty(shape, dtype).eval()
self.assertEqual(val.shape, shape)
self.assertEqual(val.dtype, dtype.as_numpy_dtype)
val = inplace_ops.empty(shape, dtype, init=True).eval()
self.assertEqual(val.shape, shape)
self.assertEqual(val.dtype, dtype.as_numpy_dtype)
self.assertAllEqual(val, np.zeros(shape, dtype.as_numpy_dtype))
val = inplace_ops.empty_like(array_ops.zeros(shape, dtype)).eval()
self.assertEqual(val.shape, shape)
self.assertEqual(val.dtype, dtype.as_numpy_dtype)
val = inplace_ops.empty_like(
array_ops.zeros(shape, dtype), init=True).eval()
self.assertEqual(val.shape, shape)
self.assertEqual(val.dtype, dtype.as_numpy_dtype)
self.assertAllEqual(val, np.zeros(shape, dtype.as_numpy_dtype))
with self.cached_session(use_gpu=True):
val = inplace_ops.empty((1, 2), dtypes.string, init=True).eval()
self.assertEqual(val.tolist(), [[b"", b""]])
val = inplace_ops.empty((1, 2), dtypes.string, init=False).eval()
self.assertEqual(val.tolist(), [[b"", b""]])
if __name__ == "__main__":
test_lib.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/inplace_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for custom user ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.framework import errors
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class InvalidOpTest(test.TestCase):
def testBasic(self):
library_filename = os.path.join(resource_loader.get_data_files_path(),
'invalid_op.so')
with self.assertRaises(errors.InvalidArgumentError):
load_library.load_op_library(library_filename)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/invalid_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.variable_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_NP_TO_TF = {
np.float32: dtypes.float32,
np.float64: dtypes.float64,
np.int32: dtypes.int32,
np.int64: dtypes.int64,
}
class VariableOpTest(test.TestCase):
def _initFetch(self, x, tftype, use_gpu=None):
with self.test_session(use_gpu=use_gpu):
p = state_ops.variable_op(x.shape, tftype)
op = state_ops.assign(p, x)
op.op.run()
return self.evaluate(p)
def _testTypes(self, vals):
for dtype in [np.float32, np.float64, np.int32, np.int64]:
self.setUp()
x = vals.astype(dtype)
tftype = _NP_TO_TF[dtype]
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=False))
      # NOTE(touts): the GPU test should pass for all types, whether or not
      # the Variable op has a GPU implementation for that type, because we
      # expect Variable and Assign to have GPU implementations for the
      # matching TF dtypes.
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=True))
@test_util.run_deprecated_v1
def testBasic(self):
self._testTypes(np.arange(0, 20).reshape([4, 5]))
@test_util.run_deprecated_v1
def testset_shape(self):
p = state_ops.variable_op([1, 2], dtypes.float32)
self.assertEqual([1, 2], p.get_shape())
p = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), p.get_shape())
@test_util.run_deprecated_v1
def testAssign(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32)
self.assertShapeEqual(value, var)
assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
@test_util.run_deprecated_v1
def testAssignNoValidateShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32)
self.assertShapeEqual(value, var)
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
@test_util.run_deprecated_v1
def testAssignNoVarShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
@test_util.run_deprecated_v1
def testAssignNoVarShapeNoValidateShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
def _NewShapelessTensor(self):
tensor = array_ops.placeholder(dtypes.float32)
self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())
return tensor
@test_util.run_deprecated_v1
def testAssignNoValueShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
var = state_ops.variable_op(shape, dtypes.float32)
assigned = state_ops.assign(var, value)
self.assertEqual(shape, var.get_shape())
self.assertEqual(shape, assigned.get_shape())
@test_util.run_deprecated_v1
def testAssignNoValueShapeNoValidateShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
var = state_ops.variable_op(shape, dtypes.float32)
self.assertEqual(shape, var.get_shape())
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), assigned.get_shape())
@test_util.run_deprecated_v1
def testAssignNoShape(self):
with self.cached_session():
value = self._NewShapelessTensor()
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
self.assertEqual(tensor_shape.unknown_shape(),
state_ops.assign(var, value).get_shape())
@test_util.run_deprecated_v1
def testAssignNoShapeNoValidateShape(self):
with self.cached_session():
value = self._NewShapelessTensor()
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
self.assertEqual(
tensor_shape.unknown_shape(),
state_ops.assign(
var, value, validate_shape=False).get_shape())
@test_util.run_deprecated_v1
def testAssignUpdate(self):
var = state_ops.variable_op([1, 2], dtypes.float32)
added = state_ops.assign_add(var, [[2.0, 3.0]])
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
self.assertEqual([1, 2], subbed.get_shape())
@test_util.run_deprecated_v1
def testAssignUpdateNoVarShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
added = state_ops.assign_add(var, [[2.0, 3.0]])
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
self.assertEqual([1, 2], subbed.get_shape())
@test_util.run_deprecated_v1
def testAssignUpdateNoValueShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32)
added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual([1, 2], subbed.get_shape())
@test_util.run_deprecated_v1
def testAssignUpdateNoShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), added.get_shape())
subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())
@test_util.run_deprecated_v1
def testTemporaryVariable(self):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="foo")
var = state_ops.assign(var, [[4.0, 5.0]])
var = state_ops.assign_add(var, [[6.0, 7.0]])
final = gen_state_ops.destroy_temporary_variable(var, var_name="foo")
self.assertAllClose([[10.0, 12.0]], self.evaluate(final))
@test_util.run_deprecated_v1
def testDestroyNonexistentTemporaryVariable(self):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable([1, 2], dtypes.float32)
final = gen_state_ops.destroy_temporary_variable(var, var_name="bad")
with self.assertRaises(errors.NotFoundError):
self.evaluate(final)
@test_util.run_deprecated_v1
def testDuplicateTemporaryVariable(self):
with test_util.use_gpu():
var1 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="dup")
var1 = state_ops.assign(var1, [[1.0, 2.0]])
var2 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="dup")
var2 = state_ops.assign(var2, [[3.0, 4.0]])
final = var1 + var2
with self.assertRaises(errors.AlreadyExistsError):
self.evaluate(final)
@test_util.run_deprecated_v1
def testDestroyTemporaryVariableTwice(self):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable([1, 2], dtypes.float32)
val1 = gen_state_ops.destroy_temporary_variable(var, var_name="dup")
val2 = gen_state_ops.destroy_temporary_variable(var, var_name="dup")
final = val1 + val2
with self.assertRaises(errors.NotFoundError):
self.evaluate(final)
@test_util.run_deprecated_v1
def testTemporaryVariableNoLeak(self):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="bar")
final = array_ops.identity(var)
self.evaluate(final)
@test_util.run_deprecated_v1
def testTwoTemporaryVariablesNoLeaks(self):
with test_util.use_gpu():
var1 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="var1")
var2 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="var2")
final = var1 + var2
self.evaluate(final)
@test_util.run_deprecated_v1
def testAssignDependencyAcrossDevices(self):
with test_util.use_gpu():
# The variable and an op to increment it are on the GPU.
var = state_ops.variable_op([1], dtypes.float32)
self.evaluate(state_ops.assign(var, [1.0]))
increment = state_ops.assign_add(var, [1.0])
with ops.control_dependencies([increment]):
with test_util.force_cpu():
# This mul op is pinned to the CPU, but reads the variable from the
# GPU. The test ensures that the dependency on 'increment' is still
# honored, i.e., the Send and Recv from GPU to CPU should take place
# only after the increment.
result = math_ops.multiply(var, var)
self.assertAllClose([4.0], self.evaluate(result))
@test_util.run_deprecated_v1
def testIsVariableInitialized(self):
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
v0 = state_ops.variable_op([1, 2], dtypes.float32)
self.assertEqual(False, variables.is_variable_initialized(v0).eval())
state_ops.assign(v0, [[2.0, 3.0]]).eval()
self.assertEqual(True, variables.is_variable_initialized(v0).eval())
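# Editor's note: a sketch, not part of the original suite, of the static
# shape inference the Assign tests above exercise. It only builds graph ops
# and inspects their shapes; nothing is executed, and it assumes the imports
# already present in this file (graph mode, as in the tests).
def _assign_shape_inference_sketch():
  var = state_ops.variable_op([1, 2], dtypes.float32)
  assigned = state_ops.assign(var, [[42.0, 43.0]])
  # With the default validate_shape=True the output keeps the merged shape.
  assert assigned.get_shape().as_list() == [1, 2]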
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/variable_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for various tensorflow.ops.tf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
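# Editor's note: an illustrative sketch, not part of the original file,
# showing the dense-to-sparse bookkeeping _sparsify performs: the (indices,
# values) pair extracted via np.where can rebuild the thresholded array.
def _sparsify_roundtrip_sketch(thresh=0.5):
  x = np.random.rand(3, 4)
  x[x < thresh] = 0
  non_zero = np.where(x)
  indices = np.vstack(non_zero).astype(np.int64).T
  values = x[non_zero]
  rebuilt = np.zeros_like(x)
  rebuilt[tuple(indices.T)] = values
  assert np.allclose(rebuilt, x)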
class ShapeOpsTest(test.TestCase):
def _compareShape(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.shape(x)
tf_ans_64 = array_ops.shape(x, out_type=dtypes.int64)
result = self.evaluate(tf_ans)
result_64 = self.evaluate(tf_ans_64)
self.assertAllEqual(np_ans, result)
self.assertAllEqual(np_ans, result_64)
self.assertShapeEqual(np_ans, tf_ans)
def _compareShapeSparse(self, x_np, use_gpu=False):
np_ans = np.array(np.shape(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.shape(x_tf)
result = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareShapeN(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.cached_session(use_gpu=use_gpu) as sess:
tf_ans = array_ops.shape_n([x, x, x])
tf_ans_64 = array_ops.shape_n([x, x, x], out_type=dtypes.int64)
result = self.evaluate(tf_ans)
result_64 = self.evaluate(tf_ans_64)
for i in range(3):
self.assertAllEqual(np_ans, result[i])
self.assertAllEqual(np_ans, result_64[i])
self.assertShapeEqual(np_ans, tf_ans[i])
def _compareRank(self, x, use_gpu=False):
np_ans = np.asarray(np.ndim(x))
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.rank(x)
result = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareRankSparse(self, x_np, use_gpu=False):
np_ans = np.asarray(np.ndim(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.rank(x_tf)
result = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareSize(self, x, use_gpu=False):
np_ans = np.asarray(np.size(x))
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.size(x)
result = self.evaluate(tf_ans)
tf_ans_64 = array_ops.size(x, out_type=dtypes.int64)
result_64 = self.evaluate(tf_ans_64)
self.assertAllEqual(np_ans, result)
self.assertAllEqual(np_ans, result_64)
self.assertShapeEqual(np_ans, tf_ans)
def _compareSizeSparse(self, x_np, use_gpu=False):
np_ans = np.asarray(np.size(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.size(x_tf)
result = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _testCpu(self, x):
self._compareShape(x, use_gpu=False)
self._compareShapeN(x, use_gpu=False)
self._compareRank(x, use_gpu=False)
self._compareSize(x, use_gpu=False)
self._compareShapeSparse(x, use_gpu=False)
self._compareRankSparse(x, use_gpu=False)
self._compareSizeSparse(x, use_gpu=False)
def _testGpu(self, x):
self._compareShape(x, use_gpu=True)
self._compareShapeN(x, use_gpu=True)
self._compareRank(x, use_gpu=True)
self._compareSize(x, use_gpu=True)
self._compareShapeSparse(x, use_gpu=True)
self._compareRankSparse(x, use_gpu=True)
self._compareSizeSparse(x, use_gpu=True)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testBasic(self):
self._testAll(np.random.randn(2))
self._testAll(np.random.randn(2, 3))
self._testAll(np.random.randn(2, 3, 5))
self._testAll(np.random.randn(2, 3, 5, 7))
self._testAll(np.random.randn(2, 3, 5, 7, 11))
self._testAll(np.random.randn(2, 3, 5, 7, 11, 13))
def testBool(self):
self._testAll(np.random.choice((False, True), size=(2,)))
self._testAll(np.random.choice((False, True), size=(2, 3)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7, 11)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7, 11, 13)))
# Disabled because it takes too long to run, but manually verified
# as passing at time of writing.
def _test64BitOutput(self):
with self.cached_session():
inp = array_ops.zeros([2**31])
num_elements = array_ops.size_internal(
inp, optimize=False, out_type=dtypes.int64)
self.assertEqual(2**31, self.evaluate(num_elements))
# Too large for tf.int32 output.
with self.assertRaises(errors_impl.InvalidArgumentError):
with self.cached_session():
inp = array_ops.zeros([2**31])
num_elements = array_ops.size_internal(
inp, optimize=False, out_type=dtypes.int32)
self.assertEqual(2**31, self.evaluate(num_elements))
def _compareExpandDims(self, x, dim, use_gpu):
np_ans = np.expand_dims(x, axis=dim)
with self.cached_session(use_gpu=use_gpu):
tensor = array_ops.expand_dims(x, dim)
tf_ans = self.evaluate(tensor)
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareExpandDimsAll(self, x, dim):
self._compareExpandDims(x, dim, False)
self._compareExpandDims(x, dim, True)
def testExpandDims(self):
self._compareExpandDimsAll(np.zeros([2]), 0)
self._compareExpandDimsAll(np.zeros([2]), 1)
self._compareExpandDimsAll(np.zeros([2]), -1)
self._compareExpandDimsAll(np.zeros([2, 3]), 0)
self._compareExpandDimsAll(np.zeros([2, 3]), 1)
self._compareExpandDimsAll(np.zeros([2, 3]), 2)
self._compareExpandDimsAll(np.zeros([2, 3]), -1)
self._compareExpandDimsAll(np.zeros([2, 3]), -2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 0)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 1)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 3)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -1)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -3)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -4)
def testExpandDimsBool(self):
choice = lambda s: np.random.choice((False, True), size=s)
self._compareExpandDimsAll(choice([2]), 0)
self._compareExpandDimsAll(choice([2]), 1)
self._compareExpandDimsAll(choice([2]), -1)
self._compareExpandDimsAll(choice([2, 3]), 0)
self._compareExpandDimsAll(choice([2, 3]), 1)
self._compareExpandDimsAll(choice([2, 3]), 2)
self._compareExpandDimsAll(choice([2, 3]), -1)
self._compareExpandDimsAll(choice([2, 3]), -2)
self._compareExpandDimsAll(choice([2, 3, 5]), 0)
self._compareExpandDimsAll(choice([2, 3, 5]), 1)
self._compareExpandDimsAll(choice([2, 3, 5]), 2)
self._compareExpandDimsAll(choice([2, 3, 5]), 3)
self._compareExpandDimsAll(choice([2, 3, 5]), -1)
self._compareExpandDimsAll(choice([2, 3, 5]), -2)
self._compareExpandDimsAll(choice([2, 3, 5]), -3)
self._compareExpandDimsAll(choice([2, 3, 5]), -4)
@test_util.run_deprecated_v1
def testExpandDimsErrors(self):
with self.cached_session():
self.assertRaises(ValueError, array_ops.expand_dims,
np.zeros([2, 3, 5]), -5)
self.assertRaises(ValueError, array_ops.expand_dims,
[False, True, True], -5)
self.assertRaises(ValueError, array_ops.expand_dims,
np.zeros([2, 3, 5]), 4)
self.assertRaises(ValueError, array_ops.expand_dims,
[False, True, True], 4)
@test_util.run_deprecated_v1
def testExpandDimsGradient(self):
with self.cached_session():
inp = constant_op.constant(
np.random.rand(4, 2).astype("f"), dtype=dtypes.float32)
squeezed = array_ops.expand_dims(inp, 1)
err = gradient_checker.compute_gradient_error(inp, [4, 2], squeezed,
[4, 1, 2])
self.assertLess(err, 1e-3)
@test_util.run_deprecated_v1
def testExpandDimsScalar(self):
with self.cached_session():
inp = constant_op.constant(7)
self.assertAllEqual([7], array_ops.expand_dims(inp, 0).eval())
self.assertAllEqual([7], array_ops.expand_dims(inp, -1).eval())
inp = constant_op.constant(True)
self.assertAllEqual([True], array_ops.expand_dims(inp, 0).eval())
self.assertAllEqual([True], array_ops.expand_dims(inp, -1).eval())
def testExpandDimsDimType(self):
for dtype in [dtypes.int32, dtypes.int64]:
x = np.zeros([2])
np_ans = np.expand_dims(x, axis=0)
with self.cached_session(use_gpu=True):
tensor = array_ops.expand_dims(x, constant_op.constant(0, dtype))
tf_ans = self.evaluate(tensor)
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareSqueeze(self, x, squeeze_dims, use_gpu):
with self.cached_session(use_gpu=use_gpu):
if squeeze_dims:
np_ans = np.squeeze(x, axis=tuple(squeeze_dims))
tensor = array_ops.squeeze(x, squeeze_dims)
tf_ans = self.evaluate(tensor)
else:
np_ans = np.squeeze(x)
tensor = array_ops.squeeze(x)
tf_ans = self.evaluate(tensor)
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareSqueezeAll(self, x, squeeze_dims=None):
if squeeze_dims is None:
squeeze_dims = []
self._compareSqueeze(x, squeeze_dims, False)
self._compareSqueeze(x, squeeze_dims, True)
def testSqueeze(self):
# Nothing to squeeze.
self._compareSqueezeAll(np.zeros([2]))
self._compareSqueezeAll(np.zeros([2, 3]))
# Squeeze the middle element away.
self._compareSqueezeAll(np.zeros([2, 1, 2]))
# Squeeze on both ends.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]))
def testSqueezeBool(self):
choice = lambda s: np.random.choice((False, True), size=s)
# Nothing to squeeze.
self._compareSqueezeAll(choice([2]))
self._compareSqueezeAll(choice([2, 3]))
# Squeeze the middle element away.
self._compareSqueezeAll(choice([2, 1, 2]))
# Squeeze on both ends.
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]))
def testSqueezeSpecificDimension(self):
# Positive squeeze dim index.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [2, 4])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0, 4, 2])
# Negative squeeze dim index.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-1])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5, -1])
def testSqueezeSpecificDimensionBool(self):
choice = lambda s: np.random.choice((False, True), size=s)
# Positive squeeze dim index.
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [0])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [2, 4])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [0, 4, 2])
# Negative squeeze dim index.
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-1])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-3, -5])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-3, -5, -1])
def testSqueezeAllOnes(self):
# Numpy squeezes a 1 element tensor into a zero dimensional tensor.
# Verify that we do the same.
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
tensor = array_ops.squeeze(np.zeros([1, 1, 1]), [])
self.assertEqual(np.shape(1), tensor.get_shape())
tf_ans = self.evaluate(tensor)
self.assertEqual(np.shape(1), tf_ans.shape)
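# Note on the assertions above: np.shape(1) is the empty tuple (), i.e. the
# shape of a scalar, so both checks verify that squeezing [1, 1, 1] with an
# empty squeeze list yields a rank-0 tensor, matching np.squeeze.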
def testSqueezeAllOnesBool(self):
# Numpy squeezes a 1 element tensor into a zero dimensional tensor.
# Verify that we do the same.
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
tensor = array_ops.squeeze([[[False]]], [])
self.assertEqual(np.shape(1), tensor.get_shape())
tf_ans = self.evaluate(tensor)
self.assertEqual(np.shape(1), tf_ans.shape)
@test_util.run_deprecated_v1
def testSqueezeOnlyOnes(self):
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
input_1x1x3 = np.zeros([1, 1, 3])
self._compareSqueezeAll(input_1x1x3)
self._compareSqueezeAll(input_1x1x3, [0])
self._compareSqueezeAll(input_1x1x3, [1])
self.assertRaises(ValueError, array_ops.squeeze, input_1x1x3, [2])
@test_util.run_deprecated_v1
def testSqueezeErrors(self):
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [-4])
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [0, -4])
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [3])
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [2, 3])
@test_util.run_deprecated_v1
def testSqueezeGradient(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = array_ops.reshape(inp, [4, 1, 2])
squeezed = array_ops.squeeze(a, [])
err = gradient_checker.compute_gradient_error(a, [4, 1, 2], squeezed,
[4, 2])
self.assertLess(err, 1e-3)
@test_util.run_deprecated_v1
def testSqueezeGradientWithSqueezeDims(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = array_ops.reshape(inp, [4, 1, 2, 1])
squeezed = array_ops.squeeze(a, [1])
err = gradient_checker.compute_gradient_error(a, [4, 1, 2, 1], squeezed,
[4, 2, 1])
self.assertLess(err, 1e-3)
@test_util.run_deprecated_v1
def testSqueezeWithUnknownShape(self):
with self.cached_session():
a = array_ops.placeholder(dtypes.float32, shape=[2, None])
squeezed = array_ops.squeeze(a, [1])
self.assertEqual([2], squeezed.get_shape().as_list())
squeezed = array_ops.squeeze(a)
self.assertEqual(None, squeezed.get_shape())
self.assertRaises(ValueError, array_ops.squeeze, a, [0])
self.assertRaises(ValueError, array_ops.squeeze, a, [100])
class TileTest(test.TestCase, parameterized.TestCase):
def testScalar(self):
for use_gpu in False, True:
with self.cached_session(use_gpu=use_gpu):
a = constant_op.constant(7, shape=[], dtype=dtypes.float32)
tiled = array_ops.tile(a, [])
result = self.evaluate(tiled)
self.assertEqual(result.shape, ())
self.assertEqual([], tiled.get_shape())
self.assertEqual(7, result)
def testSimple(self):
# multiples could be int32 or int64
for dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(use_gpu=True):
inp = np.random.rand(4, 1).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, constant_op.constant([1, 4], dtype=dtype))
result = self.evaluate(tiled)
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
self.assertTrue((result == np.tile(inp, (1, 4))).all())
def testIdentityTileAndGrad(self):
with self.cached_session():
inp = np.random.rand(4, 1).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, [1, 1])
result = self.evaluate(tiled)
self.assertEqual(result.shape, (4, 1))
self.assertEqual([4, 1], tiled.get_shape())
self.assertTrue((result == np.tile(inp, (1, 1))).all())
def testEmpty(self):
with self.cached_session():
inp = np.random.rand(2, 3).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, [5, 0])
result = self.evaluate(tiled)
self.assertEqual(result.shape, (10, 0))
self.assertEqual([10, 0], tiled.get_shape())
@test_util.run_deprecated_v1
def testUnknownInputShape(self):
"""Importing can call _TileShape without shape of <multiples> known."""
with self.cached_session():
inp = array_ops.placeholder(dtypes.float32) # unknown shape
multiples = constant_op.constant([1, 2, 3, 4], dtype=np.int32)
tiled = array_ops.tile(inp, multiples)
gdef = tiled.graph.as_graph_def()
# Move the tile op to the start of the graph so that shapes of its inputs
# are not available when the shape function runs on import.
swapped = False
for i, n in enumerate(gdef.node):
if n.op == "Tile":
# Swap tile op to be first in gdef.node
assert i != 0
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(gdef.node[i])
gdef.node[i].CopyFrom(gdef.node[0])
gdef.node[0].CopyFrom(new_node)
swapped = True
assert swapped
tiled_imported, = importer.import_graph_def(
gdef, return_elements=[tiled.name])
self.assertEqual(4, tiled_imported.get_shape().ndims)
def testTypes(self):
types_to_test = {
"bool": (dtypes.bool, bool),
"float32": (dtypes.float32, float),
"float64": (dtypes.float64, float),
"complex64": (dtypes.complex64, complex),
"complex128": (dtypes.complex128, complex),
"uint8": (dtypes.uint8, int),
"int8": (dtypes.int8, int),
"int16": (dtypes.int16, int),
"int32": (dtypes.int32, int),
"int64": (dtypes.int64, int),
bytes: (dtypes.string, bytes)
}
for dtype_np, (dtype_tf, cast) in types_to_test.items():
with self.cached_session(use_gpu=True):
inp = np.random.rand(4, 1).astype(dtype_np)
a = constant_op.constant(
[cast(x) for x in inp.ravel(order="C")],
shape=[4, 1],
dtype=dtype_tf)
tiled = array_ops.tile(a, [1, 4])
result = self.evaluate(tiled)
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
self.assertAllEqual(result, np.tile(inp, (1, 4)))
@test_util.run_deprecated_v1
def testInvalidDim(self):
with self.cached_session():
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=[4, 1],
dtype=dtypes.float32)
# Wrong length of multiples.
with self.assertRaises(ValueError):
array_ops.tile(a, [1, 4, 2])
# Wrong rank for multiples.
with self.assertRaises(ValueError):
array_ops.tile(a, [[2, 3], [3, 4]]).eval()
def _RunAndVerifyResult(self, rank, use_gpu):
with self.cached_session(use_gpu=use_gpu):
# Random dims of given rank
input_shape = np.random.randint(1, 4, size=rank)
inp = np.random.rand(*input_shape).astype("f")
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=input_shape,
dtype=dtypes.float32)
multiples = np.random.randint(1, 4, size=rank).astype(np.int32)
tiled = array_ops.tile(a, multiples)
result = self.evaluate(tiled)
self.assertTrue((np.array(multiples) * np.array(inp.shape) == np.array(
result.shape)).all())
self.assertAllEqual(result, np.tile(inp, tuple(multiples)))
self.assertShapeEqual(result, tiled)
def testRandom(self):
# Test a lower rank (5).
for _ in range(5):
self._RunAndVerifyResult(5, use_gpu=False)
for _ in range(5):
self._RunAndVerifyResult(5, use_gpu=True)
# Test a higher rank (10).
for _ in range(5):
self._RunAndVerifyResult(10, use_gpu=False)
for _ in range(5):
self._RunAndVerifyResult(10, use_gpu=True)
@parameterized.parameters(dtypes.int32, dtypes.int64)
@test_util.run_deprecated_v1
def testGradientSimpleReduction(self, multiples_dtype):
with self.cached_session():
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
multiples = constant_op.constant([1, 4], dtype=multiples_dtype)
tiled = array_ops.tile(a, multiples)
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = self.evaluate(grad)
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
@test_util.run_deprecated_v1
def testGradientStridedReduction(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = self.evaluate(grad)
expected_shape = [4, 2]
expected = np.zeros(expected_shape)
expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
self.assertTrue((np.abs(expected - result) < 1e-3).all())
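# Why the expected gradient above: tile(a, [1, 2]) lays the two input columns
# out as [c0, c1, c0, c1], so output-gradient columns j and j + 2 both flow
# back into input column j, hence the pairwise sums.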
@test_util.run_deprecated_v1
def testGradientSimpleReductionOnGPU(self):
with self.session(use_gpu=True):
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 4])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
result = self.evaluate(grad)
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
@test_util.run_deprecated_v1
def testGradientStridedReductionOnGPU(self):
with self.session(use_gpu=True):
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
result = self.evaluate(grad)
expected_shape = [4, 2]
expected = np.zeros(expected_shape)
expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
self.assertAllClose(expected, result, 1e-3)
def _RunAndVerifyGradientResult(self, input_shape, multiples):
for use_gpu in False, True:
with self.cached_session(use_gpu=use_gpu):
# Random values
inp = np.asarray(np.random.rand(*input_shape))
a = constant_op.constant(inp, dtype=dtypes.float64)
tiled = array_ops.tile(a, multiples)
grad_shape = list(np.array(multiples) * np.array(inp.shape))
err = gradient_checker.compute_gradient_error(
a, list(input_shape), tiled, grad_shape, x_init_value=inp)
print("tile(float) error = ", err)
self.assertLess(err, 1e-3)
@test_util.run_deprecated_v1
def testGradientRandomScalar(self):
self._RunAndVerifyGradientResult([], [])
@test_util.run_deprecated_v1
def testGradientRandom(self):
self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 1, 1, 1, 1])
self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 2, 1, 3, 1])
self._RunAndVerifyGradientResult([2, 3, 1, 1, 3], [3, 1, 1, 2, 2])
self._RunAndVerifyGradientResult([2, 1, 3, 3, 2], [1, 3, 3, 1, 2])
@test_util.run_deprecated_v1
def testGradientStridedReductionGC(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
err = gradient_checker.compute_gradient_error(a, [4, 2], tiled, [4, 4])
self.assertLess(err, 1e-3)
@parameterized.parameters(dtypes.int32, dtypes.int64)
@test_util.run_deprecated_v1
def testGradientWithSparseGradWithRank1(self, multiples_dtype):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0],
dtype=dtypes.float32)
multiples = constant_op.constant([3], dtype=dtypes.int64)
outputs = array_ops.gather(array_ops.tile(inputs, multiples),
[1, 5, 9, 3, 7, 2, 2, 2])
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testGradientWithSparseGradWithRank3(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0],
dtype=dtypes.float32)
inputs = array_ops.reshape(inputs, [-1, 1, 1])
outputs = array_ops.gather(array_ops.tile(inputs, [3, 4, 2]),
[1, 5, 9, 3, 7, 2, 2, 2])
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# Unknown multiples shape.
inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input shape.
inp = array_ops.placeholder(dtypes.float32)
tiled = array_ops.tile(inp, [2, 2, 2, 2])
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input and multiples shape.
inp = array_ops.placeholder(dtypes.float32)
tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
self.assertIs(None, tiled.get_shape().ndims)
# Known input and partially known multiples.
inp = constant_op.constant(0.0, shape=[1, 1])
tiled = array_ops.tile(inp, [array_ops.placeholder(dtypes.int32), 7])
self.assertEqual([None, 7], tiled.get_shape().as_list())
# Mismatched input rank and multiples length.
inp = array_ops.placeholder(dtypes.float32, shape=[None, None])
with self.assertRaises(ValueError):
tiled = array_ops.tile(
inp, array_ops.placeholder(
dtypes.int32, shape=[3]))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/shape_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for division with division imported from __future__.
This file should be exactly the same as division_past_test.py except
for the __future__ division line.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class DivisionTestCase(test.TestCase):
def testDivision(self):
"""Test all the different ways to divide."""
values = [1, 2, 7, 11]
functions = (lambda x: x), constant_op.constant
# TODO(irving): Test int8, int16 once we support casts for those.
dtypes = np.int32, np.int64, np.float32, np.float64
tensors = []
checks = []
def check(x, y):
x = ops.convert_to_tensor(x)
y = ops.convert_to_tensor(y)
tensors.append((x, y))
def f(x, y):
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x, y)
checks.append(f)
with self.cached_session() as sess:
for dtype in dtypes:
for x in map(dtype, values):
for y in map(dtype, values):
for fx in functions:
for fy in functions:
tf_x = fx(x)
tf_y = fy(y)
div = x / y
tf_div = tf_x / tf_y
check(div, tf_div)
floordiv = x // y
tf_floordiv = tf_x // tf_y
check(floordiv, tf_floordiv)
# Do only one sess.run for speed
for f, (x, y) in zip(checks, self.evaluate(tensors)):
f(x, y)
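# For reference, with __future__ division the Python-side expectations are
# true division and floor division, e.g. 7 / 2 == 3.5 while 7 // 2 == 3; the
# checks above assert that the TF results match these, dtypes included.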
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/division_future_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sparse_cross_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseCrossOpTest(test.TestCase):
@test_util.run_deprecated_v1
def test_simple(self):
"""Tests a simple scenario."""
op = sparse_ops.sparse_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['batch1-FC1-F1_X_batch1-FC2-F1'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_dense(self):
"""Tests only dense inputs."""
op = sparse_ops.sparse_cross([
constant_op.constant([['batch1-FC1-F1', 'batch1-FC1-F2'],
['batch2-FC1-F1', 'batch2-FC1-F2']],
dtypes.string),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2',
'batch1-FC1-F2_X_batch1-FC2-F1', 'batch1-FC1-F2_X_batch1-FC2-F2'
], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_integer_mixed_string_sparse(self):
"""Tests mixed type."""
op = sparse_ops.sparse_cross([
self._sparse_tensor([[11], [333, 55555]]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['11_X_batch1-FC2-F1'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2', '55555_X_batch2-FC2-F1',
'55555_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_integer_mixed_string_dense(self):
"""Tests mixed dense inputs."""
op = sparse_ops.sparse_cross([
constant_op.constant([[11, 333], [55555, 999999]], dtypes.int64),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2', '333_X_batch1-FC2-F1',
'333_X_batch1-FC2-F2'
], [
'55555_X_batch2-FC2-F1', '55555_X_batch2-FC2-F2',
'999999_X_batch2-FC2-F1', '999999_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_sparse_cross_dense(self):
"""Tests sparse and dense inputs."""
op = sparse_ops.sparse_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_integer_sparse_input(self):
"""Tests mixed type sparse and dense inputs."""
op = sparse_ops.sparse_cross([
self._sparse_tensor([[11], [333, 5555]]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2',
'5555_X_batch2-FC2-F1', '5555_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_permutation_3x3x3(self):
"""Tests 3x3x3 permutation."""
op = sparse_ops.sparse_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor(
[['batch1-FC2-F1', 'batch1-FC2-F2', 'batch1-FC2-F3']]),
self._sparse_tensor(
[['batch1-FC3-F1', 'batch1-FC3-F2', 'batch1-FC3-F3']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F3'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_permutation_3x1x2(self):
"""Tests 3x1x2 permutation."""
op = sparse_ops.sparse_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_large_batch(self):
"""Tests with large batch size to force multithreading."""
batch_size = 5000
col1 = []
col2 = []
col3 = []
for b in range(batch_size):
col1.append(
['batch%d-FC1-F1' % b, 'batch%d-FC1-F2' % b, 'batch%d-FC1-F3' % b])
col2.append(['batch%d-FC2-F1' % b])
col3.append(['batch%d-FC3-F1' % b, 'batch%d-FC3-F2' % b])
op = sparse_ops.sparse_cross([
self._sparse_tensor(col1),
self._sparse_tensor(col2),
self._sparse_tensor(col3)
])
col_out = []
for b in range(batch_size):
col_out.append([
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b)
])
expected_out = self._sparse_tensor(col_out)
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_one_column_empty(self):
"""Tests when one column is empty.
The crossed tensor should be empty.
"""
op = sparse_ops.sparse_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']]),
self._sparse_tensor([], 1),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
with self.cached_session() as sess:
self._assert_sparse_tensor_empty(self.evaluate(op))
@test_util.run_deprecated_v1
def test_some_columns_empty(self):
"""Tests when more than one columns are empty.
Cross for the corresponding batch should be empty.
"""
op = sparse_ops.sparse_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']], 2),
self._sparse_tensor([['batch1-FC2-F1'], ['batch2-FC2-F1']], 2),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']], 2)
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2'
]], 2)
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_all_columns_empty(self):
"""Tests when all columns are empty.
The crossed tensor should be empty.
"""
op = sparse_ops.sparse_cross([
self._sparse_tensor([]),
self._sparse_tensor([]),
self._sparse_tensor([])
])
with self.cached_session() as sess:
self._assert_sparse_tensor_empty(self.evaluate(op))
@test_util.run_deprecated_v1
def test_hashed_zero_bucket_no_hash_key(self):
op = sparse_ops.sparse_cross_hashed([
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
])
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[1971693436396284976]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_hashed_zero_bucket(self):
op = sparse_ops.sparse_cross_hashed(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hash_key=sparse_ops._DEFAULT_HASH_KEY + 1)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[4847552627144134031]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
# TODO(sibyl-Aix6ihai): Add benchmark to compare Hashed vs Non-hashed.
@test_util.run_deprecated_v1
def test_hashed_no_hash_key(self):
op = sparse_ops.sparse_cross_hashed(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
num_buckets=100)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[83]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_hashed_output(self):
op = sparse_ops.sparse_cross_hashed(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
num_buckets=100,
hash_key=sparse_ops._DEFAULT_HASH_KEY + 1)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[31]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_hashed__has_no_collision(self):
"""Tests that fingerprint concatenation has no collisions."""
# The last 10 bits of 359 and 1024 + 359 are identical, so a plain mod-1024
# hash of the raw values would collide; with fingerprint concatenation none
# of the crosses should collide.
t1 = constant_op.constant([[359], [359 + 1024]])
t2 = constant_op.constant([list(range(10)), list(range(10))])
cross = sparse_ops.sparse_cross_hashed(
[t2, t1], num_buckets=1024, hash_key=sparse_ops._DEFAULT_HASH_KEY + 1)
cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
with session.Session():
values = self.evaluate(cross_dense)
self.assertTrue(numpy.not_equal(values[0], values[1]).all())
def test_hashed_3x1x2(self):
"""Tests 3x1x2 permutation with hashed output."""
op = sparse_ops.sparse_cross_hashed(
[
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
],
num_buckets=1000)
with self.cached_session() as sess:
out = self.evaluate(op)
self.assertEqual(6, len(out.values))
self.assertAllEqual([[0, i] for i in range(6)], out.indices)
self.assertTrue(all(0 <= x < 1000 for x in out.values))
all_values_are_different = len(out.values) == len(set(out.values))
self.assertTrue(all_values_are_different)
def _assert_sparse_tensor_empty(self, sp):
self.assertEqual(0, sp.indices.size)
self.assertEqual(0, sp.values.size)
# TODO(zakaria): check if we can ignore the first dim of the shape.
self.assertEqual(0, sp.dense_shape[1])
def _assert_sparse_tensor_equals(self, sp1, sp2):
self.assertAllEqual(sp1.indices.eval(), sp2.indices)
self.assertAllEqual(sp1.values.eval(), sp2.values)
self.assertAllEqual(sp1.dense_shape.eval(), sp2.dense_shape)
def _sparse_tensor(self, data, batch_size=-1):
"""Generates a SparseTensor.
Args:
data: Should be a list of list of strings or int64. Each item of the outer
list represents a batch. Each item of the batch is a feature of a
specific feature column.
batch_size: optional batch size, especially for cases when data has no
entry for some batches.
Returns:
A SparseTensor.
"""
indices = []
values = []
max_col_count = 0
for batch_ix, batch in enumerate(data):
for column_ix, column in enumerate(batch):
indices.append([batch_ix, column_ix])
values.append(column)
max_col_count = max(max_col_count, column_ix + 1)
shape = [batch_size if batch_size != -1 else len(data), max_col_count]
value_type = (dtypes.string if not values or isinstance(values[0], str) else
dtypes.int64)
return sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64, [len(indices), 2]),
constant_op.constant(values, value_type, [len(indices)]),
constant_op.constant(shape, dtypes.int64))
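# For illustration, _sparse_tensor([['a'], ['b', 'c']]) yields a SparseTensor
# with indices [[0, 0], [1, 0], [1, 1]], values ['a', 'b', 'c'], and
# dense_shape [2, 2] (batch size 2, max column count 2).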
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_cross_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_strip_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringStripOpTest(test.TestCase):
""" Test cases for tf.strings.strip."""
def test_string_strip(self):
strings = ["pigs on the wing", "animals"]
with self.cached_session() as sess:
output = string_ops.string_strip(strings)
output = self.evaluate(output)
self.assertAllEqual(output, [b"pigs on the wing", b"animals"])
def test_string_strip_2d(self):
strings = [["pigs on the wing", "animals"],
[" hello ", "\n\tworld \r \n"]]
with self.cached_session() as sess:
output = string_ops.string_strip(strings)
output = self.evaluate(output)
self.assertAllEqual(output, [[b"pigs on the wing", b"animals"],
[b"hello", b"world"]])
def test_string_strip_with_empty_strings(self):
strings = [" hello ", "", "world ", " \t \r \n "]
with self.cached_session() as sess:
output = string_ops.string_strip(strings)
output = self.evaluate(output)
self.assertAllEqual(output, [b"hello", b"", b"world", b""])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/string_strip_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for math_ops.bincount."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class BincountTest(test_util.TensorFlowTestCase):
def test_empty(self):
with self.session(use_gpu=True):
self.assertAllEqual(self.evaluate(math_ops.bincount([], minlength=5)),
[0, 0, 0, 0, 0])
self.assertAllEqual(self.evaluate(math_ops.bincount([], minlength=1)),
[0])
self.assertAllEqual(self.evaluate(math_ops.bincount([], minlength=0)),
[])
self.assertEqual(self.evaluate(math_ops.bincount([], minlength=0,
dtype=np.float32)).dtype,
np.float32)
self.assertEqual(self.evaluate(math_ops.bincount([], minlength=3,
dtype=np.float64)).dtype,
np.float64)
def test_values(self):
with self.session(use_gpu=True):
self.assertAllEqual(self.evaluate(math_ops.bincount([1, 1, 1, 2, 2, 3])),
[0, 3, 2, 1])
arr = [1, 1, 2, 1, 2, 3, 1, 2, 3, 4, 1, 2, 3, 4, 5]
self.assertAllEqual(self.evaluate(math_ops.bincount(arr)),
[0, 5, 4, 3, 2, 1])
arr += [0, 0, 0, 0, 0, 0]
self.assertAllEqual(self.evaluate(math_ops.bincount(arr)),
[6, 5, 4, 3, 2, 1])
self.assertAllEqual(self.evaluate(math_ops.bincount([])), [])
self.assertAllEqual(self.evaluate(math_ops.bincount([0, 0, 0])), [3])
self.assertAllEqual(self.evaluate(math_ops.bincount([5])),
[0, 0, 0, 0, 0, 1])
self.assertAllEqual(self.evaluate(math_ops.bincount(np.arange(10000))),
np.ones(10000))
def test_maxlength(self):
with self.session(use_gpu=True):
self.assertAllEqual(self.evaluate(math_ops.bincount([5], maxlength=3)),
[0, 0, 0])
self.assertAllEqual(self.evaluate(math_ops.bincount([1], maxlength=3)),
[0, 1])
self.assertAllEqual(self.evaluate(math_ops.bincount([], maxlength=3)),
[])
def test_random_with_weights(self):
num_samples = 10000
with self.session(use_gpu=True):
np.random.seed(42)
for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
arr = np.random.randint(0, 1000, num_samples)
if dtype in (dtypes.int32, dtypes.int64):
weights = np.random.randint(-100, 100, num_samples)
else:
weights = np.random.random(num_samples)
self.assertAllClose(
self.evaluate(math_ops.bincount(arr, weights)),
np.bincount(arr, weights))
def test_random_without_weights(self):
num_samples = 10000
with self.session(use_gpu=True):
np.random.seed(42)
for dtype in [np.int32, np.float32]:
arr = np.random.randint(0, 1000, num_samples)
weights = np.ones(num_samples).astype(dtype)
self.assertAllClose(
self.evaluate(math_ops.bincount(arr, None)),
np.bincount(arr, weights))
def test_zero_weights(self):
with self.session(use_gpu=True):
self.assertAllEqual(
self.evaluate(math_ops.bincount(np.arange(1000), np.zeros(1000))),
np.zeros(1000))
def test_negative(self):
# unsorted_segment_sum will only report InvalidArgumentError on CPU
with self.cached_session():
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(math_ops.bincount([1, 2, 3, -1, 6, 8]))
@test_util.run_deprecated_v1
def test_shape_function(self):
# size must be scalar.
with self.assertRaisesRegexp(
ValueError, "Shape must be rank 0 but is rank 1 for 'Bincount'"):
gen_math_ops.bincount([1, 2, 3, -1, 6, 8], [1], [])
# size must be positive.
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
gen_math_ops.bincount([1, 2, 3, -1, 6, 8], -5, [])
# if size is a constant then the shape is known.
v1 = gen_math_ops.bincount([1, 2, 3, -1, 6, 8], 5, [])
self.assertAllEqual(v1.get_shape().as_list(), [5])
# if size is a placeholder then the shape is unknown.
s = array_ops.placeholder(dtype=dtypes.int32)
v2 = gen_math_ops.bincount([1, 2, 3, -1, 6, 8], s, [])
self.assertAllEqual(v2.get_shape().as_list(), [None])
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/bincount_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class ReverseSequenceTest(test.TestCase):
def _testReverseSequence(self,
x,
batch_axis,
seq_axis,
seq_lengths,
truth,
use_gpu=False,
expected_err_re=None):
with self.cached_session(use_gpu=use_gpu):
ans = array_ops.reverse_sequence(
x, batch_axis=batch_axis, seq_axis=seq_axis, seq_lengths=seq_lengths)
if expected_err_re is None:
tf_ans = self.evaluate(ans)
self.assertAllClose(tf_ans, truth, atol=1e-10)
self.assertShapeEqual(truth, ans)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def _testBothReverseSequence(self,
x,
batch_axis,
seq_axis,
seq_lengths,
truth,
expected_err_re=None):
self._testReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth, True,
expected_err_re)
self._testReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth,
False, expected_err_re)
def _testBasic(self, dtype, len_dtype=np.int64):
x = np.asarray(
[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]],
[[17, 18, 19, 20], [21, 22, 23, 24]]],
dtype=dtype)
x = x.reshape(3, 2, 4, 1, 1)
x = x.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
# reverse dim 2 up to (0:3, none, 0:4) along dim=0
seq_lengths = np.asarray([3, 0, 4], dtype=len_dtype)
truth_orig = np.asarray(
[
[[3, 2, 1, 4], [7, 6, 5, 8]], # reverse 0:3
[[9, 10, 11, 12], [13, 14, 15, 16]], # reverse none
[[20, 19, 18, 17], [24, 23, 22, 21]]
], # reverse 0:4 (all)
dtype=dtype)
truth_orig = truth_orig.reshape(3, 2, 4, 1, 1)
truth = truth_orig.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
seq_axis = 0 # permute seq_axis and batch_axis (originally 2 and 0, resp.)
batch_axis = 2
self._testBothReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth)
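# What reverse_sequence computes here: for batch entry b, the first
# seq_lengths[b] elements along seq_axis are reversed and the rest are left
# untouched, so lengths (3, 0, 4) reverse a prefix of 3, nothing, and the
# whole row of 4, respectively -- matching truth_orig above.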
def testSeqLengthInt32(self):
self._testBasic(np.float32, np.int32)
def testFloatBasic(self):
self._testBasic(np.float32)
def testDoubleBasic(self):
self._testBasic(np.float64)
def testInt32Basic(self):
self._testBasic(np.int32)
def testInt64Basic(self):
self._testBasic(np.int64)
def testComplex64Basic(self):
self._testBasic(np.complex64)
def testComplex128Basic(self):
self._testBasic(np.complex128)
@test_util.run_deprecated_v1
def testFloatReverseSequenceGrad(self):
x = np.asarray(
[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]],
[[17, 18, 19, 20], [21, 22, 23, 24]]],
dtype=np.float64)
x = x.reshape(3, 2, 4, 1, 1)
x = x.transpose([2, 1, 0, 3, 4]) # transpose axes 0 <=> 2
# reverse dim 0 up to (0:3, none, 0:4) along dim=2
seq_axis = 0
batch_axis = 2
seq_lengths = np.asarray([3, 0, 4], dtype=np.int64)
with self.cached_session():
input_t = constant_op.constant(x, shape=x.shape)
seq_lengths_t = constant_op.constant(seq_lengths, shape=seq_lengths.shape)
reverse_sequence_out = array_ops.reverse_sequence(
input_t,
batch_axis=batch_axis,
seq_axis=seq_axis,
seq_lengths=seq_lengths_t)
err = gradient_checker.compute_gradient_error(
input_t, x.shape, reverse_sequence_out, x.shape, x_init_value=x)
print("ReverseSequence gradient error = %g" % err)
self.assertLess(err, 1e-8)
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
t = array_ops.reverse_sequence(
array_ops.placeholder(
dtypes.float32, shape=None),
seq_lengths=array_ops.placeholder(
dtypes.int64, shape=(32,)),
batch_axis=0,
seq_axis=1)
self.assertIs(t.get_shape().ndims, None)
# Batch size mismatched between input and seq_lengths.
with self.assertRaises(ValueError):
array_ops.reverse_sequence(
array_ops.placeholder(
dtypes.float32, shape=(32, 2, 3)),
seq_lengths=array_ops.placeholder(
dtypes.int64, shape=(33,)),
seq_axis=3)
# seq_axis out of bounds.
with self.assertRaisesRegexp(ValueError, "seq_dim must be < input rank"):
array_ops.reverse_sequence(
array_ops.placeholder(
dtypes.float32, shape=(32, 2, 3)),
seq_lengths=array_ops.placeholder(
dtypes.int64, shape=(32,)),
seq_axis=3)
# batch_axis out of bounds.
with self.assertRaisesRegexp(ValueError, "batch_dim must be < input rank"):
array_ops.reverse_sequence(
array_ops.placeholder(
dtypes.float32, shape=(32, 2, 3)),
seq_lengths=array_ops.placeholder(
dtypes.int64, shape=(32,)),
seq_axis=0,
batch_axis=3)
with self.cached_session():
inputs = array_ops.placeholder(dtypes.float32, shape=(32, 2, 3))
seq_lengths = array_ops.placeholder(dtypes.int64, shape=(32,))
output = array_ops.reverse_sequence(
inputs, seq_lengths=seq_lengths,
seq_axis=0) # batch_axis default is 0
with self.assertRaisesOpError("batch_dim == seq_dim"):
output.eval(feed_dict={
inputs: np.random.rand(32, 2, 3),
seq_lengths: xrange(32)
})
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/reverse_sequence_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for atrous convolution functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def upsample_filters(filters, rate):
"""Upsamples the filters by a factor of rate along the spatial dimensions.
Args:
filters: spatial_shape + [in_channels, out_channels]
Original filters.
rate: A list of len(spatial_shape) positive ints, specifying the
upsampling rate.
Returns:
filters_up: output_spatial_shape + [in_channels, out_channels].
Upsampled filters with
output_spatial_shape[i] = (spatial_shape[i] - 1) * rate[i] + 1
containing (rate[i] - 1) zeros between consecutive filter values along
spatial dimension i.
"""
num_spatial_dims = len(rate)
spatial_shape = np.array(filters.shape[:num_spatial_dims])
output_spatial_shape = (spatial_shape - 1) * rate + 1
output = np.zeros(
tuple(output_spatial_shape) + tuple(filters.shape[-2:]), filters.dtype)
output[tuple(np.s_[::rate[i]] for i in range(num_spatial_dims))] = filters
return output
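# A small worked example: a length-2 filter [a, b] upsampled with rate [3]
# becomes the length-4 filter [a, 0, 0, b] -- (3 - 1) zeros are inserted
# between consecutive taps, and (2 - 1) * 3 + 1 = 4.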
class AtrousConvolutionTest(test.TestCase):
@contextlib.contextmanager
def _delay_checks(self):
"""Context manager for combining checks depending on tensor evaluations.
Each call to Session.run has some overhead, and this overhead can easily
account for the majority of the time spent in tests that call Session.run
(or Tensor.eval) many times.
This context manager provides a mechanism for registering callback functions
and associated tensors. When the context is exited, all of the tensors
associated with all of the registrations are evaluated with a single call to
Session.run, and then each registered callback function is called with the
values of its associated tensors.
Yields:
A function `add_check(check, *args, **kwargs)`, where `check` is the
callback function to invoke, and `*args` and `**kwargs` specify the
associated Tensors. In eager mode, `check` is executed immediately
inside `add_check`; otherwise it is deferred until the context exits.
"""
checks = []
def add_check(check, *args, **kwargs):
if context.executing_eagerly():
args_val, kwargs_val = self.evaluate([args, kwargs])
check(*args_val, **kwargs_val)
else:
checks.append((check, args, kwargs))
yield add_check
if not context.executing_eagerly():
all_values = self.evaluate([[args, kwargs] for _, args, kwargs in checks])
for (check, _, _), (args, kwargs) in zip(checks, all_values):
check(*args, **kwargs)
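# A minimal usage sketch (hypothetical tensors y1/y2, mirroring the tests
# below): inside `with self._delay_checks() as add_check:` build the graph,
# then call add_check(lambda v1, v2: self.assertAllClose(v1, v2), y1, y2);
# all registered tensors are evaluated in one Session.run on context exit.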
def _test_atrous_convolution(self, add_check, input_shape, filter_shape,
dilation_rate, **kwargs):
filters = np.arange(
np.prod(filter_shape), dtype=np.float32).reshape(filter_shape)
filters_upsampled = upsample_filters(filters, dilation_rate)
x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape)
y1 = nn_ops.convolution(
input=x, filter=filters, dilation_rate=dilation_rate, **kwargs)
y2 = nn_ops.convolution(input=x, filter=filters_upsampled, **kwargs)
def check(y1_eval, y2_eval):
self.assertAllClose(y1_eval, y2_eval, rtol=1e-2, atol=1e-2)
add_check(check, y1, y2)
@test_util.run_v1_only("b/120545219")
def test_unknown_spatial_dims_for_channel_last_format(self):
x = array_ops.placeholder(dtypes.float32, [1, None, None, 10])
w = array_ops.zeros([3, 3, 10, 20])
y = nn_ops.convolution(
x, w, "VALID", dilation_rate=[2, 2], data_format="NHWC")
self.assertEqual(y.shape.as_list(), [1, None, None, 20])
@test_util.run_v1_only("b/120545219")
def test_unknown_spatial_dims_for_channel_first_format(self):
x = array_ops.placeholder(dtypes.float32, [1, 10, None, None])
w = array_ops.zeros([3, 3, 10, 20])
y = nn_ops.convolution(
x, w, "VALID", dilation_rate=[2, 2], data_format="NCHW")
self.assertEqual(y.shape.as_list(), [1, 20, None, None])
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolution2D(self):
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
for height, width in [[9, 9], [9, 10]]:
for kernel_height, kernel_width in [[1, 1], [2, 2], [2, 3]]:
for dilation_rate in [[1, 1], [3, 2], [2, 1]]:
self._test_atrous_convolution(
add_check=add_check,
input_shape=[2, height, width, 2],
filter_shape=[kernel_height, kernel_width, 2, 2],
padding=padding,
dilation_rate=dilation_rate,
)
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolution3D(self):
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
for depth, height, width in [[9, 9, 10], [9, 10, 9]]:
for kernel_depth, kernel_height, kernel_width in [[3, 3,
3], [3, 2, 2],
[2, 1, 3]]:
for dilation_rate in [[1, 1, 1], [3, 3, 3], [3, 2, 3], [3, 1, 2]]:
self._test_atrous_convolution(
add_check=add_check,
input_shape=[2, depth, height, width, 2],
filter_shape=[
kernel_depth, kernel_height, kernel_width, 2, 2
],
padding=padding,
dilation_rate=dilation_rate,
)
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolution1D(self):
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
for width in [9, 10]:
for kernel_width in range(1, 4):
for rate in range(1, 4):
self._test_atrous_convolution(
add_check=add_check,
input_shape=[2, width, 2],
filter_shape=[kernel_width, 2, 2],
padding=padding,
dilation_rate=[rate],
)
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolutionNC(self):
if test.is_gpu_available(cuda_only=True):
# "NCW" and "NCHW" formats are currently supported only on CUDA.
with test_util.device(use_gpu=True):
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
self._test_atrous_convolution(
add_check=add_check,
input_shape=[2, 2, 9],
padding=padding,
filter_shape=[3, 2, 2],
dilation_rate=[2],
data_format="NCW",
)
self._test_atrous_convolution(
add_check=add_check,
input_shape=[2, 2, 9, 5],
padding=padding,
filter_shape=[3, 3, 2, 2],
dilation_rate=[2, 1],
data_format="NCHW",
)
@test_util.run_in_graph_and_eager_modes
def testAtrousSequence(self):
"""Tests optimization of sequence of atrous convolutions.
See the documentation of with_space_to_batch.
"""
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
for height in range(15, 17):
for width in range(15, 17):
x_shape = [3, height, width, 2]
x = np.random.random_sample(x_shape).astype(np.float32)
kernel_sizes = [1, 3] if padding == "SAME" else range(1, 3)
for kernel in kernel_sizes:
f_shape = [kernel, kernel, 2, 2]
f1 = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)
f2 = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)
def combined_op(converted_input, num_spatial_dims, padding_arg): # pylint: disable=unused-argument
# pylint: disable=cell-var-from-loop
result = nn_ops.convolution(
input=converted_input, filter=f1, padding=padding)
result = nn_ops.convolution(
input=result, filter=f2, padding=padding)
# pylint: enable=cell-var-from-loop
return result
for rate_height in range(2, 4):
for rate_width in range(2, 4):
dilation_rate = [rate_height, rate_width]
y1 = nn_ops.convolution(
input=x,
filter=f1,
padding=padding,
dilation_rate=dilation_rate)
y1 = nn_ops.convolution(
input=y1,
filter=f2,
padding=padding,
dilation_rate=dilation_rate)
y2 = nn_ops.with_space_to_batch(
input=x,
dilation_rate=dilation_rate,
op=combined_op,
padding="VALID")
def check(y1_eval, y2_eval):
self.assertAllClose(y1_eval, y2_eval, rtol=1e-2, atol=1e-2)
add_check(check, y1, y2)
def _test_gradient(self, x_shape, f_shape, dilation_rate, padding):
x_val = np.random.random_sample(x_shape).astype(np.float32)
f_val = np.random.random_sample(f_shape).astype(np.float32)
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.convolution(
input=x, filter=f, dilation_rate=dilation_rate, padding=padding)
y_shape = output.get_shape().as_list()
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)
@test_util.run_v1_only("b/120545219")
def testGradient(self):
with self.cached_session():
for padding in ["SAME", "VALID"]:
for rate_width in range(1, 3):
for rate_height in range(1, 3):
self._test_gradient(
x_shape=[2, 5, 6, 2],
f_shape=[3, 3, 2, 2],
dilation_rate=[rate_height, rate_width],
padding=padding)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/atrous_convolution_test.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for unicode_decode and unicode_decode_with_splits."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import test
def _nested_encode(x, encoding):
"""Encode each string in a nested list with `encoding`."""
if isinstance(x, list):
return [_nested_encode(v, encoding) for v in x]
else:
return x.encode(encoding)
def _nested_codepoints(x):
"""Replace each string in a nested list with a list of its codepoints."""
# Works for Python 2 and 3, and for both UCS2 and UCS4 builds
if isinstance(x, list):
return [_nested_codepoints(v) for v in x]
else:
b = list(x.encode("utf-32-be"))
if any(isinstance(c, str) for c in b):
b = [ord(c) for c in b]
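    # Example: u"仅" (U+4EC5) encodes to UTF-32-BE bytes 00 00 4E C5, which
    # recombine below as (0x4E << 8) + 0xC5 == 20165 == ord(u"仅").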
return [(b0 << 24) + (b1 << 16) + (b2 << 8) + b3
for b0, b1, b2, b3 in zip(b[::4], b[1::4], b[2::4], b[3::4])]
def _nested_offsets(x, encoding):
"""Replace each string in a nested list with a list of start offsets."""
if isinstance(x, list):
return [_nested_offsets(v, encoding) for v in x]
else:
if not x:
return []
encoded_x = x.encode("utf-32-be")
encoded_chars = [encoded_x[i:i + 4] for i in range(0, len(encoded_x), 4)]
char_lens = [
len(c.decode("utf-32-be").encode(encoding)) for c in encoded_chars
]
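    # Example: with encoding="UTF-8", u"仅今年前" has char_lens [3, 3, 3, 3],
    # giving byte start offsets [0, 3, 6, 9].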
return [0] + np.cumsum(char_lens).tolist()[:-1]
def _nested_splitchars(x, encoding):
"""Replace each string in a nested list with a list of char substrings."""
if isinstance(x, list):
return [_nested_splitchars(v, encoding) for v in x]
else:
b = x.encode("utf-32-be")
chars = zip(b[::4], b[1::4], b[2::4], b[3::4])
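    # Example: with encoding="UTF-8", u"Gö" splits into [b"G", b"\xc3\xb6"].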
if str is bytes:
return [b"".join(c).decode("utf-32-be").encode(encoding) for c in chars]
else:
return [bytes(c).decode("utf-32-be").encode(encoding) for c in chars]
def _make_sparse_tensor(indices, values, dense_shape, dtype=np.int32):
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64), np.array(values, dtype),
np.array(dense_shape, np.int64))
@test_util.run_all_in_graph_and_eager_modes
class UnicodeDecodeTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
def testScalarDecode(self):
text = constant_op.constant(u"仅今年前".encode("utf-8"))
chars = ragged_string_ops.unicode_decode(text, "utf-8")
self.assertAllEqual(chars, [ord(c) for c in u"仅今年前"])
def testScalarDecodeWithOffset(self):
text = constant_op.constant(u"仅今年前".encode("utf-8"))
chars, starts = ragged_string_ops.unicode_decode_with_offsets(text, "utf-8")
self.assertAllEqual(chars, [ord(c) for c in u"仅今年前"])
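    # Each of the four characters occupies 3 bytes in UTF-8, so the
    # character-to-byte start offsets are 0, 3, 6, and 9.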
self.assertAllEqual(starts, [0, 3, 6, 9])
def testVectorDecode(self):
text = constant_op.constant([u"仅今年前".encode("utf-8"), b"hello"])
chars = ragged_string_ops.unicode_decode(text, "utf-8")
expected_chars = [[ord(c) for c in u"仅今年前"],
[ord(c) for c in u"hello"]]
self.assertRaggedEqual(chars, expected_chars)
def testVectorDecodeWithOffset(self):
text = constant_op.constant([u"仅今年前".encode("utf-8"), b"hello"])
chars, starts = ragged_string_ops.unicode_decode_with_offsets(text, "utf-8")
expected_chars = [[ord(c) for c in u"仅今年前"],
[ord(c) for c in u"hello"]]
self.assertRaggedEqual(chars, expected_chars)
self.assertRaggedEqual(starts, [[0, 3, 6, 9], [0, 1, 2, 3, 4]])
@parameterized.parameters([
{"texts": u"仅今年前"},
{"texts": [u"G\xf6\xf6dnight", u"\U0001f60a"]},
{"texts": ["Hello", "world", "", u"👍"]},
{"texts": [["Hi", "there"], ["", u"\U0001f60a"]], "ragged_rank": 0},
{"texts": [["Hi", "there", ""], [u"😊"]], "ragged_rank": 1},
{"texts": [[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]], "ragged_rank": 2},
{"texts": []}
]) # pyformat: disable
def testBasicDecode(self, texts, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_decode(input_tensor, "UTF-8")
expected = _nested_codepoints(texts)
self.assertRaggedEqual(expected, result)
@parameterized.parameters([
{"texts": u"仅今年前"},
{"texts": [u"G\xf6\xf6dnight", u"\U0001f60a"]},
{"texts": ["Hello", "world", "", u"👍"]},
{"texts": [["Hi", "there"], ["", u"\U0001f60a"]], "ragged_rank": 0},
{"texts": [["Hi", "there", ""], [u"😊"]], "ragged_rank": 1},
{"texts": [[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]], "ragged_rank": 2},
{"texts": []}
]) # pyformat: disable
def testBasicDecodeWithOffsets(self, texts, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_decode_with_offsets(
input_tensor, "UTF-8")
expected_codepoints = _nested_codepoints(texts)
expected_offsets = _nested_offsets(texts, "UTF-8")
self.assertRaggedEqual(expected_codepoints, result[0])
self.assertRaggedEqual(expected_offsets, result[1])
def testDocstringExamples(self):
texts = [s.encode("utf8") for s in [u"G\xf6\xf6dnight", u"\U0001f60a"]]
codepoints1 = ragged_string_ops.unicode_decode(texts, "UTF-8")
codepoints2, offsets = ragged_string_ops.unicode_decode_with_offsets(
texts, "UTF-8")
self.assertRaggedEqual(
codepoints1, [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]])
self.assertRaggedEqual(
codepoints2, [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]])
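    # The jumps in the offsets below (1 -> 3 -> 5) reflect the two-byte UTF-8
    # encoding of ö (U+00F6).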
self.assertRaggedEqual(offsets, [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]])
@parameterized.parameters([
dict(
texts=["Hello", "world", "", u"👍"],
expected=_make_sparse_tensor(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [1, 0], [1, 1],
[1, 2], [1, 3], [1, 4], [3, 0]],
values=[72, 101, 108, 108, 111, 119, 111, 114, 108, 100, 128077],
dense_shape=[4, 5])),
dict(
texts=[["Hi", "there"], ["", u"\U0001f60a"]],
expected=_make_sparse_tensor(
indices=[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2],
[0, 1, 3], [0, 1, 4], [1, 1, 0]],
values=[72, 105, 116, 104, 101, 114, 101, 128522],
dense_shape=[2, 2, 5])),
dict(
texts=[],
expected=_make_sparse_tensor(np.zeros([0, 2], np.int64), [], [0, 0])),
])
def testDecodeWithSparseOutput(self, texts, expected):
input_tensor = np.array(_nested_encode(texts, "UTF-8"), dtype=bytes)
result = ragged_string_ops.unicode_decode(input_tensor, "UTF-8").to_sparse()
self.assertIsInstance(result, sparse_tensor.SparseTensor)
self.assertAllEqual(expected.indices, result.indices)
self.assertAllEqual(expected.values, result.values)
self.assertAllEqual(expected.dense_shape, result.dense_shape)
@parameterized.parameters([
dict(
texts=["Hello", "world", "", u"👍"],
expected=[[72, 101, 108, 108, 111], [119, 111, 114, 108, 100],
[-1, -1, -1, -1, -1], [128077, -1, -1, -1, -1]]),
dict(
texts=[["Hi", "there"], ["", u"\U0001f60a"]],
expected=[[[72, 105, -1, -1, -1], [116, 104, 101, 114, 101]],
[[-1, -1, -1, -1, -1], [128522, -1, -1, -1, -1]]],
ragged_rank=0),
dict(
texts=[["Hi", "there", ""], [u"😊"]],
expected=[[[72, 105, -1, -1, -1],
[116, 104, 101, 114, 101],
[-1, -1, -1, -1, -1]],
[[128522, -1, -1, -1, -1],
[-1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1]]]),
dict(
texts=[[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]],
expected=[
[[[128522, -1, -1], [129312, 129488, -1]],
[[-1, -1, -1], [-1, -1, -1]]],
[[[129299, 128123, 129302], [-1, -1, -1]],
[[-1, -1, -1], [-1, -1, -1]]]]),
dict(texts=[], expected=np.zeros([0, 0], np.int64)),
]) # pyformat: disable
def testDecodeWithPaddedOutput(self, texts, expected, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_decode(
input_tensor, "UTF-8").to_tensor(default_value=-1)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
expected=[[65533], [104, 101, 108, 108, 111],
[61, 61, 65533, 61, 61], [119, 111, 114, 108, 100]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
replacement_char=0,
expected=[[0], [104, 101, 108, 108, 111],
[61, 61, 0, 61, 61], [119, 111, 114, 108, 100]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="ignore",
expected=[[], [104, 101, 108, 108, 111],
[61, 61, 61, 61], [119, 111, 114, 108, 100]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world"],
input_encoding="UTF-8",
replace_control_characters=True,
expected=[[65533], [104, 101, 108, 108, 111],
[61, 61, 65533, 61, 61], [119, 111, 114, 108, 100]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world"],
input_encoding="UTF-8",
replace_control_characters=True,
replacement_char=0,
expected=[[0], [104, 101, 108, 108, 111],
[61, 61, 0, 61, 61], [119, 111, 114, 108, 100]]),
]) # pyformat: disable
def testErrorModes(self, expected=None, **args):
result = ragged_string_ops.unicode_decode(**args)
self.assertRaggedEqual(expected, result)
@parameterized.parameters([
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
expected=[[65533], [104, 101, 108, 108, 111],
[61, 61, 65533, 61, 61], [119, 111, 114, 108, 100]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
replacement_char=0,
expected=[[0], [104, 101, 108, 108, 111],
[61, 61, 0, 61, 61], [119, 111, 114, 108, 100]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="ignore",
expected=[[], [104, 101, 108, 108, 111],
[61, 61, 61, 61], [119, 111, 114, 108, 100]],
expected_offsets=[[], [0, 1, 2, 3, 4],
[0, 1, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world"],
input_encoding="UTF-8",
replace_control_characters=True,
expected=[[65533], [104, 101, 108, 108, 111],
[61, 61, 65533, 61, 61], [119, 111, 114, 108, 100]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world"],
input_encoding="UTF-8",
replace_control_characters=True,
replacement_char=0,
expected=[[0], [104, 101, 108, 108, 111],
[61, 61, 0, 61, 61], [119, 111, 114, 108, 100]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
]) # pyformat: disable
def testErrorModesWithOffsets(self,
expected=None,
expected_offsets=None,
**args):
result = ragged_string_ops.unicode_decode_with_offsets(**args)
self.assertRaggedEqual(result[0], expected)
self.assertRaggedEqual(result[1], expected_offsets)
@parameterized.parameters(
("UTF-8", [u"こんにちは", u"你好", u"Hello"]),
("UTF-16-BE", [u"こんにちは", u"你好", u"Hello"]),
("UTF-32-BE", [u"こんにちは", u"你好", u"Hello"]),
("US-ASCII", [u"Hello", "world"]),
("ISO-8859-1", [u"ÀÈÓ", "AEO"]),
("SHIFT-JIS", [u"Hello", u"こんにちは"]),
)
def testDecodeWithDifferentEncodings(self, encoding, texts):
expected = _nested_codepoints(texts)
input_tensor = constant_op.constant(_nested_encode(texts, encoding))
result = ragged_string_ops.unicode_decode(input_tensor, encoding)
self.assertRaggedEqual(expected, result)
@parameterized.parameters(
("UTF-8", [u"こんにちは", u"你好", u"Hello"]),
("UTF-16-BE", [u"こんにちは", u"你好", u"Hello"]),
("UTF-32-BE", [u"こんにちは", u"你好", u"Hello"]),
("US-ASCII", [u"Hello", "world"]),
("ISO-8859-1", [u"ÀÈÓ", "AEO"]),
("SHIFT-JIS", [u"Hello", u"こんにちは"]),
)
def testDecodeWithOffsetsWithDifferentEncodings(self, encoding, texts):
expected_codepoints = _nested_codepoints(texts)
expected_offsets = _nested_offsets(texts, encoding)
input_tensor = constant_op.constant(_nested_encode(texts, encoding))
result = ragged_string_ops.unicode_decode_with_offsets(
input_tensor, encoding)
self.assertRaggedEqual(expected_codepoints, result[0])
self.assertRaggedEqual(expected_offsets, result[1])
@parameterized.parameters([
dict(input=[b"\xFEED"],
errors="strict",
input_encoding="UTF-8",
exception=errors.InvalidArgumentError,
message="Invalid formatting on input string"),
dict(input="x",
input_encoding="UTF-8",
replacement_char=11141111,
exception=errors.InvalidArgumentError,
message="replacement_char out of unicode codepoint range"),
dict(input="x",
input_encoding="UTF-8",
errors="oranguatan",
exception=(ValueError, errors.InvalidArgumentError)),
]) # pyformat: disable
def testExceptions(self, exception=None, message=None, **args):
with self.assertRaisesRegexp(exception, message):
self.evaluate(ragged_string_ops.unicode_decode(**args))
def testUnknownRankError(self):
if context.executing_eagerly():
return
s = array_ops.placeholder(dtypes.string)
message = "Rank of `input` must be statically known."
with self.assertRaisesRegexp(ValueError, message):
self.evaluate(ragged_string_ops.unicode_decode(s, input_encoding="UTF-8"))
@parameterized.parameters([
dict(
doc="Single string",
input=_nested_encode([u"仅今年前"], "utf-8"),
input_encoding="UTF-8",
expected_char_values=_nested_codepoints(u"仅今年前"),
expected_row_splits=[0, 4],
expected_char_to_byte_starts=[0, 3, 6, 9]),
dict(
doc="Multiple strings",
input=_nested_encode([u"仅今年前", u"你好"], "utf-8"),
input_encoding="UTF-8",
expected_char_values=_nested_codepoints(u"仅今年前你好"),
expected_row_splits=[0, 4, 6],
expected_char_to_byte_starts=[0, 3, 6, 9, 0, 3]),
dict(
doc="errors=replace",
input=b"=\xFE=",
input_encoding="UTF-8",
errors="replace",
expected_char_values=[61, 65533, 61],
expected_row_splits=[0, 3],
expected_char_to_byte_starts=[0, 1, 2]),
dict(
doc="errors=ignore",
input=b"=\xFE=",
input_encoding="UTF-8",
errors="ignore",
expected_char_values=[61, 61],
expected_row_splits=[0, 2],
expected_char_to_byte_starts=[0, 2]),
])
def testDecodeGenOp(self,
doc,
expected_row_splits=None,
expected_char_values=None,
expected_char_to_byte_starts=None,
**args):
"""Test for the c++ interface (gen_string_ops.unicode_decode)."""
result = gen_string_ops.unicode_decode_with_offsets(**args)
self.assertAllEqual(expected_row_splits, result.row_splits)
self.assertAllEqual(expected_char_values, result.char_values)
self.assertAllEqual(expected_char_to_byte_starts,
result.char_to_byte_starts)
@test_util.run_all_in_graph_and_eager_modes
class UnicodeSplitTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
def testScalarSplit(self):
text = constant_op.constant(u"仅今年前".encode("UTF-8"))
chars = ragged_string_ops.unicode_split(text, "UTF-8")
self.assertAllEqual(chars, [c.encode("UTF-8") for c in u"仅今年前"])
def testScalarSplitWithOffset(self):
text = constant_op.constant(u"仅今年前".encode("UTF-8"))
chars, starts = ragged_string_ops.unicode_split_with_offsets(text, "UTF-8")
self.assertAllEqual(chars, [c.encode("UTF-8") for c in u"仅今年前"])
self.assertAllEqual(starts, [0, 3, 6, 9])
def testVectorSplit(self):
text = constant_op.constant([u"仅今年前".encode("UTF-8"), b"hello"])
chars = ragged_string_ops.unicode_split(text, "UTF-8")
expected_chars = [[c.encode("UTF-8") for c in u"仅今年前"],
[c.encode("UTF-8") for c in u"hello"]]
self.assertRaggedEqual(chars, expected_chars)
def testVectorSplitWithOffset(self):
text = constant_op.constant([u"仅今年前".encode("UTF-8"), b"hello"])
chars, starts = ragged_string_ops.unicode_split_with_offsets(text, "UTF-8")
expected_chars = [[c.encode("UTF-8") for c in u"仅今年前"],
[c.encode("UTF-8") for c in u"hello"]]
self.assertRaggedEqual(chars, expected_chars)
self.assertRaggedEqual(starts, [[0, 3, 6, 9], [0, 1, 2, 3, 4]])
@parameterized.parameters([
{"texts": u"仅今年前"},
{"texts": [u"G\xf6\xf6dnight", u"\U0001f60a"]},
{"texts": ["Hello", "world", "", u"👍"]},
{"texts": [["Hi", "there"], ["", u"\U0001f60a"]], "ragged_rank": 0},
{"texts": [["Hi", "there", ""], [u"😊"]], "ragged_rank": 1},
{"texts": [[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]], "ragged_rank": 2},
{"texts": []}
]) # pyformat: disable
def testBasicSplit(self, texts, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_split(input_tensor, "UTF-8")
expected = _nested_splitchars(texts, "UTF-8")
self.assertRaggedEqual(expected, result)
@parameterized.parameters([
{"texts": u"仅今年前"},
{"texts": [u"G\xf6\xf6dnight", u"\U0001f60a"]},
{"texts": ["Hello", "world", "", u"👍"]},
{"texts": [["Hi", "there"], ["", u"\U0001f60a"]], "ragged_rank": 0},
{"texts": [["Hi", "there", ""], [u"😊"]], "ragged_rank": 1},
{"texts": [[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]], "ragged_rank": 2},
{"texts": []}
]) # pyformat: disable
def testBasicSplitWithOffsets(self, texts, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_split_with_offsets(input_tensor, "UTF-8")
expected_codepoints = _nested_splitchars(texts, "UTF-8")
expected_offsets = _nested_offsets(texts, "UTF-8")
self.assertRaggedEqual(expected_codepoints, result[0])
self.assertRaggedEqual(expected_offsets, result[1])
def testDocstringExamples(self):
texts = [s.encode("utf8") for s in [u"G\xf6\xf6dnight", u"\U0001f60a"]]
codepoints1 = ragged_string_ops.unicode_split(texts, "UTF-8")
codepoints2, offsets = ragged_string_ops.unicode_split_with_offsets(
texts, "UTF-8")
self.assertRaggedEqual(
codepoints1,
[[b"G", b"\xc3\xb6", b"\xc3\xb6", b"d", b"n", b"i", b"g", b"h", b"t"],
[b"\xf0\x9f\x98\x8a"]])
self.assertRaggedEqual(
codepoints2,
[[b"G", b"\xc3\xb6", b"\xc3\xb6", b"d", b"n", b"i", b"g", b"h", b"t"],
[b"\xf0\x9f\x98\x8a"]])
self.assertRaggedEqual(offsets, [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]])
@parameterized.parameters([
dict(
texts=["Hello", "world", "", u"👍"],
expected=_make_sparse_tensor(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [1, 0], [1, 1],
[1, 2], [1, 3], [1, 4], [3, 0]],
values=[b"H", b"e", b"l", b"l", b"o",
b"w", b"o", b"r", b"l", b"d", b"\xf0\x9f\x91\x8d"],
dense_shape=[4, 5],
dtype=bytes)),
dict(
texts=[["Hi", "there"], ["", u"\U0001f60a"]],
expected=_make_sparse_tensor(
indices=[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2],
[0, 1, 3], [0, 1, 4], [1, 1, 0]],
values=[b"H", b"i", b"t", b"h", b"e", b"r", b"e",
b"\xf0\x9f\x98\x8a"],
dense_shape=[2, 2, 5],
dtype=bytes)),
dict(
texts=[],
expected=_make_sparse_tensor(
np.zeros([0, 2], np.int64), [], [0, 0], dtype=bytes)),
]) # pyformat: disable
def testSplitWithSparseOutput(self, texts, expected):
input_tensor = np.array(_nested_encode(texts, "UTF-8"), dtype=bytes)
result = ragged_string_ops.unicode_split(input_tensor, "UTF-8").to_sparse()
self.assertIsInstance(result, sparse_tensor.SparseTensor)
self.assertAllEqual(expected.indices, result.indices)
self.assertAllEqual(expected.values, result.values)
self.assertAllEqual(expected.dense_shape, result.dense_shape)
@parameterized.parameters([
dict(
texts=["Hello", "world", "", u"👍"],
expected=[[b"H", b"e", b"l", b"l", b"o"],
[b"w", b"o", b"r", b"l", b"d"],
["", "", "", "", ""],
[b"\xf0\x9f\x91\x8d", "", "", "", ""]]),
dict(
texts=[["Hi", "there"], ["", u"\U0001f60a"]],
expected=[[[b"H", b"i", "", "", ""],
[b"t", b"h", b"e", b"r", b"e"]],
[["", "", "", "", ""],
[b"\xf0\x9f\x98\x8a", "", "", "", ""]]],
ragged_rank=0),
dict(
texts=[["Hi", "there", ""], [u"😊"]],
expected=[[[b"H", b"i", "", "", ""],
[b"t", b"h", b"e", b"r", b"e"],
["", "", "", "", ""]],
[[b"\xf0\x9f\x98\x8a", "", "", "", ""],
["", "", "", "", ""],
["", "", "", "", ""]]]),
dict(
texts=[[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]],
expected=[[[[b"\xf0\x9f\x98\x8a", "", ""],
[b"\xf0\x9f\xa4\xa0", b"\xf0\x9f\xa7\x90", ""]],
[["", "", ""],
["", "", ""]]],
[[[b"\xf0\x9f\xa4\x93", b"\xf0\x9f\x91\xbb",
b"\xf0\x9f\xa4\x96"],
["", "", ""]],
[["", "", ""],
["", "", ""]]]]),
dict(texts=[], expected=np.zeros([0, 0], np.int64)),
]) # pyformat: disable
def testSplitWithPaddedOutput(self, texts, expected, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_split(
input_tensor, "UTF-8").to_tensor(default_value="")
self.assertAllEqual(np.array(expected, dtype=bytes), result)
@parameterized.parameters([
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
expected=[[b"\xef\xbf\xbd"],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"\xef\xbf\xbd", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
replacement_char=0,
expected=[[b"\x00"],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"\x00", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="ignore",
expected=[[],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]]),
]) # pyformat: disable
def testErrorModes(self, expected=None, **args):
result = ragged_string_ops.unicode_split(**args)
self.assertRaggedEqual(expected, result)
@parameterized.parameters([
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
expected=[[b"\xef\xbf\xbd"],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"\xef\xbf\xbd", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
replacement_char=0,
expected=[[b"\x00"],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"\x00", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="ignore",
expected=[[],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]],
expected_offsets=[[], [0, 1, 2, 3, 4],
[0, 1, 3, 4], [0, 1, 2, 3, 4]]),
]) # pyformat: disable
def testErrorModesWithOffsets(self,
expected=None,
expected_offsets=None,
**args):
result = ragged_string_ops.unicode_split_with_offsets(**args)
self.assertRaggedEqual(expected, result[0])
self.assertRaggedEqual(expected_offsets, result[1])
@parameterized.parameters(
("UTF-8", [u"こんにちは", u"你好", u"Hello"]),
("UTF-16-BE", [u"こんにちは", u"你好", u"Hello"]),
("UTF-32-BE", [u"こんにちは", u"你好", u"Hello"]),
)
def testSplitWithDifferentEncodings(self, encoding, texts):
expected = _nested_splitchars(texts, encoding)
input_tensor = constant_op.constant(_nested_encode(texts, encoding))
result = ragged_string_ops.unicode_split(input_tensor, encoding)
self.assertRaggedEqual(expected, result)
@parameterized.parameters(
("UTF-8", [u"こんにちは", u"你好", u"Hello"]),
("UTF-16-BE", [u"こんにちは", u"你好", u"Hello"]),
("UTF-32-BE", [u"こんにちは", u"你好", u"Hello"]),
)
def testSplitWithOffsetsWithDifferentEncodings(self, encoding, texts):
expected_codepoints = _nested_splitchars(texts, encoding)
expected_offsets = _nested_offsets(texts, encoding)
input_tensor = constant_op.constant(_nested_encode(texts, encoding))
result = ragged_string_ops.unicode_split_with_offsets(
input_tensor, encoding)
self.assertRaggedEqual(expected_codepoints, result[0])
self.assertRaggedEqual(expected_offsets, result[1])
@parameterized.parameters([
dict(input=[b"\xFEED"],
errors="strict",
input_encoding="UTF-8",
exception=errors.InvalidArgumentError,
message="Invalid formatting on input string"),
dict(input="x",
input_encoding="UTF-8",
replacement_char=11141111,
exception=errors.InvalidArgumentError,
message="replacement_char out of unicode codepoint range"),
dict(input="x",
input_encoding="UTF-8",
errors="oranguatan",
exception=(ValueError, errors.InvalidArgumentError)),
]) # pyformat: disable
def testExceptions(self, exception=None, message=None, **args):
with self.assertRaisesRegexp(exception, message):
self.evaluate(ragged_string_ops.unicode_split(**args))
def testUnknownRankError(self):
if context.executing_eagerly():
return
s = array_ops.placeholder(dtypes.string)
message = "Rank of `input` must be statically known."
with self.assertRaisesRegexp(ValueError, message):
      self.evaluate(ragged_string_ops.unicode_split(s, input_encoding="UTF-8"))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/unicode_decode_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConstantOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class ConstantTest(test.TestCase):
def _testCpu(self, x):
np_ans = np.array(x)
with self.cached_session(use_gpu=False):
tf_ans = ops.convert_to_tensor(x).eval()
dtype = dtypes_lib.as_dtype(np_ans.dtype)
if dtype.is_floating or dtype.is_complex:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testGpu(self, x):
np_ans = np.array(x)
with self.cached_session(use_gpu=True):
tf_ans = ops.convert_to_tensor(x).eval()
dtype = dtypes_lib.as_dtype(np_ans.dtype)
if dtype.is_floating or dtype.is_complex:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testInvalidDType(self):
# Test case for GitHub issue 18474
with self.assertRaises(TypeError):
constant_op.constant(dtypes_lib.string, "[,]")
@test_util.run_deprecated_v1
def testBFloat16(self):
bfloat16 = dtypes_lib.bfloat16.as_numpy_dtype
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(bfloat16))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(bfloat16))
self._testAll(np.empty((2, 0, 5)).astype(bfloat16))
@test_util.run_deprecated_v1
def testHalf(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float16))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float16))
self._testAll(np.empty((2, 0, 5)).astype(np.float16))
@test_util.run_deprecated_v1
def testFloat(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
self._testAll(np.empty((2, 0, 5)).astype(np.float32))
@test_util.run_deprecated_v1
def testDouble(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
self._testAll(np.empty((2, 0, 5)).astype(np.float64))
@test_util.run_deprecated_v1
def testInt32(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
np.int32))
self._testAll(np.empty((2, 0, 5)).astype(np.int32))
@test_util.run_deprecated_v1
def testInt64(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
np.int64))
self._testAll(np.empty((2, 0, 5)).astype(np.int64))
@test_util.run_deprecated_v1
def testComplex64(self):
self._testAll(
        complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(
        complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
@test_util.run_deprecated_v1
def testComplex128(self):
self._testAll(
        complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex128))
self._testAll(
        complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex128))
self._testAll(np.empty((2, 0, 5)).astype(np.complex128))
@test_util.run_deprecated_v1
def testString(self):
self._testCpu(
np.array([compat.as_bytes(str(x)) for x in np.arange(-15, 15)]).reshape(
[2, 3, 5]))
self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
@test_util.run_deprecated_v1
def testVariant(self):
# TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant
# copying between CPU and GPU is supported.
with self.session(use_gpu=False):
variant_tensor = tensor_pb2.TensorProto(
dtype=dtypes_lib.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(1, dtype=np.int32).tobytes())
])
const = constant_op.constant(variant_tensor)
const_value = const.op.get_attr("value")
# Ensure we stored the tensor proto properly.
self.assertProtoEquals(variant_tensor, const_value)
# Smoke test -- ensure this executes without trouble.
# Right now, non-numpy-compatible objects cannot be returned from a
# session.run call; similarly, objects that can't be converted to
# native numpy types cannot be passed to ops.convert_to_tensor.
# TODO(ebrevdo): Add registration mechanism for
# ops.convert_to_tensor and for session.run output.
logging_const_op = logging_ops.Print(
const, [const],
message="Variant storing an int, decoded const value:").op
logging_const_op.run()
@test_util.run_deprecated_v1
def testStringWithNulls(self):
with self.cached_session():
val = ops.convert_to_tensor(b"\0\0\0\0").eval()
self.assertEqual(len(val), 4)
self.assertEqual(val, b"\0\0\0\0")
with self.cached_session():
val = ops.convert_to_tensor(b"xx\0xx").eval()
self.assertEqual(len(val), 5)
self.assertAllEqual(val, b"xx\0xx")
nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
with self.cached_session():
val = ops.convert_to_tensor(nested).eval()
# NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
# numpy array, which loses the null terminators.
self.assertEqual(val.tolist(), nested)
def testExplicitShapeNumPy(self):
with ops.Graph().as_default():
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[2, 3, 5])
self.assertEqual(c.get_shape(), [2, 3, 5])
@test_util.assert_no_new_pyobjects_executing_eagerly
def testEagerMemory(self):
"""Tests PyObject refs are managed correctly when executing eagerly."""
constant_op.constant([[1.]])
def testImplicitShapeNumPy(self):
with ops.Graph().as_default():
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self.assertEqual(c.get_shape(), [2, 3, 5])
def testExplicitShapeList(self):
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
self.assertEqual(c.get_shape(), [7])
def testImplicitShapeList(self):
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeNumber(self):
with ops.Graph().as_default():
c = constant_op.constant(1, shape=[1])
self.assertEqual(c.get_shape(), [1])
def testImplicitShapeNumber(self):
with ops.Graph().as_default():
c = constant_op.constant(1)
self.assertEqual(c.get_shape(), [])
def testShapeInconsistent(self):
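    # constant_v1 accepts a shape with more elements than the given value
    # (the remaining entries repeat the last value), whereas the v2 constant
    # requires the element count to match and raises TypeError.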
with ops.Graph().as_default():
c = constant_op.constant_v1([1, 2, 3, 4, 5, 6, 7], shape=[10])
self.assertEqual(c.get_shape(), [10])
with ops.Graph().as_default():
with self.assertRaisesRegexp(
TypeError, "Expected Tensor's shape"):
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
def testPromotionShapes(self):
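    # A scalar or single-element value is broadcast to fill the requested
    # shape.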
with ops.Graph().as_default():
c = constant_op.constant([7], shape=[10])
self.assertEqual(c.get_shape(), [10])
with ops.Graph().as_default():
c = constant_op.constant(3, shape=[10])
self.assertEqual(c.get_shape(), [10])
# pylint: disable=g-long-lambda
def testShapeWrong(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "Too many elements provided."):
constant_op.constant_v1([1, 2, 3, 4, 5, 6, 7], shape=[5])
with self.assertRaisesRegexp(TypeError, "Expected Tensor's shape"):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
# pylint: enable=g-long-lambda
# TODO(b/35396543): Temporarily disable: suspicion that
# this is causing test timeouts.
def _testTooLargeConstant(self):
with ops.Graph().as_default():
large_array = np.zeros((512, 1024, 1024), dtype=np.float32)
with self.assertRaisesRegexp(
ValueError,
"Cannot create a tensor proto whose content is larger than 2GB."):
c = constant_op.constant(large_array)
# TODO(b/35396543): Temporarily disable: suspicion that
# this is causing test timeouts.
def _testTooLargeGraph(self):
with ops.Graph().as_default() as g:
large_array = np.zeros((256, 1024, 1024), dtype=np.float32)
c = constant_op.constant(large_array)
d = constant_op.constant(large_array)
with self.assertRaisesRegexp(ValueError,
"GraphDef cannot be larger than 2GB."):
g.as_graph_def()
@test_util.run_deprecated_v1
def testSparseValuesRaiseErrors(self):
with self.assertRaisesRegexp(ValueError,
"setting an array element with a sequence"):
c = constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)
with self.assertRaisesRegexp(ValueError, "must be a dense"):
c = constant_op.constant([[1, 2], [3]])
with self.assertRaisesRegexp(ValueError, "must be a dense"):
c = constant_op.constant([[1, 2], [3], [4, 5]])
class AsTensorTest(test.TestCase):
def testAsTensorForTensorInput(self):
with ops.Graph().as_default():
t = constant_op.constant(10.0)
x = ops.convert_to_tensor(t)
self.assertIs(t, x)
def testAsTensorForNonTensorInput(self):
with ops.Graph().as_default():
x = ops.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, ops.Tensor))
def testAsTensorForShapeInput(self):
with self.cached_session():
x = ops.convert_to_tensor(tensor_shape.TensorShape([]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([1, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31-1, 2, 3]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([2**31 - 1, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31-1, 2, 3]),
dtype=dtypes_lib.int32)
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([2**31 - 1, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]))
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([2**31, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]),
dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([2**31, 2, 3], self.evaluate(x))
with self.assertRaisesRegexp(
ValueError, "a dimension is too large .2147483648."):
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]),
dtype=dtypes_lib.int32)
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([1, 2, 3], self.evaluate(x))
x = array_ops.reshape(
array_ops.zeros([6]), tensor_shape.TensorShape([2, 3]))
self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], self.evaluate(x))
with self.assertRaisesRegexp(ValueError, "partially known"):
ops.convert_to_tensor(tensor_shape.TensorShape(None))
with self.assertRaisesRegexp(ValueError, "partially known"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64]))
with self.assertRaises(TypeError):
ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.float32)
@test_util.run_deprecated_v1
def testAsTensorForDimensionInput(self):
with self.cached_session():
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3])[1])
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual(2, self.evaluate(x))
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3])[1], dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual(2, self.evaluate(x))
shape = tensor_shape.TensorShape(None)
if shape._v2_behavior:
with self.assertRaisesRegexp(ValueError, "None values not supported"):
ops.convert_to_tensor(shape[1])
with self.assertRaisesRegexp(ValueError, "None values not supported"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])[1])
else:
with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
ops.convert_to_tensor(shape[1])
with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])[1])
class IdentityOpTest(test.TestCase):
def testIdTensor(self):
with ops.Graph().as_default():
x = constant_op.constant(2.0, shape=[6], name="input")
id_op = array_ops.identity(x, name="id")
self.assertTrue(isinstance(id_op.op.inputs[0], ops.Tensor))
self.assertProtoEquals("name: 'id' op: 'Identity' input: 'input' "
"attr { key: 'T' value { type: DT_FLOAT } }",
id_op.op.node_def)
class ZerosTest(test.TestCase):
def _Zeros(self, shape):
with self.cached_session():
ret = array_ops.zeros(shape)
self.assertEqual(shape, ret.get_shape())
return self.evaluate(ret)
def testConst(self):
self.assertTrue(
np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] * 2)))
def testScalar(self):
self.assertEqual(0, self._Zeros([]))
self.assertEqual(0, self._Zeros(()))
with self.cached_session():
scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(0, self.evaluate(scalar))
def testDynamicSizes(self):
np_ans = np.array([[0] * 3] * 2)
with self.cached_session():
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of zeros of the same dimensions as "d".
z = array_ops.zeros(array_ops.shape(d))
out = self.evaluate(z)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
@test_util.run_deprecated_v1
def testDtype(self):
with self.cached_session():
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.zeros([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
z = array_ops.zeros(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
# Test explicit type control
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool, dtypes_lib.string
]:
z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = self.evaluate(z)
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = self.evaluate(z)
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
class ZerosLikeTest(test.TestCase):
def _compareZeros(self, dtype, fully_defined_shape, use_gpu):
with self.cached_session(use_gpu=use_gpu):
# Creates a tensor of non-zero values with shape 2 x 3.
# NOTE(kearnes): The default numpy dtype associated with tf.string is
      # np.object (and can't be changed without breaking a lot of things), which
# causes a TypeError in constant_op.constant below. Here we catch the
# special case of tf.string and set the numpy dtype appropriately.
if dtype == dtypes_lib.string:
numpy_dtype = np.string_
else:
numpy_dtype = dtype.as_numpy_dtype
if fully_defined_shape:
d = constant_op.constant(
np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
else:
d = array_ops.placeholder(dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.zeros_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
# Test that the shape is correct
if fully_defined_shape:
self.assertEqual([2, 3], z_var.get_shape())
# Test that the value is correct
feed_dict = {}
if not fully_defined_shape:
feed_dict[d] = np.ones((2, 3), dtype=numpy_dtype)
z_value = z_var.eval(feed_dict=feed_dict)
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
@test_util.run_deprecated_v1
def testZerosLikeCPU(self):
for dtype in [
dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.int8, dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.uint16,
dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.bool,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.string
]:
self._compareZeros(dtype, fully_defined_shape=False, use_gpu=False)
self._compareZeros(dtype, fully_defined_shape=True, use_gpu=False)
@test_util.run_deprecated_v1
def testZerosLikeGPU(self):
for dtype in [
dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.complex64,
dtypes_lib.complex128, dtypes_lib.bool
]:
self._compareZeros(dtype, fully_defined_shape=False, use_gpu=True)
self._compareZeros(dtype, fully_defined_shape=True, use_gpu=True)
@test_util.run_deprecated_v1
def testZerosLikePartialShape(self):
d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None])
z = array_ops.zeros_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())
@test_util.run_deprecated_v1
def testZerosLikeDtype(self):
# Make sure zeros_like works even for dtypes that cannot be cast between
with self.cached_session():
shape = (3, 5)
dtypes = np.float32, np.complex64
for in_type in dtypes:
x = np.arange(15).astype(in_type).reshape(*shape)
for out_type in dtypes:
y = array_ops.zeros_like(x, dtype=out_type).eval()
self.assertEqual(y.dtype, out_type)
self.assertEqual(y.shape, shape)
self.assertAllEqual(y, np.zeros(shape, dtype=out_type))
@test_util.run_deprecated_v1
def testZerosLikeVariant(self):
# TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant
# copying between CPU and GPU is supported AND we register a
# ZerosLike callback for GPU for Variant storing primitive types
# in variant_op_registry.cc.
with self.session(use_gpu=False):
variant_tensor = tensor_pb2.TensorProto(
dtype=dtypes_lib.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(1, dtype=np.int32).tobytes())
])
const_variant = constant_op.constant(variant_tensor)
zeros_like = array_ops.zeros_like(const_variant)
zeros_like_op = logging_ops.Print(
zeros_like, [const_variant, zeros_like],
message="Variant storing an int, input and output of zeros_like:").op
# Smoke test -- ensure this executes without trouble.
# Right now, non-numpy-compatible objects cannot be returned from a
# session.run call; similarly, objects that can't be converted to
# native numpy types cannot be passed to ops.convert_to_tensor.
# TODO(ebrevdo): Add registration mechanism for
# ops.convert_to_tensor and for session.run output.
zeros_like_op.run()
class OnesTest(test.TestCase):
def _Ones(self, shape):
with self.cached_session():
ret = array_ops.ones(shape)
self.assertEqual(shape, ret.get_shape())
return self.evaluate(ret)
def testConst(self):
self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))
def testScalar(self):
self.assertEqual(1, self._Ones([]))
self.assertEqual(1, self._Ones(()))
with self.cached_session():
scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(1, self.evaluate(scalar))
def testDynamicSizes(self):
np_ans = np.array([[1] * 3] * 2)
with self.cached_session():
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of ones of the same dimensions as "d".
z = array_ops.ones(array_ops.shape(d))
out = self.evaluate(z)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
@test_util.run_deprecated_v1
def testAutoPack(self):
with self.cached_session():
h = array_ops.placeholder(dtypes_lib.int32, shape=[])
w = array_ops.placeholder(dtypes_lib.int32, shape=[])
z = array_ops.ones([h, w])
out = z.eval(feed_dict={h: 4, w: 16})
self.assertAllEqual(out, np.array([[1] * 16] * 4))
@test_util.run_deprecated_v1
def testDtype(self):
with self.cached_session():
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.ones([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
# Test explicit type control
for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128,
dtypes_lib.int64, dtypes_lib.bool):
z = array_ops.ones([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
class OnesLikeTest(test.TestCase):
def testOnesLike(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int8,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.uint16, dtypes_lib.int32,
dtypes_lib.int64, dtypes_lib.bool, dtypes_lib.complex64,
dtypes_lib.complex128
]:
numpy_dtype = dtype.as_numpy_dtype
with self.cached_session():
# Creates a tensor of non-zero values with shape 2 x 3.
d = constant_op.constant(
np.ones(
(2, 3), dtype=numpy_dtype), dtype=dtype)
        # Constructs a tensor of ones of the same dimensions and type as "d".
z_var = array_ops.ones_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = self.evaluate(z_var)
# Test that the value is correct
self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
self.assertEqual([2, 3], z_var.get_shape())
@test_util.run_deprecated_v1
def testOnesLikePartialShape(self):
d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None])
z = array_ops.ones_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())
class FillTest(test.TestCase):
def _compare(self, dims, val, np_ans, use_gpu):
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.fill(dims, val, name="fill")
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out)
# Fill does not set the shape.
# self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, dims, val, np_ans):
self._compare(dims, val, np_ans, False)
self._compare(dims, val, np_ans, True)
def testFillFloat(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillDouble(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt32(self):
np_ans = np.array([[42] * 3] * 2).astype(np.int32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt64(self):
np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex64(self):
np_ans = np.array([[0.15 + 0.3j] * 3] * 2).astype(np.complex64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex128(self):
np_ans = np.array([[0.15 + 0.3j] * 3] * 2).astype(np.complex128)
self._compareAll([2, 3], np_ans[0][0], np_ans)
@test_util.run_deprecated_v1
def testFillString(self):
np_ans = np.array([[b"yolo"] * 3] * 2)
with self.session(use_gpu=False):
tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").eval()
self.assertAllEqual(np_ans, tf_ans)
@test_util.run_deprecated_v1
def testFillNegative(self):
with self.cached_session():
      for shape in (-1,), (2, -1), (-1, 2), (-2,), (-3,):
with self.assertRaises(ValueError):
array_ops.fill(shape, 7)
# Using a placeholder so this won't be caught in static analysis.
dims = array_ops.placeholder(dtypes_lib.int32)
fill_t = array_ops.fill(dims, 3.0)
      for shape in (-1,), (2, -1), (-1, 2), (-2,), (-3,):
with self.assertRaises(errors_impl.InvalidArgumentError):
fill_t.eval({dims: shape})
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# Non-vector dimensions.
with self.assertRaises(ValueError):
array_ops.fill([[0, 1], [2, 3]], 1.0)
# Non-scalar value.
with self.assertRaises(ValueError):
array_ops.fill([3, 2], [1.0, 2.0])
# Partial dimension information.
f = array_ops.fill(array_ops.placeholder(dtypes_lib.int32, shape=(4,)), 3.0)
self.assertEqual([None, None, None, None], f.get_shape().as_list())
f = array_ops.fill(
[array_ops.placeholder(
dtypes_lib.int32, shape=()), 17], 1.0)
self.assertEqual([None, 17], f.get_shape().as_list())
@test_util.run_deprecated_v1
def testGradient(self):
with self.cached_session():
in_v = constant_op.constant(5.0)
out_shape = [3, 2]
out_filled = array_ops.fill(out_shape, in_v)
err = gradient_checker.compute_gradient_error(in_v, [], out_filled,
out_shape)
self.assertLess(err, 1e-3)
class PlaceholderTest(test.TestCase):
@test_util.run_deprecated_v1
def testDtype(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=(10, 10), name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
self.evaluate(p_identity)
@test_util.run_deprecated_v1
def testShape(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=(10, 10), name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float and "
r"shape \[10,10\]"):
self.evaluate(p_identity)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
p_identity.eval(feed_dict={p: feed_array[:5, :5]})
@test_util.run_deprecated_v1
def testUnknownShape(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=None, name="p")
p_identity = array_ops.identity(p)
# can feed anything
feed_array = np.random.rand(10, 3)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
feed_array = np.random.rand(4, 2, 5)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
@test_util.run_deprecated_v1
def testScalarShape(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=[], name="p")
p_identity = array_ops.identity(p)
self.assertAllClose(p_identity.eval(feed_dict={p: 5}), 5)
@test_util.run_deprecated_v1
def testPartialShape(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=[None, 3], name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 3)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
p_identity.eval(feed_dict={p: feed_array[:5, :2]})
@test_util.run_deprecated_v1
def testPartialShapeWhenNotFed(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=[None, 3], name="p")
p_identity = array_ops.identity(p)
# Should trigger an operator error, not a shape error.
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
self.evaluate(p_identity)
@test_util.run_deprecated_v1
def testControlDependency(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.int32, shape=[], name="p")
with ops.control_dependencies([p]):
c = constant_op.constant(5, dtypes_lib.int32)
d = math_ops.multiply(p, c)
      val = np.array(2).astype(np.int32)
self.assertEqual(10, d.eval(feed_dict={p: val}))
@test_util.run_deprecated_v1
def testBadShape(self):
with self.assertRaises(ValueError):
array_ops.placeholder(dtypes_lib.float32, shape=(-1, 10))
@test_util.run_deprecated_v1
def testTensorStr(self):
a = array_ops.placeholder(dtypes_lib.float32, shape=None, name="a")
self.assertEqual("<tf.Tensor 'a:0' shape=<unknown> dtype=float32>", repr(a))
b = array_ops.placeholder(dtypes_lib.int32, shape=(32, 40), name="b")
self.assertEqual("<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>", repr(b))
c = array_ops.placeholder(dtypes_lib.qint32, shape=(32, None, 2), name="c")
if c.shape._v2_behavior:
self.assertEqual(
"<tf.Tensor 'c:0' shape=(32, None, 2) dtype=qint32>", repr(c))
else:
self.assertEqual(
"<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>", repr(c))
@test_util.run_deprecated_v1
def testOldGraph(self):
# Load graph generated from earlier version of TF where
# placeholder shape was not set.
#
# a = tf.compat.v1.placeholder(tf.float32)
# b = a + 1.0
#
# Older graph's default shape is 'shape {}', not 'shape {
# unknown_rank: true }'
graph = """
node {
name: "Placeholder"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
}
}
}
}
node {
name: "add/y"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
}
float_val: 1.0
}
}
}
}
node {
name: "add"
op: "Add"
input: "Placeholder"
input: "add/y"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
versions {
producer: 21
}
"""
gdef = graph_pb2.GraphDef()
text_format.Merge(graph, gdef)
with self.cached_session():
p, ret = importer.import_graph_def(
gdef, return_elements=["Placeholder:0", "add:0"])
# Feed in a vector of two elements. Since the producer version
# of 21, a shape of {} is interpreted as "any shape". If
# producer version were 22, then we'd get a shape mismatch
# error.
self.assertAllEqual([2.0, 3.0], ret.eval(feed_dict={p: [1.0, 2.0]}))
class PlaceholderWithDefaultTest(test.TestCase):
@test_util.run_deprecated_v1
def testFullShape(self):
with self.session(force_gpu=test_util.is_gpu_available()):
p = array_ops.placeholder_with_default([[2, 2], [2, 2]], shape=[2, 2])
a = array_ops.identity(p)
self.assertAllEqual([[2, 2], [2, 2]], self.evaluate(a))
self.assertAllEqual(
[[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
with self.assertRaises(ValueError):
a.eval(feed_dict={p: [[6, 6, 6], [6, 6, 6]]})
@test_util.run_deprecated_v1
def testPartialShape(self):
with self.session(force_gpu=test_util.is_gpu_available()):
p = array_ops.placeholder_with_default([1, 2, 3], shape=[None])
a = array_ops.identity(p)
self.assertAllEqual([1, 2, 3], self.evaluate(a))
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
with self.assertRaises(ValueError):
a.eval(feed_dict={p: [[2, 2], [2, 2]]})
@test_util.run_deprecated_v1
def testNoShape(self):
with self.session(force_gpu=test_util.is_gpu_available()):
p = array_ops.placeholder_with_default([17], shape=None)
a = array_ops.identity(p)
self.assertAllEqual([17], self.evaluate(a))
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
self.assertAllEqual(
[[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
@test_util.run_deprecated_v1
def testGradient(self):
with self.session(force_gpu=test_util.is_gpu_available()):
x = array_ops.placeholder(dtypes_lib.float32, [5, 7])
y = array_ops.placeholder_with_default(x, None)
err = gradient_checker.compute_gradient_error(x, [5, 7], y, [5, 7])
self.assertLess(err, 1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/constant_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for morphological filtering operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class DilationTest(test.TestCase):
def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu):
"""Verifies the output values of the dilation function.
Args:
image: Input tensor with shape: [batch, in_height, in_width, channels].
kernel: Filter tensor with shape: [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
padding: Padding type.
out: Expected output.
use_gpu: Whether we are running on GPU.
"""
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.cached_session(use_gpu=use_gpu):
out_tensor = nn_ops.dilation2d(
constant_op.constant(image),
constant_op.constant(kernel),
strides=strides,
rates=rates,
padding=padding,
name="dilation2d")
self.assertAllClose(out, self.evaluate(out_tensor))
def _testDilationValidPadding(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 1, 1, 1]
out = [[[[.5]]]]
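    # A sketch of the arithmetic, assuming TF's dilation definition
    # out = max over the window of (image + kernel): the single VALID
    # output is max(.1 + .4, .2 + .3, .3 + .1, .4 + .0) = .5.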
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def _testDilationSamePadding(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 2, 2, 1]
out = [[[[.5], [.6]], [[.7], [.8]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testDilationSamePaddingDepth(self, use_gpu):
# [1, 2, 2, 3]
image = [[[[.1, .2, .0], [.2, .3, .1]], [[.3, .4, .2], [.4, .5, .3]]]]
# [2, 2, 3]
kernel = [[[.4, .5, .3], [.3, .4, .2]], [[.1, .2, .0], [.0, .1, -.1]]]
# [1, 2, 2, 3]
out = [[[[.5, .7, .3], [.6, .8, .4]], [[.7, .9, .5], [.8, 1., .6]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testDilationSamePaddingBatch(self, use_gpu):
# [2, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]], [[[.2], [.3]], [[.4], [.5]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [2, 2, 2, 1]
out = [[[[.5], [.6]], [[.7], [.8]]], [[[.6], [.7]], [[.8], [.9]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testDilationValidPaddingNonSquareWindow(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [1, 2, 1]
kernel = [[[.4], [.3]]]
# [1, 2, 1, 1]
out = [[[[.5]], [[.7]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def _testDilationSamePaddingRate(self, use_gpu):
# [1, 3, 3, 1]
image = [[[[.1], [.2], [.3]], [[.4], [.5], [.6]], [[.7], [.8], [.9]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.2]]]
# Because rate = 2, the effective kernel is [3, 3, 1]:
# kernel_eff = [[[.4], [.0], [.3]],
# [[.0], [.0], [.0]],
# [[.1], [.0], [.2]]]
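    # In general, the effective size of an atrous kernel is
    # rate * (kernel_size - 1) + 1, i.e. 2 * (2 - 1) + 1 = 3 here.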
# [1, 3, 3, 1]
out = [[[[.7], [.8], [.6]], [[1.0], [1.1], [.9]], [[.8], [.9], [.9]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[2, 2],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testDilationValidPaddingUnevenStride(self, use_gpu):
    # [1, 3, 4, 1]
image = [[[[.1], [.2], [.3], [.4]], [[.5], [.6], [.7], [.8]],
[[.9], [1.0], [1.1], [1.2]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.2]]]
# [1, 2, 2, 1]
out = [[[[.8], [1.0]], [[1.2], [1.4]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 2],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def testDilation(self):
for use_gpu in True, False:
self._testDilationValidPadding(use_gpu)
self._testDilationSamePadding(use_gpu)
self._testDilationSamePaddingDepth(use_gpu)
self._testDilationSamePaddingBatch(use_gpu)
self._testDilationValidPaddingNonSquareWindow(use_gpu)
self._testDilationSamePaddingRate(use_gpu)
self._testDilationValidPaddingUnevenStride(use_gpu)
def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides, rates,
padding, use_gpu):
"""Verifies the gradients of the dilation function.
Args:
image_shape: Input shape, [batch, in_height, in_width, channels].
kernel_shape: Filter shape, [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
padding: Padding type.
use_gpu: Whether we are running on GPU.
"""
assert image_shape[3] == kernel_shape[2]
np.random.seed(1) # Make it reproducible.
image = np.random.random_sample(image_shape).astype(np.float32)
kernel = np.random.random_sample(kernel_shape).astype(np.float32)
image_init = np.random.random_sample(image_shape).astype(np.float32)
kernel_init = np.random.random_sample(kernel_shape).astype(np.float32)
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.cached_session(use_gpu=use_gpu):
image_tensor = constant_op.constant(
image, shape=image_shape, name="input")
kernel_tensor = constant_op.constant(
kernel, shape=kernel_shape, name="filter")
out_tensor = nn_ops.dilation2d(
image_tensor,
kernel_tensor,
strides=strides,
rates=rates,
padding=padding,
name="dilation2d")
out_shape = self.evaluate(out_tensor).shape
# Small delta is necessary for argmax to remain the same.
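      # The gradient flows only to the arg max input element, so a larger
      # finite-difference step could change which element attains the max
      # and invalidate the numeric estimate.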
err = gradient_checker.compute_gradient_error(
[image_tensor, kernel_tensor], [image_shape, kernel_shape],
out_tensor,
out_shape, [image_init, kernel_init],
delta=1e-3)
print("Dilation gradient error = %f" % err)
self.assertLess(err, 1e-4)
def _testDilationGradValidPadding_1x1x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[1, 1, 1],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
use_gpu=use_gpu)
def _testDilationGradSamePadding_1x1x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[1, 1, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testDilationGradSamePadding_1x1x2(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 2],
kernel_shape=[1, 1, 2],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testDilationGradValidPadding_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
use_gpu=use_gpu)
def _testDilationGradSamePadding_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testDilationGradSamePaddingBatch_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[4, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testDilationGradSamePadding_2x2x4(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 4],
kernel_shape=[2, 2, 4],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testDilationGrad(self):
for use_gpu in True, False:
self._testDilationGradValidPadding_1x1x1(use_gpu)
self._testDilationGradSamePadding_1x1x1(use_gpu)
self._testDilationGradSamePadding_1x1x2(use_gpu)
self._testDilationGradValidPadding_2x2x1(use_gpu)
self._testDilationGradSamePadding_2x2x1(use_gpu)
self._testDilationGradSamePaddingBatch_2x2x1(use_gpu)
self._testDilationGradSamePadding_2x2x4(use_gpu)
class ErosionTest(test.TestCase):
def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu):
"""Verifies the output values of the erosion function.
Args:
image: Input tensor with shape: [batch, in_height, in_width, channels].
kernel: Filter tensor with shape: [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
padding: Padding type.
out: Expected output.
use_gpu: Whether we are running on GPU.
"""
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.cached_session(use_gpu=use_gpu):
out_tensor = nn_ops.erosion2d(
constant_op.constant(image),
constant_op.constant(kernel),
strides=strides,
rates=rates,
padding=padding,
name="erosion2d")
self.assertAllClose(out, self.evaluate(out_tensor))
def _testErosionValidPadding(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 1, 1, 1]
out = [[[[.0]]]]
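    # A sketch of the arithmetic, assuming TF's erosion definition
    # (mirrored kernel, min of differences): the single VALID output is
    # min(.4 - .4, .3 - .3, .2 - .1, .1 - .0) = .0.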
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def _testErosionSamePadding(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 2, 2, 1]
out = [[[[.0], [.1]], [[.3], [.4]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionSamePaddingDepth(self, use_gpu):
# [1, 2, 2, 3]
image = [[[[.1, .2, .0], [.2, .3, .1]], [[.3, .4, .2], [.4, .5, .3]]]]
# [2, 2, 3]
kernel = [[[.4, .5, .3], [.3, .4, .2]], [[.1, .2, .0], [.0, .1, -.1]]]
# [1, 2, 2, 3]
out = [[[[.0, .0, .0], [.1, .1, .1]], [[.3, .3, .3], [.4, .4, .4]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionSamePaddingBatch(self, use_gpu):
# [2, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]], [[[.2], [.3]], [[.4], [.5]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [2, 2, 2, 1]
out = [[[[.0], [.1]], [[.3], [.4]]], [[[.1], [.2]], [[.4], [.5]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionValidPaddingNonSquareWindow(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [1, 2, 1]
kernel = [[[.4], [.3]]]
# [1, 2, 1, 1]
out = [[[[-.2]], [[.0]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def _testErosionSamePaddingRate(self, use_gpu):
# [1, 3, 3, 1]
image = [[[[.1], [.2], [.3]], [[.4], [.5], [.6]], [[.7], [.8], [.9]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.2]]]
# Because rate = 2, the effective kernel is [3, 3, 1]:
# kernel_eff = [[[.4], [.0], [.3]],
# [[.0], [.0], [.0]],
# [[.1], [.0], [.2]]]
# [1, 3, 3, 1]
    out = [[[[.1], [.1], [.2]], [[.1], [-.1], [.0]], [[.4], [.2], [.3]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[2, 2],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionValidPaddingUnevenStride(self, use_gpu):
    # [1, 3, 4, 1]
image = [[[[.1], [.2], [.3], [.4]], [[.5], [.6], [.7], [.8]],
[[.9], [1.0], [1.1], [1.2]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.2]]]
# [1, 2, 2, 1]
out = [[[[-.1], [.1]], [[.3], [.5]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 2],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def testErosion(self):
for use_gpu in True, False:
self._testErosionValidPadding(use_gpu)
self._testErosionSamePadding(use_gpu)
self._testErosionSamePaddingDepth(use_gpu)
self._testErosionSamePaddingBatch(use_gpu)
self._testErosionValidPaddingNonSquareWindow(use_gpu)
self._testErosionSamePaddingRate(use_gpu)
self._testErosionValidPaddingUnevenStride(use_gpu)
def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides, rates,
padding, use_gpu):
"""Verifies the gradients of the erosion function.
Args:
image_shape: Input shape, [batch, in_height, in_width, channels].
kernel_shape: Filter shape, [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
padding: Padding type.
use_gpu: Whether we are running on GPU.
"""
assert image_shape[3] == kernel_shape[2]
np.random.seed(1) # Make it reproducible.
image = np.random.random_sample(image_shape).astype(np.float32)
kernel = np.random.random_sample(kernel_shape).astype(np.float32)
image_init = np.random.random_sample(image_shape).astype(np.float32)
kernel_init = np.random.random_sample(kernel_shape).astype(np.float32)
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.cached_session(use_gpu=use_gpu):
image_tensor = constant_op.constant(
image, shape=image_shape, name="input")
kernel_tensor = constant_op.constant(
kernel, shape=kernel_shape, name="filter")
out_tensor = nn_ops.erosion2d(
image_tensor,
kernel_tensor,
strides=strides,
rates=rates,
padding=padding,
name="erosion2d")
out_shape = self.evaluate(out_tensor).shape
# Small delta is necessary for argmax to remain the same.
err = gradient_checker.compute_gradient_error(
[image_tensor, kernel_tensor], [image_shape, kernel_shape],
out_tensor,
out_shape, [image_init, kernel_init],
delta=1e-3)
print("Erosion gradient error = %f" % err)
self.assertLess(err, 1e-4)
def _testErosionGradValidPadding_1x1x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[1, 1, 1],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
use_gpu=use_gpu)
def _testErosionGradSamePadding_1x1x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[1, 1, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradSamePadding_1x1x2(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 2],
kernel_shape=[1, 1, 2],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradValidPadding_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
use_gpu=use_gpu)
def _testErosionGradSamePadding_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradSamePaddingBatch_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[4, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradSamePadding_2x2x4(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 4],
kernel_shape=[2, 2, 4],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testErosionGrad(self):
for use_gpu in True, False:
self._testErosionGradValidPadding_1x1x1(use_gpu)
self._testErosionGradSamePadding_1x1x1(use_gpu)
self._testErosionGradSamePadding_1x1x2(use_gpu)
self._testErosionGradValidPadding_2x2x1(use_gpu)
self._testErosionGradSamePadding_2x2x1(use_gpu)
self._testErosionGradSamePaddingBatch_2x2x1(use_gpu)
self._testErosionGradSamePadding_2x2x4(use_gpu)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/morphological_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReorder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SparseReorderTest(test.TestCase):
def _SparseTensorPlaceholder(self):
return sparse_tensor.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.float64),
array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.float64)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
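    # The base indices above are in canonical row-major order; a
    # non-identity permutation scrambles them for the reorder tests below.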
return sparse_tensor.SparseTensorValue(ind, val, shape)
def testStaticShapeInfoPreserved(self):
sp_input = sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_5x6(np.arange(6)))
self.assertAllEqual((5, 6), sp_input.get_shape())
sp_output = sparse_ops.sparse_reorder(sp_input)
self.assertAllEqual((5, 6), sp_output.get_shape())
def testAlreadyInOrder(self):
with self.session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6(np.arange(6))
sp_output = sparse_ops.sparse_reorder(input_val)
output_val = self.evaluate(sp_output)
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
@test_util.run_deprecated_v1
def testFeedAlreadyInOrder(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6(np.arange(6))
sp_output = sparse_ops.sparse_reorder(sp_input)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
def testOutOfOrder(self):
expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
with self.session(use_gpu=False) as sess:
for _ in range(5): # To test various random permutations
input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
sp_output = sparse_ops.sparse_reorder(input_val)
output_val = self.evaluate(sp_output)
self.assertAllEqual(output_val.indices, expected_output_val.indices)
self.assertAllEqual(output_val.values, expected_output_val.values)
self.assertAllEqual(output_val.dense_shape,
expected_output_val.dense_shape)
@test_util.run_deprecated_v1
def testFeedOutOfOrder(self):
expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
with self.session(use_gpu=False) as sess:
for _ in range(5): # To test various random permutations
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
sp_output = sparse_ops.sparse_reorder(sp_input)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, expected_output_val.indices)
self.assertAllEqual(output_val.values, expected_output_val.values)
self.assertAllEqual(output_val.dense_shape,
expected_output_val.dense_shape)
@test_util.run_deprecated_v1
def testGradients(self):
with self.session(use_gpu=False):
for _ in range(5): # To test various random permutations
input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
sp_input = sparse_tensor.SparseTensor(input_val.indices,
input_val.values,
input_val.dense_shape)
sp_output = sparse_ops.sparse_reorder(sp_input)
err = gradient_checker.compute_gradient_error(
sp_input.values,
input_val.values.shape,
sp_output.values,
input_val.values.shape,
x_init_value=input_val.values)
self.assertLess(err, 1e-11)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/sparse_reorder_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.bitcast."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class BitcastTest(test.TestCase):
def _testBitcast(self, x, datatype, shape):
with test_util.use_gpu():
tf_ans = array_ops.bitcast(x, datatype)
out = self.evaluate(tf_ans)
buff_after = memoryview(out).tobytes()
buff_before = memoryview(x).tobytes()
self.assertEqual(buff_before, buff_after)
self.assertEqual(tf_ans.get_shape(), shape)
self.assertEqual(tf_ans.dtype, datatype)
def testSmaller(self):
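    # float64 is 8 bytes and int8 is 1 byte, so bitcasting to the smaller
    # dtype appends a trailing dimension of size 8.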
x = np.random.rand(3, 2)
datatype = dtypes.int8
shape = [3, 2, 8]
self._testBitcast(x, datatype, shape)
def testLarger(self):
x = np.arange(16, dtype=np.int8).reshape([4, 4])
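    # Four int8 values pack into one int32, so the size-4 innermost
    # dimension collapses and the result shape is [4].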
datatype = dtypes.int32
shape = [4]
self._testBitcast(x, datatype, shape)
def testSameDtype(self):
x = np.random.rand(3, 4)
shape = [3, 4]
self._testBitcast(x, x.dtype, shape)
def testSameSize(self):
x = np.random.rand(3, 4)
shape = [3, 4]
self._testBitcast(x, dtypes.int64, shape)
@test_util.run_deprecated_v1
def testErrors(self):
x = np.zeros([1, 1], np.int8)
datatype = dtypes.int32
with self.assertRaisesRegexp(ValueError, "Cannot bitcast due to shape"):
array_ops.bitcast(x, datatype, None)
def testEmpty(self):
x = np.ones([], np.int32)
datatype = dtypes.int8
shape = [4]
self._testBitcast(x, datatype, shape)
@test_util.run_deprecated_v1
def testUnknown(self):
x = array_ops.placeholder(dtypes.float32)
datatype = dtypes.int8
array_ops.bitcast(x, datatype, None)
def testQuantizedType(self):
shape = [3, 4]
x = np.zeros(shape, np.uint16)
datatype = dtypes.quint16
self._testBitcast(x, datatype, shape)
def testUnsignedType(self):
shape = [3, 4]
x = np.zeros(shape, np.int64)
datatype = dtypes.uint64
self._testBitcast(x, datatype, shape)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/bitcast_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.numerics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.platform import test
class VerifyTensorAllFiniteTest(test.TestCase):
def testVerifyTensorAllFiniteSucceeds(self):
x_shape = [5, 4]
x = np.random.random_sample(x_shape).astype(np.float32)
with test_util.use_gpu():
t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
t_verified = numerics.verify_tensor_all_finite(t,
"Input is not a number.")
self.assertAllClose(x, self.evaluate(t_verified))
def testVerifyTensorAllFiniteFails(self):
x_shape = [5, 4]
x = np.random.random_sample(x_shape).astype(np.float32)
my_msg = "Input is not a number."
# Test NaN.
x[0] = np.nan
with test_util.use_gpu():
with self.assertRaisesOpError(my_msg):
t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
t_verified = numerics.verify_tensor_all_finite(t, my_msg)
self.evaluate(t_verified)
# Test Inf.
x[0] = np.inf
with test_util.use_gpu():
with self.assertRaisesOpError(my_msg):
t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
t_verified = numerics.verify_tensor_all_finite(t, my_msg)
self.evaluate(t_verified)
@test_util.run_v1_only("b/120545219")
class NumericsTest(test.TestCase):
def testInf(self):
with self.session(graph=ops.Graph()):
t1 = constant_op.constant(1.0)
t2 = constant_op.constant(0.0)
a = math_ops.div(t1, t2)
check = numerics.add_check_numerics_ops()
a = control_flow_ops.with_dependencies([check], a)
with self.assertRaisesOpError("Inf"):
self.evaluate(a)
def testNaN(self):
with self.session(graph=ops.Graph()):
t1 = constant_op.constant(0.0)
t2 = constant_op.constant(0.0)
a = math_ops.div(t1, t2)
check = numerics.add_check_numerics_ops()
a = control_flow_ops.with_dependencies([check], a)
with self.assertRaisesOpError("NaN"):
self.evaluate(a)
def testBoth(self):
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([1.0, 0.0])
t2 = constant_op.constant([0.0, 0.0])
a = math_ops.div(t1, t2)
check = numerics.add_check_numerics_ops()
a = control_flow_ops.with_dependencies([check], a)
with self.assertRaisesOpError("Inf and NaN"):
self.evaluate(a)
def testPassThrough(self):
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
checked = array_ops.check_numerics(t1, message="pass through test")
value = self.evaluate(checked)
self.assertAllEqual(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), value)
self.assertEqual([2, 3], checked.get_shape())
def testControlFlowCond(self):
predicate = array_ops.placeholder(dtypes.bool, shape=[])
_ = control_flow_ops.cond(predicate,
lambda: constant_op.constant([37.]),
lambda: constant_op.constant([42.]))
with self.assertRaisesRegexp(
ValueError,
r"`tf\.add_check_numerics_ops\(\) is not compatible with "
r"TensorFlow control flow operations such as `tf\.cond\(\)` "
r"or `tf.while_loop\(\)`\."):
numerics.add_check_numerics_ops()
def testControlFlowWhile(self):
predicate = array_ops.placeholder(dtypes.bool, shape=[])
_ = control_flow_ops.while_loop(lambda _: predicate,
lambda _: constant_op.constant([37.]),
[constant_op.constant([42.])])
with self.assertRaisesRegexp(
ValueError,
r"`tf\.add_check_numerics_ops\(\) is not compatible with "
r"TensorFlow control flow operations such as `tf\.cond\(\)` "
r"or `tf.while_loop\(\)`\."):
numerics.add_check_numerics_ops()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/numerics_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.unique_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.platform import test
class UniqueTest(test.TestCase):
def testInt32(self):
x = np.random.randint(2, high=10, size=7000)
with self.cached_session() as sess:
y, idx = array_ops.unique(x)
tf_y, tf_idx = self.evaluate([y, idx])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
def testInt32OutIdxInt64(self):
x = np.random.randint(2, high=10, size=7000)
with self.cached_session() as sess:
y, idx = array_ops.unique(x, out_idx=dtypes.int64)
tf_y, tf_idx = self.evaluate([y, idx])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
def testString(self):
indx = np.random.randint(65, high=122, size=7000)
x = [chr(i) for i in indx]
with self.cached_session() as sess:
y, idx = array_ops.unique(x)
tf_y, tf_idx = self.evaluate([y, idx])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]].decode('ascii'))
def testInt32Axis(self):
for dtype in [np.int32, np.int64]:
x = np.array([[1, 0, 0], [1, 0, 0], [2, 0, 0]])
with self.cached_session() as sess:
y0, idx0 = gen_array_ops.unique_v2(x, axis=np.array([0], dtype))
tf_y0, tf_idx0 = self.evaluate([y0, idx0])
y1, idx1 = gen_array_ops.unique_v2(x, axis=np.array([1], dtype))
tf_y1, tf_idx1 = self.evaluate([y1, idx1])
self.assertAllEqual(tf_y0, np.array([[1, 0, 0], [2, 0, 0]]))
self.assertAllEqual(tf_idx0, np.array([0, 0, 1]))
self.assertAllEqual(tf_y1, np.array([[1, 0], [1, 0], [2, 0]]))
self.assertAllEqual(tf_idx1, np.array([0, 1, 1]))
def testInt32V2(self):
    # This test is only temporary; once V2 is used
    # by default, the axis will be wrapped to allow `axis=None`.
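    # An empty `axis` tensor means the input is treated as a flat 1-D
    # vector, matching the behavior of `array_ops.unique` above.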
x = np.random.randint(2, high=10, size=7000)
with self.cached_session() as sess:
y, idx = gen_array_ops.unique_v2(x, axis=np.array([], np.int32))
tf_y, tf_idx = self.evaluate([y, idx])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
class UniqueWithCountsTest(test.TestCase):
def testInt32(self):
x = np.random.randint(2, high=10, size=7000)
with self.cached_session() as sess:
y, idx, count = array_ops.unique_with_counts(x)
tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
for value, count in zip(tf_y, tf_count):
self.assertEqual(count, np.sum(x == value))
def testInt32OutIdxInt64(self):
x = np.random.randint(2, high=10, size=7000)
with self.cached_session() as sess:
y, idx, count = array_ops.unique_with_counts(x, out_idx=dtypes.int64)
tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
for value, count in zip(tf_y, tf_count):
self.assertEqual(count, np.sum(x == value))
def testString(self):
indx = np.random.randint(65, high=122, size=7000)
x = [chr(i) for i in indx]
with self.cached_session() as sess:
y, idx, count = array_ops.unique_with_counts(x)
tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]].decode('ascii'))
for value, count in zip(tf_y, tf_count):
v = [1 if x[i] == value.decode('ascii') else 0 for i in range(7000)]
self.assertEqual(count, sum(v))
def testInt32Axis(self):
for dtype in [np.int32, np.int64]:
x = np.array([[1, 0, 0], [1, 0, 0], [2, 0, 0]])
with self.cached_session() as sess:
y0, idx0, count0 = gen_array_ops.unique_with_counts_v2(
x, axis=np.array([0], dtype))
tf_y0, tf_idx0, tf_count0 = self.evaluate([y0, idx0, count0])
y1, idx1, count1 = gen_array_ops.unique_with_counts_v2(
x, axis=np.array([1], dtype))
tf_y1, tf_idx1, tf_count1 = self.evaluate([y1, idx1, count1])
self.assertAllEqual(tf_y0, np.array([[1, 0, 0], [2, 0, 0]]))
self.assertAllEqual(tf_idx0, np.array([0, 0, 1]))
self.assertAllEqual(tf_count0, np.array([2, 1]))
self.assertAllEqual(tf_y1, np.array([[1, 0], [1, 0], [2, 0]]))
self.assertAllEqual(tf_idx1, np.array([0, 1, 1]))
self.assertAllEqual(tf_count1, np.array([1, 2]))
def testInt32V2(self):
    # This test is only temporary; once V2 is used
    # by default, the axis will be wrapped to allow `axis=None`.
x = np.random.randint(2, high=10, size=7000)
with self.cached_session() as sess:
y, idx, count = gen_array_ops.unique_with_counts_v2(
x, axis=np.array([], np.int32))
tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
for value, count in zip(tf_y, tf_count):
self.assertEqual(count, np.sum(x == value))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/unique_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image.extract_glimpse()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test
class ExtractGlimpseTest(test.TestCase):
def _VerifyValues(self, tensor_in_sizes, glimpse_sizes, offsets,
expected_rows, expected_cols):
"""Verifies the output values of the glimpse extraction kernel.
Args:
tensor_in_sizes: Input tensor dimensions in [input_rows, input_cols].
glimpse_sizes: Dimensions of the glimpse in [glimpse_rows, glimpse_cols].
offsets: Relative location of the center of the glimpse in the input
image expressed as [row_offset, col_offset].
      expected_rows: A list containing the expected row numbers (None for
        out-of-bound entries that are expected to be replaced by uniform
        random entries in [0, 1)).
      expected_cols: Same as expected_rows, but for column numbers.
"""
rows = tensor_in_sizes[0]
cols = tensor_in_sizes[1]
# Row Tensor with entries by row.
# [[ 1 1 1 ... ]
# [ 2 2 2 ... ]
# [ 3 3 3 ... ]
# [ ...
# ]
t_rows = array_ops.tile(
[[1.0 * r] for r in range(1, rows + 1)], [1, cols], name='tile_rows')
# Shuffle to switch to a convention of (batch_size, height, width, depth).
t_rows_4d = array_ops.transpose(
array_ops.expand_dims(array_ops.expand_dims(t_rows, 0), 3),
[0, 2, 1, 3])
# Column Tensor with entries by column.
# [[ 1 2 3 4 ... ]
# [ 1 2 3 4 ... ]
# [ 1 2 3 4 ... ]
# [ ... ]
# ]
t_cols = array_ops.tile(
[[1.0 * r for r in range(1, cols + 1)]], [rows, 1], name='tile_cols')
# Shuffle to switch to a convention of (batch_size, height, width, depth).
t_cols_4d = array_ops.transpose(
array_ops.expand_dims(array_ops.expand_dims(t_cols, 0), 3),
[0, 2, 1, 3])
# extract_glimpses from Row and Column Tensor, respectively.
    # Switch order for glimpse_sizes and offsets to go from the (row, col)
    # convention to TensorFlow's (height, width) convention.
t1 = constant_op.constant([glimpse_sizes[1], glimpse_sizes[0]], shape=[2])
t2 = constant_op.constant([offsets[1], offsets[0]], shape=[1, 2])
glimpse_rows = (array_ops.transpose(
image_ops.extract_glimpse(t_rows_4d, t1, t2), [0, 2, 1, 3]))
glimpse_cols = (array_ops.transpose(
image_ops.extract_glimpse(t_cols_4d, t1, t2), [0, 2, 1, 3]))
# Evaluate the TensorFlow Graph.
with self.cached_session() as sess:
value_rows, value_cols = self.evaluate([glimpse_rows, glimpse_cols])
# Check dimensions of returned glimpse.
self.assertEqual(value_rows.shape[1], glimpse_sizes[0])
self.assertEqual(value_rows.shape[2], glimpse_sizes[1])
self.assertEqual(value_cols.shape[1], glimpse_sizes[0])
self.assertEqual(value_cols.shape[2], glimpse_sizes[1])
# Check entries.
min_random_val = 0
max_random_val = max(rows, cols)
for i in range(glimpse_sizes[0]):
for j in range(glimpse_sizes[1]):
if expected_rows[i] is None or expected_cols[j] is None:
self.assertGreaterEqual(value_rows[0][i][j][0], min_random_val)
self.assertLessEqual(value_rows[0][i][j][0], max_random_val)
self.assertGreaterEqual(value_cols[0][i][j][0], min_random_val)
self.assertLessEqual(value_cols[0][i][j][0], max_random_val)
else:
self.assertEqual(value_rows[0][i][j][0], expected_rows[i])
self.assertEqual(value_cols[0][i][j][0], expected_cols[j])
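  # With centered, normalized offsets (the default), an offset of 0.0 maps to
  # the middle pixel: row (41 + 1) / 2 = 21 and col (61 + 1) / 2 = 31 in the
  # tests below, so a 3 x 5 glimpse covers rows 20..22 and cols 29..33.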
def testCenterGlimpse(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[3, 5],
offsets=[0.0, 0.0],
expected_rows=[20, 21, 22],
expected_cols=[29, 30, 31, 32, 33])
def testEmptyTensor(self):
empty_image = np.zeros((0, 4, 3, 0))
offsets = np.zeros((0, 2))
with self.cached_session():
result = image_ops.extract_glimpse(empty_image, [1, 1], offsets)
self.assertAllEqual(
np.zeros((0, 1, 1, 0), dtype=np.float32), self.evaluate(result))
def testLargeCenterGlimpse(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[41, 61],
offsets=[0.0, 0.0],
expected_rows=list(range(1, 42)),
expected_cols=list(range(1, 62)))
def testTooLargeCenterGlimpse(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[43, 63],
offsets=[0.0, 0.0],
expected_rows=[None] + list(range(1, 42)) + [None],
expected_cols=[None] + list(range(1, 62)) + [None])
def testGlimpseFullOverlap(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[3, 5],
offsets=[0.1, 0.3],
expected_rows=[22, 23, 24],
expected_cols=[38, 39, 40, 41, 42])
def testGlimpseFullOverlap2(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 3],
offsets=[-0.7, -0.7],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[8, 9, 10])
def testGlimpseBeforeLeftMargin(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 5],
offsets=[-0.7, -0.9],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[1, 2, 3, 4, 5])
def testGlimpseLowerRightCorner(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[7, 5],
offsets=[1.0, 1.0],
expected_rows=[38, 39, 40, 41, None, None, None],
expected_cols=[59, 60, 61, None, None])
def testGlimpseNoOverlap(self):
self._VerifyValues(
tensor_in_sizes=[20, 30],
glimpse_sizes=[3, 3],
offsets=[-2.0, 2.0],
expected_rows=[None, None, None],
expected_cols=[None, None, None])
def testGlimpseOnLeftMargin(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 7],
offsets=[-0.7, -1.0],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[None, None, None, 1, 2, 3, 4])
def testGlimpseUpperMargin(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[7, 5],
offsets=[-1, 0.9],
expected_rows=[None, None, None, 1, 2, 3, 4],
expected_cols=[56, 57, 58, 59, 60])
def testGlimpseNoiseZero(self):
# Image:
# [ 0. 1. 2. 3. 4.]
# [ 5. 6. 7. 8. 9.]
# [ 10. 11. 12. 13. 14.]
# [ 15. 16. 17. 18. 19.]
# [ 20. 21. 22. 23. 24.]
img = constant_op.constant(
np.arange(25).reshape((1, 5, 5, 1)), dtype=dtypes.float32)
with self.test_session():
# Result 1:
# [ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]
result1 = image_ops.extract_glimpse_v2(
img, [3, 3], [[-2, 2]],
centered=False,
normalized=False,
noise='zero')
self.assertAllEqual(
np.asarray([[0, 0, 0], [0, 0, 0], [0, 0, 0]]),
self.evaluate(result1)[0, :, :, 0])
# Result 2:
# [ 0. 0. 0. 0. 0. 0. 0.]
# [ 0. 0. 1. 2. 3. 4. 0.]
# [ 0. 5. 6. 7. 8. 9. 0.]
# [ 0. 10. 11. 12. 13. 14. 0.]
# [ 0. 15. 16. 17. 18. 19. 0.]
# [ 0. 20. 21. 22. 23. 24. 0.]
# [ 0. 0. 0. 0. 0. 0. 0.]]
result2 = image_ops.extract_glimpse_v2(
img, [7, 7], [[0, 0]], normalized=False, noise='zero')
self.assertAllEqual(
np.asarray([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 2, 3, 4, 0],
[0, 5, 6, 7, 8, 9, 0], [0, 10, 11, 12, 13, 14, 0],
[0, 15, 16, 17, 18, 19, 0], [0, 20, 21, 22, 23, 24, 0],
[0, 0, 0, 0, 0, 0, 0]]),
self.evaluate(result2)[0, :, :, 0])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/attention_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeBmpOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test
class DecodeBmpOpTest(test.TestCase):
def testex1(self):
img_bytes = [[[0, 0, 255], [0, 255, 0]], [[255, 0, 0], [255, 255, 255]]]
# Encoded BMP bytes from Wikipedia
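    # Field layout (little-endian): 2-byte signature, 4-byte file size,
    # two 2-byte reserved fields, 4-byte pixel-array offset, then a 40-byte
    # BITMAPINFOHEADER (width 2, height 2, 1 plane, 24 bits per pixel),
    # followed by BGR pixel rows padded to 4-byte boundaries.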
encoded_bytes = [
0x42, 0x40,
0x46, 0, 0, 0,
0, 0,
0, 0,
0x36, 0, 0, 0,
0x28, 0, 0, 0,
0x2, 0, 0, 0,
0x2, 0, 0, 0,
0x1, 0,
0x18, 0,
0, 0, 0, 0,
0x10, 0, 0, 0,
0x13, 0xb, 0, 0,
0x13, 0xb, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0xff,
0xff, 0xff, 0xff,
0, 0,
0xff, 0, 0,
0, 0xff, 0,
0, 0,
]
byte_string = bytes(bytearray(encoded_bytes))
img_in = constant_op.constant(byte_string, dtype=dtypes.string)
decode = array_ops.squeeze(image_ops.decode_bmp(img_in))
with self.cached_session():
decoded = self.evaluate(decode)
self.assertAllEqual(decoded, img_bytes)
def testGrayscale(self):
img_bytes = [[[255], [0]], [[255], [0]]]
    encoded_bytes = [
        0x42, 0x40,
        0x3d, 0, 0, 0,
        0, 0,
        0, 0,
        0x36, 0, 0, 0,
        0x28, 0, 0, 0,
        0x2, 0, 0, 0,
        0x2, 0, 0, 0,
        0x1, 0,
        0x8, 0,
        0, 0, 0, 0,
        0x10, 0, 0, 0,
        0x13, 0xb, 0, 0,
        0x13, 0xb, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0xff, 0, 0, 0,
        0xff, 0, 0, 0,
    ]
byte_string = bytes(bytearray(encoded_bytes))
img_in = constant_op.constant(byte_string, dtype=dtypes.string)
decode = image_ops.decode_bmp(img_in)
with self.cached_session():
decoded = self.evaluate(decode)
self.assertAllEqual(decoded, img_bytes)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/decode_bmp_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for summary V1 tensor op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.summary import summary as summary_lib
class SummaryV1TensorOpTest(test.TestCase):
def _SummarySingleValue(self, s):
summ = summary_pb2.Summary()
summ.ParseFromString(s)
self.assertEqual(len(summ.value), 1)
return summ.value[0]
def _AssertNumpyEq(self, actual, expected):
self.assertTrue(np.array_equal(actual, expected))
def testTags(self):
with self.cached_session() as sess:
c = constant_op.constant(1)
s1 = summary_lib.tensor_summary("s1", c)
with ops.name_scope("foo"):
s2 = summary_lib.tensor_summary("s2", c)
with ops.name_scope("zod"):
s3 = summary_lib.tensor_summary("s3", c)
s4 = summary_lib.tensor_summary("TensorSummary", c)
summ1, summ2, summ3, summ4 = self.evaluate([s1, s2, s3, s4])
v1 = self._SummarySingleValue(summ1)
self.assertEqual(v1.tag, "s1")
v2 = self._SummarySingleValue(summ2)
self.assertEqual(v2.tag, "foo/s2")
v3 = self._SummarySingleValue(summ3)
self.assertEqual(v3.tag, "foo/zod/s3")
v4 = self._SummarySingleValue(summ4)
self.assertEqual(v4.tag, "foo/zod/TensorSummary")
def testScalarSummary(self):
with self.cached_session() as sess:
const = constant_op.constant(10.0)
summ = summary_lib.tensor_summary("foo", const)
result = self.evaluate(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, 10)
def testStringSummary(self):
s = six.b("foobar")
with self.cached_session() as sess:
const = constant_op.constant(s)
summ = summary_lib.tensor_summary("foo", const)
result = self.evaluate(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, s)
def testManyScalarSummary(self):
with self.cached_session() as sess:
const = array_ops.ones([5, 5, 5])
summ = summary_lib.tensor_summary("foo", const)
result = self.evaluate(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, np.ones([5, 5, 5]))
def testManyStringSummary(self):
strings = [[six.b("foo bar"), six.b("baz")], [six.b("zoink"), six.b("zod")]]
with self.cached_session() as sess:
const = constant_op.constant(strings)
summ = summary_lib.tensor_summary("foo", const)
result = self.evaluate(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, strings)
def testManyBools(self):
bools = [True, True, True, False, False, False]
with self.cached_session() as sess:
const = constant_op.constant(bools)
summ = summary_lib.tensor_summary("foo", const)
result = self.evaluate(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, bools)
def testSummaryDescriptionAndDisplayName(self):
with self.cached_session() as sess:
def get_description(summary_op):
summ_str = self.evaluate(summary_op)
summ = summary_pb2.Summary()
summ.ParseFromString(summ_str)
return summ.value[0].metadata
const = constant_op.constant(1)
# Default case; no description or display name
simple_summary = summary_lib.tensor_summary("simple", const)
descr = get_description(simple_summary)
self.assertEqual(descr.display_name, "")
self.assertEqual(descr.summary_description, "")
# Values are provided via function args
with_values = summary_lib.tensor_summary(
"simple",
const,
display_name="my name",
summary_description="my description")
descr = get_description(with_values)
self.assertEqual(descr.display_name, "my name")
self.assertEqual(descr.summary_description, "my description")
# Values are provided via the SummaryMetadata arg
metadata = summary_pb2.SummaryMetadata()
metadata.display_name = "my name"
metadata.summary_description = "my description"
with_metadata = summary_lib.tensor_summary(
"simple", const, summary_metadata=metadata)
descr = get_description(with_metadata)
self.assertEqual(descr.display_name, "my name")
self.assertEqual(descr.summary_description, "my description")
# If both SummaryMetadata and explicit args are provided, the args win
overwrite = summary_lib.tensor_summary(
"simple",
const,
summary_metadata=metadata,
display_name="overwritten",
summary_description="overwritten")
descr = get_description(overwrite)
self.assertEqual(descr.display_name, "overwritten")
self.assertEqual(descr.summary_description, "overwritten")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/summary_v1_tensor_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.linalg_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.platform import test
def _AddTest(test_class, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test_class, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test_class, test_name, fn)
def _RandomPDMatrix(n, rng, dtype=np.float64):
"""Random positive definite matrix."""
temp = rng.randn(n, n).astype(dtype)
if dtype in [np.complex64, np.complex128]:
temp.imag = rng.randn(n, n)
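  # np.conj(temp).dot(temp.T) == conj(temp @ temp^H), which is Hermitian
  # and almost surely positive definite for a random dense temp.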
return np.conj(temp).dot(temp.T)
class CholeskySolveTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
@test_util.run_deprecated_v1
def test_works_with_five_different_random_pos_def_matrices(self):
for n in range(1, 6):
for np_type, atol in [(np.float32, 0.05), (np.float64, 1e-5)]:
with self.session(use_gpu=True):
# Create 2 x n x n matrix
array = np.array(
[_RandomPDMatrix(n, self.rng),
_RandomPDMatrix(n, self.rng)]).astype(np_type)
chol = linalg_ops.cholesky(array)
for k in range(1, 3):
rhs = self.rng.randn(2, n, k).astype(np_type)
x = linalg_ops.cholesky_solve(chol, rhs)
self.assertAllClose(
rhs, math_ops.matmul(array, x).eval(), atol=atol)
class LogdetTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(42)
@test_util.run_deprecated_v1
def test_works_with_five_different_random_pos_def_matrices(self):
for n in range(1, 6):
for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
(np.complex64, 0.05), (np.complex128, 1e-5)]:
matrix = _RandomPDMatrix(n, self.rng, np_dtype)
_, logdet_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
          # A batched 2 x n x n variant of this input is kept here,
          # commented out, for reference:
          # matrix = np.array(
          #     [_RandomPDMatrix(n, self.rng, np_dtype),
          #      _RandomPDMatrix(n, self.rng, np_dtype)]).astype(np_dtype)
logdet_tf = linalg.logdet(matrix)
self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol)
def test_works_with_underflow_case(self):
for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
(np.complex64, 0.05), (np.complex128, 1e-5)]:
matrix = (np.eye(20) * 1e-6).astype(np_dtype)
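      # det(1e-6 * I_20) = 1e-120, which underflows float32/complex64, so a
      # direct determinant would read as 0; logdet works in log space and
      # stays finite.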
_, logdet_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
logdet_tf = linalg.logdet(matrix)
self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol)
class SlogdetTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(42)
@test_util.run_deprecated_v1
def test_works_with_five_different_random_pos_def_matrices(self):
for n in range(1, 6):
for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
(np.complex64, 0.05), (np.complex128, 1e-5)]:
matrix = _RandomPDMatrix(n, self.rng, np_dtype)
sign_np, log_abs_det_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
sign_tf, log_abs_det_tf = linalg.slogdet(matrix)
self.assertAllClose(
log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol)
self.assertAllClose(sign_np, self.evaluate(sign_tf), atol=atol)
def test_works_with_underflow_case(self):
for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
(np.complex64, 0.05), (np.complex128, 1e-5)]:
matrix = (np.eye(20) * 1e-6).astype(np_dtype)
sign_np, log_abs_det_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
sign_tf, log_abs_det_tf = linalg.slogdet(matrix)
self.assertAllClose(
log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol)
self.assertAllClose(sign_np, self.evaluate(sign_tf), atol=atol)
class AdjointTest(test.TestCase):
def test_compare_to_numpy(self):
    for dtype in np.float32, np.float64, np.complex64, np.complex128:
matrix_np = np.array([[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j,
6 + 6j]]).astype(dtype)
expected_transposed = np.conj(matrix_np.T)
with self.session():
matrix = ops.convert_to_tensor(matrix_np)
transposed = linalg.adjoint(matrix)
self.assertEqual((3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, self.evaluate(transposed))
class EyeTest(parameterized.TestCase, test.TestCase):
def testShapeInferenceNoBatch(self):
self.assertEqual((2, 2), linalg_ops.eye(num_rows=2).shape)
self.assertEqual((2, 3), linalg_ops.eye(num_rows=2, num_columns=3).shape)
def testShapeInferenceStaticBatch(self):
batch_shape = (2, 3)
self.assertEqual(
(2, 3, 2, 2),
linalg_ops.eye(num_rows=2, batch_shape=batch_shape).shape)
self.assertEqual(
(2, 3, 2, 3),
linalg_ops.eye(
num_rows=2, num_columns=3, batch_shape=batch_shape).shape)
@parameterized.named_parameters(
("DynamicRow",
lambda: array_ops.placeholder_with_default(2, shape=None),
lambda: None),
("DynamicRowStaticColumn",
lambda: array_ops.placeholder_with_default(2, shape=None),
lambda: 3),
("StaticRowDynamicColumn",
lambda: 2,
lambda: array_ops.placeholder_with_default(3, shape=None)),
("DynamicRowDynamicColumn",
lambda: array_ops.placeholder_with_default(2, shape=None),
lambda: array_ops.placeholder_with_default(3, shape=None)))
def testShapeInferenceStaticBatchWith(self, num_rows_fn, num_columns_fn):
num_rows = num_rows_fn()
num_columns = num_columns_fn()
batch_shape = (2, 3)
identity_matrix = linalg_ops.eye(
num_rows=num_rows,
num_columns=num_columns,
batch_shape=batch_shape)
self.assertEqual(4, identity_matrix.shape.ndims)
self.assertEqual((2, 3), identity_matrix.shape[:2])
if num_rows is not None and not isinstance(num_rows, ops.Tensor):
self.assertEqual(2, identity_matrix.shape[-2])
if num_columns is not None and not isinstance(num_columns, ops.Tensor):
self.assertEqual(3, identity_matrix.shape[-1])
@parameterized.parameters(
itertools.product(
# num_rows
[0, 1, 2, 5],
# num_columns
[None, 0, 1, 2, 5],
# batch_shape
[None, [], [2], [2, 3]],
# dtype
[
dtypes.int32,
dtypes.int64,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.complex128
])
)
def test_eye_no_placeholder(self, num_rows, num_columns, batch_shape, dtype):
eye_np = np.eye(num_rows, M=num_columns, dtype=dtype.as_numpy_dtype)
if batch_shape is not None:
eye_np = np.tile(eye_np, batch_shape + [1, 1])
eye_tf = self.evaluate(linalg_ops.eye(
num_rows,
num_columns=num_columns,
batch_shape=batch_shape,
dtype=dtype))
self.assertAllEqual(eye_np, eye_tf)
@parameterized.parameters(
itertools.product(
# num_rows
[0, 1, 2, 5],
# num_columns
[0, 1, 2, 5],
# batch_shape
[[], [2], [2, 3]],
# dtype
[
dtypes.int32,
dtypes.int64,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.complex128
])
)
@test_util.run_deprecated_v1
def test_eye_with_placeholder(
self, num_rows, num_columns, batch_shape, dtype):
eye_np = np.eye(num_rows, M=num_columns, dtype=dtype.as_numpy_dtype)
eye_np = np.tile(eye_np, batch_shape + [1, 1])
num_rows_placeholder = array_ops.placeholder(
dtypes.int32, name="num_rows")
num_columns_placeholder = array_ops.placeholder(
dtypes.int32, name="num_columns")
batch_shape_placeholder = array_ops.placeholder(
dtypes.int32, name="batch_shape")
eye = linalg_ops.eye(
num_rows_placeholder,
num_columns=num_columns_placeholder,
batch_shape=batch_shape_placeholder,
dtype=dtype)
with self.session(use_gpu=True) as sess:
eye_tf = sess.run(
eye,
feed_dict={
num_rows_placeholder: num_rows,
num_columns_placeholder: num_columns,
batch_shape_placeholder: batch_shape
})
self.assertAllEqual(eye_np, eye_tf)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/linalg_ops_test.py
|
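A note on the EyeTest expectations above: linalg_ops.eye with a batch_shape is expected to match np.eye tiled across the batch dimensions, which is exactly how the tests build eye_np. A minimal standalone sketch of that equivalence (pure NumPy; the shapes here are illustrative, not taken from the tests):

import numpy as np

# eye(num_rows=2, num_columns=3, batch_shape=[2, 3]) should equal the
# 2x3 identity tiled over a (2, 3) batch, giving shape (2, 3, 2, 3).
eye_np = np.eye(2, M=3, dtype=np.float32)
batched = np.tile(eye_np, [2, 3, 1, 1])
assert batched.shape == (2, 3, 2, 3)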
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.argmax_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ArgMaxTest(test.TestCase):
def _testArg(self,
method,
x,
axis,
expected_values,
use_gpu=False,
expected_err_re=None):
with self.session(use_gpu=use_gpu):
ans = method(x, axis=axis)
if expected_err_re is None:
tf_ans = self.evaluate(ans)
# Defaults to int64 output.
self.assertEqual(np.int64, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
self.assertShapeEqual(expected_values, ans)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def _testBothArg(self,
method,
x,
axis,
expected_values,
expected_err_re=None):
self._testArg(method, x, axis, expected_values, True, expected_err_re)
self._testArg(method, x, axis, expected_values, False, expected_err_re)
def _testBasic(self, dtype):
x = np.asarray(100 * np.random.randn(200), dtype=dtype)
# Check that argmin and argmax match numpy along the primary axis
self._testBothArg(math_ops.argmax, x, 0, x.argmax())
self._testBothArg(math_ops.argmin, x, 0, x.argmin())
def _testDim(self, dtype):
x = np.asarray(100 * np.random.randn(3, 2, 4, 5, 6), dtype=dtype)
# Check that argmin and argmax match numpy along all axes
for axis in range(-5, 5):
self._testBothArg(math_ops.argmax, x, axis, x.argmax(axis))
self._testBothArg(math_ops.argmin, x, axis, x.argmin(axis))
def testFloat(self):
self._testBasic(np.float32)
self._testDim(np.float32)
def testFloatInt32Output(self):
x = np.asarray(100 * np.random.randn(200), dtype=np.float32)
expected_values = x.argmax()
with self.session(use_gpu=True):
ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)
# The values are equal when comparing int32 to int64 because
# the values don't have a range that exceeds 32-bit integers.
self.assertAllEqual(tf_ans, expected_values)
expected_values = x.argmin()
with self.session(use_gpu=True):
ans = math_ops.argmin(x, axis=0, output_type=dtypes.int32)
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
def testDouble(self):
self._testBasic(np.float64)
self._testDim(np.float64)
def testInt32(self):
self._testBasic(np.int32)
self._testDim(np.int32)
def testInt64(self):
self._testBasic(np.int64)
self._testDim(np.int64)
def testEmpty(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
with self.assertRaisesOpError(
r"Reduction axis 0 is empty in shape \[0\]"):
op([], 0).eval()
@test_util.run_deprecated_v1
def testDefaultAxis(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
ans = op([1]).eval()
self.assertAllEqual(ans, 0)
@test_util.run_deprecated_v1
def testOutputEmpty(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
ret = op(array_ops.zeros(shape=[1, 0, 2]), axis=-1).eval()
self.assertEqual(ret.shape, (1, 0))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/argmax_op_test.py
|
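The axis loop in _testDim above runs from -5 to 4 because argmax/argmin accept negative axes with NumPy semantics: a negative axis counts back from the last dimension. A quick standalone NumPy illustration of the convention the test relies on:

import numpy as np

x = np.random.randn(3, 2, 4)
# axis=-1 and axis=2 name the same reduction axis for a rank-3 array.
assert np.array_equal(x.argmax(axis=-1), x.argmax(axis=2))
assert np.array_equal(x.argmin(axis=-3), x.argmin(axis=0))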
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeRaw op from parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import zlib
from six import BytesIO
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
class DecodeCompressedOpTest(test.TestCase):
def _compress(self, bytes_in, compression_type):
if not compression_type:
return bytes_in
elif compression_type == "ZLIB":
return zlib.compress(bytes_in)
else:
out = BytesIO()
with gzip.GzipFile(fileobj=out, mode="wb") as f:
f.write(bytes_in)
return out.getvalue()
@test_util.run_deprecated_v1
def testDecompress(self):
for compression_type in ["ZLIB", "GZIP", ""]:
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[2])
decompressed = parsing_ops.decode_compressed(
in_bytes, compression_type=compression_type)
self.assertEqual([2], decompressed.get_shape().as_list())
result = decompressed.eval(
feed_dict={in_bytes: [self._compress(b"AaAA", compression_type),
self._compress(b"bBbb", compression_type)]})
self.assertAllEqual([b"AaAA", b"bBbb"], result)
@test_util.run_deprecated_v1
def testDecompressWithRaw(self):
for compression_type in ["ZLIB", "GZIP", ""]:
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decompressed = parsing_ops.decode_compressed(
in_bytes, compression_type=compression_type)
decode = parsing_ops.decode_raw(decompressed, out_type=dtypes.int16)
result = decode.eval(
feed_dict={in_bytes: [self._compress(b"AaBC", compression_type)]})
self.assertAllEqual(
[[ord("A") + ord("a") * 256, ord("B") + ord("C") * 256]], result)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/decode_compressed_op_test.py
|
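The expected values in testDecompressWithRaw encode the little-endian byte order that decode_raw assumes for int16: the first byte of each pair is the low-order byte, hence ord("A") + ord("a") * 256 for b"Aa". The same decoding in plain NumPy (a standalone sketch, not part of the test):

import numpy as np

raw = b"AaBC"
decoded = np.frombuffer(raw, dtype="<i2")  # little-endian int16
expected = [ord("A") + ord("a") * 256, ord("B") + ord("C") * 256]
assert decoded.tolist() == expected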
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
TIMEOUT = 1
class StageTest(test.TestCase):
@test_util.run_deprecated_v1
def testSimple(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea([dtypes.float32])
stage = stager.put([v])
y = stager.get()
y = math_ops.reduce_max(math_ops.matmul(y, y))
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i})
self.assertAllClose(4 * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
@test_util.run_deprecated_v1
def testMultiple(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea([dtypes.float32, dtypes.float32])
stage = stager.put([x, v])
z, y = stager.get()
y = math_ops.reduce_max(z * math_ops.matmul(y, y))
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i})
self.assertAllClose(
4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
@test_util.run_deprecated_v1
def testDictionary(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea(
[dtypes.float32, dtypes.float32],
shapes=[[], [128, 128]],
names=['x', 'v'])
stage = stager.put({'x': x, 'v': v})
ret = stager.get()
z = ret['x']
y = ret['v']
y = math_ops.reduce_max(z * math_ops.matmul(y, y))
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i})
self.assertAllClose(
4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
def testColocation(self):
gpu_dev = test.gpu_device_name()
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(gpu_dev):
stager = data_flow_ops.StagingArea([dtypes.float32])
y = stager.put([v])
expected_name = gpu_dev if 'gpu' not in gpu_dev else '/device:GPU:0'
self.assertEqual(y.device, expected_name)
with ops.device('/cpu:0'):
x = stager.get()[0]
self.assertEqual(x.device, '/device:CPU:0')
G.finalize()
@test_util.run_deprecated_v1
def testPeek(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
p = array_ops.placeholder(dtypes.int32, name='p')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea(
[
dtypes.int32,
], shapes=[[]])
stage = stager.put([x])
peek = stager.peek(p)
ret = stager.get()
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
for i in range(10):
sess.run(stage, feed_dict={x: i})
for i in range(10):
self.assertTrue(sess.run(peek, feed_dict={p: i}) == [i])
@test_util.run_deprecated_v1
def testSizeAndClear(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32, name='x')
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea(
[dtypes.float32, dtypes.float32],
shapes=[[], [128, 128]],
names=['x', 'v'])
stage = stager.put({'x': x, 'v': v})
ret = stager.get()
size = stager.size()
clear = stager.clear()
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1})
self.assertEqual(sess.run(size), 1)
sess.run(stage, feed_dict={x: -1})
self.assertEqual(sess.run(size), 2)
sess.run(clear)
self.assertEqual(sess.run(size), 0)
@test_util.run_deprecated_v1
def testCapacity(self):
self.skipTest('b/123423516 this test is flaky on gpu.')
capacity = 3
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea(
[
dtypes.int32,
], capacity=capacity, shapes=[[]])
stage = stager.put([x])
ret = stager.get()
size = stager.size()
G.finalize()
from six.moves import queue as Queue
import threading
queue = Queue.Queue()
n = 8
with self.session(use_gpu=True, graph=G) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
def thread_run():
for i in range(n):
sess.run(stage, feed_dict={x: i})
queue.put(0)
t = threading.Thread(target=thread_run)
t.daemon = True
t.start()
# Get tokens from the queue until a timeout occurs
try:
for i in range(n):
queue.get(timeout=TIMEOUT)
except Queue.Empty:
pass
      # Should have timed out on iteration `capacity`.
      if i != capacity:
self.fail("Expected to timeout on iteration '{}' "
"but instead timed out on iteration '{}' "
"Staging Area size is '{}' and configured "
"capacity is '{}'.".format(capacity, i, sess.run(size),
capacity))
# Should have capacity elements in the staging area
      self.assertEqual(sess.run(size), capacity)
# Clear the staging area completely
for i in range(n):
self.assertTrue(sess.run(ret) == [i])
# It should now be empty
      self.assertEqual(sess.run(size), 0)
@test_util.run_deprecated_v1
def testMemoryLimit(self):
memory_limit = 512 * 1024 # 512K
    chunk = 200 * 1024  # 200K
capacity = memory_limit // chunk
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.uint8, name='x')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea(
[
dtypes.uint8,
], memory_limit=memory_limit, shapes=[[]])
stage = stager.put([x])
ret = stager.get()
size = stager.size()
G.finalize()
from six.moves import queue as Queue
import threading
import numpy as np
queue = Queue.Queue()
n = 8
with self.session(use_gpu=True, graph=G) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
def thread_run():
for i in range(n):
sess.run(stage, feed_dict={x: np.full(chunk, i, dtype=np.uint8)})
queue.put(0)
t = threading.Thread(target=thread_run)
t.daemon = True
t.start()
# Get tokens from the queue until a timeout occurs
try:
for i in range(n):
queue.get(timeout=TIMEOUT)
except Queue.Empty:
pass
      # Should have timed out on iteration `capacity`.
      if i != capacity:
self.fail("Expected to timeout on iteration '{}' "
"but instead timed out on iteration '{}' "
"Staging Area size is '{}' and configured "
"capacity is '{}'.".format(capacity, i, sess.run(size),
capacity))
# Should have capacity elements in the staging area
      self.assertEqual(sess.run(size), capacity)
# Clear the staging area completely
for i in range(n):
self.assertTrue(np.all(sess.run(ret)[0] == i))
      self.assertEqual(sess.run(size), 0)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/stage_op_test.py
|
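testCapacity and testMemoryLimit above rely on the same synchronization idea: the producer thread blocks once the staging area is full, so the token queue times out at exactly the iteration named `capacity`. The pattern is easier to see with a plain bounded queue (a Python 3 sketch of the synchronization only; queue.Queue stands in for the StagingArea):

import queue
import threading

capacity, n = 3, 8
staging = queue.Queue(maxsize=capacity)  # stand-in for the StagingArea
tokens = queue.Queue()

def producer():
  for i in range(n):
    staging.put(i)  # blocks once `capacity` items are buffered
    tokens.put(0)

threading.Thread(target=producer, daemon=True).start()
received = 0
try:
  for _ in range(n):
    tokens.get(timeout=1)
    received += 1
except queue.Empty:
  pass
assert received == capacity  # the producer stalled on put number capacity + 1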
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StringToNumber op from parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
_ERROR_MESSAGE = "StringToNumberOp could not correctly convert string: "
class StringToNumberOpTest(test.TestCase):
def _test(self, tf_type, good_pairs, bad_pairs):
with self.cached_session():
# Build a small testing graph.
input_string = array_ops.placeholder(dtypes.string)
output = parsing_ops.string_to_number(
input_string, out_type=tf_type)
# Check all the good input/output pairs.
for instr, outnum in good_pairs:
result, = output.eval(feed_dict={input_string: [instr]})
self.assertAllClose([outnum], [result])
# Check that the bad inputs produce the right errors.
for instr, outstr in bad_pairs:
with self.assertRaisesOpError(outstr):
output.eval(feed_dict={input_string: [instr]})
@test_util.run_deprecated_v1
def testToFloat(self):
self._test(dtypes.float32,
[("0", 0), ("3", 3), ("-1", -1),
("1.12", 1.12), ("0xF", 15), (" -10.5", -10.5),
("3.40282e+38", 3.40282e+38),
# Greater than max value of float.
("3.40283e+38", float("INF")),
("-3.40283e+38", float("-INF")),
# Less than min value of float.
("NAN", float("NAN")),
("INF", float("INF"))],
[("10foobar", _ERROR_MESSAGE + "10foobar")])
@test_util.run_deprecated_v1
def testToDouble(self):
self._test(dtypes.float64,
[("0", 0), ("3", 3), ("-1", -1),
("1.12", 1.12), ("0xF", 15), (" -10.5", -10.5),
("3.40282e+38", 3.40282e+38),
# Greater than max value of float.
("3.40283e+38", 3.40283e+38),
# Less than min value of float.
("-3.40283e+38", -3.40283e+38),
("NAN", float("NAN")),
("INF", float("INF"))],
[("10foobar", _ERROR_MESSAGE + "10foobar")])
@test_util.run_deprecated_v1
def testToInt32(self):
self._test(dtypes.int32,
[("0", 0), ("3", 3), ("-1", -1),
(" -10", -10),
("-2147483648", -2147483648),
("2147483647", 2147483647)],
[ # Less than min value of int32.
("-2147483649", _ERROR_MESSAGE + "-2147483649"),
# Greater than max value of int32.
("2147483648", _ERROR_MESSAGE + "2147483648"),
("2.9", _ERROR_MESSAGE + "2.9"),
("10foobar", _ERROR_MESSAGE + "10foobar")])
@test_util.run_deprecated_v1
def testToInt64(self):
self._test(dtypes.int64,
[("0", 0), ("3", 3), ("-1", -1),
(" -10", -10),
("-2147483648", -2147483648),
("2147483647", 2147483647),
("-2147483649", -2147483649), # Less than min value of int32.
("2147483648", 2147483648)], # Greater than max value of int32.
[("2.9", _ERROR_MESSAGE + "2.9"),
("10foobar", _ERROR_MESSAGE + "10foobar")])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/string_to_number_op_test.py
|
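The overflow cases in testToFloat and testToDouble hinge on 3.40283e+38 sitting just above float32's maximum (about 3.4028235e+38) while remaining comfortably inside float64's range. NumPy exhibits the same boundary (standalone sketch):

import numpy as np

assert np.isinf(np.float32(3.40283e+38))     # overflows float32 to inf
assert np.isfinite(np.float64(3.40283e+38))  # representable as float64
assert np.isfinite(np.float32(3.40282e+38))  # just under float32 max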
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_join_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringJoinOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testStringJoin(self):
input0 = ["a", "b"]
input1 = "a"
input2 = [["b"], ["c"]]
with self.cached_session():
output = string_ops.string_join([input0, input1])
self.assertAllEqual(output.eval(), [b"aa", b"ba"])
output = string_ops.string_join([input0, input1], separator="--")
self.assertAllEqual(output.eval(), [b"a--a", b"b--a"])
output = string_ops.string_join([input0, input1, input0], separator="--")
self.assertAllEqual(output.eval(), [b"a--a--a", b"b--a--b"])
output = string_ops.string_join([input1] * 4, separator="!")
self.assertEqual(output.eval(), b"a!a!a!a")
output = string_ops.string_join([input2] * 2, separator="")
self.assertAllEqual(output.eval(), [[b"bb"], [b"cc"]])
with self.assertRaises(ValueError): # Inconsistent shapes
string_ops.string_join([input0, input2]).eval()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/string_join_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _NumpyUpdate(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
indx = i[:-1] + (indx,)
ref[indx] = updates[i]
_TF_OPS_TO_NUMPY = {
state_ops.batch_scatter_update: _NumpyUpdate,
}
class ScatterTest(test.TestCase):
def _VariableRankTest(self,
tf_scatter,
vtype,
itype,
repeat_indices=False,
updates_are_scalar=False,
method=False):
np.random.seed(8)
with self.cached_session(use_gpu=False):
for indices_shape in (2,), (3, 7), (3, 4, 7):
for extra_shape in (), (5,), (5, 9):
          # Generate random indices into the last dimension for the numpy
          # comparison (duplicates are possible).
sparse_dim = len(indices_shape) - 1
indices = np.random.randint(
indices_shape[sparse_dim], size=indices_shape, dtype=itype)
updates = _AsType(
np.random.randn(*(indices_shape + extra_shape)), vtype)
old = _AsType(np.random.randn(*(indices_shape + extra_shape)), vtype)
# Scatter via numpy
new = old.copy()
np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref = variables.Variable(old)
ref.initializer.run()
if method:
            ref.batch_scatter_update(ops.IndexedSlices(updates, indices))
else:
tf_scatter(ref, indices, updates).eval()
self.assertAllClose(ref.eval(), new)
@test_util.run_deprecated_v1
def testVariableRankUpdate(self):
vtypes = [np.float32, np.float64]
for vtype in vtypes:
for itype in (np.int32, np.int64):
self._VariableRankTest(
state_ops.batch_scatter_update, vtype, itype)
@test_util.run_deprecated_v1
def testBooleanScatterUpdate(self):
with self.session(use_gpu=False) as session:
var = variables.Variable([True, False])
update0 = state_ops.batch_scatter_update(var, [1], [True])
update1 = state_ops.batch_scatter_update(
var, constant_op.constant(
[0], dtype=dtypes.int64), [False])
var.initializer.run()
session.run([update0, update1])
self.assertAllEqual([False, True], self.evaluate(var))
@test_util.run_deprecated_v1
def testScatterOutOfRange(self):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
with self.session(use_gpu=False):
ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
state_ops.batch_scatter_update(ref, indices, updates).eval()
# Test some out of range errors.
indices = np.array([-1, 0, 5])
with self.assertRaisesOpError(
r'indices\[0\] = \[-1\] does not index into shape \[6\]'):
state_ops.batch_scatter_update(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
with self.assertRaisesOpError(r'indices\[2\] = \[6\] does not index into '
r'shape \[6\]'):
state_ops.batch_scatter_update(ref, indices, updates).eval()
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/batch_scatter_ops_test.py
|
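In _NumpyUpdate above, the index arithmetic i[:-1] + (indx,) keeps the batch coordinates of each element of `indices` and replaces only the last coordinate with the stored index, which is the defining property of a batched scatter: every batch row scatters into itself. A small worked sketch:

import numpy as np

ref = np.zeros((2, 3))
indices = np.array([[2, 0, 1],
                    [1, 2, 0]])  # one permutation per batch row
updates = np.array([[10., 11., 12.],
                    [20., 21., 22.]])
for i, indx in np.ndenumerate(indices):
  ref[i[:-1] + (indx,)] = updates[i]
# Row 0 scatters within row 0, row 1 within row 1.
assert ref.tolist() == [[11., 12., 10.], [22., 20., 21.]]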
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class Conv1DTest(test.TestCase):
def testBasic(self):
"""Test that argument passing to conv1d is handled properly."""
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant([1, 2, 3, 4], dtype=dtype)
x = array_ops.expand_dims(x, 0) # Add batch dimension
x = array_ops.expand_dims(x, 2) # And depth dimension
filters = constant_op.constant([2, 1], dtype=dtype)
filters = array_ops.expand_dims(filters, 1) # in_channels
filters = array_ops.expand_dims(filters, 2) # out_channels
# Filters is 2x1x1
for stride in [1, 2]:
with self.cached_session(use_gpu=test.is_gpu_available()):
c = nn_ops.conv1d(x, filters, stride, padding="VALID")
reduced = array_ops.squeeze(c)
output = self.evaluate(reduced)
if stride == 1:
self.assertEqual(len(output), 3)
self.assertAllClose(output,
[2 * 1 + 1 * 2, 2 * 2 + 1 * 3, 2 * 3 + 1 * 4])
else:
self.assertEqual(len(output), 2)
self.assertAllClose(output, [2 * 1 + 1 * 2, 2 * 3 + 1 * 4])
def testConv1DTranspose(self):
with self.cached_session():
stride = 2
# Input, output: [batch, width, depth]
x_shape = [2, 4, 3]
y_shape = [2, 9, 2]
# Filter: [kernel_width, output_depth, input_depth]
f_shape = [3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv1d_transpose(
x, f, y_shape, strides=stride, padding="VALID")
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[1]):
for w in xrange(pad, y_shape[1] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
w_in = w % stride == 0 and w > pad and w < y_shape[1] - 1 - pad
if w_in:
target += 3.0
cache_values[n, w, k] = target
# copy values in the border
cache_values[n, 0, k] = cache_values[n, 1, k]
cache_values[n, -1, k] = cache_values[n, -2, k]
self.assertAllClose(cache_values, value)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/conv1d_test.py
|
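testConv1DTranspose derives its expected values by counting how many filter taps overlap each output position. The same numbers fall out of the direct scatter definition of a transposed convolution, where each input position adds a filter-sized patch into the output (a sketch under the test's shapes: all-ones input and filter, stride 2, VALID padding):

import numpy as np

x = np.ones((2, 4, 3), dtype=np.float32)  # [batch, width, in_depth]
f = np.ones((3, 2, 3), dtype=np.float32)  # [kernel, out_depth, in_depth]
stride = 2
out = np.zeros((2, 9, 2), dtype=np.float32)  # out width = (4 - 1) * 2 + 3
for n in range(x.shape[0]):
  for w_in in range(x.shape[1]):
    for k in range(f.shape[0]):
      for c in range(f.shape[1]):
        out[n, stride * w_in + k, c] += np.dot(x[n, w_in], f[k, c])
# Positions reachable from two input locations sum to 6; the rest to 3.
assert out[0, 2, 0] == 6.0 and out[0, 1, 0] == 3.0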
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Lu."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class LuOpTest(test.TestCase):
@property
def float_types(self):
return set((np.float64, np.float32, np.complex64, np.complex128))
def _verifyLuBase(self, x, lower, upper, perm, verification,
output_idx_type):
lower_np, upper_np, perm_np, verification_np = self.evaluate(
[lower, upper, perm, verification])
self.assertAllClose(x, verification_np)
self.assertShapeEqual(x, lower)
self.assertShapeEqual(x, upper)
self.assertAllEqual(x.shape[:-1], perm.shape.as_list())
# Check dtypes are as expected.
self.assertEqual(x.dtype, lower_np.dtype)
self.assertEqual(x.dtype, upper_np.dtype)
self.assertEqual(output_idx_type.as_numpy_dtype, perm_np.dtype)
# Check that the permutation is valid.
if perm_np.shape[-1] > 0:
perm_reshaped = np.reshape(perm_np, (-1, perm_np.shape[-1]))
for perm_vector in perm_reshaped:
self.assertAllClose(np.arange(len(perm_vector)), np.sort(perm_vector))
def _verifyLu(self, x, output_idx_type=dtypes.int64):
# Verify that Px = LU.
lu, perm = linalg_ops.lu(x, output_idx_type=output_idx_type)
# Prepare the lower factor of shape num_rows x num_rows
lu_shape = np.array(lu.shape.as_list())
batch_shape = lu_shape[:-2]
num_rows = lu_shape[-2]
num_cols = lu_shape[-1]
lower = array_ops.matrix_band_part(lu, -1, 0)
if num_rows > num_cols:
eye = linalg_ops.eye(
num_rows, batch_shape=batch_shape, dtype=lower.dtype)
lower = array_ops.concat([lower, eye[..., num_cols:]], axis=-1)
elif num_rows < num_cols:
lower = lower[..., :num_rows]
# Fill the diagonal with ones.
ones_diag = array_ops.ones(
np.append(batch_shape, num_rows), dtype=lower.dtype)
lower = array_ops.matrix_set_diag(lower, ones_diag)
# Prepare the upper factor.
upper = array_ops.matrix_band_part(lu, 0, -1)
verification = math_ops.matmul(lower, upper)
    # Permute the rows of the product of the triangular factors.
if num_rows > 0:
# Reshape the product of the triangular factors and permutation indices
# to a single batch dimension. This makes it easy to apply
# invert_permutation and gather_nd ops.
perm_reshaped = array_ops.reshape(perm, [-1, num_rows])
verification_reshaped = array_ops.reshape(verification,
[-1, num_rows, num_cols])
# Invert the permutation in each batch.
inv_perm_reshaped = map_fn.map_fn(array_ops.invert_permutation,
perm_reshaped)
batch_size = perm_reshaped.shape.as_list()[0]
# Prepare the batch indices with the same shape as the permutation.
# The corresponding batch index is paired with each of the `num_rows`
# permutation indices.
batch_indices = math_ops.cast(
array_ops.broadcast_to(
math_ops.range(batch_size)[:, None], perm_reshaped.shape),
dtype=output_idx_type)
permuted_verification_reshaped = array_ops.gather_nd(
verification_reshaped,
array_ops.stack([batch_indices, inv_perm_reshaped], axis=-1))
# Reshape the verification matrix back to the original shape.
verification = array_ops.reshape(permuted_verification_reshaped,
lu_shape)
self._verifyLuBase(x, lower, upper, perm, verification,
output_idx_type)
def testBasic(self):
data = np.array([[4., -1., 2.], [-1., 6., 0], [10., 0., 5.]])
for dtype in (np.float32, np.float64):
for output_idx_type in (dtypes.int32, dtypes.int64):
self._verifyLu(data.astype(dtype), output_idx_type=output_idx_type)
for dtype in (np.complex64, np.complex128):
for output_idx_type in (dtypes.int32, dtypes.int64):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyLu(complex_data, output_idx_type=output_idx_type)
def testPivoting(self):
# This matrix triggers partial pivoting because the first diagonal entry
# is small.
data = np.array([[1e-9, 1., 0.], [1., 0., 0], [0., 1., 5]])
self._verifyLu(data.astype(np.float32))
for dtype in (np.float32, np.float64):
self._verifyLu(data.astype(dtype))
_, p = linalg_ops.lu(data)
p_val = self.evaluate([p])
# Make sure p_val is not the identity permutation.
self.assertNotAllClose(np.arange(3), p_val)
for dtype in (np.complex64, np.complex128):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyLu(complex_data)
      _, p = linalg_ops.lu(complex_data)
p_val = self.evaluate([p])
# Make sure p_val is not the identity permutation.
self.assertNotAllClose(np.arange(3), p_val)
def testInvalidMatrix(self):
# LU factorization gives an error when the input is singular.
# Note: A singular matrix may return without error but it won't be a valid
# factorization.
for dtype in self.float_types:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
linalg_ops.lu(
np.array([[1., 2., 3.], [2., 4., 6.], [2., 3., 4.]],
dtype=dtype)))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
linalg_ops.lu(
np.array([[[1., 2., 3.], [2., 4., 6.], [1., 2., 3.]],
[[1., 2., 3.], [3., 4., 5.], [5., 6., 7.]]],
dtype=dtype)))
def testBatch(self):
simple_array = np.array([[[1., -1.], [2., 5.]]]) # shape (1, 2, 2)
self._verifyLu(simple_array)
self._verifyLu(np.vstack((simple_array, simple_array)))
odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
self._verifyLu(np.vstack((odd_sized_array, odd_sized_array)))
batch_size = 200
# Generate random matrices.
np.random.seed(42)
matrices = np.random.rand(batch_size, 5, 5)
self._verifyLu(matrices)
# Generate random complex valued matrices.
np.random.seed(52)
matrices = np.random.rand(batch_size, 5,
5) + 1j * np.random.rand(batch_size, 5, 5)
self._verifyLu(matrices)
def testLargeMatrix(self):
# Generate random matrices.
n = 500
np.random.seed(64)
data = np.random.rand(n, n)
self._verifyLu(data)
# Generate random complex valued matrices.
np.random.seed(129)
data = np.random.rand(n, n) + 1j * np.random.rand(n, n)
self._verifyLu(data)
@test_util.run_v1_only("b/120545219")
def testEmpty(self):
self._verifyLu(np.empty([0, 2, 2]))
self._verifyLu(np.empty([2, 0, 0]))
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
lu1, p1 = linalg_ops.lu(matrix1)
lu2, p2 = linalg_ops.lu(matrix2)
lu1_val, p1_val, lu2_val, p2_val = self.evaluate([lu1, p1, lu2, p2])
self.assertAllEqual(lu1_val, lu2_val)
self.assertAllEqual(p1_val, p2_val)
class LuBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(4096, 4096),
(513, 2, 2),
(513, 8, 8),
(513, 256, 256),
(4, 513, 2, 2),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (2.0 * n) + np.diag(
np.ones(n).astype(np.float32))
return np.tile(matrix, batch_shape + (1, 1))
def benchmarkLuOp(self):
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
lu, p = linalg_ops.lu(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(lu, p),
min_iters=25,
name="lu_cpu_{shape}".format(shape=shape))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/device:GPU:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
lu, p = linalg_ops.lu(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(lu, p),
min_iters=25,
name="lu_gpu_{shape}".format(shape=shape))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/lu_op_test.py
|
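The identity at the heart of _verifyLu is that gathering the rows of L @ U with the inverted permutation reproduces the input matrix. For a single matrix the check shrinks to a few lines of NumPy around the op, with np.argsort playing the role of invert_permutation (a sketch assuming eager execution and the public tf.linalg.lu API):

import numpy as np
import tensorflow as tf

a = np.array([[1e-9, 1., 0.], [1., 0., 0.], [0., 1., 5.]], dtype=np.float32)
lu, perm = tf.linalg.lu(a)
lu, perm = np.array(lu), np.array(perm)
lower = np.tril(lu, -1) + np.eye(3, dtype=np.float32)  # unit diagonal
upper = np.triu(lu)
inv_perm = np.argsort(perm)  # NumPy stand-in for invert_permutation
np.testing.assert_allclose(a, (lower @ upper)[inv_perm], rtol=1e-5)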
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.Einsum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class EinsumOpTest(test.TestCase):
def _check(self, s, *input_shapes, **kwargs):
dtype = kwargs.pop('dtype', np.float32)
r = np.random.RandomState(0)
inputs = []
for shape in input_shapes:
arr = np.array(r.randn(*shape)).astype(dtype)
if dtype == np.complex64 or dtype == np.complex128:
arr += 1j * np.array(r.randn(*shape)).astype(dtype)
inputs.append(arr)
input_tensors = [constant_op.constant(x, shape=x.shape) for x in inputs]
a = np.einsum(s, *inputs)
b = self.evaluate(gen_linalg_ops.einsum(input_tensors, s))
self.assertAllClose(a, b, atol=1e-4, rtol=1e-4)
def testUnary(self):
self._check('->', ())
self._check('aa->', (3, 3))
self._check('aa->a', (3, 3))
self._check('aaa->', (3, 3, 3))
self._check('aaa->a', (3, 3, 3))
self._check('aab->a', (3, 3, 4))
self._check('ab->', (3, 3))
self._check('ab->ab', (3, 3))
self._check('abc->b', (3, 4, 5))
self._check('abc->ca', (3, 4, 5))
self._check('abc->cab', (3, 4, 5))
self._check('aabcc->a', (3, 3, 5, 4, 4))
self._check('aabcc->ac', (3, 3, 5, 4, 4))
self._check('aabcd->ad', (3, 3, 5, 4, 4))
def testUnaryEllipsis(self):
self._check('...->...', ())
self._check('...->', ())
self._check('->...', ())
# Tests from dask
self._check('a...a->a...', (2, 2))
self._check('a...a->', (2, 2))
self._check('a...a->...', (2, 5, 1, 2))
self._check('a...a->a...', (2, 1, 2))
self._check('a...a->a...', (2, 3, 4, 5, 2))
self._check('...ijk->...ki', (3, 4, 5))
self._check('...ijk->...ki', (1, 3, 4, 5))
self._check('...ijk->...ki', (2, 2, 3, 4, 5))
# Repeated indices.
self._check('i...ii->...i', (3, 2, 3, 3))
def testBinary(self):
self._check(',->', (), ())
self._check('a,a->', (3,), (3,))
self._check('a,a->a', (3,), (3,))
self._check('ba,b->', (3, 2), (3,))
self._check('ab,b->a', (3, 4), (4,))
self._check('ab,ab->', (3, 4), (3, 4))
self._check('nij,jk->nik', (5, 2, 3), (3, 4))
self._check('abc,bad->abcd', (1, 2, 3), (2, 1, 4))
# Repeated indices.
self._check('ijj,k->ik', (2, 3, 3), (4,))
self._check('aba,a->b', (3, 4, 3), (3,))
# From https://github.com/dask/dask/pull/3412#discussion_r182413444
self._check('aab,bc->ac', (2, 2, 3), (3, 4))
self._check('aab,bcc->ac', (2, 2, 3), (3, 4, 4))
# Based on https://github.com/google/jax/issues/37#issuecomment-448572187
self._check('sa,shb->shab', (2, 1), (2, 3, 4))
def testBroadcasting(self):
# Batch matmul without broadcasting.
self._check('...ij,...jk->...ik', (5, 1, 2, 3), (5, 1, 3, 4))
# Batch matmul with broadcasting.
self._check('...ij,...jk->...ik', (1, 2, 3), (3, 5))
self._check('...ij,...jk->...ik', (2, 3), (1, 3, 5))
self._check('...ij,...jk->...ik', (5, 2, 3), (3, 5))
self._check('...ij,...jk->...ik', (2, 3), (5, 3, 5))
self._check('...ij,...jk->...ik', (3, 1, 2, 3), (1, 1, 7, 3, 5))
self._check('i...j,j...k->...ik', (2, 1, 3, 1, 3), (3, 1, 7, 5))
# Broadcasting with repeated indices.
self._check('ij,jk...k->i...', (3, 2), (2, 4, 1, 4))
self._check('ij,jk...k->...i', (3, 2), (2, 4, 5, 4))
self._check('ijj,jk...k->i...', (3, 2, 2), (2, 4, 1, 4))
self._check('i...jj,jk...k->i...', (3, 3, 1, 2, 2), (2, 4, 1, 5, 4))
# Following 2 from # https://stackoverflow.com/a/19203475/1611416
self._check('...abc,...abcd->...d', (1, 1, 2, 3, 4), (5, 2, 3, 4, 6))
self._check('ab...,b->ab...', (2, 3, 1, 1, 5), (3,))
def testDtypes(self):
for dtype in [np.float64, np.float32, np.complex64, np.complex128]:
self._check('ij,jk->ik', (2, 2), (2, 2), dtype=dtype)
self._check('ji,jk->ik', (2, 2), (2, 2), dtype=dtype)
self._check('ji,kj->ik', (2, 2), (2, 2), dtype=dtype)
self._check('ij,jk->ki', (2, 2), (2, 2), dtype=dtype)
self._check('ji,kj->ki', (2, 2), (2, 2), dtype=dtype)
@test_util.run_in_graph_and_eager_modes
def testInvalid(self):
r = np.random.RandomState(0)
cases = [
# incorrect rank.
('ij,jk->ik', r.randn(1, 2, 3), r.randn(3, 4)),
('...ij,jk->ik', r.randn(3), r.randn(3, 4)),
# inconsistent dimensions.
('ij,jk->ik', r.randn(2, 3), r.randn(4, 4)),
# broadcasting is invalid
('...ij,...jk->...ik', r.randn(5, 2, 3), r.randn(7, 3, 4)),
# output should have ellipsis when broadcasting shape is
# non-empty.
('...ij,...jk->ik', r.randn(2, 2, 3), r.randn(3, 4)),
]
for args in cases:
with self.assertRaises((ValueError, errors.InvalidArgumentError)):
_ = self.evaluate(gen_linalg_ops.einsum(args[1:], args[0]))
placeholders = [
array_ops.placeholder_with_default(x, shape=None) for x in args[1:]
]
with self.assertRaises((ValueError, errors.InvalidArgumentError)):
_ = self.evaluate(gen_linalg_ops.einsum(placeholders, args[0]))
@test_util.run_in_graph_and_eager_modes
def testPlaceholder(self):
def check(equation, *input_and_placeholder_shapes):
r = np.random.RandomState(0)
inputs = []
input_placeholders = []
for actual_shape, placeholder_shape in input_and_placeholder_shapes:
input_np = np.array(r.randn(*actual_shape))
inputs.append(input_np)
input_placeholders.append(
array_ops.placeholder_with_default(input_np, placeholder_shape))
a = np.einsum(equation, *inputs)
b = self.evaluate(gen_linalg_ops.einsum(input_placeholders, equation))
self.assertAllClose(a, b, atol=1e-4, rtol=1e-4)
check('bijl,bjkm->bik', ((9, 2, 3, 5), (None, None, None, 5)),
((9, 3, 4, 7), (None, None, 4, None)))
check('bijl,bjkm->bik', ((9, 2, 3, 5), None), ((9, 3, 4, 7), None))
check('...ij,...->...i', ((4, 3, 1, 2), (None, 3, None, 2)),
((4, 3), (None, 3)))
check('...ij,...jk->...ik', ((3, 1, 2, 3), None), ((1, 7, 3, 4), None))
def testOutputRepeatedLabels(self):
# This is the reverse operation of repeated input labels, to be used for
# computing symbolic gradients of einsum.
r = np.random.RandomState(0)
a = r.randn(2, 2)
s = 'a->aa'
diag_a = np.diag(np.diag(a))
b = self.evaluate(gen_linalg_ops.einsum([np.diag(a)], s))
self.assertAllClose(diag_a, b, atol=1e-4, rtol=1e-4)
class EinsumBenchmark(test.Benchmark):
cases = [
# Unary cases.
['ijk->i', 100],
['ijk->kji', 100],
# Regular matmul or batch matmul.
['ij,jk->ik', 1000],
['ji,kj->ik', 1000],
['ab,ab->', 100],
['ab,ba->', 100],
['abc,abc->', 100],
['abc,bac->', 100],
['abc,cba->', 100],
['bij,bjk->bik', 100],
['bji,bjk->bki', 100],
['ikl,kji->kl', 100],
['klj,lki->ij', 100],
['ijk,ilj->kli', 100],
['kij,mkb->ijmb', 100],
['abcd,ad->bc', 40],
# Larger binary contractions.
['ijk,jklm->il', 40],
['efabc,eabcd->efd', 30],
['fabec,abcde->fde', 30],
['efabc,edabc->efd', 30],
['eadbf,dfebc->ecfad', 30],
['abcdef,bcdfg->abcdeg', 30],
]
def benchmarkEinsum(self):
for equation, dim in self.cases:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device('/cpu:0'):
r = np.random.RandomState(0)
input_subscripts = equation.split('->')[0].split(',')
input_vars = []
for subscript in input_subscripts:
input_shape = (dim,) * len(subscript)
input_vars.append(
variables.Variable(np.array(r.randn(*input_shape), np.float32)))
variables.global_variables_initializer().run()
# Call einsum_v1.
self.run_op_benchmark(
sess,
special_math_ops.einsum(equation, *input_vars),
min_iters=50,
name='einsum_v1_cpu_({})_{}'.format(equation, dim))
# Call gen_linalg_ops.einsum.
self.run_op_benchmark(
sess,
gen_linalg_ops.einsum(input_vars, equation),
min_iters=50,
name='einsum_v2_cpu_({})_{}'.format(equation, dim))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/einsum_op_test.py
|
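testBroadcasting above exercises einsum's ellipsis rules: '...' absorbs any leading batch dimensions and broadcasts them between operands. np.einsum, which the test compares against, follows the same rules (standalone sketch):

import numpy as np

a = np.random.randn(1, 2, 3)  # batch dim 1 broadcasts...
b = np.random.randn(5, 3, 4)  # ...against batch dim 5
c = np.einsum('...ij,...jk->...ik', a, b)
assert c.shape == (5, 2, 4)
# Equivalent to broadcasting a to (5, 2, 3), then batch matmul with b.
expected = np.matmul(np.broadcast_to(a, (5, 2, 3)), b)
np.testing.assert_allclose(c, expected)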
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _NumpyAdd(ref, indices, updates):
# Since numpy advanced assignment does not support repeated indices,
# we run a simple loop to perform scatter_add.
for i, indx in np.ndenumerate(indices):
ref[indx] += updates[i]
def _NumpyAddScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] += update
def _NumpySub(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] -= updates[i]
def _NumpySubScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] -= update
def _NumpyMul(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] *= updates[i]
def _NumpyMulScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] *= update
def _NumpyDiv(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] /= updates[i]
def _NumpyDivScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] /= update
def _NumpyMin(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], updates[i])
def _NumpyMinScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], update)
def _NumpyMax(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], updates[i])
def _NumpyMaxScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], update)
def _NumpyUpdate(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = updates[i]
def _NumpyUpdateScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = update
_TF_OPS_TO_NUMPY = {
state_ops.scatter_update: _NumpyUpdate,
state_ops.scatter_add: _NumpyAdd,
state_ops.scatter_sub: _NumpySub,
state_ops.scatter_mul: _NumpyMul,
state_ops.scatter_div: _NumpyDiv,
state_ops.scatter_min: _NumpyMin,
state_ops.scatter_max: _NumpyMax,
}
_TF_OPS_TO_NUMPY_SCALAR = {
state_ops.scatter_update: _NumpyUpdateScalar,
state_ops.scatter_add: _NumpyAddScalar,
state_ops.scatter_sub: _NumpySubScalar,
state_ops.scatter_mul: _NumpyMulScalar,
state_ops.scatter_div: _NumpyDivScalar,
state_ops.scatter_min: _NumpyMinScalar,
state_ops.scatter_max: _NumpyMaxScalar,
}
class ScatterTest(test.TestCase):
def _VariableRankTest(self,
tf_scatter,
vtype,
itype,
repeat_indices=False,
updates_are_scalar=False):
np.random.seed(8)
with self.cached_session(use_gpu=True):
for indices_shape in (), (2,), (3, 7), (3, 4, 7):
for extra_shape in (), (5,), (5, 9):
# Generate random indices with no duplicates for easy numpy comparison
size = np.prod(indices_shape, dtype=itype)
first_dim = 3 * size
indices = np.arange(first_dim)
np.random.shuffle(indices)
indices = indices[:size]
if size > 1 and repeat_indices:
# Add some random repeats.
indices = indices[:size // 2]
for _ in range(size - size // 2):
# Randomly append some repeats.
indices = np.append(indices,
indices[np.random.randint(size // 2)])
np.random.shuffle(indices)
indices = indices.reshape(indices_shape)
if updates_are_scalar:
updates = _AsType(np.random.randn(), vtype)
else:
updates = _AsType(
np.random.randn(*(indices_shape + extra_shape)), vtype)
# Clips small values to avoid division by zero.
def clip_small_values(x):
threshold = 1e-4
sign = np.sign(x)
if isinstance(x, np.int32):
threshold = 1
sign = np.random.choice([-1, 1])
return threshold * sign if np.abs(x) < threshold else x
updates = np.vectorize(clip_small_values)(updates)
old = _AsType(np.random.randn(*((first_dim,) + extra_shape)), vtype)
# Scatter via numpy
new = old.copy()
if updates_are_scalar:
np_scatter = _TF_OPS_TO_NUMPY_SCALAR[tf_scatter]
else:
np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref = variables.Variable(old)
self.evaluate(ref.initializer)
self.evaluate(tf_scatter(ref, indices, updates))
self.assertAllClose(self.evaluate(ref), new)
def _VariableRankTests(self,
tf_scatter,
repeat_indices=False,
updates_are_scalar=False):
vtypes = [np.float32, np.float64]
if tf_scatter != state_ops.scatter_div:
vtypes.append(np.int32)
for vtype in vtypes:
for itype in (np.int32, np.int64):
self._VariableRankTest(tf_scatter, vtype, itype, repeat_indices,
updates_are_scalar)
def testVariableRankUpdate(self):
self._VariableRankTests(state_ops.scatter_update, False)
def testVariableRankAdd(self):
self._VariableRankTests(state_ops.scatter_add, False)
def testVariableRankSub(self):
self._VariableRankTests(state_ops.scatter_sub, False)
def testVariableRankMul(self):
self._VariableRankTests(state_ops.scatter_mul, False)
def testVariableRankDiv(self):
self._VariableRankTests(state_ops.scatter_div, False)
def testVariableRankMin(self):
self._VariableRankTests(state_ops.scatter_min, False)
def testVariableRankMax(self):
self._VariableRankTests(state_ops.scatter_max, False)
def testRepeatIndicesAdd(self):
self._VariableRankTests(state_ops.scatter_add, True)
def testRepeatIndicesSub(self):
self._VariableRankTests(state_ops.scatter_sub, True)
def testRepeatIndicesMul(self):
self._VariableRankTests(state_ops.scatter_mul, True)
def testRepeatIndicesDiv(self):
self._VariableRankTests(state_ops.scatter_div, True)
def testRepeatIndicesMin(self):
self._VariableRankTests(state_ops.scatter_min, True)
def testRepeatIndicesMax(self):
self._VariableRankTests(state_ops.scatter_max, True)
def testVariableRankUpdateScalar(self):
self._VariableRankTests(state_ops.scatter_update, False, True)
def testVariableRankAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, False, True)
def testVariableRankSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, False, True)
def testVariableRankMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, False, True)
def testVariableRankDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, False, True)
def testVariableRankMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, False, True)
def testVariableRankMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, False, True)
def testRepeatIndicesAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, True, True)
def testRepeatIndicesSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, True, True)
def testRepeatIndicesMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, True, True)
def testRepeatIndicesDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, True, True)
def testRepeatIndicesMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, True, True)
def testRepeatIndicesMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, True, True)
def testBooleanScatterUpdate(self):
if not test.is_gpu_available():
with self.session(use_gpu=False):
var = variables.Variable([True, False])
update0 = state_ops.scatter_update(var, 1, True)
update1 = state_ops.scatter_update(
var, constant_op.constant(
0, dtype=dtypes.int64), False)
self.evaluate(var.initializer)
self.evaluate([update0, update1])
self.assertAllEqual([False, True], self.evaluate(var))
def testScatterOutOfRangeCpu(self):
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
if not test.is_gpu_available():
with self.session(use_gpu=False):
ref = variables.Variable(params)
self.evaluate(ref.initializer)
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
self.evaluate(op(ref, indices, updates))
# Test some out of range errors.
indices = np.array([-1, 0, 5])
with self.assertRaisesOpError(
r'indices\[0\] = -1 is not in \[0, 6\)'):
self.evaluate(op(ref, indices, updates))
indices = np.array([2, 0, 6])
with self.assertRaisesOpError(r'indices\[2\] = 6 is not in \[0, 6\)'):
self.evaluate(op(ref, indices, updates))
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
if test.is_gpu_available():
return
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
# We don't test the implementation; just test there's no failures.
with test_util.force_gpu():
ref = variables.Variable(params)
self.evaluate(ref.initializer)
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
self.evaluate(op(ref, indices, updates))
        # Indices out of range should not fail.
indices = np.array([-1, 0, 5])
self.evaluate(op(ref, indices, updates))
indices = np.array([2, 0, 6])
self.evaluate(op(ref, indices, updates))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/scatter_ops_test.py
|
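The repeat_indices paths above exist because NumPy fancy assignment collapses duplicate indices, which is why _NumpyAdd and friends loop element by element instead of writing ref[indices] += updates. np.add.at is the vectorized form with the accumulate semantics scatter_add needs (standalone sketch):

import numpy as np

indices = np.array([0, 0, 2])
updates = np.array([1., 2., 5.])
ref = np.zeros(3)
ref[indices] += updates  # duplicate index 0 written only once: [2., 0., 5.]
assert ref.tolist() == [2., 0., 5.]
ref = np.zeros(3)
np.add.at(ref, indices, updates)  # duplicates accumulate: [3., 0., 5.]
assert ref.tolist() == [3., 0., 5.]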
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for new version of accumulate_n op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.control_flow_ops import while_loop as while_loop_v1
from tensorflow.python.platform import googletest
class AccumulateNV2Test(test_util.TensorFlowTestCase):
"""Tests of the new, differentiable version of accumulate_n."""
@test_util.run_deprecated_v1
def testFloat(self):
np.random.seed(12345)
x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
tf_x = ops.convert_n_to_tensor(x)
with self.session(use_gpu=True):
self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllClose(x[0] * 5,
math_ops.accumulate_n([tf_x[0]] * 5).eval())
@test_util.run_deprecated_v1
def testInt(self):
np.random.seed(54321)
x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
tf_x = ops.convert_n_to_tensor(x)
with self.session(use_gpu=True):
self.assertAllEqual(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllEqual(x[0] * 6,
math_ops.accumulate_n([tf_x[0]] * 6).eval())
@test_util.run_deprecated_v1
def testUnknownShape(self):
with self.session(use_gpu=True):
x0 = array_ops.placeholder(dtype=dtypes_lib.int32, shape=[None])
acc = math_ops.accumulate_n([x0, x0], shape=[None])
self.assertAllEqual([2, 4], acc.eval(feed_dict={x0: [1, 2]}))
@test_util.run_deprecated_v1
def testGrad(self):
np.random.seed(42)
for num_inputs in range(1, 10):
with self.cached_session(use_gpu=True) as sess:
input_vars = [
variables.Variable(10.0 * np.random.random())
for _ in range(0, num_inputs)
]
accum_n = math_ops.accumulate_n(input_vars)
self.evaluate(variables.global_variables_initializer())
accum_n_grad = gradients.gradients(accum_n, input_vars)
self.assertAllEqual(
np.repeat(1.0, num_inputs), # d/dx (x + y + ...) = 1
[g.eval() for g in accum_n_grad])
# The tests below used to be in a separate class under cwise_ops_test.py,
# which did not run in the default test target.
# Putting them here so that everything that exercises AccumulateNV2 is in
# one place and the default build runs all unit tests.
def testSimple(self):
with self.cached_session():
random_arrays = [
np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
]
random_tensors = [
ops.convert_to_tensor(x, dtype=dtypes_lib.float32)
for x in random_arrays
]
tf_val = math_ops.accumulate_n(random_tensors)
np_val = random_arrays[0]
for random_array in random_arrays[1:]:
np_val += random_array
self.assertAllClose(np_val, self.evaluate(tf_val))
  # Test that the AccumulateNV2 rewrite correctly adds the edges necessary to
  # propagate the while loop execution frame to all nodes.
def testAccumulateInsideWhileLoop(self):
with self.cached_session():
random_arrays = [
np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
]
random_tensors = [
ops.convert_to_tensor(x, dtype=dtypes_lib.float32)
for x in random_arrays
]
def cond_fn(i, acc, tensors):
del acc, tensors # unused
return i < 1 # do just one iteration
def body_fn(i, acc, tensors):
return i + 1, acc + math_ops.accumulate_n(tensors), tensors
zeros = np.zeros((16, 16, 16, 16)).astype(np.float32)
_, tf_val, _ = while_loop_v1(cond_fn, body_fn, (0, zeros, random_tensors))
np_val = random_arrays[0]
for random_array in random_arrays[1:]:
np_val += random_array
self.assertAllClose(np_val, self.evaluate(tf_val))
def testZeroArgs(self):
with self.cached_session():
with self.assertRaises(ValueError):
tf_val = math_ops.accumulate_n([])
self.evaluate(tf_val)
def testWrongShape(self):
with self.cached_session():
with self.assertRaises(ValueError):
a = variables.Variable(0.2)
b = variables.Variable(0.1)
math_ops.accumulate_n([a, b], shape=[2, 2]) # Should be shape=[]
def testIncompatibleShapes(self):
with self.cached_session():
with self.assertRaises(ValueError):
a = variables.Variable(np.array([0.1, 0.2]))
b = variables.Variable(np.array([[0.3], [0.4]]))
math_ops.accumulate_n([a, b])
def testWrongType(self):
with self.cached_session():
with self.assertRaises(TypeError):
a = variables.Variable(0.2, dtype=np.float32)
b = variables.Variable(0.1, dtype=np.float32)
math_ops.accumulate_n([a, b], tensor_dtype=np.int32)
def testWrongTypeOneInput(self):
# Scenario that used to trigger a bug, even when testWrongType() worked
with self.cached_session():
with self.assertRaises(TypeError):
a = variables.Variable(0.2, dtype=np.float32)
math_ops.accumulate_n([a], tensor_dtype=np.int32)
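# For readers: accumulate_n is semantically a memory-lean add_n. A minimal
# NumPy reference of the value the tests above assert (a sketch of the
# semantics, not the op's kernel; the helper name is hypothetical):
def _accumulate_n_reference(arrays):
  total = np.array(arrays[0], copy=True)
  for a in arrays[1:]:
    total += a  # the same running sum the tests build as np_val
  return total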
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/accumulate_n_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for coefficient-wise operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x**y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
_LT = lambda x, y: x < y
_LE = lambda x, y: x <= y
_GT = lambda x, y: x > y
_GE = lambda x, y: x >= y
_AND = lambda x, y: x & y
_OR = lambda x, y: x | y
_XOR = lambda x, y: x ^ y
_INV = lambda x: ~x
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), x_values
def _default_tolerance(dtype):
"""Returns a sensible default tolerance for comparing results of a given type.
Args:
dtype: A datatype.
"""
if dtype == np.float16:
return 5e-3
elif dtype in (np.float32, np.complex64):
return 1e-3
elif dtype in (np.float64, np.complex128):
return 1e-5
else:
return None # Fail fast for unexpected types
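# Usage sketch for the two helpers above (hypothetical, not called by any
# test): _sparsify zeroes entries below `thresh` and repackages the survivors
# as a SparseTensor; _default_tolerance picks a dtype-appropriate tolerance,
# e.g. 1e-3 for float32.
def _sparsify_usage_sketch():
  dense = np.random.rand(4, 3).astype(np.float32)
  sp, values = _sparsify(dense.copy(), thresh=0.5)  # _sparsify mutates x
  return sp, values, _default_tolerance(np.float32)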
class ComparisonOpTest(test.TestCase):
def _compareScalar(self, func, x, y, dtype):
with test_util.use_gpu():
out = func(
ops.convert_to_tensor(np.array([x]).astype(dtype)),
ops.convert_to_tensor(np.array([y]).astype(dtype)))
ret = self.evaluate(out)
return ret[0]
def testScalarCompareScalar(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
data = [-1, 0, 1]
for t in dtypes:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.less, x, y, t), x < y)
self.assertEqual(
self._compareScalar(math_ops.less_equal, x, y, t), x <= y)
self.assertEqual(
self._compareScalar(math_ops.greater, x, y, t), x > y)
self.assertEqual(
self._compareScalar(math_ops.greater_equal, x, y, t), x >= y)
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
for t in [np.complex64, np.complex128]:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
def _compare(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with test_util.use_gpu():
out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
def testTensorCompareTensor(self):
x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
y = np.linspace(20, -10, 6).reshape(1, 3, 2)
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(xt, yt, np.less, math_ops.less)
self._compare(xt, yt, np.less_equal, math_ops.less_equal)
self._compare(xt, yt, np.greater, math_ops.greater)
self._compare(xt, yt, np.greater_equal, math_ops.greater_equal)
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
# Complex types do not support ordering but do support equality tests.
for t in [np.complex64, np.complex128]:
xt = x.astype(t)
xt -= 1j * xt
yt = y.astype(t)
yt -= 1j * yt
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
if dtype in (np.complex64, np.complex128):
x -= 1j * x
y -= 1j * y
self._compare(x, y, np_func, tf_func)
self._compare(y, x, np_func, tf_func)
def _testBCastByFunc(self, np_func, tf_func, include_complex=False):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
]
if include_complex:
dtypes.extend([np.complex64, np.complex128])
for (xs, ys) in shapes:
for dtype in dtypes:
self._compareBCast(xs, ys, dtype, np_func, tf_func)
def testBCastLess(self):
self._testBCastByFunc(np.less, math_ops.less)
def testBCastLessEqual(self):
self._testBCastByFunc(np.less_equal, math_ops.less_equal)
def testBCastGreater(self):
self._testBCastByFunc(np.greater, math_ops.greater)
def testBCastGreaterEqual(self):
self._testBCastByFunc(np.greater_equal, math_ops.greater_equal)
def testBCastEqual(self):
self._testBCastByFunc(np.equal, math_ops.equal, include_complex=True)
def testBCastNotEqual(self):
self._testBCastByFunc(
np.not_equal, math_ops.not_equal, include_complex=True)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
funcs = [
math_ops.less, math_ops.less_equal, math_ops.greater,
math_ops.greater_equal, math_ops.equal, math_ops.not_equal
]
x = np.arange(0, 10).reshape([2, 5])
y = np.arange(0, 10).reshape([5, 2])
for t in dtypes:
for f in funcs:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
f(x.astype(t), y.astype(t))
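# All shape pairs fed to _compareBCast satisfy the NumPy/TF broadcasting
# rule: aligned from the trailing dimension, each pair of sizes must be
# equal or contain a 1. A sketch of that rule (not TF's shape inference):
def _shapes_are_broadcastable(xs, ys):
  for a, b in zip(reversed(xs), reversed(ys)):
    if a != b and a != 1 and b != 1:
      return False
  return True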
class LogicalOpTest(test.TestCase):
def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):
np_ans = np_func(x, y)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_val = self.evaluate(out)
self.assertEqual(out.dtype, dtypes_lib.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def _not(self, x, use_gpu=False):
np_ans = np.logical_not(x)
with test_util.device(use_gpu=use_gpu):
out = math_ops.logical_not(ops.convert_to_tensor(x))
tf_val = self.evaluate(out)
self.assertEqual(out.dtype, dtypes_lib.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def testScalar(self):
data = [np.array([True]), np.array([False])]
for use_gpu in [True, False]:
for x in data:
self._not(x, use_gpu)
for x in data:
for y in data:
self._compareBinary(x, y, np.logical_and, math_ops.logical_and,
use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor,
use_gpu)
def testTensor(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
for use_gpu in [True, False]:
self._not(x, use_gpu)
self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)
def testBCast(self):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
for (xs, ys) in shapes:
x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)
y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)
for use_gpu in [True, False]:
self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)
for f in [math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
f(x, y)
@test_util.run_deprecated_v1
def testUsingAsPythonValueFails(self):
# Ensure that we raise an error when the user attempts to treat a
# `Tensor` as a Python `bool`.
b = constant_op.constant(False)
with self.assertRaises(TypeError):
if b:
pass
x = constant_op.constant(3)
y = constant_op.constant(4)
with self.assertRaises(TypeError):
if x > y:
pass
z = constant_op.constant(7)
# The chained comparison should fail because Python computes `x <
# y` and short-circuits the comparison with `z` if it is `False`.
with self.assertRaises(TypeError):
_ = x < y < z
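# The element-wise spelling of the chained comparison rejected above is to
# combine the two halves explicitly, so nothing is coerced to a Python bool
# (a sketch; the helper is hypothetical):
def _chained_comparison_workaround(x, y, z):
  return math_ops.logical_and(x < y, y < z)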
class SelectOpTest(test.TestCase):
def _compare(self, fn, c, x, y, use_gpu):
np_ans = np.where(c, x, y)
with test_util.device(use_gpu=use_gpu):
out = fn(c, x, y)
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self,
fn,
c,
x,
y,
numeric_gradient_type=None,
x_init_value=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = fn(c, inx, iny)
s = list(np.shape(c))
if x_init_value is None:
x_init_value = x
if x.shape != y.shape:
x_init_value = np.broadcast_to(y, x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, out, s, x_init_value=x_init_value)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = fn(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inxf, s, outf, s, x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, fn, c, x, y, numeric_gradient_type=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = fn(c, inx, iny)
s = list(np.shape(c))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, s, out, s, x_init_value=x, delta=1.0)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = fn(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inyf, s, outf, s, x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testScalar(self, fn):
c = True
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testScalar(self):
self._testScalar(array_ops.where)
self._testScalar(array_ops.where_v2)
def _testScalarBroadcast(self, fn, c, x, y):
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testScalarBroadcast(self):
c = True
# where_v2 only
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 2) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 2) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(3, 2) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
def _testBasic(self, fn):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testBasic(self):
self._testBasic(array_ops.where)
self._testBasic(array_ops.where_v2)
def _testBasicBroadcast(self, fn, c, x, y):
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testBasicBroadcast(self):
c0 = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
c1 = np.random.randint(0, 2, 2).astype(np.bool).reshape(1, 1, 2)
c2 = np.random.randint(0, 2, 3).astype(np.bool).reshape(1, 3, 1)
c3 = np.random.randint(0, 2, 1).astype(np.bool).reshape(1, 1, 1)
for c in [c0, c1, c2, c3]:
# where_v2 only
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 2) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 2) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(3, 2) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
def _testGradients(self, fn):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [np.float16, np.float32, np.float64]:
xt = x.astype(t)
yt = y.astype(t)
if t == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(fn, c, xt, yt, np.float)
self._compareGradientY(fn, c, xt, yt, np.float)
else:
self._compareGradientX(fn, c, xt, yt)
self._compareGradientY(fn, c, xt, yt)
@test_util.run_deprecated_v1
def testGradients(self):
self._testGradients(array_ops.where)
self._testGradients(array_ops.where_v2)
@test_util.run_deprecated_v1
def testGradientsBroadcast(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
for t in [np.float32, np.float64]:
# where_v2 only
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 2) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 2) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(3, 2) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
def _testShapeMismatch(self, fn):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(2, 5, 3) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
fn(c, xt, yt)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
self._testShapeMismatch(array_ops.where)
self._testShapeMismatch(array_ops.where_v2)
def _testEmptyTensor(self, fn):
c = np.random.randint(0, 3, 0).astype(np.bool).reshape(1, 3, 0)
x = np.random.rand(1, 3, 0) * 100
y = np.random.rand(1, 3, 0) * 100
z_expected = np.zeros((1, 3, 0), dtype=np.float32)
with self.cached_session():
xt = x.astype(np.float32)
yt = y.astype(np.float32)
z = fn(c, xt, yt).eval()
self.assertAllEqual(z_expected, z)
@test_util.run_deprecated_v1
def testEmptyTensor(self):
self._testEmptyTensor(array_ops.where)
self._testEmptyTensor(array_ops.where_v2)
def _testNan(self, fn):
with self.cached_session():
for c in False, True:
for a in 7.0, np.nan:
for b in 5.0, np.nan:
x = fn(c, a, b).eval()
y = a if c else b
self.assertEqual(np.isnan(x), np.isnan(y))
@test_util.run_deprecated_v1
def testNan(self):
"""Verify that nans don't propagate where they shouldn't."""
self._testNan(array_ops.where)
self._testNan(array_ops.where_v2)
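# The invariant behind _testNan, in NumPy terms (a sketch): selection is per
# element, so a NaN in the branch that is not chosen never reaches the output.
def _nan_does_not_propagate_sketch():
  out = np.where(True, 7.0, np.nan)
  return np.isnan(out)  # False: the NaN sits in the untaken branch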
class BatchSelectOpTest(test.TestCase):
"""Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+."""
def _compare(self, c, x, y, use_gpu):
np_ans = np.dstack(
[x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(
[2, 0, 1])
with test_util.device(use_gpu=use_gpu):
out = array_ops.where(c, x, y)
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = array_ops.where(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, out, s, x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = array_ops.where(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inxf, s, outf, s, x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = array_ops.where(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, s, out, s, x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = array_ops.where(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inyf, s, outf, s, x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def testBasic(self):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(c, xt, yt, use_gpu=True)
@test_util.run_deprecated_v1
def testGradients(self):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [np.float16, np.float32, np.float64]:
xt = x.astype(t)
yt = y.astype(t)
if t == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(c, xt, yt, np.float)
self._compareGradientY(c, xt, yt, np.float)
else:
self._compareGradientX(c, xt, yt)
self._compareGradientY(c, xt, yt)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
c = np.random.randint(0, 2, 8).astype(np.bool)
x = np.random.rand(16, 3, 2) * 100
y = np.random.rand(16, 3, 2) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
array_ops.where(c, xt, yt)
class MinMaxOpTest(test.TestCase):
def _compare(self, x, y, use_gpu):
np_min, np_max = np.minimum(x, y), np.maximum(x, y)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
omin, omax = math_ops.minimum(inx, iny), math_ops.maximum(inx, iny)
tf_min, tf_max = self.evaluate([omin, omax])
self.assertAllEqual(np_min, tf_min)
self.assertAllEqual(np_max, tf_max)
def testBasic(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(1, 3, 2) * 100.
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
def testDifferentShapes(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(2) * 100. # should broadcast
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
def testScalar(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(1).item() * 100. # should broadcast
    # np.float64 and np.int64 are dropped here because TF automatically
    # converts the Python scalar to a 32-bit type.
for t in [np.float32, np.int32]:
self._compare(x.astype(t), t(y), use_gpu=False)
self._compare(x.astype(t), t(y), use_gpu=True)
def _compareGradientX(self, func, x, y):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, out, s, x_init_value=x)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, func, x, y):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, s, out, s, x_init_value=y)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testGradients(self):
x = np.random.rand(1, 3, 2) * 100.
# ensure x != y
y = x + (np.random.randint(2, size=x.shape) - .5) * 2 # -1 or +1
self._compareGradientX(math_ops.maximum, x, y)
self._compareGradientY(math_ops.maximum, x, y)
self._compareGradientX(math_ops.minimum, x, y)
self._compareGradientY(math_ops.minimum, x, y)
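# The gradients checked above route, per element, to whichever input wins
# the comparison. A NumPy sketch of maximum's backward rule (ties go to x in
# this sketch; the kernel's exact tie-breaking is not asserted by the tests):
def _maximum_grad_reference(x, y, g):
  return g * (x >= y), g * (x < y)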
class MathOpsOverloadTest(test.TestCase):
def _computeTensorAndLiteral(self, x, y, dtype, func):
with test_util.force_cpu():
inx = ops.convert_to_tensor(x, dtype=dtype)
z = func(inx, y) # Should use __add__, __sub__, etc.
return self.evaluate(z)
def _computeLiteralAndTensor(self, x, y, dtype, func):
with test_util.force_cpu():
iny = ops.convert_to_tensor(y, dtype=dtype)
z = func(x, iny) # Should use __radd__, __rsub__, etc.
return self.evaluate(z)
def _compareBinary(self, x, y, dtype, np_func, tf_func):
np_ans = np_func(x, y).astype(dtype.as_numpy_dtype)
self.assertAllClose(np_ans,
self._computeTensorAndLiteral(x, y, dtype, tf_func))
self.assertAllClose(np_ans,
self._computeLiteralAndTensor(x, y, dtype, tf_func))
def _compareUnary(self, x, dtype, np_func, tf_func):
np_ans = np_func(x).astype(dtype.as_numpy_dtype)
with test_util.force_cpu():
self.assertAllClose(
np_ans, self.evaluate(tf_func(ops.convert_to_tensor(x, dtype=dtype))))
def testOverload(self):
dtypes = [
dtypes_lib.float16,
dtypes_lib.float32,
dtypes_lib.float64,
dtypes_lib.int32,
dtypes_lib.int64,
dtypes_lib.complex64,
dtypes_lib.complex128,
]
funcs = [
(np.add, _ADD),
(np.subtract, _SUB),
(np.multiply, _MUL),
(np.power, _POW),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
]
for dtype in dtypes:
for np_func, tf_func in funcs:
if dtype in (dtypes_lib.complex64,
dtypes_lib.complex128) and tf_func == _FLOORDIV:
continue # floordiv makes no sense for complex
self._compareBinary(10, 5, dtype, np_func, tf_func)
# Mod only works for int32 and int64.
for dtype in [dtypes_lib.int32, dtypes_lib.int64]:
self._compareBinary(10, 3, dtype, np.mod, _MOD)
def testOverloadComparisons(self):
dtypes = [
dtypes_lib.float16,
dtypes_lib.float32,
dtypes_lib.float64,
dtypes_lib.int32,
dtypes_lib.int64,
]
funcs = [
(np.less, _LT),
(np.less_equal, _LE),
(np.greater, _GT),
(np.greater_equal, _GE),
]
for dtype in dtypes:
for np_func, tf_func in funcs:
self._compareBinary(10, 5, dtype, np_func, tf_func)
logical_funcs = [(np.logical_and, _AND), (np.logical_or, _OR),
(np.logical_xor, _XOR), (np.equal, math_ops.equal),
(np.not_equal, math_ops.not_equal)]
for np_func, tf_func in logical_funcs:
self._compareBinary(True, False, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(True, True, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(False, False, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(False, True, dtypes_lib.bool, np_func, tf_func)
self._compareBinary([True, True, False, False],
[True, False, True, False], dtypes_lib.bool, np_func,
tf_func)
self._compareUnary(True, dtypes_lib.bool, np.logical_not, _INV)
self._compareUnary(False, dtypes_lib.bool, np.logical_not, _INV)
self._compareUnary([True, False], dtypes_lib.bool, np.logical_not, _INV)
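# The lambdas at the top of this file exercise both dispatch directions:
# tensor-on-the-left goes through Tensor.__add__ and friends, and
# literal-on-the-left through Tensor.__radd__ and friends. A sketch
# (hypothetical helper, not called by any test):
def _overload_dispatch_sketch():
  t = ops.convert_to_tensor(10, dtype=dtypes_lib.int32)
  return t + 5, 5 + t  # __add__ and __radd__ respectively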
class IsFiniteInfNanTest(test.TestCase):
def _compare(self, x, use_gpu):
np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
ofinite, oinf, onan = math_ops.is_finite(inx), math_ops.is_inf(
inx), math_ops.is_nan(inx)
tf_finite, tf_inf, tf_nan = self.evaluate([ofinite, oinf, onan])
self.assertAllEqual(np_inf, tf_inf)
self.assertAllEqual(np_nan, tf_nan)
self.assertAllEqual(np_finite, tf_finite)
self.assertShapeEqual(np_inf, oinf)
self.assertShapeEqual(np_nan, onan)
self.assertShapeEqual(np_finite, ofinite)
def _testDtype(self, dtype):
fi = np.finfo(dtype)
data = np.array([
0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max, -np.inf,
np.inf, np.nan
]).astype(dtype)
self._compare(data, use_gpu=False)
self._compare(data, use_gpu=True)
def testHalf(self):
self._testDtype(np.float16)
def testFloat(self):
self._testDtype(np.float32)
def testDouble(self):
self._testDtype(np.float64)
def testSqrt(self):
for dtype in [np.float16, np.float32, np.float64]:
fi = np.finfo(dtype)
for size in [1, 3, 4, 7, 8, 63, 64, 65]:
# For float32 Eigen uses Carmack's fast vectorized sqrt algorithm.
# It is not accurate for very large arguments, so we test for
# fi.max/100 instead of fi.max here.
for value in [fi.min, -2, -1, 0, fi.tiny, 1, 2, 1000, fi.max / 100]:
x = np.full((size,), value, dtype=dtype)
np_y = np.sqrt(x)
np_nan = np.isnan(np_y)
with test_util.use_gpu():
tf_y = math_ops.sqrt(x)
tf_nan = math_ops.is_nan(tf_y)
if value < 0:
self.assertAllEqual(np_nan, self.evaluate(tf_nan))
else:
self.assertAllCloseAccordingToType(np_y, self.evaluate(tf_y))
class RoundingTest(test.TestCase):
def _compare_values(self, x, y=None):
y = np.rint(x) if y is None else np.asarray(y)
tf_rint = math_ops.rint(x)
np_rint = self.evaluate(tf_rint)
self.assertAllEqual(y, np_rint)
self.assertShapeEqual(y, tf_rint)
def _compare(self, x):
np_floor, np_ceil = np.floor(x), np.ceil(x)
inx = ops.convert_to_tensor(x)
ofloor, oceil = math_ops.floor(inx), math_ops.ceil(inx)
tf_floor, tf_ceil = self.evaluate([ofloor, oceil])
self.assertAllEqual(np_floor, tf_floor)
self.assertAllEqual(np_ceil, tf_ceil)
self.assertShapeEqual(np_floor, ofloor)
self.assertShapeEqual(np_ceil, oceil)
def _testDtype(self, dtype):
data = (np.arange(-3, 3) / 4.).reshape(1, 3, 2).astype(dtype)
self._compare(data)
# TODO: rint op is not supported for float16
if dtype is np.float16:
return
self._compare_values(data)
x = [0.5, 0.5000001]
y = [0.0, 1.0]
self._compare_values(x, y=y)
# numpy example
x = [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]
y = [-2., -2., -0., 0., 2., 2., 2.]
self._compare_values(x, y=y)
def testTypes(self):
self.skipTest("b/131162241")
for dtype in [np.float16, np.float32, np.float64]:
self._testDtype(dtype)
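# rint rounds halves to the nearest even value (banker's rounding), which is
# why the expectations above map 0.5 -> 0.0 and 1.5 -> 2.0. The same rule in
# NumPy (a sketch):
def _rint_reference(x):
  return np.rint(np.asarray(x))  # [-1.5, -0.5, 0.5, 1.5] -> [-2., -0., 0., 2.]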
class ComplexMakeRealImagTest(test.TestCase):
def _compareMake(self, real, imag, use_gpu):
np_ans = real + (1j) * imag
with test_util.device(use_gpu=use_gpu):
real = ops.convert_to_tensor(real)
imag = ops.convert_to_tensor(imag)
tf_ans = math_ops.complex(real, imag)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def testMake(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
for use_gpu in [False, True]:
self._compareMake(real, imag, use_gpu)
self._compareMake(real, 12.0, use_gpu)
self._compareMake(23.0, imag, use_gpu)
def _compareRealImag(self, cplx, use_gpu):
np_real, np_imag = np.real(cplx), np.imag(cplx)
np_zeros = np_real * 0
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(cplx)
tf_real = math_ops.real(inx)
tf_imag = math_ops.imag(inx)
tf_real_real = math_ops.real(tf_real)
tf_imag_real = math_ops.imag(tf_real)
self.assertAllEqual(np_real, self.evaluate(tf_real))
self.assertAllEqual(np_imag, self.evaluate(tf_imag))
self.assertAllEqual(np_real, self.evaluate(tf_real_real))
self.assertAllEqual(np_zeros, self.evaluate(tf_imag_real))
def testRealImag64(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + 1j * imag
self._compareRealImag(cplx, use_gpu=False)
self._compareRealImag(cplx, use_gpu=True)
def testRealImag128(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
cplx = real + 1j * imag
self._compareRealImag(cplx, use_gpu=False)
self._compareRealImag(cplx, use_gpu=True)
def _compareAngle(self, cplx, use_gpu):
np_angle = np.angle(cplx)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(cplx)
tf_angle = math_ops.angle(inx)
tf_angle_val = self.evaluate(tf_angle)
self.assertAllClose(np_angle, tf_angle_val)
self.assertShapeEqual(np_angle, tf_angle)
def testAngle64(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + 1j * imag
self._compareAngle(cplx, use_gpu=False)
self._compareAngle(cplx, use_gpu=True)
def testAngle(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
cplx = real + 1j * imag
self._compareAngle(cplx, use_gpu=False)
self._compareAngle(cplx, use_gpu=True)
@test_util.run_deprecated_v1
def testRealReal(self):
for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float32,
dtypes_lib.float64):
x = array_ops.placeholder(dtype)
y = math_ops.real(x)
self.assertEqual(x, y)
def _compareConj(self, cplx, use_gpu):
np_ans = np.conj(cplx)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(cplx)
tf_conj = math_ops.conj(inx)
tf_ans = self.evaluate(tf_conj)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, tf_conj)
def testConj64(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + 1j * imag
self._compareConj(cplx, use_gpu=False)
self._compareConj(cplx, use_gpu=True)
def testConj128(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
cplx = real + 1j * imag
self._compareConj(cplx, use_gpu=False)
self._compareConj(cplx, use_gpu=True)
@test_util.run_deprecated_v1
def testConjReal(self):
for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float16,
dtypes_lib.float32, dtypes_lib.float64):
x = array_ops.placeholder(dtype)
y = math_ops.conj(x)
self.assertEqual(x, y)
@test_util.run_deprecated_v1
def testConjString(self):
x = array_ops.placeholder(dtypes_lib.string)
with self.assertRaisesRegexp(TypeError,
r"Expected numeric or variant tensor"):
math_ops.conj(x)
def _compareGradient(self, x):
# x[:, 0] is real, x[:, 1] is imag. We combine real and imag into
# complex numbers. Then, we extract real and imag parts and
    # compute the squared sum. This is obviously the same as sum(real
# * real) + sum(imag * imag). We just want to make sure the
# gradient function is checked.
with self.cached_session():
inx = ops.convert_to_tensor(x)
real, imag = array_ops.split(value=inx, num_or_size_splits=2, axis=1)
real, imag = array_ops.reshape(real, [-1]), array_ops.reshape(imag, [-1])
cplx = math_ops.complex(real, imag)
cplx = math_ops.conj(cplx)
loss = math_ops.reduce_sum(math_ops.square(
math_ops.real(cplx))) + math_ops.reduce_sum(
math_ops.square(math_ops.imag(cplx)))
epsilon = 1e-3
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, list(x.shape), loss, [1], x_init_value=x, delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
def _compareBroadcastGradient(self, x):
x_ = ops.convert_to_tensor(x)
epsilon = 1e-3
with self.cached_session():
for args in [(x_, 0.), (0., x_)]:
z = math_ops.reduce_sum(math_ops.abs(math_ops.complex(*args)))
jacob_t, jacob_n = gradient_checker.compute_gradient(
x_, list(x.shape), z, [1], x_init_value=x, delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
@test_util.run_deprecated_v1
def testGradient(self):
# complex64
data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float32)
self._compareGradient(data)
self._compareBroadcastGradient(data)
# complex128
data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float64)
self._compareGradient(data)
def _compareMulGradient(self, data):
# data is a float matrix of shape [n, 4]. data[:, 0], data[:, 1],
# data[:, 2], data[:, 3] are real parts of x, imaginary parts of
# x, real parts of y and imaginary parts of y.
with self.cached_session():
inp = ops.convert_to_tensor(data)
xr, xi, yr, yi = array_ops.split(value=inp, num_or_size_splits=4, axis=1)
def vec(x): # Reshape to a vector
return array_ops.reshape(x, [-1])
xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)
def cplx(r, i): # Combine to a complex vector
return math_ops.complex(r, i)
x, y = cplx(xr, xi), cplx(yr, yi)
# z is x times y in complex plane.
z = x * y
# Defines the loss function as the sum of all coefficients of z.
loss = math_ops.reduce_sum(math_ops.real(z) + math_ops.imag(z))
epsilon = 0.005
jacob_t, jacob_n = gradient_checker.compute_gradient(
inp, list(data.shape), loss, [1], x_init_value=data, delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
@test_util.run_deprecated_v1
def testMulGradient(self):
data = np.arange(1, 2, 0.125).reshape([2, 4]).astype(np.float32)
self._compareMulGradient(data)
class AccumulateTest(test.TestCase):
def testSimple(self):
with self.cached_session():
random_arrays = [
np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
]
random_tensors = [
ops.convert_to_tensor(x, dtype=dtypes_lib.float32)
for x in random_arrays
]
tf_val = math_ops.accumulate_n(random_tensors)
np_val = random_arrays[0]
for random_array in random_arrays[1:]:
np_val += random_array
self.assertAllClose(np_val, self.evaluate(tf_val))
def testZeroArgs(self):
with self.cached_session():
with self.assertRaises(ValueError):
tf_val = math_ops.accumulate_n([])
self.evaluate(tf_val)
def testWrongShape(self):
with self.cached_session():
with self.assertRaises(ValueError):
a = variables.Variable(0.2)
b = variables.Variable(0.1)
math_ops.accumulate_n([a, b], shape=[2, 2]) # Should be shape=[]
def testWrongType(self):
with self.cached_session():
with self.assertRaises(TypeError):
a = variables.Variable(0.2, dtype=np.float32)
b = variables.Variable(0.1, dtype=np.float32)
math_ops.accumulate_n([a, b], tensor_dtype=np.int32)
def testWrongTypeOneInput(self):
# Scenario that used to trigger a bug, even when testWrongType() worked
with self.cached_session():
with self.assertRaises(TypeError):
a = variables.Variable(0.2, dtype=np.float32)
math_ops.accumulate_n([a], tensor_dtype=np.int32)
class PolyvalTest(test.TestCase):
def _runtest(self, dtype, degree):
x = np.random.rand(2, 2).astype(dtype)
coeffs = [np.random.rand(2, 2).astype(dtype) for _ in range(degree + 1)]
np_val = np.polyval(coeffs, x)
with self.cached_session():
tf_val = math_ops.polyval(coeffs, x)
self.assertAllClose(np_val, self.evaluate(tf_val))
def testSimple(self):
for dtype in [
np.int32, np.float32, np.float64, np.complex64, np.complex128
]:
for degree in range(5):
self._runtest(dtype, degree)
def testBroadcast(self):
dtype = np.float32
degree = 3
shapes = [(1,), (2, 1), (1, 2), (2, 2)]
for x_shape in shapes:
for coeff_shape in shapes:
x = np.random.rand(*x_shape).astype(dtype)
coeffs = [
np.random.rand(*coeff_shape).astype(dtype)
for _ in range(degree + 1)
]
np_val = np.polyval(coeffs, x)
with self.cached_session():
tf_val = math_ops.polyval(coeffs, x)
self.assertAllClose(np_val, self.evaluate(tf_val))
def testEmpty(self):
x = np.random.rand(2, 2).astype(np.float32)
coeffs = []
np_val = np.polyval(coeffs, x)
with self.cached_session():
tf_val = math_ops.polyval(coeffs, x)
self.assertAllClose(np_val, self.evaluate(tf_val))
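# math_ops.polyval follows np.polyval's convention (coeffs[0] scales the
# highest power) and, per its documentation, evaluates via Horner's
# recurrence. A NumPy sketch that reproduces the expected values above,
# including the empty-coeffs case:
def _polyval_reference(coeffs, x):
  result = np.zeros_like(x)
  for c in coeffs:
    result = result * x + c  # Horner step: fold in the next coefficient
  return result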
class SingularGradientOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testGradientAtSingularity(self):
if not compat.forward_compatible(2019, 9, 14):
self.skipTest("Skipping test for future functionality.")
ops_and_singularity = [
(gen_math_ops.reciprocal, (0.,)),
(gen_math_ops.rsqrt, (0.,)),
(gen_math_ops.sqrt, (0.,)),
(gen_math_ops.sqrt_grad, (
0.,
0.,
)),
(gen_math_ops.reciprocal_grad, (
1.,
0.,
)),
(gen_math_ops.tan, (np.pi / 2,)),
(gen_math_ops.log, (0.,)),
(gen_math_ops.log1p, (-1.,)),
(gen_math_ops.acosh, (0.,)),
(gen_math_ops.asin, (1.,)),
(gen_math_ops.acos, (1.,)),
(gen_math_ops.atan2, (0., 0.)),
(gen_math_ops.div, (1., 0.)),
(gen_math_ops.div_no_nan, (1., 0.)),
(gen_math_ops.real_div, (1., 0.)),
(math_ops.pow, (0., -1.)),
]
for op, singularity in ops_and_singularity:
for dtype in (dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.complex64, dtypes_lib.complex128):
if dtype.is_complex and op in [
gen_math_ops.asin, gen_math_ops.acos, gen_math_ops.atan2
]:
continue
if dtype == dtypes_lib.half and op in [
gen_math_ops.acosh, gen_math_ops.asin, gen_math_ops.acos,
gen_math_ops.atan2
]:
continue
with self.cached_session():
print("op = ", op, ", singularity = ", singularity, ", type = ",
dtype)
args = [constant_op.constant(s, dtype=dtype) for s in singularity]
grad_y = constant_op.constant(0, dtype=dtype)
y = op(*args)
g = gradients_impl.gradients(y, args, grad_ys=grad_y)
g_val = self.evaluate(g)
self.assertAllEqual(g_val, np.zeros(len(singularity)))
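# Why the explicit zeros matter: at a singularity the unguarded chain rule
# multiplies the incoming grad_y of 0 by an infinite local derivative, and
# 0 * inf is NaN. A NumPy sketch of the failure the guarded gradients avoid:
def _unguarded_singularity_grad_sketch():
  grad_y = 0.0
  local_derivative = np.inf  # e.g. d/dx sqrt(x) as x -> 0+
  return grad_y * local_derivative  # nan under IEEE-754 arithmetic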
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/cwise_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_v1_only("FIFOQueue removed from v2")
class FIFOQueueTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueHalf(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=(3, 2))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(
10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
@test_util.run_in_graph_and_eager_modes
def testMultipleDequeues(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue_many([[1, 2, 3]]))
a, b, c = self.evaluate([q.dequeue(), q.dequeue(), q.dequeue()])
self.assertAllEqual(set([1, 2, 3]), set([a, b, c]))
@test_util.run_in_graph_and_eager_modes
def testQueuesDontShare(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue(1))
q2 = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q2.enqueue(2))
self.assertAllEqual(self.evaluate(q2.dequeue()), 2)
self.assertAllEqual(self.evaluate(q.dequeue()), 1)
def testEnqueueDictWithoutNames(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue({"a": 12.0})
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue_many({"a": [12.0, 13.0]})
def testParallelEnqueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testDequeueHalf(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(3, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
self.evaluate(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(self.evaluate(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = self.evaluate(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, self.evaluate(size))
dequeued_t.op.run()
self.assertEqual(0, self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], self.evaluate(size_t))
enqueue_op.run()
self.assertEqual([0], self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpTo(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError("specified shapes"):
q.dequeue_many(0).eval()
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testDequeueUpToNoBlocking(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueUpToNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, (4, 4, 4, 4))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
    q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32),
                                ((), (2,)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
dtypes_lib.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(
([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many(
(array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
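    # Empty Python lists carry no dtype information; enqueue_many should
    # still convert them to tensors of the queue's declared component dtypes.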
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
enq = q.enqueue_many(([], []))
self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
with self.assertRaises(ValueError):
q.enqueue((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
def testEnqueueWrongShapeAtRuntime(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (3, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Expected \[3,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongShape(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (3, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,3,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
self.evaluate(dequeued_t)
def testParallelEnqueueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
self.evaluate(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(101)
enqueue_op.run()
close_op.run()
# Dequeue up to 101 items in parallel on 10 threads, from closed queue.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(50, dtypes_lib.float32, shapes=())
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
self.evaluate(enqueue_op)
def dequeue():
for _ in xrange(100):
          self.assertIn(self.evaluate(dequeued_t), (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
        self.assertIn(elem, (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shapes=())
enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = array_ops.placeholder(
dtypes_lib.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
# With equal probability, run Enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(
elements_enqueued, elements_enqueued + count, dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shapes=())
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
self.evaluate(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
# With equal probability, run Dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, self.evaluate(dequeued_t))
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(
elements_dequeued, elements_dequeued + count, dtype=np.int32)
self.assertAllEqual(expected_range,
dequeuemany_t.eval({
count_placeholder: count
}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.FIFOQueue(100, dtypes_lib.int32, ())
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.FIFOQueue(total_count, dtypes_lib.int32, ())
enqueue_elems_op = q.enqueue_many((elems,))
      # Define a subgraph that first dequeues a count, then uses dequeue_many
      # to fetch that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
self.assertAllEqual(elems[3:], self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with ops.Graph().as_default(), self.session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
sess.run(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(dequeued_t)
self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
def close():
sess.run(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, (dtypes_lib.float32, dtypes_lib.float32), (
(), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = self.evaluate([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
self.assertEqual([50.0], self.evaluate(dequeued_t))
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
time.sleep(0.01)
self.assertEqual([50.0], self.evaluate(dequeued_t))
self.assertEqual([60.0], self.evaluate(dequeued_t))
# Make sure the thread finishes before exiting.
thread.join()
def testBlockingEnqueueBeforeClose(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
self.evaluate(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.2)
def close():
self.evaluate(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
def testDoesNotLoseValue(self):
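    # Evaluating size() many times should keep returning 1: the single
    # enqueued element must not be dropped.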
with self.cached_session():
q = data_flow_ops.FIFOQueue(1, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
with self.cached_session():
q1 = data_flow_ops.FIFOQueue(
1, dtypes_lib.float32, shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = data_flow_ops.FIFOQueue(
1, dtypes_lib.float32, shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
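    # Queues created with the same shared_name must agree on capacity,
    # component types, and component shapes; each mismatch below fails when
    # the second queue op runs.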
with self.cached_session():
q_a_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_f")
q_f_2 = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = []
for _ in xrange(num_queues):
qlist.append(data_flow_ops.FIFOQueue(10, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.FIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
q2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32)
enq_q = data_flow_ops.FIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
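  # Helpers for testResetOfBlockingOperation below: each blocks on a queue
  # op and expects it to be cancelled when the session is closed.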
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.session() as sess:
q_empty = data_flow_ops.FIFOQueue(5, dtypes_lib.float32, ())
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = data_flow_ops.FIFOQueue(5, dtypes_lib.float32)
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
# Create a new session that hasn't been closed, so cached_session
# isn't messed up.
with self.session() as sess:
pass
def testBigEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(5, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
self.evaluate(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(2, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(self.evaluate(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
self.evaluate(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
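    # Round-trip one batch of every supported dtype through the queue and
    # check that the values survive unchanged.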
with self.cached_session() as sess:
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
dtypes_lib.uint16, dtypes_lib.bool, dtypes_lib.complex64,
dtypes_lib.complex128
]
shape = (32, 4, 128)
q = data_flow_ops.FIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes_lib.bool:
np_array = np_array > 0
elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = self.evaluate(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testDequeueEnqueueFail(self):
with self.cached_session() as session:
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
a = q.dequeue()
b = control_flow_ops.Assert(False, ["Before enqueue"])
with ops.control_dependencies([b]):
c = q.enqueue(33)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Before enqueue" in str(e)):
session.run([a, c])
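# Tests for FIFOQueues constructed with the `names=...` argument, where
# enqueue/dequeue operate on dictionaries keyed by component name.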
@test_util.run_v1_only("FIFOQueue removed from v2")
class FIFOQueueDictTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
names=("i", "j"),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
          type: DT_INT32 type: DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
self.assertEqual(["i", "j"], q.names)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
names=("i", "f"),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
          type: DT_INT32 type: DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
self.assertEqual(["i", "f"], q.names)
def testEnqueueDequeueOneComponent(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=((),), names="f")
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue(10.0)
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0,))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 12})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"f": 10.0, "s": "aa"})
enqueue_op = q.enqueue({"f": 10.0})
enqueue_op2 = q.enqueue({"f": 20.0})
enqueue_op3 = q.enqueue({"f": 30.0})
# Verify that enqueue_many() checks that when using names we must enqueue
# a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many([40.0, 50.0])
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": 12})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "s": ["aa", "bb"]})
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0]})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
self.evaluate(enqueue_op)
self.evaluate(enqueue_op2)
self.evaluate(enqueue_op3)
self.evaluate(enqueue_op4)
f = sess.run(dequeue["f"])
self.assertEqual(10.0, f)
f = sess.run(dequeue_2["f"])
self.assertEqual([20.0, 30.0], list(f))
f = sess.run(dequeue_2["f"])
self.assertEqual([40.0, 50.0], list(f))
def testEnqueueDequeueMultipleComponent(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32, dtypes_lib.string),
shapes=((), (), ()),
names=("f", "i", "s"))
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0, 123, "aa"))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 10.0})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 12, "s": "aa"})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0, "x": 10.0})
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0})
enqueue_op2 = q.enqueue({"i": 124, "s": "bb", "f": 20.0})
enqueue_op3 = q.enqueue({"i": 125, "s": "cc", "f": 30.0})
# Verify that enqueue_many() checks that when using names we must enqueue
# a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many(([40.0, 50.0], [126, 127], ["dd", "ee"]))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": [10.0, 20.0]})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"i": [12, 12], "s": ["aa", "bb"]})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({
"f": [40.0, 50.0],
"i": [126, 127],
"s": ["dd", "ee"],
"x": [1, 2]
})
enqueue_op4 = q.enqueue_many({
"f": [40.0, 50.0],
"i": [126, 127],
"s": ["dd", "ee"]
})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
self.evaluate(enqueue_op)
self.evaluate(enqueue_op2)
self.evaluate(enqueue_op3)
self.evaluate(enqueue_op4)
i, f, s = sess.run([dequeue["i"], dequeue["f"], dequeue["s"]])
self.assertEqual(123, i)
self.assertEqual(10.0, f)
self.assertEqual(compat.as_bytes("aa"), s)
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([124, 125], list(i))
      self.assertEqual([20.0, 30.0], list(f))
      self.assertEqual([compat.as_bytes("bb"), compat.as_bytes("cc")], list(s))
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([126, 127], list(i))
      self.assertEqual([40.0, 50.0], list(f))
      self.assertEqual([compat.as_bytes("dd"), compat.as_bytes("ee")], list(s))
@test_util.run_v1_only("FIFOQueue removed from v2")
class FIFOQueueWithTimeoutTest(test.TestCase):
def testDequeueWithTimeout(self):
with self.session(
config=config_pb2.ConfigProto(operation_timeout_in_ms=20)) as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual(
compat.as_bytes(""), q.queue_ref.op.get_attr("container"))
dequeued_t = q.dequeue()
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
self.evaluate(dequeued_t)
def testReusableAfterTimeout(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
dequeued_t = q.dequeue()
enqueue_op = q.enqueue(37)
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
self.evaluate(enqueue_op)
self.assertEqual(37, self.evaluate(dequeued_t))
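# Verifies that a queue created inside an ops.container() scope records the
# container name in its queue op's `container` attr.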
@test_util.run_v1_only("FIFOQueue removed from v2")
class QueueContainerTest(test.TestCase):
def testContainer(self):
with ops.Graph().as_default():
with ops.container("test"):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual(
compat.as_bytes("test"), q.queue_ref.op.get_attr("container"))
@test_util.run_v1_only("FIFOQueue removed from v2")
class FIFOQueueBenchmark(test.Benchmark):
"""Benchmark FIFOQueue operations."""
def _build_graph(self):
"""Builds a graph that enqueues and dequeues a single float.
Returns:
      A tuple with the graph init op and the graph output op.
"""
q = data_flow_ops.FIFOQueue(1, "float")
init = q.enqueue(1.0)
x = q.dequeue()
q_inc = q.enqueue(x + 1)
return init, q_inc
# TODO(suharshs): Add benchmarks for:
# - different capacities of the queue
# - various sizes of tensors
# - enqueue_many, dequeue_many
def _run(self, num_iters):
"""Benchmarks enqueueing and dequeueing from a FIFOQueue.
Args:
num_iters: The number of iterations to run.
Returns:
The duration of the run in seconds.
"""
graph = ops.Graph()
with graph.as_default():
init, output = self._build_graph()
with session_lib.Session(graph=graph) as session:
init.run()
_ = session.run(output) # warm up.
start_time = time.time()
for _ in range(num_iters):
_ = session.run(output)
duration = time.time() - start_time
print("%f secs per enqueue-dequeue" % (duration / num_iters))
self.report_benchmark(
name="fifo_queue", iters=num_iters, wall_time=duration / num_iters)
return duration
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/fifo_queue_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.resource_variable_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import gc
import os
import pickle
import re
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
@test_util.with_control_flow_v2
class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEqual(0, len(gc.garbage))
@test_util.run_deprecated_v1
def testHandleDtypeShapeMatch(self):
with self.cached_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant(0.0, dtype=dtypes.float32)).run()
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[0],
dtype=dtypes.int32)).run()
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
0,
dtype=dtypes.int32)).run()
@test_util.run_gpu_only
def testGPUInt64(self):
with context.eager_mode(), context.device("gpu:0"):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int64)
self.assertAllEqual(1, v.numpy())
def testEagerNameNotIdentity(self):
with context.eager_mode():
v0 = resource_variable_ops.ResourceVariable(1.0, name="a")
v1 = resource_variable_ops.ResourceVariable(2.0, name="a")
self.assertAllEqual(v0.numpy(), 1.0)
self.assertAllEqual(v1.numpy(), 2.0)
def testEagerNameNotNeeded(self):
with context.eager_mode():
v0 = resource_variable_ops.ResourceVariable(1.0)
self.assertAllEqual(v0.numpy(), 1.0)
def testReadVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
resource_variable_ops.assign_variable_op(handle, 1)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Trying to read variable with wrong dtype. "
"Expected float got int32."):
_ = resource_variable_ops.read_variable_op(handle, dtype=dtypes.float32)
def testEagerInitializedValue(self):
with context.eager_mode():
variable = resource_variable_ops.ResourceVariable(1.0, name="eager-init")
self.assertAllEqual(variable.numpy(), 1.0)
self.assertAllEqual(variable.initialized_value().numpy(), 1.0)
def testEagerBool(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(False, name="bool_test")
self.assertAllEqual(bool(v), False)
def testEagerDeepCopy(self):
with context.eager_mode():
init_value = np.ones((4, 4, 4))
variable = resource_variable_ops.ResourceVariable(init_value,
name="init")
copied_variable = copy.deepcopy(variable)
copied_variable.assign(4 * np.ones((4, 4, 4)))
# Copying the variable should create a new underlying tensor with distinct
# values.
self.assertFalse(np.allclose(variable.numpy(), copied_variable.numpy()))
@test_util.run_deprecated_v1
def testGraphDeepCopy(self):
with self.cached_session():
init_value = np.ones((4, 4, 4))
variable = resource_variable_ops.ResourceVariable(init_value,
name="init")
with self.assertRaises(NotImplementedError):
copy.deepcopy(variable)
@test_util.run_in_graph_and_eager_modes
def testStridedSliceAssign(self):
v = resource_variable_ops.ResourceVariable([1.0, 2.0])
self.evaluate(variables.global_variables_initializer())
self.evaluate(v[0].assign(2.0))
self.assertAllEqual(self.evaluate(v), [2.0, 2.0])
@test_util.run_in_graph_and_eager_modes
def testVariableShape(self):
v = resource_variable_ops.ResourceVariable([1., 1.])
self.assertAllEqual(
tensor_util.constant_value(
resource_variable_ops.variable_shape(v.handle)),
[2])
@test_util.run_deprecated_v1
def testDifferentAssignGraph(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
ops.reset_default_graph()
    v.assign(2.0)  # Note: this fails if convert_to_tensor is run on a graph
    # other than the variable's graph.
@test_util.run_deprecated_v1
def testFetchHandle(self):
with self.cached_session():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
self.assertGreater(len(handle.eval()), 0)
@test_util.run_deprecated_v1
def testCachedValueReadBeforeWrite(self):
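    # With a caching device set, fetching the variable together with its
    # assign_add in one session.run should return the cached pre-update value.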
with self.cached_session() as sess:
v = resource_variable_ops.ResourceVariable(0.0, caching_device="cpu:0")
self.evaluate(v.initializer)
value, _ = sess.run([v, v.assign_add(1.0)])
self.assertAllEqual(value, 0.0)
def testAssignVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1]))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Trying to assign variable with wrong "
"dtype. Expected int32 got float."):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1.], dtype=dtypes.float32))
def testUnprintableHandle(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
self.assertIn("<unprintable>", str(handle))
self.assertIn("<unprintable>", repr(handle))
@test_util.run_in_graph_and_eager_modes
def testDtypeSurvivesIdentity(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
id_handle = array_ops.identity(handle)
self.evaluate(resource_variable_ops.assign_variable_op(
id_handle, constant_op.constant(0, dtype=dtypes.int32)))
def testUnreadOpName(self):
v = resource_variable_ops.ResourceVariable(1.0)
self.assertNotEqual(v.name, v.assign_add(1.0).name)
@test_util.run_in_graph_and_eager_modes
def testCreateRead(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
value = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertAllEqual(1, value)
@test_util.run_in_graph_and_eager_modes
def testManyAssigns(self):
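    # Chain create -> read -> write -> read with control dependencies so the
    # two reads deterministically observe the values before and after the
    # second assign.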
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
create = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32))
with ops.control_dependencies([create]):
first_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
with ops.control_dependencies([first_read]):
write = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(2, dtype=dtypes.int32))
with ops.control_dependencies([write]):
second_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
f, s = self.evaluate([first_read, second_read])
self.assertEqual(f, 1)
self.assertEqual(s, 2)
@test_util.run_in_graph_and_eager_modes
def testAssignAdd(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
self.evaluate(resource_variable_ops.assign_add_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
read = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertEqual(read, 2)
@test_util.run_in_graph_and_eager_modes
def testScatterAdd(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testGradientGatherNd(self):
v = resource_variable_ops.ResourceVariable(
np.random.uniform(size=[2, 2]), dtype=dtypes.float32)
with backprop.GradientTape() as tape:
l = array_ops.gather_nd(v, [[1, 1]])
l = math_ops.reduce_sum(l)
grads = tape.gradient(l, v)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(grads), [[0., 0.], [0., 1.]])
@test_util.run_in_graph_and_eager_modes
def testGradientGatherNdIndexedSlices(self):
v = resource_variable_ops.ResourceVariable(
np.random.uniform(size=[2, 2]), dtype=dtypes.float32)
with backprop.GradientTape() as tape:
l = array_ops.gather_nd(v, [[1], [1]])
l = math_ops.reduce_sum(l)
grads = tape.gradient(l, v)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(grads.values), [[1., 1.], [1., 1.]])
@test_util.run_in_graph_and_eager_modes
def testScatterSub(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_sub(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes
def testScatterMul(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant([[5]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
def testEagerPickle(self):
with context.eager_mode():
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, "var.pickle")
with open(fname, "wb") as f:
v = resource_variable_ops.ResourceVariable(
10.0,
dtype=dtypes.float16,
name="v")
pickle.dump(v, f)
with open(fname, "rb") as f:
new_v = pickle.load(f)
self.assertEqual(new_v.name, v.name)
self.assertEqual(new_v.shape, v.shape)
self.assertEqual(new_v.dtype, v.dtype)
self.assertEqual(new_v.trainable, v.trainable)
self.assertAllEqual(new_v.numpy(), v.numpy())
@test_util.run_in_graph_and_eager_modes
def testScatterDiv(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
def testUseResource(self):
v = variables.VariableV1(1.0, use_resource=True)
    self.assertIsInstance(v, resource_variable_ops.ResourceVariable)
def testEagerNoUseResource(self):
with context.eager_mode():
v = variables.Variable(1.0)
      self.assertIsInstance(v, resource_variable_ops.ResourceVariable)
@test_util.run_in_graph_and_eager_modes
def testScatterMin(self):
with ops.device("cpu:0"):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[[6]],
dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_min(handle, [0],
constant_op.constant(
[[3]],
dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
def testMetagraph(self):
with ops.Graph().as_default():
with variable_scope.variable_scope("foo", use_resource=True):
a = variable_scope.get_variable("a", initializer=10.0)
momentum.MomentumOptimizer(
learning_rate=0.001, momentum=0.1).minimize(
a,
colocate_gradients_with_ops=True,
global_step=training_util.get_or_create_global_step())
graph = ops.get_default_graph()
meta_graph_def = saver.export_meta_graph(graph=graph)
with ops.Graph().as_default():
saver.import_meta_graph(meta_graph_def, import_scope="")
meta_graph_two = saver.export_meta_graph(graph=graph)
self.assertEqual(meta_graph_def, meta_graph_two)
@test_util.run_in_graph_and_eager_modes
def testScatterMax(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
@test_util.run_in_graph_and_eager_modes
def testScatterAddScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterSubScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_sub(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes
def testScatterMulScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant(5, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
@test_util.run_in_graph_and_eager_modes
def testScatterDivScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
@test_util.run_in_graph_and_eager_modes
def testScatterMinScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_min(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterMaxScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
@test_util.run_in_graph_and_eager_modes
def testScatterAddVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 1.5], name="add")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_add(ops.IndexedSlices(indices=[1], values=[2.5])))
self.assertAllEqual([0.0, 4.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterSubVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 2.5], name="sub")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_sub(ops.IndexedSlices(indices=[1], values=[1.5])))
self.assertAllEqual([0.0, 1.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterMaxVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 4.0], name="max1")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_max(ops.IndexedSlices(indices=[1], values=[5.0])))
self.assertAllEqual([0.0, 5.0], self.evaluate(v))
v = resource_variable_ops.ResourceVariable([0.0, 3.5], name="max2")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_max(ops.IndexedSlices(indices=[1], values=[2.0])))
self.assertAllEqual([0.0, 3.5], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterMinVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 4.0], name="min1")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_min(ops.IndexedSlices(indices=[1], values=[5.0])))
self.assertAllEqual([0.0, 4.0], self.evaluate(v))
v = resource_variable_ops.ResourceVariable([0.0, 3.5], name="min2")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_min(ops.IndexedSlices(indices=[1], values=[2.0])))
self.assertAllEqual([0.0, 2.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterMulVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 4.0], name="mul")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_mul(ops.IndexedSlices(indices=[1], values=[3.0])))
self.assertAllEqual([0.0, 12.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterDivVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 6.0], name="div")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_div(ops.IndexedSlices(indices=[1], values=[2.0])))
self.assertAllEqual([0.0, 3.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterUpdateVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 6.0], name="update")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_update(ops.IndexedSlices(indices=[1], values=[3.0])))
self.assertAllEqual([0.0, 3.0], self.evaluate(v))
@test_util.run_deprecated_v1
def testScatterUpdateString(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.string, shape=[1, 1])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant([["a"]], dtype=dtypes.string)))
self.evaluate(resource_variable_ops.resource_scatter_update(
handle, [0], constant_op.constant([["b"]], dtype=dtypes.string)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.string)
self.assertEqual(compat.as_bytes(self.evaluate(read)[0][0]),
compat.as_bytes("b"))
@test_util.run_deprecated_v1
def testScatterUpdateStringScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.string, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[["a"]],
dtype=dtypes.string)))
self.evaluate(
resource_variable_ops.resource_scatter_update(handle, [0],
constant_op.constant(
"b",
dtype=dtypes.string)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.string)
self.assertEqual(
compat.as_bytes(self.evaluate(read)[0][0]), compat.as_bytes("b"))
# TODO(alive): get this to work in Eager mode.
def testGPU(self):
with test_util.use_gpu():
abc = variable_scope.get_variable(
"abc",
shape=[1],
initializer=init_ops.ones_initializer(),
use_resource=True)
self.evaluate(variables.global_variables_initializer())
      self.assertTrue(
          self.evaluate(
              resource_variable_ops.var_is_initialized_op(abc.handle)))
def testScatterBool(self):
with context.eager_mode():
ref = resource_variable_ops.ResourceVariable(
[False, True, False], trainable=False)
indices = math_ops.range(3)
updates = constant_op.constant([True, True, True])
state_ops.scatter_update(ref, indices, updates)
self.assertAllEqual(ref.read_value(), [True, True, True])
@test_util.run_in_graph_and_eager_modes
def testConstraintArg(self):
constraint = lambda x: x
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var0")
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var1")
# TODO(alive): how should this work in Eager mode?
@test_util.run_deprecated_v1
def testInitFn(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32)
self.assertEqual(v.handle.op.colocation_groups(),
v.initializer.inputs[1].op.colocation_groups())
def testHandleNumpy(self):
with context.eager_mode():
with self.assertRaises(ValueError):
resource_variable_ops.ResourceVariable(
1.0, name="handle-numpy").handle.numpy()
def testCountUpTo(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
self.assertAllEqual(v.count_up_to(1), 0)
with self.assertRaises(errors.OutOfRangeError):
v.count_up_to(1)
def testCountUpToFunction(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
self.assertAllEqual(state_ops.count_up_to(v, 1), 0)
with self.assertRaises(errors.OutOfRangeError):
state_ops.count_up_to(v, 1)
@test_util.run_in_graph_and_eager_modes
def testInitFnDtype(self):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32, name="var0")
self.assertEqual(dtypes.float32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes
def testInitFnNoDtype(self):
v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="var2")
self.assertEqual(dtypes.int32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes
def testInitializeAllVariables(self):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.float32,
name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testOperatorOverload(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(2.0, self.evaluate(v + v))
@test_util.run_in_graph_and_eager_modes
def testAssignMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign(2.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign(3.0, read_value=True)
self.assertEqual(3.0, self.evaluate(assign_with_read))
assign_without_read = v.assign(4.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testLoad(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
v.load(2.0)
self.assertEqual(2.0, self.evaluate(v.value()))
def testShapePassedToGradient(self):
with ops.Graph().as_default():
@custom_gradient.custom_gradient
def differentiable_scatter_update(handle, indices, values):
with ops.control_dependencies([
resource_variable_ops.resource_scatter_update(
handle, indices, values)]):
new_handle = array_ops.identity(handle)
def grad(dresult):
self.assertIsNotNone(
tensor_util.constant_value(dresult.dense_shape))
return [dresult, None, None]
return new_handle, grad
var = variable_scope.get_variable(
"foo", shape=[20], initializer=init_ops.zeros_initializer,
dtype=dtypes.float64, use_resource=True)
indices = math_ops.range(10)
updates = math_ops.range(9, -1, -1, dtype=dtypes.float64)
new_handle = differentiable_scatter_update(var.handle, indices, updates)
gathered = resource_variable_ops.resource_gather(
new_handle, indices, dtype=var.dtype)
gradients_impl.gradients([gathered], [updates])
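  # A minimal sketch (added for illustration; not part of the original
  # suite) of the @custom_gradient pattern exercised above: the decorated
  # function returns its result together with a grad function that maps the
  # upstream gradient to a gradient for each input.
  def _custom_gradient_square_sketch(self, x):
    @custom_gradient.custom_gradient
    def square(t):
      def grad(dresult):
        # d(t * t)/dt == 2 * t, scaled by the incoming gradient.
        return dresult * 2.0 * t
      return t * t, grad
    return square(x)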
def testToFromProtoCachedValue(self):
with ops.Graph().as_default():
v_def = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(3.0)).to_proto()
v_prime = resource_variable_ops.ResourceVariable(variable_def=v_def)
      self.assertIsNone(getattr(v_prime, "_cached_value", None))
other_v_def = resource_variable_ops.ResourceVariable(
caching_device="cpu:0",
initial_value=constant_op.constant(3.0)).to_proto()
other_v_prime = resource_variable_ops.ResourceVariable(
variable_def=other_v_def)
      self.assertIsNotNone(other_v_prime._cached_value)
def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v_def = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(3.0)).to_proto()
with ops.Graph().as_default(), self.cached_session() as sess:
# v describes a VariableDef-based variable without an initial value.
v = resource_variable_ops.ResourceVariable(variable_def=v_def)
self.assertEqual(3.0, self.evaluate(v.initialized_value()))
# initialized_value should not rerun the initializer_op if the variable
# has already been initialized elsewhere.
self.evaluate(v.assign(1.0))
self.assertEqual(1.0, v.initialized_value().eval())
v_def.ClearField("initial_value_name")
with ops.Graph().as_default(), self.cached_session() as sess:
# Restoring a legacy VariableDef proto that does not have
# initial_value_name set should still work.
v = resource_variable_ops.ResourceVariable(variable_def=v_def)
# We should also be able to re-export the variable to a new meta graph.
self.assertProtoEquals(v_def, v.to_proto())
# But attempts to use initialized_value will result in errors.
with self.assertRaises(ValueError):
self.evaluate(v.initialized_value())
def testTrainableInProto(self):
with ops.Graph().as_default():
non_trainable_variable = resource_variable_ops.ResourceVariable(
trainable=False,
initial_value=constant_op.constant(10.0))
self.assertEqual(
False,
resource_variable_ops.ResourceVariable(
variable_def=non_trainable_variable.to_proto())
.trainable)
trainable_variable = resource_variable_ops.ResourceVariable(
trainable=True,
initial_value=constant_op.constant(10.0))
self.assertEqual(
True,
resource_variable_ops.ResourceVariable(
variable_def=trainable_variable.to_proto())
.trainable)
@test_util.run_in_graph_and_eager_modes
def testSparseRead(self):
init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
v = resource_variable_ops.ResourceVariable(
constant_op.constant(init_value, dtype=dtypes.int32), name="var3")
self.evaluate(variables.global_variables_initializer())
value = self.evaluate(v.sparse_read([0, 3, 1, 2]))
self.assertAllEqual(init_value[[0, 3, 1, 2], ...], value)
@test_util.run_in_graph_and_eager_modes
def testGatherNd(self):
init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
v = resource_variable_ops.ResourceVariable(
constant_op.constant(init_value, dtype=dtypes.int32), name="var3")
self.evaluate(variables.global_variables_initializer())
value_op = v.gather_nd([[0, 0], [1, 2], [3, 3]])
self.assertAllEqual([3, 4], value_op.shape)
value = self.evaluate(value_op)
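    # init_value[i, j, k] == 16 * i + 4 * j + k, so the gathered rows
    # [0, 0], [1, 2], and [3, 3] are [0..3], [24..27], and [60..63].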
self.assertAllEqual([[0, 1, 2, 3], [24, 25, 26, 27], [60, 61, 62, 63]],
value)
value_op = v.gather_nd([[0, 0, 0], [1, 2, 3], [3, 3, 3]])
self.assertAllEqual([3], value_op.shape)
value = self.evaluate(value_op)
self.assertAllEqual([0, 27, 63], value)
@test_util.run_deprecated_v1
def testToFromProto(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
      self.assertEqual(2, math_ops.add(w, 1).eval())
      self.assertEqual(v._handle, w._handle)
      self.assertEqual(v._graph_element, w._graph_element)
@test_util.run_in_graph_and_eager_modes
def testAssignAddMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_add(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign_add(1.0, read_value=True)
self.assertEqual(3.0, self.evaluate(assign_with_read))
assign_without_read = v.assign_add(1.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testAssignSubMethod(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_sub(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign_sub(1.0, read_value=True)
self.assertEqual(1.0, self.evaluate(assign_with_read))
assign_without_read = v.assign_sub(1.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(0.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testDestroyResource(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3.0, self.evaluate(v.value()))
self.evaluate(resource_variable_ops.destroy_resource_op(v.handle))
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(v.value())
# Handle to a resource not actually created.
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
# Should raise no exception
self.evaluate(resource_variable_ops.destroy_resource_op(
handle, ignore_lookup_error=True))
@test_util.run_deprecated_v1
def testAssignDifferentShapes(self):
with self.cached_session() as sess, variable_scope.variable_scope(
"foo", use_resource=True):
var = variable_scope.get_variable("x", shape=[1, 1], dtype=dtypes.float32)
placeholder = array_ops.placeholder(dtypes.float32)
assign = var.assign(placeholder)
sess.run(
[assign],
feed_dict={placeholder: np.zeros(shape=[2, 2], dtype=np.float32)})
def testAssignDifferentShapesEagerNotAllowed(self):
with context.eager_mode():
with variable_scope.variable_scope("foo"):
var = variable_scope.get_variable("x", shape=[1, 1],
dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError,
"Shapes.*and.*are incompatible"):
assign = var.assign(np.zeros(shape=[2, 2]))
self.evaluate(assign)
@test_util.disable_xla("XLA doesn't allow changing shape at assignment, as "
"dictated by tf2xla/xla_resource.cc:SetTypeAndShape")
@test_util.run_in_graph_and_eager_modes
def testAssignDifferentShapesAllowed(self):
var = resource_variable_ops.ResourceVariable(
initial_value=np.zeros(shape=[1, 1]),
shape=tensor_shape.TensorShape(None))
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(np.zeros(shape=[1, 1]), var.read_value())
self.evaluate(var.assign(np.zeros(shape=[2, 2])))
self.assertAllEqual(np.zeros(shape=[2, 2]), var.read_value())
@test_util.run_deprecated_v1
def testDtypeAfterFromProto(self):
v = resource_variable_ops.ResourceVariable(2.0)
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertIsInstance(w.dtype, dtypes.DType)
self.assertEqual(v.dtype, w.dtype)
# TODO(alive): get caching to work in eager mode.
@test_util.run_deprecated_v1
def testCachingDevice(self):
with ops.device("/job:server/task:1"):
v = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", v.value().device)
with self.assertRaises(ValueError):
_ = v.value().op.get_attr("_class")
with ops.colocate_with(v.op):
w = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", w.value().device)
with self.assertRaises(ValueError):
_ = w.value().op.get_attr("_class")
@test_util.run_deprecated_v1
def testSharedName(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(300.0, name="var4")
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var4",
# Needed in Eager since we get a unique container name by default.
container=ops.get_default_graph()._container)
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
x = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var5",
container=ops.get_default_graph()._container)
with self.assertRaisesOpError("Resource .*/var5/.* does not exist"):
resource_variable_ops.read_variable_op(x, v.dtype.base_dtype).eval()
@test_util.run_deprecated_v1
def testSharedNameWithNamescope(self):
with self.cached_session():
with ops.name_scope("foo"):
v = resource_variable_ops.ResourceVariable(300.0, name="var6")
self.assertEqual("foo/var6", v._shared_name) # pylint: disable=protected-access
self.assertEqual("foo/var6:0", v.name)
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="foo/var6",
# Needed in Eager since we get a unique container name by default.
container=ops.get_default_graph()._container)
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
@test_util.run_in_graph_and_eager_modes
def testShape(self):
v = resource_variable_ops.ResourceVariable(
name="var4", initial_value=array_ops.ones(shape=[10, 20, 35]))
self.assertEqual("(10, 20, 35)", str(v.shape))
self.assertEqual("(10, 20, 35)", str(v.get_shape()))
self.assertEqual("(10, 20, 35)", str(v.value().shape))
self.assertEqual("(3, 20, 35)", str(v.sparse_read([0, 1, 2]).shape))
if not context.executing_eagerly():
self.assertEqual(
"<unknown>",
str(v.sparse_read(array_ops.placeholder(dtypes.int32)).shape))
@test_util.run_deprecated_v1
def testSetInitialValue(self):
with self.cached_session():
# Initialize variable with a value different from the initial value passed
# in the constructor.
v = resource_variable_ops.ResourceVariable(2.0)
v.initializer.run(feed_dict={v.initial_value: 3.0})
self.assertEqual(3.0, v.value().eval())
@test_util.run_v1_only("b/120545219")
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = resource_variable_ops.ResourceVariable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegexp(ValueError, "initializer"):
control_flow_ops.while_loop(cond, body, [0, 0])
def testVariableEager(self):
with context.eager_mode():
init = array_ops.ones(shape=[10, 20, 35], dtype=dtypes.int32)
constraint = lambda x: x
with ops.name_scope("foo"):
v = resource_variable_ops.ResourceVariable(
name="var7",
initial_value=init,
caching_device="cpu:0",
constraint=constraint)
# Test properties
self.assertEqual(dtypes.int32, v.dtype)
self.assertEqual("foo/var7:0", v.name)
self.assertAllEqual([10, 20, 35], v.shape.as_list())
      self.assertIsInstance(v.handle, ops.EagerTensor)
self.assertEqual(constraint, v.constraint)
self.assertAllEqual(init.numpy(), v.read_value().numpy())
self.assertAllEqual(init.numpy(), v.value().numpy())
# Callable init.
callable_init = lambda: init * 2
v2 = resource_variable_ops.ResourceVariable(
initial_value=callable_init, name="var7")
self.assertEqual("var7:0", v2.name)
self.assertAllEqual(2 * init.numpy(), v2.read_value().numpy())
# Test assign_add.
new_v2_val = v2.assign_add(v.read_value())
self.assertAllEqual(v.read_value().numpy() * 3, new_v2_val.numpy())
# Test assign_sub.
new_v2_val = v2.assign_sub(v.read_value())
self.assertAllEqual(v.read_value().numpy() * 2, new_v2_val.numpy())
# Test assign.
v2.assign(v.read_value())
self.assertAllEqual(v.read_value().numpy(), v2.read_value().numpy())
# Test load
v2.load(2 * v.read_value())
self.assertAllEqual(2 * v.read_value().numpy(), v2.read_value().numpy())
# Test convert_to_tensor
t = ops.convert_to_tensor(v)
self.assertAllEqual(t.numpy(), v.read_value().numpy())
# Test operations
self.assertAllEqual((v * 2).numpy(), (v + v).numpy())
def testContainerEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="same")
with ops.container("different"):
v2 = resource_variable_ops.ResourceVariable(initial_value=lambda: 0,
name="same")
v2.assign(2)
self.assertEqual(1, v1.read_value().numpy())
self.assertEqual(2, v2.read_value().numpy())
def testDestruction(self):
with context.eager_mode():
var = resource_variable_ops.ResourceVariable(initial_value=1.0,
name="var8")
var_handle = var._handle
del var
with self.assertRaisesRegexp(errors.NotFoundError,
r"Resource .* does not exist."):
resource_variable_ops.destroy_resource_op(var_handle,
ignore_lookup_error=False)
def testScatterUpdate(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="update")
state_ops.scatter_update(v, [1], [3.0])
self.assertAllEqual([1.0, 3.0], v.numpy())
def testScatterAddStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="add")
state_ops.scatter_add(v, [1], [3])
self.assertAllEqual([1.0, 5.0], v.numpy())
def testScatterSubStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="sub")
state_ops.scatter_sub(v, [1], [3])
self.assertAllEqual([1.0, -1.0], v.numpy())
def testScatterUpdateVariant(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([
list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[])
])
v.scatter_update(
ops.IndexedSlices(
list_ops.tensor_list_from_tensor([1., 2.], element_shape=[]), 0))
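      # Index 0 now holds a TensorList built from [1., 2.], so reading its
      # first element back should yield 1.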
self.assertAllEqual(
list_ops.tensor_list_get_item(v[0], 0, element_dtype=dtypes.float32),
1.)
def testGroupDoesntForceRead(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
assign = v.assign_add(1.0)
g = control_flow_ops.group([assign])
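      # group() should wire its control edge to the AssignAddVariableOp
      # itself rather than forcing a read of the updated value.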
self.assertEqual(g.control_inputs[0].type, "AssignAddVariableOp")
def testScatterNdAddStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(
[1, 2, 3, 4, 5, 6, 7, 8], dtype=dtypes.float32, name="add")
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
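      # Per-index arithmetic: v[4] = 5 + 9, v[3] = 4 + 10, v[1] = 2 + 11,
      # v[7] = 8 + 12.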
expected = np.array([1, 13, 3, 14, 14, 6, 7, 20])
state_ops.scatter_nd_add(v, indices, updates)
self.assertAllClose(expected, v.numpy())
@test_util.run_in_graph_and_eager_modes
def testUnreadVariableInsideFunction(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def assign():
v.assign(1.0)
graph = assign.get_concrete_function().graph
self.assertTrue(all(x.type != "ReadVariableOp"
for x in graph.get_operations()))
def testScatterNdSubStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(
[1, 2, 3, 4, 5, 6, 7, 8], dtype=dtypes.float32, name="sub")
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
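      # Per-index arithmetic: v[4] = 5 - 9, v[3] = 4 - 10, v[1] = 2 - 11,
      # v[7] = 8 - 12.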
expected = np.array([1, -9, 3, -6, -4, 6, 7, -4])
state_ops.scatter_nd_sub(v, indices, updates)
self.assertAllClose(expected, v.numpy())
def testScatterUpdateCast(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="update")
state_ops.scatter_update(v, [1], [3])
self.assertAllEqual([1.0, 3.0], v.numpy())
@test_util.run_in_graph_and_eager_modes
def testScatterUpdateInvalidArgs(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3], name="update")
# The exact error and message differ between graph construction (where the
# error is realized during shape inference at graph construction time) and
# eager execution (where the error is realized during kernel execution).
with self.assertRaisesRegexp(Exception, r"shape.*2.*3"):
state_ops.scatter_update(v, [0, 1], [0, 1, 2])
@test_util.run_in_graph_and_eager_modes
def testAssignIncompatibleShape(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
self.evaluate(v.initializer)
pattern = re.compile("shapes must be equal", re.IGNORECASE)
with self.assertRaisesRegexp(Exception, pattern):
self.assertAllEqual(self.evaluate(v.assign_add(1)), [1, 2, 3, 4])
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testCopyToGraphUninitialized(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
copy_to_graph = ops.Graph()
with copy_to_graph.as_default(): # Intentionally testing v1 behavior
copied = resource_variable_ops.copy_to_graph_uninitialized(v)
self.assertEqual(v.name, copied.name)
self.assertIsNone(copied.initializer)
def create_variant_shape_and_type_data(self):
variant_shape_and_type_data = (
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData())
variant_shape_and_type_data.is_set = True
stored_shape = tensor_shape.TensorShape([None, 4]).as_proto()
stored_dtype = dtypes.float32.as_datatype_enum
# NOTE(ebrevdo): shape_and_type lacks append() in some versions of protobuf.
variant_shape_and_type_data.shape_and_type.extend([
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(
shape=stored_shape, dtype=stored_dtype)])
return variant_shape_and_type_data
@def_function.function
def create_constant_variant(self, value):
value = constant_op.constant(
tensor_pb2.TensorProto(
dtype=dtypes.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(value, dtype=np.int32).tobytes())
]))
return value
  # NOTE: This test runs in both graph and eager modes; in eager mode the
  # TensorProto constant is created inside the def_function-wrapped helper
  # above.
@test_util.run_in_graph_and_eager_modes()
def testVariantInitializer(self):
variant_shape_and_type_data = self.create_variant_shape_and_type_data()
value = self.create_constant_variant(3)
initializer = array_ops.fill([3], value)
resource_variable_ops._set_handle_shapes_and_types( # pylint: disable=protected-access
initializer, variant_shape_and_type_data,
graph_mode=not context.executing_eagerly())
v = resource_variable_ops.ResourceVariable(initializer)
read = array_ops.identity(v)
read_variant_shape_and_type = (
resource_variable_ops.get_eager_safe_handle_data(read))
self.assertEqual(
read_variant_shape_and_type, variant_shape_and_type_data)
gather = v.sparse_read([0])
gather_variant_shape_and_type = (
resource_variable_ops.get_eager_safe_handle_data(gather))
self.assertEqual(
gather_variant_shape_and_type, variant_shape_and_type_data)
# Make sure initializer runs.
if not context.executing_eagerly():
self.evaluate(v.initializer)
self.evaluate(read.op)
self.evaluate(gather.op)
@parameterized.parameters([
# batch_dims=0 (equivalent to tf.gather)
dict( # 2D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[2, 1], [0, 3]],
expected=[[8, 7], [6, 9]]),
dict( # 3D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[9, 7], [8, 6]], [[6, 9], [8, 8]]]),
dict( # 4D indices
batch_dims=0,
params=[8, 9],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[8, 9], [9, 8]], [[8, 8], [9, 9]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
# batch_dims=indices.shape.ndims - 1 (equivalent to
# tf.compat.v1.batch_gather)
dict( # 2D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[2, 1], [0, 3]],
expected=[[12, 11], [20, 23]]),
dict( # 3D indices (2 batch dims)
batch_dims=2,
params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
# 0 < batch_dims < indices.shape.ndims - 1
dict( # 3D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[13, 11], [12, 10]], [[20, 23], [22, 22]]]),
dict( # 4D indices (1 batch dim)
batch_dims=1,
params=[[6, 7], [8, 9]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[6, 7], [7, 6]], [[6, 6], [7, 7]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
dict( # 4D indices (2 batch dims)
batch_dims=2,
params=[[[2, 3], [4, 5]], [[6, 7], [8, 9]]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[2, 3], [3, 2]], [[4, 4], [5, 5]]],
[[[7, 7], [6, 6]], [[8, 9], [9, 8]]]]),
])
@test_util.run_in_graph_and_eager_modes
def testGatherWithBatchDims(self, params, indices, batch_dims, expected):
var = resource_variable_ops.ResourceVariable(params, name="var0")
with ops.control_dependencies([var.initializer]):
result = resource_variable_ops.resource_gather(
var.handle, indices, dtype=var.dtype, batch_dims=batch_dims)
self.assertAllEqual(expected, result)
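  # A NumPy reference sketch (added for illustration; not part of the
  # original suite) of gather with batch_dims: the leading batch_dims axes
  # of params and indices are paired up, and gathering happens along axis
  # batch_dims of params.
  def _np_batch_gather_sketch(self, params, indices, batch_dims):
    params = np.asarray(params)
    indices = np.asarray(indices)
    if batch_dims == 0:
      return params[indices]
    # Peel one batch axis and recurse over the paired slices.
    return np.stack([
        self._np_batch_gather_sketch(p, i, batch_dims - 1)
        for p, i in zip(params, indices)
    ])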
@parameterized.parameters([
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=0,
output_shape=[2, 3, 8, 9, 10, 3, 4, 5, 6, 7]
# = indices.shape + params.shape[1:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=1,
output_shape=[2, 3, 8, 9, 10, 4, 5, 6, 7]
# = params.shape[:1] + indices.shape[1:] + params.shape[2:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
output_shape=[2, 3, 8, 9, 10, 5, 6, 7]
# = params.shape[:2] + indices.shape[2:] + params.shape[3:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 4, 9, 10],
batch_dims=3,
output_shape=[2, 3, 4, 9, 10, 6, 7]
# = params.shape[:3] + indices.shape[3:] + params.shape[4:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 4, 5, 10],
batch_dims=4,
output_shape=[2, 3, 4, 5, 10, 7]
# = params.shape[:4] + indices.shape[4:] + params.shape[5:]
),
])
@test_util.run_in_graph_and_eager_modes
def testGatherWithBatchDimsMatchesTensor(self, params_shape, indices_shape,
batch_dims, output_shape):
"""Checks that gather with batch_dims returns the correct shape."""
# Generate a `params` tensor with the indicated shape.
params_size = np.prod(params_shape)
params = np.reshape(np.arange(params_size, dtype=np.int32), params_shape)
# Generate an `indices` tensor with the indicated shape, where each index
# is within the appropriate range.
indices_size = np.prod(indices_shape)
indices = np.reshape(np.arange(indices_size, dtype=np.int32), indices_shape)
indices = indices % params_shape[batch_dims]
var = resource_variable_ops.ResourceVariable(params, name="var0")
with ops.control_dependencies([var.initializer]):
expected = array_ops.gather(
var.read_value(), indices, batch_dims=batch_dims)
result = resource_variable_ops.resource_gather(
var.handle, indices, dtype=var.dtype, batch_dims=batch_dims)
self.assertAllEqual(output_shape, result.shape.as_list())
self.assertAllEqual(expected, result)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/kernel_tests/resource_variable_ops_test.py
|