# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to manipulate a tensor graph in python.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
_VARIABLE_OPS = {
"Assign",
"AssignAdd",
"AssignSub",
"Queue",
"ScatterAdd",
"ScatterSub",
"ScatterUpdate",
"TruncatedNormal",
"Variable",
"VariableV2",
}
_CONTROL_FLOW_OP_NAMES_OR_IDENTITY = [
"Switch",
"Enter",
"Exit",
"Identity",
"Merge",
"NextIteration",
]
def _is_variable_op(op):
"""Returns true if 'op' refers to a Variable node."""
return op in _VARIABLE_OPS
@deprecation.deprecated(
date=None,
instructions="Use `tf.compat.v1.graph_util.must_run_on_cpu`")
@tf_export(v1=["graph_util.must_run_on_cpu"])
def must_run_on_cpu(node, pin_variables_on_cpu=False):
"""Returns True if the given node_def must run on CPU, otherwise False.
Args:
node: The node to be assigned to a device. Could be either an ops.Operation
or NodeDef.
pin_variables_on_cpu: If True, this function will return False if node_def
represents a variable-related op.
Returns:
True if the given node must run on CPU, otherwise False.
"""
if isinstance(node, ops.Operation):
node_def = node.node_def
else:
assert isinstance(node, node_def_pb2.NodeDef)
node_def = node
  # If requested, pin variable-related ops on CPU.
if pin_variables_on_cpu and _is_variable_op(node_def.op):
return True
# Constant operations producing a string or int32 must run on CPU.
if node_def.op == "Const":
# Get the value of the 'dtype' attr
dtype = node_def.attr["dtype"].type
if dtype == dtypes.string or dtype == dtypes.int32:
return True
if node_def.op in ["DynamicStitch", "ParallelDynamicStitch"]:
dtype = node_def.attr["T"].type
if dtype == dtypes.int32:
# DynamicStitch on GPU only works for int32 values.
return True
if node_def.op in ["Cast"]:
dtype = node_def.attr["SrcT"].type
if dtype == dtypes.int32:
      # Cast on GPU does not work for int32 values.
return True
return False
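# A minimal usage sketch (editor's illustration, not part of the original
# module; the helper name is hypothetical): a Const node producing strings is
# expected to be pinned to CPU.
def _example_must_run_on_cpu():
  node = node_def_pb2.NodeDef()
  node.op = "Const"
  node.attr["dtype"].type = dtypes.string.as_datatype_enum
  return must_run_on_cpu(node)  # Expected: True; string Consts are CPU-only.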
################################################################################
#
# device functions for use in with g.device(...)
#
################################################################################
def _node_name(n):
if n.startswith("^"):
return n[1:]
else:
return n.split(":")[0]
def _extract_graph_summary(graph_def):
"""Extracts useful information from the graph and returns them."""
name_to_input_name = {} # Keyed by the dest node name.
name_to_node = {} # Keyed by node name.
# Keeps track of node sequences. It is important to still output the
# operations in the original order.
name_to_seq_num = {} # Keyed by node name.
seq = 0
for node in graph_def.node:
n = _node_name(node.name)
name_to_node[n] = node
name_to_input_name[n] = [_node_name(x) for x in node.input]
# Prevent colocated nodes from being lost.
if "_class" in node.attr:
for colocated_node_name in node.attr["_class"].list.s:
colocated_node_decoded = colocated_node_name.decode("utf-8")
if colocated_node_decoded.startswith("loc:@"):
name_to_input_name[n].append(colocated_node_decoded[5:])
name_to_seq_num[n] = seq
seq += 1
return name_to_input_name, name_to_node, name_to_seq_num
def _assert_nodes_are_present(name_to_node, nodes):
"""Assert that nodes are present in the graph."""
for d in nodes:
assert d in name_to_node, "%s is not in graph" % d
def _bfs_for_reachable_nodes(target_nodes, name_to_input_name):
"""Breadth first search for reachable nodes from target nodes."""
nodes_to_keep = set()
# Breadth first search to find all the nodes that we should keep.
next_to_visit = target_nodes[:]
while next_to_visit:
node = next_to_visit[0]
del next_to_visit[0]
if node in nodes_to_keep:
# Already visited this node.
continue
nodes_to_keep.add(node)
if node in name_to_input_name:
next_to_visit += name_to_input_name[node]
return nodes_to_keep
@deprecation.deprecated(
date=None,
instructions="Use `tf.compat.v1.graph_util.extract_sub_graph`")
@tf_export(v1=["graph_util.extract_sub_graph"])
def extract_sub_graph(graph_def, dest_nodes):
"""Extract the subgraph that can reach any of the nodes in 'dest_nodes'.
Args:
graph_def: A graph_pb2.GraphDef proto.
dest_nodes: A list of strings specifying the destination node names.
Returns:
The GraphDef of the sub-graph.
Raises:
TypeError: If 'graph_def' is not a graph_pb2.GraphDef proto.
"""
if not isinstance(graph_def, graph_pb2.GraphDef):
raise TypeError("graph_def must be a graph_pb2.GraphDef proto.")
if isinstance(dest_nodes, six.string_types):
raise TypeError("dest_nodes must be a list.")
name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(
graph_def)
_assert_nodes_are_present(name_to_node, dest_nodes)
nodes_to_keep = _bfs_for_reachable_nodes(dest_nodes, name_to_input_name)
nodes_to_keep_list = sorted(
list(nodes_to_keep), key=lambda n: name_to_seq_num[n])
# Now construct the output GraphDef
out = graph_pb2.GraphDef()
for n in nodes_to_keep_list:
out.node.extend([copy.deepcopy(name_to_node[n])])
out.library.CopyFrom(graph_def.library)
out.versions.CopyFrom(graph_def.versions)
return out
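# A minimal usage sketch (editor's illustration; the node name "output" is
# hypothetical): prune a GraphDef down to one node and its ancestors.
def _example_extract_sub_graph(graph_def):
  # Returns a GraphDef containing only "output" and the nodes it depends on.
  return extract_sub_graph(graph_def, ["output"])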
@deprecation.deprecated(
date=None,
instructions="Use `tf.compat.v1.graph_util.tensor_shape_from_node_def_name`"
)
@tf_export(v1=["graph_util.tensor_shape_from_node_def_name"])
def tensor_shape_from_node_def_name(graph, input_name):
"""Convenience function to get a shape from a NodeDef's input string."""
# To get a tensor, the name must be in the form <input>:<port>, for example
# 'Mul:0'. The GraphDef input strings don't always have the port specified
# though, so if there isn't a colon we need to add a default ':0' to the end.
if ":" not in input_name:
canonical_name = input_name + ":0"
else:
canonical_name = input_name
tensor = graph.get_tensor_by_name(canonical_name)
shape = tensor.get_shape()
return shape
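# A minimal usage sketch (editor's illustration; the node name "Mul" is
# hypothetical): a missing port defaults to ":0", so both calls below resolve
# to the shape of the tensor "Mul:0".
def _example_tensor_shape(graph):
  shape_a = tensor_shape_from_node_def_name(graph, "Mul")
  shape_b = tensor_shape_from_node_def_name(graph, "Mul:0")
  return shape_a, shape_b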
@deprecation.deprecated(
date=None,
instructions="Use `tf.compat.v1.graph_util.convert_variables_to_constants`")
@tf_export(v1=["graph_util.convert_variables_to_constants"])
def convert_variables_to_constants(sess,
input_graph_def,
output_node_names,
variable_names_whitelist=None,
variable_names_blacklist=None):
"""Replaces all the variables in a graph with constants of the same values.
If you have a trained graph containing Variable ops, it can be convenient to
convert them all to Const ops holding the same values. This makes it possible
to describe the network fully with a single GraphDef file, and allows the
removal of a lot of ops related to loading and saving the variables.
Args:
sess: Active TensorFlow session containing the variables.
input_graph_def: GraphDef object holding the network.
output_node_names: List of name strings for the result nodes of the graph.
variable_names_whitelist: The set of variable names to convert (by default,
all variables are converted).
variable_names_blacklist: The set of variable names to omit converting
to constants.
Returns:
GraphDef containing a simplified version of the original.
"""
get_input_name = lambda node, index=0: node.input[index].split(":")[0]
def create_const_op(node_name, dtype, data, data_shape=None):
"""Creates a Const op."""
output_node = node_def_pb2.NodeDef()
output_node.op = "Const"
output_node.name = node_name
output_node.attr["dtype"].CopyFrom(dtype)
output_node.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(
tensor=tensor_util.make_tensor_proto(
data, dtype=dtype.type, shape=data_shape)))
return output_node
# This graph only includes the nodes needed to evaluate the output nodes, and
# removes unneeded nodes like those involved in saving and assignment.
inference_graph = extract_sub_graph(input_graph_def, output_node_names)
# Identify the ops in the graph.
map_name_to_node = {
node.name: node for node in inference_graph.node
}
# Get list of variables.
variable_names = []
variable_dict_names = []
resource_op_types = {}
for node in inference_graph.node:
if node.op in ["Variable", "VariableV2", "VarHandleOp"]:
variable_name = node.name
if ((variable_names_whitelist is not None and
variable_name not in variable_names_whitelist) or
(variable_names_blacklist is not None and
variable_name in variable_names_blacklist)):
continue
variable_dict_names.append(variable_name)
if node.op == "VarHandleOp":
variable_names.append(variable_name + "/Read/ReadVariableOp:0")
else:
variable_names.append(variable_name + ":0")
elif node.op in ["ReadVariableOp", "ResourceGather"]:
# There can be one or more Identity or control flow ops in between the
# ReadVariableOp and VarHandleOp. Store the ops with the associated
# dtypes.
source_op_names = [get_input_name(node)]
while (source_op_names and map_name_to_node[source_op_names[0]].op in
_CONTROL_FLOW_OP_NAMES_OR_IDENTITY):
source_op_name = source_op_names.pop()
if source_op_name not in resource_op_types:
resource_op_types[source_op_name] = node.attr["dtype"]
source_op_names.append(
get_input_name(map_name_to_node[source_op_name]))
if map_name_to_node[source_op_name].op == "Merge":
merge_resource_name = get_input_name(
map_name_to_node[source_op_name], index=1)
if merge_resource_name not in resource_op_types:
resource_op_types[merge_resource_name] = node.attr["dtype"]
source_op_names.append(
get_input_name(map_name_to_node[merge_resource_name]))
for source_node in source_op_names:
if map_name_to_node[source_node].op != "VarHandleOp":
raise ValueError("Cannot find the variable that is an input "
"to the ReadVariableOp.")
# Gets map of variables and the associated data.
if variable_names:
returned_variables = sess.run(variable_names)
else:
returned_variables = []
variables_data_map = dict(zip(variable_dict_names, returned_variables))
logging.info("Froze %d variables.", len(returned_variables))
# Reconstruct the graph with constants in place of variables.
output_graph_def = graph_pb2.GraphDef()
how_many_converted = 0
for input_node in inference_graph.node:
output_node = node_def_pb2.NodeDef()
if input_node.name in variables_data_map:
data = variables_data_map[input_node.name]
output_node = create_const_op(input_node.name, input_node.attr["dtype"],
data, data.shape)
how_many_converted += 1
elif input_node.name in resource_op_types:
# Converts the type of the ops between the ReadVariableOp and VarHandleOp
# from RESOURCE_DT to the appropriate type based on the input they are
# referencing. Do not copy shapes due to incorrect shape info.
output_node.op = input_node.op
output_node.name = input_node.name
for in_node in input_node.input:
output_node.input.append(in_node)
for attr_name in input_node.attr:
if str(attr_name) != "_output_shapes":
output_node.attr[attr_name].CopyFrom(input_node.attr[attr_name])
output_node.attr["T"].CopyFrom(resource_op_types[input_node.name])
elif input_node.op == "ReadVariableOp":
# The first branch converts all VarHandleOps of ResourceVariables to
# constants, so we need to convert the associated ReadVariableOps to
# Identity ops.
output_node.op = "Identity"
output_node.name = input_node.name
output_node.input.extend([input_node.input[0]])
output_node.attr["T"].CopyFrom(input_node.attr["dtype"])
if "_class" in input_node.attr:
output_node.attr["_class"].CopyFrom(input_node.attr["_class"])
elif input_node.op == "ResourceGather":
# The first branch converts all VarHandleOps of ResourceGather to
# constants, so we need to convert the associated ResourceGather to Gather
# ops with a Const axis feeding into it.
if input_node.attr["batch_dims"].i != 0:
raise ValueError("batch_dims != 0 is not supported by freeze_graph.")
axis_data = input_node.attr["batch_dims"].i
axis_node_name = input_node.name + "/axis"
axis_dtype = input_node.attr["Tindices"]
output_axis_node = create_const_op(axis_node_name, axis_dtype, axis_data)
output_graph_def.node.extend([output_axis_node])
output_node.op = "GatherV2"
output_node.name = input_node.name
output_node.input.extend(
[input_node.input[0], input_node.input[1], axis_node_name])
output_node.attr["Tparams"].CopyFrom(input_node.attr["dtype"])
output_node.attr["Tindices"].CopyFrom(input_node.attr["Tindices"])
output_node.attr["Taxis"].CopyFrom(axis_dtype)
if "_class" in input_node.attr:
output_node.attr["_class"].CopyFrom(input_node.attr["_class"])
else:
output_node.CopyFrom(input_node)
output_graph_def.node.extend([output_node])
output_graph_def.library.CopyFrom(inference_graph.library)
logging.info("Converted %d variables to const ops.", how_many_converted)
return output_graph_def
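# A minimal freezing sketch (editor's illustration; the output node name is
# hypothetical). Call with an active session whose variables are initialized;
# the returned bytes are a self-contained GraphDef suitable for a .pb file.
def _example_freeze(sess, output_node_name="logits"):
  graph_def = sess.graph.as_graph_def()
  frozen = convert_variables_to_constants(sess, graph_def, [output_node_name])
  return frozen.SerializeToString()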
@deprecation.deprecated(
date=None,
instructions="Use `tf.compat.v1.graph_util.remove_training_nodes`")
@tf_export(v1=["graph_util.remove_training_nodes"])
def remove_training_nodes(input_graph, protected_nodes=None):
"""Prunes out nodes that aren't needed for inference.
There are nodes like Identity and CheckNumerics that are only useful
during training, and can be removed in graphs that will be used for
nothing but inference. Here we identify and remove them, returning an
equivalent graph. To be specific, CheckNumerics nodes are always removed, and
Identity nodes that aren't involved in control edges are spliced out so that
their input and outputs are directly connected.
Args:
input_graph: Model to analyze and prune.
protected_nodes: An optional list of names of nodes to be kept
unconditionally. This is for example useful to preserve Identity output
nodes.
Returns:
A list of nodes with the unnecessary ones removed.
"""
if not protected_nodes:
protected_nodes = []
types_to_remove = {"CheckNumerics": True}
input_nodes = input_graph.node
names_to_remove = {}
for node in input_nodes:
if node.op in types_to_remove and node.name not in protected_nodes:
names_to_remove[node.name] = True
nodes_after_removal = []
for node in input_nodes:
if node.name in names_to_remove:
continue
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(node)
input_before_removal = node.input
del new_node.input[:]
for full_input_name in input_before_removal:
input_name = re.sub(r"^\^", "", full_input_name)
if input_name in names_to_remove:
continue
new_node.input.append(full_input_name)
nodes_after_removal.append(new_node)
types_to_splice = {"Identity": True}
control_input_names = set()
node_names_with_control_input = set()
for node in nodes_after_removal:
for node_input in node.input:
if "^" in node_input:
control_input_names.add(node_input.replace("^", ""))
node_names_with_control_input.add(node.name)
names_to_splice = {}
for node in nodes_after_removal:
if node.op in types_to_splice and node.name not in protected_nodes:
# We don't want to remove nodes that have control edge inputs, because
# they might be involved in subtle dependency issues that removing them
# will jeopardize.
if node.name not in node_names_with_control_input:
names_to_splice[node.name] = node.input[0]
# We also don't want to remove nodes which are used as control edge inputs.
names_to_splice = {name: value for name, value in names_to_splice.items()
if name not in control_input_names}
nodes_after_splicing = []
for node in nodes_after_removal:
if node.name in names_to_splice:
continue
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(node)
input_before_removal = node.input
del new_node.input[:]
for full_input_name in input_before_removal:
input_name = re.sub(r"^\^", "", full_input_name)
while input_name in names_to_splice:
full_input_name = names_to_splice[input_name]
input_name = re.sub(r"^\^", "", full_input_name)
new_node.input.append(full_input_name)
nodes_after_splicing.append(new_node)
output_graph = graph_pb2.GraphDef()
output_graph.node.extend(nodes_after_splicing)
return output_graph
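# A minimal pruning sketch (editor's illustration; the protected node name is
# hypothetical): strip CheckNumerics nodes and splice out pass-through
# Identity nodes, but keep "output" even if it is itself an Identity.
def _example_prune(graph_def):
  return remove_training_nodes(graph_def, protected_nodes=["output"])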
# ==== End of tensorflow/python/framework/graph_util_impl.py (tensorflow-r1.15.5-nv23.03) ====
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that the contrib module shows up properly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
class ContribTest(test.TestCase):
def testContrib(self):
# pylint: disable=g-import-not-at-top
import tensorflow as tf
_ = tf.contrib.layers # `tf.contrib` is loaded lazily on first use.
assert tf_inspect.ismodule(tf.contrib)
def testLayers(self):
# pylint: disable=g-import-not-at-top
import tensorflow as tf
assert tf_inspect.ismodule(tf.contrib.layers)
def testLinearOptimizer(self):
# pylint: disable=g-import-not-at-top
import tensorflow as tf
assert tf_inspect.ismodule(tf.contrib.linear_optimizer)
if __name__ == '__main__':
test.main()
# ==== End of tensorflow/python/framework/contrib_test.py (tensorflow-r1.15.5-nv23.03) ====
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exception types for TensorFlow errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import traceback
import warnings
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import error_interpolation
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import tf_stack
from tensorflow.python.util.tf_export import tf_export
def _compact_stack_trace(op):
"""Returns a traceback for `op` with common file prefixes stripped."""
compact_traces = []
common_prefix = error_interpolation.traceback_files_common_prefix([[op]])
for frame in op.traceback:
frame = list(frame)
filename = frame[tf_stack.TB_FILENAME]
if filename.startswith(common_prefix):
filename = filename[len(common_prefix):]
frame[tf_stack.TB_FILENAME] = filename
compact_traces.append(tuple(frame))
return compact_traces
class InaccessibleTensorError(ValueError):
pass
class OperatorNotAllowedInGraphError(TypeError):
pass
@tf_export("errors.OpError", v1=["errors.OpError", "OpError"])
@deprecation.deprecated_endpoints("OpError")
class OpError(Exception):
"""A generic error that is raised when TensorFlow execution fails.
Whenever possible, the session will raise a more specific subclass
of `OpError` from the `tf.errors` module.
"""
def __init__(self, node_def, op, message, error_code):
"""Creates a new `OpError` indicating that a particular op failed.
Args:
node_def: The `node_def_pb2.NodeDef` proto representing the op that
failed, if known; otherwise None.
op: The `ops.Operation` that failed, if known; otherwise None.
message: The message string describing the failure.
error_code: The `error_codes_pb2.Code` describing the error.
"""
super(OpError, self).__init__()
self._node_def = node_def
self._op = op
self._message = message
self._error_code = error_code
def __reduce__(self):
    # Allow the subclasses to accept fewer arguments in their __init__.
init_argspec = tf_inspect.getargspec(self.__class__.__init__)
args = tuple(getattr(self, arg) for arg in init_argspec.args[1:])
return self.__class__, args
@property
def message(self):
"""The error message that describes the error."""
return self._message
@property
def op(self):
"""The operation that failed, if known.
*N.B.* If the failed op was synthesized at runtime, e.g. a `Send`
or `Recv` op, there will be no corresponding
`tf.Operation`
object. In that case, this will return `None`, and you should
instead use the `tf.errors.OpError.node_def` to
discover information about the op.
Returns:
The `Operation` that failed, or None.
"""
return self._op
@property
def error_code(self):
"""The integer error code that describes the error."""
return self._error_code
@property
def node_def(self):
"""The `NodeDef` proto representing the op that failed."""
return self._node_def
def __str__(self):
if self._op is not None:
output = ["%s\n\nOriginal stack trace for %r:\n" % (self.message,
self._op.name,)]
curr_traceback_list = traceback.format_list(
_compact_stack_trace(self._op))
output.extend(curr_traceback_list)
# pylint: disable=protected-access
original_op = self._op._original_op
# pylint: enable=protected-access
while original_op is not None:
output.append(
"\n...which was originally created as op %r, defined at:\n"
% (original_op.name,))
prev_traceback_list = curr_traceback_list
curr_traceback_list = traceback.format_list(
_compact_stack_trace(original_op))
# Attempt to elide large common subsequences of the subsequent
# stack traces.
#
# TODO(mrry): Consider computing the actual longest common subsequence.
is_eliding = False
elide_count = 0
last_elided_line = None
for line, line_in_prev in zip(curr_traceback_list, prev_traceback_list):
if line == line_in_prev:
if is_eliding:
elide_count += 1
last_elided_line = line
else:
output.append(line)
is_eliding = True
elide_count = 0
else:
if is_eliding:
if elide_count > 0:
output.extend(
["[elided %d identical lines from previous traceback]\n"
% (elide_count - 1,), last_elided_line])
is_eliding = False
            output.append(line)
# pylint: disable=protected-access
original_op = original_op._original_op
# pylint: enable=protected-access
return "".join(output)
else:
return self.message
OK = error_codes_pb2.OK
tf_export("errors.OK").export_constant(__name__, "OK")
CANCELLED = error_codes_pb2.CANCELLED
tf_export("errors.CANCELLED").export_constant(__name__, "CANCELLED")
UNKNOWN = error_codes_pb2.UNKNOWN
tf_export("errors.UNKNOWN").export_constant(__name__, "UNKNOWN")
INVALID_ARGUMENT = error_codes_pb2.INVALID_ARGUMENT
tf_export("errors.INVALID_ARGUMENT").export_constant(__name__,
"INVALID_ARGUMENT")
DEADLINE_EXCEEDED = error_codes_pb2.DEADLINE_EXCEEDED
tf_export("errors.DEADLINE_EXCEEDED").export_constant(__name__,
"DEADLINE_EXCEEDED")
NOT_FOUND = error_codes_pb2.NOT_FOUND
tf_export("errors.NOT_FOUND").export_constant(__name__, "NOT_FOUND")
ALREADY_EXISTS = error_codes_pb2.ALREADY_EXISTS
tf_export("errors.ALREADY_EXISTS").export_constant(__name__, "ALREADY_EXISTS")
PERMISSION_DENIED = error_codes_pb2.PERMISSION_DENIED
tf_export("errors.PERMISSION_DENIED").export_constant(__name__,
"PERMISSION_DENIED")
UNAUTHENTICATED = error_codes_pb2.UNAUTHENTICATED
tf_export("errors.UNAUTHENTICATED").export_constant(__name__, "UNAUTHENTICATED")
RESOURCE_EXHAUSTED = error_codes_pb2.RESOURCE_EXHAUSTED
tf_export("errors.RESOURCE_EXHAUSTED").export_constant(__name__,
"RESOURCE_EXHAUSTED")
FAILED_PRECONDITION = error_codes_pb2.FAILED_PRECONDITION
tf_export("errors.FAILED_PRECONDITION").export_constant(__name__,
"FAILED_PRECONDITION")
ABORTED = error_codes_pb2.ABORTED
tf_export("errors.ABORTED").export_constant(__name__, "ABORTED")
OUT_OF_RANGE = error_codes_pb2.OUT_OF_RANGE
tf_export("errors.OUT_OF_RANGE").export_constant(__name__, "OUT_OF_RANGE")
UNIMPLEMENTED = error_codes_pb2.UNIMPLEMENTED
tf_export("errors.UNIMPLEMENTED").export_constant(__name__, "UNIMPLEMENTED")
INTERNAL = error_codes_pb2.INTERNAL
tf_export("errors.INTERNAL").export_constant(__name__, "INTERNAL")
UNAVAILABLE = error_codes_pb2.UNAVAILABLE
tf_export("errors.UNAVAILABLE").export_constant(__name__, "UNAVAILABLE")
DATA_LOSS = error_codes_pb2.DATA_LOSS
tf_export("errors.DATA_LOSS").export_constant(__name__, "DATA_LOSS")
# pylint: disable=line-too-long
@tf_export("errors.CancelledError")
class CancelledError(OpError):
"""Raised when an operation or step is cancelled.
  For example, a long-running operation (e.g.
  `tf.QueueBase.enqueue`) may be
  cancelled by running another operation (e.g.
  `tf.QueueBase.close`),
  or by `tf.Session.close`.
A step that is running such a long-running operation will fail by raising
`CancelledError`.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `CancelledError`."""
super(CancelledError, self).__init__(node_def, op, message, CANCELLED)
# pylint: enable=line-too-long
@tf_export("errors.UnknownError")
class UnknownError(OpError):
"""Unknown error.
An example of where this error may be returned is if a Status value
received from another address space belongs to an error-space that
is not known to this address space. Also errors raised by APIs that
do not return enough error information may be converted to this
error.
@@__init__
"""
def __init__(self, node_def, op, message, error_code=UNKNOWN):
"""Creates an `UnknownError`."""
super(UnknownError, self).__init__(node_def, op, message, error_code)
@tf_export("errors.InvalidArgumentError")
class InvalidArgumentError(OpError):
"""Raised when an operation receives an invalid argument.
  This may occur, for example, if an operation receives an input
tensor that has an invalid value or shape. For example, the
`tf.matmul` op will raise this
error if it receives an input that is not a matrix, and the
`tf.reshape` op will raise
this error if the new shape does not match the number of elements in the input
tensor.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `InvalidArgumentError`."""
super(InvalidArgumentError, self).__init__(node_def, op, message,
INVALID_ARGUMENT)
@tf_export("errors.DeadlineExceededError")
class DeadlineExceededError(OpError):
"""Raised when a deadline expires before an operation could complete.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `DeadlineExceededError`."""
super(DeadlineExceededError, self).__init__(node_def, op, message,
DEADLINE_EXCEEDED)
@tf_export("errors.NotFoundError")
class NotFoundError(OpError):
"""Raised when a requested entity (e.g., a file or directory) was not found.
For example, running the
`tf.WholeFileReader.read`
operation could raise `NotFoundError` if it receives the name of a file that
does not exist.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `NotFoundError`."""
super(NotFoundError, self).__init__(node_def, op, message, NOT_FOUND)
@tf_export("errors.AlreadyExistsError")
class AlreadyExistsError(OpError):
"""Raised when an entity that we attempted to create already exists.
For example, running an operation that saves a file
(e.g. `tf.train.Saver.save`)
could potentially raise this exception if an explicit filename for an
existing file was passed.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `AlreadyExistsError`."""
super(AlreadyExistsError, self).__init__(node_def, op, message,
ALREADY_EXISTS)
@tf_export("errors.PermissionDeniedError")
class PermissionDeniedError(OpError):
"""Raised when the caller does not have permission to run an operation.
For example, running the
`tf.WholeFileReader.read`
operation could raise `PermissionDeniedError` if it receives the name of a
file for which the user does not have the read file permission.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `PermissionDeniedError`."""
super(PermissionDeniedError, self).__init__(node_def, op, message,
PERMISSION_DENIED)
@tf_export("errors.UnauthenticatedError")
class UnauthenticatedError(OpError):
"""The request does not have valid authentication credentials.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnauthenticatedError`."""
super(UnauthenticatedError, self).__init__(node_def, op, message,
UNAUTHENTICATED)
@tf_export("errors.ResourceExhaustedError")
class ResourceExhaustedError(OpError):
"""Some resource has been exhausted.
For example, this error might be raised if a per-user quota is
exhausted, or perhaps the entire file system is out of space.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `ResourceExhaustedError`."""
super(ResourceExhaustedError, self).__init__(node_def, op, message,
RESOURCE_EXHAUSTED)
@tf_export("errors.FailedPreconditionError")
class FailedPreconditionError(OpError):
"""Operation was rejected because the system is not in a state to execute it.
This exception is most commonly raised when running an operation
that reads a `tf.Variable`
before it has been initialized.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `FailedPreconditionError`."""
super(FailedPreconditionError, self).__init__(node_def, op, message,
FAILED_PRECONDITION)
@tf_export("errors.AbortedError")
class AbortedError(OpError):
"""The operation was aborted, typically due to a concurrent action.
For example, running a
`tf.QueueBase.enqueue`
operation may raise `AbortedError` if a
`tf.QueueBase.close` operation
previously ran.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `AbortedError`."""
super(AbortedError, self).__init__(node_def, op, message, ABORTED)
@tf_export("errors.OutOfRangeError")
class OutOfRangeError(OpError):
"""Raised when an operation iterates past the valid input range.
This exception is raised in "end-of-file" conditions, such as when a
`tf.QueueBase.dequeue`
operation is blocked on an empty queue, and a
`tf.QueueBase.close`
operation executes.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `OutOfRangeError`."""
super(OutOfRangeError, self).__init__(node_def, op, message,
OUT_OF_RANGE)
@tf_export("errors.UnimplementedError")
class UnimplementedError(OpError):
"""Raised when an operation has not been implemented.
  Some operations may raise this error when passed otherwise-valid
  arguments that they do not currently support. For example, running
the `tf.nn.max_pool2d` operation
would raise this error if pooling was requested on the batch dimension,
because this is not yet supported.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnimplementedError`."""
super(UnimplementedError, self).__init__(node_def, op, message,
UNIMPLEMENTED)
@tf_export("errors.InternalError")
class InternalError(OpError):
"""Raised when the system experiences an internal error.
This exception is raised when some invariant expected by the runtime
has been broken. Catching this exception is not recommended.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `InternalError`."""
super(InternalError, self).__init__(node_def, op, message, INTERNAL)
@tf_export("errors.UnavailableError")
class UnavailableError(OpError):
"""Raised when the runtime is currently unavailable.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnavailableError`."""
super(UnavailableError, self).__init__(node_def, op, message,
UNAVAILABLE)
@tf_export("errors.DataLossError")
class DataLossError(OpError):
"""Raised when unrecoverable data loss or corruption is encountered.
For example, this may be raised by running a
`tf.WholeFileReader.read`
operation, if the file is truncated while it is being read.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `DataLossError`."""
super(DataLossError, self).__init__(node_def, op, message, DATA_LOSS)
_CODE_TO_EXCEPTION_CLASS = {
CANCELLED: CancelledError,
UNKNOWN: UnknownError,
INVALID_ARGUMENT: InvalidArgumentError,
DEADLINE_EXCEEDED: DeadlineExceededError,
NOT_FOUND: NotFoundError,
ALREADY_EXISTS: AlreadyExistsError,
PERMISSION_DENIED: PermissionDeniedError,
UNAUTHENTICATED: UnauthenticatedError,
RESOURCE_EXHAUSTED: ResourceExhaustedError,
FAILED_PRECONDITION: FailedPreconditionError,
ABORTED: AbortedError,
OUT_OF_RANGE: OutOfRangeError,
UNIMPLEMENTED: UnimplementedError,
INTERNAL: InternalError,
UNAVAILABLE: UnavailableError,
DATA_LOSS: DataLossError,
}
c_api.PyExceptionRegistry_Init(_CODE_TO_EXCEPTION_CLASS)
_EXCEPTION_CLASS_TO_CODE = {
class_: code for code, class_ in _CODE_TO_EXCEPTION_CLASS.items()}
@tf_export(v1=["errors.exception_type_from_error_code"])
def exception_type_from_error_code(error_code):
return _CODE_TO_EXCEPTION_CLASS[error_code]
@tf_export(v1=["errors.error_code_from_exception_type"])
def error_code_from_exception_type(cls):
try:
return _EXCEPTION_CLASS_TO_CODE[cls]
except KeyError:
warnings.warn("Unknown class exception")
return UnknownError(None, None, "Unknown class exception", None)
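# A minimal sketch (editor's illustration): for every registered class, the
# two lookups above are inverses of each other.
def _example_code_roundtrip():
  assert exception_type_from_error_code(NOT_FOUND) is NotFoundError
  assert error_code_from_exception_type(NotFoundError) == NOT_FOUND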
def _make_specific_exception(node_def, op, message, error_code):
try:
exc_type = exception_type_from_error_code(error_code)
return exc_type(node_def, op, message)
except KeyError:
warnings.warn("Unknown error code: %d" % error_code)
return UnknownError(node_def, op, message, error_code)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
# TODO(b/77295559): expand use of TF_Status* SWIG typemap and deprecate this.
@tf_export(v1=["errors.raise_exception_on_not_ok_status"]) # pylint: disable=invalid-name
class raise_exception_on_not_ok_status(object):
"""Context manager to check for C API status."""
def __enter__(self):
self.status = c_api_util.ScopedTFStatus()
return self.status.status
def __exit__(self, type_arg, value_arg, traceback_arg):
try:
if c_api.TF_GetCode(self.status.status) != 0:
raise _make_specific_exception(
None, None,
compat.as_text(c_api.TF_Message(self.status.status)),
c_api.TF_GetCode(self.status.status))
      # Delete the underlying status object from memory; otherwise it stays
      # alive, since the traceback created by the raise above holds a
      # reference to it.
finally:
del self.status
return False # False values do not suppress exceptions
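# A minimal handling sketch (editor's illustration; `run_fn` is a hypothetical
# callable that executes a step): catch the most specific subclass first and
# fall back to the generic OpError.
def _example_handle(run_fn):
  try:
    return run_fn()
  except NotFoundError as e:
    return "missing input: %s" % e.message
  except OpError as e:
    return "op failed with code %d: %s" % (e.error_code, e.message)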
# ==== End of tensorflow/python/framework/errors_impl.py (tensorflow-r1.15.5-nv23.03) ====
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global registry for OpDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import op_def_pb2
_registered_ops = {}
def register_op_list(op_list):
"""Register all the ops in an op_def_pb2.OpList."""
if not isinstance(op_list, op_def_pb2.OpList):
raise TypeError("%s is %s, not an op_def_pb2.OpList" %
(op_list, type(op_list)))
for op_def in op_list.op:
if op_def.name in _registered_ops:
if _registered_ops[op_def.name] != op_def:
raise ValueError(
"Registered op_def for %s (%s) not equal to op_def to register (%s)"
% (op_def.name, _registered_ops[op_def.name], op_def))
else:
_registered_ops[op_def.name] = op_def
def get_registered_ops():
"""Returns a dictionary mapping names to OpDefs."""
return _registered_ops
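# A minimal usage sketch (editor's illustration; `serialized_op_list` is a
# hypothetical serialized op_def_pb2.OpList): re-registering an identical
# op_def is tolerated, so this is safe to call more than once.
def _example_register(serialized_op_list):
  op_list = op_def_pb2.OpList()
  op_list.ParseFromString(serialized_op_list)
  register_op_list(op_list)
  return get_registered_ops()  # Now includes every op in `op_list`.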
# ==== End of tensorflow/python/framework/op_def_registry.py (tensorflow-r1.15.5-nv23.03) ====
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import registry
from tensorflow.python.platform import test
def bar():
pass
class RegistryTest(test.TestCase, parameterized.TestCase):
class Foo(object):
pass
# Test the registry basics on both classes (Foo) and functions (bar).
@parameterized.parameters([Foo, bar])
def testRegistryBasics(self, candidate):
myreg = registry.Registry('testRegistry')
with self.assertRaises(LookupError):
myreg.lookup('testKey')
myreg.register(candidate)
self.assertEqual(myreg.lookup(candidate.__name__), candidate)
myreg.register(candidate, 'testKey')
self.assertEqual(myreg.lookup('testKey'), candidate)
self.assertEqual(
sorted(myreg.list()), sorted(['testKey', candidate.__name__]))
def testDuplicate(self):
myreg = registry.Registry('testbar')
myreg.register(bar, 'Bar')
with self.assertRaisesRegexp(
KeyError, r'Registering two testbar with name \'Bar\'! '
r'\(Previous registration was in [^ ]+ .*.py:[0-9]+\)'):
myreg.register(bar, 'Bar')
if __name__ == '__main__':
test.main()
# ==== End of tensorflow/python/framework/registry_test.py (tensorflow-r1.15.5-nv23.03) ====
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Type specifications for TensorFlow APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# Use LazyLoader to avoid circular dependencies.
tensor_spec = LazyLoader(
"tensor_spec", globals(),
"tensorflow.python.framework.tensor_spec")
ops = LazyLoader(
"ops", globals(),
"tensorflow.python.framework.ops")
@tf_export("TypeSpec", v1=["TypeSpec", "data.experimental.Structure"])
@six.add_metaclass(abc.ABCMeta)
class TypeSpec(object):
"""Specifies a TensorFlow value type.
A `tf.TypeSpec` provides metadata describing an object accepted or returned
by TensorFlow APIs. Concrete subclasses, such as `tf.TensorSpec` and
`tf.RaggedTensorSpec`, are used to describe different value types.
For example, `tf.function`'s `input_signature` argument accepts a list
(or nested structure) of `TypeSpec`s.
Creating new subclasses of TypeSpec (outside of TensorFlow core) is not
currently supported. In particular, we may make breaking changes to the
private methods and properties defined by this base class.
"""
# === Subclassing ===
#
# Each `TypeSpec` subclass must define:
#
# * A "component encoding" for values.
# * A "serialization" for types.
#
# The component encoding for a value is a nested structure of `tf.Tensor`
# or `CompositeTensor` that can be used by the `TypeSpec` to reconstruct
# the value. Each individual `TypeSpec` must use the same nested structure
# for all values -- this structure is defined by the `component_specs`
# attribute. Decomposing values into components, and reconstructing them
# from those components, should be inexpensive. In particular, it should
# *not* require any TensorFlow ops.
#
# The serialization for a `TypeSpec` is a nested tuple of values that can
# be used to reconstruct the `TypeSpec`. See the documentation for
# `_serialize()` for more information.
__slots__ = []
@abc.abstractproperty
def value_type(self):
"""The Python type for values that are compatible with this TypeSpec."""
raise NotImplementedError("%s.value_type" % type(self).__name__)
def is_compatible_with(self, spec_or_value):
"""Returns true if `spec_or_value` is compatible with this TypeSpec."""
# === Subclassing ===
# If not overridden by subclasses, the default behavior is to convert
# `spec_or_value` to a `TypeSpec` (if it isn't already); and then to
# consider two `TypeSpec`s compatible if they have the same type, and
# the values returned by `_serialize` are compatible (where
# `tf.TensorShape`, `tf.TensorSpec`, and `tf.DType` are checked for
# compatibility using their `is_compatible_with` method; and all other
# types are considered compatible if they are equal).
if not isinstance(spec_or_value, TypeSpec):
spec_or_value = type_spec_from_value(spec_or_value)
if type(self) is not type(spec_or_value):
return False
return self.__is_compatible(self._serialize(),
spec_or_value._serialize()) # pylint: disable=protected-access
def most_specific_compatible_type(self, other):
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
"""
# === Subclassing ===
# If not overridden by a subclass, the default behavior is to raise a
# `ValueError` if `self` and `other` have different types, or if their type
# serializations differ by anything other than `TensorShape`s. Otherwise,
# the two type serializations are combined (using
# `most_specific_compatible_shape` to combine `TensorShape`s), and the
# result is used to construct and return a new `TypeSpec`.
if type(self) is not type(other):
raise ValueError("No TypeSpec is compatible with both %s and %s" %
(self, other))
merged = self.__most_specific_compatible_type_serialization(
self._serialize(), other._serialize()) # pylint: disable=protected-access
return self._deserialize(merged)
# === Component encoding for values ===
@abc.abstractmethod
def _to_components(self, value):
"""Encodes `value` as a nested structure of `Tensor` or `CompositeTensor`.
Args:
value: A value compatible with this `TypeSpec`. (Caller is responsible
for ensuring compatibility.)
Returns:
A nested structure of `tf.Tensor` or `tf.CompositeTensor` compatible with
`self._component_specs`, which can be used to reconstruct `value`.
"""
# === Subclassing ===
# This method must be inexpensive (do not call TF ops).
raise NotImplementedError("%s._to_components()" % type(self).__name__)
@abc.abstractmethod
def _from_components(self, components):
"""Reconstructs a value from a nested structure of Tensor/CompositeTensor.
Args:
components: A nested structure of `tf.Tensor` or `tf.CompositeTensor`,
        compatible with `self._component_specs`. (Caller is responsible for
ensuring compatibility.)
Returns:
A value that is compatible with this `TypeSpec`.
"""
# === Subclassing ===
# This method must be inexpensive (do not call TF ops).
raise NotImplementedError("%s._from_components()" % type(self).__name__)
@abc.abstractproperty
def _component_specs(self):
"""A nested structure of TypeSpecs for this type's components.
Returns:
A nested structure describing the component encodings that are returned
by this TypeSpec's `_to_components` method. In particular, for a
TypeSpec `spec` and a compatible value `value`:
```
nest.map_structure(lambda t, c: assert t.is_compatible_with(c),
spec._component_specs, spec._to_components(value))
```
"""
raise NotImplementedError("%s._component_specs()" % type(self).__name__)
# === Tensor list encoding for values ===
def _to_tensor_list(self, value):
"""Encodes `value` as a flat list of `tf.Tensor`.
By default, this just flattens `self._to_components(value)` using
`nest.flatten`. However, subclasses may override this to return a
different tensor encoding for values. In particular, some subclasses
of `BatchableTypeSpec` override this method to return a "boxed" encoding
for values, which then can be batched or unbatched. See
`BatchableTypeSpec` for more details.
Args:
      value: A value compatible with this `TypeSpec`. (Caller is responsible
for ensuring compatibility.)
Returns:
A list of `tf.Tensor`, compatible with `self._flat_tensor_specs`, which
can be used to reconstruct `value`.
"""
return nest.flatten(self._to_components(value), expand_composites=True)
def _from_tensor_list(self, tensor_list):
"""Reconstructs a value from a flat list of `tf.Tensor`.
Args:
tensor_list: A flat list of `tf.Tensor`, compatible with
`self._flat_tensor_specs`.
Returns:
A value that is compatible with this `TypeSpec`.
Raises:
ValueError: If `tensor_list` is not compatible with
`self._flat_tensor_specs`.
"""
self.__check_tensor_list(tensor_list)
return self._from_compatible_tensor_list(tensor_list)
def _from_compatible_tensor_list(self, tensor_list):
"""Reconstructs a value from a compatible flat list of `tf.Tensor`.
Args:
tensor_list: A flat list of `tf.Tensor`, compatible with
`self._flat_tensor_specs`. (Caller is responsible for ensuring
compatibility.)
Returns:
A value that is compatible with this `TypeSpec`.
"""
return self._from_components(nest.pack_sequence_as(
self._component_specs, tensor_list, expand_composites=True))
@property
def _flat_tensor_specs(self):
"""A list of TensorSpecs compatible with self._to_tensor_list(v)."""
return nest.flatten(self._component_specs, expand_composites=True)
# === Serialization for types ===
@abc.abstractmethod
def _serialize(self):
"""Returns a nested tuple containing the state of this TypeSpec.
The serialization may contain the following value types: boolean,
integer, string, float, None, `TensorSpec`, `tf.TensorShape`, `tf.DType`,
`np.ndarray`, `TypeSpec`, and nested tuples, namedtuples, dicts, and
OrderedDicts of any of the above.
This method is used to provide default definitions for: equality
testing (__eq__, __ne__), hashing (__hash__), pickling (__reduce__),
string representation (__repr__), `self.is_compatible_with()`,
`self.most_specific_compatible_type()`, and protobuf serialization
(e.g. TensorInfo and StructuredValue).
"""
raise NotImplementedError("%s._serialize()" % type(self).__name__)
@classmethod
def _deserialize(cls, serialization):
"""Reconstructs a TypeSpec from a value returned by `serialize`."""
return cls(*serialization)
# === Operators ===
def __eq__(self, other):
# pylint: disable=protected-access
return (type(other) is type(self) and
self.__get_cmp_key() == other.__get_cmp_key())
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.__get_cmp_key())
def __reduce__(self):
return type(self), self._serialize()
def __repr__(self):
return "%s%r" % (type(self).__name__, self._serialize())
# === Legacy Output ===
# TODO(b/133606651) Document and/or deprecate the legacy_output methods.
# (These are used by tf.data.)
def _to_legacy_output_types(self):
raise NotImplementedError("%s._to_legacy_output_types()" %
type(self).__name__)
def _to_legacy_output_shapes(self):
raise NotImplementedError("%s._to_legacy_output_shapes()" %
type(self).__name__)
def _to_legacy_output_classes(self):
return self.value_type
# === Private Helper Methods ===
def __check_tensor_list(self, tensor_list):
expected = self._flat_tensor_specs
specs = [type_spec_from_value(t) for t in tensor_list]
if len(specs) != len(expected):
raise ValueError("Incompatible input: wrong number of tensors")
for i, (s1, s2) in enumerate(zip(specs, expected)):
if not s1.is_compatible_with(s2):
raise ValueError("Incompatible input: tensor %d (%s) is incompatible "
"with %s" % (i, tensor_list[i], s2))
def __get_cmp_key(self):
"""Returns a hashable eq-comparable key for `self`."""
# TODO(b/133606651): Decide whether to cache this value.
return (type(self), self.__make_cmp_key(self._serialize()))
def __make_cmp_key(self, value):
"""Converts `value` to a hashable key."""
if isinstance(value, (int, float, bool, dtypes.DType, TypeSpec)):
return value
if isinstance(value, compat.bytes_or_text_types):
return value
if value is None:
return value
if isinstance(value, dict):
return tuple([
tuple([self.__make_cmp_key(key),
self.__make_cmp_key(value[key])])
for key in sorted(value.keys())
])
if isinstance(value, tuple):
return tuple([self.__make_cmp_key(v) for v in value])
if isinstance(value, tensor_shape.TensorShape):
if value.ndims is None:
# Note: we include a type object in the tuple, to ensure we can't get
# false-positive matches (since users can't include type objects).
return (tensor_shape.TensorShape, None)
return (tensor_shape.TensorShape, tuple(value.as_list()))
if isinstance(value, np.ndarray):
return (np.ndarray, value.shape,
TypeSpec.__nested_list_to_tuple(value.tolist()))
raise ValueError("Unsupported value type %s returned by "
"%s._serialize" %
(type(value).__name__, type(self).__name__))
@staticmethod
def __nested_list_to_tuple(value):
"""Converts a nested list to a corresponding nested tuple."""
if isinstance(value, list):
return tuple(TypeSpec.__nested_list_to_tuple(v) for v in value)
return value
@staticmethod
def __is_compatible(a, b):
"""Returns true if the given type serializations compatible."""
if type(a) is not type(b):
return False
if isinstance(a, tuple):
return (len(a) == len(b) and
all(TypeSpec.__is_compatible(x, y) for (x, y) in zip(a, b)))
if isinstance(a, dict):
return (len(a) == len(b) and sorted(a.keys()) == sorted(b.keys()) and all(
TypeSpec.__is_compatible(a[k], b[k]) for k in a.keys()))
if isinstance(a, (TypeSpec, tensor_shape.TensorShape, dtypes.DType)):
return a.is_compatible_with(b)
return a == b
@staticmethod
def __most_specific_compatible_type_serialization(a, b):
"""Helper for most_specific_compatible_type.
Combines two type serializations as follows:
* If they are both tuples of the same length, then recursively combine
the respective tuple elements.
* If they are both dicts with the same keys, then recursively combine
the respective dict elements.
* If they are both TypeSpecs, then combine using
      TypeSpec.most_specific_compatible_type.
* If they are both TensorShapes, then combine using
TensorShape.most_specific_compatible_shape.
* If they are both TensorSpecs with the same dtype, then combine using
TensorShape.most_specific_compatible_shape to combine shapes.
* If they are equal, then return a.
* If none of the above, then raise a ValueError.
Args:
a: A serialized TypeSpec or nested component from a serialized TypeSpec.
b: A serialized TypeSpec or nested component from a serialized TypeSpec.
Returns:
A value with the same type and structure as `a` and `b`.
Raises:
ValueError: If `a` and `b` are incompatible.
"""
if type(a) is not type(b):
raise ValueError("Types are not compatible: %r vs %r" % (a, b))
if isinstance(a, tuple):
if len(a) != len(b):
raise ValueError("Types are not compatible: %r vs %r" % (a, b))
return tuple(TypeSpec.__most_specific_compatible_type_serialization(x, y)
for (x, y) in zip(a, b))
if isinstance(a, dict):
a_keys, b_keys = sorted(a.keys()), sorted(b.keys())
if len(a) != len(b) or a_keys != b_keys:
raise ValueError("Types are not compatible: %r vs %r" % (a, b))
return {
k: TypeSpec.__most_specific_compatible_type_serialization(a[k], b[k])
for k in a_keys
}
if isinstance(a, tensor_shape.TensorShape):
return a.most_specific_compatible_shape(b)
if isinstance(a, list):
raise AssertionError("_serialize() should not return list values.")
if isinstance(a, TypeSpec):
return a.most_specific_compatible_type(b)
if a != b:
raise ValueError("Types are not compatible: %r vs %r" % (a, b))
return a
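# A minimal compatibility sketch (editor's illustration): a TensorSpec with an
# unknown dimension is compatible with a fully specified one, and combining
# them keeps only the dimensions on which they agree.
def _example_compatibility():
  a = tensor_spec.TensorSpec([None, 3], dtypes.float32)
  b = tensor_spec.TensorSpec([2, 3], dtypes.float32)
  assert a.is_compatible_with(b)
  return a.most_specific_compatible_type(b)  # TensorSpec([None, 3], float32).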
class BatchableTypeSpec(TypeSpec):
"""TypeSpec with a batchable tensor encoding.
The batchable tensor encoding is a list of `tf.Tensor`s that supports
batching and unbatching. In particular, stacking (or unstacking)
values with the same `TypeSpec` must be equivalent to stacking (or
unstacking) each of their tensor lists. Unlike the component encoding
  (returned by `self._to_components()`), the batchable tensor encoding
may require using encoding/decoding ops.
If a subclass's batchable tensor encoding is not simply a flattened version
of the component encoding, then the subclass must override `_to_tensor_list`,
  `_from_tensor_list`, and `_flat_tensor_specs`.
"""
__slots__ = []
@abc.abstractmethod
def _batch(self, batch_size):
"""Returns a TypeSpec representing a batch of objects with this TypeSpec.
Args:
batch_size: An `int` representing the number of elements in a batch,
or `None` if the batch size may vary.
Returns:
A `TypeSpec` representing a batch of objects with this TypeSpec.
"""
raise NotImplementedError("%s._batch" % type(self).__name__)
@abc.abstractmethod
def _unbatch(self):
"""Returns a TypeSpec representing a single element this TypeSpec.
Returns:
A `TypeSpec` representing a single element of objects with this TypeSpec.
"""
raise NotImplementedError("%s._unbatch" % type(self).__name__)
def _to_batched_tensor_list(self, value):
"""Returns a tensor list encoding for value with rank>0."""
tensor_list = self._to_tensor_list(value)
if any(t.shape.ndims == 0 for t in tensor_list):
raise ValueError("Value %s has insufficient rank for batching." % value)
return tensor_list
def type_spec_from_value(value):
"""Returns a `TypeSpec` that represents the given `value`.
Args:
value: A value that can be accepted or returned by TensorFlow APIs.
Returns:
A `TypeSpec` that is compatible with `value`.
Raises:
TypeError: If a TypeSpec cannot be built for `value`, because its type
is not supported.
"""
spec = _type_spec_from_value(value)
if spec is not None:
return spec
# Fallback: try converting value to a tensor.
try:
tensor = ops.convert_to_tensor(value)
spec = _type_spec_from_value(tensor)
if spec is not None:
return spec
except (ValueError, TypeError) as e:
logging.vlog(
3, "Failed to convert %r to tensor: %s" % (type(value).__name__, e))
raise TypeError("Could not build a TypeSpec for %r with type %s" %
(value, type(value).__name__))
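# A minimal usage sketch (editor's illustration): Tensors map directly to
# TensorSpec, while a plain Python scalar goes through the convert_to_tensor
# fallback before a spec is derived.
def _example_spec_from_value():
  t = ops.convert_to_tensor([[1.0, 2.0]])
  return type_spec_from_value(t), type_spec_from_value(3.0)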
def _type_spec_from_value(value):
"""Returns a `TypeSpec` that represents the given `value`."""
if isinstance(value, ops.Tensor):
# Note: we do not include Tensor names when constructing TypeSpecs.
return tensor_spec.TensorSpec(value.shape, value.dtype)
if isinstance(value, composite_tensor.CompositeTensor):
return value._type_spec # pylint: disable=protected-access
# If `value` is a list and all of its elements can be represented by the same
# batchable type spec, then we can represent the entire list using a single
# type spec that captures the type accurately (unlike the `convert_to_tensor`
# fallback).
if isinstance(value, list) and value:
subspecs = [_type_spec_from_value(v) for v in value]
if isinstance(subspecs[0], BatchableTypeSpec):
merged_subspec = subspecs[0]
try:
for subspec in subspecs[1:]:
merged_subspec = merged_subspec.most_specific_compatible_type(subspec)
return merged_subspec._batch(len(subspecs)) # pylint: disable=protected-access
except (ValueError, TypeError):
pass # incompatible subspecs
for entry in reversed(_TYPE_CONVERSION_FUNCTION_REGISTRY):
type_object, converter_fn, allow_subclass = entry
if ((type(value) is type_object) or # pylint: disable=unidiomatic-typecheck
(allow_subclass and isinstance(value, type_object))):
return converter_fn(value)
return None
_TYPE_CONVERSION_FUNCTION_REGISTRY = []
def register_type_spec_from_value_converter(type_object, converter_fn,
allow_subclass=False):
"""Registers a function for converting values with a given type to TypeSpecs.
If multiple registered `type_object`s match a value, then the most recent
registration takes precedence. Custom converters should not be defined for
`CompositeTensor`s; use `CompositeTensor._type_spec` instead.
Args:
type_object: A Python `type` object representing the type of values
accepted by `converter_fn`.
converter_fn: A function that takes one argument (an instance of the
type represented by `type_object`) and returns a `TypeSpec`.
allow_subclass: If true, then use `isinstance(value, type_object)` to
check for matches. If false, then use `type(value) is type_object`.
"""
_, type_object = tf_decorator.unwrap(type_object)
_TYPE_CONVERSION_FUNCTION_REGISTRY.append(
(type_object, converter_fn, allow_subclass))
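# Illustrative sketch (not part of the original module): registering a
# converter for a hypothetical wrapper class so that `type_spec_from_value`
# recognizes it. `_TensorWrapper` and `_example_register_converter` are
# invented names; note that calling this mutates the global converter
# registry.
def _example_register_converter():
  class _TensorWrapper(object):

    def __init__(self, tensor):
      self.tensor = tensor

  register_type_spec_from_value_converter(
      _TensorWrapper,
      lambda value: type_spec_from_value(value.tensor),
      allow_subclass=True)
  wrapped = _TensorWrapper(ops.convert_to_tensor([1.0, 2.0]))
  return type_spec_from_value(wrapped)  # -> TensorSpec([2], float32)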
pywrap_tensorflow.RegisterType("TypeSpec", TypeSpec)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/type_spec.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests enabling eager execution at process level."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.platform import googletest
class OpsEnableAndDisableEagerTest(googletest.TestCase):
def setUp(self):
# Test that eager execution can be enabled.
ops.enable_eager_execution()
self.assertTrue(context.executing_eagerly())
# Calling enable eager execution a second time should not cause an error.
ops.enable_eager_execution()
self.assertTrue(context.executing_eagerly())
def tearDown(self):
# Test that eager execution can be disabled.
ops.disable_eager_execution()
self.assertFalse(context.executing_eagerly())
# Calling disable eager execution a second time should not cause an error.
ops.disable_eager_execution()
self.assertFalse(context.executing_eagerly())
if __name__ == '__main__':
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/ops_enable_eager_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Protobuf related tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class ProtoTest(test.TestCase):
# TODO(vrv): re-enable this test once we figure out how this can
# pass the pip install test (where the user is expected to have
# protobuf installed).
def _testLargeProto(self):
# create a constant of size > 64MB.
a = constant_op.constant(np.zeros([1024, 1024, 17]))
# Serialize the resulting graph def.
gdef = a.op.graph.as_graph_def()
serialized = gdef.SerializeToString()
unserialized = ops.Graph().as_graph_def()
# Deserialize back. Protobuf python library should support
# protos larger than 64MB.
unserialized.ParseFromString(serialized)
self.assertProtoEquals(unserialized, gdef)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/proto_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to represent a device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.tf_export import tf_export
# ==============================================================================
# == Global Implementation Details =============================================
# ==============================================================================
_STRING_TO_COMPONENTS_CACHE = {}
_COMPONENTS_TO_STRING_CACHE = {}
def _as_str_or_none(inp):
return None if inp is None else str(inp)
def _as_int_or_none(inp):
return None if inp is None else int(inp)
def _as_device_str_or_none(device_type):
# For backwards compatibility only, we support lowercase variants of
# cpu and gpu but turn them into uppercase here.
if device_type in ("cpu", "gpu"):
return device_type.upper()
return _as_str_or_none(device_type)
@tf_export("DeviceSpec", v1=[])
class DeviceSpecV2(object):
"""Represents a (possibly partial) specification for a TensorFlow device.
`DeviceSpec`s are used throughout TensorFlow to describe where state is stored
and computations occur. Using `DeviceSpec` allows you to parse device spec
strings to verify their validity, merge them or compose them programmatically.
Example:
```python
# Place the operations on device "GPU:0" in the "ps" job.
device_spec = DeviceSpec(job="ps", device_type="GPU", device_index=0)
with tf.device(device_spec):
# Both my_var and squared_var will be placed on /job:ps/device:GPU:0.
my_var = tf.Variable(..., name="my_variable")
squared_var = tf.square(my_var)
```
If a `DeviceSpec` is partially specified, it will be merged with other
`DeviceSpec`s according to the scope in which it is defined. `DeviceSpec`
components defined in inner scopes take precedence over those defined in
outer scopes.
```python
with tf.device(DeviceSpec(job="train", )):
with tf.device(DeviceSpec(job="ps", device_type="GPU", device_index=0):
# Nodes created here will be assigned to /job:ps/device:GPU:0.
with tf.device(DeviceSpec(device_type="GPU", device_index=1):
# Nodes created here will be assigned to /job:train/device:GPU:1.
```
A `DeviceSpec` consists of 5 components -- each of
which is optionally specified:
* Job: The job name.
* Replica: The replica index.
* Task: The task index.
* Device type: The device type string (e.g. "CPU" or "GPU").
* Device index: The device index.
"""
__slots__ = ("_job", "_replica", "_task", "_device_type", "_device_index",
"_as_string", "_hash")
def __init__(self, job=None, replica=None, task=None, device_type=None,
device_index=None):
"""Create a new `DeviceSpec` object.
Args:
job: string. Optional job name.
replica: int. Optional replica index.
task: int. Optional task index.
device_type: Optional device type string (e.g. "CPU" or "GPU")
device_index: int. Optional device index. If left
unspecified, device represents 'any' device_index.
"""
self._job = _as_str_or_none(job)
self._replica = _as_int_or_none(replica)
self._task = _as_int_or_none(task)
self._device_type = _as_device_str_or_none(device_type)
self._device_index = _as_int_or_none(device_index)
self._as_string = self._components_to_string(
job=self._job, replica=self._replica, task=self._task,
device_type=self._device_type, device_index=self._device_index)
self._hash = hash(self.to_string())
def to_string(self):
"""Return a string representation of this `DeviceSpec`.
Returns:
a string of the form
/job:<name>/replica:<id>/task:<id>/device:<device_type>:<id>.
"""
return self._as_string
@classmethod
def from_string(cls, spec):
"""Construct a `DeviceSpec` from a string.
Args:
spec: a string of the form
/job:<name>/replica:<id>/task:<id>/device:CPU:<id>
or
/job:<name>/replica:<id>/task:<id>/device:GPU:<id>
as cpu and gpu are mutually exclusive.
All entries are optional.
Returns:
A DeviceSpec.
"""
return cls(*cls._string_to_components(spec))
def parse_from_string(self, spec):
"""Parse a `DeviceSpec` name into its components.
2.x behavior change:
In TensorFlow 1.x, this function mutates its own state and returns itself.
In 2.x, DeviceSpecs are immutable, and this function will return a
DeviceSpec which contains the spec.
Recommended:
```
# my_spec and my_updated_spec are unrelated.
my_spec = tf.DeviceSpec.from_string("/CPU:0")
my_updated_spec = tf.DeviceSpec.from_string("/GPU:0")
with tf.device(my_updated_spec):
...
```
Will work in 1.x and 2.x (though deprecated in 2.x):
```
my_spec = tf.DeviceSpec.from_string("/CPU:0")
my_updated_spec = my_spec.parse_from_string("/GPU:0")
with tf.device(my_updated_spec):
...
```
Will NOT work in 2.x:
```
my_spec = tf.DeviceSpec.from_string("/CPU:0")
my_spec.parse_from_string("/GPU:0") # <== Will not update my_spec
with tf.device(my_spec):
...
```
In general, `DeviceSpec.from_string` should completely replace
`DeviceSpec.parse_from_string`, and `DeviceSpec.replace` should
completely replace setting attributes directly.
Args:
spec: an optional string of the form
/job:<name>/replica:<id>/task:<id>/device:CPU:<id>
or
/job:<name>/replica:<id>/task:<id>/device:GPU:<id>
as cpu and gpu are mutually exclusive.
All entries are optional.
Returns:
The `DeviceSpec`.
Raises:
ValueError: if the spec was not valid.
"""
return self.from_string(spec)
def make_merged_spec(self, dev):
"""Returns a new DeviceSpec which incorporates `dev`.
When combining specs, `dev` will take precedence over the current spec.
So for instance:
```
first_spec = tf.DeviceSpec(job=0, device_type="CPU")
second_spec = tf.DeviceSpec(device_type="GPU")
combined_spec = first_spec.make_merged_spec(second_spec)
```
is equivalent to:
```
combined_spec = tf.DeviceSpec(job=0, device_type="GPU")
```
Args:
dev: a `DeviceSpec`
Returns:
A new `DeviceSpec` which combines `self` and `dev`
"""
return self.__class__(*self._get_combined_properties(dev))
def replace(self, **kwargs):
"""Convenience method for making a new DeviceSpec by overriding fields.
For instance:
```
my_spec = DeviceSpec(job="my_job", device_type="CPU")
my_updated_spec = my_spec.replace(device_type="GPU")
my_other_spec = my_spec.replace(device_type=None)
```
Args:
**kwargs: This method takes the same args as the DeviceSpec constructor
Returns:
A DeviceSpec with the fields specified in kwargs overridden.
"""
init_kwargs = dict(
job=self.job, replica=self.replica, task=self.task,
device_type=self.device_type, device_index=self.device_index)
# Explicitly provided kwargs take precedence.
init_kwargs.update(kwargs)
return self.__class__(**init_kwargs)
@property
def job(self):
return self._job
@property
def replica(self):
return self._replica
@property
def task(self):
return self._task
@property
def device_type(self):
return self._device_type
@property
def device_index(self):
return self._device_index
def _get_combined_properties(self, dev):
"""Combine the current DeviceSpec with another DeviceSpec.
When combining specs, the properties of `dev` take priority over those of `self`.
Args:
dev: a `DeviceSpec`
Returns:
A tuple of (job, replica, task, device_type, device_index) which
represents the combination of self and dev.
"""
return (
dev.job if dev.job is not None else self.job,
dev.replica if dev.replica is not None else self.replica,
dev.task if dev.task is not None else self.task,
dev.device_type if dev.device_type is not None else self.device_type,
dev.device_index if dev.device_index is not None else self.device_index,
)
@staticmethod
def _string_to_components(spec=None):
"""Stateless portion of device spec string parsing.
Args:
spec: An optional string specifying a device specification.
Returns:
The parsed components of `spec`. Note that the result of this function
must go through attribute setters of DeviceSpec, and should therefore NOT
be used directly.
"""
cached_result = _STRING_TO_COMPONENTS_CACHE.get(spec)
if cached_result is not None:
return cached_result
raw_spec = spec # keep a copy of the original to update the cache
job, replica, task, device_type, device_index = None, None, None, None, None
spec = spec or ""
splits = [x.split(":") for x in spec.split("/")]
for y in splits:
ly = len(y)
if y:
# NOTE(taylorrobie): these will go through setters later.
if ly == 2 and y[0] == "job":
job = y[1]
elif ly == 2 and y[0] == "replica":
replica = y[1]
elif ly == 2 and y[0] == "task":
task = y[1]
elif ((ly == 1 or ly == 2) and
((y[0].upper() == "GPU") or (y[0].upper() == "CPU"))):
if device_type is not None:
raise ValueError("Cannot specify multiple device types: %s" % spec)
device_type = y[0].upper()
if ly == 2 and y[1] != "*":
device_index = int(y[1])
elif ly == 3 and y[0] == "device":
if device_type is not None:
raise ValueError("Cannot specify multiple device types: %s" % spec)
device_type = y[1]
if y[2] != "*":
device_index = int(y[2])
elif ly and y[0] != "": # pylint: disable=g-explicit-bool-comparison
raise ValueError("Unknown attribute: '%s' in '%s'" % (y[0], spec))
output = (job, replica, task, device_type, device_index)
_STRING_TO_COMPONENTS_CACHE[raw_spec] = output
return output
@staticmethod
def _components_to_string(job, replica, task, device_type, device_index):
"""Stateless portion of `to_string` (separated to allow caching)."""
key = (job, replica, task, device_type, device_index)
cached_result = _COMPONENTS_TO_STRING_CACHE.get(key)
if cached_result is not None:
return cached_result
output = []
if job is not None:
output.append("/job:" + job)
if replica is not None:
output.append("/replica:" + str(replica))
if task is not None:
output.append("/task:" + str(task))
if device_type is not None:
device_index_string = "*"
if device_index is not None:
# Unlike the others, device_index is stored as an int.
device_index_string = str(device_index)
output.append("/device:%s:%s" % (device_type, device_index_string))
output = "".join(output)
_COMPONENTS_TO_STRING_CACHE[key] = output
return output
def __eq__(self, other):
"""Checks if the `other` DeviceSpec is same as the current instance, eg have
same value for all the internal fields.
Args:
other: Another DeviceSpec
Returns:
`True` if `other` is also a DeviceSpec instance and has the same value as
the current instance; `False` otherwise.
"""
return (isinstance(other, self.__class__) and
self.to_string() == other.to_string())
def __hash__(self):
return self._hash
@tf_export(v1=["DeviceSpec"]) # pylint: disable=missing-docstring
class DeviceSpecV1(DeviceSpecV2):
__doc__ = DeviceSpecV2.__doc__
__slots__ = DeviceSpecV2.__slots__
@DeviceSpecV2.job.setter
def job(self, job):
self._job = _as_str_or_none(job)
self._as_string, self._hash = None, None
@DeviceSpecV2.replica.setter
def replica(self, replica):
self._replica = _as_int_or_none(replica)
self._as_string, self._hash = None, None
@DeviceSpecV2.task.setter
def task(self, task):
self._task = _as_int_or_none(task)
self._as_string, self._hash = None, None
@DeviceSpecV2.device_type.setter
def device_type(self, device_type):
self._device_type = _as_device_str_or_none(device_type)
self._as_string, self._hash = None, None
@DeviceSpecV2.device_index.setter
def device_index(self, device_index):
self._device_index = _as_int_or_none(device_index)
self._as_string, self._hash = None, None
def __hash__(self):
if self._hash is None:
self._hash = hash(self.to_string())
return self._hash
def to_string(self):
if self._as_string is None:
self._as_string = self._components_to_string(
job=self.job, replica=self.replica, task=self.task,
device_type=self.device_type, device_index=self.device_index)
return self._as_string
def parse_from_string(self, spec):
(self.job, self.replica, self.task, self.device_type, self.device_index
) = self._string_to_components(spec)
return self
def merge_from(self, dev):
"""Merge the properties of "dev" into this `DeviceSpec`.
Note: Will be removed in TensorFlow 2.x since DeviceSpecs will become
immutable.
Args:
dev: a `DeviceSpec`.
"""
(self.job, self.replica, self.task, self.device_type, self.device_index
) = self._get_combined_properties(dev)
# Use parent class docstrings for public methods.
to_string.__doc__ = DeviceSpecV2.to_string.__doc__
parse_from_string.__doc__ = DeviceSpecV2.parse_from_string.__doc__
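# Illustrative usage sketch (not part of the original module; the helper name
# `_example_device_spec_usage` is invented). V2 specs are immutable, so
# merging and replacing always produce new objects.
def _example_device_spec_usage():
  base = DeviceSpecV2.from_string("/job:ps/task:0")
  gpu = DeviceSpecV2(device_type="GPU", device_index=1)
  merged = base.make_merged_spec(gpu)
  # merged.to_string() == "/job:ps/task:0/device:GPU:1"
  cpu = merged.replace(device_type="CPU", device_index=0)
  # cpu.to_string() == "/job:ps/task:0/device:CPU:0"
  return merged, cpu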
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/device_spec.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for c_api utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class ApiDefMapTest(test_util.TensorFlowTestCase):
def testApiDefMapOpNames(self):
api_def_map = c_api_util.ApiDefMap()
self.assertIn("Add", api_def_map.op_names())
def testApiDefMapGet(self):
api_def_map = c_api_util.ApiDefMap()
op_def = api_def_map.get_op_def("Add")
self.assertEqual(op_def.name, "Add")
api_def = api_def_map.get_api_def("Add")
self.assertEqual(api_def.graph_op_name, "Add")
def testApiDefMapPutThenGet(self):
api_def_map = c_api_util.ApiDefMap()
api_def_text = """
op {
graph_op_name: "Add"
summary: "Returns x + y element-wise."
description: <<END
*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
END
}
"""
api_def_map.put_api_def(api_def_text)
api_def = api_def_map.get_api_def("Add")
self.assertEqual(api_def.graph_op_name, "Add")
self.assertEqual(api_def.summary, "Returns x + y element-wise.")
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/c_api_util_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to manipulate a tensor graph in python.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.framework.graph_util_impl import convert_variables_to_constants
from tensorflow.python.framework.graph_util_impl import extract_sub_graph
from tensorflow.python.framework.graph_util_impl import must_run_on_cpu
from tensorflow.python.framework.graph_util_impl import remove_training_nodes
from tensorflow.python.framework.graph_util_impl import tensor_shape_from_node_def_name
# pylint: enable=unused-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/graph_util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for querying registered kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import kernel_def_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.util import compat
def get_all_registered_kernels():
"""Returns a KernelList proto of all registered kernels.
"""
buf = c_api.TF_GetAllRegisteredKernels()
data = c_api.TF_GetBuffer(buf)
kernel_list = kernel_def_pb2.KernelList()
kernel_list.ParseFromString(compat.as_bytes(data))
return kernel_list
def get_registered_kernels_for_op(name):
"""Returns a KernelList proto of registered kernels for a given op.
Args:
name: A string representing the name of the op whose kernels to retrieve.
"""
buf = c_api.TF_GetRegisteredKernelsForOp(name)
data = c_api.TF_GetBuffer(buf)
kernel_list = kernel_def_pb2.KernelList()
kernel_list.ParseFromString(compat.as_bytes(data))
return kernel_list
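# Illustrative usage sketch (not part of the original module; the helper name
# `_example_device_types_for_op` is invented): collect the device types that
# provide a registered kernel for a given op.
def _example_device_types_for_op(op_name="MatMul"):
  kernel_list = get_registered_kernels_for_op(op_name)
  # Each entry is a KernelDef proto with, among others, a `device_type` field.
  return sorted({kernel.device_type for kernel in kernel_list.kernel})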
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/kernels.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparse tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import tf2
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_like
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
_TensorLike = tensor_like._TensorLike
_eval_using_default_session = ops._eval_using_default_session
_override_helper = ops._override_helper
# pylint: enable=protected-access
@tf_export("sparse.SparseTensor", "SparseTensor")
class SparseTensor(_TensorLike, composite_tensor.CompositeTensor):
"""Represents a sparse tensor.
TensorFlow represents a sparse tensor as three separate dense tensors:
`indices`, `values`, and `dense_shape`. In Python, the three tensors are
collected into a `SparseTensor` class for ease of use. If you have separate
`indices`, `values`, and `dense_shape` tensors, wrap them in a `SparseTensor`
object before passing to the ops below.
Concretely, the sparse tensor `SparseTensor(indices, values, dense_shape)`
comprises the following components, where `N` and `ndims` are the number
of values and number of dimensions in the `SparseTensor`, respectively:
* `indices`: A 2-D int64 tensor of dense_shape `[N, ndims]`, which specifies
the indices of the elements in the sparse tensor that contain nonzero
values (elements are zero-indexed). For example, `indices=[[1,3], [2,4]]`
specifies that the elements with indexes of [1,3] and [2,4] have
nonzero values.
* `values`: A 1-D tensor of any type and dense_shape `[N]`, which supplies the
values for each element in `indices`. For example, given
`indices=[[1,3], [2,4]]`, the parameter `values=[18, 3.6]` specifies
that element [1,3] of the sparse tensor has a value of 18, and element
[2,4] of the tensor has a value of 3.6.
* `dense_shape`: A 1-D int64 tensor of dense_shape `[ndims]`, which specifies
the dense_shape of the sparse tensor. Takes a list indicating the number of
elements in each dimension. For example, `dense_shape=[3,6]` specifies a
two-dimensional 3x6 tensor, `dense_shape=[2,3,4]` specifies a
three-dimensional 2x3x4 tensor, and `dense_shape=[9]` specifies a
one-dimensional tensor with 9 elements.
The corresponding dense tensor satisfies:
```python
dense.shape = dense_shape
dense[tuple(indices[i])] = values[i]
```
By convention, `indices` should be sorted in row-major order (or equivalently
lexicographic order on the tuples `indices[i]`). This is not enforced when
`SparseTensor` objects are constructed, but most ops assume correct ordering.
If the ordering of sparse tensor `st` is wrong, a fixed version can be
obtained by calling `tf.sparse.reorder(st)`.
Example: The sparse tensor
```python
SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
```
represents the dense tensor
```python
[[1, 0, 0, 0]
[0, 0, 2, 0]
[0, 0, 0, 0]]
```
"""
@classmethod
def from_value(cls, sparse_tensor_value):
if not is_sparse(sparse_tensor_value):
raise TypeError("Neither a SparseTensor nor SparseTensorValue: %s." %
sparse_tensor_value)
return SparseTensor(
indices=sparse_tensor_value.indices,
values=sparse_tensor_value.values,
dense_shape=sparse_tensor_value.dense_shape)
def __init__(self, indices, values, dense_shape):
"""Creates a `SparseTensor`.
Args:
indices: A 2-D int64 tensor of shape `[N, ndims]`.
values: A 1-D tensor of any type and shape `[N]`.
dense_shape: A 1-D int64 tensor of shape `[ndims]`.
"""
with ops.name_scope(None, "SparseTensor", [indices, values, dense_shape]):
indices = ops.convert_to_tensor(
indices, name="indices", dtype=dtypes.int64)
# TODO(touts): Consider adding mutable_values() when 'values'
# is a VariableOp and updating users of SparseTensor.
values = ops.internal_convert_to_tensor(values, name="values")
dense_shape = ops.convert_to_tensor(
dense_shape, name="dense_shape", dtype=dtypes.int64)
self._indices = indices
self._values = values
self._dense_shape = dense_shape
indices_shape = indices.shape.with_rank(2)
values_shape = values.shape.with_rank(1)
dense_shape_shape = dense_shape.shape.with_rank(1)
# Assert number of rows in indices match the number of elements in values.
indices_shape.dims[0].merge_with(values_shape.dims[0])
# Assert number of columns in indices matches the number of elements in
# dense_shape.
indices_shape.dims[1].merge_with(dense_shape_shape.dims[0])
def get_shape(self):
"""Get the `TensorShape` representing the shape of the dense tensor.
Returns:
A `TensorShape` object.
"""
return tensor_util.constant_value_as_shape(self._dense_shape)
@property
def indices(self):
"""The indices of non-zero values in the represented dense tensor.
Returns:
A 2-D Tensor of int64 with dense_shape `[N, ndims]`, where `N` is the
number of non-zero values in the tensor, and `ndims` is the rank.
"""
return self._indices
@property
def values(self):
"""The non-zero values in the represented dense tensor.
Returns:
A 1-D Tensor of any data type.
"""
return self._values
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self._values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._values.dtype
@property
def dense_shape(self):
"""A 1-D Tensor of int64 representing the shape of the dense tensor."""
return self._dense_shape
@property
def shape(self):
"""Get the `TensorShape` representing the shape of the dense tensor.
Returns:
A `TensorShape` object.
"""
return tensor_util.constant_value_as_shape(self._dense_shape)
@property
def graph(self):
"""The `Graph` that contains the index, value, and dense_shape tensors."""
return self._indices.graph
def __str__(self):
return "SparseTensor(indices=%s, values=%s, dense_shape=%s)" % (
self._indices, self._values, self._dense_shape)
def eval(self, feed_dict=None, session=None):
"""Evaluates this sparse tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `SparseTensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this sparse
tensor. If none, the default session will be used.
Returns:
A `SparseTensorValue` object.
"""
indices, values, dense_shape = _eval_using_default_session(
[self.indices, self.values, self.dense_shape], feed_dict, self.graph,
session)
return SparseTensorValue(indices, values, dense_shape)
@staticmethod
def _override_operator(operator, func):
_override_helper(SparseTensor, operator, func)
@property
def _type_spec(self):
return SparseTensorSpec(self.shape, self.dtype)
def _shape_invariant_to_type_spec(self, shape):
# From the tf.while_loop docs: "If a loop variable is a SparseTensor, the
# shape invariant must be TensorShape([r]) where r is the rank of the dense
# tensor represented by the sparse tensor. It means the shapes of the three
# tensors of the SparseTensor are ([None], [None, r], [r])." NOTE: The shape
# invariant here is the shape of the SparseTensor.dense_shape property. It
# must be the shape of a vector.
if shape.ndims is not None and shape.ndims != 1:
raise ValueError("Expected a shape with 1 dimension")
rank = tensor_shape.dimension_value(shape[0])
return SparseTensorSpec(tensor_shape.unknown_shape(rank), self.dtype)
def consumers(self):
return self._consumers()
SparseTensorValue = collections.namedtuple("SparseTensorValue",
["indices", "values", "dense_shape"])
tf_export(v1=["SparseTensorValue"])(SparseTensorValue)
pywrap_tensorflow.RegisterType("SparseTensorValue", SparseTensorValue)
@tf_export("SparseTensorSpec")
class SparseTensorSpec(type_spec.BatchableTypeSpec):
"""Type specification for a `tf.SparseTensor`."""
__slots__ = ["_shape", "_dtype"]
value_type = property(lambda self: SparseTensor)
def __init__(self, shape=None, dtype=dtypes.float32):
"""Constructs a type specification for a `tf.SparseTensor`.
Args:
shape: The dense shape of the `SparseTensor`, or `None` to allow
any dense shape.
dtype: `tf.DType` of values in the `SparseTensor`.
"""
self._shape = tensor_shape.as_shape(shape)
self._dtype = dtypes.as_dtype(dtype)
def _serialize(self):
return (self._shape, self._dtype)
@property
def dtype(self):
"""The `tf.dtypes.DType` specified by this type for the SparseTensor."""
return self._dtype
@property
def shape(self):
"""The `tf.TensorShape` specified by this type for the SparseTensor."""
return self._shape
@property
def _component_specs(self):
rank = self._shape.ndims
num_values = None
return [
tensor_spec.TensorSpec([num_values, rank], dtypes.int64),
tensor_spec.TensorSpec([num_values], self._dtype),
tensor_spec.TensorSpec([rank], dtypes.int64)]
def _to_components(self, value):
if isinstance(value, SparseTensorValue):
value = SparseTensor.from_value(value)
return [value.indices, value.values, value.dense_shape]
def _from_components(self, tensor_list):
if (all(isinstance(t, np.ndarray) for t in tensor_list) and
not tf2.enabled()):
return SparseTensorValue(*tensor_list)
else:
return SparseTensor(*tensor_list)
# The SparseTensorSpec tensor_list encoding uses (de)serialize_sparse ops
# to (un)box the component tensors in a way that allows for batching &
# unbatching.
@property
def _flat_tensor_specs(self):
# NOTE(mrry): The default flat shape of a boxed `SparseTensor` is `(3,)`,
# but a `SparseTensorSpec` can also represent a batch of boxed
# `SparseTensor` objects with shape `(..., 3)` (and batches of batches,
# etc.), so the flat shape must be unknown.
return [tensor_spec.TensorSpec(None, dtypes.variant)]
def _to_tensor_list(self, value):
value = SparseTensor.from_value(value)
return [gen_sparse_ops.serialize_sparse(
value.indices, value.values, value.dense_shape,
out_type=dtypes.variant)]
def _to_batched_tensor_list(self, value):
dense_shape = tensor_util.constant_value_as_shape(value.dense_shape)
if self._shape.merge_with(dense_shape).ndims == 0:
raise ValueError(
"Unbatching a sparse tensor is only supported for rank >= 1")
return [gen_sparse_ops.serialize_many_sparse(
value.indices, value.values, value.dense_shape,
out_type=dtypes.variant)]
def _from_compatible_tensor_list(self, tensor_list):
tensor_list = gen_sparse_ops.deserialize_sparse(tensor_list[0], self._dtype)
result = SparseTensor(*tensor_list)
rank = self._shape.ndims
result.indices.set_shape([None, rank])
result.dense_shape.set_shape([rank])
return result
def _batch(self, batch_size):
return SparseTensorSpec(
tensor_shape.TensorShape([batch_size]).concatenate(self._shape),
self._dtype)
def _unbatch(self):
if self._shape.ndims == 0:
raise ValueError("Unbatching a tensor is only supported for rank >= 1")
return SparseTensorSpec(self._shape[1:], self._dtype)
def _to_legacy_output_types(self):
return self._dtype
def _to_legacy_output_shapes(self):
return self._shape
def _to_legacy_output_classes(self):
return SparseTensor
@classmethod
def from_value(cls, value):
if isinstance(value, SparseTensor):
return cls(value.shape, value.dtype)
if isinstance(value, SparseTensorValue):
if isinstance(value.values, np.ndarray):
return cls(value.dense_shape, value.values.dtype)
else:
return cls.from_value(SparseTensor.from_value(value))
else:
raise TypeError("Expected SparseTensor or SparseTensorValue")
# TODO(b/133606651) Delete the SparseTensor registration when CompositeTensor
# is updated to define a _type_spec field (since registration will be
# automatic). Do *not* delete the SparseTensorValue registration.
type_spec.register_type_spec_from_value_converter(
SparseTensor, SparseTensorSpec.from_value)
type_spec.register_type_spec_from_value_converter(
SparseTensorValue, SparseTensorSpec.from_value)
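# Illustrative sketch (not part of the original module; the helper name
# `_example_sparse_tensor_spec` is invented, and `_batch`/`_unbatch` are
# private TypeSpec hooks, called here only to demonstrate the batching
# contract).
def _example_sparse_tensor_spec():
  st = SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 2.0],
                    dense_shape=[3, 4])
  spec = SparseTensorSpec.from_value(st)  # shape [3, 4], dtype float32
  batched = spec._batch(8)  # pylint: disable=protected-access
  # batched describes a stack of 8 such sparse tensors: shape [8, 3, 4].
  return batched._unbatch()  # pylint: disable=protected-access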
@tf_export(v1=["convert_to_tensor_or_sparse_tensor"])
def convert_to_tensor_or_sparse_tensor(value, dtype=None, name=None):
"""Converts value to a `SparseTensor` or `Tensor`.
Args:
value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a
registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `SparseTensor` or `Tensor` based on `value`.
Raises:
RuntimeError: If result type is incompatible with `dtype`.
"""
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, SparseTensorValue):
value = SparseTensor.from_value(value)
if isinstance(value, SparseTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise RuntimeError("Sparse dtype: requested = %s, actual = %s" %
(dtype.name, value.dtype.name))
return value
return ops.internal_convert_to_tensor(value, dtype=dtype, name=name)
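# Illustrative usage sketch (not part of the original module; the helper name
# `_example_convert_to_tensor_or_sparse_tensor` is invented): dense inputs are
# converted to `Tensor`s while sparse inputs pass through unchanged.
def _example_convert_to_tensor_or_sparse_tensor():
  dense = convert_to_tensor_or_sparse_tensor([1.0, 2.0])  # -> Tensor
  sparse = convert_to_tensor_or_sparse_tensor(
      SparseTensor(indices=[[0]], values=[1.0], dense_shape=[3]))
  return dense, sparse  # sparse is returned as the same SparseTensor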
def is_sparse(x):
"""Check whether `x` is sparse.
Check whether an object is a `tf.SparseTensor` or
`tf.compat.v1.SparseTensorValue`.
Args:
x: A python object to check.
Returns:
`True` iff `x` is a `tf.SparseTensor` or `tf.compat.v1.SparseTensorValue`.
"""
return isinstance(x, (SparseTensor, SparseTensorValue))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/sparse_tensor.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""smart_cond and related utilties."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
def smart_cond(pred, true_fn=None, false_fn=None, name=None):
"""Return either `true_fn()` if predicate `pred` is true else `false_fn()`.
If `pred` is a bool or has a constant value, we return either `true_fn()`
or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.
Arguments:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`.
Raises:
TypeError: If `true_fn` or `false_fn` is not callable.
"""
if not callable(true_fn):
raise TypeError("`true_fn` must be callable.")
if not callable(false_fn):
raise TypeError("`false_fn` must be callable.")
pred_value = smart_constant_value(pred)
if pred_value is not None:
if pred_value:
return true_fn()
else:
return false_fn()
else:
return control_flow_ops.cond(pred, true_fn=true_fn, false_fn=false_fn,
name=name)
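# Illustrative usage sketch (not part of the original module; the helper name
# `_example_smart_cond` is invented): with a Python bool or a
# constant-foldable tensor predicate, the chosen branch runs directly and no
# `cond` op is added to the graph.
def _example_smart_cond():
  return smart_cond(True,
                    true_fn=lambda: ops.convert_to_tensor(1.0),
                    false_fn=lambda: ops.convert_to_tensor(0.0))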
def smart_constant_value(pred):
"""Return the bool value for `pred`, or None if `pred` had a dynamic value.
Arguments:
pred: A scalar, either a Python bool or tensor.
Returns:
True or False if `pred` has a constant boolean value, None otherwise.
Raises:
TypeError: If `pred` is not a Tensor or bool.
"""
if isinstance(pred, ops.Tensor):
pred_value = tensor_util.constant_value(pred)
# TODO(skyewm): consider folding this into tensor_util.constant_value.
# pylint: disable=protected-access
if pred_value is None:
pred_value = c_api.TF_TryEvaluateConstant_wrapper(pred.graph._c_graph,
pred._as_tf_output())
# pylint: enable=protected-access
elif pred in {0, 1}: # Accept 1/0 as valid boolean values
pred_value = bool(pred)
elif isinstance(pred, bool):
pred_value = pred
else:
raise TypeError("`pred` must be a Tensor, or a Python bool, or 1 or 0. "
"Found instead: %s" % type(pred))
return pred_value
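# Illustrative sketch (not part of the original module; the helper name
# `_example_smart_constant_value` is invented): constant tensors fold to
# their boolean value, while plain bools pass through.
def _example_smart_constant_value():
  folded = smart_constant_value(ops.convert_to_tensor(True))  # -> True
  passed = smart_constant_value(False)                        # -> False
  return folded, passed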
def smart_case(pred_fn_pairs, default=None, exclusive=False, name="smart_case"):
"""Like tf.case, except attempts to statically evaluate predicates.
If any predicate in `pred_fn_pairs` is a bool or has a constant value, the
associated callable will be called or omitted depending on its value.
Otherwise this functions like tf.case.
Args:
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a
callable which returns a list of tensors.
default: Optional callable that returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
name: A name for this operation (optional).
Returns:
The tensors returned by the first pair whose predicate evaluated to True, or
those returned by `default` if none does.
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
"""
return control_flow_ops._case_helper( # pylint: disable=protected-access
smart_cond, pred_fn_pairs, default, exclusive, name,
allow_python_preds=True)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/smart_cond.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes for tensor shape inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python import tf2
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import dtypes
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
_TENSORSHAPE_V2_OVERRIDE = None
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/v2_tensorshape",
"Whether tensor_shape.enable_v2_tensorshape() is called.")
@tf_export(v1=["enable_v2_tensorshape"])
def enable_v2_tensorshape():
"""In TensorFlow 2.0, iterating over a TensorShape instance returns values.
This enables the new behavior.
Concretely, `tensor_shape[i]` returned a Dimension instance in V1, but
in V2 it returns either an integer or None.
Examples:
```
#######################
# If you had this in V1:
value = tensor_shape[i].value
# Do this in V2 instead:
value = tensor_shape[i]
#######################
# If you had this in V1:
for dim in tensor_shape:
value = dim.value
print(value)
# Do this in V2 instead:
for value in tensor_shape:
print(value)
#######################
# If you had this in V1:
dim = tensor_shape[i]
dim.assert_is_compatible_with(other_shape) # or using any other shape method
# Do this in V2 instead:
if tensor_shape.rank is None:
dim = Dimension(None)
else:
dim = tensor_shape.dims[i]
dim.assert_is_compatible_with(other_shape) # or using any other shape method
# The V2 suggestion above is more explicit, which will save you from
# the following trap (present in V1):
# you might do in-place modifications to `dim` and expect them to be reflected
# in `tensor_shape[i]`, but they would not be.
```
"""
global _TENSORSHAPE_V2_OVERRIDE # pylint: disable=invalid-name
_TENSORSHAPE_V2_OVERRIDE = True
_api_usage_gauge.get_cell().set(True)
@tf_export(v1=["disable_v2_tensorshape"])
def disable_v2_tensorshape():
"""Disables the V2 TensorShape behavior and reverts to V1 behavior.
See docstring for `enable_v2_tensorshape` for details about the new behavior.
"""
global _TENSORSHAPE_V2_OVERRIDE # pylint: disable=invalid-name
_TENSORSHAPE_V2_OVERRIDE = False
_api_usage_gauge.get_cell().set(False)
@tf_export(
"compat.dimension_value", v1=["dimension_value", "compat.dimension_value"])
def dimension_value(dimension):
"""Compatibility utility required to allow for both V1 and V2 behavior in TF.
Until the release of TF 2.0, we need the legacy behavior of `TensorShape` to
coexist with the new behavior. This utility is a bridge between the two.
When accessing the value of a TensorShape dimension,
use this utility, like this:
```
# If you had this in your V1 code:
value = tensor_shape[i].value
# Use `dimension_value` as direct replacement compatible with both V1 & V2:
value = dimension_value(tensor_shape[i])
# This would be the V2 equivalent:
value = tensor_shape[i] # Warning: this will return the dim value in V2!
```
Arguments:
dimension: Either a `Dimension` instance, an integer, or None.
Returns:
A plain value, i.e. an integer or None.
"""
if isinstance(dimension, Dimension):
return dimension.value
return dimension
@tf_export(
"compat.dimension_at_index",
v1=["dimension_at_index", "compat.dimension_at_index"])
def dimension_at_index(shape, index):
"""Compatibility utility required to allow for both V1 and V2 behavior in TF.
Until the release of TF 2.0, we need the legacy behavior of `TensorShape` to
coexist with the new behavior. This utility is a bridge between the two.
If you want to retrieve the Dimension instance corresponding to a certain
index in a TensorShape instance, use this utility, like this:
```
# If you had this in your V1 code:
dim = tensor_shape[i]
# Use `dimension_at_index` as direct replacement compatible with both V1 & V2:
dim = dimension_at_index(tensor_shape, i)
# Another possibility would be this, but WARNING: it only works if the
# tensor_shape instance has a defined rank.
dim = tensor_shape.dims[i] # `dims` may be None if the rank is undefined!
# In native V2 code, we recommend instead being more explicit:
if tensor_shape.rank is None:
dim = Dimension(None)
else:
dim = tensor_shape.dims[i]
# Being more explicit will save you from the following trap (present in V1):
# you might do in-place modifications to `dim` and expect them to be reflected
# in `tensor_shape[i]`, but they would not be (as the Dimension object was
# instantiated on the fly.
```
Arguments:
shape: A TensorShape instance.
index: An integer index.
Returns:
A dimension object.
"""
assert isinstance(shape, TensorShape)
if shape.rank is None:
return Dimension(None)
else:
return shape.dims[index]
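# Illustrative sketch (not part of the original module; the helper name
# `_example_dimension_helpers` is invented): both helpers return the same
# results under V1 and V2 TensorShape semantics.
def _example_dimension_helpers():
  shape = TensorShape([2, None])
  first = dimension_value(shape[0])   # 2 under both V1 and V2
  second = dimension_value(shape[1])  # None under both V1 and V2
  dim = dimension_at_index(shape, 0)  # always a Dimension instance
  return first, second, dim.value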
@tf_export(v1=["Dimension"])
class Dimension(object):
"""Represents the value of one dimension in a TensorShape."""
def __init__(self, value):
"""Creates a new Dimension with the given value."""
if value is None:
self._value = None
elif isinstance(value, Dimension):
self._value = value.value
elif isinstance(value, dtypes.DType):
raise TypeError("Cannot convert %s to Dimension" % value)
else:
self._value = int(value)
if (not isinstance(value, compat.bytes_or_text_types) and
self._value != value):
raise ValueError("Ambiguous dimension: %s" % value)
if self._value < 0:
raise ValueError("Dimension %d must be >= 0" % self._value)
def __repr__(self):
return "Dimension(%s)" % repr(self._value)
def __str__(self):
value = self._value
return "?" if value is None else str(value)
def __eq__(self, other):
"""Returns true if `other` has the same known value as this Dimension."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value == other.value
def __ne__(self, other):
"""Returns true if `other` has a different known value from `self`."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value != other.value
def __int__(self):
return self._value
# This is needed for Windows.
# See https://github.com/tensorflow/tensorflow/pull/9780
def __long__(self):
return self._value
def __index__(self):
# Allow use in Python 3 range
return self._value
@property
def value(self):
"""The value of this dimension, or None if it is unknown."""
return self._value
def is_compatible_with(self, other):
"""Returns true if `other` is compatible with this Dimension.
Two known Dimensions are compatible if they have the same value.
An unknown Dimension is compatible with all other Dimensions.
Args:
other: Another Dimension.
Returns:
True if this Dimension and `other` are compatible.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
return (self._value is None or other.value is None or
self._value == other.value)
def assert_is_compatible_with(self, other):
"""Raises an exception if `other` is not compatible with this Dimension.
Args:
other: Another Dimension.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
if not self.is_compatible_with(other):
raise ValueError("Dimensions %s and %s are not compatible" %
(self, other))
def merge_with(self, other):
"""Returns a Dimension that combines the information in `self` and `other`.
Dimensions are combined as follows:
```python
tf.compat.v1.Dimension(n) .merge_with(tf.compat.v1.Dimension(n)) ==
tf.compat.v1.Dimension(n)
tf.compat.v1.Dimension(n) .merge_with(tf.compat.v1.Dimension(None)) ==
tf.compat.v1.Dimension(n)
tf.compat.v1.Dimension(None).merge_with(tf.compat.v1.Dimension(n)) ==
tf.compat.v1.Dimension(n)
# equivalent to tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None).merge_with(tf.compat.v1.Dimension(None))
# raises ValueError for n != m
tf.compat.v1.Dimension(n) .merge_with(tf.compat.v1.Dimension(m))
```
Args:
other: Another Dimension.
Returns:
A Dimension containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
self.assert_is_compatible_with(other)
if self._value is None:
return Dimension(other.value)
else:
return Dimension(self._value)
def __add__(self, other):
"""Returns the sum of `self` and `other`.
Dimensions are summed as follows:
```python
tf.compat.v1.Dimension(m) + tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m + n)
tf.compat.v1.Dimension(m) + tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) + tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) + tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value + other.value)
def __radd__(self, other):
"""Returns the sum of `other` and `self`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
return self + other
def __sub__(self, other):
"""Returns the subtraction of `other` from `self`.
Dimensions are subtracted as follows:
```python
tf.compat.v1.Dimension(m) - tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m - n)
tf.compat.v1.Dimension(m) - tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) - tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) - tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the subtraction of `other` from `self`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value - other.value)
def __rsub__(self, other):
"""Returns the subtraction of `self` from `other`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the subtraction of `self` from `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(other.value - self._value)
def __mul__(self, other):
"""Returns the product of `self` and `other`.
Dimensions are multiplied as follows:
```python
tf.compat.v1.Dimension(m) * tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m * n)
tf.compat.v1.Dimension(m) * tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) * tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) * tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the product of `self` and `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value * other.value)
def __rmul__(self, other):
"""Returns the product of `self` and `other`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the product of `self` and `other`.
"""
return self * other
def __floordiv__(self, other):
"""Returns the quotient of `self` and `other` rounded down.
Dimensions are divided as follows:
```python
tf.compat.v1.Dimension(m) // tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m // n)
tf.compat.v1.Dimension(m) // tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) // tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) // tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value // other.value)
def __rfloordiv__(self, other):
"""Returns the quotient of `other` and `self` rounded down.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(other.value // self._value)
def __div__(self, other):
"""DEPRECATED: Use `__floordiv__` via `x // y` instead.
This function exists only for backwards compatibility purposes; new code
should use `__floordiv__` via the syntax `x // y`. Using `x // y`
communicates clearly that the result rounds down, and is forward compatible
to Python 3.
Args:
other: Another `Dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
return self // other
def __rdiv__(self, other):
"""Use `__floordiv__` via `x // y` instead.
This function exists only to have a better error message. Instead of:
`TypeError: unsupported operand type(s) for /: 'int' and 'Dimension'`,
this function will explicitly call for usage of `//` instead.
Args:
other: Another `Dimension`.
Raises:
TypeError.
"""
raise TypeError("unsupported operand type(s) for /: '{}' and 'Dimension', "
"please use // instead".format(type(other).__name__))
def __truediv__(self, other):
"""Use `__floordiv__` via `x // y` instead.
This function exists only to have a better error message. Instead of:
`TypeError: unsupported operand type(s) for /: 'Dimension' and 'int'`,
this function will explicitly call for usage of `//` instead.
Args:
other: Another `Dimension`.
Raises:
TypeError.
"""
raise TypeError("unsupported operand type(s) for /: 'Dimension' and '{}', "
"please use // instead".format(type(other).__name__))
def __rtruediv__(self, other):
"""Use `__floordiv__` via `x // y` instead.
This function exists only to have a better error message. Instead of:
`TypeError: unsupported operand type(s) for /: 'int' and 'Dimension'`,
this function will explicitly call for usage of `//` instead.
Args:
other: Another `Dimension`.
Raises:
TypeError.
"""
raise TypeError("unsupported operand type(s) for /: '{}' and 'Dimension', "
"please use // instead".format(type(other).__name__))
def __mod__(self, other):
"""Returns `self` modulo `other`.
Dimension moduli are computed as follows:
```python
tf.compat.v1.Dimension(m) % tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m % n)
tf.compat.v1.Dimension(m) % tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) % tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) % tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is `self` modulo `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value % other.value)
def __rmod__(self, other):
"""Returns `other` modulo `self`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is `other` modulo `self`.
"""
other = as_dimension(other)
return other % self
def __lt__(self, other):
"""Returns True if `self` is known to be less than `other`.
Dimensions are compared as follows:
```python
(tf.compat.v1.Dimension(m) < tf.compat.v1.Dimension(n)) == (m < n)
(tf.compat.v1.Dimension(m) < tf.compat.v1.Dimension(None)) == None
(tf.compat.v1.Dimension(None) < tf.compat.v1.Dimension(n)) == None
(tf.compat.v1.Dimension(None) < tf.compat.v1.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value < other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value < other.value
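  # Example of the three-valued comparison above (illustrative sketch): unlike
  # ordinary Python comparisons, the result may be None rather than a bool.
  #   Dimension(3) < Dimension(5)     # ==> True
  #   Dimension(3) < Dimension(None)  # ==> None  (unknown, not False)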
def __le__(self, other):
"""Returns True if `self` is known to be less than or equal to `other`.
Dimensions are compared as follows:
```python
(tf.compat.v1.Dimension(m) <= tf.compat.v1.Dimension(n)) == (m <= n)
(tf.compat.v1.Dimension(m) <= tf.compat.v1.Dimension(None)) == None
(tf.compat.v1.Dimension(None) <= tf.compat.v1.Dimension(n)) == None
(tf.compat.v1.Dimension(None) <= tf.compat.v1.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value <= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value <= other.value
def __gt__(self, other):
"""Returns True if `self` is known to be greater than `other`.
Dimensions are compared as follows:
```python
(tf.compat.v1.Dimension(m) > tf.compat.v1.Dimension(n)) == (m > n)
(tf.compat.v1.Dimension(m) > tf.compat.v1.Dimension(None)) == None
(tf.compat.v1.Dimension(None) > tf.compat.v1.Dimension(n)) == None
(tf.compat.v1.Dimension(None) > tf.compat.v1.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value > other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value > other.value
def __ge__(self, other):
"""Returns True if `self` is known to be greater than or equal to `other`.
Dimensions are compared as follows:
```python
(tf.compat.v1.Dimension(m) >= tf.compat.v1.Dimension(n)) == (m >= n)
(tf.compat.v1.Dimension(m) >= tf.compat.v1.Dimension(None)) == None
(tf.compat.v1.Dimension(None) >= tf.compat.v1.Dimension(n)) == None
(tf.compat.v1.Dimension(None) >= tf.compat.v1.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value >= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value >= other.value
def __reduce__(self):
return Dimension, (self._value,)
def as_dimension(value):
"""Converts the given value to a Dimension.
A Dimension input will be returned unmodified.
An input of `None` will be converted to an unknown Dimension.
An integer input will be converted to a Dimension with that value.
Args:
value: The value to be converted.
Returns:
A Dimension corresponding to the given value.
"""
if isinstance(value, Dimension):
return value
else:
return Dimension(value)
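# Example of `as_dimension` conversions (illustrative sketch):
#   as_dimension(8).value       # ==> 8
#   as_dimension(None).value    # ==> None  (unknown dimension)
#   as_dimension(Dimension(8))  # ==> the same Dimension, returned unmodified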
@tf_export("TensorShape")
class TensorShape(object):
"""Represents the shape of a `Tensor`.
A `TensorShape` represents a possibly-partial shape specification for a
`Tensor`. It may be one of the following:
* *Fully-known shape:* has a known number of dimensions and a known size
for each dimension. e.g. `TensorShape([16, 256])`
* *Partially-known shape:* has a known number of dimensions, and an unknown
size for one or more dimensions. e.g. `TensorShape([None, 256])`
* *Unknown shape:* has an unknown number of dimensions, and an unknown
size in all dimensions. e.g. `TensorShape(None)`
If a tensor is produced by an operation of type `"Foo"`, its shape
may be inferred if there is a registered shape function for
`"Foo"`. See [Shape
functions](https://tensorflow.org/extend/adding_an_op#shape_functions_in_c)
for details of shape functions and how to register them. Alternatively,
the shape may be set explicitly using `tf.Tensor.set_shape`.
"""
def __init__(self, dims):
"""Creates a new TensorShape with the given dimensions.
Args:
dims: A list of Dimensions, or None if the shape is unspecified.
Raises:
TypeError: If dims cannot be converted to a list of dimensions.
"""
if dims is None:
self._dims = None
elif isinstance(dims, compat.bytes_or_text_types):
raise TypeError("A string has ambiguous TensorShape, please wrap in a "
"list or convert to an int: %s" % dims)
elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):
if dims.unknown_rank:
self._dims = None
else:
self._dims = [
# Protos store variable-size dimensions as -1
as_dimension(dim.size if dim.size != -1 else None)
for dim in dims.dim
]
elif isinstance(dims, TensorShape):
self._dims = dims.dims
else:
try:
dims_iter = iter(dims)
except TypeError:
# Treat as a singleton dimension
self._dims = [as_dimension(dims)]
else:
# Got a list of dimensions
self._dims = [as_dimension(d) for d in dims_iter]
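  # Example constructions accepted by `__init__` above (illustrative sketch):
  #   TensorShape([16, 256])    # fully-known shape
  #   TensorShape([None, 256])  # partially-known shape
  #   TensorShape(None)         # unknown shape (unknown rank)
  #   TensorShape(4)            # non-iterable is treated as a singleton: (4,)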
@property
def _v2_behavior(self):
if _TENSORSHAPE_V2_OVERRIDE is None:
return tf2.enabled()
return _TENSORSHAPE_V2_OVERRIDE
def __repr__(self):
if self._v2_behavior:
if self._dims is not None:
return "TensorShape(%r)" % [dim.value for dim in self._dims]
else:
return "TensorShape(None)"
else:
return "TensorShape(%r)" % self._dims
def __str__(self):
if self.rank is None:
return "<unknown>"
elif self.rank == 1:
if self._v2_behavior:
return "(%s,)" % self._dims[0].value
else:
return "(%s,)" % self._dims[0]
else:
if self._v2_behavior:
return "(%s)" % ", ".join(str(d.value) for d in self._dims)
else:
return "(%s)" % ", ".join(str(d) for d in self._dims)
@property
def rank(self):
"""Returns the rank of this shape, or None if it is unspecified."""
if self._dims is not None:
return len(self._dims)
return None
@property
def dims(self):
"""Returns a list of Dimensions, or None if the shape is unspecified."""
return self._dims
@property
def ndims(self):
"""Deprecated accessor for `rank`."""
return self.rank
def __len__(self):
"""Returns the rank of this shape, or raises ValueError if unspecified."""
if self._dims is None:
raise ValueError("Cannot take the length of shape with unknown rank.")
return len(self._dims)
def __bool__(self):
"""Returns True if this shape contains non-zero information."""
return self._dims is not None
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __iter__(self):
"""Returns `self.dims` if the rank is known, otherwise raises ValueError."""
if self._dims is None:
raise ValueError("Cannot iterate over a shape with unknown rank.")
else:
if self._v2_behavior:
return iter(d.value for d in self._dims)
else:
return iter(d for d in self._dims)
def __getitem__(self, key):
"""Returns the value of a dimension or a shape, depending on the key.
Args:
key: If `key` is an integer, returns the dimension at that index;
otherwise if `key` is a slice, returns a TensorShape whose dimensions
are those selected by the slice from `self`.
Returns:
An integer (or `None`) if `key` is an integer and V2 behavior is enabled,
a `Dimension` otherwise, or a `TensorShape` if `key` is a slice.
Raises:
ValueError: If `key` is a slice and `self` is completely unknown and
the step is set.
"""
if self._dims is not None:
if isinstance(key, slice):
return TensorShape(self._dims[key])
else:
if self._v2_behavior:
return self._dims[key].value
else:
return self._dims[key]
else:
if isinstance(key, slice):
start = key.start if key.start is not None else 0
stop = key.stop
if key.step is not None:
# TODO(mrry): Handle these maybe.
raise ValueError("Steps are not yet handled")
if stop is None:
# NOTE(mrry): This implies that TensorShape(None) is compatible with
# TensorShape(None)[1:], which is obviously not true. It would be
# possible to track the number of dimensions symbolically,
# and perhaps we should do that.
return unknown_shape()
elif start < 0 or stop < 0:
# TODO(mrry): Handle this better, as it will be useful for handling
# suffixes of otherwise unknown shapes.
return unknown_shape()
else:
return unknown_shape(rank=stop - start)
else:
if self._v2_behavior:
return None
else:
return Dimension(None)
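  # Example of indexing and slicing above (illustrative sketch; integer
  # indexing returns a plain value under V2 behavior, a Dimension otherwise):
  #   TensorShape([4, None, 2])[1]    # ==> None (size of the unknown dim)
  #   TensorShape([4, None, 2])[0:2]  # ==> TensorShape([4, None])
  #   TensorShape(None)[2:4]          # ==> a rank-2 fully-unknown shape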
def num_elements(self):
"""Returns the total number of elements, or none for incomplete shapes."""
if self.is_fully_defined():
size = 1
for dim in self._dims:
size *= dim.value
return size
else:
return None
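  # Example (illustrative sketch):
  #   TensorShape([2, 3]).num_elements()     # ==> 6
  #   TensorShape([2, None]).num_elements()  # ==> None (not fully defined)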
def merge_with(self, other):
"""Returns a `TensorShape` combining the information in `self` and `other`.
The dimensions in `self` and `other` are merged elementwise,
according to the rules defined for `Dimension.merge_with()`.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible.
"""
other = as_shape(other)
if self._dims is None:
return other
else:
try:
self.assert_same_rank(other)
new_dims = []
for i, dim in enumerate(self._dims):
new_dims.append(dim.merge_with(other[i]))
return TensorShape(new_dims)
except ValueError:
raise ValueError("Shapes %s and %s are not compatible" % (self, other))
def __add__(self, other):
if not isinstance(other, TensorShape):
other = TensorShape(other)
return self.concatenate(other)
def __radd__(self, other):
if not isinstance(other, TensorShape):
other = TensorShape(other)
return other.concatenate(self)
def concatenate(self, other):
"""Returns the concatenation of the dimension in `self` and `other`.
*N.B.* If either `self` or `other` is completely unknown,
concatenation will discard information about the other shape. In the
future, we might support concatenation that preserves this
information for use with slicing.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` whose dimensions are the concatenation of the
dimensions in `self` and `other`.
"""
# TODO(mrry): Handle the case where we concatenate a known shape with a
# completely unknown shape, so that we can use the partial information.
other = as_shape(other)
if self._dims is None or other.dims is None:
return unknown_shape()
else:
return TensorShape(self._dims + other.dims)
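  # Example (illustrative sketch):
  #   TensorShape([2]).concatenate(TensorShape([3, 4]))
  #       # ==> TensorShape([2, 3, 4])
  #   TensorShape([2]).concatenate(TensorShape(None))
  #       # ==> TensorShape(None)  (a fully-unknown shape discards the rest)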
def assert_same_rank(self, other):
"""Raises an exception if `self` and `other` do not have compatible ranks.
Args:
other: Another `TensorShape`.
Raises:
ValueError: If `self` and `other` do not represent shapes with the
same rank.
"""
other = as_shape(other)
if self.rank is not None and other.rank is not None:
if self.rank != other.rank:
raise ValueError("Shapes %s and %s must have the same rank" %
(self, other))
def assert_has_rank(self, rank):
"""Raises an exception if `self` is not compatible with the given `rank`.
Args:
rank: An integer.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
if self.rank not in (None, rank):
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank(self, rank):
"""Returns a shape based on `self` with the given rank.
This method promotes a completely unknown shape to one with a
known rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with the given rank.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
try:
return self.merge_with(unknown_shape(rank=rank))
except ValueError:
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank_at_least(self, rank):
"""Returns a shape based on `self` with at least the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at least the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at least the given
`rank`.
"""
if self.rank is not None and self.rank < rank:
raise ValueError("Shape %s must have rank at least %d" % (self, rank))
else:
return self
def with_rank_at_most(self, rank):
"""Returns a shape based on `self` with at most the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at most the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at most the given
`rank`.
"""
if self.rank is not None and self.rank > rank:
raise ValueError("Shape %s must have rank at most %d" % (self, rank))
else:
return self
def is_compatible_with(self, other):
"""Returns True iff `self` is compatible with `other`.
Two possibly-partially-defined shapes are compatible if there
exists a fully-defined shape that both shapes can represent. Thus,
compatibility allows the shape inference code to reason about
partially-defined shapes. For example:
* TensorShape(None) is compatible with all shapes.
* TensorShape([None, None]) is compatible with all two-dimensional
shapes, such as TensorShape([32, 784]), and also TensorShape(None). It is
not compatible with, for example, TensorShape([None]) or
TensorShape([None, None, None]).
* TensorShape([32, None]) is compatible with all two-dimensional shapes
with size 32 in the 0th dimension, and also TensorShape([None, None])
and TensorShape(None). It is not compatible with, for example,
TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]).
* TensorShape([32, 784]) is compatible with itself, and also
TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None,
None]) and TensorShape(None). It is not compatible with, for example,
TensorShape([32, 1, 784]) or TensorShape([None]).
The compatibility relation is reflexive and symmetric, but not
transitive. For example, TensorShape([32, 784]) is compatible with
TensorShape(None), and TensorShape(None) is compatible with
TensorShape([4, 4]), but TensorShape([32, 784]) is not compatible with
TensorShape([4, 4]).
Args:
other: Another TensorShape.
Returns:
True iff `self` is compatible with `other`.
"""
other = as_shape(other)
if self._dims is not None and other.dims is not None:
if self.rank != other.rank:
return False
for x_dim, y_dim in zip(self._dims, other.dims):
if not x_dim.is_compatible_with(y_dim):
return False
return True
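  # Example of the compatibility relation above (illustrative sketch):
  #   TensorShape([32, None]).is_compatible_with(TensorShape([32, 784]))   # True
  #   TensorShape([32, None]).is_compatible_with(TensorShape([64, None]))  # False
  #   TensorShape(None).is_compatible_with(TensorShape([4, 4]))            # True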
def assert_is_compatible_with(self, other):
"""Raises exception if `self` and `other` do not represent the same shape.
This method can be used to assert that there exists a shape that both
`self` and `other` represent.
Args:
other: Another TensorShape.
Raises:
ValueError: If `self` and `other` do not represent the same shape.
"""
if not self.is_compatible_with(other):
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
def most_specific_compatible_shape(self, other):
"""Returns the most specific TensorShape compatible with `self` and `other`.
* TensorShape([None, 1]) is the most specific TensorShape compatible with
both TensorShape([2, 1]) and TensorShape([5, 1]). Note that
TensorShape(None) is also compatible with the above-mentioned TensorShapes.
* TensorShape([1, 2, 3]) is the most specific TensorShape compatible with
both TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). There are also
less specific TensorShapes compatible with the above-mentioned TensorShapes,
e.g. TensorShape([1, 2, None]) and TensorShape(None).
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` which is the most specific compatible shape of `self`
and `other`.
"""
other = as_shape(other)
if self._dims is None or other.dims is None or self.rank != other.rank:
return unknown_shape()
dims = [(Dimension(None))] * self.rank
for i, (d1, d2) in enumerate(zip(self._dims, other.dims)):
if d1 is not None and d2 is not None and d1 == d2:
dims[i] = d1
return TensorShape(dims)
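  # Example (illustrative sketch): dimensions that differ are generalized to
  # unknown, while matching dimensions are kept.
  #   TensorShape([2, 1]).most_specific_compatible_shape(TensorShape([5, 1]))
  #       # ==> TensorShape([None, 1])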
def is_fully_defined(self):
"""Returns True iff `self` is fully defined in every dimension."""
return (self._dims is not None and
all(dim.value is not None for dim in self._dims))
def assert_is_fully_defined(self):
"""Raises an exception if `self` is not fully defined in every dimension.
Raises:
ValueError: If `self` does not have a known value for every dimension.
"""
if not self.is_fully_defined():
raise ValueError("Shape %s is not fully defined" % self)
def as_list(self):
"""Returns a list of integers or `None` for each dimension.
Returns:
A list of integers or `None` for each dimension.
Raises:
ValueError: If `self` is an unknown shape with an unknown rank.
"""
if self._dims is None:
raise ValueError("as_list() is not defined on an unknown TensorShape.")
return [dim.value for dim in self._dims]
def as_proto(self):
"""Returns this shape as a `TensorShapeProto`."""
if self._dims is None:
return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
else:
return tensor_shape_pb2.TensorShapeProto(dim=[
tensor_shape_pb2.TensorShapeProto.Dim(
size=-1 if d.value is None else d.value) for d in self._dims
])
def __eq__(self, other):
"""Returns True if `self` is equivalent to `other`."""
try:
other = as_shape(other)
except TypeError:
return NotImplemented
return self._dims == other.dims
def __ne__(self, other):
"""Returns True if `self` is known to be different from `other`."""
try:
other = as_shape(other)
except TypeError:
return NotImplemented
if self.rank is None or other.rank is None:
raise ValueError("The inequality of unknown TensorShapes is undefined.")
if self.rank != other.rank:
return True
return self._dims != other.dims
def __reduce__(self):
return TensorShape, (self._dims,)
def __concat__(self, other):
return self.concatenate(other)
def as_shape(shape):
"""Converts the given object to a TensorShape."""
if isinstance(shape, TensorShape):
return shape
else:
return TensorShape(shape)
def unknown_shape(rank=None, **kwargs):
"""Returns an unknown TensorShape, optionally with a known rank.
Args:
rank: (Optional) If specified, the number of dimensions in the shape.
**kwargs: For backwards compatibility.
Returns:
An unknown TensorShape.
Raises:
TypeError: In case of invalid arguments.
"""
if rank is None and "ndims" in kwargs:
rank = kwargs.pop("ndims")
if kwargs:
raise TypeError("Unknown argument: %s" % kwargs)
if rank is None:
return TensorShape(None)
else:
return TensorShape([Dimension(None)] * rank)
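# Example (illustrative sketch):
#   unknown_shape()        # ==> TensorShape(None): unknown rank
#   unknown_shape(rank=2)  # ==> TensorShape([None, None]): rank known,
#                          #     sizes unknown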
@deprecation.deprecated(None, "Use tf.TensorShape([]).")
def scalar():
"""Returns a shape representing a scalar."""
return TensorShape([])
@deprecation.deprecated(None, "Use tf.TensorShape([length]).")
def vector(length):
"""Returns a shape representing a vector.
Args:
length: The length of the vector, which may be None if unknown.
Returns:
A TensorShape representing a vector of the given length.
"""
return TensorShape([length])
@deprecation.deprecated(None, "Use tf.TensorShape([rows, cols]).")
def matrix(rows, cols):
"""Returns a shape representing a matrix.
Args:
rows: The number of rows in the matrix, which may be None if unknown.
cols: The number of columns in the matrix, which may be None if unknown.
Returns:
A TensorShape representing a matrix of the given size.
"""
return TensorShape([rows, cols])
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/tensor_shape.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unified callbacks op execution and creation under eager and graph modes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
# A thread-local state object. It may hold the following attributes:
# - `callbacks`: the thread-local stack of op callbacks.
# - `invoking_callbacks`: a boolean used to keep track of whether
# we are currently invoking an op_callback.
_state = threading.local()
class _OpCallbackContextManager(object):
"""Context manager for op callbacks."""
def __init__(self, callback_fn):
self._callback_fn = callback_fn
def __enter__(self):
"""A method of when a scope of this context manager is being entered."""
# Monkey-patch `execute.execute()`.
execute.execute = execute.execute_with_callbacks
if not hasattr(_state, "callback_stack"):
_state.callback_stack = []
_state.invoking_callbacks = False
_state.callback_stack.append(self._callback_fn)
ctx = context.context()
if ctx.executing_eagerly():
ctx.post_execution_callbacks.append(self._callback_fn)
def __exit__(self, exec_type, exec_value, exec_traceback):
"""A method of when a scope of this context manager is being exited."""
_state.callback_stack.pop()
ctx = context.context()
if ctx.executing_eagerly():
ctx.post_execution_callbacks.pop()
def op_callback(callback_fn):
r"""Intercepts op execution and op creation.
The `callback_fn` will be invoked immediately after any of the three types
of events:
- The execution of a TensorFlow operation ("op" for short hereafter)
under eager mode,
- The execution of a FuncGraph under eager mode,
- The creation of an op during graph construction (e.g., in
@tf.function-decorated Python functions).
Args:
callback_fn: A callable that has the following signature:
def callback_fn(op_type,
inputs,
attrs,
outputs,
op_name=None,
graph=None):
# op_type: The type of the op, as a string. E.g., "MatMul".
# For the special case of FuncGraph execution, op_type
# takes the name of the graph, e.g.,
# "__inference_my_func_24".
# inputs: (`tuple` of `Tensor`s) Input tensors to the op or the
# FuncGraph.
# - In eager execution, these are `EagerTensor`s.
# - In graph construction, these are non-eager `Tensor`s
# that form the inputs to the just-created op.
# attrs: The attributes of the op or FuncGraph of which the execution
# or creation caused the current invocation of the callback.
# This is applicable to both eager- and graph-based execution,
# as well as graph construction.
# This is a tuple of alternating attribute keys and attribute
# values. E.g., `('adjoint_a', False, 'adjoint_b', False)`.
# outputs: (`tuple` of `Tensor`s) Output tensors from the op or
# FuncGraph.
# In eager execution, these are `EagerTensor`s.
# In graph construction, these are non-eager `Tensor`s that
# are the outputs of the just-created op.
# op_name: Name of the op.
# - If the current invocation of the callback is due to the
# eager execution of an op or FuncGraph, this will be
# `None`, as op names are meaningless in eager execution.
# - In graph construction, this is the name of the op, e.g.,
# "MatMul_2".
# graph: The graph that the op belongs to (if any).
# - In eager execution of an op or FuncGraph, this is `None`.
# - In graph construction, this is the op's containing graph
# as a `tf.Graph` object.
#
# Return values:
# This callback function is expected to return `None` or
# a `list` or `tuple` of `Tensor`s with its length matching
# `len(outputs)`, in the order that corresponds to that of the
# `outputs` argument.
# If the return value is `None`, downstream execution or graph
# construction will be unaffected.
# However, if the return value is a `list` or `tuple` of `Tensor`s,
# - In eager execution, these returned `Tensor`s should be
# `EagerTensor`s. Their values will replace the original values of
# `outputs` for downstream eager execution. (*Not implemented yet*).
# - In graph construction, these returned `Tensor`s should be
# non-eager `Tensor`s. Their values will replace the original
# `outputs` for downstream graph construction.
Returns:
A thread-local context manager. Within the scope of the context
manager, all eager op/graph execution and graph op construction
will invoke `callback_fn`.
Raises:
ValueError: If `callback_fn` is `None` or not callable.
"""
# TODO(b/139668041): Implement support for overriding `EagerTensor`s from
# callback.
if callback_fn is None:
raise ValueError("Passed callback function cannot be None.")
if not callable(callback_fn):
raise ValueError(
"Callback function passed to op_callback() is expected to be callable, "
"but is not. Recevied %s" % callback_fn)
return _OpCallbackContextManager(callback_fn)
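# A minimal usage sketch for `op_callback` (illustrative; `my_callback` and
# the matmul operands are hypothetical names, not part of this file):
#
#   def my_callback(op_type, inputs, attrs, outputs, op_name=None, graph=None):
#     print("Intercepted:", op_type)  # e.g., "MatMul"
#     return None  # Leave the op's outputs unchanged.
#
#   with op_callback(my_callback):
#     z = tf.matmul(x, y)  # Executing or creating ops inside the scope
#                          # invokes the callback with inputs/attrs/outputs.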
def should_invoke_op_callbacks():
"""Determine if op callbacks are present and should be invoked.
Returns:
A thread-local result (boolean) indicating whether any op callback(s) exist
and should be invoked.
"""
return (
hasattr(_state, "callback_stack") and _state.callback_stack and
not (hasattr(_state, "invoking_callbacks") and _state.invoking_callbacks))
def invoke_op_callbacks(op_type,
inputs,
attrs,
outputs,
op_name=None,
graph=None):
r"""Invoke the callbacks that exist in the current scope (if any).
If no callbacks are present in the current scope, this method returns
immediately.
Args:
op_type: Type of the operation (e.g., "MatMul").
inputs: Input tensors to the op. These are `EagerTensor`s in the case of
eager execution of ops or `FuncGraph`s, and are non-eager `Tensor`s in the
case of graph construction.
attrs: Attributes of the op, as `tuple` of alternating keys and values.
outputs: Output tensors from the op. These are `EagerTensor`s in the case of
eager execution and are non-eager `Tensor`s in the case of graph
construction.
op_name: Name of the op. Applicable if and only if this method is invoked
due to the graph construction of an op or the eager execution of a
`FuncGraph`.
graph: The graph involved (if any).
- In the case of the eager execution of an op or FuncGraph, this is
`None`.
- In the case of the graph construction of an op, this is the `tf.Graph`
object being built.
Returns:
`None`, or a `list` or `tuple` of output tensors that will override the
original (input) `outputs`.
"""
if _state.callback_stack:
# Guards against stack overflow that can result from recursive invocation
# due to op constructions inside client-supplied op callbacks.
_state.invoking_callbacks = True
try:
if isinstance(attrs, dict):
attrs_list = []
for key in attrs:
attrs_list.append(key)
attrs_list.append(attrs[key])
attrs_tuple = tuple(attrs_list)
else:
attrs_tuple = attrs
new_outputs = outputs
for callback in reversed(_state.callback_stack):
new_outputs = callback(
op_type,
inputs,
attrs_tuple,
new_outputs,
op_name=op_name,
graph=graph)
if new_outputs is not None and len(new_outputs) != len(outputs):
raise ValueError(
"The op callback returned %s tensors, which does not match the "
"original number of outputs of op %s (%d)." %
(len(new_outputs), op_name, len(outputs)))
return new_outputs
finally:
_state.invoking_callbacks = False
else:
return outputs
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/op_callbacks.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.op_def_library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
def _unknown_shape(op):
"""Shape function for use with ops whose output shapes are unknown."""
return [tensor_shape.unknown_shape() for _ in op.outputs]
class OpDefLibraryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._lib = test_ops._op_def_lib
def _add_op(self, ascii): # pylint: disable=redefined-builtin
op_def = op_def_pb2.OpDef()
text_format.Merge(ascii, op_def)
self._lib.add_op(op_def)
def Tensor(self, t, name="in"):
return self._lib.apply_op("OutT", T=t, name=name)
def testNoRegisteredOpFails(self):
with self.assertRaises(RuntimeError) as cm:
self._lib.apply_op("unknown")
self.assertEqual(str(cm.exception), "Unrecognized Op name unknown")
def testAddOpValidation(self):
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'MissingTypeAttr' "
"input_arg { name: 'a' type_attr: 'T' } ")
self.assertEqual(str(cm.exception),
"Inconsistent OpDef for 'MissingTypeAttr', "
"missing attr 'T'")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'BadTypeAttr' "
"output_arg { name: 'a' type_attr: 'T' } "
"attr { name: 'T' type: 'int' }")
self.assertEqual(
str(cm.exception),
"Attr 'T' of 'BadTypeAttr' used as a type_attr but has type int")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'MissingNumberAttr' "
"input_arg { name: 'a' type: DT_INT32 number_attr: 'N' } ")
self.assertEqual(str(cm.exception),
"Inconsistent OpDef for 'MissingNumberAttr', "
"missing attr 'N'")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'BadNumberAttr' "
"output_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
"attr { name: 'N' type: 'type' }")
self.assertEqual(
str(cm.exception),
"Attr 'N' of 'BadNumberAttr' used as a number_attr but has type type")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'TwoTypesA' "
"input_arg { name: 'a' type: DT_INT32 type_attr: 'T' } "
"attr { name: 'T' type: 'type' }")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'TwoTypesA' must have one type field not 2")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'TwoTypesB' "
"input_arg { name: 'a' type: DT_INT32 type_list_attr: 'T' } "
"attr { name: 'T' type: 'list(type)' }")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'TwoTypesB' must have one type field not 2")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'ThreeTypes' "
"input_arg { name: 'a' type: DT_INT32 type_attr: 'T' "
"type_list_attr: 'U' } "
"attr { name: 'T' type: 'type' } "
"attr { name: 'U' type: 'list(type)' }")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'ThreeTypes' must have one type field not 3")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'NoTypes' output_arg { name: 'a' } ")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'NoTypes' must have one type field not 0")
def testSimple(self):
with ops.Graph().as_default():
out = self._lib.apply_op("Simple", a=3)
self.assertEqual(dtypes.float32, out.dtype)
self.assertProtoEquals("""
name: 'Simple' op: 'Simple' input: 'Simple/a'
""", out.op.node_def)
out = self._lib.apply_op("Simple", a=4)
self.assertProtoEquals("""
name: 'Simple_1' op: 'Simple' input: 'Simple_1/a'
""", out.op.node_def)
out = self._lib.apply_op("Simple", a=5, name="named")
self.assertProtoEquals("""
name: 'named' op: 'Simple' input: 'named/a'
""", out.op.node_def)
out = self._lib.apply_op("Simple", a=[[1, 2, 3], [4, 5, 6]], name="two_d")
self.assertProtoEquals("""
name: 'two_d' op: 'Simple' input: 'two_d/a'
""", out.op.node_def)
def testSimpleFailures(self):
with ops.Graph().as_default():
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a="Bad string")
self.assertTrue(
"Expected int32 passed to parameter 'a' of op 'Simple', "
"got 'Bad string' of type 'str' instead." in str(cm.exception))
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a=self.Tensor(dtypes.string))
self.assertTrue(
"Input 'a' of 'Simple' Op has type string "
"that does not match expected type of int32." in str(cm.exception))
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a=6, extra="bogus")
self.assertTrue(
"apply_op() got unexpected keyword arguments: extra"
in str(cm.exception))
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a=6, extra1="bogus", extra2="also_bogus")
self.assertTrue(
"apply_op() got unexpected keyword arguments: extra1, "
"extra2" in str(cm.exception))
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple")
self.assertTrue(
"No argument for input a" in str(cm.exception))
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", wrong=7)
self.assertTrue(
"No argument for input a" in str(cm.exception))
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a={"label": 1})
self.assertTrue(
"Expected int32 passed to parameter 'a' of op 'Simple', "
"got {'label': 1} of type 'dict' instead." in str(cm.exception))
def testReservedInput(self):
with ops.Graph().as_default():
op = self._lib.apply_op("ReservedInput", input_=7, name="x")
self.assertProtoEquals("""
name: 'x' op: 'ReservedInput' input: 'x/input'
""", op.node_def)
def testPolymorphic(self):
with ops.Graph().as_default():
out = self._lib.apply_op("Polymorphic", a=7, name="p")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'Polymorphic' input: 'p/a'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = self._lib.apply_op("Polymorphic", a="s", name="q")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'Polymorphic' input: 'q/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = self._lib.apply_op("Polymorphic", a=["s", "t", "u"], name="r")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'r' op: 'Polymorphic' input: 'r/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Polymorphic", a="s", T=dtypes.string)
self.assertEqual(str(cm.exception),
"Should not specify value for inferred attr 'T'.")
def testPolymorphicOut(self):
with ops.Graph().as_default():
out = self._lib.apply_op("PolymorphicOut", T=dtypes.int32, name="p")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'PolymorphicOut'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = self._lib.apply_op("PolymorphicOut", T=dtypes.bool, name="q")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'PolymorphicOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("PolymorphicOut")
self.assertEqual(str(cm.exception),
"No argument for attr T")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("PolymorphicOut", T=None)
self.assertEqual(str(cm.exception),
"Expected DataType for argument 'T' not None.")
def testPolymorphicDefaultOut(self):
with ops.Graph().as_default():
out = self._lib.apply_op("PolymorphicDefaultOut", T=None, name="p")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'PolymorphicDefaultOut'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = self._lib.apply_op("PolymorphicDefaultOut", T=dtypes.bool, name="q")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'PolymorphicDefaultOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
def testBinary(self):
with ops.Graph().as_default():
out = self._lib.apply_op("Binary", a=8, b=9, name="b")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'b' op: 'Binary' input: 'b/a' input: 'b/b'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = self._lib.apply_op("Binary", a="left", b="right", name="c")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'c' op: 'Binary' input: 'c/a' input: 'c/b'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
with self.assertRaises(TypeError):
self._lib.apply_op("Binary", a="left", b=12)
with self.assertRaises(TypeError):
self._lib.apply_op("Binary",
a=self.Tensor(dtypes.string),
b=self.Tensor(dtypes.int32))
def testRestrict(self):
with ops.Graph().as_default():
out = self._lib.apply_op("Restrict", a="foo", name="g")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'g' op: 'Restrict' input: 'g/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = self._lib.apply_op("Restrict", a=True, name="h")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'h' op: 'Restrict' input: 'h/a'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Restrict", a=17)
self.assertEqual(str(cm.exception),
"Value passed to parameter 'a' has DataType int32 "
"not in list of allowed values: string, bool")
def testTypeList(self):
with ops.Graph().as_default():
op = self._lib.apply_op("TypeList", a=["foo"], name="z")
self.assertProtoEquals("""
name: 'z' op: 'TypeList' input: 'z/a_0'
attr { key: 'T' value { list { type: DT_STRING } } }
""", op.node_def)
op = self._lib.apply_op("TypeList", a=[True, 12], name="y")
self.assertProtoEquals("""
name: 'y' op: 'TypeList' input: 'y/a_0' input: 'y/a_1'
attr { key: 'T' value { list { type: DT_BOOL type: DT_INT32 } } }
""", op.node_def)
op = self._lib.apply_op("TypeList", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'TypeList' attr { key: 'T' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeList", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' "
"argument to 'TypeList' Op, not ")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeList", a=[self.Tensor(dtypes.int32), None])
self.assertStartsWith(str(cm.exception),
"Tensors in list passed to 'a' of 'TypeList' Op "
"have types [int32, <NOT CONVERTIBLE TO TENSOR>]")
def testTypeListTwice(self):
with ops.Graph().as_default():
op = self._lib.apply_op("TypeListTwice",
a=["foo", True],
b=["bar", False],
name="z")
self.assertProtoEquals("""
name: 'z' op: 'TypeListTwice'
input: 'z/a_0' input: 'z/a_1' input: 'z/b_0' input: 'z/b_1'
attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }
""", op.node_def)
op = self._lib.apply_op("TypeListTwice", a=[], b=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'TypeListTwice' attr { key: 'T' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeListTwice", a=["foo", True], b=["bar", 6])
self.assertEqual(str(cm.exception),
"Input 'b' of 'TypeListTwice' Op has type list of "
"string, int32 that does not match type list "
"string, bool of argument 'a'.")
def testOutTypeList(self):
with ops.Graph().as_default():
out, = self._lib.apply_op("OutTypeList", T=[dtypes.float32], name="x")
self.assertEqual(dtypes.float32, out.dtype)
self.assertProtoEquals("""
name: 'x' op: 'OutTypeList'
attr { key: 'T' value { list { type: DT_FLOAT } } }
""", out.op.node_def)
out1, out2 = self._lib.apply_op("OutTypeList",
T=[dtypes.int32, dtypes.bool],
name="w")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertProtoEquals("""
name: 'w' op: 'OutTypeList'
attr { key: 'T' value { list { type: DT_INT32 type: DT_BOOL } } }
""", out1.op.node_def)
out = self._lib.apply_op("OutTypeList", T=[], name="empty")
self.assertEqual([], out)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("OutTypeList", T=dtypes.int32)
self.assertEqual(str(cm.exception), "Expected list for attr T")
def testTypeListRestrict(self):
with ops.Graph().as_default():
op = self._lib.apply_op("TypeListRestrict", a=["foo", False], name="v")
self.assertProtoEquals("""
name: 'v' op: 'TypeListRestrict' input: 'v/a_0' input: 'v/a_1'
attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeListRestrict", a=[True, 12])
self.assertEqual(str(cm.exception),
"Value passed to parameter 'a' has DataType int32 "
"not in list of allowed values: string, bool")
def testOutTypeListRestrict(self):
with ops.Graph().as_default():
out1, out2 = self._lib.apply_op("OutTypeListRestrict",
t=[dtypes.bool, dtypes.string],
name="u")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.string, out2.dtype)
self.assertProtoEquals("""
name: 'u' op: 'OutTypeListRestrict'
attr { key: 't' value { list { type: DT_BOOL type: DT_STRING } } }
""", out1.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("OutTypeListRestrict",
t=[dtypes.string, dtypes.int32])
self.assertEqual(str(cm.exception),
"Value passed to parameter 't' has DataType int32 "
"not in list of allowed values: string, bool")
def testAttr(self):
with ops.Graph().as_default():
op = self._lib.apply_op("Attr", a=12, name="t")
self.assertProtoEquals("""
name: 't' op: 'Attr' attr { key: 'a' value { i: 12 } }
""", op.node_def)
op = self._lib.apply_op("Attr", a=tensor_shape.Dimension(13), name="u")
self.assertProtoEquals("""
name: 'u' op: 'Attr' attr { key: 'a' value { i: 13 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr", a="bad")
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not 'bad'.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr", a=[12])
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not [12].")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr", a=None)
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not None.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr")
self.assertEqual(str(cm.exception), "No argument for attr a")
def testAttrFloat(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrFloat", a=1.2, name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrFloat' attr { key: 'a' value { f: 1.2 } }
""", op.node_def)
op = self._lib.apply_op("AttrFloat", a=12, name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrFloat' attr { key: 'a' value { f: 12 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrFloat", a="bad")
self.assertEqual(str(cm.exception),
"Expected float for argument 'a' not 'bad'.")
def testAttrFunc(self):
with ops.Graph().as_default():
@function.Defun(dtypes.float32, func_name="MyFn")
def fn(x):
return 2 + x
op = self._lib.apply_op("FuncAttr", f=fn, name="t")
self.assertProtoEquals("""
name: 't' op: 'FuncAttr' attr { key: 'f'
value { func { name: 'MyFn' } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("FuncAttr", f=3)
self.assertEqual(str(cm.exception),
"Don't know how to convert 3 to a func for argument f")
def testAttrFuncList(self):
with ops.Graph().as_default():
@function.Defun(dtypes.float32, func_name="MyFn")
def fn1(x):
return 2 + x
@function.Defun(dtypes.int32, dtypes.float32, func_name="MyFn2")
def fn2(x, y):
return 2 + x, y * 3
@function.Defun(dtypes.int32, func_name="MyFn3")
def fn3(y):
return 2 + y
op = self._lib.apply_op("FuncListAttr", f=[fn1, fn2, fn3], name="t")
self.assertProtoEquals("""
name: 't' op: 'FuncListAttr'
attr { key: 'f' value { list { func { name: 'MyFn' }
func { name: 'MyFn2' }
func { name: 'MyFn3' } } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("FuncListAttr", f=[fn1, 3, fn2])
self.assertEqual(str(cm.exception),
"Don't know how to convert 3 to a func for argument f")
def testAttrBool(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrBool", a=True, name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrBool' attr { key: 'a' value { b: true } }
""", op.node_def)
op = self._lib.apply_op("AttrBool", a=False, name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrBool' attr { key: 'a' value { b: false } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBool", a=0)
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 0.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBool", a=1)
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 1.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBool", a=[])
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not [].")
def testAttrBoolList(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrBoolList", a=[True, False, True], name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrBoolList'
attr { key: 'a' value { list { b: true b: false b:true } } }
""", op.node_def)
op = self._lib.apply_op("AttrBoolList", a=[], name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrBoolList' attr { key: 'a' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBoolList", a=[0])
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 0.")
def testAttrMin(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrMin", a=12, name="s")
self.assertProtoEquals("""
name: 's' op: 'AttrMin' attr { key: 'a' value { i: 12 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrMin", a=2)
self.assertEqual(str(cm.exception),
"Attr 'a' of 'AttrMin' Op passed 2 less than minimum 5.")
def testAttrListMin(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrListMin", a=[1, 2], name="r")
self.assertProtoEquals("""
name: 'r' op: 'AttrListMin'
attr { key: 'a' value { list { i: 1 i: 2 } } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrListMin", a=[17])
self.assertEqual(str(cm.exception),
"Attr 'a' of 'AttrListMin' Op "
"passed list of length 1 less than minimum 2.")
def testAttrEnum(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrEnum", a="oranges", name="e")
self.assertProtoEquals("""
name: 'e' op: 'AttrEnum' attr { key: 'a' value { s: 'oranges' } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrEnum", a="invalid")
self.assertEqual(str(cm.exception),
'Attr \'a\' of \'AttrEnum\' Op '
'passed string \'invalid\' not in: '
'"apples", "oranges".')
def testAttrEnumList(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrEnumList", a=["oranges", "apples"], name="f")
self.assertProtoEquals("""
name: 'f' op: 'AttrEnumList'
attr { key: 'a' value { list { s: 'oranges' s: 'apples' } } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrEnumList", a=["apples", "invalid", "oranges"])
self.assertEqual(str(cm.exception),
'Attr \'a\' of \'AttrEnumList\' Op '
'passed string \'invalid\' not '
'in: "apples", "oranges".')
def testAttrShape(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrShape", a=[5], name="s1")
self.assertProtoEquals("""
name: 's1' op: 'AttrShape'
attr { key: 'a' value { shape { dim { size: 5 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrShape", a=(4, 3, 2), name="s2")
self.assertProtoEquals("""
name: 's2' op: 'AttrShape'
attr { key: 'a' value {
shape { dim { size: 4 } dim { size: 3 } dim { size: 2 } } } }
""", op.node_def)
op = self._lib.apply_op(
"AttrShape", a=tensor_shape.TensorShape([3, 2]), name="s3")
self.assertProtoEquals("""
name: 's3' op: 'AttrShape'
attr { key: 'a' value {
shape { dim { size: 3 } dim { size: 2 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrShape", a=[], name="s4")
self.assertProtoEquals("""
name: 's4' op: 'AttrShape' attr { key: 'a' value { shape { } } }
""", op.node_def)
shape = tensor_shape_pb2.TensorShapeProto()
shape.dim.add().size = 6
shape.dim.add().size = 3
op = self._lib.apply_op("AttrShape", a=shape, name="s5")
self.assertProtoEquals("""
name: 's5' op: 'AttrShape'
attr { key: 'a' value { shape { dim { size: 6 } dim { size: 3 } } } }
""", op.node_def)
# TODO(josh11b): Re-enable this test once we stop promoting scalars to
# shapes.
# with self.assertRaises(TypeError) as cm:
# self._lib.apply_op("AttrShape", a=5)
# self.assertEqual(str(cm.exception),
# "Don't know how to convert 5 to a TensorShapeProto for"
# " argument 'a'")
with self.assertRaises(TypeError):
self._lib.apply_op("AttrShape", a="ABC")
def testAttrShapeList(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrShapeList", a=[[3, 2], [6, 5, 4]], name="sl")
self.assertProtoEquals("""
name: 'sl' op: 'AttrShapeList'
attr { key: 'a' value { list {
shape { dim { size: 3 } dim { size: 2 } }
shape { dim { size: 6 } dim { size: 5 } dim { size: 4 } } } } }
""", op.node_def)
op = self._lib.apply_op("AttrShapeList", a=[], name="esl")
self.assertProtoEquals("""
name: 'esl' op: 'AttrShapeList' attr { key: 'a' value { list { } } }
""", op.node_def)
def testAttrPartialShape(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrPartialShape", a=[5], name="s1")
self.assertProtoEquals("""
name: 's1' op: 'AttrPartialShape'
attr { key: 'a' value { shape { dim { size: 5 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrPartialShape", a=(4, None, 2), name="s2")
self.assertProtoEquals("""
name: 's2' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: 4 } dim { size: -1 } dim { size: 2 } } } }
""", op.node_def)
op = self._lib.apply_op(
"AttrPartialShape", a=tensor_shape.TensorShape([3, None]), name="s3")
self.assertProtoEquals("""
name: 's3' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: 3 } dim { size: -1 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrPartialShape", a=[], name="s4")
self.assertProtoEquals("""
name: 's4' op: 'AttrPartialShape'
attr { key: 'a' value { shape { } } }
""", op.node_def)
shape = tensor_shape_pb2.TensorShapeProto()
shape.dim.add().size = -1
shape.dim.add().size = 3
op = self._lib.apply_op("AttrPartialShape", a=shape, name="s5")
self.assertProtoEquals("""
name: 's5' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: -1 } dim { size: 3 } } } }
""", op.node_def)
# TODO(ebrevdo): Re-enable once we stop promoting scalars to shapes.
# with self.assertRaises(TypeError) as cm:
# self._lib.apply_op("AttrPartialShape", a=5)
# self.assertEqual(str(cm.exception),
# "Don't know how to convert 5 to a TensorShapeProto for"
# " argument 'a'")
with self.assertRaises(TypeError):
self._lib.apply_op("AttrPartialShape", a="ABC")
def testAttrPartialShapeList(self):
with ops.Graph().as_default():
op = self._lib.apply_op(
"AttrPartialShapeList", a=[[3, 2], [6, None, 4]], name="sl")
self.assertProtoEquals("""
name: 'sl' op: 'AttrPartialShapeList'
attr { key: 'a' value { list {
shape { dim { size: 3 } dim { size: 2 } }
shape { dim { size: 6 } dim { size: -1 } dim { size: 4 } } } } }
""", op.node_def)
op = self._lib.apply_op("AttrPartialShapeList", a=[], name="esl")
self.assertProtoEquals("""
name: 'esl' op: 'AttrPartialShapeList' attr {
key: 'a' value { list { } } }
""", op.node_def)
def testAttrDefault(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrDefault", a=None, name="d")
self.assertProtoEquals("""
name: 'd' op: 'AttrDefault' attr { key: 'a' value { s: 'banana' } }
""", op.node_def)
op = self._lib.apply_op("AttrDefault", a="kiwi", name="c")
self.assertProtoEquals("""
name: 'c' op: 'AttrDefault' attr { key: 'a' value { s: 'kiwi' } }
""", op.node_def)
def testAttrListDefault(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrListDefault", a=None, name="b")
self.assertProtoEquals("""
name: 'b' op: 'AttrListDefault'
attr { key: 'a' value { list { i: 5 i: 15 } } }
""", op.node_def)
op = self._lib.apply_op("AttrListDefault", a=[3], name="a")
self.assertProtoEquals("""
name: 'a' op: 'AttrListDefault'
attr { key: 'a' value { list { i: 3 } } }
""", op.node_def)
op = self._lib.apply_op("AttrListDefault", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'AttrListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
def testAttrEmptyListDefault(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrEmptyListDefault", a=None, name="b")
self.assertProtoEquals("""
name: 'b' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
op = self._lib.apply_op("AttrEmptyListDefault", a=[3], name="a")
self.assertProtoEquals("""
name: 'a' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { f: 3 } } }
""", op.node_def)
op = self._lib.apply_op("AttrEmptyListDefault", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
def testReservedAttr(self):
with ops.Graph().as_default():
op = self._lib.apply_op("ReservedAttr", range_=7, name="x")
self.assertProtoEquals("""
name: 'x' op: 'ReservedAttr' attr { key: 'range' value { i: 7 } }
""", op.node_def)
def testDefaultAttrType(self):
with ops.Graph().as_default():
# Give an input whose type has no obvious output type.
op = self._lib.apply_op("AttrTypeDefault", a=[], name="n")
self.assertProtoEquals("""
name: 'n' op: 'AttrTypeDefault' input: 'n/a'
attr { key: 'T' value { type: DT_INT32 } }
""", op.node_def)
# Give an input whose type can be inferred as different
# than the default.
op = self._lib.apply_op("AttrTypeDefault", a=[1.0], name="f")
self.assertProtoEquals("""
name: 'f' op: 'AttrTypeDefault' input: 'f/a'
attr { key: 'T' value { type: DT_FLOAT } }
""", op.node_def)
def testDefaultListAttrType(self):
with ops.Graph().as_default():
# Give an input whose type can be inferred as different
# than the default.
op = self._lib.apply_op("AttrListTypeDefault", a=[1.0], b=[2.0], name="n")
self.assertProtoEquals("""
name: 'n' op: 'AttrListTypeDefault' input: 'n/a_0' input: 'n/b_0'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 1 } }
""", op.node_def)
def testNIntsIn(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NIntsIn", a=[1, 2], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NIntsIn' input: 'n/a_0' input: 'n/a_1'
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NIntsIn", a=[5, 4, 3, 2, 1], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NIntsIn'
input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'
attr { key: 'N' value { i: 5 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn", a=["foo", "bar"])
self.assertEqual(
str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op have types "
"[string, string] that do not match expected type int32.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn",
a=[self.Tensor(dtypes.string),
self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op have "
"types [string, string] that do not match expected type "
"int32.")
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NIntsIn", a=[99])
self.assertEqual(str(cm.exception),
"List argument 'a' to 'NIntsIn' Op "
"with length 1 shorter than "
"minimum length 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn", a=[38, "bar"])
self.assertEqual(
str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op have types "
"[int32, string] that do not match expected type int32.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn",
a=[self.Tensor(dtypes.int32),
self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op "
"have types [int32, string] that do not match expected "
"type int32.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' argument "
"to 'NIntsIn' Op, not ")
def testNPolymorphicIn(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NPolymorphicIn", a=[1, 2], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NPolymorphicIn' input: 'n/a_0' input: 'n/a_1'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn", a=[5, 4, 3, 2, 1], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NPolymorphicIn'
input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 5 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn", a=["foo", "bar"], name="p")
self.assertProtoEquals("""
name: 'p' op: 'NPolymorphicIn' input: 'p/a_0' input: 'p/a_1'
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn",
a=[1, self.Tensor(dtypes.float32, name="x")],
name="q")
self.assertProtoEquals("""
name: 'q' op: 'NPolymorphicIn' input: 'q/a_0' input: 'x'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn",
a=[self.Tensor(dtypes.float32, name="y"),
self.Tensor(dtypes.float32_ref, name="z")],
name="r")
self.assertProtoEquals("""
name: 'r' op: 'NPolymorphicIn' input: 'y' input: 'z'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[99])
self.assertEqual(str(cm.exception),
"List argument 'a' to 'NPolymorphicIn' Op with length 1 "
"shorter than minimum length 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[38, "bar"])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [int32, string] that don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[38, self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [int32, string] that don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[38, None])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [int32, <NOT CONVERTIBLE TO TENSOR>] that "
"don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn",
a=["abcd", self.Tensor(dtypes.int32)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [string, int32] that don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' argument "
"to 'NPolymorphicIn' Op, not ")
def testNPolymorphicRestrictIn(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NPolymorphicRestrictIn", a=["foo", "bar"],
name="p")
self.assertProtoEquals("""
name: 'p' op: 'NPolymorphicRestrictIn' input: 'p/a_0' input: 'p/a_1'
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicRestrictIn",
a=[False, True, False],
name="b")
self.assertProtoEquals("""
name: 'b' op: 'NPolymorphicRestrictIn'
input: 'b/a_0' input: 'b/a_1' input: 'b/a_2'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 3 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicRestrictIn", a=[1, 2])
self.assertEqual(
str(cm.exception),
"Value passed to parameter 'a' has DataType int32 not in "
"list of allowed values: string, bool")
def testNInTwice(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NInTwice", a=[1, 2], b=["one", "two"], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NInTwice'
input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NInTwice", a=[], b=[], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NInTwice' attr { key: 'N' value { i: 0 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NInTwice", a=[1, 2, 3], b=["too short"])
self.assertEqual(str(cm.exception),
"List argument 'b' to 'NInTwice' Op "
"with length 1 must match "
"length 3 of argument 'a'.")
def testNInPolymorphicTwice(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NInPolymorphicTwice", a=[1, 2], b=[3, 4],
name="n")
self.assertProtoEquals("""
name: 'n' op: 'NInPolymorphicTwice'
input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NInPolymorphicTwice", a=[1, 2, 3], b=[5])
self.assertEqual(str(cm.exception),
"List argument 'b' to 'NInPolymorphicTwice' Op "
"with length 1 "
"must match length 3 of argument 'a'.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NInPolymorphicTwice", a=[1, 2], b=["one", "two"])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of 'NInPolymorphicTwice' "
"Op have types [string, string] that do not match type "
"int32 inferred from earlier arguments.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NInPolymorphicTwice",
a=[self.Tensor(dtypes.int32)],
b=[self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of "
"'NInPolymorphicTwice' Op have types [string] that do "
"not match type int32 inferred from earlier arguments.")
def testNInTwoTypeVariables(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NInTwoTypeVariables",
a=[1, 2],
b=[True, False],
name="n")
self.assertProtoEquals("""
name: 'n' op: 'NInTwoTypeVariables'
input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
attr { key: 'S' value { type: DT_INT32 } }
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NInTwoTypeVariables", a=[1, 2], b=[3, 4],
name="o")
self.assertProtoEquals("""
name: 'o' op: 'NInTwoTypeVariables'
input: 'o/a_0' input: 'o/a_1' input: 'o/b_0' input: 'o/b_1'
attr { key: 'S' value { type: DT_INT32 } }
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NInTwoTypeVariables",
a=[self.Tensor(dtypes.int32, name="q")],
b=[self.Tensor(dtypes.string, name="r")],
name="p")
self.assertProtoEquals("""
name: 'p' op: 'NInTwoTypeVariables' input: 'q' input: 'r'
attr { key: 'S' value { type: DT_INT32 } }
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 1 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NInTwoTypeVariables", a=[1, 2, 3], b=["5"])
self.assertEqual(str(cm.exception),
"List argument 'b' to 'NInTwoTypeVariables' Op "
"with length 1 "
"must match length 3 of argument 'a'.")
def testInPolymorphicTwice(self):
with ops.Graph().as_default():
op = self._lib.apply_op("InPolymorphicTwice", a=[8], b=[3, 4, 5],
name="n")
self.assertProtoEquals("""
name: 'n' op: 'InPolymorphicTwice'
input: 'n/a_0' input: 'n/b_0' input: 'n/b_1' input: 'n/b_2'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 1 } }
attr { key: 'M' value { i: 3 } }
""", op.node_def)
op = self._lib.apply_op("InPolymorphicTwice", a=[8], b=[], name="o")
self.assertProtoEquals("""
name: 'o' op: 'InPolymorphicTwice' input: 'o/a_0'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 1 } }
attr { key: 'M' value { i: 0 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("InPolymorphicTwice", a=[], b=[3, 4, 5])
self.assertEqual(str(cm.exception),
"Don't know how to infer type variable from empty input "
"list passed to input 'a' of 'InPolymorphicTwice' Op.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("InPolymorphicTwice", a=[1, 2], b=["one", "two"])
self.assertEqual(
str(cm.exception),
"Tensors in list passed to 'b' of 'InPolymorphicTwice' Op "
"have types [string, string] that do not match type int32 "
"inferred from earlier arguments.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("InPolymorphicTwice",
a=[self.Tensor(dtypes.int32)],
b=[self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of 'InPolymorphicTwice' "
"Op have types [string] that do not match type int32 "
"inferred from earlier arguments.")
def testNIntsOut(self):
with ops.Graph().as_default():
out1, out2 = self._lib.apply_op("NIntsOut", N=2, name="n")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 'n' op: 'NIntsOut' attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3, out4, out5 = self._lib.apply_op(
"NIntsOut", N=5, name="o")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertEqual(dtypes.int32, out3.dtype)
self.assertEqual(dtypes.int32, out4.dtype)
self.assertEqual(dtypes.int32, out5.dtype)
self.assertProtoEquals("""
name: 'o' op: 'NIntsOut' attr { key: 'N' value { i: 5 } }
""", out5.op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NIntsOut", N=1)
self.assertEqual(
str(cm.exception),
"Attr 'N' of 'NIntsOut' Op passed 1 less than minimum 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsOut", N=[3])
self.assertEqual(str(cm.exception),
"Expected int for argument 'N' not [3].")
def testNIntsOutDefault(self):
with ops.Graph().as_default():
out1, out2, out3 = self._lib.apply_op(
"NIntsOutDefault", N=None, name="z")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertEqual(dtypes.int32, out3.dtype)
self.assertProtoEquals("""
name: 'z' op: 'NIntsOutDefault' attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
out1, out2 = self._lib.apply_op("NIntsOutDefault", N=2, name="y")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 'y' op: 'NIntsOutDefault' attr { key: 'N' value { i: 2 } }
""", out2.op.node_def)
def testNPolymorphicOut(self):
with ops.Graph().as_default():
out1, out2 = self._lib.apply_op("NPolymorphicOut",
N=2,
T=dtypes.int32,
name="n")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 'n' op: 'NPolymorphicOut'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicOut", T=dtypes.string, N=3, name="o")
self.assertEqual(dtypes.string, out1.dtype)
self.assertEqual(dtypes.string, out2.dtype)
self.assertEqual(dtypes.string, out3.dtype)
self.assertProtoEquals("""
name: 'o' op: 'NPolymorphicOut'
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 3 } }
""", out3.op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NPolymorphicOut", N=1, T=dtypes.string)
self.assertEqual(str(cm.exception),
"Attr 'N' of 'NPolymorphicOut' Op "
"passed 1 less than minimum 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicOut", N=3, T=[dtypes.string])
self.assertEqual(
str(cm.exception),
"Expected DataType for argument 'T' not [tf.string].")
def testNPolymorphicOutDefault(self):
with ops.Graph().as_default():
out1, out2 = self._lib.apply_op(
"NPolymorphicOutDefault", N=None, T=None, name="r")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertProtoEquals("""
name: 'r' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicOutDefault", N=3, T=None, name="s")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertEqual(dtypes.bool, out3.dtype)
self.assertProtoEquals("""
name: 's' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
out1, out2 = self._lib.apply_op(
"NPolymorphicOutDefault", N=None, T=dtypes.int32, name="t")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 't' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicOutDefault", N=3, T=dtypes.int32, name="u")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertEqual(dtypes.int32, out3.dtype)
self.assertProtoEquals("""
name: 'u' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
def testNPolymorphicRestrictOut(self):
with ops.Graph().as_default():
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicRestrictOut", N=3, T=dtypes.bool, name="u")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertEqual(dtypes.bool, out3.dtype)
self.assertProtoEquals("""
name: 'u' op: 'NPolymorphicRestrictOut'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicRestrictOut", N=2, T=dtypes.int32)
self.assertEqual(str(cm.exception),
"Value passed to parameter 'T' has DataType int32 "
"not in list of allowed values: string, bool")
def testRef(self):
with ops.Graph().as_default():
out = self._lib.apply_op("RefOut", T=dtypes.bool, name="o")
self.assertEqual(dtypes.bool_ref, out.dtype)
self.assertProtoEquals("""
name: 'o' op: 'RefOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
op = self._lib.apply_op("RefIn", a=out, name="i")
self.assertProtoEquals("""
name: 'i' op: 'RefIn' input: 'o'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: "_class" value { list { s: "loc:@o" } } }
""", op.node_def)
# Can pass ref to non-ref input.
out = self._lib.apply_op("RefOut", T=dtypes.int32, name="r")
out = self._lib.apply_op("Simple", a=out, name="s")
self.assertProtoEquals("""
name: 's' op: 'Simple' input: 'r'
""", out.op.node_def)
# Can't pass non-ref to ref input.
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("RefIn", a=2)
self.assertEqual(
str(cm.exception),
"'RefIn' Op requires that input 'a' be a mutable tensor " +
"(e.g.: a tf.Variable)")
input_a = self._lib.apply_op("RefOut", T=dtypes.int32, name="t")
input_b = self._lib.apply_op("RefOut", T=dtypes.int32, name="u")
op = self._lib.apply_op("TwoRefsIn", a=input_a, b=input_b, name="v")
# NOTE(mrry): The order of colocation constraints is an implementation
# detail.
self.assertProtoEquals("""
name: 'v' op: 'TwoRefsIn' input: 't' input: 'u'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: "_class" value { list { s: "loc:@t" s: "loc:@u" } } }
""", op.node_def)
def testSpecifyDevice(self):
graph = ops.Graph()
with graph.as_default():
with graph.device("/job:ADevice"):
self._lib.apply_op("Simple", a=3)
# We look at the whole graph here to make sure the Const op is also given
# the specified device.
graph_def = graph.as_graph_def()
self.assertEqual(len(graph_def.node), 2)
for node in graph_def.node:
self.assertDeviceEqual(node.device, "/job:ADevice")
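  # Sketch of the resulting graph (illustrative, not asserted verbatim by the
  # test): apply_op converts the Python int 3 to a Const node inside the
  # device scope, so both nodes carry the device, roughly:
  #
  #   node { name: 'Simple/a' op: 'Const'  device: '/job:ADevice' ... }
  #   node { name: 'Simple'   op: 'Simple' device: '/job:ADevice' ... }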
def testStructuredOutputSingleList(self):
with ops.Graph().as_default():
for n_a in [0, 1, 3]:
a = self._lib.apply_op("SimpleStruct", n_a=n_a)
self.assertTrue(isinstance(a, list))
self.assertEqual(n_a, len(a))
def testStructuredOutputListAndSingle(self):
with ops.Graph().as_default():
for n_a in [0, 1, 3]:
a, b = self._lib.apply_op("MixedStruct", n_a=n_a)
self.assertTrue(isinstance(a, list))
self.assertEqual(n_a, len(a))
self.assertTrue(all(x.dtype == dtypes.int32 for x in a))
self.assertTrue(isinstance(b, ops.Tensor))
self.assertEqual(dtypes.float32, b.dtype)
def testStructuredOutputMultipleLists(self):
with ops.Graph().as_default():
for n_a in [0, 1, 3]:
for n_b in [0, 1, 3]:
for t_c in [[],
[dtypes.int32],
[dtypes.int32, dtypes.float32]]:
a, b, c = self._lib.apply_op("ComplexStruct",
n_a=n_a,
n_b=n_b,
t_c=t_c)
self.assertEqual(n_a, len(a))
self.assertTrue(all(x.dtype == dtypes.int32 for x in a))
self.assertEqual(n_b, len(b))
self.assertTrue(all(x.dtype == dtypes.int64 for x in b))
self.assertEqual(t_c, [x.dtype for x in c])
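# Caller-side sketch of the structured-output convention exercised above
# (illustrative; `lib` stands for an OpDefLibrary with the test ops
# registered): list outputs arrive as plain Python lists, e.g.
#
#   int_list, float_tensor = lib.apply_op("MixedStruct", n_a=2)
#   assert len(int_list) == 2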
class OpDefLibraryGraphTest(test_util.TensorFlowTestCase):
def setUp(self):
self._lib = test_ops._op_def_lib
def _add_op(self, ascii): # pylint: disable=redefined-builtin
op_def = op_def_pb2.OpDef()
text_format.Merge(ascii, op_def)
self._lib.add_op(op_def)
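  # For reference (illustrative only), the ascii accepted by _add_op is OpDef
  # text format; a hypothetical one-input op would look like:
  #
  #   name: 'IdentityLike'
  #   input_arg { name: 'a' type: DT_INT32 }
  #   output_arg { name: 'out' type: DT_INT32 }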
def testNoGraph(self):
out = self._lib.apply_op("Simple", a=3)
self.assertEqual(out.graph, ops.get_default_graph())
def testDefaultGraph(self):
graph = ops.Graph()
with graph.as_default():
out = self._lib.apply_op("Simple", a=3)
self.assertEqual(out.graph, graph)
def testDifferentGraphFails(self):
with ops.Graph().as_default():
a = self._lib.apply_op("Simple", a=3)
with ops.Graph().as_default():
b = self._lib.apply_op("Simple", a=4)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("Binary", a=a, b=b)
self.assertTrue("must be from the same graph" in str(cm.exception))
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/op_def_library_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import device
from tensorflow.python.framework import device_spec
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
TEST_V1_AND_V2 = (("v1", device_spec.DeviceSpecV1),
("v2", device_spec.DeviceSpecV2))
class DeviceTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters(*TEST_V1_AND_V2)
def testMerge(self, DeviceSpec): # pylint: disable=invalid-name
d = DeviceSpec.from_string("/job:muu/task:1/device:MyFunnyDevice:2")
self.assertEqual("/job:muu/task:1/device:MyFunnyDevice:2", d.to_string())
if not context.executing_eagerly():
with ops.device(device.merge_device("/device:GPU:0")):
var1 = variables.Variable(1.0)
self.assertEqual("/device:GPU:0", var1.device)
with ops.device(device.merge_device("/job:worker")):
var2 = variables.Variable(1.0)
self.assertEqual("/job:worker/device:GPU:0", var2.device)
with ops.device(device.merge_device("/device:CPU:0")):
var3 = variables.Variable(1.0)
self.assertEqual("/job:worker/device:CPU:0", var3.device)
with ops.device(device.merge_device("/job:ps")):
var4 = variables.Variable(1.0)
self.assertEqual("/job:ps/device:CPU:0", var4.device)
def testCanonicalName(self):
self.assertEqual("/job:foo/replica:0",
device.canonical_name("/job:foo/replica:0"))
self.assertEqual("/job:foo/replica:0",
device.canonical_name("/replica:0/job:foo"))
self.assertEqual("/job:foo/replica:0/task:0",
device.canonical_name("/job:foo/replica:0/task:0"))
self.assertEqual("/job:foo/replica:0/task:0",
device.canonical_name("/job:foo/task:0/replica:0"))
self.assertEqual("/device:CPU:0",
device.canonical_name("/device:CPU:0"))
self.assertEqual("/device:GPU:2",
device.canonical_name("/device:GPU:2"))
self.assertEqual("/job:foo/replica:0/task:0/device:GPU:0",
device.canonical_name(
"/job:foo/replica:0/task:0/device:GPU:0"))
self.assertEqual("/job:foo/replica:0/task:0/device:GPU:0",
device.canonical_name(
"/device:GPU:0/task:0/replica:0/job:foo"))
def testCheckValid(self):
device.check_valid("/job:foo/replica:0")
with self.assertRaisesRegexp(ValueError, "invalid literal for int"):
device.check_valid("/job:j/replica:foo")
with self.assertRaisesRegexp(ValueError, "invalid literal for int"):
device.check_valid("/job:j/task:bar")
with self.assertRaisesRegexp(ValueError, "Unknown attribute: 'bar'"):
device.check_valid("/bar:muu/baz:2")
with self.assertRaisesRegexp(ValueError, "Cannot specify multiple device"):
device.check_valid("/cpu:0/device:GPU:2")
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/device_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that old style division works for Dimension."""
from __future__ import absolute_import
# from __future__ import division # Intentionally skip this import
from __future__ import print_function
import six
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class DimensionDivTest(test_util.TensorFlowTestCase):
def testDivSucceeds(self):
"""Without from __future__ import division, __div__ should work."""
if six.PY2: # Old division exists only in Python 2
values = [tensor_shape.Dimension(x) for x in (3, 7, 11, None)]
for x in values:
for y in values:
self.assertEqual((x / y).value, (x // y).value)
def testRDivFail(self):
"""Without from __future__ import division, __rdiv__ is used."""
# Note: This test is related to GitHub issue 25790.
if six.PY2: # Old division exists only in Python 2
two = tensor_shape.Dimension(2)
message = (r"unsupported operand type\(s\) for /: "
r"'int' and 'Dimension', please use // instead")
with self.assertRaisesRegexp(TypeError, message):
_ = 6 / two
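      # By contrast (sketch): Dimension / int dispatches to Dimension.__div__,
      # so `two / 2` succeeds here and equals Dimension(1).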
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/tensor_shape_div_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import gc
import numpy as np
import os
import threading
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.client import session
from tensorflow.python.compat import compat as forward_compat
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.gradients # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
ops._set_call_cpp_shape_fn(common_shapes.call_cpp_shape_fn)
class ResourceTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBuildGraph(self):
with self.cached_session():
pt = test_ops.stub_resource_handle_op(container="a", shared_name="b")
test_ops.resource_create_op(pt).run()
@test_util.run_deprecated_v1
def testInitialize(self):
with self.cached_session():
handle = test_ops.stub_resource_handle_op(container="a", shared_name="b")
resources.register_resource(
handle=handle,
create_op=test_ops.resource_create_op(handle),
is_initialized_op=test_ops.resource_initialized_op(handle))
self.assertEquals(
len(
resources.report_uninitialized_resources(
resources.shared_resources()).eval()), 1)
resources.initialize_resources(resources.shared_resources()).run()
self.assertEquals(
len(
resources.report_uninitialized_resources(
resources.shared_resources()).eval()), 0)
class TensorAndShapeTest(test_util.TensorFlowTestCase):
def testShape(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertEqual(tensor_shape.unknown_shape(), t.get_shape())
t.set_shape([1, 2, 3])
self.assertEqual([1, 2, 3], t.get_shape())
def testIterable(self):
if not context.executing_eagerly():
self.skipTest("Eager-mode test")
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
with self.assertRaisesRegexp(TypeError, "Cannot iterate"):
next(iter(t))
def testIterableGraph(self):
if context.executing_eagerly():
self.skipTest("Graph-mode test")
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
with self.assertRaisesRegexp(TypeError, "iterating.*not allowed in Graph"):
next(iter(t))
with self.assertRaisesRegexp(
TypeError, "iterating.*AutoGraph did not convert"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED):
next(iter(t))
with self.assertRaisesRegexp(
TypeError, "iterating.*AutoGraph is disabled"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.DISABLED):
next(iter(t))
def testImplicitBool(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.bool])
t = op.outputs[0]
with self.assertRaisesRegexp(
TypeError, "using.*as a.*bool.*not allowed in Graph"):
bool(t)
with self.assertRaisesRegexp(
TypeError, "using.*as a.*bool.*AutoGraph did not convert"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED):
bool(t)
with self.assertRaisesRegexp(
TypeError, "using.*as a.*bool.*AutoGraph is disabled"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.DISABLED):
bool(t)
def testAddShape(self):
with self.cached_session():
a = array_ops.zeros([2, 3])
b = array_ops.ones([1, 3])
c = a + b
self.assertEqual([2, 3], c.shape)
@test_util.run_deprecated_v1
def testUnknownDim(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
b = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
c = a + b
self.assertEqual([2, None, 3], c.shape.as_list())
@test_util.run_deprecated_v1
def testUnknownShape(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=None)
b = array_ops.ones([1, 3])
c = a + b
self.assertEqual(tensor_shape.unknown_shape(), c.shape)
@test_util.run_deprecated_v1
def testScalarShape(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=[])
b = array_ops.ones([])
c = a + b
self.assertEqual(tensor_shape.TensorShape([]), c.shape)
@test_util.run_deprecated_v1
def testShapeFunctionError(self):
with self.cached_session():
a = array_ops.ones([1, 2, 3])
b = array_ops.ones([4, 5, 6])
with self.assertRaisesRegexp(
ValueError, r"Dimensions must be equal, but are 2 and 5 for 'add' "
r"\(op: 'Add(V2)?'\) with input shapes: \[1,2,3\], \[4,5,6\]."):
_ = a + b
def testNumpyArray(self):
with ops.Graph().as_default():
x = array_ops.ones((3, 4), name="test_ones")
with self.assertRaisesRegexp(NotImplementedError,
r"Cannot convert a symbolic.+test_ones"):
np.array(x)
with self.assertRaisesRegexp(TypeError, "not well defined.+test_ones"):
len(x)
# EagerTensors should still behave as numpy arrays.
with context.eager_mode():
x = array_ops.ones((3, 4))
self.assertAllEqual(x, np.ones((3, 4)))
self.assertAllEqual(np.array(x), np.ones((3, 4)))
self.assertEqual(len(x), 3)
def testRef(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertEqual(x1.experimental_ref(), x1.experimental_ref())
self.assertEqual(x2.experimental_ref(), x2.experimental_ref())
self.assertEqual(x1.experimental_ref(), x2.experimental_ref())
self.assertEqual(y.experimental_ref(), y.experimental_ref())
self.assertEqual(z.experimental_ref(), z.experimental_ref())
self.assertEqual(w.experimental_ref(), w.experimental_ref())
self.assertNotEqual(x1.experimental_ref(), y.experimental_ref())
self.assertNotEqual(x1.experimental_ref(), z.experimental_ref())
self.assertNotEqual(x1.experimental_ref(), w.experimental_ref())
self.assertNotEqual(y.experimental_ref(), z.experimental_ref())
self.assertNotEqual(y.experimental_ref(), w.experimental_ref())
self.assertNotEqual(z.experimental_ref(), w.experimental_ref())
def testRefDeref(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertIs(x1, x1.experimental_ref().deref())
self.assertIs(x2, x2.experimental_ref().deref())
self.assertIs(x1, x2.experimental_ref().deref())
self.assertIs(x2, x1.experimental_ref().deref())
self.assertIs(y, y.experimental_ref().deref())
self.assertIs(z, z.experimental_ref().deref())
self.assertIsNot(x1, y.experimental_ref().deref())
self.assertIsNot(x1, z.experimental_ref().deref())
self.assertIsNot(x1, w.experimental_ref().deref())
self.assertIsNot(y, z.experimental_ref().deref())
self.assertIsNot(y, w.experimental_ref().deref())
self.assertIsNot(z, w.experimental_ref().deref())
def testRefInSet(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertEqual(x1.experimental_ref(), x2.experimental_ref())
tensor_set = {
x1.experimental_ref(),
x2.experimental_ref(),
y.experimental_ref(),
z.experimental_ref(),
w.experimental_ref(),
}
self.assertEqual(len(tensor_set), 4)
self.assertIn(x1.experimental_ref(), tensor_set)
self.assertIn(x2.experimental_ref(), tensor_set)
self.assertIn(y.experimental_ref(), tensor_set)
self.assertIn(z.experimental_ref(), tensor_set)
self.assertIn(w.experimental_ref(), tensor_set)
def testRefInDict(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertEqual(x1.experimental_ref(), x2.experimental_ref())
tensor_dict = {
x1.experimental_ref(): "x1",
y.experimental_ref(): "y",
z.experimental_ref(): "z",
w.experimental_ref(): "w",
}
self.assertEqual(len(tensor_dict), 4)
# Overwriting x1
tensor_dict[x2.experimental_ref()] = "x2"
self.assertEqual(len(tensor_dict), 4)
self.assertEqual(tensor_dict[x1.experimental_ref()], "x2")
self.assertEqual(tensor_dict[x2.experimental_ref()], "x2")
self.assertEqual(tensor_dict[y.experimental_ref()], "y")
self.assertEqual(tensor_dict[z.experimental_ref()], "z")
self.assertEqual(tensor_dict[w.experimental_ref()], "w")
def testTensorRefStrong(self):
x = constant_op.constant(1.)
x_ref = x.experimental_ref()
del x
self.assertIsNotNone(x_ref.deref())
def testVariableRefStrong(self):
x = variables.Variable(1.)
x_ref = x.experimental_ref()
del x
self.assertIsNotNone(x_ref.deref())
class IndexedSlicesTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testToTensor(self):
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
dense_shape = constant_op.constant([3, 2])
x = ops.IndexedSlices(values, indices, dense_shape)
tensor = ops.convert_to_tensor(x, name="tensor")
self.assertAllEqual(self.evaluate(tensor), [[2, 3], [0, 0], [5, 7]])
@test_util.run_gpu_only
def testEagerCopy(self):
with context.eager_mode():
var = variables.Variable([[0.0], [0.0], [0.0], [0.0]], name="tensor")
with backprop.GradientTape() as tape:
a = array_ops.gather(array_ops.gather(var, [0, 1]), [0, 1])
b = array_ops.gather(array_ops.gather(var, [2, 3]), [0, 1])
r = special_math_ops.einsum("ij,ij->i", a, b)
g = tape.gradient(r, [var])[0]
values = g.values if isinstance(g, ops.IndexedSlices) else g
self.assertAllEqual(values.get_shape(), [4, 1])
@test_util.run_deprecated_v1
def testNegation(self):
with self.cached_session():
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = -ops.IndexedSlices(values, indices)
self.assertAllEqual(x.values.eval(), [[-2, -3], [-5, -7]])
self.assertAllEqual(x.indices.eval(), [0, 2])
@test_util.run_deprecated_v1
def testScalarMul(self):
with self.cached_session():
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = math_ops.scalar_mul(-2, ops.IndexedSlices(values, indices))
self.assertAllEqual(x.values.eval(), [[-4, -6], [-10, -14]])
self.assertAllEqual(x.indices.eval(), [0, 2])
@test_util.run_all_in_graph_and_eager_modes
class IndexedSlicesSpecTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def assertAllTensorsEqual(self, list1, list2):
self.assertLen(list1, len(list2))
for (t1, t2) in zip(list1, list2):
self.assertAllEqual(t1, t2)
def testConstruction(self):
spec1 = indexed_slices.IndexedSlicesSpec()
self.assertEqual(spec1._shape.rank, None)
self.assertEqual(spec1._values_dtype, dtypes.float32)
self.assertEqual(spec1._indices_dtype, dtypes.int64)
self.assertEqual(spec1._dense_shape_dtype, None)
self.assertEqual(spec1._indices_shape.as_list(), [None])
spec2 = indexed_slices.IndexedSlicesSpec([None, None], dtypes.string,
dtypes.int32, dtypes.int64, [10])
self.assertEqual(spec2._shape.as_list(), [None, None])
self.assertEqual(spec2._values_dtype, dtypes.string)
self.assertEqual(spec2._indices_dtype, dtypes.int32)
self.assertEqual(spec2._dense_shape_dtype, dtypes.int64)
self.assertEqual(spec2._indices_shape.as_list(), [10])
def testValueType(self):
spec1 = indexed_slices.IndexedSlicesSpec()
self.assertEqual(spec1.value_type, ops.IndexedSlices)
@parameterized.parameters([
(indexed_slices.IndexedSlicesSpec(),
(tensor_shape.TensorShape(None), dtypes.float32, dtypes.int64, None,
tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(shape=[5, None, None]),
(tensor_shape.TensorShape([5, None, None]), dtypes.float32,
dtypes.int64, None, tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(
dtype=dtypes.int32, dense_shape_dtype=dtypes.int64),
(tensor_shape.TensorShape(None), dtypes.int32, dtypes.int64,
dtypes.int64, tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(indices_shape=[100]),
(tensor_shape.TensorShape(None), dtypes.float32, dtypes.int64, None,
tensor_shape.TensorShape([100]))),
]) # pyformat: disable
def testSerialize(self, spec, expected):
serialization = spec._serialize()
# TensorShape has an unconventional definition of equality, so we can't use
# assertEqual directly here. But repr() is deterministic and lossless for
# the expected values, so we can use that instead.
self.assertEqual(repr(serialization), repr(expected))
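    # Concretely (a sketch of the quirk, based on this release's semantics):
    # tensor_shape.Dimension(None) == tensor_shape.Dimension(None) yields None
    # rather than True, so shapes containing unknown dimensions don't compare
    # equal even when their repr() strings match.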
@parameterized.parameters([
(indexed_slices.IndexedSlicesSpec(dtype=dtypes.string), (
tensor_spec.TensorSpec(None, dtypes.string),
tensor_spec.TensorSpec([None], dtypes.int64),
)),
(indexed_slices.IndexedSlicesSpec(
dtype=dtypes.string, dense_shape_dtype=dtypes.int32), (
tensor_spec.TensorSpec(None, dtypes.string),
tensor_spec.TensorSpec([None], dtypes.int64),
tensor_spec.TensorSpec([None], dtypes.int32),
)),
(indexed_slices.IndexedSlicesSpec(
shape=[5, 10, 15], dense_shape_dtype=dtypes.int32), (
tensor_spec.TensorSpec([None, 10, 15], dtypes.float32),
tensor_spec.TensorSpec([None], dtypes.int64),
tensor_spec.TensorSpec([3], dtypes.int32),
)),
(indexed_slices.IndexedSlicesSpec(
shape=[5, 10, 15], dense_shape_dtype=dtypes.int32,
indices_shape=[20]), (
tensor_spec.TensorSpec([20, 10, 15], dtypes.float32),
tensor_spec.TensorSpec([20], dtypes.int64),
tensor_spec.TensorSpec([3], dtypes.int32),
)),
])
def testComponentSpecs(self, spec, expected):
self.assertEqual(spec._component_specs, expected)
@parameterized.parameters([
{
"spec": indexed_slices.IndexedSlicesSpec(),
"values": [3.0, 5.0],
"indices": [5, 10]
},
{
"spec":
indexed_slices.IndexedSlicesSpec(dense_shape_dtype=dtypes.int32),
"values": [3.0, 5.0],
"indices": [5, 10],
"dense_shape": [100]
},
])
def testToFromComponents(self, spec, indices, values, dense_shape=None):
x = ops.IndexedSlices(indices, values, dense_shape)
actual_components = spec._to_components(x)
if dense_shape is None:
self.assertAllTensorsEqual(actual_components, [indices, values])
else:
self.assertAllTensorsEqual(actual_components,
[indices, values, dense_shape])
st_reconstructed = spec._from_components(actual_components)
self.assertAllEqual(x.indices, st_reconstructed.indices)
self.assertAllEqual(x.values, st_reconstructed.values)
if dense_shape is None:
self.assertIs(st_reconstructed.dense_shape, None)
else:
self.assertAllEqual(x.dense_shape, st_reconstructed.dense_shape)
@test_util.run_v1_only("IndexedSlicesValue is deprecated in v2")
def testFromNumpyComponents(self):
indices = np.array([3, 8])
values = np.array([1.0, 9.0])
dense_shape = np.array([100])
spec1 = indexed_slices.IndexedSlicesSpec(dense_shape_dtype=dtypes.int32)
st1 = spec1._from_components((values, indices, dense_shape))
self.assertIsInstance(st1, indexed_slices.IndexedSlicesValue)
self.assertAllEqual(st1.indices, indices)
self.assertAllEqual(st1.values, values)
self.assertAllEqual(st1.dense_shape, dense_shape)
spec2 = indexed_slices.IndexedSlicesSpec()
st2 = spec2._from_components((values, indices))
self.assertIsInstance(st2, indexed_slices.IndexedSlicesValue)
self.assertAllEqual(st2.indices, indices)
self.assertAllEqual(st2.values, values)
self.assertIs(st2.dense_shape, None)
class NodeDefConstructorTest(test_util.TensorFlowTestCase):
def testNoArgs(self):
nodedef = ops._NodeDef("None", "bar")
self.assertProtoEquals("op: 'None' name: 'bar'", nodedef)
def testArgs(self):
nodedef = ops._NodeDef("foo", "bar", device="/device:baz:*")
self.assertProtoEquals("op:'foo' name:'bar' device:'/device:baz:*'",
nodedef)
nodedef = ops._NodeDef("foo", "bar", device=pydev.DeviceSpec(job="j"))
self.assertProtoEquals("op:'foo' name:'bar' device:'/job:j'", nodedef)
def _apply_op(g, *args, **kwargs):
op = g.create_op(*args, **kwargs)
if len(op.outputs) == 1:
return op.outputs[0]
else:
return op.outputs
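# Usage sketch for the helper above (illustrative; the demo function and its
# name are ours, and it is never called): single-output ops come back as a
# lone Tensor, multi-output ops as the list of Tensors.
def _apply_op_demo(g):
  single = _apply_op(g, "FloatOutput", [], [dtypes.float32])
  pair = _apply_op(g, "FloatOutputStringOutput", [],
                   [dtypes.float32, dtypes.string])
  return single, pair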
class OperationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testNoInputs(self):
op = test_ops.float_output_string_output(name="myop").a.op
self.assertEqual(2, len(op.values()))
self.assertEqual(0, len(op.inputs))
self.assertEqual("myop", op.name)
float_t, label_str_t = op.values()
self.assertEqual(dtypes.float32, float_t.dtype)
self.assertEqual(op, float_t.op)
self.assertEqual(0, float_t._value_index)
self.assertEqual(0, len(float_t.consumers()))
self.assertEqual("myop", float_t._as_node_def_input())
self.assertEqual(dtypes.string, label_str_t.dtype)
self.assertEqual(op, label_str_t.op)
self.assertEqual(1, label_str_t._value_index)
self.assertEqual(0, len(label_str_t.consumers()))
self.assertEqual("myop:1", label_str_t._as_node_def_input())
self.assertProtoEquals("op:'FloatOutputStringOutput' name:'myop'",
op.node_def)
@test_util.run_deprecated_v1
def testNoOutputs(self):
op1 = test_ops.float_output(name="myop1").op
float_t, = op1.values()
op2 = test_ops.float_input(float_t, name="myop2")
self.assertEqual(0, len(op2.values()))
self.assertEqual(1, len(op2.inputs))
self.assertIs(float_t, op2.inputs[0])
self.assertEqual(1, len(float_t.consumers()))
self.assertEqual(op2, float_t.consumers()[0])
self.assertProtoEquals("op:'FloatOutput' name:'myop1'", op1.node_def)
self.assertProtoEquals("op:'FloatInput' name:'myop2' input:'myop1'",
op2.node_def)
@test_util.run_deprecated_v1
def testInputsAndOutputs(self):
op1 = test_ops.float_output(name="myop1").op
self.assertEqual(1, len(op1.values()))
float1_t, = op1.values()
op2 = test_ops.float_output_string_output(name="myop2").a.op
self.assertEqual(2, len(op2.values()))
float2_t, label2_str_t = op2.values()
# Note that we consume label2_str_t twice here.
op3 = test_ops.foo2(float1_t, label2_str_t, label2_str_t, name="myop3").d.op
self.assertEqual(2, len(op3.values()))
self.assertEqual(1, len(float1_t.consumers()))
self.assertEqual(op3, float1_t.consumers()[0])
self.assertEqual(0, len(float2_t.consumers()))
self.assertEqual(2, len(label2_str_t.consumers()))
self.assertEqual(op3, label2_str_t.consumers()[0])
self.assertEqual(op3, label2_str_t.consumers()[1])
self.assertProtoEquals("""
op:'Foo2' name:'myop3'
input:'myop1' input:'myop2:1' input:'myop2:1'
""", op3.node_def)
def testDeviceFromNodeDef(self):
op = ops.Operation(
ops._NodeDef("None", "myop", device="/job:goo/device:GPU:0"),
ops.Graph(), [], [])
self.assertEqual("/job:goo/device:GPU:0", op.device)
def testDeviceObject(self):
op = ops.Operation(ops._NodeDef("None", "myop"), ops.Graph(), [], [])
op._set_device("/job:goo/device:GPU:0")
self.assertProtoEquals(
"op:'None' name:'myop' device:'/job:goo/device:GPU:0' ", op.node_def)
op = ops.Operation(ops._NodeDef("None", "op2"), ops.Graph(), [], [])
op._set_device(
pydev.DeviceSpec(
job="muu", device_type="CPU", device_index=0))
self.assertProtoEquals(
"op:'None' name:'op2' device:'/job:muu/device:CPU:0'", op.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = ops.Operation(
ops._NodeDef("RefOutputFloatOutput", "op1"), g, [],
[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
self.assertEquals([], list(op1.inputs))
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = ops.Operation(
ops._NodeDef("RefInputFloatInput", "op2"),
g, [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals(
"op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
self.assertEquals([ref_t, nonref_t], list(op2.inputs))
op3 = ops.Operation(
ops._NodeDef("TwoFloatInputs", "op3"), g, [ref_t, nonref_t], [])
self.assertProtoEquals(
"op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testInvalidNames(self):
g = ops.Graph()
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", ""), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "_invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "-invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "/invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "invalid:0"), g)
@test_util.run_deprecated_v1
def testNoShapeFunction(self):
op = test_ops.a()
self.assertEqual(tensor_shape.unknown_shape(), op.get_shape())
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedArray(self):
values = [[2], [3], [5], [7]]
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
def testShapeTuple(self):
with self.cached_session():
c = constant_op.constant(1)
self.assertEqual(c._shape_tuple(), ()) # pylint: disable=protected-access
def testConvertToTensorEager(self):
with context.eager_mode():
t = constant_op.constant(1)
self.assertTrue(isinstance(t, ops.EagerTensor))
converted = ops.convert_to_tensor(t)
self.assertTrue(isinstance(converted, ops.EagerTensor))
converted = ops.convert_to_tensor(1)
self.assertTrue(isinstance(converted, ops.EagerTensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedTuple(self):
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(ops.convert_to_tensor(values)))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedTensors(self):
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(
[constant_op.constant(row) for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
tensor = ops.convert_to_tensor(
[[constant_op.constant(v) for v in row] for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedMix(self):
values = ([2], (3,), [constant_op.constant(5)], constant_op.constant([7]))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(((2,), (3,), (5,), (7,)), self.evaluate(tensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorPreferred(self):
values = [2, 3, 5, 7]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.float32)
self.assertEqual(dtypes.float32, tensor.dtype)
# An empty list can be converted to a tensor of any preferred dtype.
values = []
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.int64, tensor.dtype)
# The preferred dtype is incompatible with the value, so conversion
# falls back to float32 instead.
values = [1.23]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.float32, tensor.dtype)
@test_util.run_in_graph_and_eager_modes
def testConvertToInvalidTensorType(self):
with self.assertRaises(TypeError):
# Forcing an invalid dtype should fail with a type error.
values = [1.23]
ops.convert_to_tensor(values, dtype=dtypes.int64)
@test_util.run_in_graph_and_eager_modes
def testConvertToLongLongTensorType(self):
tensor = ops.convert_to_tensor(
# Get a numpy array of dtype NPY_LONGLONG
np.prod(constant_op.constant([1])._shape_tuple()))
self.assertEqual(dtypes.int64, tensor.dtype)
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorFromInvalidTensor(self):
tensor = constant_op.constant(42.0, dtype=dtypes.float32)
with self.assertRaises(ValueError):
ops.convert_to_tensor(tensor, dtype=dtypes.int32)
@test_util.run_deprecated_v1
def testNoConvert(self):
# Operation cannot be converted to Tensor.
op = control_flow_ops.no_op()
with self.assertRaisesRegexp(TypeError,
r"Can't convert Operation '.*' to Tensor"):
ops.convert_to_tensor(op)
def testStr(self):
node_def = ops._NodeDef("None", "op1")
op = ops.Operation(node_def, ops.Graph(), [], [dtypes.float32])
self.assertEqual(str(node_def), str(op))
def testRepr(self):
op = ops.Operation(
ops._NodeDef("None", "op1"), ops.Graph(), [], [dtypes.float32])
self.assertEqual("<tf.Operation 'op1' type=None>", repr(op))
@test_util.run_deprecated_v1
def testGetAttr(self):
op = test_ops.default_attrs()
self.assertEqual(op.get_attr("string_val"), b"abc")
self.assertEqual(op.get_attr("string_list_val"), [b"abc", b""])
self.assertEqual(op.get_attr("int_val"), 123)
self.assertEqual(op.get_attr("int_list_val"), [1, 2, 3])
self.assertEqual(op.get_attr("float_val"), 10.0)
self.assertEqual(op.get_attr("float_list_val"), [10.0])
self.assertEqual(op.get_attr("bool_val"), True)
self.assertEqual(op.get_attr("bool_list_val"), [True, False])
self.assertEqual(op.get_attr("shape_val"),
tensor_shape.as_shape([2, 1]).as_proto())
self.assertEqual(op.get_attr("shape_list_val"),
[tensor_shape.as_shape([]).as_proto(),
tensor_shape.as_shape([1]).as_proto()])
self.assertEqual(op.get_attr("tensor_val"),
tensor_util.make_tensor_proto(1, dtypes.int32))
self.assertEqual(op.get_attr("tensor_list_val"),
[tensor_util.make_tensor_proto(1, dtypes.int32)])
type_val = op.get_attr("type_val")
# First check that type_val is a DType, because the assertEqual below would
# pass either way, since DType overrides __eq__.
self.assertIsInstance(type_val, dtypes.DType)
self.assertEqual(type_val, dtypes.int32)
type_list_val = op.get_attr("type_list_val")
self.assertTrue(all(isinstance(x, dtypes.DType) for x in type_list_val))
self.assertEqual(type_list_val, [dtypes.int32, dtypes.float32])
@function.Defun(dtypes.float32, func_name="MyFunc")
def func(x):
return x
op = test_ops.func_attr(func)
self.assertEqual(op.get_attr("f"),
attr_value_pb2.NameAttrList(name="MyFunc"))
# Try fetching missing attr
with self.assertRaisesRegexp(
ValueError, "Operation 'FuncAttr' has no attr named 'FakeAttr'."):
op.get_attr("FakeAttr")
# TODO(b/65162920): remove this test when users who are directly mutating the
# node_def have been updated to proper usage.
@test_util.run_deprecated_v1
def testSetAttr(self):
op = test_ops.int_attr().op
op._set_attr("foo", attr_value_pb2.AttrValue(i=2))
# TODO(skyewm): add node_def check
self.assertEqual(op.get_attr("foo"), 2)
# TODO(nolivia): test all error cases
def testAddControlInput(self):
with ops.Graph().as_default():
x = constant_op.constant(1).op
y = constant_op.constant(2).op
z = constant_op.constant(3).op
z._add_control_input(x) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x])
z._add_control_input(x) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x])
z._add_control_inputs([x, y, y]) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x, y])
self.assertEqual(x._control_outputs, [z])
@test_util.run_deprecated_v1
def testRemoveAllControlInputs(self):
a = constant_op.constant(1)
with ops.control_dependencies([a]):
b = constant_op.constant(2)
c = constant_op.constant(3)
d = constant_op.constant(4)
e = constant_op.constant(5)
with ops.control_dependencies([a, c]):
f = d + e
self.assertEqual(a.op.control_inputs, [])
self.assertEqual(b.op.control_inputs, [a.op])
self.assertEqual(f.op.control_inputs, [a.op, c.op])
a.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(a.op.control_inputs, [])
b.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(b.op.control_inputs, [])
f.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(f.op.control_inputs, [])
self.assertEqual(list(f.op.inputs), [d, e])
@test_util.run_deprecated_v1
def testControlInputCycle(self):
graph = ops.Graph()
with graph.as_default():
z = constant_op.constant(0)
x = constant_op.constant(1)
y = constant_op.constant(2)
y.op._add_control_input(z.op) # pylint: disable=protected-access
y.op._add_control_input(x.op) # pylint: disable=protected-access
x.op._add_control_input(y.op) # pylint: disable=protected-access
with self.session(graph=graph) as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Graph is invalid, contains a cycle with 2 nodes"):
self.evaluate(x)
def testUpdateInput(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = x + y
z.op._update_input(0, y) # pylint: disable=protected-access
self.assertEquals(list(z.op.inputs), [y, y])
self.assertEquals(x.consumers(), [])
self.assertEquals(y.consumers(), [z.op, z.op])
with session.Session(graph=g) as sess:
self.assertEquals(self.evaluate(z), 4)
z.op._update_input(0, x) # pylint: disable=protected-access
self.assertEquals(list(z.op.inputs), [x, y])
self.assertEquals(x.consumers(), [z.op])
self.assertEquals(y.consumers(), [z.op])
with session.Session(graph=g) as sess:
self.assertEquals(self.evaluate(z), 3)
z.op._update_input(1, y) # pylint: disable=protected-access
self.assertEquals(list(z.op.inputs), [x, y])
self.assertEquals(x.consumers(), [z.op])
self.assertEquals(y.consumers(), [z.op])
with session.Session(graph=g) as sess:
self.assertEquals(self.evaluate(z), 3)
def testUpdateInputGraphError(self):
g_0 = ops.Graph()
g_1 = ops.Graph()
with g_0.as_default():
x = constant_op.constant(1)
with g_1.as_default():
y = constant_op.constant(2)
z = y * 2
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
z.op._update_input(0, x) # pylint: disable=protected-access
def testUpdateInputTypeError(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant(0)
x = constant_op.constant("")
y = constant_op.constant(1)
z = y + w
z.op._update_input(0, x) # pylint: disable=protected-access
with session.Session(graph=g) as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Input 0 of node add was passed string from Const_1:0 incompatible "
"with expected int32"):
self.evaluate(z)
def testUpdateInputShapeError(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant(2, shape=[3, 1])
x = constant_op.constant(0, shape=[3, 1])
y = constant_op.constant(1, shape=[2, 2])
z = w + x
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Cannot update edge, incompatible shapes: \[2,2\] and \[3,1\]"):
z.op._update_input(0, y) # pylint: disable=protected-access
def testUpdateInputOutOfRange(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
with self.assertRaisesRegexp(
errors.OutOfRangeError,
r"Cannot update edge. Input index \[1\] is greater than the number of "
r"total inputs \[0\]."
):
x.op._update_input(1, x) # pylint: disable=protected-access
@test_util.enable_control_flow_v2
@test_util.run_v1_only("b/120545219")
def testAddWhileInput(self):
if forward_compat.forward_compatible(2019, 8, 23):
@eager_function.defun
def test():
output = control_flow_ops.while_loop(lambda x: x < 3, lambda x: x + 1,
[1])
while_op = output.op.inputs[0].op
self.assertEqual(while_op.type, "StatelessWhile")
orig_num_inputs = len(while_op.inputs)
# Make sure we can handle the while op having a control input.
while_op._add_control_input(constant_op.constant(0).op)
new_input1 = constant_op.constant(1.0)
new_input2 = constant_op.constant(True)
# Clear output shapes to bypass shape checking.
while_op._set_shape_list_attr("output_shapes", [])
while_op._set_type_list_attr("T",
[t.dtype for t in while_op.inputs] +
[new_input1.dtype, new_input2.dtype])
while_op._add_while_inputs([new_input1, new_input2])
# Can't add an edge beyond what's specified by "T"
with self.assertRaises(errors.OutOfRangeError):
while_op._add_while_inputs([new_input2])
self.assertEqual(len(while_op.inputs), orig_num_inputs + 2) # pylint: disable=g-deprecated-assert
test()
@test_util.run_deprecated_v1
def testOpDef(self):
x = constant_op.constant(0)
y = constant_op.constant(1)
z = x + y
self.assertEqual(x.op.op_def.name, "Const")
self.assertEqual(len(x.op.op_def.input_arg), 0)
self.assertEqual(len(x.op.op_def.output_arg), 1)
self.assertRegexpMatches(z.op.op_def.name, "Add(V2)?")
self.assertEqual(len(z.op.op_def.input_arg), 2)
self.assertEqual(len(z.op.op_def.output_arg), 1)
def testInputFromDifferentGraphError(self):
g_0 = ops.Graph()
g_1 = ops.Graph()
with g_0.as_default():
x = constant_op.constant(1)
with g_1.as_default():
y = constant_op.constant(2)
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
y * x # pylint: disable=pointless-statement
def testInputsAreImmutable(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
op = test_ops.int_input_int_output(x, name="myop").op
with self.assertRaisesRegexp(
AttributeError, "'_InputList' object has no attribute 'append'"):
op.inputs.append(None)
class CreateOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
op1 = g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
with g.device("/device:GPU:0"):
op2 = g.create_op(
"FloatOutputStringOutput", [], [dtypes.float32, dtypes.string], None,
name="myop2")
op3 = g.create_op(
"Foo3",
[list(op1.values())[0], list(op2.values())[1], list(op2.values())[0]],
[dtypes.float32, dtypes.int32],
None,
name="myop3")
self.assertDeviceEqual(None, op1.device)
self.assertDeviceEqual("/device:GPU:0", op2.device)
self.assertDeviceEqual(None, op3.device)
self.assertProtoEquals("name:'myop1' op:'FloatOutput'", op1.node_def)
self.assertProtoEquals(
"name:'myop2' op:'FloatOutputStringOutput' device:'/device:GPU:0'",
op2.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo3'",
op3.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = g.create_op(
"RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
name="op1")
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = g.create_op(
"RefInputFloatInput", [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals(
"op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
op3 = g.create_op("TwoFloatInputs", [ref_t, nonref_t], [], name="op3")
self.assertProtoEquals(
"op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testFinalized(self):
g = ops.Graph()
g.finalize()
with self.assertRaises(RuntimeError):
g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# Test unfinalize.
g._unsafe_unfinalize()
g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# NOTE(skyewm): these cases test the private Graph._create_op_from_tf_operation
# method. Arguably we should only test the public APIs that depend on this
# method. However, this logic is complex and tricky, and it can be difficult to
# ascertain if we have adequate coverage (e.g. a graph may run successfully if
# the control flow context isn't set properly, but a more complicated use case
# that might not be obvious to test will fail). Thus we instead explicitly test
# the low-level behavior.
class CreateOpFromTFOperationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
c_op = ops._create_c_op(
g, ops._NodeDef("IntInputIntOutput", "myop"), [x], [])
op = g._create_op_from_tf_operation(c_op)
self.assertEqual(op.name, "myop")
self.assertEqual(op.type, "IntInputIntOutput")
self.assertEqual(len(op.outputs), 1)
self.assertEqual(op.outputs[0].shape, tensor_shape.unknown_shape())
self.assertEqual(list(op.inputs), [x])
self.assertEqual(op.control_inputs, [])
self.assertEqual(op.graph, g)
self.assertEqual(x.consumers(), [op])
self.assertIsNotNone(op.traceback)
self.assertEqual(g.get_operation_by_name("myop"), op)
self.assertEqual(g.get_tensor_by_name("myop:0"), op.outputs[0])
def testShape(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
c_op = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), [x], [])
op = g._create_op_from_tf_operation(c_op)
self.assertEqual(op.name, "myop")
self.assertEqual(op.type, "Identity")
self.assertEqual(len(op.outputs), 1)
self.assertEqual(op.outputs[0].shape, tensor_shape.TensorShape([2, 3]))
def testUniqueName(self):
g = ops.Graph()
with g.as_default():
c_op = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop"), [], [])
c_op2 = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop_1"), [], [])
op = g._create_op_from_tf_operation(c_op)
op2 = g._create_op_from_tf_operation(c_op2)
      # Create ops with the same names ("myop" and "myop_1") as the two ops
      # above. We expect the new names to be uniquified.
op3 = test_ops.int_output(name="myop").op
op4 = test_ops.int_output(name="myop_1").op
self.assertEqual(op.name, "myop")
self.assertEqual(op2.name, "myop_1")
self.assertEqual(op3.name, "myop_2")
self.assertEqual(op4.name, "myop_1_1")
@test_util.run_v1_only("b/120545219")
def testCond(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def true_fn():
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "cond/myop"), [x], [])
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return x
control_flow_ops.cond(x < 10, true_fn, lambda: x)
op = g.get_operation_by_name("cond/myop")
self.assertIsNotNone(op)
self.assertEqual(op.name, "cond/myop")
self.assertEqual(op.type, "IntInput")
self.assertEqual(op.outputs, [])
op_input = op.inputs[0].op
self.assertEqual(op_input.type, "Switch")
self.assertEqual(op_input.inputs[0], x)
self.assertEqual(op.graph, g)
# pylint: disable=protected-access
self.assertIsNotNone(op._get_control_flow_context())
self.assertEqual(op._get_control_flow_context().name,
"cond/cond_text")
# pylint: enable=protected-access
@test_util.run_v1_only("b/120545219")
def testWhileLoop(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def body(i):
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
self.assertEqual(op.name, "myloop/myop")
self.assertEqual(op.type, "IntInput")
self.assertEqual(op.outputs, [])
op_input = op.inputs[0].op
self.assertEqual(op_input.type, "Enter")
self.assertEqual(list(op_input.inputs), [x])
self.assertEqual(op.graph, g)
# pylint: disable=protected-access
self.assertIsNotNone(op._get_control_flow_context())
self.assertEqual(op._get_control_flow_context().name,
"myloop/while_context")
# pylint: enable=protected-access
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithInternalControlDep(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def body(i):
c = constant_op.constant(1.0, name="c")
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
with ops.control_dependencies([c]):
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
c = g.get_operation_by_name("myloop/c")
self.assertIsNotNone(c)
# Internal control dep is preserved
self.assertEqual(op.control_inputs, [c])
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithExternalControlDep(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
c = constant_op.constant(1.0)
def body(i):
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
with ops.control_dependencies([c]):
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
# External control dep is removed and replaced with internal control dep
self.assertNotEqual(op.control_inputs[0], c.op)
self.assertIsNotNone(op.control_inputs[0]._get_control_flow_context())
class ApplyOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
t1 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop1")
with g.device("/device:GPU:0"):
t2 = _apply_op(
g, "TwoIntOutputs", [], [dtypes.int32, dtypes.int32], name="myop2")
t3 = _apply_op(
g,
"Foo1", [t1, t2[1], t2[0]], [dtypes.float32, dtypes.int32],
name="myop3")
    self.assertIsInstance(t1, ops.Tensor)
    self.assertIsInstance(t2, list)
    self.assertIsInstance(t3, list)
    self.assertIsInstance(t3[0], ops.Tensor)
self.assertEqual("myop1", t1._as_node_def_input())
self.assertEqual("myop2", t2[0]._as_node_def_input())
self.assertEqual("myop2:1", t2[1]._as_node_def_input())
self.assertEqual("myop3", t3[0]._as_node_def_input())
# Validate that we got the right ops as well
self.assertProtoEquals("name:'myop1' op:'FloatOutput'", t1.op.node_def)
self.assertProtoEquals(
"name:'myop2' op:'TwoIntOutputs' device:'/device:GPU:0'",
t2[0].op.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo1'",
t3[0].op.node_def)
def testReferenceInput(self):
g = ops.Graph()
ref_t, nonref_t = _apply_op(
g, "RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
name="op1")
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'",
ref_t.op.node_def)
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
out_2 = _apply_op(
g,
"RefInputFloatInputIntOutput", [ref_t, nonref_t], [dtypes.int32],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals(
"op:'RefInputFloatInputIntOutput' name:'op2' input:'op1' input:'op1:1'",
out_2.op.node_def)
out_3 = _apply_op(
g, "TwoFloatInputsIntOutput", [ref_t, nonref_t], [dtypes.int32],
name="op3")
self.assertProtoEquals(
"op:'TwoFloatInputsIntOutput' name:'op3' input:'op1' input:'op1:1'",
out_3.op.node_def)
class NameStackTest(test_util.TensorFlowTestCase):
def testBasics(self):
g = ops.Graph()
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_2", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_2", g.unique_name("foo"))
self.assertEqual("foo_1_1", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_1", g.unique_name("foo_1"))
self.assertEqual("foo_1_2", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_2", g.unique_name("foo_1"))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2", mark_as_used=False))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2"))
with g.name_scope("bar"):
self.assertEqual("bar/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo", g.unique_name("foo"))
self.assertEqual("bar/foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo_1", g.unique_name("foo"))
with g.name_scope(None):
self.assertEqual("foo_3", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_3", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual(
"bar/baz/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz/foo", g.unique_name("foo"))
self.assertEqual(
"bar/baz/foo_1", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz/foo_1", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual(
"bar/baz_1/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz_1/foo", g.unique_name("foo"))
self.assertEqual(
"bar/baz_1/foo_1", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz_1/foo_1", g.unique_name("foo"))
with g.name_scope("quux"):
self.assertEqual("quux/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("quux/foo", g.unique_name("foo"))
with g.name_scope("bar"):
with g.name_scope("baz"):
self.assertEqual(
"bar_1/baz/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar_1/baz/foo", g.unique_name("foo"))
self.assertEqual("foo_4", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_4", g.unique_name("foo"))
self.assertEqual("bar_2", g.unique_name("bar", mark_as_used=False))
self.assertEqual("bar_2", g.unique_name("bar"))
@test_util.run_deprecated_v1
def testNameAndVariableScope(self):
with self.cached_session() as sess:
with sess.graph.name_scope("l0"):
with variable_scope.variable_scope("l1"):
with sess.graph.name_scope("l1") as scope:
self.assertEqual("l0/l1/l1/", scope)
self.assertEqual(
"l0/l1/l1/foo",
sess.graph.unique_name(
"foo", mark_as_used=False))
self.assertEqual("l0/l1/l1/foo", sess.graph.unique_name("foo"))
with sess.graph.name_scope("l2") as scope:
self.assertEqual("l0/l1/l2/", scope)
self.assertEqual(
"l0/l1/l2/foo",
sess.graph.unique_name(
"foo", mark_as_used=False))
self.assertEqual("l0/l1/l2/foo", sess.graph.unique_name("foo"))
def testOutOfOrderUniqueName(self):
g = ops.Graph()
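    # Requesting "foo_2" first reserves that name, so later requests for "foo"
    # are uniquified around it.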
self.assertEqual("foo_2", g.unique_name("foo_2"))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_3", g.unique_name("foo"))
def testUniqueNameCaseInsensitivity(self):
g = ops.Graph()
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("Foo_1", g.unique_name("Foo"))
with g.name_scope("bar"):
self.assertEqual("bar/foo", g.unique_name("foo"))
with g.name_scope("Bar"):
self.assertEqual("Bar_1/foo", g.unique_name("foo"))
def testInvalidNameRaisesError(self):
g = ops.Graph()
with g.name_scope(""): # Should not raise
pass
with g.name_scope("foo/"): # Should not raise
with g.name_scope("_bar"): # Should not raise
pass
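    # "foo:0" is rejected because ":" is reserved for tensor output indices,
    # and a leading underscore is only allowed below an existing scope.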
with self.assertRaises(ValueError):
with g.name_scope("foo:0"):
pass
with self.assertRaises(ValueError):
with g.name_scope("_bar"):
pass
class NameTest(test_util.TensorFlowTestCase):
def testGenerateName(self):
g = ops.Graph()
op0 = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
self.assertEqual("TwoFloatOutputs", op0.name)
self.assertEqual("TwoFloatOutputs:0", op0.outputs[0].name)
self.assertEqual("TwoFloatOutputs:1", op0.outputs[1].name)
op1 = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertEqual("FloatOutput", op1.name)
self.assertEqual("FloatOutput:0", op1.outputs[0].name)
op2 = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertEqual("FloatOutput_1", op2.name)
self.assertEqual("FloatOutput_1:0", op2.outputs[0].name)
op3 = g.create_op("FloatOutput", [], [dtypes.float32], name="my_op")
self.assertEqual("my_op", op3.name)
self.assertEqual("my_op:0", op3.outputs[0].name)
def testNameScope(self):
g = ops.Graph()
with g.name_scope("foo") as foo:
self.assertEqual("foo/", foo)
with g.name_scope("foo2") as foo2:
self.assertEqual("foo/foo2/", foo2)
with g.name_scope(None) as empty1:
self.assertEqual("", empty1)
with g.name_scope("foo3") as foo3:
self.assertEqual("foo3/", foo3)
with g.name_scope("") as empty2:
self.assertEqual("", empty2)
self.assertEqual("FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
with g.name_scope("bar") as scope:
self.assertEqual("bar/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
self.assertEqual("bar/FloatOutput_1",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
      # If you use the value from "with .. as", that value is used as-is.
self.assertEqual(
"bar", g.create_op(
"FloatOutput", [], [dtypes.float32], name=scope).name)
with g.name_scope("baz") as scope:
with g.name_scope("quux"):
self.assertEqual("baz/quux/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
# If you use the value from the enclosing "with .. as", nothing is pushed.
with g.name_scope(scope):
self.assertEqual("baz/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
self.assertEqual(
"baz", g.create_op(
"FloatOutput", [], [dtypes.float32], name=scope).name)
self.assertEqual(
"trailing",
g.create_op(
"FloatOutput", [], [dtypes.float32], name="trailing/").name)
with g.name_scope("bar"):
self.assertEqual("bar_1/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
with g.name_scope("bar/"):
self.assertEqual("bar/FloatOutput_2",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
class DeviceTest(test_util.TensorFlowTestCase):
def testNoDevice(self):
g = ops.Graph()
op = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertDeviceEqual(None, op.device)
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput" }
""", gd)
def testEagerBackingDevice(self):
with context.eager_mode():
with ops.device("/device:CPU:0"):
t = constant_op.constant(1.0)
self.assertRegexpMatches(t.device, "/device:CPU:0")
self.assertRegexpMatches(t.backing_device, "/device:CPU:0")
def testDevicePartialString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testDeviceFull(self):
g = ops.Graph()
with g.device(
pydev.DeviceSpec(
job="worker", replica=2, task=0, device_type="CPU",
device_index=3)):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/task:0/device:CPU:3" }
""", gd)
def testNesting(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:3/task:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testNestingString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:3/task:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testNestingOverrideGpuCpu(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:2/device:GPU:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:2/device:GPU:2" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNestingWithMergeDeviceFunction(self):
g = ops.Graph()
with g.device(pydev.merge_device("/device:GPU:0")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:worker")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/device:CPU:0")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device(None)):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/device:GPU:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/device:CPU:0" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStrings(self):
g = ops.Graph()
with g.device("/device:GPU:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:ps"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(""):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/device:GPU:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/device:CPU:0" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStringWildcard(self):
g = ops.Graph()
with g.device("/device:GPU:7"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:GPU:*"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:*"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:5"):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:7" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/device:GPU:7" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/device:CPU:*" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/device:CPU:5" }
""", gd)
def testNestingErrorGraph(self):
g = ops.Graph()
scope = g.device("/device:GPU:8")
scope.__enter__()
with g.device("/device:GPU:9"):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
def testNestingErrorEager(self):
with context.eager_mode():
scope = ops.device("/device:CPU:0")
scope.__enter__()
with ops.device(None):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
def testNoneClearsDefault(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNoneIgnoresOuterDeviceFunction(self):
g = ops.Graph()
with g.device(lambda op: "/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def _overwritingDeviceFunction(self, unused_op):
# This device function unconditionally overwrites the device of ops.
#
# NOTE(mrry): Writing device functions like this is not
# recommended. Instead, in most cases you should use
# `pydev.merge_device("/job:ps")` or simply `"/job:ps"` as the
# argument to `tf.device()` and the device component will be merged in.
return "/job:overwrite"
def testOverwritingBehavior(self):
g = ops.Graph()
with g.device(self._overwritingDeviceFunction):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:ps"): # Will be overwritten.
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")): # Will be overwritten.
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device("/job:ps"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device(pydev.merge_device("/job:ps")):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps" }
""", gd)
class MultithreadedGraphStateTest(test_util.TensorFlowTestCase):
class TestThread(threading.Thread):
def __init__(self, graph, replica_id):
super(MultithreadedGraphStateTest.TestThread, self).__init__()
self._graph = graph
self._replica_id = replica_id
      # This thread sets this event once it has mutated the graph. The caller
      # can wait on it.
      self.has_mutated_graph = threading.Event()
      # This thread waits on this event before continuing. The caller sets it.
      self.should_continue = threading.Event()
def run(self):
# Mutate a graph's stack, then set `has_mutated_graph`, then wait for
# `should_continue`, then add an op to the graph affected by the graph's
# stack.
raise NotImplementedError("must be implemented in descendants")
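  # The caller side of the handshake, as used by the tests below:
  #
  #   t.start(); t.has_mutated_graph.wait()   # thread has mutated its stack
  #   t.should_continue.set(); t.join()       # thread creates its op and exits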
def testDeviceFunctionStack(self):
class DeviceSettingThread(self.TestThread):
def run(self):
with g.device("/job:worker/replica:{}".format(self._replica_id)):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
    # If `switch_to_thread_local` isn't called, then device placement of the
    # ops below is not deterministic.
g.switch_to_thread_local()
threads = [DeviceSettingThread(g, i) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput_0" op: "FloatOutput"
device: "/job:worker/replica:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:1" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testColocateWith(self):
class ColocatingThread(self.TestThread):
def __init__(self, graph, replica_id, op_to_colocate_with):
super(ColocatingThread, self).__init__(graph, replica_id)
self._op_to_colocate_with = op_to_colocate_with
def run(self):
with g.colocate_with(self._op_to_colocate_with):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
ops_to_colocate_with = []
for i in range(3):
with g.device("/job:worker/replica:{}".format(i)):
ops_to_colocate_with.append(
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="ColocateWithMe_{}".format(i)))
    # If `switch_to_thread_local` isn't called, then the `device` and `attr`
    # values for the ops below are not deterministic.
g.switch_to_thread_local()
threads = [
ColocatingThread(g, i, ops_to_colocate_with[i]) for i in range(3)
]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "ColocateWithMe_0" op: "FloatOutput"
device: "/job:worker/replica:0" }
node { name: "ColocateWithMe_1" op: "FloatOutput"
device: "/job:worker/replica:1" }
node { name: "ColocateWithMe_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_0" op: "FloatOutput"
device: "/job:worker/replica:0"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_0"}}}}
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:1"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_1"}}}}
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_2"}}}}
""", gd)
def testControlDependencies(self):
class DependingThread(self.TestThread):
def __init__(self, graph, replica_id, dependency_op):
super(DependingThread, self).__init__(graph, replica_id)
self._dependency_op = dependency_op
def run(self):
with g.control_dependencies([self._dependency_op]):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
dependency_ops = []
for i in range(3):
dependency_ops.append(
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="ColocateWithMe_{}".format(i)))
    # If `switch_to_thread_local` isn't called, then the `input` values for the
    # ops below are not deterministic.
g.switch_to_thread_local()
threads = [DependingThread(g, i, dependency_ops[i]) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "ColocateWithMe_0" op: "FloatOutput" }
node { name: "ColocateWithMe_1" op: "FloatOutput" }
node { name: "ColocateWithMe_2" op: "FloatOutput" }
node { name: "FloatOutput_0" op: "FloatOutput"
input: "^ColocateWithMe_0" }
node { name: "FloatOutput_1" op: "FloatOutput"
input: "^ColocateWithMe_1" }
node { name: "FloatOutput_2" op: "FloatOutput"
input: "^ColocateWithMe_2" }
""", gd)
def testNameStack(self):
class NameSettingThread(self.TestThread):
def run(self):
with g.name_scope("foo"):
op1 = g.create_op("FloatOutput", [], [dtypes.float32])
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
op2 = g.create_op("FloatOutput", [], [dtypes.float32])
self.result = (op1, op2)
g = ops.Graph()
threads = [NameSettingThread(g, i) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
suffixes = ["", "_1", "_2"]
for t, s in zip(threads, suffixes):
self.assertEquals("foo" + s + "/FloatOutput", t.result[0].name)
self.assertEquals("foo" + s + "/FloatOutput_1", t.result[1].name)
class ObjectWithName(object):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
class CollectionTest(test_util.TensorFlowTestCase):
def test_get_collections(self):
g = ops.Graph()
self.assertSequenceEqual(g.collections, [])
g.add_to_collection("key", 12)
g.add_to_collection("key", 15)
self.assertSequenceEqual(g.collections, ["key"])
g.add_to_collection("other", "foo")
self.assertSequenceEqual(sorted(g.collections), ["key", "other"])
self.assertSequenceEqual(
sorted(g.get_all_collection_keys()), ["key", "other"])
def test_add_to_collection(self):
g = ops.Graph()
g.add_to_collection("key", 12)
g.add_to_collection("other", "foo")
g.add_to_collection("key", 34)
    # Of the objects added to "blah" below, only blank1 matches the scoped
    # get_collection() queries further down.
g.add_to_collection("blah", 27)
blank1 = ObjectWithName("prefix/foo")
g.add_to_collection("blah", blank1)
blank2 = ObjectWithName("junk/foo")
g.add_to_collection("blah", blank2)
self.assertEqual([12, 34], g.get_collection("key"))
self.assertEqual([], g.get_collection("nothing"))
self.assertEqual([27, blank1, blank2], g.get_collection("blah"))
self.assertEqual([blank1], g.get_collection("blah", "prefix"))
self.assertEqual([blank1], g.get_collection("blah", ".*x"))
# Make sure that get_collection() returns a first-level
# copy of the collection, while get_collection_ref() returns
# the original list.
other_collection_snapshot = g.get_collection("other")
other_collection_ref = g.get_collection_ref("other")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo"], other_collection_ref)
g.add_to_collection("other", "bar")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo", "bar"], other_collection_ref)
self.assertEqual(["foo", "bar"], g.get_collection("other"))
    self.assertIs(other_collection_ref, g.get_collection_ref("other"))
# Verify that getting an empty collection ref returns a modifiable list.
empty_coll_ref = g.get_collection_ref("empty")
self.assertEqual([], empty_coll_ref)
empty_coll = g.get_collection("empty")
self.assertEqual([], empty_coll)
    self.assertIsNot(empty_coll, empty_coll_ref)
    empty_coll_ref2 = g.get_collection_ref("empty")
    self.assertIs(empty_coll_ref2, empty_coll_ref)
# Add to the collection.
empty_coll_ref.append("something")
self.assertEqual(["something"], empty_coll_ref)
self.assertEqual(["something"], empty_coll_ref2)
self.assertEqual([], empty_coll)
self.assertEqual(["something"], g.get_collection("empty"))
empty_coll_ref3 = g.get_collection_ref("empty")
    self.assertIs(empty_coll_ref3, empty_coll_ref)
def test_add_to_collections_uniquify(self):
g = ops.Graph()
g.add_to_collections([1, 2, 1], "key")
# Make sure "key" is not added twice
self.assertEqual(["key"], g.get_collection(1))
def test_add_to_collections_from_list(self):
g = ops.Graph()
g.add_to_collections(["abc", "123"], "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_tuple(self):
g = ops.Graph()
g.add_to_collections(("abc", "123"), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_generator(self):
g = ops.Graph()
def generator():
yield "abc"
yield "123"
g.add_to_collections(generator(), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_set(self):
g = ops.Graph()
g.add_to_collections(set(["abc", "123"]), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_string(self):
g = ops.Graph()
g.add_to_collections("abc", "key")
self.assertEqual(["key"], g.get_collection("abc"))
def test_default_graph(self):
with ops.Graph().as_default():
ops.add_to_collection("key", 90)
ops.add_to_collection("key", 100)
# Collections are ordered.
self.assertEqual([90, 100], ops.get_collection("key"))
def test_defun(self):
with context.eager_mode():
@eager_function.defun
def defun():
ops.add_to_collection("int", 1)
ops.add_to_collection("tensor", constant_op.constant(2))
@eager_function.defun
def inner_defun():
self.assertEqual(ops.get_collection("int"), [1])
three = ops.get_collection("tensor")[0] + ops.get_collection("int")[0]
ops.add_to_collection("int", 2)
self.assertEqual(ops.get_collection("int"), [1, 2])
ops.add_to_collection("foo", "bar")
self.assertEqual(ops.get_collection("foo"), ["bar"])
return three
self.assertEqual(ops.get_collection("int"), [1])
three = inner_defun()
self.assertEqual(ops.get_collection("int"), [1])
self.assertEqual(ops.get_collection("foo"), [])
return three
three = defun()
self.assertEqual(three.numpy(), 3)
ops.NotDifferentiable("FloatOutput")
@ops.RegisterGradient("CopyOp")
def _CopyGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
@ops.RegisterGradient("copy_override")
def _CopyOverrideGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
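# `RegisterGradient` keys on an op type string, and `gradient_override_map`
# lets a graph temporarily remap an op type to a different registered gradient;
# the tests below exercise both the default and the overridden lookup.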
class RegistrationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testRegisterGradients(self):
x = test_ops.float_output()
y = test_ops.copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyGrad, fn)
def testOverrideGradients(self):
g = ops.Graph()
with g.as_default():
x = test_ops.float_output()
with g.gradient_override_map({"CopyOp": "copy_override"}):
y = test_ops.copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyOverrideGrad, fn)
def testNonExistentOverride(self):
g = ops.Graph()
with g.as_default():
x = test_ops.float_output()
with g.gradient_override_map({"CopyOp": "unknown_override"}):
y = test_ops.copy_op(x)
with self.assertRaisesRegexp(LookupError, "unknown_override"):
ops.get_gradient_function(y.op)
class ComparisonTest(test_util.TensorFlowTestCase):
def testMembershipAllowed(self):
g = ops.Graph()
t1 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop1")
t2 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop2")
    self.assertIsInstance(t1, ops.Tensor)
    self.assertIsInstance(t2, ops.Tensor)
    self.assertIn(t1, [t1])
    self.assertNotIn(t1, [t2])
class ControlDependenciesTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
g = ops.Graph()
with g.as_default():
# Creating unregistered ops with _apply_op() doesn't work with the C API
# TODO(skyewm): address this more consistently. Possible solutions are
# to use registered ops in all tests, create a way to register ops in
# Python tests, or conditionally disable the op registration check in
# the C API.
a = constant_op.constant(1.0)
b = constant_op.constant(1.0)
with g.control_dependencies([a]):
c = constant_op.constant(1.0)
d = array_ops.identity(b)
e = array_ops.identity(c)
self.assertEqual(c.op.control_inputs, [a.op])
self.assertEqual(d.op.control_inputs, [a.op])
# e should be dominated by c.
self.assertEqual(e.op.control_inputs, [])
@test_util.run_in_graph_and_eager_modes
def testEager(self):
def future():
future.calls += 1
return constant_op.constant(2.0)
future.calls = 0
if context.executing_eagerly():
a = constant_op.constant(1.0)
b = future
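      # In eager mode, `control_dependencies` also accepts callables and
      # invokes each exactly once, as asserted below.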
with ops.control_dependencies([a, b]):
c = constant_op.constant(3.0)
self.assertEqual(future.calls, 1)
else:
g = ops.Graph()
with g.as_default():
a = constant_op.constant(1.0)
b = future()
with g.control_dependencies([a, b]):
c = constant_op.constant(3.0)
self.assertEqual(c.op.control_inputs, [a.op, b.op])
self.assertEqual(future.calls, 1)
def testBasicWithConversion(self):
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
class ConvertibleObj(object):
def _as_graph_element(self):
return a
with g.control_dependencies([ConvertibleObj()]):
c = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(c.op.control_inputs, [a.op])
def testNested(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1, a_2, a_3, a_4]):
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
b_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_1.op, a_2.op, a_3.op, a_4.op],
b_1.op.control_inputs)
self.assertItemsEqual(b_1.op.control_inputs, b_2.op.control_inputs)
def testClear(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies(None):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
# deps [a_3, a_4]
b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps = [a_3]
b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to None
b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1, a_2]
b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1]
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies(None):
# deps are None again
b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
self.assertItemsEqual([], b_none.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([], b_none2.op.control_inputs)
def testComplex(self):
g = ops.Graph()
    # Usage pattern:
    # * Nodes a_i are source ops defined at the outermost scope, and are used
    #   as control inputs for the ith nested scope.
    # * Nodes b_i are defined as TwoFloatInputsFloatOutput(a_3, a_4) at each
    #   scope.
    # * Nodes c_i are defined as TwoFloatInputsFloatOutput(a_1, b_1) at each
    #   scope.
    # * Nodes d_i are defined as TwoFloatInputsFloatOutput(b_i, c_i) at each
    #   scope.
    # * Nodes e_i are defined as TwoFloatInputsFloatOutput(e_{i-1}, e_{i-1}) at
    #   each scope i > 1.
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
b_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_1, c_1],
[dtypes.float32])
e_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_2]):
b_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_2, c_2],
[dtypes.float32])
e_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_1, e_1],
[dtypes.float32])
with g.control_dependencies([a_3]):
b_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_3, c_3],
[dtypes.float32])
e_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_2, e_2],
[dtypes.float32])
with g.control_dependencies([a_4]):
b_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_4, c_4],
[dtypes.float32])
e_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_3, e_3],
[dtypes.float32])
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_2.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_3.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_4.op.control_inputs)
self.assertItemsEqual([], c_1.op.control_inputs)
self.assertItemsEqual([a_2.op], c_2.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op], c_3.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op, a_4.op], c_4.op.control_inputs)
self.assertItemsEqual([], d_1.op.control_inputs)
self.assertItemsEqual([], d_2.op.control_inputs)
self.assertItemsEqual([], d_3.op.control_inputs)
self.assertItemsEqual([], d_4.op.control_inputs)
self.assertItemsEqual([a_1.op], e_1.op.control_inputs)
self.assertItemsEqual([a_2.op], e_2.op.control_inputs)
self.assertItemsEqual([a_3.op], e_3.op.control_inputs)
self.assertItemsEqual([a_4.op], e_4.op.control_inputs)
def testRepeatedDependency(self):
g = ops.Graph()
a = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
a_0, a_1 = a.outputs
with g.control_dependencies([a_0]):
b = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
c = _apply_op(g, "FloatOutput", [], [dtypes.float32])
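    # a_0 and a_1 are outputs of the same op, so the nested dependencies
    # collapse to a single control input on c.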
self.assertEqual(b.op.control_inputs, [a])
self.assertEqual(c.op.control_inputs, [a])
def testNoControlDependencyWithDataDependency(self):
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a]):
b = _apply_op(g, "Identity", [a], [dtypes.float32])
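    # The requested control edge is redundant with the data edge a -> b, so no
    # explicit control input is added.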
self.assertEqual(b.op.control_inputs, [])
class OpScopeTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testNames(self):
with ops.name_scope("foo") as foo:
self.assertEqual("foo/", foo)
with ops.name_scope("foo2") as foo2:
self.assertEqual("foo/foo2/", foo2)
with ops.name_scope(None) as empty1:
self.assertEqual("", empty1)
with ops.name_scope("foo3") as foo3:
self.assertEqual("foo3/", foo3)
with ops.name_scope("") as empty2:
self.assertEqual("", empty2)
with ops.name_scope("foo/") as outer_foo:
self.assertEqual("foo/", outer_foo)
with ops.name_scope("") as empty3:
self.assertEqual("", empty3)
with ops.name_scope("foo4") as foo4:
self.assertEqual("foo/foo4/", foo4)
with ops.name_scope("foo5//") as foo5:
self.assertEqual("foo5//", foo5)
with ops.name_scope("foo6") as foo6:
self.assertEqual("foo5//foo6/", foo6)
with ops.name_scope("/") as foo7:
self.assertEqual("/", foo7)
with ops.name_scope("//") as foo8:
self.assertEqual("//", foo8)
with ops.name_scope("a//b/c") as foo9:
self.assertEqual("foo/a//b/c/", foo9)
with ops.name_scope("a//b/c") as foo10:
self.assertEqual("a//b/c/", foo10)
@test_util.run_in_graph_and_eager_modes
def testEagerDefaultScopeName(self):
with ops.name_scope(None, "default") as scope:
self.assertEqual(scope, "default/")
with ops.name_scope(None, "default2") as scope2:
self.assertEqual(scope2, "default/default2/")
@test_util.run_in_graph_and_eager_modes
def testNameScopeV2IsReEntrant(self):
foo = ops.name_scope_v2("foo")
bar = ops.name_scope_v2("bar")
with foo as scope_name:
self.assertEqual("foo/", scope_name)
with foo as scope_name:
self.assertEqual("foo/foo/", scope_name)
with bar as scope_name:
self.assertEqual("foo/bar/", scope_name)
with foo as scope_name:
self.assertEqual("foo/bar/foo/", scope_name)
with bar as scope_name:
self.assertEqual("bar/", scope_name)
@test_util.run_deprecated_v1
def testNoScopeName(self):
g0 = ops.Graph()
values = [
g0.create_op("A", [], [dtypes.float32]),
g0.create_op("B", [], [dtypes.float32])
]
with self.assertRaises(ValueError):
with ops.name_scope(None, values=values):
pass
with self.assertRaises(ValueError):
with ops.name_scope(None, None, values):
pass
@test_util.run_deprecated_v1
def testEmptyScopeName(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
with ops.name_scope("", values=[a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope("", "my_default_scope", [a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
@test_util.run_deprecated_v1
def testDefaultScopeName(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
scope_name = "my_scope"
default_scope_name = "my_default_scope"
with ops.name_scope(scope_name, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope(None, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % default_scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
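    # Passing the values list where `default_name` is expected is a TypeError.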
with self.assertRaises(TypeError):
with ops.name_scope(scope_name, [a, b]):
pass
def _testGraphElements(self, graph_elements):
scope_name = "my_scope"
with ops.name_scope(scope_name, values=graph_elements) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(graph_elements[0].graph, ops.get_default_graph())
g1 = ops.Graph()
a = g1.create_op("A", [], [dtypes.float32])
with self.assertRaises(ValueError):
with ops.name_scope(scope_name, values=graph_elements + [a]):
pass
@test_util.run_deprecated_v1
def testTensor(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
self._testGraphElements([a, b])
@test_util.run_deprecated_v1
def testSparseTensor(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
sparse = sparse_tensor.SparseTensor(
_apply_op(g0, "Int64Output", [], [dtypes.int64]),
_apply_op(g0, "FloatOutput", [], [dtypes.float32]),
_apply_op(g0, "Int64Output", [], [dtypes.int64]))
self._testGraphElements([a, sparse, b])
@test_util.run_deprecated_v1
def testVariable(self):
g0 = ops.Graph()
with g0.as_default():
variable = variables.Variable([1.0])
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
self._testGraphElements([a, variable, b])
class InitScopeTest(test_util.TensorFlowTestCase):
def testClearsControlDependencies(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.as_default():
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with ops.init_scope():
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
# deps [a_3, a_4]
b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps = [a_3]
b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to None
b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1, a_2]
b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1]
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with ops.init_scope():
# deps are None again
b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
self.assertItemsEqual([], b_none.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([], b_none2.op.control_inputs)
def testLiftsOpsFromFunctions(self):
g0 = ops.Graph()
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
g2 = ops.Graph()
g2._building_function = True # pylint: disable=protected-access
with g0.as_default():
with g1.as_default():
with g2.as_default():
with ops.init_scope():
_ = constant_op.constant(1.0)
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 1)
def testPreservesDevices(self):
g0 = ops.Graph()
with g0.as_default(), ops.device("CPU:0"):
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
with g1.as_default():
with ops.device("GPU:0"):
with ops.init_scope():
# init_scope should preserve device set under `g1`.
on_gpu = constant_op.constant(1.0)
self.assertEqual(on_gpu.device, "/device:GPU:0")
still_on_gpu = constant_op.constant(1.0)
self.assertEqual(still_on_gpu.device, "/device:GPU:0")
blank = constant_op.constant(1.0)
self.assertEqual(blank.device, "")
with ops.init_scope():
now_on_cpu = constant_op.constant(1.0)
self.assertEqual(now_on_cpu.device, "/device:CPU:0")
on_cpu = constant_op.constant(1.0)
self.assertEqual(on_cpu.device, "/device:CPU:0")
def testComposes(self):
g0 = ops.Graph()
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
g2 = ops.Graph()
g2._building_function = True # pylint: disable=protected-access
g3 = ops.Graph()
g3._building_function = False # pylint: disable=protected-access
with g0.as_default():
with g1.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 1)
with g2.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
with g3.as_default():
with ops.init_scope():
# This op should be lifted into g3, because g3 is not building a
# function.
_ = constant_op.constant(1.0)
self.assertIs(g3, ops.get_default_graph())
self.assertEqual(len(g3.get_operations()), 1)
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 2)
def testEscapesToEagerContext(self):
g = ops.Graph()
g._building_function = True # pylint: disable=protected-access
with context.eager_mode():
with context.graph_mode():
with g.as_default():
with ops.init_scope():
# Because g is building a function, init_scope should
# escape out to the eager context.
self.assertTrue(context.executing_eagerly())
# g should be reinstated as the default graph, and the
# graph context should be re-entered.
self.assertIs(g, ops.get_default_graph())
self.assertFalse(context.executing_eagerly())
def testStaysInEagerWhenOnlyEagerContextActive(self):
with context.eager_mode():
with ops.init_scope():
        self.assertTrue(context.executing_eagerly())
      self.assertTrue(context.executing_eagerly())
def testEscapesDefunWhenInEagerMode(self):
def function_with_variables():
with ops.init_scope():
self.v = resource_variable_ops.ResourceVariable(3)
return self.v.assign_add(1)
with context.eager_mode():
# Each invocation of function_with_variables recreates a variable.
self.assertEqual(4, int(function_with_variables()))
self.assertEqual(4, int(function_with_variables()))
compiled = eager_function.defun(function_with_variables)
# The init_scope in function_with_variables lifts the variable out
# of the graph function constructed by defun; hence,
# compiled now appears to be stateful.
self.assertEqual(4, int(compiled()))
self.assertEqual(5, int(compiled()))
def testEscapesDefunWhenInGraphMode(self):
def function_with_variables(name):
with ops.init_scope():
_ = variable_scope.get_variable(name, shape=(1,))
g = ops.Graph()
with g.as_default():
with self.cached_session():
# First ensure that graphs that are not building functions are
# not escaped.
function_with_variables("foo")
with self.assertRaisesRegexp(ValueError,
r"Variable foo already exists.*"):
# This will fail because reuse is not set to True.
function_with_variables("foo")
compiled = eager_function.defun(function_with_variables)
compiled("bar")
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)
# The second call to `compiled` should not create variables: the
# init_scope has lifted the variable creation code out of the defun.
compiled("bar")
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)
def testEscapesNestedDefun(self):
def inner_function():
with ops.init_scope():
self.v = resource_variable_ops.ResourceVariable(1)
return self.v.assign_add(2)
def outer_function(inner=None):
with ops.init_scope():
self.v0 = resource_variable_ops.ResourceVariable(0)
return self.v0.assign_add(1) + inner()
with context.eager_mode():
# Each invocation of outer_function recreates variables.
self.assertEqual(4, int(outer_function(inner=inner_function)))
self.assertEqual(4, int(outer_function(inner=inner_function)))
compiled_inner = eager_function.defun(inner_function)
compiled_outer = eager_function.defun(outer_function)
# The init_scope lifts variables out of the graph functions
# constructed by defun; hence, compiled_outer should now appear to be
# stateful.
self.assertEqual(4, int(compiled_outer(inner=compiled_inner)))
self.assertEqual(7, int(compiled_outer(inner=compiled_inner)))
@test_util.run_v1_only("b/120545219")
def testFallsBackToGlobalGraphWhenAllGraphsAreBuildingFunctions(self):
with context.graph_mode():
ops.reset_default_graph()
# This doesn't push anything onto the graph stack, but it does
# set the stack's global graph.
global_graph = ops.get_default_graph()
fn_graph = ops.Graph()
# pylint: disable=protected-access
fn_graph._building_function = True
self.assertEqual(len(ops._default_graph_stack.stack), 0)
with fn_graph.as_default():
self.assertEqual(len(ops._default_graph_stack.stack), 1)
with ops.init_scope():
self.assertGreater(len(ops._default_graph_stack.stack), 1)
dummy = constant_op.constant(1.0)
self.assertEqual(len(ops._default_graph_stack.stack), 1)
# Note that the global graph is _not_ on the graph stack.
self.assertEqual(len(ops._default_graph_stack.stack), 0)
# Ensure that `dummy` was added to the global graph.
self.assertEqual(global_graph, dummy.graph)
# pylint: enable=protected-access
def testInstallsDefaultGraphWhenGraphStackIsEmptyInGraphMode(self):
with context.graph_mode():
# pylint: disable=protected-access
self.assertEqual(len(ops._default_graph_stack.stack), 0)
with ops.init_scope():
self.assertGreater(len(ops._default_graph_stack.stack), 0)
self.assertEqual(len(ops._default_graph_stack.stack), 0)
# pylint: enable=protected-access
def testPreservesNameScopeInGraphConstruction(self):
with ops.Graph().as_default():
function_graph = ops.Graph()
with function_graph.as_default():
with ops.name_scope("inner"), ops.init_scope():
self.assertEqual(ops.get_name_scope(), "inner")
self.assertEqual(ops.get_name_scope(), "")
def testEnteringGraphFromEagerIsSticky(self):
with context.eager_mode():
g = ops.Graph()
with g.as_default():
with ops.init_scope():
self.assertFalse(context.executing_eagerly())
self.assertEqual(g, ops.get_default_graph())
def testMixGraphEager(self):
with context.eager_mode():
c = constant_op.constant(1.0)
with ops.Graph().as_default():
with self.assertRaisesRegexp(
RuntimeError, "Attempting to capture an EagerTensor"):
math_ops.add(c, c)
c2 = constant_op.constant(2.0)
with self.assertRaisesRegexp(
TypeError, "Graph tensors"):
math_ops.add(c2, c2)
def testPreservesNameScopeInEagerExecution(self):
with context.eager_mode():
def foo():
with ops.name_scope("inner"), ops.init_scope():
if context.executing_eagerly():
# A trailing slash is always appended when eager execution is
# enabled.
self.assertEqual(context.context().scope_name, "inner/")
else:
self.assertEqual(ops.get_name_scope(), "inner")
foo()
self.assertEqual(ops.get_name_scope(), "")
foo_compiled = eager_function.defun(foo)
foo_compiled()
self.assertEqual(ops.get_name_scope(), "")
def testExecutingEagerlyOutsideFunctions(self):
@def_function.function
def f():
return ops.executing_eagerly_outside_functions()
with context.graph_mode():
self.assertFalse(ops.executing_eagerly_outside_functions())
with session.Session():
# Need self.evaluate for these because, in graph mode, the function returns
# a tensor rather than a Python boolean.
self.assertFalse(self.evaluate(f()))
with context.eager_mode():
self.assertTrue(ops.executing_eagerly_outside_functions())
self.assertTrue(f())
with ops.Graph().as_default():
self.assertFalse(ops.executing_eagerly_outside_functions())
with session.Session():
self.assertFalse(self.evaluate(f()))
class GraphTest(test_util.TensorFlowTestCase):
def setUp(self):
ops.reset_default_graph()
def _AssertDefault(self, expected):
self.assertIs(expected, ops.get_default_graph())
def testResetDefaultGraphNesting(self):
g0 = ops.Graph()
with self.assertRaises(AssertionError):
with g0.as_default():
ops.reset_default_graph()
def testGraphContextManagerCancelsEager(self):
with context.eager_mode():
with ops.Graph().as_default():
self.assertFalse(context.executing_eagerly())
def testGraphContextManager(self):
g0 = ops.Graph()
with g0.as_default() as g1:
self.assertIs(g0, g1)
def testDefaultGraph(self):
orig = ops.get_default_graph()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
g0 = ops.Graph()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
context_manager_0 = g0.as_default()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
with context_manager_0 as g0:
self._AssertDefault(g0)
with ops.Graph().as_default() as g1:
self.assertTrue(ops.has_default_graph())
self._AssertDefault(g1)
self._AssertDefault(g0)
self._AssertDefault(orig)
self.assertFalse(ops.has_default_graph())
def testPreventFeeding(self):
g = ops.Graph()
a = constant_op.constant(2.0)
self.assertTrue(g.is_feedable(a))
g.prevent_feeding(a)
self.assertFalse(g.is_feedable(a))
@test_util.run_deprecated_v1
def testPreventFetching(self):
g = ops.Graph()
a = constant_op.constant(2.0)
self.assertTrue(g.is_fetchable(a))
g.prevent_fetching(a.op)
self.assertFalse(g.is_fetchable(a))
def testAsGraphElementConversions(self):
class ConvertibleObj(object):
def _as_graph_element(self):
return "FloatOutput:0"
class NonConvertibleObj(object):
pass
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(a, g.as_graph_element(ConvertibleObj()))
with self.assertRaises(TypeError):
g.as_graph_element(NonConvertibleObj())
# Regression test against creating custom __del__ functions in classes
# involved in cyclic references, e.g. Graph and Operation. (Python won't gc
# cycles that require calling a __del__ method, because the __del__ method can
# theoretically increase the object's refcount to "save" it from gc, and any
# already-deleted objects in the cycle would have to be restored.)
def testGarbageCollected(self):
# Create a graph we can delete and a weak reference to monitor if it's gc'd
g = ops.Graph()
g_ref = weakref.ref(g)
# Create some ops
with g.as_default():
a = constant_op.constant(2.0)
b = constant_op.constant(3.0)
c = math_ops.add(a, b)
# Create a session we can delete
with session.Session(graph=g) as sess:
self.evaluate(c)
# Delete all references and trigger gc
del g
del a
del b
del c
del sess
gc.collect()
self.assertIsNone(g_ref())
def testRunnableAfterInvalidShape(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError):
math_ops.add([1, 2], [1, 2, 3])
a = constant_op.constant(1)
with session.Session() as sess:
self.evaluate(a)
def testRunnableAfterInvalidShapeWithKernelLabelMap(self):
g = ops.Graph()
with g.as_default():
with g._kernel_label_map({"KernelLabelRequired": "overload_1"}):
with self.assertRaises(ValueError):
test_ops.kernel_label_required(1)
a = constant_op.constant(1)
with session.Session() as sess:
self.evaluate(a)
class AttrScopeTest(test_util.TensorFlowTestCase):
def _get_test_attrs(self):
x = control_flow_ops.no_op()
try:
a = compat.as_text(x.get_attr("_A"))
except ValueError:
a = None
try:
b = compat.as_text(x.get_attr("_B"))
except ValueError:
b = None
return (a, b)
@test_util.run_deprecated_v1
def testNoLabel(self):
with self.cached_session():
self.assertAllEqual((None, None), self._get_test_attrs())
@test_util.run_deprecated_v1
def testLabelMap(self):
with self.cached_session() as sess:
a1 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": attr_value_pb2.AttrValue(s=compat.as_bytes("foo"))
}):
a2 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": None,
"_B": attr_value_pb2.AttrValue(s=compat.as_bytes("bar"))
}):
a3 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": attr_value_pb2.AttrValue(s=compat.as_bytes("baz"))
}):
a4 = self._get_test_attrs()
a5 = self._get_test_attrs()
a6 = self._get_test_attrs()
a7 = self._get_test_attrs()
self.assertAllEqual((None, None), a1)
self.assertAllEqual(("foo", None), a2)
self.assertAllEqual((None, "bar"), a3)
self.assertAllEqual(("baz", "bar"), a4)
self.assertAllEqual((None, "bar"), a5)
self.assertAllEqual(("foo", None), a6)
self.assertAllEqual((None, None), a7)
ops.RegisterShape("KernelLabel")(common_shapes.scalar_shape)
class KernelLabelTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testNoLabel(self):
with self.cached_session():
self.assertAllEqual(b"My label is: default",
test_ops.kernel_label().eval())
@test_util.run_deprecated_v1
def testLabelMap(self):
with self.cached_session() as sess:
default_1 = test_ops.kernel_label()
# pylint: disable=protected-access
with sess.graph._kernel_label_map({"KernelLabel": "overload_1"}):
overload_1_1 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": "overload_2"}):
overload_2 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": ""}):
default_2 = test_ops.kernel_label()
overload_1_2 = test_ops.kernel_label()
# pylint: enable=protected-access
default_3 = test_ops.kernel_label()
self.assertAllEqual(b"My label is: default", self.evaluate(default_1))
self.assertAllEqual(b"My label is: default", self.evaluate(default_2))
self.assertAllEqual(b"My label is: default", self.evaluate(default_3))
self.assertAllEqual(b"My label is: overload_1",
self.evaluate(overload_1_1))
self.assertAllEqual(b"My label is: overload_1",
self.evaluate(overload_1_2))
self.assertAllEqual(b"My label is: overload_2", self.evaluate(overload_2))
class AsGraphDefTest(test_util.TensorFlowTestCase):
def testGraphDefVersion(self):
"""Test that the graphdef version is plumbed through to kernels."""
with ops.Graph().as_default() as g:
version = g.graph_def_versions.producer
with self.session(graph=g):
v = test_ops.graph_def_version().eval()
self.assertEqual(version, v)
def testAddShapes(self):
with ops.Graph().as_default() as g:
t1, t2, t3, t4, t5 = _apply_op(g, "FiveFloatOutputs", [],
[dtypes.float32] * 5)
t1.set_shape(None)
t2.set_shape([])
t3.set_shape([None])
t4.set_shape([43, 37])
t5.set_shape([43, None])
b = constant_op.constant(1.0) # pylint: disable=unused-variable
gd = g.as_graph_def(add_shapes=True)
self.assertProtoEqualsVersion("""
node { name: "FiveFloatOutputs" op: "FiveFloatOutputs"
attr {
key: "_output_shapes"
value {
list {
shape { unknown_rank: true }
shape { }
shape { dim { size: -1 } }
shape { dim { size: 43 } dim { size: 37 } }
shape { dim { size: 43 } dim { size: -1 } }
}
}
}
}
node { name: "Const" op: "Const"
attr {
key: "_output_shapes"
value {
list {
shape { }
}
}
}
attr {
key: "dtype"
value { type: DT_FLOAT }
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape { }
float_val: 1.0 } } } }
""", gd)
@ops.RegisterStatistics("a", "flops")
def _calc_a_forward_flops(unused_graph, unused_node):
return ops.OpStats("flops", 20)
class StatisticsTest(test_util.TensorFlowTestCase):
def testRegisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("a", "an_a")
flops = ops.get_stats_for_node_def(graph, node, "flops")
self.assertEqual(20, flops.value)
missing_stat = ops.get_stats_for_node_def(graph, node, "missing_stat")
self.assertEqual(None, missing_stat.value)
def testUnregisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("b", "a_b")
weight_params = ops.get_stats_for_node_def(graph, node, "weight_params")
self.assertEqual(None, weight_params.value)
def testAccumulateStatistics(self):
flops_total = ops.OpStats("flops")
self.assertEqual(None, flops_total.value)
second_flops = ops.OpStats("flops", 3)
flops_total += second_flops
self.assertEqual(3, flops_total.value)
class DeviceStackTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasicDeviceAssignmentMetadata(self):
def device_func(unused_op):
return "/cpu:*"
const_zero = constant_op.constant([0.0], name="zero")
with ops.device("/cpu"):
const_one = constant_op.constant([1.0], name="one")
with ops.device("/cpu:0"):
const_two = constant_op.constant([2.0], name="two")
with ops.device(device_func):
const_three = constant_op.constant(3.0, name="three")
self.assertEqual(0, len(const_zero.op._device_assignments))
one_list = const_one.op._device_assignments
self.assertEqual(1, len(one_list))
self.assertEqual("/cpu", one_list[0].obj)
self.assertEqual("ops_test.py", os.path.basename(one_list[0].filename))
two_list = const_two.op._device_assignments
self.assertEqual(2, len(two_list))
devices = [t.obj for t in two_list]
self.assertEqual(set(["/cpu", "/cpu:0"]), set(devices))
three_list = const_three.op._device_assignments
self.assertEqual(1, len(three_list))
func_description = three_list[0].obj
expected_regex = r"device_func<.*ops_test.py, [0-9]+"
self.assertRegexpMatches(func_description, expected_regex)
@test_util.run_deprecated_v1
def testDeviceAssignmentMetadataForGraphDeviceAndTfDeviceFunctions(self):
with ops.device("/cpu"):
const_one = constant_op.constant([1.0], name="one")
with ops.get_default_graph().device("/cpu"):
const_two = constant_op.constant([2.0], name="two")
one_metadata = const_one.op._device_assignments[0]
two_metadata = const_two.op._device_assignments[0]
# Verify both types of device assignment return the right stack info.
self.assertRegexpMatches("ops_test.py",
os.path.basename(one_metadata.filename))
self.assertEqual(one_metadata.filename, two_metadata.filename)
self.assertEqual(one_metadata.lineno + 2, two_metadata.lineno)
class ColocationGroupTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], a.op.colocation_groups())
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
with self.assertRaises(ValueError):
c.op.get_attr("_class")
@test_util.run_deprecated_v1
def testBasicColocationMetadata(self):
const_two = constant_op.constant([2.0], name="two")
with ops.colocate_with(const_two.op):
const_three = constant_op.constant(3.0, name="three")
locations_dict = const_three.op._colocation_dict
self.assertIn("two", locations_dict)
metadata = locations_dict["two"]
self.assertIsNone(metadata.obj)
# Check that this test's filename is recorded as the file containing the
# colocation statement.
self.assertEqual("ops_test.py", os.path.basename(metadata.filename))
@test_util.run_deprecated_v1
def testColocationDeviceInteraction(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
# 'b' is created in the scope of /cpu:0, but it is
# colocated with 'a', which is on '/device:GPU:0'. colocate_with
# overrides devices because it is a stronger constraint.
b = constant_op.constant(3.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual(a.op.device, b.op.device)
@test_util.run_deprecated_v1
def testColocationCanonicalization(self):
with ops.device("/device:GPU:0"):
_ = constant_op.constant(2.0)
with ops.device(lambda op: "/device:GPU:0"):
b = constant_op.constant(3.0)
with ops.get_default_graph().colocate_with(b):
with ops.device("/device:GPU:0"):
c = constant_op.constant(4.0)
# A's device will be /device:GPU:0
# B's device will be /device:GPU:0
# C's device will be /device:GPU:0 because it
# inherits B's device name, after canonicalizing the names.
self.assertEqual(b.op.device, c.op.device)
@test_util.run_deprecated_v1
def testLocationOverrides(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
# Note that this colocation is "redundant", since we are
# within the scope of "/device:GPU:0". However, we would like to
# preserve in the GraphDef that these two ops should be
# colocated in a portable way.
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
d = constant_op.constant(5.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual("/device:GPU:0", a.op.device)
self.assertEqual(a.op.device, b.op.device)
# Test that device function stack is restored.
self.assertEqual("/device:GPU:0", c.op.device)
self.assertEqual("/device:CPU:0", d.op.device)
@test_util.run_deprecated_v1
def testNestedColocateWith(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@a"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testMultiColocationGroups(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@a", b"loc:@b"]), set(c.op.colocation_groups()))
@test_util.run_deprecated_v1
def testColocationIgnoreStack(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op, ignore_existing=True):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups()))
@test_util.run_deprecated_v1
def testColocateWithReset(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(None, ignore_existing=True):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@c"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateWithInitialNoneThenNested(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
with ops.colocate_with(None, ignore_existing=True):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(b.op):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@b"], b.op.colocation_groups())
self.assertEqual([b"loc:@b"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateVariables(self):
a = variables.Variable([2.0], name="a")
with ops.colocate_with(a.op):
b = variables.Variable([3.0], name="b")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
def testColocateWithVariableInFunction(self):
v = variables.Variable(1.)
@def_function.function
def f():
with ops.colocate_with(v):
return array_ops.ones([], name="output")
f()
graph_def = f.get_concrete_function().graph.as_graph_def()
wrap_function.function_from_graph_def(graph_def, [], ["output"])
class DeprecatedTest(test_util.TensorFlowTestCase):
def testSuccess(self):
with ops.Graph().as_default() as g:
test_util.set_producer_version(g, 7)
old = test_ops.old()
with self.session(graph=g):
old.run()
def _error(self):
return ((r"Op Old is not available in GraphDef version %d\. "
r"It has been removed in version 8\. For reasons\.") %
versions.GRAPH_DEF_VERSION)
def testGraphConstructionFail(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(NotImplementedError, self._error()):
test_ops.old()
class DenseTensorLikeTypeTest(test_util.TensorFlowTestCase):
def testSuccess(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertTrue(ops.is_dense_tensor_like(t))
v = variables.Variable([17])
self.assertTrue(ops.is_dense_tensor_like(v))
class BadClassNoName(object):
pass
class BadClassBadName(object):
def name(self):
pass
class BadClassNoDtype(object):
@property
def name(self):
pass
class BadClassBadDtype(object):
@property
def name(self):
pass
def dtype(self):
pass
def testBadClass(self):
with self.assertRaisesRegexp(TypeError, "`name`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassNoName)
with self.assertRaisesRegexp(TypeError, "`name`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassBadName)
with self.assertRaisesRegexp(TypeError, "`dtype`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassNoDtype)
with self.assertRaisesRegexp(TypeError, "`dtype`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassBadDtype)
class NameScopeTest(test_util.TensorFlowTestCase):
def testStripAndPrependScope(self):
strs = [
"hidden1/hidden1/weights", # Same prefix. Should strip.
"hidden1///hidden1/weights", # Extra "/". Should strip.
"^hidden1/hidden1/weights", # Same prefix. Should strip.
"loc:@hidden1/hidden1/weights", # Same prefix. Should strip.
"hhidden1/hidden1/weights", # Different prefix. Should keep.
"hidden1"
] # Not a prefix. Should keep.
expected_striped = [
"hidden1/weights", "hidden1/weights", "^hidden1/weights",
"loc:@hidden1/weights", "hhidden1/hidden1/weights", "hidden1"
]
expected_prepended = [
"hidden2/hidden1/weights", "hidden2/hidden1/weights",
"^hidden2/hidden1/weights", "loc:@hidden2/hidden1/weights",
"hidden2/hhidden1/hidden1/weights", "hidden2/hidden1"
]
name_scope_to_strip = "hidden1"
name_scope_to_add = "hidden2"
for es, ep, s in zip(expected_stripped, expected_prepended, strs):
stripped = ops.strip_name_scope(s, name_scope_to_strip)
self.assertEqual(es, stripped)
self.assertEqual(ep, ops.prepend_name_scope(stripped, name_scope_to_add))
def testGetNameScope(self):
with ops.Graph().as_default() as g:
with ops.name_scope("scope1"):
with ops.name_scope("scope2"):
with ops.name_scope("scope3"):
self.assertEqual("scope1/scope2/scope3", g.get_name_scope())
self.assertEqual("scope1/scope2", g.get_name_scope())
self.assertEqual("scope1", g.get_name_scope())
self.assertEqual("", g.get_name_scope())
def testTwoGraphs(self):
def f():
g1 = ops.Graph()
g2 = ops.Graph()
with g1.as_default():
with g2.as_default():
with ops.name_scope("_"):
pass
self.assertRaisesRegexp(ValueError, "'_' is not a valid scope name", f)
class TracebackTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testTracebackWithStartLines(self):
with self.cached_session() as sess:
a = constant_op.constant(2.0)
sess.run(
a,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(sess.graph.get_operations())
# Tests that traceback_with_start_lines is the same as traceback
# but includes one more element at the end.
for op in sess.graph.get_operations():
self.assertEqual(len(op.traceback), len(op.traceback_with_start_lines))
for frame, frame_with_start_line in zip(
op.traceback, op.traceback_with_start_lines):
self.assertEqual(5, len(frame_with_start_line))
self.assertEqual(frame, frame_with_start_line[:-1])
class EnableEagerExecutionTest(test_util.TensorFlowTestCase):
@test_util.run_v1_only("b/120545219")
def testBadArgumentsToEnableEagerExecution(self):
with self.assertRaisesRegexp(TypeError, "config must be a tf.ConfigProto"):
ops.enable_eager_execution(context.DEVICE_PLACEMENT_SILENT)
with self.assertRaisesRegexp(ValueError, "device_policy must be one of"):
c = config_pb2.ConfigProto()
ops.enable_eager_execution(c, c)
with self.assertRaisesRegexp(ValueError, "execution_mode must be one of"):
c = config_pb2.ConfigProto()
ops.enable_eager_execution(c, execution_mode=c)
class _TupleTensor(composite_tensor.CompositeTensor):
"""`Tensor`-like `tuple`-like for custom `Tensor` conversion masquerading."""
def __init__(self, components):
super(_TupleTensor, self).__init__()
self._components = tuple(ops.convert_to_tensor(c) for c in components)
@property
def _type_spec(self):
return _TupleTensorSpec(type_spec.from_value(c) for c in self._components)
def __getitem__(self, key):
return self._components[key]
def __len__(self):
return len(self._components)
def __iter__(self):
return iter(self._components)
class _TupleTensorSpec(type_spec.TypeSpec):
def __init__(self, specs):
self._specs = specs
value_type = property(lambda self: _TupleTensor)
_component_specs = property(lambda self: self._specs)
def _to_components(self, value):
return value._components
def _from_components(self, components):
# `components` is the tuple returned by `_to_components`; pass it through
# rather than splatting it into separate arguments.
return _TupleTensor(components)
def _serialize(self):
return (self._specs,)
class _MyTuple(object):
"""Pretend user-side class for `ConvertToCompositeTensorTest ."""
def __init__(self, components):
super(_MyTuple, self).__init__()
self._components = tuple(components)
def __getitem__(self, key):
return self._components[key]
def __len__(self):
return len(self._components)
def __iter__(self):
return iter(self._components)
ops.register_tensor_conversion_function(
_MyTuple, conversion_func=lambda x, *_, **__: _TupleTensor(x))
class CustomConvertToCompositeTensorTest(test_util.TensorFlowTestCase):
def testCompositeTensorConversion(self):
"""Tests that a user can register a CompositeTensor converter."""
x = _MyTuple((1, [2., 3.], [[4, 5], [6, 7]]))
y = ops.convert_to_tensor_or_composite(x)
self.assertFalse(tensor_util.is_tensor(y))
self.assertIsInstance(y, _TupleTensor)
self.assertLen(y, len(x))
for x_, y_ in zip(x, y):
self.assertIsInstance(y_, ops.Tensor)
self.assertTrue(tensor_util.is_tensor(y_))
self.assertAllEqual(x_, tensor_util.constant_value(y_))
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for common shapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class CommonShapesTest(test_util.TensorFlowTestCase):
# Asserts that we get the same result with numpy (for known shapes), and that
# the order of arguments does not matter (i.e., broadcasting is commutative).
def _assert_incompatible_broadcast(self, shape1, shape2):
if shape1.dims is not None and shape2.dims is not None:
zeros1 = np.zeros(shape1.as_list())
zeros2 = np.zeros(shape2.as_list())
with self.assertRaises(ValueError):
np.broadcast(zeros1, zeros2)
with self.assertRaises(ValueError):
np.broadcast(zeros2, zeros1)
self.assertFalse(common_shapes.is_broadcast_compatible(shape1, shape2))
self.assertFalse(common_shapes.is_broadcast_compatible(shape2, shape1))
with self.assertRaises(ValueError):
common_shapes.broadcast_shape(shape1, shape2)
with self.assertRaises(ValueError):
common_shapes.broadcast_shape(shape2, shape1)
# Asserts that we get the same result with numpy (for known shapes), and that
# the order of arguments does not matter (i.e., broadcasting is commutative).
def _assert_broadcast(self, expected, shape1, shape2):
if shape1.dims is not None and shape2.dims is not None:
expected_np = expected.as_list()
zeros1 = np.zeros(shape1.as_list())
zeros2 = np.zeros(shape2.as_list())
self.assertAllEqual(expected_np, np.broadcast(zeros1, zeros2).shape)
self.assertAllEqual(expected_np, np.broadcast(zeros2, zeros1).shape)
self.assertEqual(
expected, common_shapes.broadcast_shape(shape1, shape2))
self.assertEqual(
expected, common_shapes.broadcast_shape(shape2, shape1))
else:
self.assertEqual(expected, common_shapes.broadcast_shape(shape1, shape2))
self.assertEqual(expected, common_shapes.broadcast_shape(shape2, shape1))
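# For intuition, a comment-only sketch of the broadcasting rule exercised
# below (shapes written as lists for brevity): dimensions align from the
# right, and a size-1 or missing dimension stretches to match, e.g.
#   broadcast_shape([1, 4], [4, 1]) -> [4, 4]
#   broadcast_shape([], [3, 4]) -> [3, 4]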
def testBroadcast_one_dimension(self):
s1 = tensor_shape.TensorShape([5])
s2 = tensor_shape.TensorShape([7])
unknown = tensor_shape.unknown_shape()
scalar = tensor_shape.TensorShape([])
expanded_scalar = tensor_shape.TensorShape([1])
# Tensors with same shape should have the same broadcast result.
for shape in (s1, s2, unknown, scalar, expanded_scalar):
self._assert_broadcast(expected=shape, shape1=shape, shape2=shape)
# [] and [1] act like identity.
self._assert_broadcast(expected=s1, shape1=s1, shape2=scalar)
self._assert_broadcast(expected=s2, shape1=s2, shape2=scalar)
self._assert_broadcast(expected=s1, shape1=s1, shape2=expanded_scalar)
self._assert_broadcast(expected=s2, shape1=s2, shape2=expanded_scalar)
self._assert_broadcast(expected=unknown, shape1=s1, shape2=unknown)
self._assert_broadcast(expected=unknown, shape1=s2, shape2=unknown)
self._assert_broadcast(
expected=expanded_scalar, shape1=scalar, shape2=expanded_scalar)
self._assert_incompatible_broadcast(shape1=s1, shape2=s2)
def testBroadcast_many_dimensions(self):
unknown = tensor_shape.unknown_shape()
shape_0 = tensor_shape.TensorShape([])
shape_1 = tensor_shape.TensorShape([1])
shape_4 = tensor_shape.TensorShape([4])
shape_1x4 = tensor_shape.TensorShape([1, 4])
shape_4x1 = tensor_shape.TensorShape([4, 1])
shape_3x4 = tensor_shape.TensorShape([3, 4])
shape_4x3 = tensor_shape.TensorShape([4, 3])
# Tensors with same shape should have the same broadcast result.
for shape in (
shape_0, shape_1, shape_4, shape_1x4, shape_4x1, shape_3x4, shape_4x3):
self._assert_broadcast(expected=shape, shape1=shape, shape2=shape)
# [] and [1] act like identity.
for identity in (shape_0, shape_1):
for shape in (shape_4, shape_1x4, shape_4x1, shape_3x4, shape_4x3):
self._assert_broadcast(expected=shape, shape1=identity, shape2=shape)
# Unknown in, unknown out.
for shape in (shape_4, shape_1x4, shape_4x1, shape_3x4, shape_4x3):
self._assert_broadcast(expected=unknown, shape1=shape, shape2=unknown)
self._assert_broadcast(expected=shape_1x4, shape1=shape_4, shape2=shape_1x4)
shape_4x4 = tensor_shape.TensorShape([4, 4])
self._assert_broadcast(expected=shape_4x4, shape1=shape_4, shape2=shape_4x1)
self._assert_broadcast(expected=shape_3x4, shape1=shape_4, shape2=shape_3x4)
self._assert_incompatible_broadcast(shape1=shape_4, shape2=shape_4x3)
self._assert_broadcast(
expected=shape_4x4, shape1=shape_1x4, shape2=shape_4x1)
self._assert_broadcast(
expected=shape_3x4, shape1=shape_1x4, shape2=shape_3x4)
self._assert_incompatible_broadcast(shape1=shape_1x4, shape2=shape_4x3)
self._assert_incompatible_broadcast(shape1=shape_4x1, shape2=shape_3x4)
self._assert_broadcast(
expected=shape_4x3, shape1=shape_4x1, shape2=shape_4x3)
self._assert_incompatible_broadcast(shape1=shape_3x4, shape2=shape_4x3)
# Asserts that the order of arguments does not matter (i.e., broadcasting is
# commutative).
def _assert_broadcast_with_unknown_dims(self, expected, shape1, shape2):
actual_dims = common_shapes.broadcast_shape(shape1, shape2).dims
reflexive_actual_dims = common_shapes.broadcast_shape(shape2, shape1).dims
if actual_dims is None:
self.assertIsNone(reflexive_actual_dims)
elif reflexive_actual_dims is None:
self.assertIsNone(actual_dims)
else:
self.assertEqual(len(actual_dims), len(reflexive_actual_dims))
for actual_dim, reflexive_actual_dim in zip(
actual_dims, reflexive_actual_dims):
self.assertEqual(actual_dim.value, reflexive_actual_dim.value)
expected_dims = expected.dims
if expected_dims is None:
self.assertIsNone(actual_dims)
elif actual_dims is None:
self.assertIsNone(expected_dims)
else:
self.assertEqual(len(expected_dims), len(actual_dims))
for expected_dim, actual_dim in zip(expected_dims, actual_dims):
self.assertEqual(expected_dim.value, actual_dim.value)
def testBroadcast_unknown_dims(self):
unknown = tensor_shape.unknown_shape()
shape_0 = tensor_shape.TensorShape([])
shape_1 = tensor_shape.TensorShape([1])
# pylint: disable=invalid-name
shape_U = tensor_shape.TensorShape([None])
shape_1xU = tensor_shape.TensorShape([1, None])
shape_Ux1 = tensor_shape.TensorShape([None, 1])
shape_4xU = tensor_shape.TensorShape([4, None])
shape_Ux4 = tensor_shape.TensorShape([None, 4])
# pylint: enable=invalid-name
# Tensors with same shape should have the same broadcast result.
for shape in (shape_U, shape_1xU, shape_Ux1, shape_4xU, shape_Ux4):
self._assert_broadcast_with_unknown_dims(
expected=shape, shape1=shape, shape2=shape)
# [] and [1] act like identity.
for identity in (shape_0, shape_1):
for shape in (shape_U, shape_1xU, shape_Ux1, shape_4xU, shape_Ux4):
self._assert_broadcast_with_unknown_dims(
expected=shape, shape1=identity, shape2=shape)
# Unknown in, unknown out.
for shape in (shape_U, shape_1xU, shape_Ux1, shape_4xU, shape_Ux4):
self._assert_broadcast_with_unknown_dims(
expected=unknown, shape1=shape, shape2=unknown)
self._assert_broadcast_with_unknown_dims(
expected=shape_1xU, shape1=shape_U, shape2=shape_1xU)
shape_UxU = tensor_shape.TensorShape([None, None]) # pylint: disable=invalid-name
self._assert_broadcast_with_unknown_dims(
expected=shape_UxU, shape1=shape_U, shape2=shape_Ux1)
self._assert_broadcast_with_unknown_dims(
expected=shape_4xU, shape1=shape_U, shape2=shape_4xU)
self._assert_broadcast_with_unknown_dims(
expected=shape_Ux4, shape1=shape_U, shape2=shape_Ux4)
self._assert_broadcast_with_unknown_dims(
expected=shape_UxU, shape1=shape_1xU, shape2=shape_Ux1)
self._assert_broadcast_with_unknown_dims(
expected=shape_4xU, shape1=shape_1xU, shape2=shape_4xU)
self._assert_broadcast_with_unknown_dims(
expected=shape_Ux4, shape1=shape_1xU, shape2=shape_Ux4)
self._assert_broadcast_with_unknown_dims(
expected=shape_4xU, shape1=shape_Ux1, shape2=shape_4xU)
self._assert_broadcast_with_unknown_dims(
expected=shape_Ux4, shape1=shape_Ux1, shape2=shape_Ux4)
shape_4x4 = tensor_shape.TensorShape([4, 4])
self._assert_broadcast_with_unknown_dims(
expected=shape_4x4, shape1=shape_4xU, shape2=shape_Ux4)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/common_shapes_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function for interpolating formatted errors from the TensorFlow runtime.
Exposes the function `interpolate` to interpolate messages with tags of the form
{{type name}}.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import os
import re
import six
from tensorflow.core.protobuf import graph_debug_info_pb2
from tensorflow.python.util import tf_stack
_NAME_REGEX = r"[A-Za-z0-9_.][A-Za-z0-9_.\-/]*?"
_TAG_REGEX = r"{{{{({name}) ({name})}}}}".format(name=_NAME_REGEX)
_INTERPOLATION_REGEX = r"^(.*?)({tag})".format(tag=_TAG_REGEX)
_INTERPOLATION_PATTERN = re.compile(_INTERPOLATION_REGEX, re.DOTALL)
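# A hedged, comment-only illustration of the pattern above (group numbers
# follow the compiled regex as written): matching "error at {{node Foo/bar}}"
# yields group(1) == "error at ", group(3) == "node", group(4) == "Foo/bar".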
_ParseTag = collections.namedtuple("_ParseTag", ["type", "name"])
_BAD_FILE_SUBSTRINGS = [
os.path.join("tensorflow", "python"),
os.path.join("tensorflow", "contrib"),
os.path.join("tensorflow_estimator", "python"),
os.path.join("tensorflow_estimator", "contrib"),
"<embedded",
]
def parse_message(message):
"""Parses the message.
Splits the message into separators and tags. Tags are named tuples
representing the string {{type name}} and they are separated by
separators. For example, in "123{{node Foo}}456{{node Bar}}789", there are
two tags and three separators. The separators are the numeric characters.
Args:
message: String to parse
Returns:
(list of separator strings, list of _ParseTags).
For example, if message is "123{{node Foo}}456" then this function
returns (["123", "456"], [_ParseTag("node", "Foo")])
"""
seps = []
tags = []
pos = 0
while pos < len(message):
match = re.match(_INTERPOLATION_PATTERN, message[pos:])
if match:
seps.append(match.group(1))
tags.append(_ParseTag(match.group(3), match.group(4)))
pos += match.end()
else:
break
seps.append(message[pos:])
return seps, tags
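# A comment-only sketch of the contract documented above:
#   seps, tags = parse_message("123{{node Foo}}456{{node Bar}}789")
#   # seps == ["123", "456", "789"]
#   # tags == [_ParseTag("node", "Foo"), _ParseTag("node", "Bar")]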
def _compute_device_summary_from_list(name, device_assignment_list, prefix=""):
"""Return a summary of an op's device function stack.
Args:
name: The name of the op.
device_assignment_list: The op._device_assignments list.
prefix: An optional string prefix used before each line of the
multi-line string returned by this function.
Returns:
A multi-line string similar to:
Device assignments active during op 'foo' creation:
with tf.device(/cpu:0): <test_1.py:27>
with tf.device(some_func<foo.py, 123>): <test_2.py:38>
The first line will have no padding to its left by default. Subsequent
lines will have two spaces of left-padding. Use the prefix argument
to increase indentation.
"""
if not device_assignment_list:
message = "No device assignments were active during op '%s' creation."
message %= name
return prefix + message
str_list = []
str_list.append(
"%sDevice assignments active during op '%s' creation:" % (prefix, name))
for traceable_obj in device_assignment_list:
location_summary = "<{file}:{line}>".format(
file=traceable_obj.filename, line=traceable_obj.lineno)
subs = {
"prefix": prefix,
"indent": " ",
"dev_name": traceable_obj.obj,
"loc": location_summary,
}
str_list.append(
"{prefix}{indent}with tf.device({dev_name}): {loc}".format(**subs))
return "\n".join(str_list)
def _compute_device_assignment_summary_from_op(op, prefix=""):
# pylint: disable=protected-access
return _compute_device_summary_from_list(op.name, op._device_assignments,
prefix)
# pylint: enable=protected-access
def _compute_colocation_summary_from_dict(name, colocation_dict, prefix=""):
"""Return a summary of an op's colocation stack.
Args:
name: The op name.
colocation_dict: The op._colocation_dict.
prefix: An optional string prefix used before each line of the
multi-line string returned by this function.
Returns:
A multi-line string similar to:
Node-device colocations active during op creation:
with tf.compat.v1.colocate_with(test_node_1): <test_1.py:27>
with tf.compat.v1.colocate_with(test_node_2): <test_2.py:38>
The first line will have no padding to its left by default. Subsequent
lines will have two spaces of left-padding. Use the prefix argument
to increase indentation.
"""
if not colocation_dict:
message = "No node-device colocations were active during op '%s' creation."
message %= name
return prefix + message
str_list = []
str_list.append("%sNode-device colocations active during op '%s' creation:" %
(prefix, name))
for coloc_name, location in colocation_dict.items():
location_summary = "<{file}:{line}>".format(
file=location.filename, line=location.lineno)
subs = {
"prefix": prefix,
"indent": " ",
"name": coloc_name,
"loc": location_summary,
}
str_list.append(
"{prefix}{indent}with tf.colocate_with({name}): {loc}".format(**subs))
return "\n".join(str_list)
def _compute_colocation_summary_from_op(op, prefix=""):
"""Fetch colocation file, line, and nesting and return a summary string."""
# pylint: disable=protected-access
return _compute_colocation_summary_from_dict(op.name, op._colocation_dict,
prefix)
# pylint: enable=protected-access
def _find_index_of_defining_frame_for_op(op):
"""Return index in op.traceback with first 'useful' frame.
This method reads through the stack stored in op.traceback looking for the
innermost frame which (hopefully) belongs to the caller. It accomplishes this
by rejecting frames whose filename appears to come from TensorFlow (see
error_interpolation._BAD_FILE_SUBSTRINGS for the list of rejected substrings).
Args:
op: the Operation object for which we would like to find the defining
location.
Returns:
Integer index into op.traceback where the first non-TF file was found
(innermost to outermost), or 0 (for the outermost stack frame) if all files
came from TensorFlow.
"""
# Index 0 of tf_traceback is the outermost frame.
tf_traceback = op.traceback
size = len(tf_traceback)
filenames = [frame[tf_stack.TB_FILENAME] for frame in tf_traceback]
# We process the filenames from the innermost frame to outermost.
for idx, filename in enumerate(reversed(filenames)):
contains_bad_substrings = [ss in filename for ss in _BAD_FILE_SUBSTRINGS]
if not any(contains_bad_substrings):
return size - idx - 1
return 0
def _get_defining_frame_from_op(op):
"""Find and return stack frame where op was defined."""
frame_index = _find_index_of_defining_frame_for_op(op)
return op.traceback[frame_index]
def _compute_useful_frames(op, num):
"""Return a list of frames, which form a 'useful' stack.
Starting from the defining frame to the outermost one, this method computes
the contiguous portion of the 'useful' stack trace and returns the selected
frames.
Args:
op: op.Operation object having a _traceback member.
num: total number of frames to return.
Returns:
A list of frames.
"""
defining_frame_index = _find_index_of_defining_frame_for_op(op)
# The returned slice keeps at most `num` contiguous frames, ending two frames
# beyond (inside) the defining frame. Those two extra TensorFlow-library
# frames give context about where the node is defined.
innermost_excluded = min(defining_frame_index + 2 + 1, len(op.traceback))
outermost_included = max(innermost_excluded - num, 0)
return op.traceback[outermost_included:innermost_excluded]
def create_graph_debug_info_def(operations):
"""Construct and returns a `GraphDebugInfo` protocol buffer.
Args:
operations: An iterable of (function name, op.Operation) pairs, where
each op has a _traceback member.
Returns:
GraphDebugInfo protocol buffer.
Raises:
TypeError: If the arguments are not of the correct proto buffer type.
"""
# Creates an empty GraphDebugInfoDef proto.
graph_debug_info_def = graph_debug_info_pb2.GraphDebugInfo()
# Gets the file names and line numbers for the exported node names. Also
# collects the unique file names.
all_file_names = set()
node_to_trace = {}
for func, op in operations:
# Gets the stack trace of the operation and then the file location.
node_name = func + op.name
node_to_trace[node_name] = _compute_useful_frames(op, 10)
for frame in node_to_trace[node_name]:
all_file_names.add(frame[tf_stack.TB_FILENAME])
# Sets the `files` field in the GraphDebugInfo proto
graph_debug_info_def.files.extend(all_file_names)
# Builds a mapping between file names and index of the `files` field, so we
# only store the indexes for the nodes in the GraphDebugInfo.
file_to_index = dict(
[(y, x) for x, y in enumerate(graph_debug_info_def.files)])
# Creates the FileLineCol proto for each node and sets the value in the
# GraphDebugInfo proto. We only store the file name index for each node to
# save the storage space.
for node_name, frames in node_to_trace.items():
trace_def = graph_debug_info_def.traces[node_name]
for frame in reversed(frames):
trace_def.file_line_cols.add(
file_index=file_to_index[frame[tf_stack.TB_FILENAME]],
line=frame[tf_stack.TB_LINENO])
return graph_debug_info_def
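# Comment-only sketch of the trace keying above: keys concatenate the
# function name and op name, so an op "x" traced under function "f" is
# stored as graph_debug_info_def.traces["fx"].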
def compute_field_dict(op, strip_file_prefix=""):
"""Return a dictionary mapping interpolation tokens to values.
Args:
op: op.Operation object having a _traceback member.
strip_file_prefix: The common path in the stacktrace. We remove the prefix
from the file names.
Returns:
A dictionary mapping string tokens to string values. The keys are shown
below along with example values.
{
"file": "tool_utils.py",
"line": "124",
"defined_at": " (defined at tool_utils.py:124)",
"colocations":
'''Node-device colocations active during op creation:
with tf.compat.v1.colocate_with(test_node_1): <test_1.py:27>
with tf.compat.v1.colocate_with(test_node_2): <test_2.py:38>'''
"devices":
'''Device assignments active during op 'foo' creation:
with tf.device(/cpu:0): <test_1.py:27>
with tf.device(some_func<foo.py, 123>): <test_2.py:38>'''
"devs_and_colocs": A concatenation of colocations and devices, e.g.
'''Node-device colocations active during op creation:
with tf.compat.v1.colocate_with(test_node_1): <test_1.py:27>
with tf.compat.v1.colocate_with(test_node_2): <test_2.py:38>'''
Device assignments active during op 'foo' creation:
with tf.device(/cpu:0): <test_1.py:27>
with tf.device(some_func<foo.py, 123>): <test_2.py:38>'''
}
"""
frame = _get_defining_frame_from_op(op)
filename = frame[tf_stack.TB_FILENAME]
if filename.startswith(strip_file_prefix):
filename = filename[len(strip_file_prefix):]
lineno = frame[tf_stack.TB_LINENO]
defined_at = " (defined at %s:%d)" % (filename, lineno)
colocation_summary = _compute_colocation_summary_from_op(op)
device_summary = _compute_device_assignment_summary_from_op(op)
combined_summary = "\n".join([colocation_summary, device_summary])
field_dict = {
"file": filename,
"line": lineno,
"defined_at": defined_at,
"colocations": colocation_summary,
"devices": device_summary,
"devs_and_colocs": combined_summary,
}
return field_dict
def traceback_files_common_prefix(all_ops):
"""Determines the common prefix from the paths of the stacktrace of 'all_ops'.
For example, if the paths are '/foo/bar/baz/' and '/foo/car', this would
return '/foo'.
Args:
all_ops: All the input nodes in the form of a list of lists of ops.
Returns:
The common prefix.
"""
files = set()
for ops in all_ops:
if ops is None:
continue
for op in ops:
for frame in op.traceback:
filename = frame[tf_stack.TB_FILENAME]
if "<embedded" not in filename:
files.add(filename)
return os.path.split(os.path.commonprefix(list(files)))[0]
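# Comment-only sketch of the two-step prefix computation above:
#   os.path.commonprefix(["/foo/bar.py", "/foo/baz.py"]) # -> "/foo/ba"
#   os.path.split("/foo/ba")[0] # -> "/foo"
# os.path.split trims the partial path component that a purely character-wise
# common prefix can leave behind.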
def _sources_for_node(node, graph):
"""Gets the input op nodes for 'node'.
Args:
node: The node.
graph: The graph containing the node.
Returns:
The unique input nodes.
"""
inputs = set()
for name in node.node_def.input:
if name.startswith("^"):
name = name[1:]
try:
tensor = graph.get_tensor_by_name(name)
op = tensor.op
except (KeyError, ValueError):
try:
op = graph.get_operation_by_name(name)
except KeyError:
continue
inputs.add(op)
return list(inputs)
def _build_error_message(op, input_ops, common_prefix):
"""Returns the formatted error message for the given op.
Args:
op: The node.
input_ops: The input nodes to the 'op' node
common_prefix: The prefix path common to the stacktrace of inputs.
Returns:
The formatted error message for the given op. The error message also
includes the information about the input sources for the given op.
"""
field_dict = compute_field_dict(op, common_prefix)
msg = "node %s%s " % (op.name, field_dict["defined_at"])
input_debug_info = []
# This stores the line numbers that we have already printed.
done = set()
done.add(field_dict["defined_at"])
for op_inp in input_ops:
field_dict_inp = compute_field_dict(op_inp, common_prefix)
if field_dict_inp["defined_at"] not in done:
input_debug_info.append(
" %s%s" % (op_inp.name, field_dict_inp["defined_at"]))
done.add(field_dict_inp["defined_at"])
if input_debug_info:
end_msg = ("\nInput Source operations connected to node %s:\n") % (op.name)
end_msg += "\t\n".join(input_debug_info)
else:
end_msg = ""
return msg, end_msg
def interpolate(error_message, graph):
"""Interpolates an error message.
The error message can contain tags of the form `{{type name}}` which will be
replaced. For example, "{{node <name>}}" would get expanded to:
"node <name> (defined at <path>)".
Args:
error_message: A string to interpolate.
graph: ops.Graph object containing all nodes referenced in the error
message.
Returns:
The string with tags of the form {{type name}} interpolated.
"""
seps, tags = parse_message(error_message)
subs = []
end_msg = collections.defaultdict(list)
tagged_ops = []
for t in tags:
try:
op = graph.get_operation_by_name(t.name)
except KeyError:
op = None
if op is None:
tagged_ops.append(None)
else:
tagged_ops.append([op] + _sources_for_node(op, graph))
common_prefix = traceback_files_common_prefix(tagged_ops)
for tag, ops in zip(tags, tagged_ops):
msg = "{{%s %s}}" % (tag.type, tag.name)
if ops is not None:
if tag.type == "node":
msg, source_msg = _build_error_message(ops[0], ops[1:], common_prefix)
if source_msg:
end_msg["source_nodes"].append(source_msg)
elif tag.type == "colocation_node":
field_dict = compute_field_dict(ops[0], common_prefix)
msg = "node %s%s placed on device %s " % (
ops[0].name, field_dict["defined_at"], field_dict["devices"])
end_msg["colocations"].append(field_dict["devs_and_colocs"])
if tag.type == "function_node":
msg = ""
subs.append(msg)
if "source_nodes" in end_msg:
subs.append("\n\nErrors may have originated from an input operation.")
subs.append("\n".join(end_msg["source_nodes"]))
end_msg.pop("source_nodes", None)
for k, messages in end_msg.items():
subs.append("Additional information about %s:" % k)
subs.append("\n".join(messages))
return "".join(
itertools.chain(*six.moves.zip_longest(seps, subs, fillvalue="")))
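# A hedged usage sketch (assumes `g` is an ops.Graph containing an op named
# "Foo"; the interpolated location depends on where "Foo" was defined):
#   message = "Shape mismatch in {{node Foo}} during training"
#   interpolate(message, g)
#   # -> "Shape mismatch in node Foo (defined at <file>:<line>) during
#   #    training", possibly followed by input-source details.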
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/error_interpolation.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util.tf_export import tf_export
__version__ = pywrap_tensorflow.__version__
__git_version__ = pywrap_tensorflow.__git_version__
__compiler_version__ = pywrap_tensorflow.__compiler_version__
__cxx11_abi_flag__ = pywrap_tensorflow.__cxx11_abi_flag__
__monolithic_build__ = pywrap_tensorflow.__monolithic_build__
VERSION = __version__
tf_export(
"version.VERSION",
"__version__",
v1=["version.VERSION", "VERSION", "__version__"]).export_constant(
__name__, "VERSION")
GIT_VERSION = __git_version__
tf_export(
"version.GIT_VERSION",
"__git_version__",
v1=["version.GIT_VERSION", "GIT_VERSION",
"__git_version__"]).export_constant(__name__, "GIT_VERSION")
COMPILER_VERSION = __compiler_version__
tf_export(
"version.COMPILER_VERSION",
"__compiler_version__",
v1=["version.COMPILER_VERSION", "COMPILER_VERSION",
"__compiler_version__"]).export_constant(__name__, "COMPILER_VERSION")
CXX11_ABI_FLAG = __cxx11_abi_flag__
tf_export(
"sysconfig.CXX11_ABI_FLAG",
"__cxx11_abi_flag__",
v1=["sysconfig.CXX11_ABI_FLAG", "CXX11_ABI_FLAG",
"__cxx11_abi_flag__"]).export_constant(__name__, "CXX11_ABI_FLAG")
MONOLITHIC_BUILD = __monolithic_build__
tf_export(
"sysconfig.MONOLITHIC_BUILD",
"__monolithic_build__",
v1=[
"sysconfig.MONOLITHIC_BUILD", "MONOLITHIC_BUILD", "__monolithic_build__"
]).export_constant(__name__, "MONOLITHIC_BUILD")
GRAPH_DEF_VERSION = pywrap_tensorflow.GRAPH_DEF_VERSION
tf_export(
"version.GRAPH_DEF_VERSION",
v1=["version.GRAPH_DEF_VERSION", "GRAPH_DEF_VERSION"]).export_constant(
__name__, "GRAPH_DEF_VERSION")
GRAPH_DEF_VERSION_MIN_CONSUMER = (
pywrap_tensorflow.GRAPH_DEF_VERSION_MIN_CONSUMER)
tf_export(
"version.GRAPH_DEF_VERSION_MIN_CONSUMER",
v1=[
"version.GRAPH_DEF_VERSION_MIN_CONSUMER",
"GRAPH_DEF_VERSION_MIN_CONSUMER"
]).export_constant(__name__, "GRAPH_DEF_VERSION_MIN_CONSUMER")
GRAPH_DEF_VERSION_MIN_PRODUCER = (
pywrap_tensorflow.GRAPH_DEF_VERSION_MIN_PRODUCER)
tf_export(
"version.GRAPH_DEF_VERSION_MIN_PRODUCER",
v1=[
"version.GRAPH_DEF_VERSION_MIN_PRODUCER",
"GRAPH_DEF_VERSION_MIN_PRODUCER"
]).export_constant(__name__, "GRAPH_DEF_VERSION_MIN_PRODUCER")
__all__ = [
"__version__",
"__git_version__",
"__compiler_version__",
"__cxx11_abi_flag__",
"__monolithic_build__",
"COMPILER_VERSION",
"CXX11_ABI_FLAG",
"GIT_VERSION",
"GRAPH_DEF_VERSION",
"GRAPH_DEF_VERSION_MIN_CONSUMER",
"GRAPH_DEF_VERSION_MIN_PRODUCER",
"VERSION",
"MONOLITHIC_BUILD",
]
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/versions.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exception types for TensorFlow errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.framework import errors_impl as _impl
# pylint: enable=unused-import
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.framework.errors_impl import *
# pylint: enable=wildcard-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/errors.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.composite_tensor_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import composite_tensor_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import googletest
class CompositeTensorTest(test_util.TensorFlowTestCase):
def test_is_composite(self):
# Validate that all composite tensor and value types return true.
self.assertTrue(
composite_tensor_utils.is_composite_or_composite_value(
sparse_tensor.SparseTensor([[0, 0]], [1], [1, 1])))
self.assertTrue(
composite_tensor_utils.is_composite_or_composite_value(
sparse_tensor.SparseTensorValue([[0, 0]], [1], [1, 1])))
self.assertTrue(
composite_tensor_utils.is_composite_or_composite_value(
ragged_tensor.RaggedTensor.from_row_splits(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))))
self.assertTrue(
composite_tensor_utils.is_composite_or_composite_value(
ragged_tensor_value.RaggedTensorValue(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))))
# Test that numpy arrays and tensors return false.
self.assertFalse(
composite_tensor_utils.is_composite_or_composite_value(
np.ndarray([0, 1])))
self.assertFalse(
composite_tensor_utils.is_composite_or_composite_value(
ops.convert_to_tensor([3, 1])))
def test_sparse_concatenation(self):
tensor_1 = sparse_tensor.SparseTensor([[0, 0]], [1], [1, 1])
tensor_2 = sparse_tensor.SparseTensor([[0, 0]], [2], [1, 1])
concatenated_tensor = composite_tensor_utils.append_composite_tensor(
tensor_1, tensor_2)
evaluated_tensor = self.evaluate(concatenated_tensor)
self.assertAllEqual(evaluated_tensor.indices, [[0, 0], [1, 0]])
self.assertAllEqual(evaluated_tensor.values, [1, 2])
self.assertAllEqual(evaluated_tensor.dense_shape, [2, 1])
def test_sparse_value_concatenation(self):
tensor_1 = sparse_tensor.SparseTensorValue([[0, 0]], [1], [1, 1])
tensor_2 = sparse_tensor.SparseTensorValue([[0, 0]], [2], [1, 1])
concatenated_tensor = composite_tensor_utils.append_composite_tensor(
tensor_1, tensor_2)
self.assertAllEqual(concatenated_tensor.indices, [[0, 0], [1, 0]])
self.assertAllEqual(concatenated_tensor.values, [1, 2])
self.assertAllEqual(concatenated_tensor.dense_shape, [2, 1])
def test_ragged_concatenation(self):
tensor_1 = ragged_tensor.RaggedTensor.from_row_splits(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))
tensor_2 = ragged_tensor.RaggedTensor.from_row_splits(
np.array([3, 4, 5]), np.array([0, 2, 3], dtype=np.int64))
concatenated_tensor = composite_tensor_utils.append_composite_tensor(
tensor_1, tensor_2)
evaluated_tensor = self.evaluate(concatenated_tensor)
self.assertAllEqual(evaluated_tensor.values, [0, 1, 2, 3, 4, 5])
self.assertAllEqual(evaluated_tensor.row_splits, [0, 1, 3, 5, 6])
def test_ragged_value_concatenation(self):
tensor_1 = ragged_tensor_value.RaggedTensorValue(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))
tensor_2 = ragged_tensor_value.RaggedTensorValue(
np.array([3, 4, 5]), np.array([0, 2, 3], dtype=np.int64))
concatenated_tensor = composite_tensor_utils.append_composite_tensor(
tensor_1, tensor_2)
self.assertAllEqual(concatenated_tensor.values, [0, 1, 2, 3, 4, 5])
self.assertAllEqual(concatenated_tensor.row_splits, [0, 1, 3, 5, 6])
if __name__ == '__main__':
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/composite_tensor_utils_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import random
import threading
import weakref
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@test_util.run_deprecated_v1
def test_assert_ops_in_graph(self):
with self.test_session():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"bye": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"hello": "Variable"}, ops.get_default_graph())
@test_util.run_deprecated_v1
def test_session_functions(self):
with self.test_session() as sess:
sess_ref = weakref.ref(sess)
with self.cached_session(graph=None, config=None) as sess2:
# We make sure that sess2 is sess.
assert sess2 is sess
# We make sure we raise an exception if we use cached_session with
# different values.
with self.assertRaises(ValueError):
with self.cached_session(graph=ops.Graph()) as sess2:
pass
with self.assertRaises(ValueError):
with self.cached_session(force_gpu=True) as sess2:
pass
# We make sure that test_session will cache the session even after the
# with scope.
assert not sess_ref()._closed
with self.session() as unique_sess:
unique_sess_ref = weakref.ref(unique_sess)
with self.session() as sess2:
assert sess2 is not unique_sess
# We make sure the session is closed when we leave the with statement.
assert unique_sess_ref()._closed
def test_assert_equal_graph_def(self):
with ops.Graph().as_default() as g:
def_empty = g.as_graph_def()
constant_op.constant(5, name="five")
constant_op.constant(7, name="seven")
def_57 = g.as_graph_def()
with ops.Graph().as_default() as g:
constant_op.constant(7, name="seven")
constant_op.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
test_util.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegexp(AssertionError,
r"^Found unexpected node '{{node seven}}"):
test_util.assert_equal_graph_def(def_57, def_empty)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testIsMklEnabled(self):
# This test doesn't assert anything.
# It ensures the py wrapper function is generated correctly.
if test_util.IsMklEnabled():
print("MKL is enabled")
else:
print("MKL is disabled")
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsAny(self):
# Test assertProtoEquals with a protobuf.Any field.
meta_graph_def_str = """
meta_info_def {
meta_graph_version: "outer"
any_info {
[type.googleapis.com/tensorflow.MetaGraphDef] {
meta_info_def {
meta_graph_version: "inner"
}
}
}
}
"""
meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
# Check if the assertion failure message contains the content of
# the inner proto.
with self.assertRaisesRegexp(AssertionError,
r'meta_graph_version: "inner"'):
self.assertProtoEquals("", meta_graph_def_outer)
@test_util.run_in_graph_and_eager_modes
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [
self.checkedThread(
target=err_func, args=(i,)) for i in range(10)
]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
with ops.Graph().as_default():
node_def = ops._NodeDef("IntOutput", "name")
node_def_orig = ops._NodeDef("IntOutput", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(),
original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
@test_util.run_in_graph_and_eager_modes
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
@test_util.run_in_graph_and_eager_modes
def testAllCloseTensors(self):
a_raw_data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a = constant_op.constant(a_raw_data)
b = math_ops.add(1, constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
self.assertAllClose(a, b)
self.assertAllClose(a, a_raw_data)
a_dict = {"key": a}
b_dict = {"key": b}
self.assertAllClose(a_dict, b_dict)
x_list = [a, b]
y_list = [a_raw_data, b]
self.assertAllClose(x_list, y_list)
@test_util.run_in_graph_and_eager_modes
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 7 + 1e-5)
@test_util.run_in_graph_and_eager_modes
def testAllCloseList(self):
with self.assertRaisesRegexp(AssertionError, r"not close dif"):
self.assertAllClose([0], [1])
@test_util.run_in_graph_and_eager_modes
def testAllCloseDictToNonDict(self):
with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose(1, {"a": 1})
with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose({"a": 1}, 1)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNamedtuples(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
my_named_tuple = collections.namedtuple("MyNamedTuple", ["a", "b", "c"])
# Identity.
self.assertAllClose(expected, my_named_tuple(a=a, b=b, c=c))
self.assertAllClose(
my_named_tuple(a=a, b=b, c=c), my_named_tuple(a=a, b=b, c=c))
@test_util.run_in_graph_and_eager_modes
def testAllCloseDicts(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
# Identity.
self.assertAllClose(expected, expected)
self.assertAllClose(expected, dict(expected))
# With each item removed.
for k in expected:
actual = dict(expected)
del actual[k]
with self.assertRaisesRegexp(AssertionError, r"mismatched keys"):
self.assertAllClose(expected, actual)
# With each item changed.
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
with self.assertRaisesRegexp(AssertionError, r"Shape mismatch"):
self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
c_copy = np.array(c)
c_copy[1, 1, 1] += 1e-5
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
@test_util.run_in_graph_and_eager_modes
def testAllCloseListOfNamedtuples(self):
my_named_tuple = collections.namedtuple("MyNamedTuple", ["x", "y"])
l1 = [
my_named_tuple(x=np.array([[2.3, 2.5]]), y=np.array([[0.97, 0.96]])),
my_named_tuple(x=np.array([[3.3, 3.5]]), y=np.array([[0.98, 0.99]]))
]
l2 = [
([[2.3, 2.5]], [[0.97, 0.96]]),
([[3.3, 3.5]], [[0.98, 0.99]]),
]
self.assertAllClose(l1, l2)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNestedStructure(self):
a = {"x": np.ones((3, 2, 4)) * 7, "y": (2, [{"nested": {"m": 3, "n": 4}}])}
self.assertAllClose(a, a)
b = copy.deepcopy(a)
self.assertAllClose(a, b)
# Test mismatched values
b["y"][1][0]["nested"]["n"] = 4.2
with self.assertRaisesRegexp(AssertionError,
r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
self.assertAllClose(a, b)
@test_util.run_in_graph_and_eager_modes
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
@test_util.skip_if(True) # b/117665998
def testForceGPU(self):
with self.assertRaises(errors.InvalidArgumentError):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = constant_op.constant(True)
y = [15]
control_flow_ops.Assert(x, y).run()
@test_util.run_in_graph_and_eager_modes
def testAssertAllCloseAccordingToType(self):
# test plain int
self.assertAllCloseAccordingToType(1, 1, rtol=1e-8, atol=1e-8)
# test float64
self.assertAllCloseAccordingToType(
np.asarray([1e-8], dtype=np.float64),
np.asarray([2e-8], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-8], dtype=dtypes.float64),
constant_op.constant([2e-8], dtype=dtypes.float64),
rtol=1e-8,
atol=1e-8)
    with self.assertRaises(AssertionError):
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float64),
np.asarray([2e-7], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
# test float32
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float32),
np.asarray([2e-7], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-7], dtype=dtypes.float32),
constant_op.constant([2e-7], dtype=dtypes.float32),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7)
    with self.assertRaises(AssertionError):
self.assertAllCloseAccordingToType(
np.asarray([1e-6], dtype=np.float32),
np.asarray([2e-6], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
# test float16
self.assertAllCloseAccordingToType(
np.asarray([1e-4], dtype=np.float16),
np.asarray([2e-4], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-4], dtype=dtypes.float16),
constant_op.constant([2e-4], dtype=dtypes.float16),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7,
half_rtol=1e-4,
half_atol=1e-4)
    with self.assertRaises(AssertionError):
self.assertAllCloseAccordingToType(
np.asarray([1e-3], dtype=np.float16),
np.asarray([2e-3], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
@test_util.run_in_graph_and_eager_modes
def testAssertAllEqual(self):
i = variables.Variable([100] * 3, dtype=dtypes.int32, name="i")
j = constant_op.constant([20] * 3, dtype=dtypes.int32, name="j")
k = math_ops.add(i, j, name="k")
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([120] * 3, k)
self.assertAllEqual([20] * 3, j)
with self.assertRaisesRegexp(AssertionError, r"not equal lhs"):
self.assertAllEqual([0] * 3, k)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllClose(self):
# Test with arrays
self.assertNotAllClose([0.1], [0.2])
with self.assertRaises(AssertionError):
self.assertNotAllClose([-1.0, 2.0], [-1.0, 2.0])
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
self.assertNotAllClose([0.9, 1.0], x)
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.0, 1.0], x)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseRTol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], rtol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, rtol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseATol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], atol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, atol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLess(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllClose([110.0, 120.0, 130.0], z)
self.assertAllGreater(x, 95.0)
self.assertAllLess(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 95.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLessEqual(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllEqual([110.0, 120.0, 130.0], z)
self.assertAllGreaterEqual(x, 95.0)
self.assertAllLessEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 95.0)
@test_util.run_deprecated_v1
def testAssertAllInRangeWithNonNumericValuesFails(self):
s1 = constant_op.constant("Hello, ", name="s1")
c = constant_op.constant([1 + 2j, -3 + 5j], name="c")
b = constant_op.constant([False, True], name="b")
with self.assertRaises(AssertionError):
self.assertAllInRange(s1, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(c, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(b, 0, 1)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRange(self):
x = constant_op.constant([10.0, 15.0], name="x")
self.assertAllInRange(x, 10, 15)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_lower_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_upper_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(
x, 10, 15, open_lower_bound=True, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeErrorMessageEllipses(self):
x_init = np.array([[10.0, 15.0]] * 12)
x = constant_op.constant(x_init, name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 5, 10)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeDetectsNaNs(self):
x = constant_op.constant(
[[np.nan, 0.0], [np.nan, np.inf], [np.inf, np.nan]], name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 0.0, 2.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeWithInfinities(self):
x = constant_op.constant([10.0, np.inf], name="x")
self.assertAllInRange(x, 10, np.inf)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, np.inf, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInSet(self):
b = constant_op.constant([True, False], name="b")
x = constant_op.constant([13, 37], name="x")
self.assertAllInSet(b, [False, True])
self.assertAllInSet(b, (False, True))
self.assertAllInSet(b, {False, True})
self.assertAllInSet(x, [0, 13, 37, 42])
self.assertAllInSet(x, (0, 13, 37, 42))
self.assertAllInSet(x, {0, 13, 37, 42})
with self.assertRaises(AssertionError):
self.assertAllInSet(b, [False])
with self.assertRaises(AssertionError):
self.assertAllInSet(x, (42,))
@test_util.run_deprecated_v1
def testRandomSeed(self):
    # Call setUp again for WithCApi case (since it makes a new default graph
# after setup).
# TODO(skyewm): remove this when C API is permanently enabled.
self.setUp()
a = random.randint(1, 1000)
a_np_rand = np.random.rand(1)
with self.test_session():
a_rand = random_ops.random_normal([1]).eval()
# ensure that randomness in multiple testCases is deterministic.
self.setUp()
b = random.randint(1, 1000)
b_np_rand = np.random.rand(1)
with self.test_session():
b_rand = random_ops.random_normal([1]).eval()
self.assertEqual(a, b)
self.assertEqual(a_np_rand, b_np_rand)
self.assertEqual(a_rand, b_rand)
@test_util.run_in_graph_and_eager_modes
def test_callable_evaluate(self):
def model():
return resource_variable_ops.ResourceVariable(
name="same_name",
initial_value=1) + 1
with context.eager_mode():
self.assertEqual(2, self.evaluate(model))
@test_util.run_in_graph_and_eager_modes
def test_nested_tensors_evaluate(self):
expected = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
nested = {"a": constant_op.constant(1),
"b": constant_op.constant(2),
"nested": {"d": constant_op.constant(3),
"e": constant_op.constant(4)}}
self.assertEqual(expected, self.evaluate(nested))
def test_run_in_graph_and_eager_modes(self):
l = []
def inc(self, with_brackets):
del self # self argument is required by run_in_graph_and_eager_modes.
mode = "eager" if context.executing_eagerly() else "graph"
with_brackets = "with_brackets" if with_brackets else "without_brackets"
l.append((with_brackets, mode))
f = test_util.run_in_graph_and_eager_modes(inc)
f(self, with_brackets=False)
f = test_util.run_in_graph_and_eager_modes()(inc)
f(self, with_brackets=True)
self.assertEqual(len(l), 4)
self.assertEqual(set(l), {
("with_brackets", "graph"),
("with_brackets", "eager"),
("without_brackets", "graph"),
("without_brackets", "eager"),
})
def test_get_node_def_from_graph(self):
graph_def = graph_pb2.GraphDef()
node_foo = graph_def.node.add()
node_foo.name = "foo"
self.assertIs(test_util.get_node_def_from_graph("foo", graph_def), node_foo)
self.assertIsNone(test_util.get_node_def_from_graph("bar", graph_def))
def test_run_in_eager_and_graph_modes_test_class(self):
msg = "`run_in_graph_and_eager_modes` only supports test methods.*"
with self.assertRaisesRegexp(ValueError, msg):
@test_util.run_in_graph_and_eager_modes()
class Foo(object):
pass
del Foo # Make pylint unused happy.
def test_run_in_eager_and_graph_modes_skip_graph_runs_eager(self):
modes = []
def _test(self):
if not context.executing_eagerly():
self.skipTest("Skipping in graph mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["eager"])
def test_run_in_eager_and_graph_modes_skip_eager_runs_graph(self):
modes = []
def _test(self):
if context.executing_eagerly():
self.skipTest("Skipping in eager mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["graph"])
@test_util.run_deprecated_v1
def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
modes = []
mode_name = lambda: "eager" if context.executing_eagerly() else "graph"
class ExampleTest(test_util.TensorFlowTestCase):
def runTest(self):
pass
def setUp(self):
modes.append("setup_" + mode_name())
@test_util.run_in_graph_and_eager_modes
def testBody(self):
modes.append("run_" + mode_name())
e = ExampleTest()
e.setUp()
e.testBody()
self.assertEqual(modes[0:2], ["setup_graph", "run_graph"])
self.assertEqual(modes[2:], ["setup_eager", "run_eager"])
@parameterized.named_parameters(dict(testcase_name="argument",
arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_parameterized_keyword(self, arg):
self.assertEqual(arg, True)
def test_build_as_function_and_v1_graph(self):
    class GraphModeAndFunctionTest(parameterized.TestCase):
def __init__(inner_self): # pylint: disable=no-self-argument
        super(GraphModeAndFunctionTest, inner_self).__init__()
inner_self.graph_mode_tested = False
inner_self.inside_function_tested = False
def runTest(self):
del self
@test_util.build_as_function_and_v1_graph
def test_modes(inner_self): # pylint: disable=no-self-argument
is_building_function = ops.get_default_graph().building_function
if is_building_function:
self.assertFalse(inner_self.inside_function_tested)
inner_self.inside_function_tested = True
else:
self.assertFalse(inner_self.graph_mode_tested)
inner_self.graph_mode_tested = True
    test_object = GraphModeAndFunctionTest()
test_object.test_modes_v1_graph()
test_object.test_modes_function()
self.assertTrue(test_object.graph_mode_tested)
self.assertTrue(test_object.inside_function_tested)
# Its own test case to reproduce variable sharing issues which only pop up when
# setUp() is overridden and super() is not called.
class GraphAndEagerNoVariableSharing(test_util.TensorFlowTestCase):
def setUp(self):
pass # Intentionally does not call TensorFlowTestCase's super()
@test_util.run_in_graph_and_eager_modes
def test_no_variable_sharing(self):
variable_scope.get_variable(
name="step_size",
initializer=np.array(1e-5, np.float32),
use_resource=True,
trainable=False)
class GarbageCollectionTest(test_util.TensorFlowTestCase):
def test_no_reference_cycle_decorator(self):
class ReferenceCycleTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_garbage_created
def test_has_cycle(self):
a = []
a.append(a)
@test_util.assert_no_garbage_created
def test_has_no_cycle(self):
pass
with self.assertRaises(AssertionError):
ReferenceCycleTest().test_has_cycle()
ReferenceCycleTest().test_has_no_cycle()
@test_util.run_in_graph_and_eager_modes
def test_no_leaked_tensor_decorator(self):
class LeakedTensorTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_new_tensors
def test_has_leak(self):
self.a = constant_op.constant([3.], name="leak")
@test_util.assert_no_new_tensors
def test_has_no_leak(self):
constant_op.constant([3.], name="no-leak")
with self.assertRaisesRegexp(AssertionError, "Tensors not deallocated"):
LeakedTensorTest().test_has_leak()
LeakedTensorTest().test_has_no_leak()
def test_no_new_objects_decorator(self):
class LeakedObjectTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
inner_self.accumulation = []
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_leak(self):
self.accumulation.append([1.])
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_no_leak(self):
self.not_accumulating = [1.]
with self.assertRaises(AssertionError):
LeakedObjectTest().test_has_leak()
LeakedObjectTest().test_has_no_leak()
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/test_util_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor-like objects that are composed from tf.Tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util import nest
@six.add_metaclass(abc.ABCMeta)
class CompositeTensor(object):
"""Abstract base class for Tensor-like objects that are composed from Tensors.
Each `CompositeTensor` can be decomposed into a structured collection of
component `tf.Tensor`s, and reconstructed from those components.
The `tensorflow.python.util.nest` module has support for treating composite
tensors as structure, which makes it easy to flatten and reconstruct
composite tensors (or larger structures that contain composite tensors).
E.g.:
```python
ct = ... # Create a composite tensor.
flat_list_of_tensors = nest.flatten(ct, expand_composites=True)
transformed_list_of_tensors = ... # do something with the flat tensors.
result = nest.pack_sequence_as(ct, transformed_list_of_tensors,
expand_composites=True)
```
"""
@abc.abstractproperty
def _type_spec(self):
"""A `TypeSpec` describing the type of this value."""
raise NotImplementedError("%s._type_spec()" % type(self).__name__)
# Deprecated -- use self._type_spec._to_components(self) instead.
# TODO(b/133606651) Remove all callers and then delete this method.
def _to_components(self):
"""Decomposes this composite tensor into its component tensors.
Returns:
A nested structure of `tf.Tensor`s and `CompositeTensor`s that can be
used to reconstruct this composite tensor (along with metadata returned
by `_component_metadata`).
"""
return self._type_spec._to_components(self) # pylint: disable=protected-access
# Deprecated -- use self._type_spec instead.
# TODO(b/133606651) Remove all callers and then delete this method.
def _component_metadata(self):
"""Returns any non-tensor metadata needed to reconstruct a composite tensor.
Returns:
A nested structure of metadata that can be used to reconstruct this
composite tensor (along with the tensors returned by `_to_components`).
"""
return self._type_spec
# Deprecated -- use metadata._from_components(components) instead.
# TODO(b/133606651) Remove all callers and then delete this method.
@staticmethod
def _from_components(components, metadata):
"""Creates a composite tensor of type `cls` from components.
Args:
components: A nested structure whose values are `tf.Tensor`s or
`tf.CompositeTensor`s (as returned by `_to_components`).
metadata: A nested structure containing any additional metadata needed to
        reconstruct the composite tensor (as returned by `_component_metadata`).
Returns:
A `CompositeTensor` of type `cls`.
"""
return metadata._from_components(components) # pylint: disable=protected-access
def _shape_invariant_to_type_spec(self, shape):
"""Returns a TypeSpec given a shape invariant (used by `tf.while_loop`).
Args:
shape: A `tf.TensorShape` object. The shape invariant for this
`CompositeTensor`, or `None` if a default shape invariant should be
used (based on the value of this `CompositeTensor`).
Returns:
A nested structure whose values are `tf.TensorShape` objects, specifying
the shape invariants for the tensors that comprise this `CompositeTensor`.
"""
# New TypeSpec subclasses generally do not need to implement this --
# this method is used for backwards compatibility. Users of tf.while_loop
# can specify a type by passing in TypeSpec instead.
raise NotImplementedError("%s._shape_invariant_to_type_spec"
% type(self).__name__)
# TODO(b/133606651) Remove this property, since it's not clear what it should
# return if a CompositeTensor has a mix of graph and non-graph components.
  # Update users to perform an appropriate check themselves.
@property
def _is_graph_tensor(self):
"""Returns True if this tensor's components belong to a TF graph."""
components = self._type_spec._to_components(self) # pylint: disable=protected-access
tensors = nest.flatten(components, expand_composites=True)
return any(hasattr(t, "graph") for t in tensors)
def _consumers(self):
"""Returns a list of `Operation`s that consume this `CompositeTensor`.
Returns:
A list of `Operation`s.
Raises:
RuntimeError: If this method is called while executing eagerly.
"""
consumers = nest.flatten([
component.consumers()
for component in self._to_components()
if getattr(component, "graph", None) is not None
])
return list(set(consumers))
pywrap_tensorflow.RegisterType("CompositeTensor", CompositeTensor)
def replace_composites_with_components(structure):
"""Recursively replaces CompositeTensors with their components.
Args:
structure: A `nest`-compatible structure, possibly containing composite
tensors.
Returns:
A copy of `structure`, where each composite tensor has been replaced by
its components. The result will contain no composite tensors.
Note that `nest.flatten(replace_composites_with_components(structure))`
returns the same value as `nest.flatten(structure)`.
"""
if isinstance(structure, CompositeTensor):
return replace_composites_with_components(structure._to_components()) # pylint: disable=protected-access
elif not nest.is_sequence(structure):
return structure
else:
return nest.map_structure(replace_composites_with_components, structure,
expand_composites=False)
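# A minimal usage sketch (illustrative only, not part of the original module):
# decomposing a structure that contains a SparseTensor. The local import and
# the `_example_` helper name are assumptions added for demonstration.
def _example_replace_composites():
  from tensorflow.python.framework import sparse_tensor  # assumed available
  st = sparse_tensor.SparseTensor(
      indices=[[0, 0]], values=[1], dense_shape=[1, 1])
  structure = {"st": st, "count": 3}
  # The SparseTensor is recursively replaced by its component tensors; plain
  # values such as `count` pass through unchanged.
  return replace_composites_with_components(structure)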
# TODO(edloper): Can we replace convert_to_tensor_or_xyz with just
# convert_to_tensor_or_composite? Alternatively, should composite tensors
# register a dispatch override for tf.convert_to_tensor?
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/composite_tensor.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for reading/writing graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
from google.protobuf import text_format
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.tf_export import tf_export
@tf_export('io.write_graph', v1=['io.write_graph', 'train.write_graph'])
def write_graph(graph_or_graph_def, logdir, name, as_text=True):
"""Writes a graph proto to a file.
The graph is written as a text proto unless `as_text` is `False`.
```python
v = tf.Variable(0, name='my_variable')
sess = tf.compat.v1.Session()
tf.io.write_graph(sess.graph_def, '/tmp/my-model', 'train.pbtxt')
```
or
```python
v = tf.Variable(0, name='my_variable')
sess = tf.compat.v1.Session()
tf.io.write_graph(sess.graph, '/tmp/my-model', 'train.pbtxt')
```
Args:
graph_or_graph_def: A `Graph` or a `GraphDef` protocol buffer.
logdir: Directory where to write the graph. This can refer to remote
filesystems, such as Google Cloud Storage (GCS).
name: Filename for the graph.
as_text: If `True`, writes the graph as an ASCII proto.
Returns:
The path of the output proto file.
"""
if isinstance(graph_or_graph_def, ops.Graph):
graph_def = graph_or_graph_def.as_graph_def()
else:
graph_def = graph_or_graph_def
  # GCS does not have the concept of a directory at the moment.
if not file_io.file_exists(logdir) and not logdir.startswith('gs:'):
file_io.recursive_create_dir(logdir)
path = os.path.join(logdir, name)
if as_text:
file_io.atomic_write_string_to_file(path,
text_format.MessageToString(
graph_def, float_format=''))
else:
file_io.atomic_write_string_to_file(path, graph_def.SerializeToString())
return path
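# A minimal usage sketch (illustrative only, not part of the original module):
# writing a graph as a binary proto instead of text. The helper name and
# output path are assumptions added for demonstration.
def _example_write_binary_graph(logdir='/tmp/my-model'):
  with ops.Graph().as_default() as g:
    pass  # Graph construction would go here.
  return write_graph(g, logdir, 'train.pb', as_text=False)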
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/graph_io.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Utility to convert a Graph to a FunctionDef."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import op_def_registry
def _make_argname_from_tensor_name(name):
return re.sub(":0$", "", name).replace(":", "_o")
def _tensor_to_argdef(t, name=None, used_names=None):
"""Convert tensor t to an argdef, with a specified name or a unique name."""
arg = op_def_pb2.OpDef.ArgDef()
if name is None:
arg.name = _make_argname_from_tensor_name(t.name)
if used_names is not None:
if arg.name in used_names:
i = 0
while True:
new_name = "%s_U%d" % (arg.name, i)
if new_name not in used_names:
arg.name = new_name
break
i += 1
used_names.add(arg.name)
else:
arg.name = name
arg.type = t.dtype.as_datatype_enum
return arg
def _is_in_placeholders(op, func_arg_placeholders):
"""Checks whether any output of this op is in func_arg_placeholders."""
return op.values() and any(x.name in func_arg_placeholders
for x in op.values())
def _get_node_def(op):
return op.node_def # pylint: disable=protected-access
def _get_op_def(op):
return op.op_def or op_def_registry.get_registered_ops()[op.type]
def _create_input_dict(function_graph,
func_arg_placeholders,
initial_value=None):
"""Create a mapping from graph tensor names to function tensor names."""
if initial_value is None:
input_dict = {}
else:
input_dict = dict(initial_value)
for op in function_graph.get_operations():
if _is_in_placeholders(op, func_arg_placeholders):
input_dict[op.name] = op.name
else:
op_def = _get_op_def(op)
attrs = _get_node_def(op).attr
o = 0
for arg_def in op_def.output_arg:
if arg_def.number_attr:
num = attrs[arg_def.number_attr].i
elif arg_def.type_list_attr:
num = len(attrs[arg_def.type_list_attr].list.type)
else:
num = 1
for i in range(num):
result = "%s:%s:%d" % (op.name, arg_def.name, i)
input_dict[op.values()[o].name] = result
if o == 0:
input_dict[op.name] = result
o += 1
return input_dict
def _add_op_node(op, func, input_dict):
"""Converts an op to a function def node and add it to `func`."""
# Add an entry in func.node_def
# Note that extend() makes a copy in this case, see:
# https://developers.google.com/protocol-buffers/docs/reference/python-generated#repeated-message-fields
func.node_def.extend([_get_node_def(op)])
node_def = func.node_def[-1]
for i in range(len(node_def.input)):
if not node_def.input[i].startswith("^"):
assert node_def.input[i] in input_dict, ("%s missing from %s" %
(node_def.input[i],
input_dict.items()))
node_def.input[i] = input_dict[node_def.input[i]]
# The function is stateful if any of its operations are stateful.
# NOTE(mrry): The "Const" node typically does not have an `OpDef` associated
# with it, so we assume any nodes without an `OpDef` are stateless.
# TODO(skyewm): Remove the `is not None` test after we transition to the C
# API.
if op.op_def is not None and op.op_def.is_stateful:
func.signature.is_stateful = True
def graph_to_function_def(graph, operations, inputs, outputs, out_names=None):
"""Returns `graph` as a `FunctionDef` protocol buffer.
This method creates a [`FunctionDef`](
https://www.tensorflow.org/code/tensorflow/core/framework/function.proto)
protocol buffer that contains all the ops in `operations`. The
operations become the body of the function.
The arguments `inputs` and `outputs` will be listed as the inputs
and outputs tensors of the function. They must be lists of
tensors present in the graph. The lists can optionally be empty.
Args:
graph: Graph.
operations: the operations to put in the function. Must be a subset of
the operations in the graph.
inputs: List of tensors. Inputs to the function.
outputs: List of tensors. Outputs of the function.
out_names: Optional list of string names for the outputs.
Returns:
A FunctionDef protocol buffer.
Raises:
    ValueError: if out_names is specified and has the wrong length.
"""
func = function_pb2.FunctionDef()
func.signature.name = "_"
used_names = set()
func.signature.input_arg.extend(
[_tensor_to_argdef(i, used_names=used_names) for i in inputs])
# Initializes the input map with all placeholder input tensors.
initial_dict = {}
for o, m in zip(inputs, func.signature.input_arg):
initial_dict[o.name] = m.name
if out_names is None:
used_names = set()
func.signature.output_arg.extend(
[_tensor_to_argdef(o, used_names=used_names) for o in outputs])
elif len(outputs) != len(out_names):
raise errors_impl.InvalidArgumentError(
None, None,
"output names must be either empty or equal in size to outputs. "
"output names size = %d outputs size = %d" %
(len(out_names), len(outputs)))
elif len(out_names) != len(set(out_names)):
raise ValueError(
"Must not have duplicates in out_names: %s" % ", ".join(out_names))
else:
func.signature.output_arg.extend(
[_tensor_to_argdef(o, name=n) for o, n in zip(outputs, out_names)])
func_arg_placeholders = set([i.name for i in inputs])
input_dict = _create_input_dict(graph, func_arg_placeholders,
initial_value=initial_dict)
for op in operations:
if _is_in_placeholders(op, func_arg_placeholders):
continue
_add_op_node(op, func, input_dict)
if out_names is None:
for index, o in enumerate(outputs):
k = func.signature.output_arg[index].name
func.ret[k] = input_dict[o.name]
else:
for o, n in zip(outputs, out_names):
func.ret[n] = input_dict[o.name]
return func
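# A minimal usage sketch (illustrative only, not part of the original module):
# converting a tiny graph into a FunctionDef. The local imports and the helper
# name are assumptions added for demonstration.
def _example_graph_to_function_def():
  from tensorflow.python.framework import dtypes
  from tensorflow.python.framework import ops
  from tensorflow.python.ops import array_ops
  from tensorflow.python.ops import math_ops
  with ops.Graph().as_default() as g:
    x = array_ops.placeholder(dtypes.float32, name="x")
    y = math_ops.square(x, name="y")
  # Placeholder inputs are skipped automatically; every other op becomes part
  # of the function body.
  return graph_to_function_def(g, g.get_operations(), inputs=[x],
                               outputs=[y], out_names=["out"])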
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/graph_to_function_def.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MetaGraph and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from distutils import version as distutils_version # pylint: disable=g-bad-import-order
import os.path
import re
import six
from google.protobuf.any_pb2 import Any
from google.protobuf import text_format
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import versions
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
# Prefix to be added to unbound input names so they are easily identifiable.
_UNBOUND_INPUT_PREFIX = "$unbound_inputs_"
# List of collections that didn't register proto functions; as a result, in
# a previously exported meta_graph the items are of a different data type.
_COMPAT_COLLECTION_LIST = [ops.GraphKeys.LOCAL_VARIABLES,
ops.GraphKeys.MODEL_VARIABLES,
ops.GraphKeys.METRIC_VARIABLES]
def _node_def(from_node_def, export_scope, unbound_inputs, clear_devices=False):
"""Create a `NodeDef` proto with export_scope stripped.
Args:
from_node_def: A `node_def_pb2.NodeDef` protocol buffer.
export_scope: A `string` representing the name scope to remove.
unbound_inputs: An array of unbound input names if they exist.
clear_devices: Boolean which controls whether to clear device information
from node_def. Default false.
Returns:
A `node_def_pb2.NodeDef` protocol buffer.
"""
node_def = copy.deepcopy(from_node_def)
for i, v in enumerate(node_def.input):
if (export_scope and
not node_def.input[i].lstrip("^").startswith(export_scope)):
# Adds "$unbound_inputs_" prefix to the unbound name so they are easily
# identifiable.
node_def.input[i] = re.sub(r"([\^]|^)(.*)",
r"\1" + _UNBOUND_INPUT_PREFIX + r"\2",
compat.as_str(v))
unbound_inputs.append(node_def.input[i])
else:
node_def.input[i] = ops.strip_name_scope(v, export_scope)
node_def.name = compat.as_bytes(
ops.strip_name_scope(from_node_def.name, export_scope))
for k, v in six.iteritems(from_node_def.attr):
if k == "_class":
new_s = [compat.as_bytes(
ops.strip_name_scope(s, export_scope)) for s in v.list.s
if not export_scope or
compat.as_str(s).split("@")[1].startswith(export_scope)]
node_def.attr[k].CopyFrom(attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=new_s)))
elif node_def.op in ("Enter", "RefEnter") and k == "frame_name":
if not export_scope or compat.as_str(v.s).startswith(export_scope):
new_s = compat.as_bytes(ops.strip_name_scope(v.s, export_scope))
node_def.attr[k].CopyFrom(attr_value_pb2.AttrValue(s=new_s))
else:
node_def.attr[k].CopyFrom(v)
if clear_devices:
node_def.device = ""
return node_def
def _read_file(filename):
"""Reads a file containing `GraphDef` and returns the protocol buffer.
Args:
filename: `graph_def` filename including the path.
Returns:
A `GraphDef` protocol buffer.
Raises:
IOError: If the file doesn't exist, or cannot be successfully parsed.
"""
graph_def = graph_pb2.GraphDef()
if not file_io.file_exists(filename):
raise IOError("File %s does not exist." % filename)
# First try to read it as a binary file.
file_content = file_io.FileIO(filename, "rb").read()
try:
graph_def.ParseFromString(file_content)
return graph_def
except Exception: # pylint: disable=broad-except
pass
# Next try to read it as a text file.
try:
    text_format.Merge(file_content.decode("utf-8"), graph_def)
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
return graph_def
def ops_used_by_graph_def(graph_def):
"""Collect the list of ops used by a graph.
Does not validate that the ops are all registered.
Args:
graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.
Returns:
A list of strings, each naming an op used by the graph.
"""
# Map function names to definitions
name_to_function = {}
for fun in graph_def.library.function:
name_to_function[fun.signature.name] = fun
# Collect the list of op names. Since functions can reference functions, we
# need a recursive traversal.
used_ops = set() # Includes both primitive ops and functions
functions_to_process = [] # A subset of used_ops
def mark_op_as_used(op):
if op not in used_ops and op in name_to_function:
functions_to_process.append(name_to_function[op])
used_ops.add(op)
for node in graph_def.node:
mark_op_as_used(node.op)
while functions_to_process:
fun = functions_to_process.pop()
for node in fun.node_def:
mark_op_as_used(node.op)
return [op for op in used_ops if op not in name_to_function]
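# A minimal usage sketch (illustrative only): listing the op names used by the
# default graph. The helper name is an assumption added for demonstration.
def _example_ops_used_by_default_graph():
  graph_def = ops.get_default_graph().as_graph_def()
  return sorted(ops_used_by_graph_def(graph_def))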
def stripped_op_list_for_graph(graph_def):
"""Collect the stripped OpDefs for ops used by a graph.
This function computes the `stripped_op_list` field of `MetaGraphDef` and
similar protos. The result can be communicated from the producer to the
consumer, which can then use the C++ function
`RemoveNewDefaultAttrsFromGraphDef` to improve forwards compatibility.
Args:
graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.
Returns:
An `OpList` of ops used by the graph.
Raises:
ValueError: If an unregistered op is used.
"""
# This is the Python equivalent of StrippedOpListForGraph in C++.
# Unfortunately, since the Python op registry can differ from that in C++, we
# can't remove the duplication using swig (at least naively).
# TODO(irving): Support taking graphs directly.
used_ops = ops_used_by_graph_def(graph_def)
# Verify that all used ops are registered.
registered_ops = op_def_registry.get_registered_ops()
# These internal ops used by functions are not registered, so we need to
  # whitelist them. TODO(irving): Do something better here.
op_whitelist = ("_Arg", "_Retval", "_ListToArray", "_ArrayToList")
for op in used_ops:
if op not in registered_ops and op not in op_whitelist:
raise ValueError("Op %s is used by the graph, but is not registered" % op)
# Build the stripped op list in sorted order
return op_def_pb2.OpList(op=[registered_ops[op] for op in sorted(used_ops)
if op in registered_ops])
def _get_kind_name(item):
"""Returns the kind name in CollectionDef.
Args:
item: A data item.
Returns:
The string representation of the kind in CollectionDef.
"""
if isinstance(item, (six.string_types, six.binary_type)):
kind = "bytes_list"
elif isinstance(item, six.integer_types):
kind = "int64_list"
elif isinstance(item, float):
kind = "float_list"
elif isinstance(item, Any):
kind = "any_list"
else:
kind = "node_list"
return kind
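# For example (illustrative): "foo" -> "bytes_list", 3 -> "int64_list",
# 1.0 -> "float_list", and a graph element falls through to "node_list".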
SAVE_AND_RESTORE_OPS = ["SaveV2",
"Save", "SaveSlice",
"LegacySave", "LegacySaveSlice",
"RestoreV2",
"Restore", "RestoreSlice",
"LegacyRestore", "LegacyRestoreSlice"]
def _op_name(tensor_name):
"""Extract the Op name from a Tensor name.
The Op name is everything before a colon, if present,
not including any ^ prefix denoting a control dependency.
Args:
tensor_name: the full name of a Tensor in the graph.
Returns:
The name of the Op of which the given Tensor is an output.
Raises:
ValueError: if tensor_name is None or empty.
"""
if not tensor_name:
raise ValueError("Tensor name cannot be empty or None.")
# Control dependency inputs start with ^.
if tensor_name.startswith("^"):
tensor_name = tensor_name[1:]
if ":" in tensor_name:
op_name, _ = tensor_name.split(":")
return op_name
return tensor_name
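# For example (illustrative): "foo/bar:0" -> "foo/bar", "^foo/bar" -> "foo/bar",
# and "foo" -> "foo".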
def _get_scope(node_name):
"""Extract the scope name from a node name.
The scope name is everything before the final slash,
not including any ^ prefix denoting a control dependency.
Args:
node_name: the full name of an Op or a Tensor in the graph.
Returns:
The deepest named scope containing the node.
Raises:
ValueError: if tensor_name is None or empty
"""
if not node_name:
raise ValueError("Node name cannot be empty or None.")
# Control dependency inputs start with ^.
if node_name.startswith("^"):
node_name = node_name[1:]
if "/" in node_name:
scope, _ = node_name.rsplit("/", 1)
return scope
return ""
def _find_extraneous_saver_nodes(graph_def, saver_def):
"""Identifies any nodes in the graph_def related to unused Savers.
This approach assumes that each Saver is cleanly isolated in its own name
scope, so we need only identify the scopes associated with extraneous Savers
and return all the nodes in those scopes.
Args:
graph_def: a GraphDef proto to evaluate.
saver_def: a SaverDef proto referencing Save/Restore ops to be retained.
Returns:
An iterable of node names that may be safely omitted.
"""
# TODO(soergel): confirm that the assumption of scope isolation is valid.
# If not, we need to walk up the graph from any restore_all nodes, and walk
# down the graph from any Save/Restore nodes. I drafted that approach too,
# but it seems unnecessarily complex given the name scope solution.
# load the graph DAG in minimal form, without initializing a full Graph object
nodes = {node_def.name:
(set([_op_name(x) for x in node_def.input]), node_def.op)
for node_def in graph_def.node}
retain_scope_save = None
retain_scope_restore = None
# It's possible to have no saver if the graph has no Variables
if saver_def is not None:
save_op_name = _op_name(saver_def.save_tensor_name)
restore_op_name = _op_name(saver_def.restore_op_name)
# The save and restore scopes should always be the same, but if they differ
# for some reason, we retain them both to be safe.
retain_scope_restore = _get_scope(restore_op_name) + "/"
retain_scope_save = _get_scope(save_op_name) + "/"
all_saver_node_names = set([name for name, (_, op) in nodes.items()
if op in SAVE_AND_RESTORE_OPS])
all_saver_scopes = (set([_get_scope(x) for x in all_saver_node_names])
- all_saver_node_names)
all_saver_scopes = set([x + "/" for x in all_saver_scopes])
extraneous_scopes = all_saver_scopes - set([retain_scope_save,
retain_scope_restore])
extraneous_node_names = set()
for name, _ in nodes.items():
for extraneous_scope in extraneous_scopes:
if name.startswith(extraneous_scope):
extraneous_node_names.add(name)
break
return extraneous_node_names
def _should_include_node(node_or_node_name, export_scope, exclude_nodes):
"""Returns `True` if a node should be included.
Args:
node_or_node_name: A node or `string` node name.
export_scope: `string`. Name scope under which to extract the subgraph. The
scope name will be stripped from the node definitions for easy import
later into new name scopes.
exclude_nodes: An iterable of nodes or `string` node names to omit from the
export, or None. Note no sanity-checking is done, so this list must be
carefully constructed to avoid producing an invalid graph.
Returns:
`True` if the node should be included.
"""
if not isinstance(node_or_node_name, six.string_types):
try:
node_name = node_or_node_name.name
except AttributeError:
# Keep the object that we don't know how to process.
return True
else:
node_name = node_or_node_name
if exclude_nodes and (node_or_node_name in exclude_nodes
or node_name in exclude_nodes):
return False
return (node_name.startswith(_UNBOUND_INPUT_PREFIX) or
(not export_scope or node_name.startswith(export_scope)))
def add_collection_def(meta_graph_def, key, graph=None,
export_scope=None, exclude_nodes=None,
override_contents=None):
"""Adds a collection to MetaGraphDef protocol buffer.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
key: One of the GraphKeys or user-defined string.
graph: The `Graph` from which to get collections.
export_scope: Optional `string`. Name scope to remove.
exclude_nodes: An iterable of nodes or `string` node names to omit from the
collection, or None.
override_contents: An iterable of values to place in the collection,
ignoring the current values (if set).
"""
if graph and not isinstance(graph, ops.Graph):
raise TypeError("graph must be of type Graph, not %s", type(graph))
if not isinstance(key, six.string_types) and not isinstance(key, bytes):
logging.warning("Only collections with string type keys will be "
"serialized. This key has %s", type(key))
return
# Sets graph to default graph if it's not passed in.
graph = graph or ops.get_default_graph()
if override_contents:
collection_list = override_contents
else:
collection_list = graph.get_collection(key)
# Remove nodes that should not be exported from the collection list.
collection_list = [x for x in collection_list if
_should_include_node(x, export_scope, exclude_nodes)]
if not collection_list:
return
try:
col_def = meta_graph_def.collection_def[key]
to_proto = ops.get_to_proto_function(key)
proto_type = ops.get_collection_proto_type(key)
if to_proto:
kind = "bytes_list"
for x in collection_list:
# Additional type check to make sure the returned proto is indeed
# what we expect.
proto = to_proto(x, export_scope=export_scope)
if proto:
assert isinstance(proto, proto_type)
getattr(col_def, kind).value.append(proto.SerializeToString())
else:
kind = _get_kind_name(collection_list[0])
if kind == "node_list":
for x in collection_list:
if not export_scope or x.name.startswith(export_scope):
getattr(col_def, kind).value.append(
ops.strip_name_scope(x.name, export_scope))
elif kind == "bytes_list":
# NOTE(opensource): This force conversion is to work around the fact
# that Python3 distinguishes between bytes and strings.
getattr(col_def, kind).value.extend(
[compat.as_bytes(x) for x in collection_list])
else:
getattr(col_def, kind).value.extend([x for x in collection_list])
except Exception as e: # pylint: disable=broad-except
logging.warning("Issue encountered when serializing %s.\n"
"Type is unsupported, or the types of the items don't "
"match field type in CollectionDef. Note this is a warning "
"and probably safe to ignore.\n%s", key, str(e))
if key in meta_graph_def.collection_def:
del meta_graph_def.collection_def[key]
return
def _is_default_attr_value(op_def, attr_name, attr_value):
"""Checks if given attribute matches the default value in the op def."""
for attr_def in op_def.attr:
if attr_def.name == attr_name:
if not attr_def.HasField("default_value"):
return False
# pywrap_tensorflow.EqualAttrValueWrapper returns an empty string
# if both arguments represent an equivalent AttrValue instance.
return not pywrap_tensorflow.EqualAttrValueWrapper(
attr_value.SerializeToString(),
attr_def.default_value.SerializeToString())
return False
def strip_graph_default_valued_attrs(meta_graph_def):
"""Strips default valued attributes for node defs in given MetaGraphDef.
This method also sets `meta_info_def.stripped_default_attrs` in the given
`MetaGraphDef` proto to True.
Args:
meta_graph_def: `MetaGraphDef` protocol buffer
Returns:
None.
"""
# Map function op names to their function definitions.
op_name_to_function = {}
for function_def in meta_graph_def.graph_def.library.function:
op_name_to_function[function_def.signature.name] = function_def
# Get all registered ops.
registered_ops = op_def_registry.get_registered_ops()
def _strip_node_default_valued_attrs(node_def):
"""Removes default valued attributes from a single node def."""
if node_def.op in op_name_to_function or node_def.op not in registered_ops:
return
op_def = registered_ops[node_def.op]
attrs_to_strip = set()
for attr_name, attr_value in node_def.attr.items():
if _is_default_attr_value(op_def, attr_name, attr_value):
attrs_to_strip.add(attr_name)
for attr in attrs_to_strip:
del node_def.attr[attr]
# Process all NodeDef instances in graph_def.
for node_def in meta_graph_def.graph_def.node:
_strip_node_default_valued_attrs(node_def)
# Process all NodeDef instances in graph_def.library.function.
for function_def in meta_graph_def.graph_def.library.function:
for function_node_def in function_def.node_def:
_strip_node_default_valued_attrs(function_node_def)
# Tell consumers of this graph that default valued attrs have been stripped.
meta_graph_def.meta_info_def.stripped_default_attrs = True
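# A minimal usage sketch (illustrative only): stripping default-valued attrs
# from a MetaGraphDef built from the default graph. The helper name is an
# assumption added for demonstration.
def _example_strip_default_attrs():
  meta_graph_def = create_meta_graph_def()
  strip_graph_default_valued_attrs(meta_graph_def)
  return meta_graph_def.meta_info_def.stripped_default_attrs  # Now True.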
def create_meta_graph_def(meta_info_def=None,
graph_def=None,
saver_def=None,
collection_list=None,
graph=None,
export_scope=None,
exclude_nodes=None,
clear_extraneous_savers=False,
strip_default_attrs=False):
# pylint: disable=line-too-long
"""Construct and returns a `MetaGraphDef` protocol buffer.
Args:
meta_info_def: `MetaInfoDef` protocol buffer.
graph_def: `GraphDef` protocol buffer.
saver_def: `SaverDef` protocol buffer.
collection_list: List of string keys to collect.
graph: The `Graph` to create `MetaGraphDef` out of.
export_scope: Optional `string`. Name scope to remove.
    exclude_nodes: An iterable of nodes or `string` node names to omit from all
      collections, or None.
clear_extraneous_savers: Remove any preexisting SaverDefs from the SAVERS
collection. Note this method does not alter the graph, so any
extraneous Save/Restore ops should have been removed already, as needed.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
Returns:
MetaGraphDef protocol buffer.
Raises:
TypeError: If the arguments are not of the correct proto buffer type.
"""
# pylint: enable=line-too-long
# Type check.
  if graph and not isinstance(graph, ops.Graph):
    raise TypeError("graph must be of type Graph, not %s" % type(graph))
  if meta_info_def and not isinstance(meta_info_def,
                                      meta_graph_pb2.MetaGraphDef.MetaInfoDef):
    raise TypeError("meta_info_def must be of type MetaInfoDef, not %s" %
                    type(meta_info_def))
  if graph_def and not isinstance(graph_def, graph_pb2.GraphDef):
    raise TypeError("graph_def must be of type GraphDef, not %s" %
                    type(graph_def))
  if saver_def and not isinstance(saver_def, saver_pb2.SaverDef):
    raise TypeError("saver_def must be of type SaverDef, not %s" %
                    type(saver_def))
# Sets graph to default graph if it's not passed in.
graph = graph or ops.get_default_graph()
# Creates a MetaGraphDef proto.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
# Adds meta_info_def.
if not meta_info_def:
meta_info_def = meta_graph_pb2.MetaGraphDef.MetaInfoDef()
# Set the tf version strings to the current tf build.
meta_info_def.tensorflow_version = versions.__version__
meta_info_def.tensorflow_git_version = versions.__git_version__
meta_graph_def.meta_info_def.MergeFrom(meta_info_def)
# Adds graph_def or the default.
if not graph_def:
meta_graph_def.graph_def.MergeFrom(graph.as_graph_def(add_shapes=True))
else:
meta_graph_def.graph_def.MergeFrom(graph_def)
# Fills in meta_info_def.stripped_op_list using the ops from graph_def.
# pylint: disable=g-explicit-length-test
if len(meta_graph_def.meta_info_def.stripped_op_list.op) == 0:
meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(
stripped_op_list_for_graph(meta_graph_def.graph_def))
# pylint: enable=g-explicit-length-test
# Strip default valued attributes in graph_def.
if strip_default_attrs:
strip_graph_default_valued_attrs(meta_graph_def)
# Adds saver_def.
if saver_def:
meta_graph_def.saver_def.MergeFrom(saver_def)
# Adds collection_list.
if collection_list is not None:
clist = collection_list
else:
clist = graph.get_all_collection_keys()
for ctype in clist:
if clear_extraneous_savers and ctype == ops.GraphKeys.SAVERS:
# Avoid importing Saver here
from_proto = ops.get_from_proto_function(ctype)
add_collection_def(meta_graph_def, ctype,
graph=graph,
export_scope=export_scope,
exclude_nodes=exclude_nodes,
override_contents=[from_proto(saver_def)])
else:
add_collection_def(meta_graph_def, ctype,
graph=graph,
export_scope=export_scope,
exclude_nodes=exclude_nodes)
return meta_graph_def
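# Illustrative usage sketch (not part of the original module): building a
# MetaGraphDef directly with the helper above; the module path is assumed
# from the TF 1.x source tree.
import tensorflow as tf
from tensorflow.python.framework import meta_graph as _meta_graph
with tf.Graph().as_default() as _g:
  tf.Variable([1.0], name="v")
  _mgd = _meta_graph.create_meta_graph_def(graph=_g)
  print(_mgd.meta_info_def.tensorflow_version)  # e.g. "1.15.5"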
def read_meta_graph_file(filename):
"""Reads a file containing `MetaGraphDef` and returns the protocol buffer.
Args:
filename: `meta_graph_def` filename including the path.
Returns:
A `MetaGraphDef` protocol buffer.
Raises:
IOError: If the file doesn't exist, or cannot be successfully parsed.
"""
meta_graph_def = meta_graph_pb2.MetaGraphDef()
if not file_io.file_exists(filename):
raise IOError("File %s does not exist." % filename)
# First try to read it as a binary file.
file_content = file_io.FileIO(filename, "rb").read()
try:
meta_graph_def.ParseFromString(file_content)
return meta_graph_def
except Exception: # pylint: disable=broad-except
pass
# Next try to read it as a text file.
try:
text_format.Merge(file_content.decode("utf-8"), meta_graph_def)
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
return meta_graph_def
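# Illustrative usage sketch (not part of the original module): round-tripping
# a MetaGraphDef through a text-format file; the path is illustrative.
import os
import tempfile
import tensorflow as tf
from tensorflow.python.framework import meta_graph as _meta_graph
_path = os.path.join(tempfile.mkdtemp(), "model.meta")
with tf.Graph().as_default():
  tf.constant(1.0, name="c")
  tf.train.export_meta_graph(filename=_path, as_text=True)
print([n.name for n in _meta_graph.read_meta_graph_file(_path).graph_def.node])
# -> ['c']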
def import_scoped_meta_graph(meta_graph_or_file,
clear_devices=False,
graph=None,
import_scope=None,
input_map=None,
unbound_inputs_col_name="unbound_inputs",
restore_collections_predicate=(lambda key: True)):
"""Recreates a `Graph` saved in a `MetaGraphDef` proto.
This function takes a `MetaGraphDef` protocol buffer as input. If
  the argument is a file containing a `MetaGraphDef` protocol buffer,
it constructs a protocol buffer from the file content. The function
then adds all the nodes from the `graph_def` field to the
current graph, recreates the desired collections, and returns a dictionary of
all the Variables imported into the name scope.
In combination with `export_scoped_meta_graph()`, this function can be used to
* Serialize a graph along with other Python objects such as `QueueRunner`,
`Variable` into a `MetaGraphDef`.
* Restart training from a saved graph and checkpoints.
* Run inference from a saved graph and checkpoints.
Args:
meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
the path) containing a `MetaGraphDef`.
clear_devices: Boolean which controls whether to clear device information
from graph_def. Default false.
graph: The `Graph` to import into. If `None`, use the default graph.
import_scope: Optional `string`. Name scope into which to import the
subgraph. If `None`, the graph is imported to the root name scope.
input_map: A dictionary mapping input names (as strings) in `graph_def` to
`Tensor` objects. The values of the named input tensors in the imported
graph will be re-mapped to the respective `Tensor` values.
unbound_inputs_col_name: Collection name for looking up unbound inputs.
    restore_collections_predicate: A predicate on collection names. A
      collection named c (i.e. whose key is c) will be restored iff
1) `restore_collections_predicate(c)` is True, and
2) `c != unbound_inputs_col_name`.
Returns:
A dictionary of all the `Variables` imported into the name scope.
Raises:
ValueError: If the graph_def contains unbound inputs.
"""
return import_scoped_meta_graph_with_return_elements(
meta_graph_or_file, clear_devices, graph, import_scope, input_map,
unbound_inputs_col_name, restore_collections_predicate)[0]
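# Illustrative usage sketch (not part of the original module): exporting a
# name-scoped subgraph and re-importing it under a new scope (uses
# `export_scoped_meta_graph`, defined later in this module).
import tensorflow as tf
from tensorflow.python.framework import meta_graph as _meta_graph
with tf.Graph().as_default():
  with tf.variable_scope("src"):
    tf.get_variable("w", shape=[2])
  _mgd, _ = _meta_graph.export_scoped_meta_graph(export_scope="src")
with tf.Graph().as_default():
  _vars = _meta_graph.import_scoped_meta_graph(_mgd, import_scope="dst")
  print(sorted(_vars))  # e.g. ['w:0'], mapped to variables under "dst/"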
def import_scoped_meta_graph_with_return_elements(
meta_graph_or_file,
clear_devices=False,
graph=None,
import_scope=None,
input_map=None,
unbound_inputs_col_name="unbound_inputs",
restore_collections_predicate=(lambda key: True),
return_elements=None):
"""Imports graph from `MetaGraphDef` and returns vars and return elements.
This function takes a `MetaGraphDef` protocol buffer as input. If
  the argument is a file containing a `MetaGraphDef` protocol buffer,
it constructs a protocol buffer from the file content. The function
then adds all the nodes from the `graph_def` field to the
current graph, recreates the desired collections, and returns a dictionary of
all the Variables imported into the name scope.
In combination with `export_scoped_meta_graph()`, this function can be used to
* Serialize a graph along with other Python objects such as `QueueRunner`,
`Variable` into a `MetaGraphDef`.
* Restart training from a saved graph and checkpoints.
* Run inference from a saved graph and checkpoints.
Args:
meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
the path) containing a `MetaGraphDef`.
clear_devices: Boolean which controls whether to clear device information
from graph_def. Default false.
graph: The `Graph` to import into. If `None`, use the default graph.
import_scope: Optional `string`. Name scope into which to import the
subgraph. If `None`, the graph is imported to the root name scope.
input_map: A dictionary mapping input names (as strings) in `graph_def` to
`Tensor` objects. The values of the named input tensors in the imported
graph will be re-mapped to the respective `Tensor` values.
unbound_inputs_col_name: Collection name for looking up unbound inputs.
    restore_collections_predicate: A predicate on collection names. A
      collection named c (i.e. whose key is c) will be restored iff
1) `restore_collections_predicate(c)` is True, and
2) `c != unbound_inputs_col_name`.
return_elements: A list of strings containing operation names in the
`MetaGraphDef` that will be returned as `Operation` objects; and/or
tensor names in `MetaGraphDef` that will be returned as `Tensor` objects.
Returns:
A tuple of (
dictionary of all the `Variables` imported into the name scope,
list of `Operation` or `Tensor` objects from the `return_elements` list).
Raises:
ValueError: If the graph_def contains unbound inputs.
"""
if context.executing_eagerly():
raise ValueError("Exporting/importing meta graphs is not supported when "
"eager execution is enabled.")
if isinstance(meta_graph_or_file, meta_graph_pb2.MetaGraphDef):
meta_graph_def = meta_graph_or_file
else:
meta_graph_def = read_meta_graph_file(meta_graph_or_file)
if unbound_inputs_col_name:
for key, col_def in meta_graph_def.collection_def.items():
if key == unbound_inputs_col_name:
kind = col_def.WhichOneof("kind")
field = getattr(col_def, kind)
if field.value and (
not input_map or
sorted([compat.as_str(v) for v in field.value]) !=
sorted(input_map)):
raise ValueError("Graph contains unbound inputs: %s. Must "
"provide these inputs through input_map." %
",".join([compat.as_str(v) for v in field.value
if not input_map or v not in input_map]))
break
# Sets graph to default graph if it's not passed in.
graph = graph or ops.get_default_graph()
# Gathers the list of nodes we are interested in.
with graph.as_default():
producer_op_list = None
if meta_graph_def.meta_info_def.HasField("stripped_op_list"):
producer_op_list = meta_graph_def.meta_info_def.stripped_op_list
input_graph_def = meta_graph_def.graph_def
    # Remove all the explicit device specifications from the nodes. This
    # helps to make the graph more portable.
if clear_devices:
for node in input_graph_def.node:
node.device = ""
scope_to_prepend_to_names = graph.unique_name(
import_scope or "", mark_as_used=False)
imported_return_elements = importer.import_graph_def(
input_graph_def,
name=(import_scope or scope_to_prepend_to_names),
input_map=input_map,
producer_op_list=producer_op_list,
return_elements=return_elements)
    # TensorFlow versions before 1.9 exported SavedModels without the
    # VariableDef.trainable field set.
tf_version = meta_graph_def.meta_info_def.tensorflow_version
if not tf_version:
variables_have_trainable = True
else:
variables_have_trainable = (
distutils_version.LooseVersion(tf_version)
>= distutils_version.LooseVersion("1.9"))
# Sort collections so we see TRAINABLE_VARIABLES first and can default these
# variables to trainable if the value is not set in their VariableDef.
sorted_collections = []
if ops.GraphKeys.TRAINABLE_VARIABLES in meta_graph_def.collection_def:
sorted_collections.append(
(ops.GraphKeys.TRAINABLE_VARIABLES,
meta_graph_def.collection_def[ops.GraphKeys.TRAINABLE_VARIABLES]))
for key, value in sorted(meta_graph_def.collection_def.items()):
if key != ops.GraphKeys.TRAINABLE_VARIABLES:
sorted_collections.append((key, value))
# Restores all the other collections.
variable_objects = {}
for key, col_def in sorted_collections:
# Don't add unbound_inputs to the new graph.
if key == unbound_inputs_col_name:
continue
if not restore_collections_predicate(key):
continue
kind = col_def.WhichOneof("kind")
if kind is None:
logging.error("Cannot identify data type for collection %s. Skipping.",
key)
continue
from_proto = ops.get_from_proto_function(key)
# Temporary change to allow the TFMA evaluator to read metric variables
# saved as a bytes list.
# TODO(kathywu): Remove this hack once cl/248406059 has been submitted.
if key == ops.GraphKeys.METRIC_VARIABLES:
# Metric variables will use the same proto functions as GLOBAL_VARIABLES
from_proto = ops.get_from_proto_function(ops.GraphKeys.GLOBAL_VARIABLES)
if from_proto and kind == "bytes_list":
proto_type = ops.get_collection_proto_type(key)
if key in ops.GraphKeys._VARIABLE_COLLECTIONS: # pylint: disable=protected-access
for value in col_def.bytes_list.value:
variable = variable_objects.get(value, None)
if variable is None:
proto = proto_type()
proto.ParseFromString(value)
if not variables_have_trainable:
# If the VariableDef proto does not contain a "trainable"
# property because it was exported before that property was
# added, we default it to whether the variable is in the
# TRAINABLE_VARIABLES collection. We've sorted
# TRAINABLE_VARIABLES to be first, so trainable variables will
# be created from that collection.
proto.trainable = (key == ops.GraphKeys.TRAINABLE_VARIABLES)
variable = from_proto(
proto, import_scope=scope_to_prepend_to_names)
variable_objects[value] = variable
graph.add_to_collection(key, variable)
else:
for value in col_def.bytes_list.value:
proto = proto_type()
proto.ParseFromString(value)
graph.add_to_collection(
key, from_proto(
proto, import_scope=scope_to_prepend_to_names))
else:
field = getattr(col_def, kind)
if key in _COMPAT_COLLECTION_LIST:
logging.warning(
"The saved meta_graph is possibly from an older release:\n"
"'%s' collection should be of type 'byte_list', but instead "
"is of type '%s'.", key, kind)
if kind == "node_list":
for value in field.value:
col_op = graph.as_graph_element(
ops.prepend_name_scope(value, scope_to_prepend_to_names))
graph.add_to_collection(key, col_op)
elif kind == "int64_list":
# NOTE(opensource): This force conversion is to work around the fact
# that Python2 distinguishes between int and long, while Python3 has
# only int.
for value in field.value:
graph.add_to_collection(key, int(value))
else:
for value in field.value:
graph.add_to_collection(
key, ops.prepend_name_scope(value, scope_to_prepend_to_names))
var_list = {}
variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
scope=scope_to_prepend_to_names)
for v in variables:
var_list[ops.strip_name_scope(v.name, scope_to_prepend_to_names)] = v
return var_list, imported_return_elements
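# Illustrative usage sketch (not part of the original module): recovering
# concrete graph elements by name while importing.
import tensorflow as tf
from tensorflow.python.framework import meta_graph as _meta_graph
with tf.Graph().as_default():
  tf.constant(3.0, name="c")
  _mgd = tf.train.export_meta_graph()
with tf.Graph().as_default():
  _, _elems = _meta_graph.import_scoped_meta_graph_with_return_elements(
      _mgd, import_scope="new", return_elements=["c:0"])
  print(_elems[0].name)  # "new/c:0"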
def export_scoped_meta_graph(filename=None,
graph_def=None,
graph=None,
export_scope=None,
as_text=False,
unbound_inputs_col_name="unbound_inputs",
clear_devices=False,
saver_def=None,
clear_extraneous_savers=False,
strip_default_attrs=False,
save_debug_info=False,
**kwargs):
"""Returns `MetaGraphDef` proto. Optionally writes it to filename.
  This function exports the graph, saver, and collection objects into a
  `MetaGraphDef` protocol buffer, with the intention of it being imported
at a later time or location to restart training, run inference, or be
a subgraph.
Args:
filename: Optional filename including the path for writing the
generated `MetaGraphDef` protocol buffer.
graph_def: `GraphDef` protocol buffer.
graph: The `Graph` to export. If `None`, use the default graph.
export_scope: Optional `string`. Name scope under which to extract
the subgraph. The scope name will be stripped from the node definitions
for easy import later into new name scopes. If `None`, the whole graph
is exported.
as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
unbound_inputs_col_name: Optional `string`. If provided, a string collection
with the given name will be added to the returned `MetaGraphDef`,
containing the names of tensors that must be remapped when importing the
`MetaGraphDef`.
clear_devices: Boolean which controls whether to clear device information
before exporting the graph.
saver_def: `SaverDef` protocol buffer.
clear_extraneous_savers: Remove any Saver-related information from the
graph (both Save/Restore ops and SaverDefs) that are not associated
with the provided SaverDef.
strip_default_attrs: Set to true if default valued attributes must be
removed while exporting the GraphDef.
    save_debug_info: If `True`, save the GraphDebugInfo to a separate file,
      which is in the same directory as `filename`, with `_debug` added
      before the file extension.
**kwargs: Optional keyed arguments, including meta_info_def and
collection_list.
Returns:
A `MetaGraphDef` proto and dictionary of `Variables` in the exported
name scope.
Raises:
ValueError: When the `GraphDef` is larger than 2GB.
ValueError: When executing in Eager mode and either `graph_def` or `graph`
is undefined.
"""
if context.executing_eagerly() and not (graph_def is not None and
graph is not None):
raise ValueError("Exporting/importing meta graphs is not supported when "
"Eager Execution is enabled.")
graph = graph or ops.get_default_graph()
exclude_nodes = None
unbound_inputs = []
if export_scope or clear_extraneous_savers or clear_devices:
if graph_def:
new_graph_def = graph_pb2.GraphDef()
new_graph_def.versions.CopyFrom(graph_def.versions)
new_graph_def.library.CopyFrom(graph_def.library)
if clear_extraneous_savers:
exclude_nodes = _find_extraneous_saver_nodes(graph_def, saver_def)
for node_def in graph_def.node:
if _should_include_node(node_def.name, export_scope, exclude_nodes):
new_node_def = _node_def(node_def, export_scope, unbound_inputs,
clear_devices=clear_devices)
new_graph_def.node.extend([new_node_def])
graph_def = new_graph_def
else:
# Only do this complicated work if we want to remove a name scope.
graph_def = graph_pb2.GraphDef()
# pylint: disable=protected-access
graph_def.versions.CopyFrom(graph.graph_def_versions)
bytesize = 0
if clear_extraneous_savers:
exclude_nodes = _find_extraneous_saver_nodes(graph.as_graph_def(),
saver_def)
for key in sorted(graph._nodes_by_id):
if _should_include_node(graph._nodes_by_id[key].name,
export_scope,
exclude_nodes):
value = graph._nodes_by_id[key]
# pylint: enable=protected-access
node_def = _node_def(value.node_def, export_scope, unbound_inputs,
clear_devices=clear_devices)
graph_def.node.extend([node_def])
if value.outputs:
assert "_output_shapes" not in graph_def.node[-1].attr
graph_def.node[-1].attr["_output_shapes"].list.shape.extend([
output.get_shape().as_proto() for output in value.outputs])
bytesize += value.node_def.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph._copy_functions_to_graph_def(graph_def, bytesize) # pylint: disable=protected-access
# It's possible that not all the inputs are in the export_scope.
# If we would like such information included in the exported meta_graph,
# add them to a special unbound_inputs collection.
if unbound_inputs_col_name:
# Clears the unbound_inputs collections.
graph.clear_collection(unbound_inputs_col_name)
for k in unbound_inputs:
graph.add_to_collection(unbound_inputs_col_name, k)
var_list = {}
variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
scope=export_scope)
for v in variables:
if _should_include_node(v, export_scope, exclude_nodes):
var_list[ops.strip_name_scope(v.name, export_scope)] = v
scoped_meta_graph_def = create_meta_graph_def(
graph_def=graph_def,
graph=graph,
export_scope=export_scope,
exclude_nodes=exclude_nodes,
clear_extraneous_savers=clear_extraneous_savers,
saver_def=saver_def,
strip_default_attrs=strip_default_attrs,
**kwargs)
if filename:
graph_io.write_graph(
scoped_meta_graph_def,
os.path.dirname(filename),
os.path.basename(filename),
as_text=as_text)
if save_debug_info:
name, _ = os.path.splitext(filename)
debug_filename = "{name}{ext}".format(name=name, ext=".debug")
      # Gets the operations from the graph by name. Excludes variable nodes,
      # so only the nodes present in the frozen model are included.
# TODO(liufengdb): fix this for functions.
ops_to_export = []
for node in scoped_meta_graph_def.graph_def.node:
scoped_op_name = ops.prepend_name_scope(node.name, export_scope)
ops_to_export.append(("", graph.get_operation_by_name(scoped_op_name)))
graph_debug_info = error_interpolation.create_graph_debug_info_def(
ops_to_export)
graph_io.write_graph(
graph_debug_info,
os.path.dirname(debug_filename),
os.path.basename(debug_filename),
as_text=as_text)
return scoped_meta_graph_def, var_list
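# Illustrative usage sketch (not part of the original module): writing the
# scoped MetaGraphDef to disk as a text proto; the path is illustrative.
import os
import tempfile
import tensorflow as tf
from tensorflow.python.framework import meta_graph as _meta_graph
with tf.Graph().as_default():
  with tf.name_scope("tower"):
    tf.constant(0.5, name="c")
  _path = os.path.join(tempfile.mkdtemp(), "tower.meta")
  _mgd, _vars = _meta_graph.export_scoped_meta_graph(
      filename=_path, export_scope="tower", as_text=True)
print(os.path.exists(_path))  # True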
def copy_scoped_meta_graph(from_scope, to_scope,
from_graph=None, to_graph=None):
"""Copies a sub-meta_graph from one scope to another.
Args:
from_scope: `String` name scope containing the subgraph to be copied.
to_scope: `String` name scope under which the copied subgraph will reside.
    from_graph: Optional `Graph` from which to copy the subgraph. If `None`,
      the default graph is used.
to_graph: Optional `Graph` to which to copy the subgraph. If `None`, the
default graph is used.
Returns:
A dictionary of `Variables` that has been copied into `to_scope`.
Raises:
ValueError: If `from_scope` and `to_scope` are the same while
`from_graph` and `to_graph` are also the same.
"""
from_graph = from_graph or ops.get_default_graph()
to_graph = to_graph or ops.get_default_graph()
if from_graph == to_graph and from_scope == to_scope:
raise ValueError("'from_scope' and 'to_scope' need to be different "
"when performing copy in the same graph.")
orig_meta_graph, var_list = export_scoped_meta_graph(
export_scope=from_scope, graph=from_graph)
var_list = import_scoped_meta_graph(orig_meta_graph,
graph=to_graph,
import_scope=to_scope)
return var_list
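# Illustrative usage sketch (not part of the original module): duplicating a
# variable-holding subgraph from scope "a" to scope "b" within one graph.
import tensorflow as tf
from tensorflow.python.framework import meta_graph as _meta_graph
with tf.Graph().as_default():
  with tf.variable_scope("a"):
    tf.get_variable("w", shape=[3])
  _copied = _meta_graph.copy_scoped_meta_graph(from_scope="a", to_scope="b")
  print(sorted(v.name for v in _copied.values()))  # ['b/w:0']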
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/meta_graph.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FuncGraph and related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as py_collections
import itertools
import weakref
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.framework.auto_control_deps import AutomaticControlDependencies
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.lazy_loader import LazyLoader
# This is to avoid a circular dependency:
# function -> func_graph
function = LazyLoader("function", globals(),
"tensorflow.python.eager.function")
def_function = LazyLoader(
"def_function", globals(),
"tensorflow.python.eager.def_function")
WHITELIST_COLLECTIONS = [
ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES,
ops.GraphKeys.TRAINABLE_VARIABLES,
variable_scope._VARSTORE_KEY, # pylint: disable=protected-access
variable_scope._VARSCOPESTORE_KEY # pylint: disable=protected-access
]
_EAGER_CONST_THRESHOLD = 128
class UnknownArgument(object):
"""Signifies an argument which is not currently handled."""
pass
def convert_structure_to_signature(structure, arg_names=None):
"""Convert a potentially nested structure to a signature.
Args:
structure: Structure to convert, where top level collection is a list or a
tuple.
arg_names: Optional list of arguments that has equal number of elements as
`structure` and is used for naming corresponding TensorSpecs.
Returns:
    Identical structure that has TensorSpec objects instead of Tensors and
    UnknownArgument instead of any unsupported types.
"""
def encode_arg(arg, path):
"""A representation for this argument, for converting into signatures."""
if isinstance(arg, ops.Tensor):
user_specified_name = None
try:
user_specified_name = compat.as_str(
arg.op.get_attr("_user_specified_name"))
except ValueError:
pass
if path and user_specified_name and user_specified_name != path[0]:
# The user has explicitly named the argument differently than the name
# of the function argument.
name = user_specified_name
else:
name = "/".join([str(p) for p in path])
return tensor_spec.TensorSpec(arg.shape, arg.dtype, name)
if isinstance(arg, composite_tensor.CompositeTensor):
# TODO(b/133606651) Do we need to inject arg_name?
return arg._type_spec # pylint: disable=protected-access
if isinstance(arg, (
int,
float,
bool,
type(None),
dtypes.DType,
tensor_spec.TensorSpec,
type_spec.TypeSpec,
)):
return arg
return UnknownArgument()
# We are using the flattened paths to name the TensorSpecs. We need an
# explicit name for them downstream.
flattened = nest.flatten_with_tuple_paths(structure)
if arg_names:
if len(arg_names) != len(structure):
raise ValueError(
"Passed in arg_names don't match actual signature (%s)." % arg_names)
# Replace all top-level names with their actual arg_names. If a path before
# was "(2,'a',1)", it will become "(arg_names[2],'a',1)".
flattened = [
((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened
]
mapped = [encode_arg(arg, path) for path, arg in flattened]
return nest.pack_sequence_as(structure, mapped)
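# Illustrative usage sketch (not part of the original module): non-Tensor
# leaves pass through unchanged, while Tensors become TensorSpecs named
# after their path in the structure.
import tensorflow as tf
with tf.Graph().as_default():
  _sig = convert_structure_to_signature(
      {"x": tf.constant([1.0, 2.0]), "n": 3})
  print(_sig)
  # {'n': 3, 'x': TensorSpec(shape=(2,), dtype=tf.float32, name='x')}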
class FuncGraph(ops.Graph):
"""Graph representing a function body.
Attributes:
name: The name of the function.
inputs: Placeholder tensors representing the inputs to this function. The
tensors are in this FuncGraph. This represents "regular" inputs as well as
captured inputs (i.e. the values of self.captures), with the regular
inputs coming first.
outputs: Tensors that will be returned by this function. The tensors are in
this FuncGraph.
control_outputs: Operations that must be executed before the function
represented by this graph can be said to have been executed.
structured_input_signature: A tuple of (args, kwargs), which are both
possibly-nested python objects that were received by this function. Note
that these structures might contain Python `None`s.
structured_outputs: A possibly-nested python object which will be returned
by this function. The Tensors in this structure are the same as those of
self.outputs. Note that this structure might contain Python `None`s.
variables: Variables that should be watched during function execution.
outer_graph: The graph this function is defined in. May be another FuncGraph
or the global default Graph.
captures: Maps external tensor -> internal tensor (i.e. input placeholder).
The entries are in the order they were captured.
control_captures: Set of external ops on which this graph has a control
dependency.
seed: The graph-level random seed.
capture_by_value: If True, the func graph will capture Variables by value
instead of reference.
"""
def __init__(self, name, collections=None, capture_by_value=None):
"""Construct a new FuncGraph.
The graph will inherit its graph key, collections, seed, and distribution
strategy stack from the current context or graph.
Args:
name: the name of the function.
collections: a dictionary of collections this FuncGraph should start
with. If not specified (None), the FuncGraph will read (but not write
to) the outer graph's collections that are not whitelisted, and both
read and write to the outer graph's collections that are whitelisted.
The current whitelisted collections are the global variables, the
local variables, and the trainable variables.
Defaults to None.
capture_by_value: An optional boolean. If True, the func graph will
capture Variables by value instead of reference. By default inherit
from outer graphs, and failing that will default to False.
"""
super(FuncGraph, self).__init__()
self.name = name
self.inputs = []
self.outputs = []
self.control_outputs = []
self.control_captures = set()
self.structured_input_signature = None
self.structured_outputs = None
self._weak_variables = []
self._watched_variables = object_identity.ObjectIdentityWeakSet()
self.outer_graph = ops.get_default_graph()
self._captures = py_collections.OrderedDict()
# If not None, records the names of output args of this function. Used to
# preserve the output names in the signature of a serialized+deserialized
# function. Private at the moment mostly because it's often out of date.
self._output_names = None
# Maps arbitrary key -> (closure, nest of placeholders), where at function
# call time the value of closure() will be used to feed the nest of
# placeholders.
self._deferred_captures = py_collections.OrderedDict()
# Inherit capture-by-value from outer graph.
if capture_by_value is not None:
self.capture_by_value = capture_by_value
elif self.outer_graph is not None and isinstance(
self.outer_graph, FuncGraph):
self.capture_by_value = self.outer_graph.capture_by_value
else:
self.capture_by_value = False
self._building_function = True
# Map from resource tensor name to last op (in program order) which uses
# this tensor. Used to enforce that execution order matches program order
# for resource tensors.
self._last_op_using_resource_tensor = {}
graph = self.outer_graph
if context.executing_eagerly():
self.seed = context.global_seed()
      # [for tf.data user migration from TF 1.x to 2.0] `_seed_used` keeps
      # track of whether any random op in the function had a None op_seed, in
      # which case we end up using the function seed, which could be
      # unintended behavior for the op.
self._seed_used = False
else:
self.seed = graph.seed
self._seed_used = False
# TODO(allenl): Figure out if we can remove colocation stack
# specialization (currently used in cond_v2), here and in the cache key.
self._colocation_stack = graph._colocation_stack.copy() # pylint: disable=protected-access
if collections is None:
for collection_name in graph.get_all_collection_keys():
if collection_name not in WHITELIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection(
collection_name)
for collection_name in WHITELIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection_ref(
collection_name)
else:
self._collections = collections
# Keep track of whether this FuncGraph is exportable to SavedModel. Use
# `graph.mark_as_unsaveable(reason)` to mark this FuncGraph and any
# dependent functions as unsaveable.
self._saveable = True
self._saving_errors = set()
def __str__(self):
return "FuncGraph(name=%s, id=%s)" % (self.name, id(self))
def watch_variable(self, v):
"""Marks the variable v as accessed while building this graph."""
while self is not None and isinstance(self, FuncGraph):
self._watched_variables.add(v)
self = self.outer_graph
def capture_call_time_value(self, closure, spec, key=None):
"""Creates a placeholder which at call time has the value closure().
Useful, for example, to respect TensorFlow context managers, which are often
dynamically scoped.
Args:
closure: function which takes no arguments, to be evaluated at function
call time, returning a nest of tensors compatible with `spec`.
spec: nest of TypeSpec for the value to capture.
      key: Optional. If not None, multiple calls to `capture_call_time_value`
        with the same key in the same graph will return the same placeholder,
        and the first closure will be used at function call time.
Returns:
Nest of placeholders which, at function call time, will be fed with the
result of calling closure().
Raises:
ValueError: at function call time, if the return value of closure() is
not compatible with `spec`.
"""
if key is None:
key = object()
if key not in self._deferred_captures:
def convert_to_placeholder(s):
if not isinstance(s, tensor_spec.TensorSpec):
raise TypeError(
"Expected a nest of `TypeSpec` objects, found %s of type %s." %
(s, type(s)))
return array_ops.placeholder(dtype=s.dtype, shape=s.shape)
placeholder = nest.map_structure(
convert_to_placeholder, spec, expand_composites=True)
def wrapped_closure():
ret_nest = closure()
nest.assert_same_structure(spec, ret_nest, expand_composites=True)
# This uses the tensor dtype defined in `spec` when converting values
# in `ret_nest` to tensors.
# pylint: disable=protected-access
y = nest.map_structure(lambda s, r: s._to_components(r), spec, ret_nest,
expand_composites=False)
# pylint: enable=protected-access
return nest.flatten(y, expand_composites=True)
self._deferred_captures[key] = (wrapped_closure, placeholder)
return self._deferred_captures[key][1]
def control_dependencies(self, control_inputs):
"""Handles control dependencies.
FuncGraph wraps Graph's control_dependencies logic by first filtering out
any external tensors / operations and storing them in the graph's
control_captures member. Any consumers of this function graph must then
decide how to handle the control captures.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return super(FuncGraph, self).control_dependencies(control_inputs)
filtered_control_inputs = []
for c in control_inputs:
# Check for _UnreadVariable
if (isinstance(c, ops.IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
graph_element = ops._as_graph_element(c) # pylint: disable=protected-access
if graph_element is None:
graph_element = c
if graph_element is not None and getattr(
graph_element, "graph", None) is not self:
self.control_captures.add(graph_element)
else:
filtered_control_inputs.append(graph_element)
return super(FuncGraph, self).control_dependencies(filtered_control_inputs)
def as_default(self):
outer_cm = super(FuncGraph, self).as_default()
@tf_contextlib.contextmanager
def inner_cm():
"""Context manager for copying distribute.Strategy scope information."""
graph = ops.get_default_graph()
# pylint: disable=protected-access
# TODO(b/112906995, nareshmodi): distribution strategy depends on
# inheriting this stack from the default graph even in eager mode. Maybe
# it should be part of the eager context? This would also allow us to
# remove a get_default_graph() call from the function cache lookup.
old_strategy_stack = self._distribution_strategy_stack
self._distribution_strategy_stack = list(
graph._distribution_strategy_stack)
# We ignore device placements from any outer scopes while tracing the
# function when possible, to avoid hard-coding them in the function
# graph. "Default" placements come from the PartitionedCallOp's placement,
# so that the same trace of the Python function may be placed on several
# different devices and saved functions may be placed on new devices when
# restored.
old_device_stack = self._device_function_stack
if context.executing_eagerly():
if self._distribution_strategy_stack:
self._device_function_stack = self._device_function_stack.copy()
self._add_device_to_stack(context.context().device_name)
else:
if (self._distribution_strategy_stack
or device_stack_has_callable(graph._device_function_stack)):
# Hard-code devices from device functions in the function body
self._device_function_stack = graph._device_function_stack.copy()
old_creator_stack = self._variable_creator_stack
self._variable_creator_stack = graph._variable_creator_stack
# Inherit the graph key, since this is used for matching variables in
# optimizers.
old_graph_key = self._graph_key
self._graph_key = graph._graph_key
# Inherit the auto_cast_variable_read_dtype, since this should not change
# inside a function.
old_auto_cast_var_read_dtype = self._auto_cast_variable_read_dtype
self._auto_cast_variable_read_dtype = graph._auto_cast_variable_read_dtype
# pylint: enable=protected-access
with outer_cm as g:
try:
yield g
finally:
self._distribution_strategy_stack = old_strategy_stack
self._device_function_stack = old_device_stack
self._variable_creator_stack = old_creator_stack
self._graph_key = old_graph_key
self._auto_cast_variable_read_dtype = old_auto_cast_var_read_dtype
return inner_cm()
@property
def output_types(self):
return [t.dtype for t in self.outputs]
@property
def output_shapes(self):
return [t.shape for t in self.outputs]
@property
def variables(self):
"""A list of variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Yields:
Strong references to variables accessed by this FuncGraph.
"""
for weak_v in self._weak_variables:
v = weak_v()
if v is None:
raise AssertionError(
"Called a function referencing variables which have been deleted. "
"This likely means that function-local variables were created and "
"not referenced elsewhere in the program. This is generally a "
"mistake; consider storing variables in an object attribute on "
"first call.")
yield v
@variables.setter
def variables(self, var_list):
self._weak_variables = [weakref.ref(v) for v in var_list]
def _capture_by_value(
self,
op_type,
inputs,
dtypes, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
    # When capturing by value, execute the op (e.g. the variable read) in the
    # outer context and capture the resulting value.
reverse_captures = dict((id(v), k) for k, v in self.captures)
uncaptured_inputs = [reverse_captures.get(id(t), t) for t in inputs]
with ops.init_scope():
if context.executing_eagerly():
attr_list = ("dtype", int(attrs["dtype"].type))
value, = execute.execute(
compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,
context.context())
else:
op = ops.get_default_graph()._create_op_internal( # pylint: disable=protected-access
op_type,
uncaptured_inputs,
dtypes,
input_types,
name,
attrs,
op_def,
compute_device)
value = op.outputs[0]
captured_value = self.capture(value)
return captured_value.op
def create_op(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Like Graph.create_op, except handles external input tensors.
This overload adds functionality to create_op to "capture" any external
input tensors, i.e. tensors from the eager context or outer function graphs
if this is a nested function. See `capture` for more information.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Returns:
An `Operation` object.
"""
del compute_shapes
if self.capture_by_value and op_type in ["ReadVariableOp",
"ResourceGather"]:
return self._capture_by_value(op_type, inputs, dtypes, input_types, name,
attrs, op_def, compute_device)
# This capturing logic interacts poorly with control flow contexts which
# want to replace inputs of ops far too late in the process. This can lead
# the context to get confused and try to create an Enter for an Enter. We
# can detect this here and skip the additional Enter which can confuse loop
# validation logic.
if op_type == "Enter" and inputs[0].op.type == "Enter":
if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
return inputs[0].op
# Calling AddValue on the control flow contexts to force creation of the
# backward accumulators in the original graph before we create placeholders
# to capture the inputs.
ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access
for i, inp in enumerate(inputs):
# TPU Estimator defines a control flow context with no AddValue method.
if ctxt is not None and hasattr(ctxt, "AddValue"):
inp = ctxt.AddValue(inp)
inp = self.capture(inp)
inputs[i] = inp
return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access
op_type, inputs, dtypes, input_types, name, attrs, op_def,
compute_device)
def capture(self, tensor, name=None):
"""Captures `tensor` if it's external to this graph.
If `tensor` is from a different graph, returns a placeholder for it.
`tensor` and the placeholder will appear in self.captures, and the
placeholder will appear in self.inputs. Multiple calls to this method with
the same `tensor` argument will return the same placeholder. If `tensor` is
from this graph, returns `tensor`.
Args:
tensor: Tensor. May be from this FuncGraph or a different graph.
name: Optional name if a placeholder is created.
Returns:
Tensor from this FuncGraph.
Raises:
InaccessibleTensorError: if any tensors are accessed in a manner that
bypasses the mechanisms required for the data dependencies to be correctly
wired.
"""
    # Note: _forward_func_graph is currently only set when building the
    # gradient graph of a defun call. If the backwards graph tries to capture
    # tensors, those will be captured first in the forward graph. This makes
    # sure that any tensor needed by a custom_gradient is correctly captured.
if (getattr(tensor, "graph", None) is not self and
hasattr(self, "_forward_func_graph") and
isinstance(self._forward_func_graph, FuncGraph)):
tensor = self._forward_func_graph.capture(tensor)
if isinstance(tensor, ops.EagerTensor):
if name is None:
name = str(ops.uid())
# Small EagerTensors are captured with Const ops
if (tensor.dtype in dtypes.TF_VALUE_DTYPES and
np.prod(tensor.shape) <= _EAGER_CONST_THRESHOLD):
return self.capture_eager_tensor(tensor, name)
# Large EagerTensors and resources are captured with Placeholder ops
return self._capture_helper(tensor, name)
if tensor.graph is not self:
if name is None:
name = tensor.op.name
inner_graph = tensor.graph
while inner_graph is not None and isinstance(inner_graph, FuncGraph):
if inner_graph is self:
raise errors.InaccessibleTensorError(
"The tensor '%s' cannot be accessed here: it is defined"
" in another function or code block. Use return values,"
" explicit Python locals or TensorFlow collections to access"
" it. Defined in: %s; accessed from: %s.\n"
% (tensor, tensor.graph, self))
inner_graph = inner_graph.outer_graph
return self._capture_helper(tensor, name)
return tensor
def _capture_helper(self, tensor, name):
capture = self._captures.get(ops.tensor_id(tensor))
if capture is None:
placeholder = _create_substitute_placeholder(
tensor, name=name, dtype=tensor.dtype)
self.add_capture(tensor, placeholder)
else:
placeholder = capture[1]
tape.record_operation("captured_value", [placeholder], [tensor],
lambda x: [x])
return placeholder
@property
def captures(self):
"""Order list of tuples containing external and internal captures."""
return self._captures.values()
def add_capture(self, tensor, placeholder):
"""Capture a specific tensor and utilize the provided placeholder.
Args:
      tensor: Tensor to capture.
placeholder: Provided placeholder for the tensor.
"""
self._captures[ops.tensor_id(tensor)] = (tensor, placeholder)
self.inputs.append(placeholder)
def reset_captures(self, capture_list):
"""Set the captures with the provided list of captures & placeholder."""
self._captures = py_collections.OrderedDict()
for tensor, placeholder in capture_list:
self._captures[ops.tensor_id(tensor)] = (tensor, placeholder)
def pop_capture(self, tensor):
"""Remove the capture and return the generated placeholder."""
capture = self._captures.pop(ops.tensor_id(tensor), None)
if capture is None:
return None
return capture[1]
def clear_captures(self):
# TODO(b/115366440): Delete this method when a custom OrderedDict is added.
# Clearing captures using clear() leaves some cycles around.
while self._captures:
self._captures.popitem()
memory.dismantle_ordered_dict(self._captures)
while self._deferred_captures:
self._deferred_captures.popitem()
memory.dismantle_ordered_dict(self._deferred_captures)
def capture_distributed_variable(self, variable, placeholder):
"""Add given distributed variable to captures with given placeholder."""
self._captures[ops.tensor_id(variable)] = (variable, placeholder)
tape.record_operation("captured_value", [placeholder], [variable],
lambda x: [x])
def capture_eager_tensor(self, tensor, name):
capture = self._captures.get(ops.tensor_id(tensor))
if capture is None:
# We clear all control dependencies and place the Const op on the same
# device as the source tensor. The device placement may be relaxed at
# a later date.
with ops.control_dependencies(None), self.device(tensor.device):
graph_const = constant_op.constant(tensor.numpy(), dtype=tensor.dtype,
shape=tensor.shape, name=name)
self.add_capture(tensor, graph_const)
else:
graph_const = capture[1]
tape.record_operation("captured_value", [graph_const], [tensor],
lambda x: [x])
return graph_const
@property
def external_captures(self):
"""External tensors captured by this function."""
return [c[0] for c in self._captures.values()]
@property
def internal_captures(self):
"""Placeholders in this function corresponding captured tensors."""
return [c[1] for c in self._captures.values()]
@property
def deferred_external_captures(self):
"""Ordered nest of tensors whose placeholders will be fed at call time."""
return [c[0] for c in self._deferred_captures.values()]
@property
def deferred_internal_captures(self):
"""List of nest of placeholders which at call time will be fed."""
return [c[1] for c in self._deferred_captures.values()]
@property
def variable_captures(self):
"""Map of tensor ids of variable handles to variables which are captured."""
return {
ops.tensor_id(self._captures[ops.tensor_id(v.handle)][1]): v
for v in self.variables
if ops.tensor_id(v.handle) in self._captures
}
def mark_as_unsaveable(self, error_message):
"""Marks this FuncGraph as unsaveable.
Any attempts to export this FuncGraph will raise an error with the specified
message.
Args:
error_message: List or string containing the error message to be raised
when saving this FuncGraph to SavedModel.
"""
self._saveable = False
if isinstance(error_message, str):
error_message = [error_message]
self._saving_errors.update(error_message)
@property
def saveable(self):
"""Returns whether this FuncGraph is saveable."""
return self._saveable
@property
def saving_errors(self):
"""Returns set of errors preventing this FuncGraph from being saved."""
return self._saving_errors
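# Illustrative usage sketch (not part of the original module): manually
# building a FuncGraph and capturing an external tensor, which shows up as
# an input placeholder.
import tensorflow as tf
with tf.Graph().as_default():
  _external = tf.constant(2.0, name="external")
  _fg = FuncGraph("square")
  with _fg.as_default():
    _inner = _fg.capture(_external)  # placeholder standing in for _external
    _fg.outputs.append(_inner * _inner)
  print(len(list(_fg.captures)), len(_fg.inputs))  # 1 1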
def func_graph_from_py_func(name,
python_func,
args,
kwargs,
signature=None,
func_graph=None,
autograph=False,
autograph_options=None,
add_control_dependencies=True,
arg_names=None,
op_return_value=None,
collections=None,
capture_by_value=None,
override_flat_arg_shapes=None):
"""Returns a `FuncGraph` generated from `python_func`.
Args:
name: an identifier for the function.
python_func: the Python function to trace.
args: the positional args with which the Python function should be called;
ignored if a signature is provided.
kwargs: the keyword args with which the Python function should be called;
ignored if a signature is provided.
signature: a possibly nested sequence of `TensorSpecs` specifying the shapes
and dtypes of the arguments. When a signature is provided, `args` and
`kwargs` are ignored, and `python_func` is traced with Tensors conforming
to `signature`. If `None`, the shapes and dtypes are inferred from the
inputs.
    func_graph: Optional. An instance of FuncGraph. If provided, we will use
      this graph; otherwise a new one is built and returned.
autograph: whether to use autograph to compile `python_func`.
See https://www.tensorflow.org/guide/autograph for more information.
autograph_options: additional knobs to control when `autograph=True`.
See https://www.tensorflow.org/guide/autograph for more information.
add_control_dependencies: If True, automatically adds control dependencies
to ensure program order matches execution order and stateful ops always
execute.
arg_names: Optional list of argument names, used to give input placeholders
recognizable names.
op_return_value: Optional. A Tensor. If set and `python_func` returns
Operations, those return values will be replaced with this value. If not
set, returning an Operation triggers an error.
collections: a dictionary of collections this FuncGraph should start
with. If not specified (None), the FuncGraph will read (but not write to)
the outer graph's collections that are not whitelisted, and both
read and write to the outer graph's collections that are whitelisted.
The current whitelisted collections are the global variables, the
local variables, and the trainable variables.
Defaults to None.
capture_by_value: An optional boolean. If True, the func graph will capture
Variables by value instead of reference. By default inherit from outer
graphs, and failing that will default to False.
override_flat_arg_shapes: An optional list of instances that are either
`None` or `TensorShape`. The length must match that of
`nest.flatten((args, kwargs), expand_composites=True)`. The entries
containing value `None` must match entries in flattened arguments
containing non-tensors, while entries containing a `TensorShape` must
match entries in the flattened arguments containing tensors.
Returns:
A FuncGraph.
Raises:
TypeError: If any of `python_func`'s return values is neither `None` nor a
`Tensor`.
ValueError: If both `signature` and `override_flat_arg_shapes` are
passed in.
"""
if op_return_value is not None:
assert isinstance(op_return_value, ops.Tensor), op_return_value
if func_graph is None:
func_graph = FuncGraph(name, collections=collections,
capture_by_value=capture_by_value)
assert isinstance(func_graph, FuncGraph)
if add_control_dependencies:
control_manager = AutomaticControlDependencies()
else:
control_manager = ops.NullContextmanager()
with func_graph.as_default(), control_manager as a:
current_scope = variable_scope.get_variable_scope()
    default_use_resource = current_scope.use_resource
current_scope.set_use_resource(True)
if signature is not None and override_flat_arg_shapes is not None:
raise ValueError(
"Passed both signature and override_flat_arg_shapes: %s and %s."
% (signature, override_flat_arg_shapes))
if signature is not None:
args = signature
kwargs = {}
# Creates and names placeholders for all arguments.
if override_flat_arg_shapes is not None:
flat_args = nest.flatten(args, expand_composites=True)
arg_shapes = override_flat_arg_shapes[:len(flat_args)]
kwarg_shapes = override_flat_arg_shapes[len(flat_args):]
else:
arg_shapes = None
kwarg_shapes = None
func_args = _get_defun_inputs_from_args(
args, arg_names, flat_shapes=arg_shapes)
func_kwargs = _get_defun_inputs_from_kwargs(
kwargs, flat_shapes=kwarg_shapes)
# Convert all Tensors into TensorSpecs before saving the structured inputs.
# If storing pure concrete functions that are not called through polymorphic
# functions, we don't have access to FunctionSpec, so we need to call the
# TensorSpecs by their `arg_names` for later binding.
func_graph.structured_input_signature = (
convert_structure_to_signature(func_args, arg_names),
convert_structure_to_signature(func_kwargs))
flat_func_args = nest.flatten(func_args, expand_composites=True)
flat_func_kwargs = nest.flatten(func_kwargs, expand_composites=True)
# Temporarily set inputs to allow graph building code to inspect
# them. Reassigned below.
func_graph.inputs = [arg for arg in flat_func_args + flat_func_kwargs
if isinstance(arg, ops.Tensor)]
# Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
    # Variables to help check whether mutation happens while calling the
    # function. Copy the recursive list, tuple, and map structure, but not
    # the leaf objects.
func_args_before = nest.pack_sequence_as(func_args, flat_func_args,
expand_composites=True)
func_kwargs_before = nest.pack_sequence_as(
func_kwargs, flat_func_kwargs, expand_composites=True)
def convert(x):
"""Converts a function output to a Tensor."""
if x is None:
return None
if op_return_value is not None and isinstance(x, ops.Operation):
# TODO(b/79881896): we currently can't capture external control deps, so
# this won't work if x needs to be captured (i.e. if python_func returns
# captured Operations).
with ops.control_dependencies([x]):
x = array_ops.identity(op_return_value)
elif not isinstance(x, tensor_array_ops.TensorArray):
try:
x = ops.convert_to_tensor_or_composite(x)
except (ValueError, TypeError):
raise TypeError(
"To be compatible with tf.contrib.eager.defun, Python functions "
"must return zero or more Tensors; in compilation of %s, found "
"return value of type %s, which is not a Tensor." %
(str(python_func), type(x)))
if add_control_dependencies:
x = a.mark_as_return(x)
return x
try:
if autograph:
from tensorflow.python import autograph # pylint: disable=g-import-not-at-top
_, original_func = tf_decorator.unwrap(python_func)
def wrapper(*args, **kwargs):
"""Calls a converted version of original_func."""
# TODO(mdan): Push this block higher in tf.function's call stack.
try:
return autograph.converted_call(
original_func,
autograph.ConversionOptions(
recursive=True,
optional_features=autograph_options,
user_requested=True,
), args, kwargs)
except Exception as e: # pylint:disable=broad-except
if hasattr(e, "ag_error_metadata"):
raise e.ag_error_metadata.to_exception(e)
else:
raise
# Wrapping around a decorator allows checks like tf_inspect.getargspec
# to be accurate.
converted_func = tf_decorator.make_decorator(original_func, wrapper)
python_func = tf_decorator.rewrap(python_func, original_func,
converted_func)
func_outputs = python_func(*func_args, **func_kwargs)
# invariant: `func_outputs` contains only Tensors, CompositeTensors,
# TensorArrays and `None`s.
func_outputs = nest.map_structure(convert, func_outputs,
expand_composites=True)
check_mutation(func_args_before, func_args)
check_mutation(func_kwargs_before, func_kwargs)
finally:
      current_scope.set_use_resource(default_use_resource)
# Variables in `func_args`, `func_kwargs` should be explicit inputs
# to the function, not captured inputs.
graph_variables = list(func_graph._watched_variables) # pylint: disable=protected-access
arg_variables = object_identity.ObjectIdentitySet()
inputs = []
for arg in (nest.flatten(func_args, expand_composites=True) +
nest.flatten(func_kwargs, expand_composites=True)):
if isinstance(arg, resource_variable_ops.BaseResourceVariable):
# Even if an argument variable was not used in the function, we've
# already manually captured the resource Tensor when creating argument
# placeholders.
resource_placeholder = func_graph.pop_capture(arg.handle)
if resource_placeholder is None:
continue
arg_variables.add(arg)
inputs.append(resource_placeholder)
elif isinstance(arg, ops.Tensor):
inputs.append(arg)
variables = [v for v in graph_variables if v not in arg_variables]
func_graph.inputs = (
inputs + func_graph.internal_captures + nest.flatten(
func_graph.deferred_internal_captures, expand_composites=True))
func_graph.structured_outputs = func_outputs
# Returning a closed-over tensor does not trigger convert_to_tensor.
func_graph.outputs.extend(
func_graph.capture(x)
for x in flatten(func_graph.structured_outputs)
if x is not None)
func_graph.variables = variables
if add_control_dependencies:
func_graph.control_outputs.extend(control_manager.ops_which_must_run)
return func_graph
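# Illustrative usage sketch (not part of the original module): tracing a
# Python function into a FuncGraph from an explicit signature instead of
# concrete arguments.
import tensorflow as tf
def _double(x):
  return x * 2.0
_fg = func_graph_from_py_func(
    "double", _double, args=(), kwargs={},
    signature=[tensor_spec.TensorSpec([None], tf.float32, name="x")])
print([t.name for t in _fg.inputs])   # e.g. ['x:0']
print([t.name for t in _fg.outputs])  # e.g. ['mul:0']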
def maybe_captured(tensor):
"""If t is a captured value placeholder, returns the original captured value.
Args:
tensor: Tensor.
Returns:
A tensor, potentially from a different Graph/FuncGraph.
"""
if (not isinstance(tensor, ops.EagerTensor) and
tensor.op.graph.building_function and tensor.op.type == "Placeholder"):
for input_t, placeholder_t in tensor.op.graph.captures:
if tensor == placeholder_t:
return maybe_captured(input_t)
return tensor
def device_stack_has_callable(device_stack):
"""Checks whether a device stack contains a callable."""
return any(callable(spec._device_name_or_function) # pylint: disable=protected-access
for spec in device_stack.peek_objs())
def check_mutation(n1, n2):
"""Check if two list of arguments are exactly the same."""
errmsg = ("Function to be traced should not modify structure of input "
"arguments. Check if your function has list and dictionary "
"operations that alter input arguments, "
"such as `list.pop`, `list.append`")
try:
nest.assert_same_structure(n1, n2, expand_composites=True)
except ValueError:
raise ValueError(errmsg)
for arg1, arg2 in zip(nest.flatten(n1, expand_composites=True),
nest.flatten(n2, expand_composites=True)):
if arg1 is not arg2:
raise ValueError(errmsg)
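# Illustrative sketch (not part of the original module): `check_mutation`
# compares the argument structure captured before tracing against the one
# seen afterwards. Note the leaf check uses `is`, so this demo relies on
# CPython's small-int caching for the unchanged elements.
def _example_check_mutation():
  args = ([1, 2],)
  snapshot = nest.pack_sequence_as(
      args, nest.flatten(args, expand_composites=True),
      expand_composites=True)
  check_mutation(snapshot, args)  # passes: same structure, identical leaves
  args[0].append(3)  # simulate a traced function mutating its input
  try:
    check_mutation(snapshot, args)
  except ValueError:
    pass  # raised: the structure of the inputs changed during tracing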
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def flatten(sequence):
"""Like nest.flatten w/ expand_composites, but returns flow for TensorArrays.
Args:
sequence: A nested structure of Tensors, CompositeTensors, and
TensorArrays.
Returns:
A list of tensors.
"""
flat_sequence = nest.flatten(sequence, expand_composites=True)
return [
item.flow if isinstance(item, tensor_array_ops.TensorArray) else item
for item in flat_sequence]
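# Illustrative sketch (the local imports below are assumptions, not imports of
# the original module): `flatten` swaps each TensorArray for its `flow`
# tensor, so only true Tensors cross the function boundary.
def _example_flatten():
  from tensorflow.python.framework import constant_op
  from tensorflow.python.framework import dtypes
  ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
  flat = flatten({"ta": ta, "x": constant_op.constant(1.0)})
  return flat  # [ta.flow, <1.0 Tensor>]; no TensorArray object survives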
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def pack_sequence_as(structure, flat_sequence):
"""Like `nest.pack_sequence_as` but also builds TensorArrays from flows.
Args:
structure: The structure to pack into. May contain Tensors,
CompositeTensors, or TensorArrays.
flat_sequence: An iterable containing tensors.
Returns:
A nested structure.
Raises:
AssertionError if `structure` and `flat_sequence` are not compatible.
"""
flat_sequence = list(flat_sequence)
flattened_structure = nest.flatten(structure, expand_composites=True)
if len(flattened_structure) != len(flat_sequence):
raise ValueError("Mismatch in element count")
for i in range(len(flat_sequence)):
if isinstance(flattened_structure[i], tensor_array_ops.TensorArray):
flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(
old_ta=flattened_structure[i], flow=flat_sequence[i])
return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)
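# Illustrative sketch (hypothetical helper, with an assumed local import):
# round-tripping through `flatten` and `pack_sequence_as` rebuilds the
# TensorArray from the flow tensor that crossed the boundary.
def _example_pack_sequence_as():
  from tensorflow.python.framework import dtypes
  ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
  structure = {"ta": ta}
  rebuilt = pack_sequence_as(structure, flatten(structure))
  return rebuilt["ta"]  # a new TensorArray wired to the original's flow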
def _create_substitute_placeholder(value, name=None, dtype=None):
"""Creates a placeholder for `value` and propagates shape info to it."""
# Note: setting ops.control_dependencies(None) ensures we always put
# capturing placeholders outside of any control flow context.
with ops.control_dependencies(None):
placeholder = graph_placeholder(
dtype=dtype or value.dtype, shape=value.shape, name=name)
custom_gradient.copy_handle_data(value, placeholder)
return placeholder
def _get_defun_inputs_from_args(args, names, flat_shapes=None):
"""Maps Python function positional args to graph-construction inputs."""
return _get_defun_inputs(
args, names, structure=args, flat_shapes=flat_shapes)
def _get_defun_inputs(args, names, structure, flat_shapes=None):
"""Maps python function args to graph-construction inputs.
Args:
args: A flat list of user-specified arguments.
names: A list of strings with user-specified argument names, same length as
`args`. May be `None`, in which case a generic name is used.
structure: The original argument list or dictionary.
flat_shapes: A flat list of values that are either `None` or
instances of `TensorShape`. If provided, then length must match
that of `nest.flatten(args, expand_composites=True)`; and locations where
`args` are instances of `Tensor` must have a corresponding `TensorShape`
in `flat_shapes`. May be `None`, in which case exact shapes are read
directly from the args.
Returns:
Placeholders with the same structure as `structure`.
Raises:
RuntimeError: if `flat_shapes` is provided, but
`len(flat_shapes) != len(nest.flatten(args, expand_composites=True))`.
RuntimeError: if a shape from `flat_shapes` is not None
for an argument that is not a `Tensor`, `TensorSpec`,
or `ResourceVariable`.
"""
func_graph = ops.get_default_graph()
function_inputs = []
if names is None:
names = [None] * len(args)
if flat_shapes is None:
shapes_iter = itertools.repeat(None)
else:
len_flat_args = len(nest.flatten(args, expand_composites=True))
if len_flat_args != len(flat_shapes):
raise RuntimeError(
"Length of fully flat shapes (%d) must match that of "
"flatten(args) (%d). args: %s, flat_shapes: %s"
% (len(flat_shapes),
len_flat_args,
args,
flat_shapes))
shapes_iter = iter(flat_shapes)
for arg_value, name in zip(args, names):
flattened = nest.flatten(arg_value, expand_composites=True)
tensor_specs = [
arg for arg in flattened if isinstance(arg, tensor_spec.TensorSpec)
]
specified_names = [arg.name for arg in tensor_specs if arg.name]
if specified_names and len(specified_names) < len(tensor_specs):
raise ValueError("If specifying TensorSpec names for nested structures, "
"either zero or all names have to be specified.")
for arg in flattened:
      # We have a shape entry for each arg, regardless of whether it's a real
# Tensor or not. For non-tensor entries it should be None.
shape = next(shapes_iter)
if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):
if isinstance(arg, tensor_spec.TensorSpec) and arg.name:
requested_name = arg.name
else:
requested_name = name
placeholder_shape = shape if shape is not None else arg.shape
try:
placeholder = graph_placeholder(
arg.dtype, placeholder_shape,
name=requested_name)
except ValueError:
# Sometimes parameter names are not valid op names, so fall back to
# unnamed placeholders.
placeholder = graph_placeholder(arg.dtype, placeholder_shape)
if name is not None:
          # Record the requested/user-specified name in case it differs from
# the uniquified name, for validation when exporting signatures.
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))
function_inputs.append(placeholder)
elif isinstance(arg, resource_variable_ops.BaseResourceVariable):
# Capture arg variables to create placeholders for them. These will be
# removed as captures after the function is traced (since otherwise we'd
# just add it back with a new placeholder when the variable was
# referenced).
placeholder = func_graph.capture(arg.handle, name=name)
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(name)))
function_inputs.append(arg)
else:
if shape is not None:
raise RuntimeError(
"Expected provided shape override to be None for arg that isn't "
"a Tensor, but saw arg: '%s', shape: '%s'. args: %s"
% (arg, shape, args))
function_inputs.append(arg)
return nest.pack_sequence_as(structure, function_inputs,
expand_composites=True)
def _get_defun_inputs_from_kwargs(kwargs, flat_shapes):
"""Maps Python function keyword args to graph-construction inputs."""
if kwargs:
names, args = zip(*sorted(kwargs.items()))
else:
names = []
args = []
return _get_defun_inputs(
args, names, structure=kwargs, flat_shapes=flat_shapes)
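# Illustrative sketch: keyword arguments are sorted by name before tracing,
# so placeholder order is deterministic regardless of call-site ordering.
def _example_kwarg_ordering():
  names, args = zip(*sorted({"b": 2, "a": 1}.items()))
  assert names == ("a", "b") and args == (1, 2)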
def dismantle_func_graph(func_graph):
"""Removes reference cycles in `func_graph` FuncGraph.
Helpful for making sure the garbage collector doesn't need to run when
the FuncGraph goes out of scope, e.g. in tests using defun with
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).
Args:
func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable
after this function.
"""
func_graph.clear_captures()
ops.dismantle_graph(func_graph)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/func_graph.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.device_spec."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import device_spec
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
TEST_V1_AND_V2 = (("v1", device_spec.DeviceSpecV1),
("v2", device_spec.DeviceSpecV2))
class DeviceSpecTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters(*TEST_V1_AND_V2)
def test_empty(self, device_spec_type):
d = device_spec_type()
self.assertEqual("", d.to_string())
d.parse_from_string("")
self.assertEqual("", d.to_string())
@parameterized.named_parameters(*TEST_V1_AND_V2)
def test_constructor(self, device_spec_type):
d = device_spec_type(job="j", replica=0, task=1,
device_type="CPU", device_index=2)
self.assertEqual("j", d.job)
self.assertEqual(0, d.replica)
self.assertEqual(1, d.task)
self.assertEqual("CPU", d.device_type)
self.assertEqual(2, d.device_index)
self.assertEqual("/job:j/replica:0/task:1/device:CPU:2", d.to_string())
d = device_spec_type(device_type="GPU", device_index=0)
self.assertEqual("/device:GPU:0", d.to_string())
  def test_to_string_legacy(self):
"""DeviceSpecV1 allows direct mutation."""
d = device_spec.DeviceSpecV1()
d.job = "foo"
self.assertEqual("/job:foo", d.to_string())
d.task = 3
self.assertEqual("/job:foo/task:3", d.to_string())
d.device_type = "CPU"
d.device_index = 0
self.assertEqual("/job:foo/task:3/device:CPU:0", d.to_string())
d.task = None
d.replica = 12
self.assertEqual("/job:foo/replica:12/device:CPU:0", d.to_string())
d.device_type = "GPU"
d.device_index = 2
self.assertEqual("/job:foo/replica:12/device:GPU:2", d.to_string())
d.device_type = "CPU"
d.device_index = 1
self.assertEqual("/job:foo/replica:12/device:CPU:1", d.to_string())
d.device_type = None
d.device_index = None
self.assertEqual("/job:foo/replica:12", d.to_string())
# Test wildcard
d = device_spec.DeviceSpecV1(job="foo", replica=12, task=3,
device_type="GPU")
self.assertEqual("/job:foo/replica:12/task:3/device:GPU:*", d.to_string())
@parameterized.named_parameters(*TEST_V1_AND_V2)
def test_replace(self, device_spec_type):
d = device_spec_type()
d = d.replace(job="foo")
self.assertEqual("/job:foo", d.to_string())
d = d.replace(task=3)
self.assertEqual("/job:foo/task:3", d.to_string())
d = d.replace(device_type="CPU", device_index=0)
self.assertEqual("/job:foo/task:3/device:CPU:0", d.to_string())
d = d.replace(task=None, replica=12)
self.assertEqual("/job:foo/replica:12/device:CPU:0", d.to_string())
d = d.replace(device_type="GPU", device_index=2)
self.assertEqual("/job:foo/replica:12/device:GPU:2", d.to_string())
d = d.replace(device_type="CPU", device_index=1)
self.assertEqual("/job:foo/replica:12/device:CPU:1", d.to_string())
d = d.replace(device_type=None, device_index=None)
self.assertEqual("/job:foo/replica:12", d.to_string())
# Test wildcard
d = device_spec.DeviceSpecV1(job="foo", replica=12, task=3,
device_type="GPU")
self.assertEqual("/job:foo/replica:12/task:3/device:GPU:*", d.to_string())
@parameterized.named_parameters(*TEST_V1_AND_V2)
  def test_to_string(self, device_spec_type):
d = device_spec_type(job="foo")
self.assertEqual("/job:foo", d.to_string())
d = device_spec_type(job="foo", task=3)
self.assertEqual("/job:foo/task:3", d.to_string())
d = device_spec_type(job="foo", task=3, device_type="cpu", device_index=0)
self.assertEqual("/job:foo/task:3/device:CPU:0", d.to_string())
d = device_spec_type(job="foo", replica=12, device_type="cpu",
device_index=0)
self.assertEqual("/job:foo/replica:12/device:CPU:0", d.to_string())
d = device_spec_type(job="foo", replica=12, device_type="gpu",
device_index=2)
self.assertEqual("/job:foo/replica:12/device:GPU:2", d.to_string())
d = device_spec_type(job="foo", replica=12)
self.assertEqual("/job:foo/replica:12", d.to_string())
# Test wildcard
d = device_spec_type(job="foo", replica=12, task=3, device_type="GPU")
self.assertEqual("/job:foo/replica:12/task:3/device:GPU:*", d.to_string())
def test_parse_legacy(self):
d = device_spec.DeviceSpecV1()
d.parse_from_string("/job:foo/replica:0")
self.assertEqual("/job:foo/replica:0", d.to_string())
d.parse_from_string("/replica:1/task:0/cpu:0")
self.assertEqual("/replica:1/task:0/device:CPU:0", d.to_string())
d.parse_from_string("/replica:1/task:0/device:CPU:0")
self.assertEqual("/replica:1/task:0/device:CPU:0", d.to_string())
d.parse_from_string("/job:muu/device:GPU:2")
self.assertEqual("/job:muu/device:GPU:2", d.to_string())
with self.assertRaisesRegexp(ValueError, "Cannot specify multiple"):
d.parse_from_string("/job:muu/device:GPU:2/cpu:0")
@parameterized.named_parameters(*TEST_V1_AND_V2)
def test_to_from_string(self, device_spec_type):
d = device_spec_type.from_string("/job:foo/replica:0")
self.assertEqual("/job:foo/replica:0", d.to_string())
self.assertEqual(0, d.replica)
d = device_spec_type.from_string("/replica:1/task:0/cpu:0")
self.assertEqual("/replica:1/task:0/device:CPU:0", d.to_string())
self.assertAllEqual([1, 0, "CPU", 0],
[d.replica, d.task, d.device_type, d.device_index])
d = device_spec_type.from_string("/replica:1/task:0/device:CPU:0")
self.assertEqual("/replica:1/task:0/device:CPU:0", d.to_string())
self.assertAllEqual([1, 0, "CPU", 0],
[d.replica, d.task, d.device_type, d.device_index])
d = device_spec_type.from_string("/job:muu/device:GPU:2")
self.assertEqual("/job:muu/device:GPU:2", d.to_string())
self.assertAllEqual(["muu", "GPU", 2],
[d.job, d.device_type, d.device_index])
with self.assertRaisesRegexp(ValueError, "Cannot specify multiple"):
d.parse_from_string("/job:muu/device:GPU:2/cpu:0")
def test_merge_legacy(self):
d = device_spec.DeviceSpecV1.from_string("/job:foo/replica:0")
self.assertEqual("/job:foo/replica:0", d.to_string())
d.merge_from(device_spec.DeviceSpecV1.from_string("/task:1/device:GPU:2"))
self.assertEqual("/job:foo/replica:0/task:1/device:GPU:2", d.to_string())
d = device_spec.DeviceSpecV1()
d.merge_from(device_spec.DeviceSpecV1.from_string("/task:1/cpu:0"))
self.assertEqual("/task:1/device:CPU:0", d.to_string())
d.merge_from(device_spec.DeviceSpecV1.from_string("/job:boo/device:GPU:0"))
self.assertEqual("/job:boo/task:1/device:GPU:0", d.to_string())
d.merge_from(device_spec.DeviceSpecV1.from_string("/job:muu/cpu:2"))
self.assertEqual("/job:muu/task:1/device:CPU:2", d.to_string())
d.merge_from(device_spec.DeviceSpecV1.from_string(
"/job:muu/device:MyFunnyDevice:2"))
self.assertEqual("/job:muu/task:1/device:MyFunnyDevice:2", d.to_string())
def test_merge_removed(self):
with self.assertRaises(AttributeError):
d = device_spec.DeviceSpecV2()
d.merge_from(device_spec.DeviceSpecV2.from_string("/task:1/cpu:0"))
@parameterized.named_parameters(*TEST_V1_AND_V2)
def test_combine(self, device_spec_type):
d = device_spec_type.from_string("/job:foo/replica:0")
self.assertEqual("/job:foo/replica:0", d.to_string())
d = d.make_merged_spec(
device_spec_type.from_string("/task:1/device:GPU:2"))
self.assertEqual("/job:foo/replica:0/task:1/device:GPU:2", d.to_string())
d = device_spec_type()
d = d.make_merged_spec(device_spec_type.from_string("/task:1/cpu:0"))
self.assertEqual("/task:1/device:CPU:0", d.to_string())
d = d.make_merged_spec(
device_spec_type.from_string("/job:boo/device:GPU:0"))
self.assertEqual("/job:boo/task:1/device:GPU:0", d.to_string())
d = d.make_merged_spec(device_spec_type.from_string("/job:muu/cpu:2"))
self.assertEqual("/job:muu/task:1/device:CPU:2", d.to_string())
d = d.make_merged_spec(device_spec_type.from_string(
"/job:muu/device:MyFunnyDevice:2"))
self.assertEqual("/job:muu/task:1/device:MyFunnyDevice:2", d.to_string())
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/device_spec_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module customizes `test_combinations` for Tensorflow.
Additionally it provides `generate()`, `combine()` and `times()` with Tensorflow
customizations as a default.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_combinations
class EagerGraphCombination(test_combinations.TestCombination):
"""Run the test in Graph or Eager mode. Graph is the default.
  The optional `mode` parameter controls the test's execution mode; its
  accepted values are the literals "graph" and "eager".
"""
def context_managers(self, kwargs):
# TODO(isaprykin): Switch the default to eager.
mode = kwargs.pop("mode", "graph")
if mode == "eager":
return [context.eager_mode()]
elif mode == "graph":
return [ops.Graph().as_default(), context.graph_mode()]
else:
raise ValueError(
"'mode' has to be either 'eager' or 'graph' and not {}".format(mode))
def parameter_modifiers(self):
return [test_combinations.OptionalParameter("mode")]
class TFVersionCombination(test_combinations.TestCombination):
"""Control the execution of the test in TF1.x and TF2.
  If TF2 is enabled then a TF1.x test is skipped, and vice versa.
Test targets continuously run in TF2 thanks to the tensorflow.v2 TAP target.
A test can be run in TF2 with bazel by passing --test_env=TF2_BEHAVIOR=1.
"""
def should_execute_combination(self, kwargs):
tf_api_version = kwargs.pop("tf_api_version", None)
if tf_api_version == 1 and tf2.enabled():
return (False, "Skipping a TF1.x test when TF2 is enabled.")
elif tf_api_version == 2 and not tf2.enabled():
return (False, "Skipping a TF2 test when TF2 is not enabled.")
return (True, None)
def parameter_modifiers(self):
return [test_combinations.OptionalParameter("tf_api_version")]
generate = functools.partial(
test_combinations.generate,
test_combinations=(EagerGraphCombination(), TFVersionCombination()))
combine = test_combinations.combine
times = test_combinations.times
NamedObject = test_combinations.NamedObject
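# Illustrative usage sketch (hypothetical test class, not part of this
# module): a method decorated with `generate` runs once per requested
# (mode, tf_api_version) combination, skipping versions that don't apply.
def _example_generate_usage():
  from absl.testing import parameterized
  from tensorflow.python.framework import test_util

  class _ExampleTest(test_util.TensorFlowTestCase, parameterized.TestCase):

    @generate(combine(mode=["graph", "eager"], tf_api_version=[1, 2]))
    def test_add(self):
      self.assertEqual(2, 1 + 1)  # executed under each requested context

  return _ExampleTest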
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/combinations.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Classes and functions for building TensorFlow graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Classes used when building a Graph.
from tensorflow.python.framework.device import DeviceSpec
from tensorflow.python.framework.ops import Graph
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.framework.ops import IndexedSlices
from tensorflow.python.framework.sparse_tensor import SparseTensor
from tensorflow.python.framework.sparse_tensor import SparseTensorValue
# Utilities used when building a Graph.
from tensorflow.python.framework.ops import device
from tensorflow.python.framework.ops import container
from tensorflow.python.framework.ops import name_scope
from tensorflow.python.framework.ops import op_scope
from tensorflow.python.framework.ops import colocate_with
from tensorflow.python.framework.ops import control_dependencies
from tensorflow.python.framework.ops import get_default_graph
from tensorflow.python.framework.ops import reset_default_graph
from tensorflow.python.framework.ops import GraphKeys
from tensorflow.python.framework.ops import add_to_collection
from tensorflow.python.framework.ops import add_to_collections
from tensorflow.python.framework.ops import get_collection
from tensorflow.python.framework.ops import get_collection_ref
from tensorflow.python.framework.ops import convert_to_tensor
from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices
from tensorflow.python.framework.random_seed import get_seed
from tensorflow.python.framework.random_seed import set_random_seed
from tensorflow.python.framework.sparse_tensor import convert_to_tensor_or_sparse_tensor
from tensorflow.python.framework.importer import import_graph_def
# Utilities for working with Tensors
from tensorflow.python.framework.tensor_util import make_tensor_proto
from tensorflow.python.framework.tensor_util import MakeNdarray as make_ndarray
# Needed when you define a new Op in C++.
from tensorflow.python.framework.ops import RegisterGradient
from tensorflow.python.framework.ops import NotDifferentiable
from tensorflow.python.framework.ops import NoGradient
from tensorflow.python.framework.ops import RegisterShape
from tensorflow.python.framework.tensor_shape import Dimension
from tensorflow.python.framework.tensor_shape import TensorShape
# Needed when interfacing tensorflow to new array libraries
from tensorflow.python.framework.ops import register_tensor_conversion_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.framework.dtypes import * # pylint: disable=redefined-builtin
# Load a TensorFlow plugin
from tensorflow.python.framework.load_library import *
# pylint: enable=wildcard-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/framework_lib.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of common shape functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six.moves
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
def has_fully_defined_shape(tensor):
"""Returns true if tensor has a fully defined shape."""
return isinstance(tensor, ops.EagerTensor) or tensor.shape.is_fully_defined()
def rank(tensor):
"""Return a rank if it is a tensor, else return None."""
if isinstance(tensor, ops.Tensor):
return tensor._rank() # pylint: disable=protected-access
return None
def scalar_shape(unused_op):
"""Shape function for ops that output a scalar value."""
return [tensor_shape.TensorShape([])]
def unchanged_shape(op):
"""Shape function for ops that output a tensor like their first input."""
return [op.inputs[0].get_shape()]
def unchanged_shape_with_rank(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: The exact rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank(rank)]
return _ShapeFunction
def unchanged_shape_with_rank_at_least(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: A lower bound on the rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank_at_least(rank)]
return _ShapeFunction
def unchanged_shape_with_rank_at_most(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: An upper bound on the rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank_at_most(rank)]
return _ShapeFunction
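# Illustrative example of the rank checks these shape functions rely on:
# `TensorShape.with_rank` returns the shape when compatible and raises
# ValueError otherwise.
def _example_rank_constraints():
  s = tensor_shape.TensorShape([3, 4])
  assert s.with_rank(2).as_list() == [3, 4]
  try:
    s.with_rank(3)  # incompatible rank is rejected
  except ValueError:
    pass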
def matmul_shape(op):
"""Shape function for a MatMul op."""
a_shape = op.inputs[0].get_shape().with_rank(2)
transpose_a = op.get_attr("transpose_a")
b_shape = op.inputs[1].get_shape().with_rank(2)
transpose_b = op.get_attr("transpose_b")
output_rows = a_shape[1] if transpose_a else a_shape[0]
output_cols = b_shape[0] if transpose_b else b_shape[1]
inner_a = a_shape[0] if transpose_a else a_shape[1]
inner_b = b_shape[1] if transpose_b else b_shape[0]
inner_a.assert_is_compatible_with(inner_b)
return [tensor_shape.TensorShape([output_rows, output_cols])]
def get_conv_output_size(input_size, filter_size, strides, padding_type):
"""Returns the spatial size of a n-d convolution/pooling output."""
input_size = tuple([tensor_shape.as_dimension(x).value for x in input_size])
filter_size = tuple([tensor_shape.as_dimension(x).value for x in filter_size])
strides = [int(x) for x in strides]
if all(x == 1 for x in input_size) and all(x == 1 for x in filter_size):
return input_size
if any(x is not None and y is not None and x > y for x, y in
zip(filter_size, input_size)):
raise ValueError("Filter must not be larger than the input: "
"Filter: %r Input: %r" % (filter_size, input_size))
if padding_type == b"VALID":
def _valid(in_dim, k_dim, s_dim):
if in_dim is not None and k_dim is not None:
return (in_dim - k_dim + s_dim) // s_dim
else:
return None
output_size = [
_valid(in_dim, k_dim, s_dim)
for in_dim, k_dim, s_dim in zip(input_size, filter_size, strides)
]
elif padding_type == b"SAME":
def _same(in_dim, s_dim):
if in_dim is not None:
return (in_dim + s_dim - 1) // s_dim
else:
return None
output_size = [_same(in_dim, s_dim)
for in_dim, s_dim in zip(input_size, strides)]
else:
raise ValueError("Invalid padding: %r" % padding_type)
return tuple(output_size)
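# Worked example (illustrative): a 5-wide input with a 3-wide filter at
# stride 2 yields (5 - 3 + 2) // 2 = 2 output columns under VALID padding
# and (5 + 2 - 1) // 2 = 3 under SAME padding.
def _example_conv_output_size():
  assert get_conv_output_size((5,), (3,), (2,), b"VALID") == (2,)
  assert get_conv_output_size((5,), (3,), (2,), b"SAME") == (3,)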
def get2d_conv_output_size(input_height, input_width, filter_height,
filter_width, row_stride, col_stride, padding_type):
"""Returns the number of rows and columns in a convolution/pooling output."""
return get_conv_output_size((input_height, input_width),
(filter_height, filter_width),
(row_stride, col_stride), padding_type)
def conv2d_shape(op):
"""Shape function for a Conv2D op.
This op has two inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* filter, a 4D tensor with shape = [filter_rows, filter_cols,
depth_in, depth_out]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "padding" and "strides" attrs.
Args:
op: A Conv2D Operation.
Returns:
A list containing the Shape of the Conv2D output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
filter_shape = op.inputs[1].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = filter_shape[0]
filter_cols = filter_shape[1]
depth_out = filter_shape[3]
# Check that the input depths are compatible.
input_shape[3].assert_is_compatible_with(filter_shape[2])
if data_format == b"NCHW":
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth_out]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
def depthwise_conv2d_native_shape(op):
"""Shape function for a DepthwiseConv2D op.
This op has two inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* filter, a 4D tensor with shape = [filter_rows, filter_cols,
depth_in, depthwise_multiplier]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_in*depthwise_multiplier], where out_rows and out_cols depend
on the value of the op's "padding" and "strides" attrs.
Args:
op: A DepthwiseConv2dNative Operation.
Returns:
A list containing the Shape of the DepthwiseConv2DNative output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
filter_shape = op.inputs[1].get_shape().with_rank(4)
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = filter_shape[0]
filter_cols = filter_shape[1]
depth_out = filter_shape[3] * filter_shape[2]
# Check that the input depths are compatible.
input_shape[3].assert_is_compatible_with(filter_shape[2])
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
if stride_r != stride_c:
# TODO(shlens): Add support for this.
raise ValueError("Current implementation only supports equal length "
"strides in the row and column dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
stride = stride_r
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride, stride,
padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
def separable_conv2d_shape(op):
"""Shape function for a SeparableConv2D op.
This op has three inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* depthwise_filter, a 4D tensor with shape = [filter_rows,
filter_cols, depth_in, depth_multiplier]
* pointwise_filter, a 4D tensor with shape = [1, 1, depth_in *
depth_multiplier, depth_out]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "padding" and "strides" attrs.
Args:
op: A SeparableConv2D Operation.
Returns:
A list containing the Shape of the SeparableConv2D output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
depthwise_filter_shape = op.inputs[1].get_shape().merge_with(
tensor_shape.TensorShape([None, None, input_shape[3], None]))
pointwise_depth_in = depthwise_filter_shape[2] * depthwise_filter_shape[3]
pointwise_filter_shape = op.inputs[2].get_shape().merge_with(
tensor_shape.TensorShape([1, 1, pointwise_depth_in, None]))
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = depthwise_filter_shape[0]
filter_cols = depthwise_filter_shape[1]
depth_out = pointwise_filter_shape[3]
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
if stride_r != stride_c:
# TODO(shlens): Add support for this.
raise ValueError("Current implementation only supports equal length "
"strides in the row and column dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
stride = stride_r
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride, stride,
padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
def avg_pool_shape(op):
"""Shape function for an AvgPool op.
This op has one input:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "ksize", "strides", and "padding" attrs.
Args:
op: An AvgPool Operation.
Returns:
A single-element list containing the Shape of the AvgPool output.
Raises:
ValueError: If the shape of the input is invalid or incompatible with
the values of the attrs.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
if data_format == b"NCHW":
ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
depth = input_shape[3]
if ksize_b != 1 or ksize_d != 1:
raise ValueError("Current implementation does not support pooling "
"in the batch and depth dimensions.")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not support strides "
"in the batch and depth dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
ksize_c, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
def max_pool_shape(op):
"""Shape function for a MaxPool op.
This op has one input:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows, out_cols, and depth_out depend
on the value of the op's "ksize", "strides", and "padding" attrs.
Args:
op: A MaxPool Operation.
Returns:
A single-element list containing the Shape of the MaxPool output.
Raises:
ValueError: If the shape of the input is invalid or incompatible with
the values of the attrs.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
if data_format == b"NCHW":
ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
depth = input_shape[3]
if ksize_b != 1:
raise ValueError("Current implementation does not support pooling "
"in the batch dimension.")
if stride_b != 1:
raise ValueError("Current implementation does not support strides "
"in the batch dimension.")
if not ((ksize_r == 1 and ksize_c == 1) or ksize_d == 1):
raise ValueError("MaxPooling supports exactly one of pooling across depth "
"or pooling across width/height.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
if ksize_d == 1:
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
ksize_c, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth]
else:
if depth % ksize_d > 0:
raise ValueError("Depthwise max pooling requires the depth window "
"to evenly divide the input depth.")
if stride_d != ksize_d:
raise ValueError("Depthwise max pooling requires the depth window "
"to equal the depth stride.")
output_shape = [batch_size, in_rows, in_cols, depth // ksize_d]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
def no_outputs(unused_op):
"""Shape function for use with ops that have no outputs."""
return []
def unknown_shape(op):
"""Shape function for use with ops whose output shapes are unknown."""
return [tensor_shape.unknown_shape() for _ in op.outputs]
def _broadcast_shape_helper(shape_x, shape_y):
"""Helper functions for is_broadcast_compatible and broadcast_shape.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
Returns None if the shapes are not broadcast compatible,
a list of the broadcast dimensions otherwise.
"""
# To compute the broadcasted dimensions, we zip together shape_x and shape_y,
# and pad with 1 to make them the same length.
broadcasted_dims = reversed(list(six.moves.zip_longest(
reversed(shape_x.dims),
reversed(shape_y.dims),
fillvalue=tensor_shape.Dimension(1))))
# Next we combine the dimensions according to the numpy broadcasting rules.
# http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
return_dims = []
for (dim_x, dim_y) in broadcasted_dims:
if dim_x.value is None or dim_y.value is None:
# One or both dimensions is unknown. If either dimension is greater than
# 1, we assume that the program is correct, and the other dimension will
# be broadcast to match it.
# TODO(mrry): If we eliminate the shape checks in C++, we must still
# assert that the unknown dim is either 1 or the same as the known dim.
if dim_x.value is not None and dim_x.value > 1:
return_dims.append(dim_x)
elif dim_y.value is not None and dim_y.value > 1:
return_dims.append(dim_y)
else:
return_dims.append(None)
elif dim_x.value == 1:
# We will broadcast dim_x to dim_y.
return_dims.append(dim_y)
elif dim_y.value == 1:
# We will broadcast dim_y to dim_x.
return_dims.append(dim_x)
elif dim_x.value == dim_y.value:
# The dimensions are compatible, so output is the same size in that
# dimension.
return_dims.append(dim_x.merge_with(dim_y))
else:
return None
return return_dims
def is_broadcast_compatible(shape_x, shape_y):
"""Returns True if `shape_x` and `shape_y` are broadcast compatible.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
True if a shape exists that both `shape_x` and `shape_y` can be broadcasted
to. False otherwise.
"""
if shape_x.ndims is None or shape_y.ndims is None:
return False
return _broadcast_shape_helper(shape_x, shape_y) is not None
def broadcast_shape(shape_x, shape_y):
"""Returns the broadcasted shape between `shape_x` and `shape_y`.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
    ValueError: If the two shapes cannot be broadcast.
"""
if shape_x.ndims is None or shape_y.ndims is None:
return tensor_shape.unknown_shape()
return_dims = _broadcast_shape_helper(shape_x, shape_y)
if return_dims is None:
raise ValueError("Incompatible shapes for broadcasting: %s and %s"
% (shape_x, shape_y))
return tensor_shape.TensorShape(return_dims)
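# Illustrative example: shapes are right-aligned, padded with 1s, and merged
# by the numpy rules, so [2, 1, 5] and [3, 1] broadcast to [2, 3, 5], while
# [2] and [3] are incompatible.
def _example_broadcast_shape():
  x = tensor_shape.TensorShape([2, 1, 5])
  y = tensor_shape.TensorShape([3, 1])
  assert broadcast_shape(x, y).as_list() == [2, 3, 5]
  assert not is_broadcast_compatible(tensor_shape.TensorShape([2]),
                                     tensor_shape.TensorShape([3]))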
def call_cpp_shape_fn(op, require_shape_fn=True):
"""A shape function that delegates to the registered C++ shape function.
Args:
op: the node in the graph for which to compute output shapes.
require_shape_fn: If true, and the C++ shape function is not registered
in the current binary then an exception is raised; otherwise, if the
C++ shape function is not registered then unknown_shape is used.
Returns:
A dictionary with the following keys:
shapes: A TensorShape list of the output shapes of the op, as computed
using the C++ shape inference function registered for the op.
handle_shapes: A TensorShape list of the shapes for handle outputs, if
any.
handle_dtypes: A list of DataType enums for the handle outputs, if any.
Raises:
ValueError: If the C++ shape function returned an error (e.g. because the
shapes of the inputs are of the wrong rank or otherwise incompatible
according to the shape function).
RuntimeError: If the C++ shape function is not registered and
<require_shape_fn> is True.
"""
if op.type == "Const":
    # To avoid serializing large constants, we special-case Const ops
    # here, even though they have a C++ shape function. Once Python
    # calls the C API directly, we should be able to remove this.
return {
"shapes": [tensor_shape.TensorShape(op.get_attr("value").tensor_shape)],
"handle_data": [None]
}
input_tensors_needed = []
input_tensors_as_shapes_needed = []
while True:
res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
input_tensors_as_shapes_needed,
require_shape_fn)
if not isinstance(res, dict):
# Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).
return res
# See if we need to evaluate some inputs.
if not res["inputs_needed"]:
return res
p = cpp_shape_inference_pb2.CppShapeInferenceInputsNeeded()
p = p.FromString(res["inputs_needed"])
changed = False
for idx in p.input_tensors_needed:
if idx not in input_tensors_needed:
input_tensors_needed.append(idx)
changed = True
for idx in p.input_tensors_as_shapes_needed:
if idx not in input_tensors_as_shapes_needed:
input_tensors_as_shapes_needed.append(idx)
changed = True
if not changed:
return res
def _call_cpp_shape_fn_impl(
op, input_tensors_needed, input_tensors_as_shapes_needed, require_shape_fn):
"""Core implementation of call_cpp_shape_fn."""
graph_def_version = op.graph.graph_def_versions.producer
node_def_str = op.node_def.SerializeToString()
def tensor_to_inference_result(t):
r = cpp_shape_inference_pb2.CppShapeInferenceResult()
r.shape.CopyFrom(t.get_shape().as_proto())
# pylint: disable=protected-access
if t._handle_data is not None:
r.handle_data.CopyFrom(t._handle_data)
# pylint: enable=protected-access
return r.SerializeToString()
input_shapes = [tensor_to_inference_result(i) for i in op.inputs]
  input_tensors = [None for _ in input_shapes]
for idx in input_tensors_needed:
v = tensor_util.constant_value(op.inputs[idx])
if v is not None:
input_tensors[idx] = np.asarray(v)
serialized_unknown_shape = (
tensor_shape.TensorShape(None).as_proto().SerializeToString())
  arr = [serialized_unknown_shape for _ in input_shapes]
for idx in input_tensors_as_shapes_needed:
s = tensor_util.constant_value_as_shape(op.inputs[idx])
if s is not None:
arr[idx] = s.as_proto().SerializeToString()
input_tensors_as_shapes = arr
missing_shape_fn = False
try:
output = pywrap_tensorflow.RunCppShapeInference(
graph_def_version, node_def_str, input_shapes, input_tensors,
input_tensors_as_shapes)
except errors.InvalidArgumentError as err:
if err.message.startswith("No shape inference function exists for op"):
missing_shape_fn = True
else:
raise ValueError(err.message)
if missing_shape_fn:
if require_shape_fn:
raise RuntimeError(
"No C++ shape function registered for standard op: %s" % op.type)
return unknown_shape(op)
output_shapes = output[:-1]
# Convert TensorShapeProto values in output_shapes.
result_protos = [
cpp_shape_inference_pb2.CppShapeInferenceResult().FromString(s)
for s in output_shapes
]
result = [r.shape for r in result_protos]
result_handle_data = [
r.handle_data if r.handle_data.is_set else None for r in result_protos
]
return {
"shapes": result,
"handle_data": result_handle_data,
"inputs_needed": output[-1]
}
# pylint: disable=protected-access
ops._set_call_cpp_shape_fn(call_cpp_shape_fn)
# pylint: enable=protected-access
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/common_shapes.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""For seeding individual ops based on a graph-level seed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
DEFAULT_GRAPH_SEED = 87654321
_MAXINT32 = 2**31 - 1
def _truncate_seed(seed):
return seed % _MAXINT32 # Truncate to fit into 32-bit integer
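# Illustrative example: seeds are reduced modulo 2**31 - 1 so they fit the
# 32-bit integers the kernels expect.
def _example_truncate_seed():
  assert _truncate_seed(2**31) == 1  # 2**31 mod (2**31 - 1) == 1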
@tf_export(v1=['random.get_seed', 'get_seed'])
@deprecation.deprecated_endpoints('get_seed')
def get_seed(op_seed):
"""Returns the local seeds an operation should use given an op-specific seed.
  Given an operation-specific seed, `op_seed`, this helper function returns
  two seeds derived from the graph-level and op-level seeds. Many random
  operations internally use the two seeds to allow the user to change the
  seed globally for a graph, or only for specific operations.
For details on how the graph-level seed interacts with op seeds, see
`tf.compat.v1.random.set_random_seed`.
Args:
op_seed: integer.
Returns:
A tuple of two integers that should be used for the local seed of this
operation.
"""
eager = context.executing_eagerly()
if eager:
global_seed = context.global_seed()
else:
global_seed = ops.get_default_graph().seed
if global_seed is not None:
if op_seed is None:
# pylint: disable=protected-access
if hasattr(ops.get_default_graph(), '_seed_used'):
ops.get_default_graph()._seed_used = True
if eager:
op_seed = context.internal_operation_seed()
else:
op_seed = ops.get_default_graph()._last_id
seeds = _truncate_seed(global_seed), _truncate_seed(op_seed)
else:
if op_seed is not None:
seeds = DEFAULT_GRAPH_SEED, _truncate_seed(op_seed)
else:
seeds = None, None
# Avoid (0, 0) as the C++ ops interpret it as nondeterminism, which would
# be unexpected since Python docs say nondeterminism is (None, None).
if seeds == (0, 0):
return (0, _MAXINT32)
return seeds
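# Illustrative sketch (assumes graph mode, the TF 1.x default): with a
# graph-level seed set and no op-level seed, `get_seed` fills in a
# deterministic per-op seed instead of returning None.
def _example_get_seed():
  ops.get_default_graph().seed = 42
  graph_seed, op_seed = get_seed(None)
  assert graph_seed == 42     # already below _MAXINT32, so unchanged
  assert op_seed is not None  # derived deterministically from the graph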
@tf_export(v1=['random.set_random_seed', 'set_random_seed'])
def set_random_seed(seed):
"""Sets the graph-level random seed for the default graph.
Operations that rely on a random seed actually derive it from two seeds:
the graph-level and operation-level seeds. This sets the graph-level seed.
  Its interactions with operation-level seeds are as follows:
1. If neither the graph-level nor the operation seed is set:
A random seed is used for this op.
2. If the graph-level seed is set, but the operation seed is not:
The system deterministically picks an operation seed in conjunction
with the graph-level seed so that it gets a unique random sequence.
3. If the graph-level seed is not set, but the operation seed is set:
A default graph-level seed and the specified operation seed are used to
determine the random sequence.
4. If both the graph-level and the operation seed are set:
Both seeds are used in conjunction to determine the random sequence.
To illustrate the user-visible effects, consider these examples:
To generate different sequences across sessions, set neither
graph-level nor op-level seeds:
```python
a = tf.random.uniform([1])
b = tf.random.normal([1])
print("Session 1")
with tf.compat.v1.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.compat.v1.Session() as sess2:
print(sess2.run(a)) # generates 'A3'
print(sess2.run(a)) # generates 'A4'
print(sess2.run(b)) # generates 'B3'
print(sess2.run(b)) # generates 'B4'
```
To generate the same repeatable sequence for an op across sessions, set the
seed for the op:
```python
a = tf.random.uniform([1], seed=1)
b = tf.random.normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequence of values for 'a', but different sequences of values for 'b'.
print("Session 1")
with tf.compat.v1.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.compat.v1.Session() as sess2:
print(sess2.run(a)) # generates 'A1'
print(sess2.run(a)) # generates 'A2'
print(sess2.run(b)) # generates 'B3'
print(sess2.run(b)) # generates 'B4'
```
  To make the random sequences generated by all ops repeatable across
sessions, set a graph-level seed:
```python
tf.compat.v1.random.set_random_seed(1234)
a = tf.random.uniform([1])
b = tf.random.normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequences of 'a' and 'b'.
print("Session 1")
with tf.compat.v1.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.compat.v1.Session() as sess2:
print(sess2.run(a)) # generates 'A1'
print(sess2.run(a)) # generates 'A2'
print(sess2.run(b)) # generates 'B1'
print(sess2.run(b)) # generates 'B2'
```
Args:
seed: integer.
"""
if context.executing_eagerly():
context.set_global_seed(seed)
else:
ops.get_default_graph().seed = seed
@tf_export('random.set_seed', v1=[])
def set_seed(seed):
"""Sets the graph-level random seed.
Operations that rely on a random seed actually derive it from two seeds:
the graph-level and operation-level seeds. This sets the graph-level seed.
  Its interactions with operation-level seeds are as follows:
1. If neither the graph-level nor the operation seed is set:
A random seed is used for this op.
2. If the graph-level seed is set, but the operation seed is not:
The system deterministically picks an operation seed in conjunction
with the graph-level seed so that it gets a unique random sequence.
3. If the graph-level seed is not set, but the operation seed is set:
A default graph-level seed and the specified operation seed are used to
determine the random sequence.
4. If both the graph-level and the operation seed are set:
Both seeds are used in conjunction to determine the random sequence.
To illustrate the user-visible effects, consider these examples:
To generate different sequences across sessions, set neither
graph-level nor op-level seeds:
```python
a = tf.random.uniform([1])
b = tf.random.normal([1])
print("Session 1")
with tf.compat.v1.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.compat.v1.Session() as sess2:
print(sess2.run(a)) # generates 'A3'
print(sess2.run(a)) # generates 'A4'
print(sess2.run(b)) # generates 'B3'
print(sess2.run(b)) # generates 'B4'
```
To generate the same repeatable sequence for an op across sessions, set the
seed for the op:
```python
a = tf.random.uniform([1], seed=1)
b = tf.random.normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequence of values for 'a', but different sequences of values for 'b'.
print("Session 1")
with tf.compat.v1.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.compat.v1.Session() as sess2:
print(sess2.run(a)) # generates 'A1'
print(sess2.run(a)) # generates 'A2'
print(sess2.run(b)) # generates 'B3'
print(sess2.run(b)) # generates 'B4'
```
  To make the random sequences generated by all ops repeatable across
sessions, set a graph-level seed:
```python
tf.random.set_seed(1234)
a = tf.random.uniform([1])
b = tf.random.normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequences of 'a' and 'b'.
print("Session 1")
with tf.compat.v1.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.compat.v1.Session() as sess2:
print(sess2.run(a)) # generates 'A1'
print(sess2.run(a)) # generates 'A2'
print(sess2.run(b)) # generates 'B1'
print(sess2.run(b)) # generates 'B2'
```
Args:
seed: integer.
"""
# TODO(go/tf2-random): change doc, update to match design doc
set_random_seed(seed)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/random_seed.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the system configuration methods work properly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
def reset_eager(fn):
def wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
finally:
# Reset the context.
context._context = None
ops.enable_eager_execution_internal()
assert context._context is not None
return wrapper
class ConfigTest(test.TestCase, parameterized.TestCase):
@test_util.run_gpu_only
@reset_eager
def testDevicePolicy(self):
self.assertEqual(context.DEVICE_PLACEMENT_SILENT,
context.context().device_policy)
# If no op has been executed we should be able to set the device policy as
# well as any init-time configs.
config.set_intra_op_parallelism_threads(1)
config.set_device_policy('silent')
config.set_intra_op_parallelism_threads(2)
context.ensure_initialized()
def copy_tensor(dtype=dtypes.int32):
cpu_tensor = constant_op.constant(1, dtype=dtype)
gpu_tensor = cpu_tensor.gpu()
self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)
config.set_device_policy('silent')
self.assertEqual(config.get_device_policy(), 'silent')
self.assertEqual(context.DEVICE_PLACEMENT_SILENT,
context.context().device_policy)
copy_tensor()
config.set_device_policy('silent_for_int32')
self.assertEqual(config.get_device_policy(), 'silent_for_int32')
self.assertEqual(context.DEVICE_PLACEMENT_SILENT_FOR_INT32,
context.context().device_policy)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'Tensors on conflicting devices'):
copy_tensor(dtypes.float32)
copy_tensor()
config.set_device_policy('warn')
self.assertEqual(config.get_device_policy(), 'warn')
self.assertEqual(context.DEVICE_PLACEMENT_WARN,
context.context().device_policy)
copy_tensor()
config.set_device_policy('explicit')
self.assertEqual(config.get_device_policy(), 'explicit')
self.assertEqual(context.DEVICE_PLACEMENT_EXPLICIT,
context.context().device_policy)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'Tensors on conflicting devices'):
copy_tensor()
config.set_device_policy(None)
self.assertEqual(config.get_device_policy(), 'silent')
@reset_eager
def testExecutionMode(self):
self.assertTrue(config.get_synchronous_execution())
self.assertEqual(context.SYNC, context.context().execution_mode)
# If no op has been executed we should be able to set the execution mode as
# well as any init-time configs.
config.set_intra_op_parallelism_threads(1)
config.set_synchronous_execution(False)
config.set_intra_op_parallelism_threads(2)
config.set_synchronous_execution(True)
self.assertTrue(config.get_synchronous_execution())
self.assertEqual(context.SYNC, context.context().execution_mode)
config.set_synchronous_execution(False)
self.assertFalse(config.get_synchronous_execution())
self.assertEqual(context.ASYNC, context.context().execution_mode)
@reset_eager
def testIntraOpParallelismThreads(self):
config.set_intra_op_parallelism_threads(10)
self.assertEqual(
config.get_intra_op_parallelism_threads(),
context.context().intra_op_parallelism_threads)
context.ensure_initialized()
with self.assertRaises(RuntimeError):
config.set_intra_op_parallelism_threads(1)
config.set_intra_op_parallelism_threads(10)
@reset_eager
def testInterOpParallelismThreads(self):
config.set_inter_op_parallelism_threads(10)
self.assertEqual(
config.get_inter_op_parallelism_threads(),
context.context().inter_op_parallelism_threads)
context.ensure_initialized()
with self.assertRaises(RuntimeError):
config.set_inter_op_parallelism_threads(1)
config.set_inter_op_parallelism_threads(10)
@test_util.run_gpu_only
@reset_eager
def testSoftPlacement(self):
if context.executing_eagerly():
self.assertTrue(config.get_soft_device_placement())
else:
self.assertFalse(config.get_soft_device_placement())
@def_function.function
def mod():
with ops.device('/device:GPU:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(1.0)
return math_ops.mod(a, b)
config.set_soft_device_placement(True)
self.assertEqual(config.get_soft_device_placement(), True)
self.assertEqual(
config.get_soft_device_placement(),
context.context().soft_device_placement)
# Since soft placement is enabled, the mod operation should fall back to CPU
mod()
config.set_soft_device_placement(False)
self.assertEqual(config.get_soft_device_placement(), False)
self.assertEqual(
config.get_soft_device_placement(),
context.context().soft_device_placement)
# Since soft placement is disabled, the mod operation should fail on GPU
with self.assertRaises(errors.InvalidArgumentError):
mod()
@reset_eager
def testLogDevicePlacement(self):
self.assertFalse(context.get_log_device_placement())
context.set_log_device_placement(True)
self.assertEqual(context.get_log_device_placement(), True)
self.assertEqual(
context.get_log_device_placement(),
context.context().log_device_placement)
context.set_log_device_placement(False)
self.assertEqual(context.get_log_device_placement(), False)
self.assertEqual(
context.get_log_device_placement(),
context.context().log_device_placement)
context.ensure_initialized()
with self.assertRaises(RuntimeError):
context.set_log_device_placement(True)
# If setting the log device placement is a no-op, it should not throw a
# runtime exception.
context.set_log_device_placement(False)
@test_util.run_gpu_only
@reset_eager
def testJit(self):
self.assertEqual(config.get_optimizer_jit(), False)
# The following function should cause op fusion to occur. However, there is
# unfortunately no straightforward way to ensure this, so we settle for
# creating a test that can trigger JIT compilation.
@def_function.function
def fun(a, b):
c = a * b
d = c + a
return d
a = constant_op.constant([2., 2.])
b = constant_op.constant([2., 2.])
self.evaluate(fun(a, b))
config.set_optimizer_jit(True)
self.assertEqual(config.get_optimizer_jit(), True)
self.assertEqual(config.get_optimizer_jit(),
context.context().optimizer_jit)
self.evaluate(fun(a, b))
config.set_optimizer_jit(False)
self.assertEqual(config.get_optimizer_jit(), False)
self.assertEqual(config.get_optimizer_jit(),
context.context().optimizer_jit)
self.evaluate(fun(a, b))
@parameterized.named_parameters(
('LayoutOptimizer', 'layout_optimizer'),
('ConstantFolding', 'constant_folding'),
('ShapeOptimization', 'shape_optimization'),
('Remapping', 'remapping'),
('ArithmeticOptimization', 'arithmetic_optimization'),
('DependencyOptimization', 'dependency_optimization'),
('LoopOptimization', 'loop_optimization'),
('FunctionOptimization', 'function_optimization'),
('DebugStripper', 'debug_stripper'),
('ScopedAllocatorOptimization', 'scoped_allocator_optimization'),
('ImplementationSelector', 'implementation_selector'),
('AutoMixedPrecision', 'auto_mixed_precision'))
@reset_eager
def testOptimizerToggleOption(self, field):
# TODO(b/128531235): Improve testing of option
options = config.get_optimizer_experimental_options()
self.assertIsNone(options.get(field))
config.set_optimizer_experimental_options({field: True})
options[field] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
config.set_optimizer_experimental_options({field: False})
options[field] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
@parameterized.named_parameters(
('DisableModelPruning', 'disable_model_pruning'),
('DisableMetaOptimizer', 'disable_meta_optimizer'))
@reset_eager
def testOptimizerBoolOption(self, field):
# TODO(b/128531235): Improve testing of option
options = config.get_optimizer_experimental_options()
self.assertFalse(options.get(field))
config.set_optimizer_experimental_options({field: True})
options[field] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
config.set_optimizer_experimental_options({field: False})
options[field] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
@test_util.run_gpu_only
@reset_eager
def testOptimizerToggleOptionPinToHost(self):
options = config.get_optimizer_experimental_options()
self.assertIsNone(options.get('pin_to_host_optimization'))
@def_function.function
def fun():
op = test_ops.device_placement_op()
return op
# Force optimizer to run for all graphs
config.set_optimizer_experimental_options({'min_graph_nodes': -1})
options['min_graph_nodes'] = -1
# Since pin to host is disabled, the operation should go on GPU
gpu = self.evaluate(fun())
self.assertIn(compat.as_bytes('GPU'), gpu)
config.set_optimizer_experimental_options(
{'pin_to_host_optimization': True})
options['pin_to_host_optimization'] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
# Since pin to host is enabled, the operation should go on CPU
cpu = self.evaluate(fun())
self.assertIn(compat.as_bytes('CPU'), cpu)
config.set_optimizer_experimental_options(
{'pin_to_host_optimization': False})
options['pin_to_host_optimization'] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
# Since pin to host is disabled again, the operation should go on GPU
gpu2 = self.evaluate(fun())
self.assertIn(compat.as_bytes('GPU'), gpu2)
class DeviceTest(test.TestCase):
@reset_eager
def testPhysicalDevices(self):
cpus = config.list_physical_devices('CPU')
self.assertGreater(len(cpus), 0)
if test_util.is_gpu_available():
gpus = config.list_physical_devices('GPU')
self.assertGreater(len(gpus), 0)
@reset_eager
def testCpuMultiple(self):
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
config.set_virtual_device_configuration(cpus[0], [
context.VirtualDeviceConfiguration(),
context.VirtualDeviceConfiguration()
])
context.ensure_initialized()
vcpus = config.list_logical_devices('CPU')
self.assertEqual(len(vcpus), 2)
with ops.device('/device:CPU:0'):
a = constant_op.constant(1.0)
self.evaluate(a)
with ops.device('/device:CPU:1'):
b = constant_op.constant(1.0)
self.evaluate(b)
with self.assertRaisesRegexp(RuntimeError, 'unknown device'):
with ops.device('/device:CPU:2'):
c = constant_op.constant(1.0)
self.evaluate(c)
# Ensure we can place ops on each of the device names
for vcpu in vcpus:
with ops.device(vcpu.name):
d = constant_op.constant(1.0)
self.evaluate(d)
# Modifying the CPU configuration is not supported
with self.assertRaisesRegexp(RuntimeError, 'cannot be modified'):
config.set_virtual_device_configuration(cpus[0], [
context.VirtualDeviceConfiguration(),
context.VirtualDeviceConfiguration(),
context.VirtualDeviceConfiguration()
])
# Setting the same CPU configuration is fine
config.set_virtual_device_configuration(cpus[0], [
context.VirtualDeviceConfiguration(),
context.VirtualDeviceConfiguration()
])
@test_util.run_gpu_only
@reset_eager
def testGpuNone(self):
gpus = config.list_physical_devices('GPU')
self.assertGreater(len(gpus), 0)
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
self.assertEqual(len(config.get_visible_devices('CPU')), 1)
self.assertGreater(len(config.get_visible_devices('GPU')), 0)
config.set_visible_devices(cpus[0])
self.assertEqual(len(config.get_visible_devices('CPU')), 1)
self.assertEqual(len(config.get_visible_devices('GPU')), 0)
with self.assertRaisesRegexp(RuntimeError, 'unknown device'):
with ops.device('/device:GPU:0'):
a = constant_op.constant(1.0)
self.evaluate(a)
# Modifying the visible devices is not supported
with self.assertRaisesRegexp(RuntimeError, 'cannot be modified'):
config.set_visible_devices(gpus)
# Setting the same visible devices is fine
config.set_visible_devices(cpus[0])
@reset_eager
def testGpuMultiple(self):
gpus = config.list_physical_devices('GPU')
if len(gpus) < 2:
self.skipTest('Need at least 2 GPUs')
context.ensure_initialized()
for i in range(0, len(gpus)):
with ops.device('/device:GPU:' + str(i)):
a = constant_op.constant(1.0)
self.evaluate(a)
with self.assertRaisesRegexp(RuntimeError, 'unknown device'):
with ops.device('/device:GPU:' + str(len(gpus))):
a = constant_op.constant(1.0)
self.evaluate(a)
@test_util.run_gpu_only
@reset_eager
def testVirtualGpu(self):
gpus = config.list_physical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
self.assertIsNone(config.get_virtual_device_configuration(gpus[-1]))
config.set_virtual_device_configuration(gpus[-1], [
context.VirtualDeviceConfiguration(memory_limit=10),
context.VirtualDeviceConfiguration(memory_limit=10)
])
self.assertEqual(len(config.get_virtual_device_configuration(gpus[-1])), 2)
logical_gpus = config.list_logical_devices('GPU')
self.assertEqual(len(logical_gpus), len(gpus) + 1)
for i in range(0, len(logical_gpus)):
with ops.device('/device:GPU:' + str(i)):
a = constant_op.constant(1.0)
self.evaluate(a)
with self.assertRaisesRegexp(RuntimeError, 'unknown device'):
with ops.device('/device:GPU:' + str(len(logical_gpus))):
a = constant_op.constant(1.0)
self.evaluate(a)
# Modifying the GPU configuration is not supported
with self.assertRaisesRegexp(RuntimeError, 'cannot be modified'):
config.set_virtual_device_configuration(gpus[-1], [
context.VirtualDeviceConfiguration(memory_limit=20),
context.VirtualDeviceConfiguration(memory_limit=20)
])
with self.assertRaisesRegexp(RuntimeError, 'cannot be modified'):
config.set_virtual_device_configuration(gpus[-1], [
context.VirtualDeviceConfiguration(memory_limit=10),
context.VirtualDeviceConfiguration(memory_limit=10),
context.VirtualDeviceConfiguration(memory_limit=10)
])
# Setting the same GPU configuration is fine
config.set_virtual_device_configuration(gpus[-1], [
context.VirtualDeviceConfiguration(memory_limit=10),
context.VirtualDeviceConfiguration(memory_limit=10)
])
@test_util.run_gpu_only
@reset_eager
def testGpuGrowth(self):
gpus = config.list_physical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
self.assertIsNone(config.get_memory_growth(gpus[-1]))
for gpu in gpus:
config.set_memory_growth(gpu, True)
c = context.context().config
self.assertTrue(c.gpu_options.allow_growth)
logical_gpus = config.list_logical_devices('GPU')
self.assertEqual(len(logical_gpus), len(gpus))
# Modifying the GPU configuration is not supported
with self.assertRaisesRegexp(RuntimeError, 'cannot be modified'):
for gpu in gpus:
config.set_memory_growth(gpu, False)
# Setting the same GPU configuration is fine
for gpu in gpus:
config.set_memory_growth(gpu, True)
@test_util.run_gpu_only
@reset_eager
def testGpuInvalidConfig(self):
gpus = config.list_physical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
if len(gpus) > 1:
# Reading the config should raise while the other GPUs are left unconfigured
config.set_memory_growth(gpus[0], True)
with self.assertRaisesRegexp(ValueError, 'cannot differ'):
c = context.context().config
# If we limit visibility to GPU 0, growth is fine
config.set_visible_devices(gpus[0], 'GPU')
c = context.context().config
self.assertTrue(c.gpu_options.allow_growth)
# Default setting for second GPU is False and works if we set visibility
config.set_visible_devices(gpus[1], 'GPU')
c = context.context().config
self.assertFalse(c.gpu_options.allow_growth)
# Growth now fails because all the GPUs are visible and not the same
config.set_visible_devices(gpus, 'GPU')
with self.assertRaisesRegexp(ValueError, 'cannot differ'):
c = context.context().config
for gpu in gpus:
config.set_memory_growth(gpu, True)
c = context.context().config
self.assertTrue(c.gpu_options.allow_growth)
with self.assertRaisesRegexp(ValueError, 'memory limit'):
config.set_virtual_device_configuration(gpus[-1], [
context.VirtualDeviceConfiguration(),
context.VirtualDeviceConfiguration()
])
self.assertIsNone(config.get_virtual_device_configuration(gpus[-1]))
config.set_virtual_device_configuration(gpus[-1], [
context.VirtualDeviceConfiguration(memory_limit=10),
context.VirtualDeviceConfiguration(memory_limit=10)
])
c = context.context().config
self.assertFalse(c.gpu_options.allow_growth)
with self.assertRaisesRegexp(ValueError, 'virtual devices'):
config.set_memory_growth(gpus[-1], False)
@test_util.run_gpu_only
@reset_eager
def testRemote(self):
gpus = config.list_logical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
context.ensure_initialized()
gpus = config.list_logical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
for gpu in gpus:
self.assertIsNotNone(gpu.name)
context.ensure_initialized()
job_name = 'test'
cluster_def = cluster_pb2.ClusterDef()
job_def = cluster_def.job.add()
job_def.name = job_name
job_def.tasks[0] = 'localhost:0'
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def, job_name=job_name, task_index=0, protocol='grpc')
context.set_server_def(server_def)
gpus = config.list_logical_devices('GPU')
for gpu in gpus:
self.assertIsNotNone(gpu.name)
@reset_eager
def testV1CompatibilityDummyInvisibleDeviceList(self):
gpus = config.list_physical_devices('GPU')
if gpus:
self.skipTest('Test requires no GPUs')
# Ensure GPU options are left untouched in CPU-only environments
context.context()._physical_devices = None
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(visible_device_list='0'))
new_config = context.context().config
self.assertEqual(new_config.gpu_options.visible_device_list, '0')
@test_util.run_gpu_only
@reset_eager
def testV1Compatibility(self):
# Ensure we set 1 CPU by default
context.context()._config = config_pb2.ConfigProto()
new_config = context.context().config
self.assertEqual(new_config.device_count['CPU'], 1)
context.context()._physical_devices = None
# Ensure CPU is split
context.context()._config = config_pb2.ConfigProto(device_count={'CPU': 2})
new_config = context.context().config
self.assertEqual(new_config.device_count['CPU'], 2)
context.context()._physical_devices = None
# Handle empty visible device list
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(visible_device_list=''))
gpus = config.list_physical_devices('GPU')
gpu_count = len(gpus)
new_config = context.context().config
self.assertEqual(new_config.gpu_options.visible_device_list,
','.join(str(i) for i in range(len(gpus))))
context.context()._physical_devices = None
# Handle invalid visible device list
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(visible_device_list=str(gpu_count)))
with self.assertRaisesRegexp(ValueError, 'Invalid visible device index'):
gpus = config.list_physical_devices('GPU')
new_config = context.context().config
context.context()._physical_devices = None
# Handle single visible device list
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(visible_device_list=str(gpu_count-1)))
gpus = config.list_physical_devices('GPU')
new_config = context.context().config
self.assertEqual(new_config.gpu_options.visible_device_list,
str(gpu_count-1))
context.context()._physical_devices = None
def testConfigureCollectiveOps(self):
context.context().configure_collective_ops(
collective_leader='/job:worker/replica:0/task:0',
scoped_allocator_enabled_ops=('CollectiveReduce',),
use_nccl_communication=False,
device_filters=['/job:worker/task:1'])
new_config = context.context().config
# Verify group leader
self.assertEqual('/job:worker/replica:0/task:0',
new_config.experimental.collective_group_leader)
# Verify device filters.
self.assertEqual(['/job:worker/task:1'], new_config.device_filters)
# Verify rewrite options.
new_rewrite_options = new_config.graph_options.rewrite_options
self.assertEqual(rewriter_config_pb2.RewriterConfig.ON,
new_rewrite_options.scoped_allocator_optimization)
self.assertEqual(['CollectiveReduce'],
new_rewrite_options.scoped_allocator_opts.enable_op)
class TensorFloat32Test(test.TestCase):
def setUp(self):
if not test_util.is_gpu_available(cuda_only=True,
min_cuda_compute_capability=(8, 0)):
self.skipTest('TensorFloat-32 requires an NVIDIA GPU with compute '
'capability of at least 8.0')
def tearDown(self):
config.enable_tensor_float_32_execution(False)
# This test is disabled because (1) running with NVIDIA_TF32_OVERRIDE=0
# causes it to fail, and (2) enabling TF32 allows, but does not strictly
# require, TF32 execution to be used.
# def test_tf32_enabled(self):
# self.assertFalse(config.tensor_float_32_execution_enabled())
# config.enable_tensor_float_32_execution(True)
# self.assertTrue(config.tensor_float_32_execution_enabled())
#
# x = array_ops.fill((8, 8), 1 + 2 ** -20)
# y = array_ops.ones((8, 8))
# out = math_ops.matmul(x, y)
# # In tf32, each element of x is rounded to 1, so the output will be 8s.
# expected = array_ops.fill((8, 8), 8)
# self.assertAllEqual(out, expected)
def test_tf32_disabled(self):
x = array_ops.fill((8, 8), 1 + 2 ** -20)
y = array_ops.ones((8, 8))
out = math_ops.matmul(x, y)
expected = array_ops.fill((8, 8), 8 * (1 + 2 ** -20))
self.assertAllEqual(out, expected)
# Test that disabling TF32 after enabling it works correctly
config.enable_tensor_float_32_execution(True)
config.enable_tensor_float_32_execution(False)
self.assertFalse(config.tensor_float_32_execution_enabled())
out = math_ops.matmul(x, y)
self.assertAllEqual(out, expected)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/config_test.py
|
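The configuration tests above drive device policies, parallelism threads, and virtual devices through the internal `tensorflow.python.framework.config` module. A minimal sketch of the same knobs via the public `tf.config.experimental` namespace (assuming a TF 1.15 build with eager execution enabled; these calls must happen before the context initializes):

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

# 'silent' copies tensors between devices implicitly; 'explicit' raises
# instead of copying.
tf.config.experimental.set_device_policy('silent')

# Mirror DeviceTest.testCpuMultiple: split one physical CPU into two
# logical devices before any op runs.
cpus = tf.config.experimental.list_physical_devices('CPU')
tf.config.experimental.set_virtual_device_configuration(
    cpus[0],
    [tf.config.experimental.VirtualDeviceConfiguration(),
     tf.config.experimental.VirtualDeviceConfiguration()])
print(tf.config.experimental.list_logical_devices('CPU'))  # two devices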
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for file_system."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.framework import load_library
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class FileSystemTest(test.TestCase):
def setUp(self):
file_system_library = os.path.join(resource_loader.get_data_files_path(),
"test_file_system.so")
load_library.load_file_system_library(file_system_library)
@test_util.run_deprecated_v1
def testBasic(self):
with self.cached_session() as sess:
reader = io_ops.WholeFileReader("test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queue.enqueue_many([["test://foo"]]).run()
queue.close().run()
key, value = self.evaluate(reader.read(queue))
self.assertEqual(key, compat.as_bytes("test://foo"))
self.assertEqual(value, compat.as_bytes("AAAAAAAAAA"))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/file_system_test.py
|
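The test above loads a custom filesystem plugin that registers the `test://` scheme. A hedged sketch of exercising such a plugin through the public file API (the `.so` path is a placeholder; `load_file_system_library` is the deprecated v1 entry point used by the test):

import tensorflow as tf

# Hypothetical plugin path; on load it must register the "test://" scheme.
tf.compat.v1.load_file_system_library("/path/to/test_file_system.so")
with tf.io.gfile.GFile("test://foo", "rb") as f:
  data = f.read()  # the test plugin above responds with b"AAAAAAAAAA"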
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple stack that associates filename and line numbers with each object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util import tf_stack
class TraceableObject(object):
"""Wrap an object together with its the code definition location."""
# Return codes for the set_filename_and_line_from_caller() method.
SUCCESS, HEURISTIC_USED, FAILURE = (0, 1, 2)
def __init__(self, obj, filename=None, lineno=None):
self.obj = obj
self.filename = filename
self.lineno = lineno
def set_filename_and_line_from_caller(self, offset=0):
"""Set filename and line using the caller's stack frame.
If the requested stack information is not available, a heuristic may
be applied and self.HEURISTIC_USED will be returned. If the heuristic
fails then no change will be made to the filename and lineno members
(None by default) and self.FAILURE will be returned.
Args:
offset: Integer. If 0, the caller's stack frame is used. If 1,
the caller's caller's stack frame is used. Larger values are
permissible but if out-of-range (larger than the number of stack
frames available) the outermost stack frame will be used.
Returns:
TraceableObject.SUCCESS if appropriate stack information was found,
TraceableObject.HEURISTIC_USED if the offset was larger than the stack,
and TraceableObject.FAILURE if the stack was empty.
"""
# Offset is defined in "Args" as relative to the caller. We are one frame
# beyond the caller.
local_offset = offset + 1
frame_records = tf_stack.extract_stack(
limit=local_offset + 1)
if not frame_records:
return self.FAILURE
if len(frame_records) > local_offset:
frame = frame_records[len(frame_records) - (local_offset + 1)]
self.filename = frame.filename
self.lineno = frame.lineno
return self.SUCCESS
else:
# If the offset is too large then we use the largest offset possible,
# meaning we use the outermost stack frame at index 0.
frame = frame_records[0]
self.filename = frame.filename
self.lineno = frame.lineno
return self.HEURISTIC_USED
def copy_metadata(self):
"""Return a TraceableObject like this one, but without the object."""
return self.__class__(None, filename=self.filename, lineno=self.lineno)
class TraceableStack(object):
"""A stack of TraceableObjects."""
def __init__(self, existing_stack=None):
"""Constructor.
Args:
existing_stack: [TraceableObject, ...] If provided, this object will
set its new stack to a SHALLOW COPY of existing_stack.
"""
self._stack = existing_stack[:] if existing_stack else []
def push_obj(self, obj, offset=0):
"""Add object to the stack and record its filename and line information.
Args:
obj: An object to store on the stack.
offset: Integer. If 0, the caller's stack frame is used. If 1,
the caller's caller's stack frame is used.
Returns:
TraceableObject.SUCCESS if appropriate stack information was found,
TraceableObject.HEURISTIC_USED if the stack was smaller than expected,
and TraceableObject.FAILURE if the stack was empty.
"""
traceable_obj = TraceableObject(obj)
self._stack.append(traceable_obj)
# Offset is defined in "Args" as relative to the caller. We are 1 frame
# beyond the caller and need to compensate.
return traceable_obj.set_filename_and_line_from_caller(offset + 1)
def pop_obj(self):
"""Remove last-inserted object and return it, without filename/line info."""
return self._stack.pop().obj
def peek_top_obj(self):
"""Return the most recent stored object."""
return self._stack[-1].obj
def peek_objs(self):
"""Return iterator over stored objects ordered newest to oldest."""
return (t_obj.obj for t_obj in reversed(self._stack))
def peek_traceable_objs(self):
"""Return iterator over stored TraceableObjects ordered newest to oldest."""
return reversed(self._stack)
def __len__(self):
"""Return number of items on the stack, and used for truth-value testing."""
return len(self._stack)
def copy(self):
"""Return a copy of self referencing the same objects but in a new list.
This method is implemented to support thread-local stacks.
Returns:
TraceableStack with a new list that holds existing objects.
"""
return TraceableStack(self._stack)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/traceable_stack.py
|
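A minimal usage sketch of the classes defined above (module path as laid out in this tree; note that `offset=0` in `push_obj` records the caller of `push_obj` itself):

from tensorflow.python.framework import traceable_stack

stack = traceable_stack.TraceableStack()
stack.push_obj("my-device-spec", offset=0)  # records this file and line
t_obj = next(stack.peek_traceable_objs())   # newest TraceableObject
print(t_obj.filename, t_obj.lineno)         # the push_obj call site
print(stack.pop_obj())                      # "my-device-spec", metadata dropped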
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for tensor_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import sys
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class TensorUtilTest(test.TestCase):
def testFloat(self):
value = 10.0
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape {}
float_val: %.1f
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array(value, dtype=np.float32), a)
def testFloatN(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0])
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTyped(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=dtypes.float32)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTypeCoerce(self):
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtypes.float32)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTypeCoerceNdarray(self):
arr = np.asarray([10, 20, 30], dtype="int")
t = tensor_util.make_tensor_proto(arr, dtype=dtypes.float32)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatSizes(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[1, 3])
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([[10.0, 20.0, 30.0]], dtype=np.float32), a)
def testFloatSizes2(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[3, 1])
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([[10.0], [20.0], [30.0]], dtype=np.float32), a)
def testFloatSizesLessValues(self):
t = tensor_util.make_tensor_proto(10.0, shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
float_val: 10.0
""", t)
# No conversion to Ndarray for this one: not enough values.
def testFloatNpArrayFloat64(self):
t = tensor_util.make_tensor_proto(
np.array([[10.0, 20.0, 30.0]], dtype=np.float64))
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_DOUBLE
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "@$\000\000\000\000\000\000@4\000\000\000\000\000\000@>\000\000\000\000\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_DOUBLE
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000$@\000\000\000\000\000\0004@\000\000\000\000\000\000>@"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float64, a.dtype)
self.assertAllClose(
np.array([[10.0, 20.0, 30.0]], dtype=np.float64),
tensor_util.MakeNdarray(t))
def testFloatTypesWithImplicitRepeat(self):
for dtype, nptype in [(dtypes.float32, np.float32),
(dtypes.float64, np.float64)]:
t = tensor_util.make_tensor_proto([10.0], shape=[3, 4], dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(
np.array(
[[10.0, 10.0, 10.0, 10.0],
[10.0, 10.0, 10.0, 10.0],
[10.0, 10.0, 10.0, 10.0]],
dtype=nptype),
a)
def testFloatMutateArray(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=dtypes.float32)
a = tensor_util.MakeNdarray(t)
a[0] = 5.0
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([5.0, 20.0, 30.0], dtype=np.float32), a)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
def testHalf(self):
t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=np.float16))
self.assertProtoEquals("""
dtype: DT_HALF
tensor_shape {
dim {
size: 2
}
}
half_val: 18688
half_val: 19712
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float16, a.dtype)
self.assertAllClose(np.array([10.0, 20.0], dtype=np.float16), a)
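# For reference: 18688 == 0x4900 and 19712 == 0x4D00, the IEEE float16 bit
# patterns for 10.0 and 20.0; np.float16(10.0).view(np.uint16) confirms.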
def testBfloat16(self):
test_type = dtypes.bfloat16.as_numpy_dtype
t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=test_type))
# 10.0: 16672 = 0 10000010 (130) 0100000 = (1 + 0/2 + 1/4) * 2^(130-127)
# 20.0: 16800 = 0 10000011 (131) 0100000 = (1 + 0/2 + 1/4) * 2^(131-127)
self.assertProtoEquals("""
dtype: DT_BFLOAT16
tensor_shape {
dim {
size: 2
}
}
half_val: 16672
half_val: 16800
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(test_type, a.dtype)
self.assertAllClose(np.array([10.0, 20.0], dtype=test_type), a)
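# A quick way to verify the two half_val constants above is to reinterpret
# the bfloat16 bytes as uint16:
#   np.array([10.0, 20.0], dtype=test_type).view(np.uint16)
#   # -> array([16672, 16800], dtype=uint16)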
def testInt(self):
t = tensor_util.make_tensor_proto(10)
self.assertProtoEquals("""
dtype: DT_INT32
tensor_shape {}
int_val: 10
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int32, a.dtype)
self.assertAllClose(np.array(10, dtype=np.int32), a)
def testLargeInt(self):
value = np.iinfo(np.int64).max
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: %d
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(value, dtype=np.int64), a)
def testLargeNegativeInt(self):
# We don't use the min np.int64 value here
# because it breaks np.abs().
#
# np.iinfo(np.int64).min = -9223372036854775808
# np.iinfo(np.int64).max = 9223372036854775807
# np.abs(-9223372036854775808) = -9223372036854775808
value = np.iinfo(np.int64).min + 1
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: %d
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(value, dtype=np.int64), a)
def testIntNDefaultType(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_INT32
tensor_shape { dim { size: 2 } dim { size: 2 } }
tensor_content: "\000\000\000\n\000\000\000\024\000\000\000\036\000\000\000("
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_INT32
tensor_shape { dim { size: 2 } dim { size: 2 } }
tensor_content: "\n\000\000\000\024\000\000\000\036\000\000\000(\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int32, a.dtype)
self.assertAllClose(np.array([[10, 20], [30, 40]], dtype=np.int32), a)
def testIntTypes(self):
for dtype, nptype in [(dtypes.int32, np.int32),
(dtypes.uint8, np.uint8),
(dtypes.uint16, np.uint16),
(dtypes.int16, np.int16),
(dtypes.int8, np.int8)]:
# Test with array.
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtype)
self.assertEquals(dtype, t.dtype)
self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
a = tensor_util.MakeNdarray(t)
self.assertEquals(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
# Test with ndarray.
t = tensor_util.make_tensor_proto(np.array([10, 20, 30], dtype=nptype))
self.assertEquals(dtype, t.dtype)
self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
a = tensor_util.MakeNdarray(t)
self.assertEquals(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
def testIntTypesWithImplicitRepeat(self):
for dtype, nptype in [(dtypes.int64, np.int64), (dtypes.int32, np.int32),
(dtypes.uint8, np.uint8), (dtypes.uint16, np.uint16),
(dtypes.int16, np.int16), (dtypes.int8, np.int8)]:
self.assertAllEqual(
np.array([[10, 11, 12, 12], [12, 12, 12, 12], [12, 12, 12, 12]],
dtype=nptype),
tensor_util.MakeNdarray(
tensor_util.make_tensor_proto([10, 11, 12],
shape=[3, 4],
dtype=dtype)))
def testIntMixedWithDimension(self):
# GitHub issue 11974.
dtype = dtypes.int32
nptype = np.int32
t = tensor_util.make_tensor_proto(
[10, tensor_shape.Dimension(20), 30], dtype=dtype)
self.assertEquals(dtype, t.dtype)
a = tensor_util.MakeNdarray(t)
self.assertEquals(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
def testLong(self):
t = tensor_util.make_tensor_proto(10, dtype=dtypes.int64)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: 10
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(10, dtype=np.int64), a)
def testLongN(self):
t = tensor_util.make_tensor_proto(
[10, 20, 30], shape=[1, 3], dtype=dtypes.int64)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_INT64
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000\000\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_INT64
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array([[10, 20, 30]], dtype=np.int64), a)
def testLongNpArray(self):
t = tensor_util.make_tensor_proto(np.array([10, 20, 30]))
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_INT64
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000\000\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_INT64
tensor_shape { dim { size: 3 } }
tensor_content: "\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=np.int64), a)
def testQuantizedTypes(self):
# Test with array.
data = [(21,), (22,), (23,)]
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint32)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_QINT32
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000\000\025\000\000\000\026\000\000\000\027"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_QINT32
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\000\000\026\000\000\000\027\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.qint32.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.quint8)
self.assertProtoEquals(r"""
dtype: DT_QUINT8
tensor_shape { dim { size: 3 } }
tensor_content: "\025\026\027"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.quint8.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint8)
self.assertProtoEquals(r"""
dtype: DT_QINT8
tensor_shape { dim { size: 3 } }
tensor_content: "\025\026\027"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.qint8.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.quint16)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_QUINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\000\025\000\026\000\027"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_QUINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\026\000\027\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.quint16.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint16)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_QINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\000\025\000\026\000\027"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_QINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\026\000\027\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.qint16.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
def testString(self):
t = tensor_util.make_tensor_proto("foo")
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape {}
string_val: "foo"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertEquals([b"foo"], a)
def testStringWithImplicitRepeat(self):
t = tensor_util.make_tensor_proto(["f", "g"], shape=[3, 4])
a = tensor_util.MakeNdarray(t)
self.assertAllEqual(
np.array([[b"f", b"g", b"g", b"g"], [b"g", b"g", b"g", b"g"],
[b"g", b"g", b"g", b"g"]],
dtype=np.object), a)
def testStringN(self):
t = tensor_util.make_tensor_proto([b"foo", b"bar", b"baz"], shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 1 } dim { size: 3 } }
string_val: "foo"
string_val: "bar"
string_val: "baz"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
def testStringNpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[b"a", b"ab"], [b"abc", b"abcd"]]))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 2 } dim { size: 2 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"a", b"ab"], [b"abc", b"abcd"]]), a)
def testArrayMethod(self):
class Wrapper(object):
def __array__(self):
return np.array([b"foo", b"bar", b"baz"])
t = tensor_util.make_tensor_proto(Wrapper(), shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 1 } dim { size: 3 } }
string_val: "foo"
string_val: "bar"
string_val: "baz"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
def testArrayInterface(self):
class Wrapper(object):
@property
def __array_interface__(self):
return np.array([b"foo", b"bar", b"baz"]).__array_interface__
t = tensor_util.make_tensor_proto(Wrapper(), shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 1 } dim { size: 3 } }
string_val: "foo"
string_val: "bar"
string_val: "baz"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
def testStringTuple(self):
t = tensor_util.make_tensor_proto((b"a", b"ab", b"abc", b"abcd"))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 4 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array((b"a", b"ab", b"abc", b"abcd")), a)
def testStringNestedTuple(self):
t = tensor_util.make_tensor_proto(((b"a", b"ab"), (b"abc", b"abcd")))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 2 } dim { size: 2 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array(((b"a", b"ab"), (b"abc", b"abcd"))), a)
def testComplex64(self):
t = tensor_util.make_tensor_proto((1 + 2j), dtype=dtypes.complex64)
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape {}
scomplex_val: 1
scomplex_val: 2
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplex128(self):
t = tensor_util.make_tensor_proto((1 + 2j), dtype=dtypes.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape {}
dcomplex_val: 1
dcomplex_val: 2
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplexWithImplicitRepeat(self):
for dtype, np_dtype in [(dtypes.complex64, np.complex64),
(dtypes.complex128, np.complex128)]:
t = tensor_util.make_tensor_proto((1 + 1j), shape=[3, 4], dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(
np.array(
[[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)]],
dtype=np_dtype),
a)
def testComplex64N(self):
t = tensor_util.make_tensor_proto(
[(1 + 2j), (3 + 4j), (5 + 6j)], shape=[1, 3], dtype=dtypes.complex64)
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape { dim { size: 1 } dim { size: 3 } }
scomplex_val: 1
scomplex_val: 2
scomplex_val: 3
scomplex_val: 4
scomplex_val: 5
scomplex_val: 6
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array([[(1 + 2j), (3 + 4j), (5 + 6j)]]), a)
def testComplex128N(self):
t = tensor_util.make_tensor_proto(
[(1 + 2j), (3 + 4j), (5 + 6j)], shape=[1, 3], dtype=dtypes.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 1 } dim { size: 3 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array([[(1 + 2j), (3 + 4j), (5 + 6j)]]), a)
def testComplex64NpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]),
dtype=dtypes.complex64)
# scomplex_val are real_0, imag_0, real_1, imag_1, ...
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape { dim { size: 2 } dim { size: 2 } }
scomplex_val: 1
scomplex_val: 2
scomplex_val: 3
scomplex_val: 4
scomplex_val: 5
scomplex_val: 6
scomplex_val: 7
scomplex_val: 8
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]), a)
def testComplex128NpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]),
dtype=dtypes.complex128)
# dcomplex_val are real_0, imag_0, real_1, imag_1, ...
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 2 } dim { size: 2 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
dcomplex_val: 7
dcomplex_val: 8
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]), a)
def testUnsupportedDTypes(self):
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(np.array([1]), 0)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(3, dtype=dtypes.qint8)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto([3], dtype=dtypes.qint8)
# Validate the helpful error message when trying to convert a list of
# unconvertible objects to strings.
with self.assertRaisesRegexp(TypeError, "Failed to convert object"):
tensor_util.make_tensor_proto([tensor_shape.Dimension(1)])
def testTensorShapeVerification(self):
array = np.array([[1], [2]])
correct_shape = (2, 1)
incorrect_shape = (1, 2)
tensor_util.make_tensor_proto(array, shape=correct_shape, verify_shape=True)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(
array, shape=incorrect_shape, verify_shape=True)
def testShapeTooLarge(self):
with self.assertRaises(ValueError):
tensor_util.make_tensor_proto(np.array([1, 2]), shape=[1])
def testLowRankSupported(self):
t = tensor_util.make_tensor_proto(np.array(7))
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: 7
""", t)
def testShapeEquals(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
self.assertTrue(tensor_util.ShapeEquals(t, [2, 2]))
self.assertTrue(tensor_util.ShapeEquals(t, (2, 2)))
self.assertTrue(
tensor_util.ShapeEquals(t, tensor_shape.as_shape([2, 2]).as_proto()))
self.assertFalse(tensor_util.ShapeEquals(t, [5, 3]))
self.assertFalse(tensor_util.ShapeEquals(t, [1, 4]))
self.assertFalse(tensor_util.ShapeEquals(t, [4]))
class IsTensorTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testConstantTensor(self):
np_val = np.random.rand(3).astype(np.int32)
tf_val = constant_op.constant(np_val)
self.assertFalse(tensor_util.is_tensor(np_val))
self.assertTrue(tensor_util.is_tensor(tf_val))
class ConstantValueTest(test.TestCase):
def testConstant(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = constant_op.constant(np_val)
self.assertAllClose(np_val, tensor_util.constant_value(tf_val))
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = constant_op.constant(np_val)
self.assertAllClose(np_val, tensor_util.constant_value(tf_val))
@test_util.run_deprecated_v1
def testUnknown(self):
tf_val = gen_state_ops.variable(
shape=[3, 4, 7],
dtype=dtypes.float32,
name="tf_val",
container="",
shared_name="")
self.assertIs(None, tensor_util.constant_value(tf_val))
def testShape(self):
np_val = np.array([1, 2, 3], dtype=np.int32)
tf_val = array_ops.shape(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(np_val, c_val)
self.assertEqual(np.int32, c_val.dtype)
def testFill(self):
np_val = np.array([-1, -1, -1], dtype=np.float32)
tf_val = array_ops.fill([3], constant_op.constant(-1.0))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(np_val, c_val)
self.assertEqual(np.float32, c_val.dtype)
def testSize(self):
tf_val = array_ops.size(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(6, c_val)
@test_util.run_deprecated_v1
def testSizeOfScalar(self):
tf_val = array_ops.size(constant_op.constant(0.0))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(1, c_val)
self.assertEqual(np.ndarray, type(c_val))
@test_util.run_deprecated_v1
def testRank(self):
tf_val = array_ops.rank(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(np.ndarray, type(c_val))
self.assertEqual((), c_val.shape)
self.assertEqual(3, c_val)
# Repeat test using array_ops.rank_internal to avoid the optimization that
# happens in the rank function.
tf_val = array_ops.rank_internal(
constant_op.constant(
0.0, shape=[1, 2, 3]), optimize=False)
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(np.ndarray, type(c_val))
self.assertEqual((), c_val.shape)
self.assertEqual(3, c_val)
self.assertEqual([3], c_val)
def testCast(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = math_ops.cast(constant_op.constant(np_val), dtypes.float64)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = math_ops.cast(constant_op.constant(np_val), dtypes.float64)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
@test_util.run_deprecated_v1
def testConcat(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = array_ops.concat(
[np_val[0:1, :, :], np_val[1:2, :, :], np_val[2:3, :, :]], 0)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
tf_val = array_ops.concat(
[np_val[0, :, :], np_val[1, :, :], np_val[2, :, :]],
array_ops.placeholder(dtypes.int32))
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
tf_val = array_ops.concat([
np_val[0, :, :], array_ops.placeholder(dtypes.float32), np_val[2, :, :]
], 1)
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
@test_util.run_deprecated_v1
def testPack_Axis0(self):
inputs = [np.random.rand(4, 7) for _ in range(3)]
np_val = np.array(inputs)
tf_val = array_ops.stack(inputs)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
tf_val = array_ops.stack(
[inputs[0], array_ops.placeholder(dtypes.float32), inputs[2]])
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
@test_util.run_deprecated_v1
def testPack_Axis1(self):
inputs = [np.random.rand(4, 7) for _ in range(3)]
tf_val = array_ops.stack(inputs, axis=1)
c_val = tensor_util.constant_value(tf_val)
self.assertIsNone(c_val)
tf_val = array_ops.stack(
[inputs[0], array_ops.placeholder(dtypes.float32), inputs[2]], axis=1)
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
@test_util.run_deprecated_v1
def testPack_Partial_Axis0(self):
input_ = np.random.rand(4, 7)
tf_val = array_ops.stack([input_, array_ops.placeholder(dtypes.float32)])
c_val = tensor_util.constant_value(tf_val, partial=True)
self.assertAllClose(input_, c_val[0])
self.assertIsNone(c_val[1])
@test_util.run_deprecated_v1
def testPack_Partial_Axis1(self):
input_ = np.random.rand(4, 7)
tf_val = array_ops.stack([input_, array_ops.placeholder(dtypes.float32)],
axis=1)
c_val = tensor_util.constant_value(tf_val, partial=True)
self.assertIsNone(c_val)
def testEqual(self):
# Scalar inputs.
tf_val = math_ops.equal(constant_op.constant(1), constant_op.constant(1))
self.assertEqual(tensor_util.constant_value(tf_val), True)
tf_val = math_ops.equal(constant_op.constant(1), constant_op.constant(0))
self.assertEqual(tensor_util.constant_value(tf_val), False)
# Shaped inputs with broadcast semantics.
tf_val = math_ops.equal(constant_op.constant([[0, 1]]),
constant_op.constant([[0], [1]]))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(c_val, [[True, False], [False, True]])
def testNotEqual(self):
# Scalar inputs.
tf_val = math_ops.not_equal(constant_op.constant(1),
constant_op.constant(1))
self.assertEqual(tensor_util.constant_value(tf_val), False)
tf_val = math_ops.not_equal(constant_op.constant(1),
constant_op.constant(0))
self.assertEqual(tensor_util.constant_value(tf_val), True)
# Shaped inputs with broadcast semantics.
tf_val = math_ops.not_equal(constant_op.constant([[0, 1]]),
constant_op.constant([[0], [1]]))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(c_val, [[False, True], [True, False]])
def testLiteral(self):
x = "hi"
self.assertIs(x, tensor_util.constant_value(x))
def testNumpyNdarray(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
self.assertIs(np_val, tensor_util.constant_value(np_val))
def testVariable(self):
var = variables.Variable(1.0, name="variable_node")
self.assertIsNone(tensor_util.constant_value(var))
def testVariableV1(self):
var = variables.VariableV1(1.0, name="variable_node")
self.assertIsNone(tensor_util.constant_value(var))
class ConstantValueAsShapeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testConstant(self):
np_val = np.random.rand(3).astype(np.int32)
tf_val = constant_op.constant(np_val)
self.assertEqual(
tensor_shape.TensorShape(np_val),
tensor_util.constant_value_as_shape(tf_val))
tf_val = constant_op.constant([], dtype=dtypes.int32)
self.assertEqual(
tensor_shape.TensorShape([]),
tensor_util.constant_value_as_shape(tf_val))
@test_util.run_in_graph_and_eager_modes
def testShape(self):
tf_val = array_ops.shape(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual(tensor_shape.TensorShape([1, 2, 3]), c_val)
@test_util.run_in_graph_and_eager_modes
def testMinusOneBecomesNone(self):
tf_val = constant_op.constant([-1, 1, -1], shape=[3])
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([None, 1, None], c_val.as_list())
@test_util.run_deprecated_v1
def testPack(self):
tf_val = array_ops.stack(
[constant_op.constant(16), 37, array_ops.placeholder(dtypes.int32)])
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None], c_val.as_list())
@test_util.run_deprecated_v1
def testConcat(self):
tf_val = array_ops.concat(
[[16, 37], array_ops.placeholder(
dtypes.int32, shape=(2,))], 0)
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None, None], c_val.as_list())
tf_val = array_ops.concat(
[[16, 37], array_ops.placeholder(
dtypes.int32, shape=(1,)), [48]], 0)
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None, 48], c_val.as_list())
@test_util.run_deprecated_v1
def testSlice(self):
tf_val = array_ops.placeholder(dtypes.int32, shape=(4,))[0:2]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([None, None], c_val.as_list())
# begin:end
tf_val = constant_op.constant([10, 20, 30])[1:3]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([20, 30], c_val.as_list())
# begin:end:stride
tf_val = array_ops.strided_slice(
constant_op.constant([10, 20, 30]), [1], [3], strides=[2])
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([20], c_val.as_list())
# [1, 2, 16, 37, None, 48]
tf_val_orig = array_ops.concat(
[[1, 2, 16, 37], array_ops.placeholder(
dtypes.int32, shape=(1,)), [48]], 0)
# begin: no end
tf_val = tf_val_orig[2:]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None, 48], c_val.as_list())
# begin::negative slice
tf_val = tf_val_orig[2::-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 2, 1], c_val.as_list())
# :end:negative slice
tf_val = tf_val_orig[:1:-2]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([48, 37], c_val.as_list())
# begin:end:negative slice
tf_val = tf_val_orig[3:1:-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, 16], c_val.as_list())
# begin:negative end:slice
tf_val = tf_val_orig[1:-3:1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([2, 16], c_val.as_list())
# negative begin::slice
tf_val = tf_val_orig[-3::1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, None, 48], c_val.as_list())
# negative begin::negative slice
tf_val = tf_val_orig[-3::-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, 16, 2, 1], c_val.as_list())
# negative begin:negative end:negative slice
tf_val = tf_val_orig[-3:-5:-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, 16], c_val.as_list())
    # Shape inference is not supported for additional arguments (Ellipsis).
tf_val = constant_op.constant([10, 20, 30])[...]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([None, None, None], c_val.as_list())
    # Shape inference is not supported when slice bounds are tensors.
tf_val = constant_op.constant([10, 20, 30])[
array_ops.placeholder(dtypes.int32, shape=()):]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual(tensor_shape.unknown_shape(), c_val)
    # Shape inference is not supported for inputs of rank higher than 1.
with self.assertRaises(ValueError):
tf_val = constant_op.constant([[10], [20], [30]])[:, 0:]
c_val = tensor_util.constant_value_as_shape(tf_val)
class MaybeSetStaticShapeTest(test.TestCase):
@contextlib.contextmanager
def disableSetStaticShape(self):
flag_old = tensor_util._ENABLE_MAYBE_SET_STATIC_SHAPE
tensor_util._ENABLE_MAYBE_SET_STATIC_SHAPE = False
try:
yield
finally:
tensor_util._ENABLE_MAYBE_SET_STATIC_SHAPE = flag_old
@test_util.run_deprecated_v1
def testMaybeSetStaticShape(self):
shape = constant_op.constant([2, 5], dtype=dtypes.int32)
def reshape():
v = array_ops.zeros([10])
return array_ops.reshape(v, shape)
with self.disableSetStaticShape():
graph_without_shape_propagation = func_graph.func_graph_from_py_func(
"without_shape_propagation", reshape, [], {})
graph_with_shape_propagation = func_graph.func_graph_from_py_func(
"with_shape_propagation", reshape, [], {})
self.assertCountEqual(
[op.type for op in graph_without_shape_propagation.get_operations()],
[op.type for op in graph_with_shape_propagation.get_operations()])
@test_util.run_deprecated_v1
def testMaybeSetStaticShapeScalarShape(self):
def reshape():
v = array_ops.placeholder(dtypes.float32)
t = array_ops.reshape(v, [-1])
return t
with self.disableSetStaticShape():
graph_without_shape_propagation = func_graph.func_graph_from_py_func(
"without_shape_propagation", reshape, [], {})
graph_with_shape_propagation = func_graph.func_graph_from_py_func(
"with_shape_propagation", reshape, [], {})
self.assertCountEqual(
[op.type for op in graph_without_shape_propagation.get_operations()],
[op.type for op in graph_with_shape_propagation.get_operations()])
class ShapeTensorTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testConversion(self):
"""Make sure fully known TensorShape objects convert to Tensors."""
shape = tensor_shape.TensorShape([1, tensor_shape.Dimension(2)])
shape_tensor = tensor_util.shape_tensor(shape)
self.assertAllEqual((1, 2), shape_tensor)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/tensor_util_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Python front-end supports for functions.
NOTE: At this time, functions are experimental and subject to change!. Proceed
with caution.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.eager import context
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import compat
from tensorflow.python.util import function_utils
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
class Defun(object):
"""Decorator used to define TensorFlow functions.
Use this decorator to make a Python function usable directly as a TensorFlow
function.
The decorated function must add ops to the default graph and return zero or
more `Tensor` objects. Call the decorator with named arguments, one for each
argument of the function to decorate, with the expected type of the argument
as value.
For example if the function to decorate accepts two `tf.float32` arguments
named `x` and `y`, call the decorator with:
@Defun(tf.float32, tf.float32)
def foo(x, y):
...
When you call the decorated function, it adds the `call` ops to the
default graph. In addition, it adds the definition of the function into the
default graph. Because the addition of the function into the graph
is deferred, the decorator can be used anywhere in the program.
Any variables created inside of the function are hoisted into the outer graph.
Note that the variables are created in the variable scope that was active
during the first call to the function. Subsequent function calls will refer to
the same set of variables.
Definitions of functions in a graph are frozen as soon as the graph is used to
create a session. However, new functions and new calls to existing functions
may be added to the graph, with the new functions themselves becoming
immediately frozen.
  For example (but also see the [How To on functions](link_needed)):
```python
# Defining the function.
@tf.Defun(tf.float32, tf.float32)
def MyFunc(x, y):
return x + y, x - y
# Building the graph.
a = tf.constant([1.0])
b = tf.constant([2.0])
c, d = MyFunc(a, b, name='mycall')
```
"""
def __init__(self, *input_types, **kwargs):
"""Create a `Defun` decorator.
Args:
*input_types: A list of `tf.DType`
**kwargs: Optional keyword arguments, including
func_name - (optional). A python string, the name to use to
declare this `Function` in the graph.
        grad_func - (optional). A function implementing the gradient
          of the function-to-register. This must be a
          `_DefinedFunction` object. The gradient
          function must satisfy the criterion defined in
          function.proto:GradientDef.
python_grad_func - (optional). A function implementing the
gradient of the function python-side. This function must
take the current op and the gradients w.r.t. its outputs,
          and return the gradients w.r.t. the inputs. That is, it must
          implement the interface expected by `tf.RegisterGradient`.
This will be called by tf.gradients to add the gradient ops
to the graph. At most one of grad_func and python_grad_func
can be specified.
        out_names - (optional). A list of strings, one per output
          tensor.
shape_func - (optional). A function taking the op and returning a list
of static shapes to set for the function's outputs.
"""
self._input_types = input_types
self._func_name = kwargs.pop("func_name", None)
self._grad_func = kwargs.pop("grad_func", None)
self._python_grad_func = kwargs.pop("python_grad_func", None)
self._out_names = kwargs.pop("out_names", None)
self._extra_kwargs = kwargs
def __call__(self, func):
# Various sanity checks on the callable func.
if not callable(func):
raise ValueError("function %s must be callable" % func)
# Func should not use kwargs and defaults.
argspec = tf_inspect.getargspec(func)
if argspec.keywords or argspec.defaults:
raise ValueError(
"function with argument defaults or keywords arguments are not"
" supported. {} has defaults {} and keywords {}.".format(
func, argspec.defaults, argspec.keywords))
# Computes how many arguments 'func' has.
min_args = len(argspec.args)
max_args = min_args
if argspec.varargs:
max_args = 1000000
argnames = argspec.args
if tf_inspect.ismethod(func):
# 1st argument is the "class" type.
min_args -= 1
argnames = argnames[1:]
if self._input_types:
# If Defun is given a list of types for the inputs, the number
# of input types should be compatible with 'func'.
num = len(self._input_types)
if num < min_args or num > max_args:
        raise ValueError(
            "The number of specified input types (%d) is not compatible "
            "with the function's arguments (between %d and %d expected)." %
            (num, min_args, max_args))
return _DefinedFunction(
func,
argnames,
self._input_types,
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
# 'func' expects no arguments and input types is an empty list.
if min_args == 0 and max_args == 0:
return _DefinedFunction(
func, [], [],
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
# Input types are unknown. It's an overloaded function and hence
# its definition needs to be deferred until it's called.
return _OverloadedFunction(
func,
argnames,
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
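# A minimal sketch (not part of the upstream module; `_example_overloaded_defun`
# is a hypothetical helper for illustration): when `Defun` is called with no
# input types, the decorator above returns an `_OverloadedFunction`, which
# lazily instantiates one `_DefinedFunction` per distinct input-dtype
# signature. Assumes graph mode.
def _example_overloaded_defun():
  @Defun()
  def MyAdd(x, y):  # dtypes unknown until the first call
    return x + y

  g = ops.Graph()
  with g.as_default():
    a = ops.convert_to_tensor([1.0])  # float32
    b = ops.convert_to_tensor([2.0])
    # The first (f32, f32) call instantiates and caches a _DefinedFunction
    # keyed by the stringified type list (see _type_list_to_str below).
    return MyAdd(a, b)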
class _DefinedFunctionDeleter(object):
"""Unregister function from eager context."""
def __init__(self, name):
self.name = name
def __del__(self):
try:
context.remove_function(self.name)
except TypeError:
# Suppress some exceptions, mainly for the case when we're running on
# module deletion. Things that can go wrong include the context module
# already being unloaded, self._handle._handle_data no longer being
# valid, and so on. Printing warnings in these cases is silly
# (exceptions raised from __del__ are printed as warnings to stderr).
pass # 'NoneType' object is not callable when the handle has been
# partially unloaded.
except AttributeError:
pass # 'NoneType' object has no attribute 'eager_mode' when context has
# been unloaded. Will catch other module unloads as well.
class _DefinedFunction(object):
"""_DefinedFunction encapsulates a function definition and its properties.
Attributes:
name: The function name.
definition: The definition of this function. A FunctionDef proto.
grad_func_name: If not None, the name of this function's gradient function.
python_grad_func: A python callable implementing the gradient of
the function python-side.
"""
def __init__(self,
func,
argnames,
input_types,
func_name=None,
grad_func=None,
python_grad_func=None,
out_names=None,
shape_func=None,
capture_by_value=False,
whitelisted_stateful_ops=None,
capture_resource_var_by_value=True,
**kwargs):
"""Creates _DefinedFunction.
Args:
func: A python callable which constructs a tf function body.
argnames: A list of strings for function argument names.
      input_types: The function's argument types. Can be a tuple or list of
        tf data types.
      func_name: The function name. Defaults to None, in which case it is
        derived from 'func'.
grad_func: This function's gradient function, if not None. Defaults
to None.
python_grad_func: A python callable implementing the gradient of
the function python-side.
out_names: An optional list of strings for the function return value
names.
shape_func: An optional function mapping an op to a list of static
output shapes.
capture_by_value: Boolean (defaults to False). If True, captured values
will be copied into the function body.
      whitelisted_stateful_ops: A set of stateful ops that may still be
        copied into the function body when `capture_by_value` is True.
      capture_resource_var_by_value: Boolean (defaults to True). If False,
        captured resource variables return their handle instead of their
        value.
**kwargs: The keyword arguments. **kwargs is passed to every call
site of this function.
Raises:
ValueError: The function definition is invalid.
"""
self._func = func
self._input_types = input_types
self._func_name = func_name
self._grad_func = grad_func
self._python_grad_func = python_grad_func
self._out_names = out_names
self._shape_func = shape_func
self._capture_by_value = capture_by_value
self._whitelisted_stateful_ops = whitelisted_stateful_ops
if self._whitelisted_stateful_ops is None:
self._whitelisted_stateful_ops = set()
self._capture_resource_var_by_value = capture_resource_var_by_value
self._extra_kwargs = kwargs
# Constructed only when C API is disabled, lazily
self._definition = None
# Constructed only when C API is enabled, lazily
self._c_func = None
self._function_deleter = None
self._sub_functions = {} # Constructed with _definition or _c_func
# pylint: disable=protected-access
device_funcs = ops.get_default_graph()._device_functions_outer_to_inner
# pylint: enable=protected-access
    # Get the innermost device if possible.
self._caller_device = device_funcs[-1] if device_funcs else None
# Cached OpDef for this function. When C API is enabled, this is
# the only part of FunctionDef that we cache in Python. When C API
# is disabled the whole _definition is available and this is simply
# another reference to _definition.signature
self._op_def = None
assert isinstance(input_types, (list, tuple))
self._arg_types = input_types
self._arg_names = [argnames[i] if i < len(argnames) else ("arg%d" % i)
for i in range(len(input_types))]
@property
def name(self):
"""Function name."""
self._create_definition_if_needed()
return self._func_name
@property
def definition(self):
"""Function definition proto."""
self._create_definition_if_needed()
if self._c_func:
with c_api_util.tf_buffer() as buf:
c_api.TF_FunctionToFunctionDef(self._c_func.func, buf)
fdef = function_pb2.FunctionDef()
proto_data = c_api.TF_GetBuffer(buf)
fdef.ParseFromString(compat.as_bytes(proto_data))
with ops.init_scope():
if context.executing_eagerly():
context.add_function(self._c_func.func)
self._function_deleter = _DefinedFunctionDeleter(
fdef.signature.name)
return fdef
return self._definition
@property
def _signature(self):
self._create_definition_if_needed()
return self._op_def
def set_grad_func(self, grad_func):
"""Specifies the gradient function of this function."""
assert not self._grad_func
assert isinstance(grad_func, _DefinedFunction)
self._grad_func = grad_func
@property
def grad_func_name(self):
"""Returns the name of the gradient function."""
return self._grad_func.name if self._grad_func else None
@property
def python_grad_func(self):
"""Python gradient function callable."""
return self._python_grad_func
@property
def declared_input_types(self):
"""Returns the list of data types of explicit declared inputs."""
return self._input_types
@property
def captured_inputs(self):
"""Returns the list of implicitly captured inputs."""
self._create_definition_if_needed()
return self._extra_inputs
@property
def stateful_ops(self):
"""Returns the list of stateful ops in function definition.
Returns:
A list of (op.name, op.type) pairs.
"""
self._create_definition_if_needed()
return self._stateful_ops
def _create_definition_if_needed(self):
"""Creates the function definition if it's not created yet."""
with context.graph_mode():
self._create_definition_if_needed_impl()
def _create_definition_if_needed_impl(self):
"""This is not what you want, see _create_definition_if_needed."""
if self._definition is not None or self._c_func is not None:
return
# Copy variable collections (by reference) from the parent graph such that
# name based variable sharing (e.g. via tf.make_template) works between the
# func graph and parent graph.
variable_keys = []
variable_keys.extend(ops.GraphKeys._VARIABLE_COLLECTIONS) # pylint: disable=protected-access
variable_keys.append(vs._VARSTORE_KEY) # pylint: disable=protected-access
collections_ref = {}
parent_collections_ref = ops.get_default_graph()._collections # pylint: disable=protected-access
for key in variable_keys:
if key not in parent_collections_ref:
parent_collections_ref[key] = collections_ref[key] = []
else:
collections_ref[key] = parent_collections_ref[key]
temp_graph = func_graph_from_py_func(
self._func,
self._arg_names,
self._arg_types,
self._func_name,
self._capture_by_value,
self._caller_device,
collections_ref=collections_ref,
whitelisted_stateful_ops=self._whitelisted_stateful_ops,
capture_resource_var_by_value=self._capture_resource_var_by_value)
self._extra_inputs = temp_graph.extra_inputs
# pylint: disable=protected-access
self._sub_functions = temp_graph._functions
# pylint: enable=protected-access
# Extra kwargs are treated as attrs on the function def.
if self._func_name:
base_func_name = self._func_name
else:
base_func_name = function_utils.get_func_name(self._func)
if self._grad_func:
base_func_name += ("_%s" % self._grad_func.name)
kwargs_attr = _parse_kwargs_as_attrs(base_func_name, **self._extra_kwargs)
if not temp_graph._c_graph: # pylint: disable=protected-access
# Build the FunctionDef
self._definition = graph_to_function_def.graph_to_function_def(
temp_graph,
temp_graph.get_operations(),
temp_graph.inputs,
temp_graph.outputs,
out_names=self._out_names)
for k in kwargs_attr:
self._definition.attr[k].CopyFrom(kwargs_attr[k])
# Hash the definition and its dependencies.
self._hash_str = self._create_hash_str(
self._definition.signature.input_arg,
self._definition.signature.output_arg, self._definition.node_def)
# Finally, we decide the function name to use. If not specified,
# make up something which is almost certainly unique (but deterministic).
if not self._func_name:
self._func_name = "_".join([base_func_name, self._hash_str])
self._definition.signature.name = self._func_name
if self._func.__doc__:
self._definition.signature.description = self._func.__doc__
self._op_def = self._definition.signature
else: # C API is enabled
output_names = ([compat.as_bytes(x) for x in self._out_names]
if self._out_names else [])
description = self._func.__doc__ or None
# pylint: disable=protected-access
c_func = c_api.TF_GraphToFunction_wrapper(
temp_graph._c_graph,
base_func_name,
self._func_name is None, # append_hash_to_fn_name
None, # opers
[t._as_tf_output() for t in temp_graph.inputs],
[t._as_tf_output() for t in temp_graph.outputs],
output_names,
[], # control_outputs
[], # control_output_names
None, # opts
description)
self._c_func = c_api_util.ScopedTFFunction(c_func)
# pylint: enable=protected-access
self._set_c_attrs(kwargs_attr)
# Set cached fields: _op_def and _func_name (if not already set)
self._op_def = self.definition.signature
if self._func_name:
assert self._func_name == self._op_def.name
else:
self._func_name = compat.as_str(self._op_def.name)
self._stateful_ops = [(op.name, op.type)
for op in temp_graph.get_operations()
if op._is_stateful] # pylint: disable=protected-access
def _set_c_attrs(self, attrs):
"""Sets `attrs` as attributes of self._c_func.
Requires that self._c_func is not None.
Args:
attrs: a dictionary from attribute name to attribute proto value
"""
for name, attr_value in attrs.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
c_api.TF_FunctionSetAttrValueProto(self._c_func.func, compat.as_str(name),
serialized)
def _create_hash_str(self, input_arg, output_arg, node_def):
"""Creates an 8-character string unique to this input.
Args:
input_arg: the input_arg field of an OpDef
(e.g. self._definition.signature.input_arg)
output_arg: the output_arg field of an OpDef
(e.g. self._definition.signature.output_arg)
node_def: the node_def field of a FunctionDef
(e.g. self._definition.node_def)
Returns:
The unique string for this input
"""
hasher = hashlib.sha1()
def update_num(n):
hasher.update(compat.as_bytes("%x" % n))
def update_str(s):
update_num(len(s))
hasher.update(compat.as_bytes(s))
def update_strs(slist):
update_num(len(slist))
for s in slist:
update_str(s)
for adef in input_arg:
update_str(adef.SerializeToString())
for adef in output_arg:
update_str(adef.SerializeToString())
for n in sorted(node_def, key=lambda n: n.name):
update_str(n.name)
update_str(n.op)
update_strs(n.input)
update_num(len(n.attr))
# NOTE: protobuf map serialization does not guarantee ordering.
for k in sorted(n.attr):
update_str(k)
update_str(n.attr[k].SerializeToString())
return hasher.hexdigest()[:8]
def add_to_graph(self, g):
"""Adds this function into the graph g."""
self._create_definition_if_needed()
# Adds this function into 'g'.
# pylint: disable=protected-access
if context.executing_eagerly():
context.context().add_function_def(self.definition)
else:
g._add_function(self)
# pylint: enable=protected-access
# Ensures related sub-routines are defined in 'g', too.
for f in self._sub_functions.values():
f.add_to_graph(g)
# Adds its gradient function, too.
if self._grad_func:
self._grad_func.add_to_graph(g)
def __call__(self, *args, **kwargs):
self.add_to_graph(ops.get_default_graph())
args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs
ret, op = _call(self._signature, *args, **kwargs)
# Set a hidden attr in 'op' so that gradients_impl can refer back
# to this _DefinedFunction instance to access python_grad_func.
assert isinstance(op, ops.Operation)
setattr(op, "__defun", self)
if self._shape_func is not None:
shapes = self._shape_func(op)
if len(shapes) != len(op.outputs):
raise ValueError("shape_func produced %d shapes for %d outputs" %
(len(shapes), len(op.outputs)))
for (t, shape) in zip(op.outputs, shapes):
t.set_shape(shape)
return ret
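# Illustrative sketch (hypothetical helper, not used by this module; assumes
# graph mode): calling a _DefinedFunction registers its FunctionDef with the
# default graph's function library and then emits a single call op whose
# outputs are returned.
def _example_call_defined_function():
  @Defun(dtypes.float32)
  def Square(x):
    return x * x

  g = ops.Graph()
  with g.as_default():
    y = Square(ops.convert_to_tensor(3.0))
  # The function definition now lives in g's library.
  assert Square.name in [
      f.signature.name for f in g.as_graph_def().library.function]
  return y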
class _OverloadedFunction(object):
"""_OverloadedFunction encapsulates an overloaded function.
_OverloadedFunction maintains a mapping from input types to
instantiated _DefinedFunction in self._overload.
"""
def __init__(self,
func,
argnames,
func_name=None,
grad_func=None,
python_grad_func=None,
out_names=None,
**kwargs):
"""Creates _DefinedFunction.
Args:
func: A python callable which constructs a tf function body.
argnames: A list of strings for function argument names.
      func_name: The function name. Defaults to None, in which case it is
        derived from 'func'.
grad_func: This function's gradient function, if not None. Defaults
to None.
python_grad_func: A python callable implementing the gradient of
the function python-side.
out_names: A list of strings for the function return value names.
**kwargs: The keyword arguments. **kwargs is passed to every call
site of this function.
Raises:
ValueError: The function definition is invalid.
"""
self._func = func
self._argnames = argnames
self._func_name = func_name
assert grad_func is None or isinstance(grad_func, _OverloadedFunction)
self._grad_func = grad_func
self._python_grad_func = python_grad_func
self._out_names = out_names
self._extra_kwargs = kwargs
self._overload = {}
def instantiate(self, input_types):
"""Instantiate this function given input argument types.
Args:
input_types: A list of data types for the inputs.
Returns:
_DefinedFunction for the given input types.
"""
# Stringify the type list.
key = _type_list_to_str(input_types)
defined = self._overload.get(key)
if not defined:
# If not defined yet, define the function given the input types.
name = self._func_name
if name is not None:
name = "_".join([name, key])
defined = _DefinedFunction(
self._func,
self._argnames,
input_types,
name,
None,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
_ = defined.name # Fully instantiate the function definition.
if self._grad_func:
# If _grad_func is given, it is another
# _OverloadedFunction. We need to instantiate it with the
# right input types.
output_types = [
dtypes.DType(_.type) for _ in defined._signature.output_arg # pylint: disable=protected-access
]
# pylint: disable=protected-access
defined._grad_func = self._grad_func.instantiate(input_types +
output_types)
# pylint: enable=protected-access
self._overload[key] = defined
return defined
def __call__(self, *args, **kwargs):
input_types = []
args = list(args)
for (i, x) in enumerate(args):
x = ops.convert_to_tensor(x)
if not isinstance(x, ops.Tensor):
raise ValueError("Expect a Tensor but get ", x)
input_types.append(x.dtype)
args[i] = x
return self.instantiate(input_types)(*args, **kwargs)
class _FuncGraph(ops.Graph):
"""A helper for constructing a function.
_FuncGraph overrides ops.Graph's create_op() so that we can keep
track of all inputs into every op created inside the function. If
  any input is from another graph, we keep track of it in self._captured
  and substitute the input with a placeholder.
  Each captured input's corresponding placeholder is converted into a
function argument and the caller passes in the captured tensor.
"""
def __init__(self, name, capture_by_value, whitelisted_stateful_ops,
capture_resource_var_by_value, *args, **kwargs):
super(_FuncGraph, self).__init__(*args, **kwargs)
self._capture_by_value = capture_by_value
self._whitelisted_stateful_ops = whitelisted_stateful_ops
self._capture_resource_var_by_value = capture_resource_var_by_value
self._building_function = True
self._outer_graph = ops.get_default_graph()
self._vscope = vs.get_variable_scope()
self._old_custom_getter = self._vscope.custom_getter
# The name of the function.
self.name = name
# Placeholder tensors representing the inputs to this function. The tensors
# are in this _FuncGraph.
self.inputs = []
    # Tensors that will be returned by this function. The tensors are in this
# _FuncGraph.
self.outputs = []
# Maps external tensor -> internal tensor (e.g. input placeholder).
self._captured = object_identity.ObjectIdentityDictionary()
# The external tensors that have been captured as inputs and must be passed
# to this function (empty if capturing by value, otherwise these are the
# keys of _captured).
self.extra_inputs = []
    # Input placeholders that have been added for captured values (empty if
    # capturing by value).
self.extra_args = []
# Captured variables.
# TODO(skyewm): is this needed?
self.extra_vars = []
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Overridden from `tf.Graph` to update both the init_scope container
and the present inner container. This is necessary to make sure setting
containers applies correctly both to created variables and to stateful
ops.
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
# pylint: disable=protected-access
with ops.init_scope():
original_init_container = ops.get_default_graph()._container
try:
self._container = container_name
with ops.init_scope():
ops.get_default_graph()._container = container_name
yield self._container
finally:
self._container = original_container
with ops.init_scope():
ops.get_default_graph()._container = original_init_container
# pylint: enable=protected-access
# pylint: enable=g-doc-return-or-yield
def getvar(
self,
getter,
name,
shape=None,
dtype=None,
initializer=None,
reuse=None,
trainable=True,
collections=None, # pylint: disable=redefined-outer-name
use_resource=None,
**kwargs):
"""A custom variable getter."""
# Here, we switch the default graph to the outer graph and ask the
# variable scope in which the function is defined to give us the
# variable. The variable is stashed in extra_vars and returned to
# the caller.
#
# We capture these variables so that the variable definition is
# hoisted upward to the outer most graph.
with self._outer_graph.as_default():
# pylint: disable=protected-access
var = self._vscope.get_variable(
vs._get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
reuse=reuse,
trainable=trainable,
collections=collections,
use_resource=use_resource)
self.extra_vars.append(var)
if (isinstance(var, resource_variable_ops.BaseResourceVariable) and
self._capture_resource_var_by_value):
# For resource-based variables read the variable outside the function
# and pass in the value. This ensures that the function is pure and
# differentiable. TODO(apassos) this may have performance problems if
# the function will only do embedding lookups on the variable.
return var.value()
return var
def create_op(self, op_type, inputs, dtypes=None, **kwargs): # pylint: disable=redefined-outer-name
for i, x in enumerate(inputs):
if isinstance(x, ops.EagerTensor) or x.graph is not self:
inputs[i] = self.capture(x)
return super(_FuncGraph, self).create_op(op_type, inputs,
dtypes=dtypes, **kwargs)
def capture(self, tensor, name=None):
"""Adds the given tensor to this graph and returns the captured tensor."""
if tensor in self._captured:
# Captured already.
return self._captured[tensor]
elif self._capture_by_value:
return self._add_tensor_and_parents(tensor)
else:
return self._capture_tensor_as_extra_input(tensor, name)
def _capture_tensor_as_extra_input(self, tensor, name=None):
# Substitute with a placeholder.
self.extra_inputs.append(tensor)
# Hoist the new input placeholder out of any control flow context
# we're currently in.
with ops.control_dependencies(None):
ph = array_ops.placeholder(
tensor.dtype, shape=tensor.get_shape(), name=name)
# pylint: disable=protected-access
if isinstance(tensor, ops.EagerTensor):
handle_data = tensor._handle_data
if handle_data:
handle_data = handle_data.SerializeToString()
else:
handle_data = c_api.GetHandleShapeAndType(tensor.graph._c_graph,
tensor._as_tf_output())
if handle_data:
c_api.SetHandleShapeAndType(ph.graph._c_graph, ph._as_tf_output(),
compat.as_bytes(handle_data))
# pylint: enable=protected-access
self.inputs.append(ph)
self._captured[tensor] = ph
self.extra_args.append(ph)
if _is_guaranteed_const(tensor):
with ops.control_dependencies(None):
return array_ops.guarantee_const(ph)
else:
return ph
def _add_tensor_and_parents(self, tensor):
op = self._add_op_and_parents(tensor.op)
return op.outputs[tensor.value_index]
def _add_op_and_parents(self, op):
# pylint: disable=protected-access
op_def = graph_to_function_def._get_op_def(op)
if op._is_stateful and op not in self._whitelisted_stateful_ops:
raise ValueError("Cannot capture a stateful node (name:%s, type:%s) "
"by value." % (op.name, op.type))
elif op.type in ("Placeholder", "PlaceholderV2"):
raise ValueError("Cannot capture a placeholder (name:%s, type:%s) "
"by value." % (op.name, op.type))
# pylint: enable=protected-access
captured_inputs = [self._add_tensor_and_parents(x) for x in op.inputs]
captured_op = self.create_op(
op.type,
captured_inputs, [o.dtype for o in op.outputs],
name=op.name,
attrs=op.node_def.attr,
op_def=op_def)
for t, captured_t in zip(op.outputs, captured_op.outputs):
self._captured[t] = captured_t
return captured_op
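# Sketch of the capture mechanism above (illustration only; `_example_capture`
# is a hypothetical helper): an op created inside a _FuncGraph that consumes
# a tensor from another graph gets a placeholder substituted for it, and the
# external tensor is recorded in extra_inputs. Assumes graph mode.
def _example_capture():
  outer = ops.Graph()
  with outer.as_default():
    x = ops.convert_to_tensor(1.0)
  fg = _FuncGraph("example", capture_by_value=False,
                  whitelisted_stateful_ops=set(),
                  capture_resource_var_by_value=True)
  with fg.as_default():
    y = x + 2.0  # x is captured; fg.inputs gains a placeholder for it
  assert fg.extra_inputs[0] is x
  assert len(fg.inputs) == 1
  return y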
def func_graph_from_py_func(func,
arg_names,
arg_types,
name=None,
capture_by_value=False,
device=None,
colocation_stack=None,
container=None,
collections_ref=None,
arg_shapes=None,
whitelisted_stateful_ops=None,
capture_resource_var_by_value=True):
"""Returns a _FuncGraph generated from `func`.
Args:
func: A Python callable which constructs a TF function body. The arguments
must correspond to `arg_types`. Returns a value or list/tuple of values.
No returned value can be None.
arg_names: A sequence of strings for the function argument names.
arg_types: A sequence of the function's argument types.
name: The function name. If None, the name is derived from `func`.
capture_by_value: boolean. If True, captured values will be copied into the
function body.
device: device name or function.
colocation_stack: A colocation stack (list) the _FuncGraph should use.
container: A container name the _FuncGraph should start with.
collections_ref: A reference to a collections dict the _FuncGraph should
use internally.
arg_shapes: A sequence of the function's argument shapes.
    whitelisted_stateful_ops: A set of stateful ops that may still be
      captured and re-created inside the function body.
    capture_resource_var_by_value: Boolean (defaults to True). If False,
      captured resource variables return their handle instead of their value.
Returns:
A _FuncGraph.
Raises:
ValueError: if func returns None.
"""
if not name:
name = function_utils.get_func_name(func)
func_graph = _FuncGraph(name, capture_by_value, whitelisted_stateful_ops,
capture_resource_var_by_value)
with func_graph.as_default(), ops.device(device):
# pylint: disable=protected-access
if collections_ref is not None:
func_graph._collections = collections_ref
if container is not None:
func_graph._container = container
if colocation_stack is not None:
func_graph._colocation_stack = colocation_stack
# pylint: enable=protected-access
if arg_shapes is None:
arg_shapes = [None] * len(arg_types)
# Create placeholders for the function arguments.
for (argname, argtype, argshape) in zip(arg_names, arg_types, arg_shapes):
argholder = array_ops.placeholder(argtype, shape=argshape, name=argname)
func_graph.inputs.append(argholder)
# Call func and gather the output tensors.
with vs.variable_scope("", custom_getter=func_graph.getvar):
outputs = func(*func_graph.inputs)
# There is no way of distinguishing between a function not returning
# anything and a function returning None in Python.
# We need to allow the former and ideally want to forbid the latter as
# it is most likely user error.
# TODO(iga): Consider adding a @NoOutput decorator on top of @Defun to
# allow users to explicitly mark the function as not returning anything.
# For now, we allow a single None return and interpret it as a function
# with no output.
if outputs is None:
outputs = []
else:
# If func only returned one value, make it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
if any(_ is None for _ in outputs):
raise ValueError("Function %s can not return None." % name)
# Ensures each output is a Tensor in the function graph.
outputs = [ops.convert_to_tensor(t) for t in outputs]
outputs = [func_graph.capture(t) if t.graph is not func_graph else t
for t in outputs]
func_graph.outputs = outputs
return func_graph
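# A small sketch of direct use (hypothetical helper; assumes graph mode):
# build a _FuncGraph for a one-argument float32 body and inspect the
# generated argument placeholders and outputs.
def _example_func_graph_from_py_func():
  def body(x):
    return x + 1.0
  fg = func_graph_from_py_func(body, ["x"], [dtypes.float32])
  assert [t.dtype for t in fg.inputs] == [dtypes.float32]
  assert len(fg.outputs) == 1
  return fg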
def _is_guaranteed_const(tensor):
"""Determines whether `tensor` is guaranteed to be a constant.
A tensor is guaranteed to be a constant if either it was produced by
  a `GuaranteeConst` op or if all of its inputs are guaranteed to be
constants.
Args:
tensor: The tensor for which to determine const-ness.
Returns:
True if `tensor` is guaranteed to be a constant, False otherwise.
"""
if isinstance(tensor, ops.EagerTensor):
return False
class Work(object):
def __init__(self, op, leaving):
self.op = op
self.leaving = leaving
is_guaranteed_const = lambda op: op.node_def.op == "GuaranteeConst"
constants = set([])
def all_inputs_const(op):
# If all inputs of an op are guaranteed constants, then we can infer that
# the op produces a constant as well.
return op.inputs and all(inp.op in constants for inp in op.inputs)
visited = set([])
stack = [Work(tensor.op, leaving=False)]
while stack:
work = stack.pop()
if work.leaving:
if all_inputs_const(work.op):
constants.add(work.op)
continue
visited.add(work.op)
if is_guaranteed_const(work.op):
constants.add(work.op)
continue
# This op will be revisited after all its inputs are checked for const-ness.
stack.append(Work(work.op, leaving=True))
for inp in work.op.inputs:
if inp.op not in visited:
stack.append(Work(inp.op, leaving=False))
return tensor.op in constants
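# Sketch of the const-ness walk above (illustration only;
# `_example_guaranteed_const` is a hypothetical helper): a tensor computed
# purely from GuaranteeConst roots is guaranteed-const, while anything fed by
# an ordinary placeholder is not.
def _example_guaranteed_const():
  g = ops.Graph()
  with g.as_default():
    c = array_ops.guarantee_const(ops.convert_to_tensor(1.0))
    assert _is_guaranteed_const(c + c)      # every root is GuaranteeConst
    p = array_ops.placeholder(dtypes.float32)
    assert not _is_guaranteed_const(c + p)  # a placeholder root breaks it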
def _call(sig, *inputs, **kwargs):
"""Adds a node calling a function.
This adds a `call` op to the default graph that calls the function
of signature `sig`, passing the tensors in `inputs` as arguments.
It returns the outputs of the call, which are one or more tensors.
  `sig` is the `OpDef` signature (OpDefArg) of the function to call.
You can pass an optional keyword parameter `name=string` to name the
added operation.
You can pass an optional keyword parameter `noinline=True|False` to
instruct the runtime not to inline the function body into the call
site.
Args:
sig: OpDefArg. The signature of the function.
*inputs: arguments to the function.
**kwargs: Optional keyword arguments. Can only contain 'name' or
'noinline'.
Returns:
A 2-element tuple. First element: a Tensor if the function returns a single
    value; a list of Tensors if the function returns multiple values; the
Operation if the function returns no values. Second element: the Operation.
Raises:
ValueError: if the arguments are invalid.
"""
if len(inputs) != len(sig.input_arg):
raise ValueError("Expected number of arguments: %d, received: %d" % (len(
sig.input_arg), len(inputs)))
name = kwargs.pop("name", None)
g = ops.get_default_graph()
func_name = sig.name
if name is None:
name = func_name
attrs = _parse_kwargs_as_attrs(func_name, **kwargs)
output_types = [dtypes.DType(x.type) for x in sig.output_arg]
op = g.create_op(
func_name, list(inputs), output_types, name=name, attrs=attrs, op_def=sig)
if op.outputs:
if len(op.outputs) == 1:
ret = op.outputs[0]
else:
ret = tuple(op.outputs)
else:
ret = op
return ret, op
def _from_definition(fdef, grad_func=None):
"""Creates a _DefinedFunction initialized from a FunctionDef proto.
Args:
fdef: a FunctionDef
grad_func: a _DefinedFunction or None
Returns:
A _DefinedFunction representing fdef
"""
# TODO(iga): This method does major surgery on _DefinedFunction.
# Make it a named constructor using @classmethod of _DefinedFunction.
# The Python callable is only needed to create a FunctionDef. Since we have
# the FunctionDef here, we don't need to set _DefinedFunction._func (nor do we
# have access to such a callable here).
func = None
argnames = [arg.name for arg in fdef.signature.input_arg]
input_types = tuple(
dtypes.as_dtype(arg.type) for arg in fdef.signature.input_arg)
func_name = fdef.signature.name
# Note: FunctionDefs do not include python gradient functions, so if the
# original _DefinedFunction included one it will not be reflected here.
python_grad_func = None
out_names = [arg.name for arg in fdef.signature.output_arg]
result = _DefinedFunction(func, argnames, input_types, func_name, grad_func,
python_grad_func, out_names)
# pylint: disable=protected-access
serialized = fdef.SerializeToString()
c_func = c_api.TF_FunctionImportFunctionDef(serialized)
result._c_func = c_api_util.ScopedTFFunction(c_func)
result._extra_inputs = []
result._op_def = fdef.signature
# pylint: enable=protected-access
return result
def from_library(lib):
"""Creates _DefinedFunctions initialized from a FunctionDefLibrary proto.
This method handles assigning the correct gradient functions to each
function.
Args:
lib: a FunctionDefLibrary
Returns:
A list of _DefinedFunctions
Raises:
ValueError: `lib` is invalid
"""
if not lib.function and not lib.gradient:
return []
# function name -> FunctionDef proto
funcs = {fdef.signature.name: fdef for fdef in lib.function}
  # Validate that all referenced function names have function defs
for g in lib.gradient:
if g.function_name not in funcs:
raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
(g.function_name, str(lib)))
if g.gradient_func not in funcs:
raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
(g.gradient_func, str(lib)))
# function name -> gradient function name
func_to_grad = collections.defaultdict(lambda: None)
# gradient function name -> names of functions having that grad function
grad_to_funcs = collections.defaultdict(list)
for gdef in lib.gradient:
func_to_grad[gdef.function_name] = gdef.gradient_func
grad_to_funcs[gdef.gradient_func].append(gdef.function_name)
# Start with functions without gradients
ready = [
fdef for fdef in lib.function if func_to_grad[fdef.signature.name] is None
]
if not ready:
raise ValueError(
"FunctionDefLibrary contains cyclic gradient functions!\n" + str(lib))
# function name -> _DefinedFunction
initialized = {}
while ready:
fdef = ready.pop()
name = fdef.signature.name
grad = initialized.get(func_to_grad[name])
if func_to_grad[name]:
assert grad
defined_func = _from_definition(fdef, grad_func=grad)
initialized[name] = defined_func
ready.extend(funcs[f] for f in grad_to_funcs[name])
return initialized.values()
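# Illustrative sketch (hypothetical helper): re-creating _DefinedFunctions
# from the function library of an existing GraphDef; gradient wiring is
# restored by from_library above. Expects a graph_pb2.GraphDef.
def _example_from_library(graph_def):
  return {f.name: f for f in from_library(graph_def.library)}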
def _get_experimental_kwarg_as_attr(attr_name, value):
"""Creates an AttrValue for a python object."""
if isinstance(value, bool):
return attr_value_pb2.AttrValue(b=value)
elif isinstance(value, int):
return attr_value_pb2.AttrValue(i=value)
elif isinstance(value, float):
return attr_value_pb2.AttrValue(f=value)
elif isinstance(value, str):
return attr_value_pb2.AttrValue(s=compat.as_bytes(value))
else:
raise ValueError("Unsupported attribute type for %s with type %s" %
(attr_name, type(value)))
def _get_kwarg_as_str_attr(attr_name, value):
"""Creates an AttrValue for a python object."""
if isinstance(value, str):
return attr_value_pb2.AttrValue(s=compat.as_bytes(value))
else:
raise ValueError("Unsupported attribute type for %s with type %s" %
(attr_name, type(value)))
def _parse_kwargs_as_attrs(func_name, **kwargs):
"""Parses **kwargs into a node's attributes."""
attrs = {}
noinline = kwargs.pop("noinline", None)
if noinline is not None:
attrs["_noinline"] = attr_value_pb2.AttrValue(b=bool(noinline))
# For compatibility with previous behavior, Defun does not perform shape
# inference through its function call operations.
attrs["_disable_call_shape_inference"] = attr_value_pb2.AttrValue(b=True)
compiled = kwargs.pop("compiled", None)
separate_compiled_gradients = kwargs.pop("separate_compiled_gradients", None)
if compiled is not None:
attrs["_XlaCompile"] = attr_value_pb2.AttrValue(b=bool(compiled))
attrs["_XlaSeparateCompiledGradients"] = attr_value_pb2.AttrValue(
b=bool(separate_compiled_gradients))
# Forward _XlaScope from enclosing context (if set), otherwise create new.
# pylint: disable=protected-access
if "_XlaScope" in ops.get_default_graph()._attr_scope_map:
attrs["_XlaScope"] = ops.get_default_graph()._attr_scope_map["_XlaScope"]
else:
attrs["_XlaScope"] = attr_value_pb2.AttrValue(
s=("function_%s" % func_name).encode())
# pylint: enable=protected-access
kwargs_keys = list(kwargs.keys())
for key in kwargs_keys:
if key.startswith("experimental_"):
attrs[key] = _get_experimental_kwarg_as_attr(key, kwargs[key])
del kwargs[key]
# Support for https://github.com/tensorflow/community/pull/113/files.
elif key == "_implements" or key == "_reference":
attrs[key] = _get_kwarg_as_str_attr(key, kwargs[key])
del kwargs[key]
if kwargs:
raise ValueError("Unknown keyword arguments: %s" % kwargs.keys())
return attrs
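# Quick illustration (hypothetical helper): `noinline` and `experimental_*`
# keyword arguments are turned into function attrs by the parser above.
def _example_parse_attrs():
  attrs = _parse_kwargs_as_attrs("MyFunc", noinline=True,
                                 experimental_tag="v1")
  assert attrs["_noinline"].b
  assert attrs["experimental_tag"].s == b"v1"
  return attrs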
def get_extra_vars():
"""Returns the captured variables by the function.
Returns:
    If the default graph is being used to define a function, returns the
    variables created inside the function body so far. Otherwise, returns
    an empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_vars
else:
return []
def get_extra_inputs():
"""Returns the captured input tensors by the function.
Returns:
    If the default graph is being used to define a function, returns the
    tensors that are used inside the function body but defined outside it
    so far. Otherwise, returns an empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_inputs
else:
return []
def get_extra_args():
"""Returns the corresponding function arguments for the captured inputs.
Returns:
    If the default graph is being used to define a function, returns the
    placeholders used inside the function body that correspond, one to one,
    to the tensors returned by get_extra_inputs(). Otherwise, returns an
    empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_args
else:
return []
def _type_list_to_str(types):
if any(_ not in _DTYPE_TO_STR for _ in types):
raise ValueError("Unsupported dtypes: %s" % types)
return "".join([_DTYPE_TO_STR[_] for _ in types])
# NOTE: The list needs to be extended when more data types are added.
_DTYPE_TO_STR = {
dtypes.float16: "f16",
dtypes.float32: "f32",
dtypes.float64: "f64",
dtypes.int32: "i32",
dtypes.uint8: "i8",
dtypes.uint16: "u16",
dtypes.uint32: "u32",
dtypes.uint64: "u64",
dtypes.int16: "i16",
dtypes.int8: "i8",
dtypes.string: "s",
dtypes.complex64: "c64",
dtypes.complex128: "c128",
dtypes.int64: "i64",
dtypes.bool: "b",
dtypes.qint8: "qi8",
dtypes.quint8: "qu8",
dtypes.qint16: "qi16",
dtypes.quint16: "qu16",
dtypes.qint32: "qi32",
dtypes.bfloat16: "b16"
}
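# For example (illustrative helper): a (float32, int32) signature is keyed
# "f32i32"; this key suffixes the names of overloaded function instantiations.
def _example_type_key():
  assert _type_list_to_str([dtypes.float32, dtypes.int32]) == "f32i32"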
def function_def_from_tf_function(c_func):
"""Converts a SWIG-wrapped TF_Function* to a FunctionDef proto."""
with c_api_util.tf_buffer() as buf:
c_api.TF_FunctionToFunctionDef(c_func, buf)
data = c_api.TF_GetBuffer(buf)
fdef = function_pb2.FunctionDef()
fdef.ParseFromString(compat.as_bytes(data))
return fdef
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/function.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.framework.meta_graph.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os.path
import random
import shutil
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import function
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner_impl
# pylint: disable=invalid-name
def _TestDir(test_name):
test_dir = os.path.join(test.get_temp_dir(), test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
gfile.MakeDirs(test_dir)
return test_dir
# pylint: enable=invalid-name
class SimpleMetaGraphTest(test.TestCase):
@test_util.run_deprecated_v1
def testNoVariables(self):
test_dir = _TestDir("no_variables")
filename = os.path.join(test_dir, "metafile")
input_feed_value = -10 # Arbitrary input value for feed_dict.
orig_graph = ops.Graph()
with self.session(graph=orig_graph) as sess:
# Create a minimal graph with zero variables.
input_tensor = array_ops.placeholder(
dtypes.float32, shape=[], name="input")
offset = constant_op.constant(42, dtype=dtypes.float32, name="offset")
output_tensor = math_ops.add(input_tensor, offset, name="add_offset")
# Add input and output tensors to graph collections.
ops.add_to_collection("input_tensor", input_tensor)
ops.add_to_collection("output_tensor", output_tensor)
output_value = sess.run(output_tensor, {input_tensor: input_feed_value})
self.assertEqual(output_value, 32)
# Generates MetaGraphDef.
meta_graph_def, var_list = meta_graph.export_scoped_meta_graph(
filename=filename,
graph_def=ops.get_default_graph().as_graph_def(add_shapes=True),
collection_list=["input_tensor", "output_tensor"],
saver_def=None)
self.assertTrue(meta_graph_def.HasField("meta_info_def"))
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_version, "")
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_git_version,
"")
self.assertEqual({}, var_list)
# Create a clean graph and import the MetaGraphDef nodes.
new_graph = ops.Graph()
with self.session(graph=new_graph) as sess:
      # Import the previously exported meta graph.
meta_graph.import_scoped_meta_graph(filename)
# Re-exports the current graph state for comparison to the original.
new_meta_graph_def, _ = meta_graph.export_scoped_meta_graph(filename +
"_new")
test_util.assert_meta_graph_protos_equal(self, meta_graph_def,
new_meta_graph_def)
# Ensures that we can still get a reference to our graph collections.
new_input_tensor = ops.get_collection("input_tensor")[0]
new_output_tensor = ops.get_collection("output_tensor")[0]
# Verifies that the new graph computes the same result as the original.
new_output_value = sess.run(new_output_tensor,
{new_input_tensor: input_feed_value})
self.assertEqual(new_output_value, output_value)
@test_util.run_deprecated_v1
def testStrippedOpListNestedFunctions(self):
with self.cached_session():
# Square two levels deep
@function.Defun(dtypes.int32)
def f0(x):
return math_ops.square(x)
@function.Defun(dtypes.int32)
def f1(x):
return f0(x)
# At this point we've defined two functions but haven't called them, so
# there should be no used ops.
op_list = meta_graph.stripped_op_list_for_graph(ops.get_default_graph()
.as_graph_def())
self.assertEqual(len(op_list.op), 0)
# If we call the function on a constant, there should be two ops
_ = f1(constant_op.constant(7))
op_list = meta_graph.stripped_op_list_for_graph(ops.get_default_graph()
.as_graph_def())
self.assertEqual(["Const", "Square"], [op.name for op in op_list.op])
def testStrippedOpListRecursiveFunctions(self):
# The function module doesn't support recursive functions, so we build a
# recursive function situation by ourselves: A calls B calls A and Const.
graph = graph_pb2.GraphDef()
a = graph.library.function.add()
b = graph.library.function.add()
a.signature.name = "A"
b.signature.name = "B"
a.node_def.add().op = "B"
b.node_def.add().op = "Const"
b.node_def.add().op = "A"
# Use A in the graph
graph.node.add().op = "A"
# The stripped op list should contain just Const.
op_list = meta_graph.stripped_op_list_for_graph(graph)
self.assertEqual(["Const"], [op.name for op in op_list.op])
@test_util.run_deprecated_v1
def testDefaultAttrStripping(self):
"""Verifies that default attributes are stripped from a graph def."""
# Complex Op has 2 attributes with defaults:
# o "T" : float32.
# o "Tout" : complex64.
# When inputs to the Complex Op are float32 instances, "T" maps to float32
# and "Tout" maps to complex64. Since these attr values map to their
# defaults, they must be stripped unless stripping of default attrs is
# disabled.
with self.cached_session():
real_num = constant_op.constant(1.0, dtype=dtypes.float32, name="real")
imag_num = constant_op.constant(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
# strip_default_attrs is enabled.
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
graph_def=ops.get_default_graph().as_graph_def(),
strip_default_attrs=True)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertNotIn("T", node_def.attr)
self.assertNotIn("Tout", node_def.attr)
self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)
# strip_default_attrs is disabled.
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
graph_def=ops.get_default_graph().as_graph_def(),
strip_default_attrs=False)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertIn("T", node_def.attr)
self.assertIn("Tout", node_def.attr)
self.assertFalse(meta_graph_def.meta_info_def.stripped_default_attrs)
# When inputs to the Complex Op are float64 instances, "T" maps to float64
# and "Tout" maps to complex128. Since these attr values don't map to their
# defaults, they must not be stripped.
with self.session(graph=ops.Graph()):
real_num = constant_op.constant(1.0, dtype=dtypes.float64, name="real")
imag_num = constant_op.constant(2.0, dtype=dtypes.float64, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
graph_def=ops.get_default_graph().as_graph_def(),
strip_default_attrs=True)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertEqual(node_def.attr["T"].type, dtypes.float64)
self.assertEqual(node_def.attr["Tout"].type, dtypes.complex128)
self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)
@test_util.run_deprecated_v1
def testDefaultAttrStrippingNestedFunctions(self):
"""Verifies that default attributes are stripped from function node defs."""
with self.cached_session():
@function.Defun(dtypes.float32, dtypes.float32)
def f0(i, j):
return math_ops.complex(i, j, name="double_nested_complex")
@function.Defun(dtypes.float32, dtypes.float32)
def f1(i, j):
return f0(i, j)
_ = f1(constant_op.constant(1.0), constant_op.constant(2.0))
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
graph_def=ops.get_default_graph().as_graph_def(),
strip_default_attrs=True)
double_nested_complex_node_def = None
for function_def in meta_graph_def.graph_def.library.function:
for node_def in function_def.node_def:
if node_def.name.startswith("double_nested_complex"):
double_nested_complex_node_def = node_def
break
if double_nested_complex_node_def:
break
self.assertIsNotNone(double_nested_complex_node_def)
self.assertNotIn("T", double_nested_complex_node_def.attr)
self.assertNotIn("Tout", double_nested_complex_node_def.attr)
self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)
def testDefaultAttrStrippingUnregisteredOps(self):
"""Verifies that nodes with un-registered ops are not stripped."""
graph_def = graph_pb2.GraphDef()
node = graph_def.node.add()
node.name = "node_with_unreg_op"
node.op = "unreg_op"
node.attr["attr_1"].i = 1
meta_info_def = meta_graph_pb2.MetaGraphDef.MetaInfoDef()
meta_info_def.stripped_op_list.op.add()
with self.cached_session():
meta_graph_def = meta_graph.create_meta_graph_def(
meta_info_def=meta_info_def, graph_def=graph_def,
strip_default_attrs=True)
node_def = test_util.get_node_def_from_graph("node_with_unreg_op",
meta_graph_def.graph_def)
self.assertEqual(node_def.attr["attr_1"].i, 1)
self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)
@test_util.run_deprecated_v1
def testVariableObjectsAreSharedAmongCollections(self):
with ops.Graph().as_default() as graph1:
v = variables.Variable(3.0)
# A single instance of Variable is shared among the collections:
global_vars = graph1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
trainable_vars = graph1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual(len(global_vars), 1)
self.assertEqual(len(trainable_vars), 1)
self.assertIs(global_vars[0], trainable_vars[0])
self.assertIs(v, global_vars[0])
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(graph=graph1)
del graph1 # To avoid accidental references in code involving graph2.
with ops.Graph().as_default() as graph2:
meta_graph.import_scoped_meta_graph(orig_meta_graph)
global_vars = graph2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
trainable_vars = graph2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual(len(global_vars), 1)
self.assertEqual(len(trainable_vars), 1)
# A single instance of Variable is shared among the collections:
self.assertIs(global_vars[0], trainable_vars[0])
@test_util.run_deprecated_v1
def testMetricVariablesCollectionLoadsBytesList(self):
with ops.Graph().as_default() as graph1:
v1 = variables.Variable(
[1, 2, 3], shape=[3], dtype=dtypes.float64, name="v")
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(graph=graph1)
# Copy bytes list from global variables collection to metric variables.
orig_meta_graph.collection_def[ops.GraphKeys.METRIC_VARIABLES].CopyFrom(
orig_meta_graph.collection_def["variables"])
with ops.Graph().as_default() as graph2:
meta_graph.import_scoped_meta_graph(orig_meta_graph)
var_list = graph2.get_collection(ops.GraphKeys.METRIC_VARIABLES)
self.assertEqual(len(var_list), 1)
v2 = var_list[0]
self.assertIsInstance(v2, variables.Variable)
self.assertEqual(v1.name, v2.name)
self.assertEqual(v1.dtype, v2.dtype)
self.assertEqual(v1.shape, v2.shape)
class ScopedMetaGraphTest(test.TestCase):
def _testScopedExport(self, test_dir, exported_filenames):
graph = ops.Graph()
with graph.as_default():
# Creates an inference graph.
# Hidden 1
colocate_constraint = constant_op.constant(1.2, name="constraint")
images = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
with ops.name_scope("hidden1"):
with graph.colocate_with(colocate_constraint.op):
weights1 = variables.Variable(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
        # The use of control_flow_ops.cond here is purely to add test coverage
        # for the save and restore of the control flow context (which doesn't
        # make any sense here from a machine learning perspective). Typical
        # biases are a simple Variable without the conditions.
biases1 = variables.Variable(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights1) + biases1)
# Hidden 2
with ops.name_scope("hidden2"):
weights2 = variables.Variable(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
        # The use of control_flow_ops.while_loop here is purely to add test
        # coverage for the save and restore of the control flow context (which
        # doesn't make any sense here from a machine learning perspective).
        # Typical biases are a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases2):
biases2 += constant_op.constant(0.1, shape=[32])
return it + 1, biases2
_, biases2 = control_flow_ops.while_loop(
loop_cond,
loop_body, [
constant_op.constant(0), variables.Variable(
array_ops.zeros([32]), name="biases")
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights2) + biases2)
# Linear
with ops.name_scope("softmax_linear"):
weights3 = variables.Variable(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases3 = variables.Variable(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights3) + biases3
ops.add_to_collection("logits", logits)
# Exports each sub-graph.
# Exports the first one with unbound_inputs_col_name set to default.
orig_meta_graph1, var_list = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filenames[0]),
graph=ops.get_default_graph(),
export_scope="hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
var_names = [v.name for _, v in var_list.items()]
self.assertEqual(["hidden1/biases:0", "hidden1/weights:0"],
sorted(var_names))
# Exports the rest with no unbound_inputs_col_name.
orig_meta_graph2, _ = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filenames[1]),
graph=ops.get_default_graph(),
export_scope="hidden2",
unbound_inputs_col_name=None)
orig_meta_graph3, _ = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filenames[2]),
graph=ops.get_default_graph(),
export_scope="softmax_linear",
unbound_inputs_col_name=None)
return [orig_meta_graph1, orig_meta_graph2, orig_meta_graph3]
def _testScopedImport(self, test_dir, exported_filenames):
graph = ops.Graph()
# Create all the missing inputs.
with graph.as_default():
new_image = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filenames[0]),
graph=graph,
import_scope="new_hidden1")
with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filenames[0]),
graph=graph,
input_map={"image:0": new_image},
import_scope="new_hidden1")
# Verifies we can import the original "hidden1" into "new_hidden1".
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filenames[0]),
graph=graph,
input_map={"$unbound_inputs_images": new_image},
import_scope="new_hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
new_var_names = [v.name for _, v in var_list.items()]
self.assertEqual(["new_hidden1/biases:0", "new_hidden1/weights:0"],
sorted(new_var_names))
# Verifies we can import the original "hidden2" into "new_hidden2".
hidden1 = array_ops.identity(
graph.as_graph_element("new_hidden1/Relu:0"), name="hidden1/Relu")
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filenames[1]),
graph=graph,
input_map={"$unbound_inputs_hidden1/Relu": hidden1},
import_scope="new_hidden2",
unbound_inputs_col_name=None)
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
new_var_names = [v.name for _, v in var_list.items()]
self.assertEqual(["new_hidden2/biases:0", "new_hidden2/weights:0"],
sorted(new_var_names))
# Verifies we can import the original "softmax_linear" into
# "new_softmax_linear".
hidden2 = array_ops.identity(
graph.as_graph_element("new_hidden2/Relu:0"), name="hidden2/Relu")
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filenames[2]),
graph=graph,
input_map={"$unbound_inputs_hidden2/Relu": hidden2},
import_scope="new_softmax_linear",
unbound_inputs_col_name=None)
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
new_var_names = [v.name for _, v in var_list.items()]
self.assertEqual(
["new_softmax_linear/biases:0", "new_softmax_linear/weights:0"],
sorted(new_var_names))
# Exports the scoped meta graphs again.
new_meta_graph1, var_list = meta_graph.export_scoped_meta_graph(
graph=graph, export_scope="new_hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
new_meta_graph2, var_list = meta_graph.export_scoped_meta_graph(
graph=graph, export_scope="new_hidden2", unbound_inputs_col_name=None)
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
new_meta_graph3, var_list = meta_graph.export_scoped_meta_graph(
graph=graph,
export_scope="new_softmax_linear",
unbound_inputs_col_name=None)
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
return [new_meta_graph1, new_meta_graph2, new_meta_graph3]
# Verifies that we can export the subgraph under each layer and import
# them into new layers in a new graph.
@test_util.run_deprecated_v1
def testScopedExportAndImport(self):
test_dir = _TestDir("scoped_export_import")
filenames = [
"exported_hidden1.pbtxt", "exported_hidden2.pbtxt",
"exported_softmax_linear.pbtxt"
]
orig_meta_graphs = self._testScopedExport(test_dir, filenames)
new_meta_graphs = self._testScopedImport(test_dir, filenames)
for a, b in zip(orig_meta_graphs, new_meta_graphs):
# The unbound input strings are slightly different with the C API enabled
# ("images" vs "images:0") due to the original import_graph_def code
# vs. ImportGraphDef in C++.
# TODO(skyewm): update the pbtxts once _USE_C_API is removed.
del a.collection_def["unbound_inputs"]
del b.collection_def["unbound_inputs"]
test_util.assert_meta_graph_protos_equal(self, a, b)
def testWhileLoopGradients(self):
# Create a simple while loop.
with ops.Graph().as_default():
with ops.name_scope("export"):
var = variables.Variable(0.)
var_name = var.name
_, output = control_flow_ops.while_loop(
lambda i, x: i < 5,
lambda i, x: (i + 1, x + math_ops.cast(i, dtypes.float32)),
[0, var])
output_name = output.name
# Generate a MetaGraphDef containing the while loop with an export scope.
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
export_scope="export")
# Build and run the gradients of the while loop. We use this below to
# verify that the gradients are correct with the imported MetaGraphDef.
init_op = variables.global_variables_initializer()
grad = gradients_impl.gradients([output], [var])
with session.Session() as sess:
self.evaluate(init_op)
expected_grad_value = self.evaluate(grad)
# Restore the MetaGraphDef into a new Graph with an import scope.
with ops.Graph().as_default():
meta_graph.import_scoped_meta_graph(meta_graph_def, import_scope="import")
# Re-export and make sure we get the same MetaGraphDef.
new_meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
export_scope="import")
test_util.assert_meta_graph_protos_equal(
self, meta_graph_def, new_meta_graph_def)
# Make sure we can still build gradients and get the same result.
def new_name(tensor_name):
base_tensor_name = tensor_name.replace("export/", "")
return "import/" + base_tensor_name
var = ops.get_default_graph().get_tensor_by_name(new_name(var_name))
output = ops.get_default_graph().get_tensor_by_name(new_name(output_name))
grad = gradients_impl.gradients([output], [var])
init_op = variables.global_variables_initializer()
with session.Session() as sess:
self.evaluate(init_op)
actual_grad_value = self.evaluate(grad)
self.assertEqual(expected_grad_value, actual_grad_value)
@test_util.run_v1_only("b/120545219")
def testImportWhileLoopInWhileLoop(self):
# Create a simple while loop.
with ops.Graph().as_default():
var = variables.Variable(0.0)
_, output = control_flow_ops.while_loop(lambda i, x: i < 5,
lambda i, x: (i + 1, x * 2.0),
[0, var])
output_name = output.name
# Generate a MetaGraphDef containing the while loop with an export scope.
meta_graph_def, _ = meta_graph.export_scoped_meta_graph()
# Restore the MetaGraphDef in a while loop in a new graph.
with ops.Graph().as_default():
def body(i, _):
meta_graph.import_scoped_meta_graph(meta_graph_def)
return i + 1, ops.get_default_graph().get_tensor_by_name(output_name)
_, x = control_flow_ops.while_loop(lambda i, x: i < 2, body, [0, 0.0],
name="")
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
self.evaluate(x)
@test_util.run_deprecated_v1
def testScopedImportUnderNameScope(self):
graph = ops.Graph()
with graph.as_default():
variables.Variable(initial_value=1.0, trainable=True, name="myvar")
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(graph=graph)
graph = ops.Graph()
with graph.as_default():
with ops.name_scope("foo"):
imported_variables = meta_graph.import_scoped_meta_graph(
meta_graph_def, import_scope="bar")
self.assertEqual(len(imported_variables), 1)
self.assertEqual(list(imported_variables.values())[0].name,
"foo/bar/myvar:0")
@test_util.run_deprecated_v1
def testScopedImportUnderNameScopeNoVarScope(self):
graph = ops.Graph()
with graph.as_default():
variables.Variable(initial_value=1.0, trainable=True, name="myvar")
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(graph=graph)
graph = ops.Graph()
with graph.as_default():
with ops.name_scope("foo"):
imported_variables = meta_graph.import_scoped_meta_graph(
meta_graph_def)
self.assertEqual(len(imported_variables), 1)
self.assertEqual(list(imported_variables.values())[0].name,
"foo/myvar:0")
def testImportsUsingSameScopeName(self):
with ops.Graph().as_default():
variables.Variable(0, name="v")
meta_graph_def, _ = meta_graph.export_scoped_meta_graph()
with ops.Graph().as_default():
for suffix in ["", "_1"]:
imported_variables = meta_graph.import_scoped_meta_graph(
meta_graph_def, import_scope="s")
self.assertEqual(len(imported_variables), 1)
self.assertEqual(list(imported_variables.keys())[0], "v:0")
self.assertEqual(list(imported_variables.values())[0].name,
"s" + suffix + "/v:0")
@test_util.run_deprecated_v1
def testScopedImportWithSelectedCollections(self):
meta_graph_filename = os.path.join(
_TestDir("selected_collections_import"), "meta_graph.pb")
graph = ops.Graph()
# Add a variable to populate two collections. The functionality tested is
# not specific to variables, but using variables in the test is convenient.
with graph.as_default():
variables.Variable(initial_value=1.0, trainable=True)
self.assertTrue(
all(
graph.get_collection(key)
for key in
[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES]
))
meta_graph.export_scoped_meta_graph(
filename=meta_graph_filename, graph=graph)
def _test_import(include_collection_keys, omit_collection_keys):
assert set(include_collection_keys).isdisjoint(omit_collection_keys)
newgraph = ops.Graph()
import_scope = "some_scope_name"
def _restore_collections_predicate(collection_key):
return (collection_key in include_collection_keys and
collection_key not in omit_collection_keys)
meta_graph.import_scoped_meta_graph(
meta_graph_filename,
graph=newgraph,
import_scope=import_scope,
restore_collections_predicate=_restore_collections_predicate)
collection_values = [
newgraph.get_collection(name=key, scope=import_scope)
for key in include_collection_keys
]
self.assertTrue(all(collection_values))
collection_values = [
newgraph.get_collection(name=key, scope=import_scope)
for key in omit_collection_keys
]
self.assertFalse(any(collection_values))
_test_import(
include_collection_keys=[
ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES
],
omit_collection_keys=[])
_test_import(
include_collection_keys=[ops.GraphKeys.GLOBAL_VARIABLES],
omit_collection_keys=[ops.GraphKeys.TRAINABLE_VARIABLES])
_test_import(
include_collection_keys=[ops.GraphKeys.TRAINABLE_VARIABLES],
omit_collection_keys=[ops.GraphKeys.GLOBAL_VARIABLES])
_test_import(
include_collection_keys=[],
omit_collection_keys=[
ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES
])
def _testScopedExportWithQueue(self, test_dir, exported_filename):
graph = ops.Graph()
with graph.as_default():
with ops.name_scope("queue1"):
input_queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
enqueue = input_queue.enqueue((9876), name="enqueue")
close = input_queue.close(name="close")
qr = queue_runner_impl.QueueRunner(input_queue, [enqueue], close)
queue_runner_impl.add_queue_runner(qr)
input_queue.dequeue(name="dequeue")
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filename),
graph=ops.get_default_graph(),
export_scope="queue1")
return orig_meta_graph
def _testScopedImportWithQueue(self, test_dir, exported_filename,
new_exported_filename):
graph = ops.Graph()
meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filename),
graph=graph,
import_scope="new_queue1")
graph.as_graph_element("new_queue1/dequeue:0")
graph.as_graph_element("new_queue1/close")
with graph.as_default():
new_meta_graph, _ = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, new_exported_filename),
graph=graph,
export_scope="new_queue1")
return new_meta_graph
# Verifies that we can export the subgraph containing a FIFOQueue under
# "queue1" and import it into "new_queue1" in a new graph.
@test_util.run_deprecated_v1
def testScopedWithQueue(self):
test_dir = _TestDir("scoped_with_queue")
orig_meta_graph = self._testScopedExportWithQueue(test_dir,
"exported_queue1.pbtxt")
new_meta_graph = self._testScopedImportWithQueue(
test_dir, "exported_queue1.pbtxt", "exported_new_queue1.pbtxt")
test_util.assert_meta_graph_protos_equal(self, orig_meta_graph,
new_meta_graph)
def testExportDebugInfo(self):
graph1 = ops.Graph()
with graph1.as_default():
with ops.name_scope("hidden1/hidden2/hidden3"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
name="weights")
biases1 = resource_variable_ops.ResourceVariable(
[0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
operations = []
for op in graph1.get_operations():
operations.append(("", op))
debug_info_def = error_interpolation.create_graph_debug_info_def(
operations=operations)
    # There should be at least one unique file name across the stack traces.
self.assertTrue(len(debug_info_def.files) >= 1)
# All the nodes from the exported graphdef are included.
self.assertEqual(len(debug_info_def.traces), len(graph1.get_operations()))
  # Verifies that we can export a subgraph nested under the name scope
  # "hidden1/hidden2" and import it into "new_hidden1/new_hidden2" in a new
  # graph.
def doTestExportNestedNames(self, use_resource=False):
graph1 = ops.Graph()
with graph1.as_default():
with ops.name_scope("hidden1/hidden2/hidden3"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
if use_resource:
weights1 = variables.Variable(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = resource_variable_ops.ResourceVariable(
[0.1] * 3, name="biases")
else:
biases1 = variables.Variable([0.1] * 3, name="biases")
weights1 = variables.Variable(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
orig_meta_graph, var_list = meta_graph.export_scoped_meta_graph(
export_scope="hidden1/hidden2", graph=graph1)
var_names = [v.name for _, v in var_list.items()]
self.assertEqual(["hidden3/biases:0", "hidden3/weights:0"],
sorted(var_list.keys()))
self.assertEqual([
"hidden1/hidden2/hidden3/biases:0", "hidden1/hidden2/hidden3/weights:0"
], sorted(var_names))
for node in orig_meta_graph.graph_def.node:
self.assertTrue(node.name.startswith("hidden3"))
graph2 = ops.Graph()
new_var_list = meta_graph.import_scoped_meta_graph(
orig_meta_graph, import_scope="new_hidden1/new_hidden2", graph=graph2)
self.assertEqual(["hidden3/biases:0", "hidden3/weights:0"],
sorted(new_var_list.keys()))
new_var_names = [v.name for _, v in new_var_list.items()]
self.assertEqual([
"new_hidden1/new_hidden2/hidden3/biases:0",
"new_hidden1/new_hidden2/hidden3/weights:0"
], sorted(new_var_names))
    nodes = [
        "new_hidden1/new_hidden2/hidden3/biases/Assign",
        "new_hidden1/new_hidden2/hidden3/weights/Assign"
    ]
    expected = [
        b"loc:@new_hidden1/new_hidden2/hidden3/biases",
        b"loc:@new_hidden1/new_hidden2/hidden3/weights"
    ]
    # Check that the colocation constraints on the Assign nodes now carry the
    # import scope.
    for n, e in zip(nodes, expected):
      self.assertEqual([e],
                       graph2.get_operation_by_name(n).get_attr("_class"))
@test_util.run_deprecated_v1
def testExportNestedNames(self):
self.doTestExportNestedNames(use_resource=False)
@test_util.run_deprecated_v1
def testExportNestedNamesResource(self):
self.doTestExportNestedNames(use_resource=True)
@test_util.run_deprecated_v1
def testPotentialCycle(self):
graph1 = ops.Graph()
with graph1.as_default():
a = constant_op.constant(1.0, shape=[2, 2])
b = constant_op.constant(2.0, shape=[2, 2])
matmul = math_ops.matmul(a, b)
with ops.name_scope("hidden1"):
c = nn_ops.relu(matmul)
d = constant_op.constant(3.0, shape=[2, 2])
matmul = math_ops.matmul(c, d)
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
export_scope="hidden1", graph=graph1)
graph2 = ops.Graph()
with graph2.as_default():
with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
meta_graph.import_scoped_meta_graph(
orig_meta_graph, import_scope="new_hidden1")
meta_graph.import_scoped_meta_graph(
orig_meta_graph,
import_scope="new_hidden1",
input_map={
"$unbound_inputs_MatMul": constant_op.constant(
4.0, shape=[2, 2])
})
@test_util.run_deprecated_v1
def testClearDevices(self):
graph1 = ops.Graph()
with graph1.as_default():
with ops.device("/device:CPU:0"):
a = variables.Variable(
constant_op.constant(
1.0, shape=[2, 2]), name="a")
with ops.device("/job:ps/replica:0/task:0/device:GPU:0"):
b = variables.Variable(
constant_op.constant(
2.0, shape=[2, 2]), name="b")
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
math_ops.matmul(a, b, name="matmul")
self.assertEqual("/device:CPU:0", str(graph1.as_graph_element("a").device))
self.assertEqual("/job:ps/replica:0/task:0/device:GPU:0",
str(graph1.as_graph_element("b").device))
self.assertEqual("/job:localhost/replica:0/task:0/device:CPU:0",
str(graph1.as_graph_element("matmul").device))
# Verifies that devices are cleared on export.
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
graph=graph1, clear_devices=True)
graph2 = ops.Graph()
with graph2.as_default():
meta_graph.import_scoped_meta_graph(orig_meta_graph, clear_devices=False)
self.assertEqual("", str(graph2.as_graph_element("a").device))
self.assertEqual("", str(graph2.as_graph_element("b").device))
self.assertEqual("", str(graph2.as_graph_element("matmul").device))
# Verifies that devices are cleared on export when passing in graph_def.
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
graph_def=graph1.as_graph_def(), clear_devices=True)
graph2 = ops.Graph()
with graph2.as_default():
meta_graph.import_scoped_meta_graph(orig_meta_graph, clear_devices=False)
self.assertEqual("", str(graph2.as_graph_element("a").device))
self.assertEqual("", str(graph2.as_graph_element("b").device))
self.assertEqual("", str(graph2.as_graph_element("matmul").device))
# Verifies that devices are cleared on import.
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
graph=graph1, clear_devices=False)
graph2 = ops.Graph()
with graph2.as_default():
meta_graph.import_scoped_meta_graph(orig_meta_graph, clear_devices=True)
self.assertEqual("", str(graph2.as_graph_element("a").device))
self.assertEqual("", str(graph2.as_graph_element("b").device))
self.assertEqual("", str(graph2.as_graph_element("matmul").device))
class MetaGraphWithVariableScopeTest(test.TestCase):
@test_util.run_deprecated_v1
def testMetricsCollection(self):
def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(
values, dtype=dtype, shape=shape)))
meta_graph_filename = os.path.join(
_TestDir("metrics_export"), "meta_graph.pb")
graph = ops.Graph()
with self.session(graph=graph) as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
_, update_op = metrics.mean(values)
initializer = variables.local_variables_initializer()
self.evaluate(initializer)
self.evaluate(update_op)
meta_graph.export_scoped_meta_graph(
filename=meta_graph_filename, graph=graph)
# Verifies that importing a meta_graph with LOCAL_VARIABLES collection
# works correctly.
graph = ops.Graph()
with self.session(graph=graph) as sess:
meta_graph.import_scoped_meta_graph(meta_graph_filename)
initializer = variables.local_variables_initializer()
self.evaluate(initializer)
    # Verifies that importing an old meta_graph whose "local_variables"
    # collection is of node_list type works, but that an initializer cannot
    # be built from that collection.
graph = ops.Graph()
with self.session(graph=graph) as sess:
meta_graph.import_scoped_meta_graph(
test.test_src_dir_path(
"python/framework/testdata/metrics_export_meta_graph.pb"))
self.assertEqual(len(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)),
2)
with self.assertRaisesRegexp(
AttributeError, "'Tensor' object has no attribute 'initializer'"):
initializer = variables.local_variables_initializer()
class ExportImportAcrossScopesTest(test.TestCase):
@test_util.run_deprecated_v1
def testPartitionedVariables(self):
def make_graph_with_partitioned_variables(use_resource):
variable_scope.get_variable(
name="weights",
partitioner=partitioned_variables.fixed_size_partitioner(3, axis=0),
initializer=random_ops.truncated_normal([100, 10]),
use_resource=use_resource)
# The next variable illustrates the necessity of restoring collections
# in a deterministic fashion when using ResourceVariables.
variable_scope.get_variable(
name="another",
shape=[],
collections=["a", "b", "z", "f", "e", "d", "g"],
use_resource=use_resource)
self._testExportImportAcrossScopes(
make_graph_with_partitioned_variables, use_resource=False)
self._testExportImportAcrossScopes(
make_graph_with_partitioned_variables, use_resource=True)
def _testExportImportAcrossScopes(self, graph_fn, use_resource):
"""Tests export and importing a graph across scopes.
Args:
graph_fn: A closure that creates a graph on the current scope.
use_resource: A bool indicating whether or not to use ResourceVariables.
"""
with ops.Graph().as_default() as original_graph:
with variable_scope.variable_scope("dropA/dropB/keepA"):
graph_fn(use_resource=use_resource)
exported_meta_graph_def = meta_graph.export_scoped_meta_graph(
graph=original_graph,
export_scope="dropA/dropB")[0]
with ops.Graph().as_default() as imported_graph:
meta_graph.import_scoped_meta_graph(
exported_meta_graph_def,
import_scope="importA")
with ops.Graph().as_default() as expected_graph:
with variable_scope.variable_scope("importA/keepA"):
graph_fn(use_resource=use_resource)
result = meta_graph.export_scoped_meta_graph(graph=imported_graph)[0]
expected = meta_graph.export_scoped_meta_graph(graph=expected_graph)[0]
if use_resource:
# Clear all shared_name attributes before comparing, since they are
# orthogonal to scopes and are not updated on export/import.
for meta_graph_def in [result, expected]:
for node in meta_graph_def.graph_def.node:
shared_name_attr = "shared_name"
shared_name_value = node.attr.get(shared_name_attr, None)
if shared_name_value and shared_name_value.HasField("s"):
if shared_name_value.s:
node.attr[shared_name_attr].s = b""
test_util.assert_meta_graph_protos_equal(self, expected, result)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/framework/meta_graph_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
import numpy as np
from google.protobuf import json_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Helpers for creating Example objects
example = example_pb2.Example
feature = feature_pb2.Feature
features = lambda d: feature_pb2.Features(feature=d)
bytes_feature = lambda v: feature(bytes_list=feature_pb2.BytesList(value=v))
int64_feature = lambda v: feature(int64_list=feature_pb2.Int64List(value=v))
float_feature = lambda v: feature(float_list=feature_pb2.FloatList(value=v))
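# Illustrative only: these helpers compose into an Example proto, e.g.
#   example(features=features({"x": int64_feature([1, 2])}))
# builds an Example whose "x" feature holds the int64 list [1, 2].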
# Helpers for creating SequenceExample objects
feature_list = lambda l: feature_pb2.FeatureList(feature=l)
feature_lists = lambda d: feature_pb2.FeatureLists(feature_list=d)
sequence_example = example_pb2.SequenceExample
def flatten(list_of_lists):
"""Flatten one level of nesting."""
return itertools.chain.from_iterable(list_of_lists)
def flatten_values_tensors_or_sparse(tensors_list):
"""Flatten each SparseTensor object into 3 Tensors for session.run()."""
return list(
flatten([[v.indices, v.values, v.dense_shape]
if isinstance(v, sparse_tensor.SparseTensor) else [v]
for v in tensors_list]))
def _compare_output_to_expected(tester, dict_tensors, expected_tensors,
flat_output):
tester.assertEqual(set(dict_tensors.keys()), set(expected_tensors.keys()))
i = 0 # Index into the flattened output of session.run()
for k, v in dict_tensors.items():
expected_v = expected_tensors[k]
tf_logging.info("Comparing key: %s", k)
if isinstance(v, sparse_tensor.SparseTensor):
# Three outputs for SparseTensor : indices, values, shape.
tester.assertEqual([k, len(expected_v)], [k, 3])
tester.assertAllEqual(expected_v[0], flat_output[i])
tester.assertAllEqual(expected_v[1], flat_output[i + 1])
tester.assertAllEqual(expected_v[2], flat_output[i + 2])
i += 3
else:
# One output for standard Tensor.
tester.assertAllEqual(expected_v, flat_output[i])
i += 1
class ParseExampleTest(test.TestCase):
def _test(self, kwargs, expected_values=None, expected_err=None):
with self.cached_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
out = parsing_ops.parse_example(**kwargs)
sess.run(flatten_values_tensors_or_sparse(out.values()))
return
else:
# Returns dict w/ Tensors and SparseTensors.
out = parsing_ops.parse_example(**kwargs)
result = flatten_values_tensors_or_sparse(out.values())
# Check values.
tf_result = self.evaluate(result)
_compare_output_to_expected(self, out, expected_values, tf_result)
# Check shapes; if serialized is a Tensor we need its size to
# properly check.
serialized = kwargs["serialized"]
batch_size = (
self.evaluate(serialized).size if isinstance(serialized, ops.Tensor)
else np.asarray(serialized).size)
for k, f in kwargs["features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(
tuple(out[k].get_shape().as_list()), (batch_size,) + f.shape)
elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
tuple(out[k].indices.get_shape().as_list()), (None, 2))
self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(out[k].dense_shape.get_shape().as_list()), (2,))
@test_util.run_deprecated_v1
def testEmptySerializedWithAllDefaults(self):
sparse_name = "st_a"
a_name = "a"
b_name = "b"
c_name = "c:has_a_tricky_name"
a_default = [0, 42, 0]
b_default = np.random.rand(3, 3).astype(bytes)
c_default = np.random.rand(2).astype(np.float32)
expected_st_a = ( # indices, values, shape
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # sp_a is DT_INT64
np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0
expected_output = {
sparse_name: expected_st_a,
a_name: np.array(2 * [[a_default]]),
b_name: np.array(2 * [b_default]),
c_name: np.array(2 * [c_default]),
}
self._test({
"example_names": np.empty((0,), dtype=bytes),
"serialized": ops.convert_to_tensor(["", ""]),
"features": {
sparse_name:
parsing_ops.VarLenFeature(dtypes.int64),
a_name:
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
b_name:
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
c_name:
parsing_ops.FixedLenFeature(
(2,), dtypes.float32, default_value=c_default),
}
}, expected_output)
def testEmptySerializedWithoutDefaultsShouldFail(self):
input_features = {
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=[0, 42, 0]),
"b":
parsing_ops.FixedLenFeature(
(3, 3),
dtypes.string,
default_value=np.random.rand(3, 3).astype(bytes)),
# Feature "c" is missing a default, this gap will cause failure.
"c":
parsing_ops.FixedLenFeature((2,), dtype=dtypes.float32),
}
# Edge case where the key is there but the feature value is empty
original = example(features=features({"c": feature()}))
self._test(
{
"example_names": ["in1"],
"serialized": [original.SerializeToString()],
"features": input_features,
},
expected_err=(
errors_impl.OpError,
"Name: in1, Feature: c \\(data type: float\\) is required"))
# Standard case of missing key and value.
self._test(
{
"example_names": ["in1", "in2"],
"serialized": ["", ""],
"features": input_features,
},
expected_err=(
errors_impl.OpError,
"Name: in1, Feature: c \\(data type: float\\) is required"))
def testDenseNotMatchingShapeShouldFail(self):
original = [
example(features=features({
"a": float_feature([1, 1, 3]),
})),
example(features=features({
"a": float_feature([-1, -1]),
}))
]
names = ["passing", "failing"]
serialized = [m.SerializeToString() for m in original]
self._test(
{
"example_names": names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a": parsing_ops.FixedLenFeature((1, 3), dtypes.float32)
}
},
expected_err=(errors_impl.OpError,
"Name: failing, Key: a, Index: 1. Number of float val"))
def testDenseDefaultNoShapeShouldFail(self):
original = [
example(features=features({
"a": float_feature([1, 1, 3]),
})),
]
serialized = [m.SerializeToString() for m in original]
self._test(
{
"example_names": ["failing"],
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a": parsing_ops.FixedLenFeature(None, dtypes.float32)
}
},
expected_err=(ValueError, "Missing shape for feature a"))
@test_util.run_deprecated_v1
def testSerializedContainingSparse(self):
original = [
example(features=features({
"st_c": float_feature([3, 4])
})),
example(
features=features({
"st_c": float_feature([]), # empty float list
})),
example(
features=features({
"st_d": feature(), # feature with nothing in it
})),
example(
features=features({
"st_c": float_feature([1, 2, -1]),
"st_d": bytes_feature([b"hi"])
}))
]
serialized = [m.SerializeToString() for m in original]
expected_st_c = ( # indices, values, shape
np.array([[0, 0], [0, 1], [3, 0], [3, 1], [3, 2]], dtype=np.int64),
np.array([3.0, 4.0, 1.0, 2.0, -1.0], dtype=np.float32),
        np.array([4, 3], dtype=np.int64)) # batch == 4, max_elems = 3
expected_st_d = ( # indices, values, shape
np.array([[3, 0]], dtype=np.int64), np.array(["hi"], dtype=bytes),
        np.array([4, 1], dtype=np.int64)) # batch == 4, max_elems = 1
expected_output = {
"st_c": expected_st_c,
"st_d": expected_st_d,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"st_c": parsing_ops.VarLenFeature(dtypes.float32),
"st_d": parsing_ops.VarLenFeature(dtypes.string)
}
}, expected_output)
def testSerializedContainingSparseFeature(self):
original = [
example(
features=features({
"val": float_feature([3, 4]),
"idx": int64_feature([5, 10])
})),
example(
features=features({
"val": float_feature([]), # empty float list
"idx": int64_feature([])
})),
example(
features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(
features=features({
"val": float_feature([1, 2, -1]),
"idx":
int64_feature([0, 9, 3]) # unsorted
}))
]
serialized = [m.SerializeToString() for m in original]
expected_sp = ( # indices, values, shape
np.array([[0, 5], [0, 10], [3, 0], [3, 3], [3, 9]], dtype=np.int64),
np.array([3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32),
np.array([4, 13], dtype=np.int64)) # batch == 4, max_elems = 13
expected_output = {
"sp": expected_sp,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.float32, [13])
}
}, expected_output)
def testSerializedContainingSparseFeatureReuse(self):
original = [
example(
features=features({
"val1": float_feature([3, 4]),
"val2": float_feature([5, 6]),
"idx": int64_feature([5, 10])
})),
example(
features=features({
"val1": float_feature([]), # empty float list
"idx": int64_feature([])
})),
]
serialized = [m.SerializeToString() for m in original]
expected_sp1 = ( # indices, values, shape
np.array([[0, 5], [0, 10]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32), np.array(
[2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
expected_sp2 = ( # indices, values, shape
np.array([[0, 5], [0, 10]], dtype=np.int64),
np.array([5.0, 6.0], dtype=np.float32), np.array(
            [2, 7], dtype=np.int64)) # batch == 2, max_elems = 7
expected_output = {
"sp1": expected_sp1,
"sp2": expected_sp2,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"sp1":
parsing_ops.SparseFeature("idx", "val1", dtypes.float32, 13),
"sp2":
parsing_ops.SparseFeature(
"idx", "val2", dtypes.float32, size=7, already_sorted=True)
}
}, expected_output)
def testSerializedContaining3DSparseFeature(self):
original = [
example(
features=features({
"val": float_feature([3, 4]),
"idx0": int64_feature([5, 10]),
"idx1": int64_feature([0, 2]),
})),
example(
features=features({
"val": float_feature([]), # empty float list
"idx0": int64_feature([]),
"idx1": int64_feature([]),
})),
example(
features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(
features=features({
"val": float_feature([1, 2, -1]),
"idx0": int64_feature([0, 9, 3]), # unsorted
"idx1": int64_feature([1, 0, 2]),
}))
]
serialized = [m.SerializeToString() for m in original]
expected_sp = (
# indices
np.array(
[[0, 5, 0], [0, 10, 2], [3, 0, 1], [3, 3, 2], [3, 9, 0]],
dtype=np.int64),
# values
np.array([3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32),
# shape batch == 4, max_elems = 13
np.array([4, 13, 3], dtype=np.int64))
expected_output = {
"sp": expected_sp,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"sp":
parsing_ops.SparseFeature(["idx0", "idx1"], "val",
dtypes.float32, [13, 3])
}
}, expected_output)
def testSerializedContainingDense(self):
aname = "a"
bname = "b*has+a:tricky_name"
original = [
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
})),
example(
features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b""]),
}))
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
aname:
np.array([[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
bname:
np.array(["b0_str", ""], dtype=bytes).reshape(2, 1, 1, 1, 1),
}
# No defaults, values required
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenFeature((1, 1, 1, 1), dtype=dtypes.string),
}
}, expected_output)
  # This test is identical to the previous one except
  # for the creation of 'serialized'.
def testSerializedContainingDenseWithConcat(self):
aname = "a"
bname = "b*has+a:tricky_name"
# TODO(lew): Feature appearing twice should be an error in future.
original = [
(example(features=features({
aname: float_feature([10, 10]),
})),
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
}))),
(
example(features=features({
bname: bytes_feature([b"b100"]),
})),
example(
features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b"b1"]),
})),
),
]
serialized = [
m.SerializeToString() + n.SerializeToString() for (m, n) in original
]
expected_output = {
aname:
np.array([[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
bname:
np.array(["b0_str", "b1"], dtype=bytes).reshape(2, 1, 1, 1, 1),
}
# No defaults, values required
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenFeature((1, 1, 1, 1), dtype=dtypes.string),
}
}, expected_output)
def testSerializedContainingDenseScalar(self):
original = [
example(features=features({
"a": float_feature([1]),
})),
example(features=features({}))
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"a":
np.array([[1], [-1]], dtype=np.float32) # 2x1 (column vector)
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a":
parsing_ops.FixedLenFeature(
(1,), dtype=dtypes.float32, default_value=-1),
}
}, expected_output)
def testSerializedContainingDenseWithDefaults(self):
original = [
example(features=features({
"a": float_feature([1, 1]),
})),
example(features=features({
"b": bytes_feature([b"b1"]),
})),
example(features=features({
"b": feature()
})),
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"a":
np.array([[1, 1], [3, -3], [3, -3]], dtype=np.float32).reshape(
3, 1, 2, 1),
"b":
np.array(["tmp_str", "b1", "tmp_str"], dtype=bytes).reshape(
3, 1, 1, 1, 1),
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a":
parsing_ops.FixedLenFeature(
(1, 2, 1), dtype=dtypes.float32, default_value=[3.0, -3.0]),
"b":
parsing_ops.FixedLenFeature(
(1, 1, 1, 1), dtype=dtypes.string, default_value="tmp_str"),
}
}, expected_output)
@test_util.run_deprecated_v1
def testSerializedContainingSparseAndSparseFeatureAndDenseWithNoDefault(self):
expected_st_a = ( # indices, values, shape
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # sp_a is DT_INT64
np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0
expected_sp = ( # indices, values, shape
np.array([[0, 0], [0, 3], [1, 7]], dtype=np.int64),
np.array(["a", "b", "c"], dtype="|S"), np.array(
            [2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
original = [
example(
features=features({
"c": float_feature([3, 4]),
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3])
})),
example(
features=features({
"c": float_feature([1, 2]),
"val": bytes_feature([b"c"]),
"idx": int64_feature([7])
}))
]
names = ["in1", "in2"]
serialized = [m.SerializeToString() for m in original]
a_default = [1, 2, 3]
b_default = np.random.rand(3, 3).astype(bytes)
expected_output = {
"st_a": expected_st_a,
"sp": expected_sp,
"a": np.array(2 * [[a_default]]),
"b": np.array(2 * [b_default]),
"c": np.array([[3, 4], [1, 2]], dtype=np.float32),
}
self._test(
{
"example_names": names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"sp":
parsing_ops.SparseFeature("idx", "val", dtypes.string, 13),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature((2,), dtypes.float32),
}
},
expected_output)
@test_util.run_deprecated_v1
def testSerializedContainingSparseAndSparseFeatureWithReuse(self):
expected_idx = ( # indices, values, shape
np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.int64),
np.array([0, 3, 7, 1]),
        np.array([2, 2], dtype=np.int64)) # batch == 2, max_elems = 2
expected_sp = ( # indices, values, shape
np.array([[0, 0], [0, 3], [1, 1], [1, 7]], dtype=np.int64),
np.array(["a", "b", "d", "c"], dtype="|S"),
        np.array([2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
original = [
example(
features=features({
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3])
})),
example(
features=features({
"val": bytes_feature([b"c", b"d"]),
"idx": int64_feature([7, 1])
}))
]
names = ["in1", "in2"]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"idx": expected_idx,
"sp": expected_sp,
}
self._test({
"example_names": names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
"idx":
parsing_ops.VarLenFeature(dtypes.int64),
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.string, [13]),
}
}, expected_output)
def _testSerializedContainingVarLenDenseLargerBatch(self, batch_size):
# During parsing, data read from the serialized proto is stored in buffers.
# For small batch sizes, a buffer will contain one minibatch entry.
# For larger batch sizes, a buffer may contain several minibatch
# entries. This test identified a bug where the code that copied
# data out of the buffers and into the output tensors assumed each
# buffer only contained one minibatch entry. The bug has since been fixed.
truth_int = [i for i in range(batch_size)]
truth_str = [[("foo%d" % i).encode(), ("bar%d" % i).encode()]
for i in range(batch_size)]
expected_str = copy.deepcopy(truth_str)
# Delete some intermediate entries
for i in range(batch_size):
col = 1
if np.random.rand() < 0.25:
# w.p. 25%, drop out the second entry
expected_str[i][col] = b"default"
col -= 1
truth_str[i].pop()
if np.random.rand() < 0.25:
# w.p. 25%, drop out the second entry (possibly again)
expected_str[i][col] = b"default"
truth_str[i].pop()
expected_output = {
# Batch size batch_size, 1 time step.
"a": np.array(truth_int, dtype=np.int64).reshape(batch_size, 1),
# Batch size batch_size, 2 time steps.
"b": np.array(expected_str, dtype="|S").reshape(batch_size, 2),
}
original = [
example(
features=features({
"a": int64_feature([truth_int[i]]),
"b": bytes_feature(truth_str[i])
})) for i in range(batch_size)
]
serialized = [m.SerializeToString() for m in original]
self._test({
"serialized": ops.convert_to_tensor(serialized, dtype=dtypes.string),
"features": {
"a":
parsing_ops.FixedLenSequenceFeature(
shape=(),
dtype=dtypes.int64,
allow_missing=True,
default_value=-1),
"b":
parsing_ops.FixedLenSequenceFeature(
shape=[],
dtype=dtypes.string,
allow_missing=True,
default_value="default"),
}
}, expected_output)
def testSerializedContainingVarLenDenseLargerBatch(self):
np.random.seed(3456)
for batch_size in (1, 10, 20, 100, 256):
self._testSerializedContainingVarLenDenseLargerBatch(batch_size)
@test_util.run_deprecated_v1
def testSerializedContainingVarLenDense(self):
aname = "a"
bname = "b"
cname = "c"
dname = "d"
example_names = ["in1", "in2", "in3", "in4"]
original = [
example(features=features({
cname: int64_feature([2]),
})),
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str", b"b1_str"]),
})),
example(
features=features({
aname: float_feature([-1, -1, 2, 2]),
bname: bytes_feature([b"b1"]),
})),
example(
features=features({
aname: float_feature([]),
cname: int64_feature([3]),
})),
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
aname:
np.array(
[
[0, 0, 0, 0],
[1, 1, 0, 0],
[-1, -1, 2, 2],
[0, 0, 0, 0],
],
dtype=np.float32).reshape(4, 2, 2, 1),
bname:
np.array(
[["", ""], ["b0_str", "b1_str"], ["b1", ""], ["", ""]],
dtype=bytes).reshape(4, 2, 1, 1, 1),
cname:
np.array([2, 0, 0, 3], dtype=np.int64).reshape(4, 1),
dname:
np.empty(shape=(4, 0), dtype=bytes),
}
self._test({
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=True),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}
}, expected_output)
# Test with padding values.
expected_output_custom_padding = dict(expected_output)
expected_output_custom_padding[aname] = np.array(
[
[-2, -2, -2, -2],
[1, 1, -2, -2],
[-1, -1, 2, 2],
[-2, -2, -2, -2],
],
dtype=np.float32).reshape(4, 2, 2, 1)
self._test({
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1),
dtype=dtypes.float32,
allow_missing=True,
default_value=-2.0),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=True),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}
}, expected_output_custom_padding)
# Change number of required values so the inputs are not a
# multiple of this size.
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(
errors_impl.OpError, "Name: in3, Key: b, Index: 2. "
"Number of bytes values is not a multiple of stride length."))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1),
dtype=dtypes.float32,
allow_missing=True,
default_value=[]),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(ValueError,
"Cannot reshape a tensor with 0 elements to shape"))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenFeature(
(None, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(ValueError,
"First dimension of shape for feature a unknown. "
"Consider using FixedLenSequenceFeature."))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
cname:
parsing_ops.FixedLenFeature(
(1, None), dtype=dtypes.int64, default_value=[[1]]),
}
},
expected_err=(ValueError,
"All dimensions of shape for feature c need to be known "
r"but received \(1, None\)."))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=False),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(ValueError,
"Unsupported: FixedLenSequenceFeature requires "
"allow_missing to be True."))
class ParseSingleExampleTest(test.TestCase):
def _test(self, kwargs, expected_values=None, expected_err=None):
with self.cached_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
out = parsing_ops.parse_single_example(**kwargs)
sess.run(flatten_values_tensors_or_sparse(out.values()))
else:
# Returns dict w/ Tensors and SparseTensors.
out = parsing_ops.parse_single_example(**kwargs)
# Check values.
tf_result = sess.run(flatten_values_tensors_or_sparse(out.values()))
_compare_output_to_expected(self, out, expected_values, tf_result)
# Check shapes.
for k, f in kwargs["features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(
tuple(out[k].get_shape()), tensor_shape.as_shape(f.shape))
elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
tuple(out[k].indices.get_shape().as_list()), (None, 1))
self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(out[k].dense_shape.get_shape().as_list()), (1,))
@test_util.run_deprecated_v1
def testSingleExampleWithSparseAndSparseFeatureAndDense(self):
original = example(
features=features({
"c": float_feature([3, 4]),
"d": float_feature([0.0, 1.0]),
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3]),
"st_a": float_feature([3.0, 4.0])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0], [1]], dtype=np.int64), # indices
np.array([3.0, 4.0], dtype=np.float32), # values
np.array([2], dtype=np.int64)) # shape: max_values = 2
expected_sp = ( # indices, values, shape
np.array([[0], [3]], dtype=np.int64), np.array(["a", "b"], dtype="|S"),
np.array([13], dtype=np.int64)) # max_values = 13
a_default = [1, 2, 3]
b_default = np.random.rand(3, 3).astype(bytes)
expected_output = {
"st_a": expected_st_a,
"sp": expected_sp,
"a": [a_default],
"b": b_default,
"c": np.array([3, 4], dtype=np.float32),
"d": np.array([0.0, 1.0], dtype=np.float32),
}
self._test(
{
"example_names": ops.convert_to_tensor("in1"),
"serialized": ops.convert_to_tensor(serialized),
"features": {
"st_a":
parsing_ops.VarLenFeature(dtypes.float32),
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.string,
[13]),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature(2, dtypes.float32),
"d":
parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True)
}
},
expected_output)
class ParseSequenceExampleTest(test.TestCase):
def testCreateSequenceExample(self):
value = sequence_example(
context=features({
"global_feature": float_feature([1, 2, 3]),
}),
feature_lists=feature_lists({
"repeated_feature_2_frames":
feature_list([
bytes_feature([b"a", b"b", b"c"]),
bytes_feature([b"a", b"d", b"e"])
]),
"repeated_feature_3_frames":
feature_list([
int64_feature([3, 4, 5, 6, 7]),
int64_feature([-1, 0, 0, 0, 0]),
int64_feature([1, 2, 3, 4, 5])
])
}))
value.SerializeToString() # Smoke test
def _test(self,
kwargs,
expected_context_values=None,
expected_feat_list_values=None,
expected_length_values=None,
expected_err=None,
batch=False):
expected_context_values = expected_context_values or {}
expected_feat_list_values = expected_feat_list_values or {}
expected_length_values = expected_length_values or {}
with self.cached_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
if batch:
c_out, fl_out, _ = parsing_ops.parse_sequence_example(**kwargs)
else:
c_out, fl_out = parsing_ops.parse_single_sequence_example(**kwargs)
if c_out:
sess.run(flatten_values_tensors_or_sparse(c_out.values()))
if fl_out:
sess.run(flatten_values_tensors_or_sparse(fl_out.values()))
else:
# Returns dicts w/ Tensors and SparseTensors.
if batch:
(context_out, feat_list_out,
lengths_out) = parsing_ops.parse_sequence_example(**kwargs)
else:
(context_out,
feat_list_out) = parsing_ops.parse_single_sequence_example(**kwargs)
lengths_out = {}
context_result = sess.run(
flatten_values_tensors_or_sparse(
context_out.values())) if context_out else []
feat_list_result = sess.run(
flatten_values_tensors_or_sparse(
feat_list_out.values())) if feat_list_out else []
lengths_result = sess.run(
flatten_values_tensors_or_sparse(
lengths_out.values())) if lengths_out else []
# Check values.
_compare_output_to_expected(self, context_out, expected_context_values,
context_result)
_compare_output_to_expected(self, feat_list_out,
expected_feat_list_values, feat_list_result)
_compare_output_to_expected(self, lengths_out, expected_length_values,
lengths_result)
# Check shapes; if serialized is a Tensor we need its size to
# properly check.
if "context_features" in kwargs:
for k, f in kwargs["context_features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
if batch:
self.assertEqual(
tuple(context_out[k].get_shape().as_list()[1:]), f.shape)
else:
self.assertEqual(
tuple(context_out[k].get_shape().as_list()), f.shape)
elif isinstance(f, parsing_ops.VarLenFeature) and batch:
self.assertEqual(
tuple(context_out[k].indices.get_shape().as_list()), (None, 2))
self.assertEqual(
tuple(context_out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(context_out[k].dense_shape.get_shape().as_list()), (2,))
elif isinstance(f, parsing_ops.VarLenFeature) and not batch:
self.assertEqual(
tuple(context_out[k].indices.get_shape().as_list()), (None, 1))
self.assertEqual(
tuple(context_out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(context_out[k].dense_shape.get_shape().as_list()), (1,))
def _testBoth(self,
kwargs,
expected_context_values=None,
expected_feat_list_values=None,
expected_err=None):
# Test using tf.io.parse_single_sequence_example
self._test(
kwargs,
expected_context_values=expected_context_values,
expected_feat_list_values=expected_feat_list_values,
expected_err=expected_err,
batch=False)
# Convert the input to a batch of size 1, and test using
# tf.parse_sequence_example.
# Some replacements are needed for the batch version.
kwargs["serialized"] = [kwargs.pop("serialized")]
kwargs["example_names"] = [kwargs.pop("example_name")
] if "example_name" in kwargs else None
# Disable error string matching; it's not consistent for batch mode.
if expected_err:
expected_err = (expected_err[0], "")
# Add a batch dimension to expected output
if expected_context_values:
new_values = {}
for k in expected_context_values:
v = expected_context_values[k]
if isinstance(kwargs["context_features"][k],
parsing_ops.FixedLenFeature):
new_values[k] = np.expand_dims(v, axis=0)
else:
# Sparse tensor.
new_values[k] = (np.insert(v[0], 0, 0, axis=1), v[1],
np.insert(v[2], 0, 1))
expected_context_values = new_values
expected_length_values = {}
if expected_feat_list_values:
new_values = {}
for k in expected_feat_list_values:
v = expected_feat_list_values[k]
if isinstance(kwargs["sequence_features"][k],
parsing_ops.FixedLenSequenceFeature):
expected_length_values[k] = [np.shape(v)[0]]
new_values[k] = np.expand_dims(v, axis=0)
else:
# Sparse tensor.
new_values[k] = (np.insert(v[0], 0, 0, axis=1), v[1],
np.insert(v[2], 0, 1))
expected_feat_list_values = new_values
self._test(
kwargs,
expected_context_values=expected_context_values,
expected_feat_list_values=expected_feat_list_values,
expected_length_values=expected_length_values,
expected_err=expected_err,
batch=True)
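  # A worked sketch of the sparse batching transformation in _testBoth above
  # (illustrative values, not drawn from any test): a rank-1 sparse expected
  # value is an (indices, values, dense_shape) triple such as
  #   indices = [[0], [1]], values = [3.0, 4.0], dense_shape = [2].
  # Wrapping it in a batch of size 1 prepends a batch coordinate of 0 to each
  # index row and a leading 1 to the dense shape:
  #   np.insert(indices, 0, 0, axis=1)  -> [[0, 0], [0, 1]]
  #   np.insert(dense_shape, 0, 1)      -> [1, 2]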
@test_util.run_deprecated_v1
def testSequenceExampleWithSparseAndDenseContext(self):
original = sequence_example(
context=features({
"c": float_feature([3, 4]),
"st_a": float_feature([3.0, 4.0])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0], [1]], dtype=np.int64), # indices
np.array([3.0, 4.0], dtype=np.float32), # values
np.array([2], dtype=np.int64)) # shape: num_features = 2
a_default = [[1, 2, 3]]
b_default = np.random.rand(3, 3).astype(bytes)
expected_context_output = {
"st_a": expected_st_a,
"a": a_default,
"b": b_default,
"c": np.array([3, 4], dtype=np.float32),
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"context_features": {
"st_a":
parsing_ops.VarLenFeature(dtypes.float32),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature((2,), dtypes.float32),
}
},
expected_context_values=expected_context_output)
@test_util.run_deprecated_v1
def testSequenceExampleWithMultipleSizeFeatureLists(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([
int64_feature([-1, 0, 1]),
int64_feature([2, 3, 4]),
int64_feature([5, 6, 7]),
int64_feature([8, 9, 10]),
]),
"b":
feature_list([bytes_feature([b"r00", b"r01", b"r10", b"r11"])]),
"c":
feature_list([float_feature([3, 4]),
float_feature([-1, 2])]),
}))
serialized = original.SerializeToString()
expected_feature_list_output = {
"a":
np.array(
[ # outer dimension is time.
[[-1, 0, 1]], # inside are 1x3 matrices
[[2, 3, 4]],
[[5, 6, 7]],
[[8, 9, 10]]
],
dtype=np.int64),
"b":
np.array(
[ # outer dimension is time, inside are 2x2 matrices
[[b"r00", b"r01"], [b"r10", b"r11"]]
],
dtype=bytes),
"c":
np.array(
[ # outer dimension is time, inside are 2-vectors
[3, 4], [-1, 2]
],
dtype=np.float32),
"d":
np.empty(shape=(0, 5), dtype=np.float32), # empty_allowed_missing
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a":
parsing_ops.FixedLenSequenceFeature((1, 3), dtypes.int64),
"b":
parsing_ops.FixedLenSequenceFeature((2, 2), dtypes.string),
"c":
parsing_ops.FixedLenSequenceFeature(2, dtypes.float32),
"d":
parsing_ops.FixedLenSequenceFeature(
(5,), dtypes.float32, allow_missing=True),
}
},
expected_feat_list_values=expected_feature_list_output)
@test_util.run_deprecated_v1
def testSequenceExampleWithoutDebugName(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([int64_feature([3, 4]),
int64_feature([1, 0])]),
"st_a":
feature_list([
float_feature([3.0, 4.0]),
float_feature([5.0]),
float_feature([])
]),
"st_b":
feature_list([
bytes_feature([b"a"]),
bytes_feature([]),
bytes_feature([]),
bytes_feature([b"b", b"c"])
])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0, 0], [0, 1], [1, 0]], dtype=np.int64), # indices
np.array([3.0, 4.0, 5.0], dtype=np.float32), # values
np.array([3, 2], dtype=np.int64)) # shape: num_time = 3, max_feat = 2
expected_st_b = (
np.array([[0, 0], [3, 0], [3, 1]], dtype=np.int64), # indices
np.array(["a", "b", "c"], dtype="|S"), # values
np.array([4, 2], dtype=np.int64)) # shape: num_time = 4, max_feat = 2
expected_st_c = (
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # values
np.array([0, 0], dtype=np.int64)) # shape: num_time = 0, max_feat = 0
expected_feature_list_output = {
"a": np.array([[3, 4], [1, 0]], dtype=np.int64),
"st_a": expected_st_a,
"st_b": expected_st_b,
"st_c": expected_st_c,
}
self._testBoth(
{
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"st_a": parsing_ops.VarLenFeature(dtypes.float32),
"st_b": parsing_ops.VarLenFeature(dtypes.string),
"st_c": parsing_ops.VarLenFeature(dtypes.int64),
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64),
}
},
expected_feat_list_values=expected_feature_list_output)
@test_util.run_deprecated_v1
def testSequenceExampleWithSparseAndDenseFeatureLists(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([int64_feature([3, 4]),
int64_feature([1, 0])]),
"st_a":
feature_list([
float_feature([3.0, 4.0]),
float_feature([5.0]),
float_feature([])
]),
"st_b":
feature_list([
bytes_feature([b"a"]),
bytes_feature([]),
bytes_feature([]),
bytes_feature([b"b", b"c"])
])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0, 0], [0, 1], [1, 0]], dtype=np.int64), # indices
np.array([3.0, 4.0, 5.0], dtype=np.float32), # values
np.array([3, 2], dtype=np.int64)) # shape: num_time = 3, max_feat = 2
expected_st_b = (
np.array([[0, 0], [3, 0], [3, 1]], dtype=np.int64), # indices
np.array(["a", "b", "c"], dtype="|S"), # values
np.array([4, 2], dtype=np.int64)) # shape: num_time = 4, max_feat = 2
expected_st_c = (
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # values
np.array([0, 0], dtype=np.int64)) # shape: num_time = 0, max_feat = 0
expected_feature_list_output = {
"a": np.array([[3, 4], [1, 0]], dtype=np.int64),
"st_a": expected_st_a,
"st_b": expected_st_b,
"st_c": expected_st_c,
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"st_a": parsing_ops.VarLenFeature(dtypes.float32),
"st_b": parsing_ops.VarLenFeature(dtypes.string),
"st_c": parsing_ops.VarLenFeature(dtypes.int64),
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64),
}
},
expected_feat_list_values=expected_feature_list_output)
@test_util.run_deprecated_v1
def testSequenceExampleWithEmptyFeatureInFeatureLists(self):
original = sequence_example(
feature_lists=feature_lists({
"st_a":
feature_list([
float_feature([3.0, 4.0]),
feature(),
float_feature([5.0]),
]),
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64), # indices
np.array([3.0, 4.0, 5.0], dtype=np.float32), # values
np.array([3, 2], dtype=np.int64)) # shape: num_time = 3, max_feat = 2
expected_feature_list_output = {
"st_a": expected_st_a,
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"st_a": parsing_ops.VarLenFeature(dtypes.float32),
}
},
expected_feat_list_values=expected_feature_list_output)
def testSequenceExampleListWithInconsistentDataFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a": feature_list([int64_feature([-1, 0]),
float_feature([2, 3])])
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError, "Feature list: a, Index: 1."
" Data types don't match. Expected type: int64"))
def testSequenceExampleListWithWrongDataTypeFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a": feature_list([float_feature([2, 3])])
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError,
"Feature list: a, Index: 0. Data types don't match."
" Expected type: int64"))
def testSequenceExampleListWithWrongSparseDataTypeFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([
int64_feature([3, 4]),
int64_feature([1, 2]),
float_feature([2.0, 3.0])
])
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError,
"Name: in1, Feature list: a, Index: 2."
" Data types don't match. Expected type: int64"
" Feature is: float_list"))
def testSequenceExampleListWithWrongShapeFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([int64_feature([2, 3]),
int64_feature([2, 3, 4])]),
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError, r"Name: in1, Key: a, Index: 1."
r" Number of int64 values != expected."
r" values size: 3 but output shape: \[2\]"))
def testSequenceExampleWithMissingFeatureListFails(self):
original = sequence_example(feature_lists=feature_lists({}))
# Test fails because we didn't add:
# feature_list_dense_defaults = {"a": None}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(original.SerializeToString()),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(
errors_impl.OpError,
"Name: in1, Feature list 'a' is required but could not be found."
" Did you mean to include it in"
" feature_list_dense_missing_assumed_empty or"
" feature_list_dense_defaults?"))
@test_util.run_deprecated_v1
def testSequenceExampleBatch(self):
first = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([
int64_feature([-1, 0, 1]),
int64_feature([2, 3, 4]),
int64_feature([5, 6, 7]),
int64_feature([8, 9, 10]),
])
}))
second = sequence_example(
feature_lists=feature_lists({
"a": feature_list([
int64_feature([21, 2, 11]),
])
}))
serialized = [first.SerializeToString(), second.SerializeToString()]
expected_feature_list_output = {
"a":
np.array(
[ # outermost dimension is example id
[ # middle dimension is time.
[[-1, 0, 1]], # inside are 1x3 matrices
[[2, 3, 4]],
[[5, 6, 7]],
[[8, 9, 10]]
],
[ # middle dimension is time.
[[21, 2, 11]], # inside are 1x3 matrices
[[0, 0, 0]], # additional entries are padded with 0
[[0, 0, 0]],
[[0, 0, 0]]
]
],
dtype=np.int64),
"d":
np.empty(shape=(2, 0, 5), dtype=np.float32), # allowed_missing
}
self._test(
{
"example_names": ops.convert_to_tensor(["in1", "in2"]),
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a":
parsing_ops.FixedLenSequenceFeature((1, 3), dtypes.int64),
"d":
parsing_ops.FixedLenSequenceFeature(
(5,), dtypes.float32, allow_missing=True),
}
},
expected_feat_list_values=expected_feature_list_output,
expected_length_values={
"a": [4, 1],
"d": [0, 0]
},
batch=True)
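def _parse_single_sequence_example_sketch():
  """A minimal usage sketch of parse_single_sequence_example.

  Illustrative only: this helper is an editorial addition, is never invoked by
  the test suite, and simply shows how a serialized SequenceExample is split
  into a context dict and a feature_lists dict of tensors.
  """
  serialized = sequence_example(
      context=features({
          "label": int64_feature([1]),
      }),
      feature_lists=feature_lists({
          "tokens": feature_list([int64_feature([1, 2]),
                                  int64_feature([3, 4])]),
      })).SerializeToString()
  context_out, feat_list_out = parsing_ops.parse_single_sequence_example(
      ops.convert_to_tensor(serialized),
      context_features={
          "label": parsing_ops.FixedLenFeature((1,), dtypes.int64),
      },
      sequence_features={
          "tokens": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64),
      })
  return context_out["label"], feat_list_out["tokens"]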
class DecodeRawTest(test.TestCase):
def _decode_v1(self, words):
with self.cached_session():
examples = np.array(words)
example_tensor = constant_op.constant(
examples, shape=examples.shape, dtype=dtypes.string)
byte_tensor = parsing_ops.decode_raw_v1(example_tensor, dtypes.uint8)
return self.evaluate(byte_tensor)
def _decode_v2(self, words, fixed_length=None):
with self.cached_session():
examples = np.array(words)
byte_tensor = parsing_ops.decode_raw(
examples, dtypes.uint8, fixed_length=fixed_length)
return self.evaluate(byte_tensor)
def _ordinalize(self, words, fixed_length=None):
outputs = []
if fixed_length is None:
fixed_length = len(words[0])
for word in words:
output = []
for i in range(fixed_length):
if i < len(word):
output.append(ord(word[i]))
else:
output.append(0)
outputs.append(output)
return np.array(outputs)
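  # For example (illustrative): _ordinalize(["ab"], fixed_length=3) returns
  # [[97, 98, 0]] -- the ord codes of "a" and "b", zero-padded to length 3,
  # mirroring how decode_raw pads short strings when fixed_length is set.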
def testDecodeRawV1EqualLength(self):
words = ["string1", "string2"]
observed = self._decode_v1(words)
expected = self._ordinalize(words)
self.assertAllEqual(expected.shape, observed.shape)
self.assertAllEqual(expected, observed)
def testDecodeRawV2FallbackEqualLength(self):
words = ["string1", "string2"]
observed = self._decode_v2(words)
expected = self._ordinalize(words)
self.assertAllEqual(expected.shape, observed.shape)
self.assertAllEqual(expected, observed)
def testDecodeRawV1VariableLength(self):
words = ["string", "longer_string"]
with self.assertRaises(errors_impl.InvalidArgumentError):
self._decode_v1(words)
def testDecodeRawV2FallbackVariableLength(self):
words = ["string", "longer_string"]
with self.assertRaises(errors_impl.InvalidArgumentError):
self._decode_v2(words)
def testDecodeRawV2VariableLength(self):
words = ["string", "longer_string"]
observed = self._decode_v2(words, fixed_length=8)
expected = self._ordinalize(words, fixed_length=8)
self.assertAllEqual(expected.shape, observed.shape)
self.assertAllEqual(expected, observed)
class DecodeJSONExampleTest(test.TestCase):
def _testRoundTrip(self, examples):
with self.cached_session() as sess:
examples = np.array(examples, dtype=np.object)
json_tensor = constant_op.constant(
[json_format.MessageToJson(m) for m in examples.flatten()],
shape=examples.shape,
dtype=dtypes.string)
binary_tensor = parsing_ops.decode_json_example(json_tensor)
binary_val = self.evaluate(binary_tensor)
if examples.shape:
self.assertShapeEqual(binary_val, json_tensor)
for input_example, output_binary in zip(
np.array(examples).flatten(), binary_val.flatten()):
output_example = example_pb2.Example()
output_example.ParseFromString(output_binary)
self.assertProtoEquals(input_example, output_example)
else:
output_example = example_pb2.Example()
output_example.ParseFromString(binary_val)
self.assertProtoEquals(examples.item(), output_example)
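  # A minimal sketch of the round trip above (illustrative): each element of
  # json_tensor holds the MessageToJson rendering of an Example proto, and
  # decode_json_example maps it back to the binary-serialized proto, so
  # ParseFromString on the output recovers the original message.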
def testEmptyTensor(self):
self._testRoundTrip([])
self._testRoundTrip([[], [], []])
def testEmptyExamples(self):
self._testRoundTrip([example(), example(), example()])
def testDenseFeaturesScalar(self):
self._testRoundTrip(
example(features=features({
"a": float_feature([1, 1, 3])
})))
def testDenseFeaturesVector(self):
self._testRoundTrip([
example(features=features({
"a": float_feature([1, 1, 3])
})),
example(features=features({
"a": float_feature([-1, -1, 2])
})),
])
def testDenseFeaturesMatrix(self):
self._testRoundTrip([
[example(features=features({
"a": float_feature([1, 1, 3])
}))],
[example(features=features({
"a": float_feature([-1, -1, 2])
}))],
])
def testSparseFeatures(self):
self._testRoundTrip([
example(features=features({
"st_c": float_feature([3, 4])
})),
example(features=features({
"st_c": float_feature([])
})),
example(features=features({
"st_d": feature()
})),
example(
features=features({
"st_c": float_feature([1, 2, -1]),
"st_d": bytes_feature([b"hi"])
})),
])
def testSerializedContainingBytes(self):
aname = "a"
bname = "b*has+a:tricky_name"
self._testRoundTrip([
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"])
})),
example(
features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b"b1"])
})),
])
@test_util.run_deprecated_v1
def testInvalidSyntax(self):
with self.cached_session() as sess:
json_tensor = constant_op.constant(["{]"])
binary_tensor = parsing_ops.decode_json_example(json_tensor)
with self.assertRaisesOpError("Error while parsing JSON"):
self.evaluate(binary_tensor)
class ParseTensorOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testToFloat32(self):
with self.cached_session():
expected = np.random.rand(3, 4, 5).astype(np.float32)
tensor_proto = tensor_util.make_tensor_proto(expected)
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.float32)
result = tensor.eval(
feed_dict={serialized: tensor_proto.SerializeToString()})
self.assertAllEqual(expected, result)
@test_util.run_deprecated_v1
def testToUint8(self):
with self.cached_session():
expected = np.random.rand(3, 4, 5).astype(np.uint8)
tensor_proto = tensor_util.make_tensor_proto(expected)
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.uint8)
result = tensor.eval(
feed_dict={serialized: tensor_proto.SerializeToString()})
self.assertAllEqual(expected, result)
@test_util.run_deprecated_v1
def testTypeMismatch(self):
with self.cached_session():
expected = np.random.rand(3, 4, 5).astype(np.uint8)
tensor_proto = tensor_util.make_tensor_proto(expected)
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.uint16)
with self.assertRaisesOpError(
r"Type mismatch between parsed tensor \(uint8\) and dtype "
r"\(uint16\)"):
tensor.eval(feed_dict={serialized: tensor_proto.SerializeToString()})
@test_util.run_deprecated_v1
def testInvalidInput(self):
with self.cached_session():
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.uint16)
with self.assertRaisesOpError(
"Could not parse `serialized` as TensorProto: 'bogus'"):
tensor.eval(feed_dict={serialized: "bogus"})
with self.assertRaisesOpError(
r"Expected `serialized` to be a scalar, got shape: \[1\]"):
tensor.eval(feed_dict={serialized: ["bogus"]})
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/parsing_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseTensorsMap."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
# pylint: disable=protected-access
add_sparse_to_tensors_map = sparse_ops._add_sparse_to_tensors_map
add_many_sparse_to_tensors_map = sparse_ops._add_many_sparse_to_tensors_map
take_many_sparse_from_tensors_map = (
sparse_ops._take_many_sparse_from_tensors_map)
# pylint: enable=protected-access
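def _tensors_map_round_trip_sketch(sp_input0, sp_input1):
  """A minimal round-trip sketch for the tensors-map ops aliased above.

  Illustrative only: this helper is an editorial addition and is never invoked
  by the tests. add_sparse_to_tensors_map stores one SparseTensor in a shared
  map and returns a scalar int64 handle; take_many_sparse_from_tensors_map
  consumes a vector of such handles and reassembles the stored tensors into a
  single SparseTensor with a new leading minibatch dimension.
  """
  handle0 = add_sparse_to_tensors_map(sp_input0, shared_name="sketch")
  handle1 = add_sparse_to_tensors_map(sp_input1, shared_name="sketch")
  return take_many_sparse_from_tensors_map(
      sparse_map_op=handle0.op,
      sparse_handles=array_ops.stack([handle0, handle1]))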
class SparseTensorsMapTest(test.TestCase):
def _SparseTensorPlaceholder(self, dtype=None):
if dtype is None:
dtype = dtypes.int32
return sparse_tensor_lib.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_3x4(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
[2, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([3, 4]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
@test_util.run_deprecated_v1
def testAddTakeMany(self):
with self.session(graph=ops.Graph(), use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
handle0 = add_sparse_to_tensors_map(sp_input0, shared_name="a")
handle1 = add_sparse_to_tensors_map(sp_input1, shared_name="a")
self.assertEqual(handle0.get_shape(), ())
handles_concat = array_ops.stack([handle0, handle1])
sp_out = take_many_sparse_from_tensors_map(
sparse_map_op=handle0.op, sparse_handles=handles_concat)
combined_indices, combined_values, combined_shape = self.evaluate(sp_out)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0])
self.assertAllEqual(combined_values[:6], sp_input0[1])
self.assertAllEqual(combined_values[6:], sp_input1[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
@test_util.run_deprecated_v1
def testFeedAddTakeMany(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
sparse_handles = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=sparse_handles)
combined_indices, combined_values, combined_shape = self.evaluate(
sp_roundtrip)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], input1_val[0])
self.assertAllEqual(combined_values[:6], input0_val[1])
self.assertAllEqual(combined_values[6:], input1_val[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
@test_util.run_deprecated_v1
def testAddManyTakeManyRoundTrip(self):
with self.session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
handles = add_many_sparse_to_tensors_map(sparse_tensor)
roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handles.op, sparse_handles=handles)
handles_value, roundtrip_value = sess.run(
[handles, roundtrip],
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertEqual(handles_value.shape, (4,))
self.assertAllEqual(roundtrip_value.indices, indices_value)
self.assertAllEqual(roundtrip_value.values, values_value)
self.assertAllEqual(roundtrip_value.dense_shape, shape_value)
@test_util.run_deprecated_v1
def testDeserializeFailsInconsistentRank(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_1x1x1()
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
handle_concat = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=handle_concat)
with self.assertRaisesOpError(
r"Inconsistent rank across SparseTensors: rank prior to "
r"SparseTensor\[1\] was: 3 but rank of SparseTensor\[1\] is: 4"):
self.evaluate(sp_roundtrip)
@test_util.run_deprecated_v1
def testTakeManyFailsWrongInputOp(self):
with self.session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6(np.arange(6))
handle = add_sparse_to_tensors_map(input_val)
handle_value = self.evaluate(handle)
bad_handle = handle_value + 10
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=[handle_value, bad_handle])
with self.assertRaisesOpError(r"Unable to find SparseTensor: 10"):
self.evaluate(sp_roundtrip)
class BenchmarkSparseTensorsMapVsSerialization(test.Benchmark):
def benchmarkVeryLarge2DFloatSparseTensor(self):
np.random.seed(127)
num_elements = 10000
batch_size = 64
indices_batch = np.random.randint(
batch_size, size=num_elements, dtype=np.int64)
indices_value = np.arange(num_elements, dtype=np.int64)
indices = np.asarray(
sorted(zip(indices_batch, indices_value)), dtype=np.int64)
values = ["feature_value_for_embedding_lookup"] * num_elements
shape = np.asarray([batch_size, num_elements], dtype=np.int64)
with session.Session(config=benchmark.benchmark_config()) as sess:
with ops.device("/cpu:0"):
indices = variables.Variable(indices)
values = variables.Variable(values)
shape = variables.Variable(shape)
st = sparse_tensor_lib.SparseTensor(indices, values, shape)
st_handles = add_many_sparse_to_tensors_map(st)
st_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=st_handles.op, sparse_handles=st_handles)
st_roundtrip_op = st_roundtrip.values.op
st_serialized = sparse_ops.serialize_many_sparse(st)
st_deserialized = sparse_ops.deserialize_many_sparse(
st_serialized, dtype=values.dtype)
st_deserialized_op = st_deserialized.values.op
variables.global_variables_initializer().run()
st_roundtrip_values = self.evaluate(st_roundtrip)
st_deserialized_values = self.evaluate(st_deserialized)
np.testing.assert_equal(st_roundtrip_values.values,
st_deserialized_values.values)
np.testing.assert_equal(st_roundtrip_values.indices,
st_deserialized_values.indices)
np.testing.assert_equal(st_roundtrip_values.dense_shape,
st_deserialized_values.dense_shape)
self.run_op_benchmark(
sess,
st_roundtrip_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_tensor_maps")
self.run_op_benchmark(
sess,
st_deserialized_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_serialization")
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import convolutional
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.compat import collections_abc
def GetShrunkInceptionShapes(shrink=10):
"""Iterator for smaller versions of convolution shapes in 2015 Inception.
Relative to inception, each depth value is `depth // shrink`.
Args:
shrink: Factor to shrink each depth value by relative to Inception.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the convolution
parameters of Inception layers.
"""
input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048],
[4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760],
[4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248],
[4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216],
[4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],
[4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152],
[4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152],
[4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],
[4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],
[4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128],
[4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96],
[4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288],
[4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],
[4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192],
[4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64],
[4, 147, 147, 24]]
filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384],
[1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],
[1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],
[1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],
[3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],
[3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224],
[3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192],
[1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224],
[1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],
[3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],
[1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160],
[3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128],
[1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128],
[1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],
[3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],
[1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48],
[3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],
[1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64],
[1, 1, 24, 64]]
out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320],
[4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],
[4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192],
[4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224],
[4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192],
[4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160],
[4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160],
[4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],
[4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128],
[4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96],
[4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64],
[4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],
[4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],
[4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],
[4, 147, 147, 64]]
strides = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1
]
# Shrink sizes to make the test faster
for i in input_sizes:
i[3] //= shrink
for f in filter_sizes:
f[2] //= shrink
f[3] //= shrink
for o in out_sizes:
o[3] //= shrink
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, VALID, VALID, VALID
]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
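def _ConsumeInceptionShapesSketch():
  """A minimal consumption sketch for GetShrunkInceptionShapes.

  Illustrative only: this helper is an editorial addition and is never invoked
  by the tests. Each yielded tuple fully parameterizes one Inception layer.
  """
  for input_size, filter_size, out_size, stride, padding in \
      GetShrunkInceptionShapes(shrink=10):
    tf_logging.debug("conv: in=%s filter=%s out=%s stride=%d padding=%s",
                     input_size, filter_size, out_size, stride, padding)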
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NHWC", False), ("NHWC", True)]
if test.is_gpu_available(cuda_only=True):
# "NCHW" format is only supported on CUDA.
test_configs += [("NCHW", True)]
return test_configs
class Conv2DTest(test.TestCase):
def _DtypesToTest(self, use_gpu):
# double datatype is currently not supported for convolution ops
# on the ROCm platform
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
if use_gpu and not test_util.GpuSupportsHalfMatMulAndConv():
return [dtypes.float32] + optional_float64
else:
# It is important that float32 comes before float16 here,
# as we will be using its gradients as reference for fp16 gradients.
return [dtypes.float32, dtypes.float16] + optional_float64
def _CreateNumpyTensor(self, shape):
total_size = 1
for s in shape:
total_size *= s
return np.arange(1, total_size + 1, dtype=np.float32).reshape(shape)
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations,
strides, padding, data_format, dtype, use_gpu):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
      dilations: Dilation rate: [row_dilation, col_dilation].
      strides: Stride: [row_stride, col_stride].
padding: Padding type.
data_format: Format of the data tensors.
dtype: Data type for inputs and outputs.
use_gpu: True if the operations should be run on GPU
Returns:
Symbolic tensor value that can be used to execute the computation
"""
x1 = self._CreateNumpyTensor(tensor_in_sizes)
x2 = self._CreateNumpyTensor(filter_in_sizes)
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if isinstance(padding, (list, tuple)):
padding = [(0, 0)] + padding + [(0, 0)]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
dilations = test_util.NHWCToNCHW(dilations)
if isinstance(padding, (list, tuple)):
padding = test_util.NHWCToNCHW(padding)
conv = nn_ops.conv2d(
t1,
t2,
dilations=dilations,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
return conv
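  # A worked sketch of the layout transform above (illustrative): for NCHW,
  # test_util.NHWCToNCHW permutes a length-4 list by [0, 3, 1, 2], so strides
  # [1, rs, cs, 1] become [1, 1, rs, cs] and an explicit padding list
  # [(0, 0), (pt, pb), (pl, pr), (0, 0)] becomes
  # [(0, 0), (0, 0), (pt, pb), (pl, pr)].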
def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,
padding):
"""Verifies that CPU and GPU produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
conv_strides: [row_stride, col_stride] for the convolution;
padding: Padding type.
"""
np.random.seed(1234) # Make it reproducible.
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
def _SetupVal(data_format, use_gpu):
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d(
t1, t2, strides=strides, padding=padding, data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
return conv
tensors = []
for (data_format, use_gpu) in GetTestConfigs():
tensors.append(_SetupVal(data_format, use_gpu))
values = self.evaluate(tensors)
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-3, atol=1e-3)
def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes,
stride, dilation, padding, data_format,
use_gpu):
x1 = self._CreateNumpyTensor(tensor_in_sizes)
x2 = self._CreateNumpyTensor(filter_in_sizes)
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
if isinstance(stride, collections_abc.Iterable):
strides = list(stride)
else:
strides = [stride, stride]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
full_strides = [1, 1] + strides
full_dilation = [1, 1] + dilation
else:
full_strides = [1] + strides + [1]
full_dilation = [1] + dilation + [1]
expected = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilation,
data_format=data_format)
computed = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilation,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
expected = test_util.NCHWToNHWC(expected)
computed = test_util.NCHWToNHWC(computed)
return expected, computed
def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, strides,
padding, dilations, rtol=1e-4):
expected_results = []
computed_results = []
for data_format, use_gpu in GetTestConfigs():
expected, computed = self._ComputeReferenceDilatedConv(
tensor_in_sizes, filter_in_sizes, strides, dilations, padding,
data_format, use_gpu)
expected_results.append(expected)
computed_results.append(computed)
tolerance = 1e-2 if use_gpu else 1e-5
expected_values = self.evaluate(expected_results)
computed_values = self.evaluate(computed_results)
for e_value, c_value in zip(expected_values, computed_values):
tf_logging.debug("expected = %s", e_value)
tf_logging.debug("actual = %s", c_value)
self.assertAllClose(
e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=rtol)
def _VerifyValues(self,
tensor_in_sizes,
filter_in_sizes,
strides,
padding,
expected,
dilations=(1, 1),
gpu_only=False,
test_grappler_layout_optimizer=False,
tol=1e-5,
fp16_tol=1e-3):
if gpu_only and not test.is_gpu_available(cuda_only=True):
return
tensors = []
dilations = list(dilations)
for (data_format, use_gpu) in GetTestConfigs():
if gpu_only and not use_gpu:
continue
for dtype in self._DtypesToTest(use_gpu):
result = self._SetupValuesForDevice(
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
dtype,
use_gpu=use_gpu)
if test_grappler_layout_optimizer and data_format == "NHWC" and use_gpu:
# Grappler's layout optimizer will not optimize a fetch node, so
# this identity allows Grappler to optimize the Conv2D node.
result = array_ops.identity(result)
tensors.append(result)
values = self.evaluate(tensors)
for i in range(len(tensors)):
conv = tensors[i]
value = values[i]
tf_logging.debug("expected = %s", expected)
tf_logging.debug("actual = %s", value)
tol_to_use = fp16_tol if value.dtype == np.float16 else tol
self.assertAllClose(expected, np.ravel(value), atol=tol_to_use,
rtol=tol_to_use)
self.assertShapeEqual(value, conv)
def _VerifyExplicitPaddings(self,
tensor_in_sizes,
filter_in_sizes,
strides,
padding,
dilations=(1, 1),
test_grappler_layout_optimizer=False,
tol=1e-5,
fp16_tol=1e-3):
"""Verifies Conv2D with explicit padding generates correct values.
It does this by comparing with Conv2D without explicit padding. This
function assumes Conv2D without explicit padding works correctly.
Args:
tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
input_depth, output_depth].
strides: [row_stride, col_stride] for the convolution;
padding: Explicit padding amounts.
dilations: Dilation values
test_grappler_layout_optimizer: If True, allow the Grappler layout
optimizer to run, which turns NHWC Conv2Ds on the GPU to NCHW Conv2Ds.
tol: The absolute and relative tolerance for non-fp16 dtypes.
fp16_tol: The absolute and relative tolerance for fp16.
"""
input_tensor = self._CreateNumpyTensor(tensor_in_sizes)
filter_tensor = self._CreateNumpyTensor(filter_in_sizes)
input_tensor = array_ops.pad(input_tensor, [(0, 0)] + padding + [(0, 0)])
dilations = list(dilations)
conv2d_result = nn_ops.conv2d(
input_tensor,
filter_tensor, [1] + list(strides) + [1],
"VALID",
dilations=[1] + dilations + [1])
expected = list(self.evaluate(array_ops.reshape(conv2d_result, [-1])))
self._VerifyValues(
tensor_in_sizes,
filter_in_sizes,
strides,
padding,
expected,
dilations,
test_grappler_layout_optimizer=test_grappler_layout_optimizer,
tol=tol,
fp16_tol=fp16_tol)
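  # A worked sketch of the equivalence checked above (illustrative): with
  # padding=[[1, 0], [0, 2]], one row of zeros is added on top and two columns
  # of zeros on the right, so
  #   conv2d(x, f, padding=[[1, 0], [0, 2]])
  # must match
  #   conv2d(pad(x, [(0, 0), (1, 0), (0, 2), (0, 0)]), f, "VALID").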
@test_util.run_in_graph_and_eager_modes
def testConv2D1x1Filter(self):
expected_output = [
30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Filter2x1Dilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2DEmpty(self):
expected_output = []
self._VerifyValues(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
dilations=[1, 2],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2D1x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [
231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
936.0, 1029.0
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D1x2FilterDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride2(self):
expected_output = [2271.0, 2367.0, 2463.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride2Same(self):
expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="SAME",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride1x2(self):
expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0]
self._VerifyValues(
tensor_in_sizes=[1, 3, 6, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSmallerThanStrideValid(self):
expected_output = [65, 95, 275, 305]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSmallerThanStrideSame(self):
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 7, 9])
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 9, 11])
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3],
padding="SAME",
expected=[44, 28, 41, 16])
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSize(self):
self._VerifyValues(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[2, 2, 1, 2],
strides=[1, 1],
padding="VALID",
expected=[50, 60])
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[2, 2, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID")
@test_util.run_in_graph_and_eager_modes()
def testConv2D0x0Padding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding=[[0, 0], [0, 0]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[3, 4, 3, 2],
filter_in_sizes=[1, 1, 2, 1],
strides=[2, 2],
padding=[[0, 0], [0, 0]])
@test_util.run_in_graph_and_eager_modes()
def testConv2D1x1Padding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]])
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Padding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 1, 2],
filter_in_sizes=[2, 1, 2, 1],
strides=[1, 1],
padding=[[2, 2], [2, 2]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 1, 2],
filter_in_sizes=[1, 1, 2, 1],
strides=[2, 1],
padding=[[2, 2], [2, 2]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DOnlyBottomPadding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 2],
strides=[1, 1],
padding=[[0, 3], [0, 0]], tol=3e-5)
self._VerifyExplicitPaddings(
tensor_in_sizes=[2, 2, 4, 3],
filter_in_sizes=[1, 2, 3, 2],
strides=[2, 2],
padding=[[0, 3], [0, 0]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DOnlyTopRightPadding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 2],
strides=[1, 1],
padding=[[1, 0], [0, 2]],
tol=5e-5)
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 4, 2],
filter_in_sizes=[2, 2, 2, 2],
strides=[1, 3],
padding=[[1, 0], [0, 2]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DLotsPadding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 1, 1, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding=[[3, 4], [4, 2]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 1, 1],
filter_in_sizes=[2, 2, 1, 3],
strides=[2, 1],
padding=[[3, 4], [4, 2]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DExplicitPaddingWithDilations(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 3, 2, 1],
filter_in_sizes=[1, 2, 1, 2],
strides=[1, 1],
padding=[[1, 0], [0, 1]],
dilations=[2, 1])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[3, 2, 2, 1],
strides=[1, 1],
padding=[[2, 1], [1, 2]],
dilations=[2, 3])
def testConv2DExplicitPaddingWithLayoutOptimizer(self):
# Test with Grappler's layout optimizer, to ensure the layout optimizer
# handles explicit padding correctly.
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 3, 2, 1],
filter_in_sizes=[1, 2, 1, 2],
strides=[1, 1],
padding=[[1, 0], [0, 1]],
dilations=[2, 1],
test_grappler_layout_optimizer=True)
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[3, 2, 2, 1],
strides=[1, 1],
padding=[[2, 1], [1, 2]],
dilations=[2, 3],
test_grappler_layout_optimizer=True)
def _VerifyGroupConvFwd(self, tensor_in_sizes, filter_in_sizes, dilations,
strides, padding, data_format, dtype):
"""Verify the output of group convolution is equal to a for-loop implementation.
Args:
tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
input_depth, output_depth].
      dilations: Dilation rate: [row_dilation, col_dilation].
      strides: Stride: [row_stride, col_stride].
padding: Padding type.
data_format: Format of the data tensors.
dtype: Data type for inputs and outputs.
"""
tensor_in = self._CreateNumpyTensor(tensor_in_sizes)
filter_in = self._CreateNumpyTensor(filter_in_sizes)
num_groups = tensor_in_sizes[3] // filter_in_sizes[2]
assert num_groups > 1 and \
filter_in_sizes[2] * num_groups == tensor_in_sizes[3]
with test_util.device(True):
t1 = constant_op.constant(tensor_in, dtype=dtype)
t2 = constant_op.constant(filter_in, dtype=dtype)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
dilations = test_util.NHWCToNCHW(dilations)
t1_splits = array_ops.split(t1, num_groups, axis=1)
else:
t1_splits = array_ops.split(t1, num_groups, axis=3)
t2_splits = array_ops.split(t2, num_groups, axis=3)
def MakeConv2d(inputs, filters):
return nn_ops.conv2d(
inputs,
filters,
strides,
padding,
dilations=dilations,
data_format=data_format)
group_conv = MakeConv2d(t1, t2)
group_conv_loop = array_ops.concat(
[MakeConv2d(t1s, t2s) for t1s, t2s in zip(t1_splits, t2_splits)],
axis=1 if data_format == "NCHW" else 3)
results = self.evaluate([group_conv, group_conv_loop])
tol_to_use = 1e-5
self.assertAllClose(
results[0], results[1], atol=tol_to_use, rtol=tol_to_use)
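  # A worked sketch of the grouping above (illustrative): with input depth 16
  # and filter shape [3, 3, 4, 8], num_groups = 16 // 4 = 4. The input splits
  # into 4 groups of 4 channels, the filter into 4 groups of 2 output
  # channels, and concatenating the 4 per-group conv2d outputs along the
  # channel axis reproduces the single grouped conv2d call.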
@test_util.run_in_graph_and_eager_modes
@test_util.run_cuda_only
def testConv2DGroupConvFwd(self):
for data_format in ["NHWC", "NCHW"]:
for dilation in [1, 2]:
for stride in [1, 2]:
self._VerifyGroupConvFwd([10, 32, 32, 16], [3, 3, 4, 8],
dilations=[dilation, dilation],
strides=[stride, stride],
padding="SAME",
data_format=data_format,
dtype=dtypes.float32)
@test_util.deprecated_graph_mode_only
@test_util.run_cuda_only
def testInputGradientGroupConv(self):
for data_format in ["NCHW", "NHWC"]:
for test_input in [True, False]:
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
num_groups=2,
padding="VALID",
in_depth=4,
out_depth=6,
stride_rows=1,
stride_cols=1,
test_input=test_input,
data_format=data_format,
use_gpu=True,
max_err=0.005)
@test_util.deprecated_graph_mode_only
@test_util.run_cuda_only
def testFilterGradientGroupConv(self):
for data_format in ["NCHW", "NHWC"]:
for test_input in [True, False]:
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
num_groups=2,
padding="VALID",
in_depth=4,
out_depth=6,
stride_rows=1,
stride_cols=1,
test_input=test_input,
data_format=data_format,
use_gpu=True,
max_err=0.005)
# TODO(yzhwang): this currently fails.
# self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1],
# filter_in_sizes=[2, 2, 1, 1],
# strides=[4, 4], padding="SAME",
# expected=[72, 112, 392, 432])
# Testing for backprops
def _RunAndVerifyBackpropInput(self,
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
expected,
data_format,
use_gpu,
err,
dilations=(1, 1)):
if use_gpu and not test.is_gpu_available(cuda_only=True):
return
x1 = self._CreateNumpyTensor(filter_sizes)
x2 = self._CreateNumpyTensor(output_sizes)
dilations = list(dilations)
with test_util.device(use_gpu):
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if isinstance(padding, (list, tuple)):
padding = [(0, 0)] + padding + [(0, 0)]
if data_format == "NCHW":
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
dilations = test_util.NHWCToNCHW(dilations)
if isinstance(padding, (list, tuple)):
          padding = test_util.NHWCToNCHW(padding)
conv = nn_ops.conv2d_backprop_input(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
# "values" consists of two tensors for two backprops
value = self.evaluate(conv)
self.assertShapeEqual(value, conv)
tf_logging.debug("expected = %s", expected)
tf_logging.debug("actual = %s", value)
self.assertArrayNear(expected, value.flatten(), err)
def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
conv_strides, padding):
np.random.seed(1234) # Make it reproducible.
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(data_format, use_gpu):
with test_util.device(use_gpu):
if data_format == "NCHW":
new_input_sizes = test_util.NHWCToNCHW(input_sizes)
else:
new_input_sizes = input_sizes
t0 = constant_op.constant(new_input_sizes, shape=[len(new_input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d_backprop_input(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
ret = self.evaluate(conv)
self.assertShapeEqual(ret, conv)
return ret
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-2, atol=1e-2)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropInput(self):
expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropInput(self):
expected_output = []
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInput(self):
expected_output = [
14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0,
140.0, 158.0, 478.0, 541.0, 604.0, 437.0, 482.0, 527.0
]
for (data_format, use_gpu) in GetTestConfigs():
# The GPU version of this test is not very stable. So adjusting the
# error threshold to 1e-4.
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=3e-4)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInputStride1x2(self):
expected_output = [
1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0,
16.0, 15.0, 20.0, 18.0, 24.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropInput(self):
expected_output = [
1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropInput(self):
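    # Here the filter covers the whole 2x2 input, so only the single output
    # position touches each input pixel: with the filter filled 1..8 (layout
    # [h, w, in_depth=1, out_depth=2]) and output gradient [1, 2],
    # in(h, w) = f(h, w, 0, 0) * 1 + f(h, w, 0, 1) * 2, giving
    # 1 + 2*2 = 5, 3 + 4*2 = 11, 5 + 6*2 = 17 and 7 + 8*2 = 23.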
expected_output = [5.0, 11.0, 17.0, 23.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
  # Helper for verifying filter backprop values against expected results.
def _RunAndVerifyBackpropFilter(self,
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
expected,
data_format,
use_gpu,
dilations=(1, 1),
err=1e-5):
x0 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(output_sizes)
dilations = list(dilations)
explicit_strides = [1] + strides + [1]
new_padding = padding
new_dilations = [1] + dilations + [1]
if isinstance(new_padding, (list, tuple)):
new_padding = [(0, 0)] + new_padding + [(0, 0)]
if data_format == "NCHW":
explicit_strides = test_util.NHWCToNCHW(explicit_strides)
new_dilations = test_util.NHWCToNCHW(new_dilations)
if isinstance(padding, (list, tuple)):
new_padding = test_util.NHWCToNCHW(new_padding)
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with test_util.device(use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype)
if data_format == "NCHW":
t0 = test_util.NHWCToNCHW(t0)
t2 = test_util.NHWCToNCHW(t2)
conv = nn_ops.conv2d_backprop_filter(
t0,
t1,
t2,
strides=explicit_strides,
padding=new_padding,
dilations=new_dilations,
data_format=data_format)
value = self.evaluate(conv)
self.assertShapeEqual(value, conv)
tf_logging.debug("expected = %s", expected)
tf_logging.debug("actual = %s", value)
self.assertArrayNear(expected, value.flatten(), err)
  def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes,
                         conv_strides, padding):
    """Checks that filter backprop values agree across devices and formats."""
np.random.seed(1234) # Make it reproducible.
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(data_format, use_gpu):
with test_util.device(use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t0 = test_util.NHWCToNCHW(t0)
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d_backprop_filter(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
ret = self.evaluate(conv)
self.assertShapeEqual(ret, conv)
return ret
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropFilter(self):
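    # A worked derivation of the expected values: the input is filled with
    # 1..6 and the 1x2 output gradient is [1, 2], so each filter tap
    # accumulates in(h, w) * 1 + in(h, w + 1) * 2, e.g.
    # f(0, 0) = 1 + 2*2 = 5 and f(1, 1) = 5 + 6*2 = 17.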
expected = [5.0, 8.0, 14.0, 17.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropFilter(self):
expected = []
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
output_sizes=[1, 1, 2, 0],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DBackpropFilterWithEmptyInput(self):
expected = [0, 0, 0, 0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilter(self):
expected = [
17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0,
37.0, 50.0, 63.0, 42.0, 57.0, 72.0, 62.0, 85.0, 108.0, 67.0, 92.0,
117.0, 72.0, 99.0, 126.0, 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0,
120.0, 153.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self):
expected = [161.0, 182.0, 287.0, 308.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropFilter(self):
expected_output = [78.]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropFilter(self):
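    # The filter covers the whole input, so the filter gradient is the outer
    # product of the input (filled 1..4) with the output gradient [1, 2]:
    # each tap contributes (in * 1, in * 2), i.e. [1, 2, 2, 4, 3, 6, 4, 8].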
expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 4.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
  # Helper for cross-checking input backprop with dilations against
  # tf.gradients of the forward conv.
def _RunAndVerifyBackpropInputDilation(self, input_sizes, filter_sizes,
output_sizes, strides, dilations,
padding, data_format, use_gpu, err):
x1 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(filter_sizes)
default_dilations = (dilations[0] == 1 and dilations[1] == 1)
if default_dilations or use_gpu:
with self.cached_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t1 = constant_op.constant(x1, shape=input_sizes)
t2 = constant_op.constant(x2, shape=filter_sizes)
full_strides = [1] + strides + [1]
full_dilations = [1] + dilations + [1]
if data_format == "NCHW":
full_strides = test_util.NHWCToNCHW(full_strides)
full_dilations = test_util.NHWCToNCHW(full_dilations)
conv_forward = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilations,
padding=padding,
data_format=data_format)
conv_forward_2 = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilations,
data_format=data_format)
if data_format == "NCHW":
conv_forward = test_util.NCHWToNHWC(conv_forward)
conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
conv = gradients_impl.gradients(conv_forward, t1)[0]
conv_2 = gradients_impl.gradients(conv_forward_2, t1)[0]
# "values" consists of two tensors for two backprops
value = self.evaluate(conv)
value_2 = self.evaluate(conv_2)
self.assertShapeEqual(value, conv)
self.assertShapeEqual(value_2, conv_2)
tf_logging.debug("expected = %s", value_2)
tf_logging.debug("actual = %s", value)
self.assertArrayNear(value_2.flatten(), value.flatten(), err)
  # Helper for cross-checking filter backprop with dilations against
  # tf.gradients of the forward conv.
def _RunAndVerifyBackpropFilterDilation(self, input_sizes, filter_sizes,
output_sizes, strides, dilations,
padding, data_format, use_gpu, err):
x1 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(filter_sizes)
default_dilations = (dilations[0] == 1 and dilations[1] == 1)
if default_dilations or use_gpu:
with self.cached_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t1 = constant_op.constant(x1, shape=input_sizes)
t2 = constant_op.constant(x2, shape=filter_sizes)
full_strides = [1] + strides + [1]
full_dilations = [1] + dilations + [1]
if data_format == "NCHW":
full_strides = test_util.NHWCToNCHW(full_strides)
full_dilations = test_util.NHWCToNCHW(full_dilations)
conv_forward = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilations,
padding=padding,
data_format=data_format)
conv_forward_2 = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilations,
data_format=data_format)
if data_format == "NCHW":
conv_forward = test_util.NCHWToNHWC(conv_forward)
conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
conv = gradients_impl.gradients(conv_forward, t2)[0]
        conv_2 = gradients_impl.gradients(conv_forward_2, t2)[0]
value = self.evaluate(conv)
value_2 = self.evaluate(conv_2)
self.assertShapeEqual(value, conv)
self.assertShapeEqual(value_2, conv_2)
tf_logging.debug("expected = %s", value_2)
tf_logging.debug("actual = %s", value)
self.assertArrayNear(value_2.flatten(), value.flatten(), err)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2DEmptyBackpropFilterDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
output_sizes=[1, 1, 2, 0],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 4, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2DEmptyBackpropInputDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
# The GPU version of this test is not very stable. So adjusting the
# error threshold to 1e-4.
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 2, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
@test_util.deprecated_graph_mode_only
def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def _RunAndVerifyBackpropInputExplicitPadding(self,
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
data_format,
use_gpu,
dilations=(1, 1),
err=2e-5):
if use_gpu and not test.is_gpu_available(cuda_only=True):
return
if not use_gpu and dilations != (1, 1):
      return  # Non-default dilations are currently not supported on the CPU.
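    # The expected values are derived from the equivalent VALID-padding op:
    # run conv2d_backprop_input over the explicitly padded input shape, then
    # slice off the rows and columns that correspond to the padding.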
x1 = self._CreateNumpyTensor(filter_sizes)
x2 = self._CreateNumpyTensor(output_sizes)
dilations = list(dilations)
padded_input_sizes = input_sizes[:]
padded_input_sizes[1] += padding[0][0] + padding[0][1]
padded_input_sizes[2] += padding[1][0] + padding[1][1]
c = nn_ops.conv2d_backprop_input(
padded_input_sizes,
x1,
x2,
strides=[1] + strides + [1],
padding="VALID",
dilations=[1] + dilations + [1])
c = c[:, padding[0][0]:(c.shape[1] - padding[0][1]), padding[1][0]:(
c.shape[2] - padding[1][1]), :]
expected = list(self.evaluate(array_ops.reshape(c, [-1])))
self._RunAndVerifyBackpropInput(
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
expected,
data_format,
use_gpu=use_gpu,
err=err,
dilations=dilations)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding0x0BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding=[[0, 0], [0, 0]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 3, 4, 2],
filter_sizes=[2, 2, 2, 3],
output_sizes=[1, 1, 2, 3],
strides=[2, 2],
padding=[[0, 0], [0, 0]],
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding1x1BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 3, 4, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 2],
filter_sizes=[1, 1, 2, 1],
output_sizes=[1, 4, 3, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 4, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 4, 2, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
data_format=data_format,
dilations=[2, 2], use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding2x2BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[2, 3, 1, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[2, 2, 5, 1],
strides=[3, 1],
padding=[[2, 2], [2, 2]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 3, 6, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 3, 4, 1],
strides=[1, 2],
padding=[[2, 2], [2, 2]],
data_format=data_format,
dilations=[2, 3],
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_1_8_4_1_BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 10, 8, 1],
strides=[1, 1],
padding=[[1, 8], [4, 2]],
data_format=data_format,
use_gpu=use_gpu,
err=5e-5)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 5, 3, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 4, 8, 1],
strides=[3, 1],
padding=[[1, 8], [4, 2]],
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_5_0_2_2_BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[1, 7, 7, 1],
strides=[1, 1],
padding=[[5, 0], [2, 2]],
data_format=data_format,
err=5e-5,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 4, 2, 1],
filter_sizes=[3, 3, 1, 1],
output_sizes=[1, 5, 2, 1],
strides=[1, 2],
padding=[[5, 0], [2, 2]],
data_format=data_format,
dilations=[2, 1],
use_gpu=use_gpu)
def _RunAndVerifyBackpropFilterExplicitPadding(self,
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
data_format,
use_gpu,
dilations=(1, 1),
err=1e-5):
if use_gpu and not test.is_gpu_available(cuda_only=True):
return
if not use_gpu and dilations != (1, 1):
      return  # Non-default dilations are currently not supported on the CPU.
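    # The expected values are derived by materializing the padding with
    # np.pad on the input and running a VALID conv2d_backprop_filter on the
    # padded result.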
x0 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(output_sizes)
dilations = list(dilations)
x0 = np.pad(x0, [(0, 0)] + padding + [(0, 0)], "constant")
c = nn_ops.conv2d_backprop_filter(
x0,
filter_sizes,
x2,
strides=[1] + strides + [1],
padding="VALID",
dilations=[1] + dilations + [1])
expected = list(self.evaluate(array_ops.reshape(c, [-1])))
self._RunAndVerifyBackpropFilter(
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
expected,
data_format,
use_gpu=use_gpu,
dilations=dilations,
err=err)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding0x0BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding=[[0, 0], [0, 0]],
data_format=data_format, use_gpu=use_gpu)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 3, 4, 2],
filter_sizes=[2, 2, 2, 3],
output_sizes=[1, 1, 2, 3],
strides=[2, 2],
padding=[[0, 0], [0, 0]],
data_format=data_format, use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding1x1BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 3, 4, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu,
err=5e-5)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 2],
filter_sizes=[1, 1, 2, 1],
output_sizes=[1, 4, 3, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
use_gpu=use_gpu,
data_format=data_format)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 4, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 4, 2, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu,
dilations=[2, 2])
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding2x2BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[2, 3, 1, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[2, 2, 5, 1],
strides=[3, 1],
padding=[[2, 2], [2, 2]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 3, 6, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 3, 4, 1],
strides=[1, 2],
padding=[[2, 2], [2, 2]],
data_format=data_format,
use_gpu=use_gpu,
dilations=[2, 3])
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_1_8_4_1_BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 10, 8, 1],
strides=[1, 1],
padding=[[1, 8], [4, 2]],
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 5, 3, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 4, 8, 1],
strides=[3, 1],
padding=[[1, 8], [4, 2]],
use_gpu=use_gpu,
data_format=data_format)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_5_0_2_2_BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[1, 7, 7, 1],
strides=[1, 1],
padding=[[5, 0], [2, 2]],
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 4, 2, 1],
filter_sizes=[3, 3, 1, 1],
output_sizes=[1, 5, 2, 1],
strides=[1, 2],
padding=[[5, 0], [2, 2]],
data_format=data_format,
use_gpu=use_gpu,
dilations=[2, 1])
# Gradient checkers
def ConstructAndTestGradient(self,
batch,
input_rows,
input_cols,
filter_rows,
filter_cols,
in_depth,
out_depth,
stride_rows,
stride_cols,
padding,
test_input,
data_format,
use_gpu,
num_groups=1,
max_err=0.0025):
assert in_depth % num_groups == 0 and out_depth % num_groups == 0
input_shape = [batch, input_rows, input_cols, in_depth]
filter_shape = [filter_rows, filter_cols, in_depth // num_groups, out_depth]
# TODO(yangke): re-factor the computation of output shape.
if padding == "VALID":
output_rows = (input_rows - filter_rows + stride_rows) // stride_rows
output_cols = (input_cols - filter_cols + stride_cols) // stride_cols
elif padding == "SAME":
output_rows = (input_rows + stride_rows - 1) // stride_rows
output_cols = (input_cols + stride_cols - 1) // stride_cols
else:
self.assertIsInstance(padding, (list, tuple))
output_rows = (input_rows + padding[1][0] + padding[1][1] - filter_rows +
stride_rows) // stride_rows
output_cols = (input_cols + padding[2][0] + padding[2][1] - filter_cols +
stride_cols) // stride_cols
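    # For example, VALID padding with input_rows=5, filter_rows=3 and
    # stride_rows=1 gives (5 - 3 + 1) // 1 = 3 output rows, as exercised by
    # testInputGradientValidPaddingStrideOne below.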
output_shape = [batch, output_rows, output_cols, out_depth]
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
# Conv2DGrad functions are not compiled for double due to
# a problem in the way Eigen's Conv2DGrad works for double.
# So we disable the DOUBLE path. We should re-enable this
# when double support returns for CPU and/or GPU.
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with self.cached_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(
input_data, shape=input_shape, dtype=dtype, name="input")
filter_tensor = constant_op.constant(
filter_data, shape=filter_shape, dtype=dtype, name="filter")
strides = [1, stride_rows, stride_cols, 1]
new_padding = padding
if data_format == "NCHW":
new_input_tensor = test_util.NHWCToNCHW(input_tensor)
strides = test_util.NHWCToNCHW(strides)
if isinstance(padding, (list, tuple)):
new_padding = test_util.NHWCToNCHW(padding)
else:
new_input_tensor = input_tensor
conv = nn_ops.conv2d(
new_input_tensor,
filter_tensor,
strides,
new_padding,
data_format=data_format,
name="conv")
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
self.assertEqual(output_shape, conv.get_shape())
if test_input:
jacob_t, jacob_n = gradient_checker.compute_gradient(input_tensor,
input_shape,
conv,
output_shape)
else:
jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor,
filter_shape,
conv,
output_shape)
if dtype == dtypes.float32:
reference_jacob_t = jacob_t
err = np.fabs(jacob_t - jacob_n).max()
else:
# Compare fp16 theoretical gradients to fp32 theoretical gradients,
# since fp16 numerical gradients are too imprecise.
err = np.fabs(jacob_t - reference_jacob_t).max()
tf_logging.debug("conv_2d gradient error = %s", err)
self.assertLess(err, max_err)
@test_util.deprecated_graph_mode_only
def testInputGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
#@test_util.deprecated_graph_mode_only
#def testFilterGradientValidPaddingStrideThree(self):
# for (data_format, use_gpu) in GetTestConfigs():
# self.ConstructAndTestGradient(
# batch=2,
# input_rows=8,
# input_cols=7,
# filter_rows=4,
# filter_cols=4,
# in_depth=2,
# out_depth=3,
# stride_rows=3,
# stride_cols=3,
# padding="VALID",
# test_input=False,
# data_format=data_format,
# use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=3,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
#@test_util.deprecated_graph_mode_only
#def testFilterGradientSamePaddingStrideThree(self):
# for (data_format, use_gpu) in GetTestConfigs():
# self.ConstructAndTestGradient(
# batch=2,
# input_rows=8,
# input_cols=7,
# filter_rows=4,
# filter_cols=4,
# in_depth=2,
# out_depth=3,
# stride_rows=3,
# stride_cols=3,
# padding="SAME",
# test_input=False,
# data_format=data_format,
# use_gpu=use_gpu)
#@test_util.deprecated_graph_mode_only
#def testFilterGradientSamePaddingStride2x1(self):
# for (data_format, use_gpu) in GetTestConfigs():
# self.ConstructAndTestGradient(
# batch=2,
# input_rows=8,
# input_cols=7,
# filter_rows=4,
# filter_cols=4,
# in_depth=2,
# out_depth=3,
# stride_rows=2,
# stride_cols=1,
# padding="SAME",
# test_input=False,
# data_format=data_format,
# use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient1x1PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.0025)
@test_util.deprecated_graph_mode_only
def testFilterGradient1x1PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient1x1PaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient1x1PaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient2x2PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [2, 2], [2, 2], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.003)
@test_util.deprecated_graph_mode_only
def testFilterGradient2x2PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [2, 2], [2, 2], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.003)
@test_util.deprecated_graph_mode_only
def testInputGradient1_2_3_4PaddingStride3x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=5,
filter_rows=4,
filter_cols=2,
in_depth=3,
out_depth=2,
stride_rows=3,
stride_cols=2,
padding=[[0, 0], [1, 2], [3, 4], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient1_2_3_4PaddingStride3x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=5,
filter_rows=4,
filter_cols=2,
in_depth=3,
out_depth=2,
stride_rows=3,
stride_cols=2,
padding=[[0, 0], [1, 2], [3, 4], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient4_3_2_1PaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=3,
input_rows=5,
input_cols=7,
filter_rows=3,
filter_cols=2,
in_depth=1,
out_depth=2,
stride_rows=2,
stride_cols=1,
padding=[[0, 0], [4, 3], [2, 1], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient4_3_2_1PaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=3,
input_rows=5,
input_cols=7,
filter_rows=3,
filter_cols=2,
in_depth=1,
out_depth=2,
stride_rows=2,
stride_cols=1,
padding=[[0, 0], [4, 3], [2, 1], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient0_0_0_5PaddingStride1x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=6,
input_cols=7,
filter_rows=3,
filter_cols=4,
in_depth=3,
out_depth=2,
stride_rows=1,
stride_cols=2,
padding=[[0, 0], [0, 0], [0, 5], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient0_0_0_5PaddingStride1x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=6,
input_cols=7,
filter_rows=3,
filter_cols=4,
in_depth=3,
out_depth=2,
stride_rows=1,
stride_cols=2,
padding=[[0, 0], [0, 0], [0, 5], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
c1 = nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], c1.get_shape().as_list())
# Incorrect input shape.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[1, 3]),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME")
# Incorrect filter shape.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(
dtypes.float32, shape=[1, 3]),
strides=[1, 1, 1, 1],
padding="SAME")
# Depth mismatch.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[4, 4, 2, 2]),
strides=[1, 1, 1, 1],
padding="SAME")
# Input depth divisible by filter depth (group convolution).
# No exceptions should appear.
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 8]),
array_ops.placeholder(dtypes.float32, shape=[4, 4, 2, 16]),
strides=[1, 1, 1, 1],
padding="SAME")
# Negative padding.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[[0, 0], [0, -1], [1, 2], [0, 0]])
# Nonzero padding in nonspatial dimension.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[[1, 0], [0, 0], [0, 0], [0, 0]])
# Nonzero NCHW padding in nonspatial dimension.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[[0, 0], [0, 1], [0, 0], [0, 0]],
data_format="NCHW")
# Wrong amount of padding
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[[0, 0], [0, 0], [0, 0]])
# Only specify one padding amount per dimension
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[[0], [0], [0], [0]])
# Explicit padding elements are not lists
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[0, 0, 0, 0])
@test_util.deprecated_graph_mode_only
@test_util.disable_xla("b/123337890") # Error messages differ
def testOpEdgeCases(self):
with self.cached_session() as sess:
# Illegal strides.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"strides in the batch and depth"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[2, 1, 1, 1],
padding="SAME"))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"strides in the batch and depth"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 2],
padding="SAME"))
# Filter larger than input.
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[20, 21, 3, 2]),
strides=[1, 1, 1, 1],
padding="VALID"))
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[21, 20, 3, 2]),
strides=[1, 1, 1, 1],
padding="VALID"))
# Filter larger than input + padding.
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(dtypes.float32, shape=[24, 25, 3, 2]),
strides=[1, 1, 1, 1],
padding=[[0, 0], [2, 2], [2, 2], [0, 0]]))
# Negative padding during backprop.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"nonnegative"):
sess.run(
nn_ops.conv2d_backprop_input([32, 20, 20, 3],
array_ops.placeholder(
dtypes.float32,
shape=[18, 18, 3, 2]),
array_ops.placeholder(
dtypes.float32,
shape=[32, 3, 2, 2]),
strides=[1, 1, 1, 1],
padding=[[0, 0], [-1, 0], [0, 0],
[0, 0]]))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"nonnegative"):
sess.run(
nn_ops.conv2d_backprop_filter(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
[18, 18, 3, 2],
array_ops.placeholder(dtypes.float32, shape=[32, 3, 2, 2]),
strides=[1, 1, 1, 1],
padding=[[0, 0], [-1, 0], [0, 0], [0, 0]]))
class DepthwiseConv2DTest(test.TestCase):
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.cached_session() as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
conv = nn_impl.depthwise_conv2d(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = self.evaluate(conv)
tf_logging.debug("value = %s", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
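  # A minimal NumPy reference sketch (hypothetical, not used by the test
  # above): depthwise conv applies filter[:, :, k, q] to input channel k
  # alone and writes the result to output channel k * depth_multiplier + q.
  # VALID padding, stride 1, and NHWC layout are assumed.
  def _ReferenceDepthwiseConv2D(self, x, f):
    batch, rows, cols, depth = x.shape
    frows, fcols, _, multiplier = f.shape
    out = np.zeros(
        (batch, rows - frows + 1, cols - fcols + 1, depth * multiplier))
    for k in range(depth):
      for q in range(multiplier):
        for i in range(out.shape[1]):
          for j in range(out.shape[2]):
            # Each output value is a 2-D patch of one input channel dotted
            # with one 2-D slice of the filter.
            patch = x[:, i:i + frows, j:j + fcols, k]
            out[:, i, j, k * multiplier + q] = np.sum(
                patch * f[:, :, k, q], axis=(1, 2))
    return out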
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output)
class SeparableConv2DTest(test.TestCase):
def _InitValues(self, sizes):
"""Initializes values for input tensors.
Args:
sizes: Tensor dimensions.
Returns:
Tensor initialized to values.
"""
total_size = 1
for s in sizes:
total_size *= s
x = [f * 0.5 for f in range(1, total_size + 1)]
return constant_op.constant(x, shape=sizes)
def _VerifyValues(self,
tensor_in_sizes,
depthwise_filter_in_sizes,
pointwise_filter_in_sizes,
stride,
padding,
expected,
data_format="NHWC"):
"""Verifies the output values of the separable convolution function.
Args:
tensor_in_sizes: Input tensor dimensions.
depthwise_filter_in_sizes: Depthwise filter tensor dimensions.
pointwise_filter_in_sizes: Pointwise filter tensor dimensions.
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
data_format: string data format for input tensor.
"""
with self.cached_session(use_gpu=True) as sess:
t1 = self._InitValues(tensor_in_sizes)
f1 = self._InitValues(depthwise_filter_in_sizes)
f1.set_shape(depthwise_filter_in_sizes)
f2 = self._InitValues(pointwise_filter_in_sizes)
real_t1 = t1
strides = [1, stride, stride, 1]
if data_format == "NCHW":
real_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
conv = nn_impl.separable_conv2d(
real_t1,
f1,
f2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = array_ops.transpose(conv, [0, 2, 3, 1])
value = self.evaluate(conv)
tf_logging.debug("value = %s", value)
self.assertArrayNear(expected, np.ravel(value), 1e-3)
self.assertShapeEqual(value, conv)
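  # A minimal sketch (hypothetical, not used by the test): separable_conv2d
  # is equivalent to a depthwise conv followed by a 1x1 pointwise conv; the
  # padding of the pointwise step is immaterial because its kernel is 1x1.
  # NHWC layout is assumed.
  def _ReferenceSeparableConv2D(self, t, depthwise_filter, pointwise_filter,
                                strides, padding):
    depthwise = nn_impl.depthwise_conv2d(
        t, depthwise_filter, strides=strides, padding=padding)
    return nn_ops.conv2d(
        depthwise, pointwise_filter, strides=[1, 1, 1, 1], padding="VALID")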
def _testSeparableConv2D(self, data_format):
# The output is the result of two convolutions:
# First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 2, 3].
# Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 7].
# Complexity is O(2*3*2*2 + 6*7*1*1) as opposed to O(2*7*2*2).
expected_output = [
6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5,
8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5,
11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5,
4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5,
15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5,
18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5,
6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5,
19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5,
22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5,
24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5,
10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75,
7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25,
7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75,
2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 7],
stride=1,
padding="SAME",
expected=expected_output,
data_format=data_format)
def testSeparableConv2D(self):
self._testSeparableConv2D("NHWC")
def disabledtestSeparableConv2DNCHW(self):
if not test.is_gpu_available():
return
self._testSeparableConv2D("NCHW")
def _testSeparableConv2DEqualInputOutputDepth(self, data_format):
# The output is the result of two convolutions:
# First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 3, 3].
# Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 6].
# Complexity is O(2*3*2*2 + 6*6*1*1) as opposed to O(2*6*2*2).
expected_output = [
5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0, 7047.0, 7449.0, 7851.0,
8253.0, 8655.0, 9057.0, 8352.0, 8829.0, 9306.0, 9783.0, 10260.0,
10737.0, 3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0, 10962.0,
11589.0, 12216.0, 12843.0, 13470.0, 14097.0, 12267.0, 12969.0, 13671.0,
14373.0, 15075.0, 15777.0, 13572.0, 14349.0, 15126.0, 15903.0, 16680.0,
17457.0, 5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0, 16182.0,
17109.0, 18036.0, 18963.0, 19890.0, 20817.0, 17487.0, 18489.0, 19491.0,
20493.0, 21495.0, 22497.0, 18792.0, 19869.0, 20946.0, 22023.0, 23100.0,
24177.0, 7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0, 4963.5, 5227.5,
5491.5, 5755.5, 6019.5, 6283.5, 5328.0, 5611.5, 5895.0, 6178.5, 6462.0,
6745.5, 5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5, 1757.25, 1840.5,
1923.75, 2007.0, 2090.25, 2173.5
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 6],
stride=1,
padding="SAME",
expected=expected_output,
data_format=data_format)
@test_util.deprecated_graph_mode_only
def testSeparableConv2DEqualInputOutputDepth(self):
self._testSeparableConv2DEqualInputOutputDepth("NHWC")
def testSeparableConv2DEqualInputOutputDepthNCHW(self):
if not test.is_gpu_available():
return
self._testSeparableConv2DEqualInputOutputDepth("NCHW")
class DeepConv2DTest(test.TestCase):
def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides,
padding):
"""Verifies that DeepConv2D and Conv2D produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
      conv_strides: [row_stride, col_stride] for the convolution.
padding: Padding type.
"""
np.random.seed(1234) # Make it reproducible.
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
with self.cached_session(use_gpu=False) as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding)
os.environ["TF_USE_DEEP_CONV2D"] = "0"
values_expect = self.evaluate([conv])
os.environ["TF_USE_DEEP_CONV2D"] = "1"
values_test = self.evaluate([conv])
self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5)
def _RunTestCases(self, conv_strides, padding):
input_sizes = [[5, 5, 5, 1248], [3, 17, 17, 192], [2, 35, 35, 288],
[2, 6, 8, 517], [2, 7, 4, 81], [3, 11, 3, 77]]
filter_sizes = [[3, 3, 1248, 128], [3, 3, 192, 192], [3, 3, 288, 384],
[3, 3, 517, 64], [3, 3, 81, 77], [3, 3, 77, 181]]
for input_shape, filter_shape in zip(input_sizes, filter_sizes):
self._CompareFwdConv2D(input_shape, filter_shape, conv_strides, padding)
def testConv2D3x3FilterStride1x1Valid(self):
self._RunTestCases([1, 1], "VALID")
def testConv2D3x3FilterStride1x1Same(self):
self._RunTestCases([1, 1], "SAME")
class Conv2DBenchmark(test.Benchmark):
def benchmarkGPUConvStackFirst(self):
# Benchmark the first iteration of a conv-net with many identical conv
# operations.
if not test.is_gpu_available():
return
with ops.Graph().as_default(), session_lib.Session() as session:
batch_size = 1
timesteps = 600
features = 1
inputs = random_ops.random_uniform(
[batch_size, 1, timesteps, features], seed=1234)
num_outputs_list = [512] * 40 + [1]
kernel_w = 3
x = inputs
for num_outputs in num_outputs_list:
x = convolutional.conv2d(x, num_outputs, [1, kernel_w])
outputs = x
variables.global_variables_initializer().run()
num_iterations = 4
for iter_index in xrange(num_iterations):
start = time.time()
session.run(outputs)
wall_time = time.time() - start
self.report_benchmark(
name="conv_stack_iter_%d" % iter_index, wall_time=wall_time)
tf_logging.info("conv_stack_iter_%d: %.4f" % (iter_index, wall_time))
def _bench_op(self, name, op, burn_iters, num_iters):
config = config_pb2.ConfigProto()
# Prevent Grappler from optimizing away the entire graph.
config.graph_options.rewrite_options.dependency_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
with session_lib.Session(config=config) as session:
variables.global_variables_initializer().run()
self.run_op_benchmark(
session, op, burn_iters=burn_iters, min_iters=num_iters, name=name)
def benchmarkExplicitVsManualPadding(self):
"""Compare performance of EXPLICIT padding and calling tf.pad.
    A Conv2D op with EXPLICIT padding is benchmarked against a tf.pad with
    the same padding followed by an equivalent Conv2D op with VALID padding.
"""
if not test.is_gpu_available():
return
with ops.Graph().as_default():
burn_iters = 15
num_iters = 300
batch_size = 64
# The input and filter correspond to the first layer of Resnet50.
input = variables.Variable( # pylint: disable=redefined-builtin
random_ops.random_uniform([
batch_size,
3,
224,
224
]))
filter = variables.Variable(random_ops.random_uniform([7, 7, 3, 64])) # pylint: disable=redefined-builtin
strides = [1, 1, 2, 2]
padding = [(0, 0), (0, 0), (3, 3), (3, 3)]
output_explicit_pad = nn_ops.conv2d(
input, filter, strides, padding=padding, data_format="NCHW")
input_padded = array_ops.pad(input, padding)
output_manual_pad = nn_ops.conv2d(
input_padded, filter, strides, padding="VALID", data_format="NCHW")
# Benchmark just the forward pass.
self._bench_op("explicit_pad_forward", output_explicit_pad.op, burn_iters,
num_iters)
self._bench_op("manual_pad_forward", output_manual_pad.op, burn_iters,
num_iters)
# Benchmark both the forward and backwards passes.
input_grad_explicit_pad, filter_grad_explicit_pad = (
gradients_impl.gradients(output_explicit_pad, [input, filter]))
self._bench_op(
"explicit_pad_backward",
control_flow_ops.group(input_grad_explicit_pad,
filter_grad_explicit_pad), burn_iters,
num_iters)
input_grad_manual_pad, filter_grad_manual_pad = gradients_impl.gradients(
output_manual_pad, [input, filter])
self._bench_op(
"manual_pad_backward",
control_flow_ops.group(input_grad_manual_pad, filter_grad_manual_pad),
burn_iters, num_iters)
def benchmarkExplicitVsSamePaddingGraph(self):
"""Compare performance of EXPLICIT and SAME padding in graph mode.
A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op
with explicit padding is benchmarked, where the padding is the same as in
the SAME case. The purpose is to ensure EXPLICIT padding is just as
    efficient as the SAME case.
"""
if not test.is_gpu_available():
return
with ops.Graph().as_default():
burn_iters = 15
num_convs = 20
num_iters = 50
batch_size = 64
# The input and filter correspond to a middle layer of Resnet50.
input = variables.Variable( # pylint: disable=redefined-builtin
random_ops.random_uniform([
batch_size,
256,
14,
14
]))
filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256])) # pylint: disable=redefined-builtin
strides = [1, 1, 1, 1]
padding = [(0, 0), (0, 0), (1, 1), (1, 1)]
output_explicit_pad = input
output_same_pad = input
for _ in range(num_convs):
output_explicit_pad = nn_ops.conv2d(
output_explicit_pad,
filter,
strides,
padding=padding,
data_format="NCHW")
output_same_pad = nn_ops.conv2d(
output_same_pad,
filter,
strides,
padding="SAME",
data_format="NCHW")
grad_explicit_pad, = gradients_impl.gradients(output_explicit_pad, filter)
grad_same_pad, = gradients_impl.gradients(output_same_pad, filter)
self._bench_op("graph_explicit_pad", grad_explicit_pad.op, burn_iters,
num_iters)
self._bench_op("graph_same_pad", grad_same_pad.op, burn_iters, num_iters)
def benchmarkExplicitVsSamePaddingEager(self):
"""Compare performance of EXPLICIT and SAME padding in eager mode.
A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op
with explicit padding is benchmarked, where the padding is the same as in
    the SAME case. Currently, EXPLICIT padding is slightly slower because the
    Python padding list must be checked and processed before the Conv2D op
    can run.
"""
# TODO(reedwm): Make EXPLICIT padding as fast as SAME padding.
if not test.is_gpu_available():
return
with context.eager_mode():
burn_iters = 15
num_convs = 20
num_iters = 50
batch_size = 64
# The input and filter correspond to a middle layer of Resnet50.
input = variables.Variable( # pylint: disable=redefined-builtin
random_ops.random_uniform([
batch_size,
256,
14,
14
]))
filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256])) # pylint: disable=redefined-builtin
strides = [1, 1, 1, 1]
padding = [(0, 0), (0, 0), (1, 1), (1, 1)]
output_explicit_pad = input
output_same_pad = input
for _ in range(burn_iters):
output_explicit_pad = nn_ops.conv2d(
output_explicit_pad,
filter,
strides,
padding=padding,
data_format="NCHW")
output_same_pad = nn_ops.conv2d(
output_same_pad,
filter,
strides,
padding="SAME",
data_format="NCHW")
start = time.time()
for _ in range(num_iters):
with backprop.GradientTape() as tape:
for _ in range(num_convs):
output_explicit_pad = nn_ops.conv2d(
output_explicit_pad,
filter,
strides,
padding=padding,
data_format="NCHW")
tape.gradient(output_explicit_pad, filter)
end = time.time()
self.report_benchmark(
name="eager_explicit_pad",
wall_time=(end - start) / num_iters,
iters=num_iters)
start = time.time()
for _ in range(num_iters):
with backprop.GradientTape() as tape:
for _ in range(num_convs):
output_same_pad = nn_ops.conv2d(
output_same_pad,
filter,
strides,
padding="SAME",
data_format="NCHW")
tape.gradient(output_same_pad, filter)
end = time.time()
self.report_benchmark(
name="eager_same_pad",
wall_time=(end - start) / num_iters,
iters=num_iters)
def GetInceptionFwdTest(input_size, filter_size, stride, padding,
gpu_only=False):
def Test(self):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping InceptionFwd %s", (input_size, filter_size,
stride, padding))
return
tf_logging.info("Testing InceptionFwd %s", (input_size, filter_size, stride,
padding))
self._CompareFwdValues(input_size, filter_size, [stride, stride], padding)
return Test
def GetInceptionFwdDilatedConvTest(input_size, filter_size, stride, padding):
def Test(self):
if stride == 1:
tf_logging.info("Testing InceptionFwd with dilations %s",
(input_size, filter_size, stride, padding))
self._VerifyDilatedConvValues(
tensor_in_sizes=input_size,
filter_in_sizes=filter_size,
strides=[stride, stride],
dilations=[2, 2],
padding=padding,
rtol=5e-4)
return Test
def GetInceptionBackInputTest(input_size, filter_size, output_size, stride,
padding,
gpu_only=False):
def Test(self):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping InceptionBackInput %s",
(input_size, filter_size, output_size, stride, padding))
return
tf_logging.info("Testing InceptionBackInput %s",
(input_size, filter_size, output_size, stride, padding))
self._CompareBackpropInput(input_size, filter_size, output_size,
[stride, stride], padding)
return Test
def GetInceptionBackFilterTest(input_size, filter_size, output_size, strides,
padding, gpu_only=False):
def Test(self):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping InceptionBackFilter %s",
(input_size, filter_size, output_size, strides, padding))
return
tf_logging.info("Testing InceptionBackFilter %s",
(input_size, filter_size, output_size, strides, padding))
self._CompareBackFilter(input_size, filter_size, output_size, strides,
padding)
return Test
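# Note: the Get*Test functions above are parameterized test factories. A
# minimal sketch of the pattern (hypothetical names):
#
#   def GetFooTest(param):
#     def Test(self):
#       self._CheckFoo(param)  # hypothetical check helper
#     return Test
#
#   setattr(Conv2DTest, "testFoo_0", GetFooTest(0))
#
# Each factory call binds its parameters in a closure, so every generated
# method exercises exactly one shape configuration when the suite runs.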
if __name__ == "__main__":
for index, (input_size_, filter_size_, output_size_, stride_,
padding_) in enumerate(GetShrunkInceptionShapes()):
setattr(Conv2DTest, "testInceptionFwd_" + str(index),
test_util.run_in_graph_and_eager_modes(
GetInceptionFwdTest(input_size_, filter_size_, stride_,
padding_)))
setattr(
Conv2DTest, "testInceptionFwdDilatedConv_" + str(index),
test_util.run_in_graph_and_eager_modes(GetInceptionFwdDilatedConvTest(
input_size_, filter_size_, stride_, padding_)))
setattr(Conv2DTest, "testInceptionBackInput_" + str(index),
test_util.run_in_graph_and_eager_modes(
GetInceptionBackInputTest(input_size_, filter_size_,
output_size_, stride_, padding_)))
setattr(Conv2DTest, "testInceptionBackFilter_" + str(index),
test_util.run_in_graph_and_eager_modes(
GetInceptionBackFilterTest(input_size_, filter_size_,
output_size_, [stride_, stride_],
padding_)))
# TODO(b/35359731)
  # Fwd, BackInput, and BackFilter tests that check that, for certain input
  # parameter sets, the winograd nonfused algorithm is excluded from conv
  # autotune. If the winograd nonfused algorithm were offered as an autotune
  # option in such a case and the cuDNN version were smaller than 7, the
  # following tests would fail.
ishape = [1, 400, 400, 1]
fshape = [1, 1, 1, 256]
oshape = [1, 400, 400, 256]
setattr(Conv2DTest, "testInceptionFwd_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionFwdTest(ishape, fshape, 1, "SAME", gpu_only=True)))
setattr(Conv2DTest, "testInceptionFwdDilatedConv_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionFwdDilatedConvTest(ishape, fshape, 1, "SAME")))
setattr(Conv2DTest, "testInceptionBackInput_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionBackInputTest(ishape, fshape, oshape, 1, "SAME",
gpu_only=True)))
setattr(Conv2DTest, "testInceptionBackFilter_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionBackFilterTest(ishape, fshape, oshape, [1, 1], "SAME",
gpu_only=True)))
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/conv_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reshape_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class ReshapeTest(test.TestCase):
def _testReshape(self, x, y, use_gpu=False):
with self.cached_session(use_gpu=use_gpu):
np_ans = x.reshape(y)
tf_ans = array_ops.reshape(x, y)
out = self.evaluate(tf_ans)
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertShapeEqual(np_ans, tf_ans)
# Repeat with an int64 shape tensor.
y64 = constant_op.constant(y, dtype=dtypes.int64)
tf_ans = array_ops.reshape(x, y64)
out = self.evaluate(tf_ans)
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertShapeEqual(np_ans, tf_ans)
def _testZeroDimReshape(self, x, shape, expected, use_gpu=False):
with self.cached_session(use_gpu=use_gpu):
y = array_ops.reshape(x, shape)
out = self.evaluate(y)
self.assertEqual(expected, out.shape)
# Repeat with an int64 shape tensor.
shape64 = constant_op.constant(shape, dtype=dtypes.int64)
y = array_ops.reshape(x, shape64)
out = self.evaluate(y)
self.assertEqual(expected, out.shape)
def _testBothReshape(self, x, y):
self._testReshape(x, y, False)
self._testReshape(x, y, True)
def testBoolBasic(self):
x = np.arange(1., 7.).reshape([1, 6]) > 3
self._testBothReshape(x, [2, 3])
def testFloatBasic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.float32)
self._testBothReshape(x, [2, 3])
def testDoubleBasic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.float64)
self._testBothReshape(x, [2, 3])
def testInt32Basic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.int32)
self._testBothReshape(x, [2, 3])
def testComplex64Basic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.complex64)
self._testBothReshape(x, [2, 3])
def testComplex128Basic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.complex128)
self._testBothReshape(x, [2, 3])
def testFloatReshapeThreeDimensions(self):
x = np.arange(1., 28.).reshape([1, 27]).astype(np.float32)
self._testBothReshape(x, [3, 3, 3])
def testFloatUnspecifiedDimOnly(self):
x = np.arange(1., 7.).reshape([6]).astype(np.float32)
self._testBothReshape(x, [-1])
def testFloatUnspecifiedDimBegin(self):
x = np.arange(1., 7.).reshape([6]).astype(np.float32)
self._testBothReshape(x, [-1, 2])
def testFloatUnspecifiedDimEnd(self):
x = np.arange(1., 7.).reshape([6]).astype(np.float32)
self._testBothReshape(x, [3, -1])
def testZeroDimBasic(self):
x = np.zeros([0, 6]).astype(np.float32)
self._testBothReshape(x, [0, 2, 3])
def testZeroDimReshapeR1(self):
x = np.zeros([0, 6]).astype(np.float32)
self._testBothReshape(x, [-1])
def testZeroDimReshapeR3(self):
x = np.zeros([0, 6]).astype(np.float32)
self._testBothReshape(x, [-1, 2, 3])
# TODO(vrv): Add tests for failure conditions once python test_util
# reports errors.
@test_util.run_deprecated_v1
def testFloatReshapeGradThreeDimensions(self):
x = np.arange(1., 25.).reshape([2, 3, 4]).astype(np.float32)
s = list(np.shape(x))
with self.cached_session():
input_tensor = constant_op.constant(x)
reshape_out = array_ops.reshape(input_tensor, [1, 8, 3])
err = gradient_checker.compute_gradient_error(
input_tensor, s, reshape_out, s, x_init_value=x)
print("Reshape gradient error = " % err)
self.assertLess(err, 1e-3)
def testFloatEmpty(self):
x = np.empty((0, 0, 0, 0), dtype=np.float32)
self._testBothReshape(x, [1, 2, 3, 0])
self._testBothReshape(x, [1, 0, 0, 4])
self._testBothReshape(x, [0, 0, 0, 0])
self._testBothReshape(x, [1, 2, 0])
self._testBothReshape(x, [0, 0, 0])
self._testBothReshape(x, [1, -1, 5])
def testZeroDimWithUnspecifiedDim(self):
for use_gpu in (True, False):
self._testZeroDimReshape(x=np.zeros([0, 6]).astype(np.float32),
shape=[0, -1, 3],
expected=(0, 2, 3),
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testErrors(self):
y = constant_op.constant(0.0, shape=[23, 29, 31])
with self.assertRaisesRegexp(ValueError, "must be evenly divisible by 17"):
array_ops.reshape(y, [17, -1])
z = constant_op.constant(0.0, shape=[32, 128])
with self.assertRaisesRegexp(ValueError,
"Cannot reshape a tensor with 4096 elements"):
array_ops.reshape(z, [4095])
@test_util.run_deprecated_v1
def testPartialShapes(self):
x = array_ops.placeholder(dtypes.float32)
# Unknown input shape, partial new shape.
y = array_ops.reshape(x, [1, 1, -1, 1])
self.assertEqual([1, 1, None, 1], y.get_shape().as_list())
# Unknown input shape, unknown new shape.
y = array_ops.reshape(x, array_ops.placeholder(dtypes.int32))
self.assertEqual(None, y.get_shape().ndims)
# Unknown input shape, known rank for new shape.
y = array_ops.reshape(x, array_ops.placeholder(dtypes.int32, shape=(3,)))
self.assertEqual([None, None, None], y.get_shape().as_list())
# Unknown input shape, partial new shape using `tf.stack()`.
y = array_ops.reshape(x, [array_ops.placeholder(dtypes.int32), 37])
self.assertEqual([None, 37], y.get_shape().as_list())
# Unknown input shape, partial new shape using `tf.concat()`.
y = array_ops.reshape(
x,
array_ops.concat(
[array_ops.placeholder(
dtypes.int32, shape=(2,)), [37, 42]], 0))
self.assertEqual([None, None, 37, 42], y.get_shape().as_list())
# Unknown input shape, partial new shape using `tf.shape()`.
y = array_ops.reshape(
x,
array_ops.shape(
array_ops.placeholder(
dtypes.float32, shape=[None, 37, None])))
self.assertEqual([None, 37, None], y.get_shape().as_list())
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/reshape_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.linalg.linalg_impl.matrix_exponential."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
def np_expm(x): # pylint: disable=invalid-name
"""Slow but accurate Taylor series matrix exponential."""
y = np.zeros(x.shape, dtype=x.dtype)
xn = np.eye(x.shape[0], dtype=x.dtype)
for n in range(40):
if n > 0:
xn /= float(n)
y += xn
xn = np.dot(xn, x)
return y
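# Worked example for np_expm (illustrative): the nilpotent matrix
# x = [[0., 1.], [0., 0.]] satisfies np.dot(x, x) == 0, so the series above
# truncates after the linear term and np_expm(x) == [[1., 1.], [0., 1.]],
# the closed-form exponential of a 2x2 Jordan block.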
class ExponentialOpTest(test.TestCase):
def _verifyExponential(self, x, np_type):
inp = x.astype(np_type)
with test_util.use_gpu():
with ops.device("/cpu:0"):
tf_ans = linalg_impl.matrix_exponential(inp)
if x.size == 0:
np_ans = np.empty(x.shape, dtype=np_type)
else:
if x.ndim > 2:
np_ans = np.zeros(inp.shape, dtype=np_type)
          for i in itertools.product(*[range(dim) for dim in inp.shape[:-2]]):
np_ans[i] = np_expm(inp[i])
else:
np_ans = np_expm(inp)
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out, rtol=1e-3, atol=1e-3)
def _verifyExponentialReal(self, x):
for np_type in [np.float32, np.float64]:
self._verifyExponential(x, np_type)
def _verifyExponentialComplex(self, x):
for np_type in [np.complex64, np.complex128]:
self._verifyExponential(x, np_type)
def _makeBatch(self, matrix1, matrix2):
matrix_batch = np.concatenate(
[np.expand_dims(matrix1, 0),
np.expand_dims(matrix2, 0)])
matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
return matrix_batch
def testNonsymmetricReal(self):
# 2x2 matrices
matrix1 = np.array([[1., 2.], [3., 4.]])
matrix2 = np.array([[1., 3.], [3., 5.]])
self._verifyExponentialReal(matrix1)
self._verifyExponentialReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyExponentialReal(self._makeBatch(matrix1, matrix2))
@test_util.run_deprecated_v1
def testNonsymmetricComplex(self):
if test.is_built_with_rocm():
self.skipTest("ROCm does not support BLAS operations for complex types")
matrix1 = np.array([[1., 2.], [3., 4.]])
matrix2 = np.array([[1., 3.], [3., 5.]])
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyExponentialComplex(matrix1)
self._verifyExponentialComplex(matrix2)
# Complex batch
self._verifyExponentialComplex(self._makeBatch(matrix1, matrix2))
def testSymmetricPositiveDefiniteReal(self):
# 2x2 matrices
matrix1 = np.array([[2., 1.], [1., 2.]])
matrix2 = np.array([[3., -1.], [-1., 3.]])
self._verifyExponentialReal(matrix1)
self._verifyExponentialReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyExponentialReal(self._makeBatch(matrix1, matrix2))
def testSymmetricPositiveDefiniteComplex(self):
if test.is_built_with_rocm():
self.skipTest("ROCm does not support BLAS operations for complex types")
matrix1 = np.array([[2., 1.], [1., 2.]])
matrix2 = np.array([[3., -1.], [-1., 3.]])
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyExponentialComplex(matrix1)
self._verifyExponentialComplex(matrix2)
# Complex batch
self._verifyExponentialComplex(self._makeBatch(matrix1, matrix2))
@test_util.run_deprecated_v1
def testNonSquareMatrix(self):
    # When the exponential of a non-square matrix is attempted, we should
    # raise an error.
with self.assertRaises(ValueError):
linalg_impl.matrix_exponential(np.array([[1., 2., 3.], [3., 4., 5.]]))
@test_util.run_deprecated_v1
def testWrongDimensions(self):
# The input to the exponential should be at least a 2-dimensional tensor.
tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_impl.matrix_exponential(tensor3)
def testEmpty(self):
self._verifyExponentialReal(np.empty([0, 2, 2]))
self._verifyExponentialReal(np.empty([2, 0, 0]))
@test_util.run_deprecated_v1
def testDynamic(self):
with self.session(use_gpu=True) as sess:
inp = array_ops.placeholder(ops.dtypes.float32)
expm = linalg_impl.matrix_exponential(inp)
matrix = np.array([[1., 2.], [3., 4.]])
sess.run(expm, feed_dict={inp: matrix})
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
expm1 = linalg_impl.matrix_exponential(matrix1)
expm2 = linalg_impl.matrix_exponential(matrix2)
expm = self.evaluate([expm1, expm2])
self.assertAllEqual(expm[0], expm[1])
class MatrixExponentialBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (
2.0 * n) + np.diag(np.ones(n).astype(np.float32))
return variables.Variable(np.tile(matrix, batch_shape + (1, 1)))
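  # The matrix above is the identity plus a constant 1/(2n) in every entry,
  # so each diagonal entry (1 + 1/(2n)) strictly dominates its row's
  # off-diagonal sum ((n - 1)/(2n) < 1/2). This keeps the benchmark input
  # diagonally dominant and its exponential numerically well behaved.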
def benchmarkMatrixExponentialOp(self):
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = self._GenerateMatrix(shape)
expm = linalg_impl.matrix_exponential(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(expm),
min_iters=25,
name="matrix_exponential_cpu_{shape}".format(
shape=shape))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/gpu:0"):
matrix = self._GenerateMatrix(shape)
expm = linalg_impl.matrix_exponential(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(expm),
min_iters=25,
name="matrix_exponential_gpu_{shape}".format(
shape=shape))
def _TestRandomSmall(dtype, batch_dims, size):
def Test(self):
np.random.seed(42)
shape = batch_dims + (size, size)
matrix = np.random.uniform(
low=-1.0, high=1.0,
size=shape).astype(dtype)
self._verifyExponentialReal(matrix)
return Test
def _TestL1Norms(dtype, shape, scale):
def Test(self):
np.random.seed(42)
matrix = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape)).reshape(shape).astype(dtype)
print(dtype, shape, scale, matrix)
l1_norm = np.max(np.sum(np.abs(matrix), axis=matrix.ndim-2))
matrix /= l1_norm
self._verifyExponentialReal(scale * matrix)
return Test
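# For reference: np.max(np.sum(np.abs(matrix), axis=matrix.ndim - 2)) above
# is the induced L1 norm (maximum absolute column sum). Dividing by it and
# then multiplying by `scale` pins the input's L1 norm to `scale`, which is
# the quantity a scaling-and-squaring implementation of matrix_exponential
# uses to pick its Pade approximation order.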
if __name__ == "__main__":
for dtype_ in [np.float32, np.float64, np.complex64, np.complex128]:
for batch_ in [(), (2,), (2, 2)]:
for size_ in [4, 7]:
name = "%s_%d_%d" % (dtype_.__name__, len(batch_), size_)
        setattr(ExponentialOpTest, "testRandomSmall_" + name,
_TestRandomSmall(dtype_, batch_, size_))
for shape_ in [(3, 3), (2, 3, 3)]:
for dtype_ in [np.float32, np.complex64]:
for scale_ in [0.1, 1.5, 5.0, 20.0]:
name = "%s_%d_%d" % (dtype_.__name__, len(shape_), int(scale_*10))
setattr(ExponentialOpTest, "testL1Norms_" + name,
_TestL1Norms(dtype_, shape_, scale_))
for dtype_ in [np.float64, np.complex128]:
for scale_ in [0.01, 0.2, 0.5, 1.5, 6.0, 25.0]:
name = "%s_%d_%d" % (dtype_.__name__, len(shape_), int(scale_*100))
setattr(ExponentialOpTest, "testL1Norms_" + name,
_TestL1Norms(dtype_, shape_, scale_))
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/matrix_exponential_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib
# TODO(yangzihao): Currently matmul autotuning is disabled by default. Use
# os.environ["TF_MATMUL_AUTOTUNE_ENABLE"] = "1" to enable it.
class MatVecTest(test_lib.TestCase):
"""Simple test for matvec, which is sugar on top of matmul."""
def testTwoByTwoCase(self):
a = np.array([[1, 2], [3, 4]])
b = np.array([5, 6])
c = math_ops.matvec(a, b)
self.assertAllEqual((2,), c.shape)
self.assertAllEqual([5 + 2 * 6, 3 * 5 + 4 * 6], c)
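    # For reference, the expected value is each row of a dotted with b,
    # i.e. [17, 39]. Equivalently (illustrative, not exercised here):
    # matvec(a, b) matches math_ops.matmul(a, b[:, None])[:, 0] for 2-D a.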
def _AddTest(test, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, test_util.deprecated_graph_mode_only(fn))
def _GetTransposedMatrices(x, x_name, kwargs):
if kwargs["transpose_" + x_name] is True:
return x.T
elif kwargs["adjoint_" + x_name] is True:
return np.conj(x.T)
else:
return x
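# Illustrative example: with kwargs = {"transpose_a": True, "adjoint_a":
# False}, a 2x3 matrix a comes back as its 3x2 transpose, so
# math_ops.matmul(a.T, b, transpose_a=True) reproduces the plain product
# a @ b that the tests compare against.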
class MatMulTest(test_lib.TestCase):
pass # Filled in below
def _GetMatMulTest(a_np_, b_np_, use_static_shape_, **kwargs_):
def Test(self):
np_val = np.matrix(a_np_) * np.matrix(b_np_)
use_gpu = True
if a_np_.dtype is np.float16 and (
not test_util.GpuSupportsHalfMatMulAndConv()):
use_gpu = False
print("Built without fp16 matmul support for Cuda, running test on CPU.")
# Transpose and possibly conjugate a_np_ and b_np_ according to the
# attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
# results in a valid matrix multiplication and produces the same result as
# np.matrix(a_np_) * np.matrix(b_np_)
effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
with self.cached_session() as sess, test_util.device(use_gpu):
if use_static_shape_:
a = constant_op.constant(effective_a_np)
b = constant_op.constant(effective_b_np)
with ops.device("/cpu:0"):
res = math_ops.matmul(a, b, **kwargs_)
tf_val = self.evaluate(res)
else:
a = array_ops.placeholder(a_np_.dtype)
b = array_ops.placeholder(b_np_.dtype)
with ops.device("/cpu:0"):
res = math_ops.matmul(a, b, **kwargs_)
tf_val = sess.run(res, feed_dict={a: effective_a_np, b: effective_b_np})
self.assertAllCloseAccordingToType(
tf_val,
np_val,
float_rtol=3e-5,
float_atol=3e-5,
half_rtol=0.2,
half_atol=0.2)
return Test
class MatMulGradientTest(test_lib.TestCase):
pass # Will be filled in below.
def _GetMatMulGradientTest(a_np_, b_np_, use_static_shape_, **kwargs_):
def Test(self):
if not use_static_shape_ or a_np_.dtype in (np.int32, np.int64, np.float16):
self.skipTest("Skipping infeasible gradient test.")
# Transpose and possibly conjugate a_np_ and b_np_ according to the
# attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
# results in a valid matrix multiplication and produces the same result as
# np.matrix(a_np_) * np.matrix(b_np_)
effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
epsilon = np.finfo(a_np_.dtype).eps
delta = epsilon**(1.0 / 3.0)
tol = 20 * delta
with self.session():
theoretical, numerical = gradient_checker_v2.compute_gradient(
lambda x: math_ops.matmul(x, effective_b_np, **kwargs_),
[effective_a_np],
delta=delta)
self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
theoretical, numerical = gradient_checker_v2.compute_gradient(
lambda x: math_ops.matmul(effective_a_np, x, **kwargs_),
[effective_b_np],
delta=delta)
self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
return Test
class MatMulStatsTest(test_lib.TestCase):
@test_util.run_v1_only("Test requires a Graph and NodeDef inspection")
def testSimpleStatistics(self):
a = variables.Variable(random_ops.random_normal([25, 16]))
b = variables.Variable(random_ops.random_normal([16, 9]))
math_ops.matmul(a, b)
g = ops.get_default_graph()
for op in g.get_operations():
flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
if op.name == "MatMul":
self.assertEqual(7200, flops)
@test_util.run_v1_only("Test requires a Graph and NodeDef inspection")
def testTransposedStatistics(self):
a = variables.Variable(random_ops.random_normal([16, 25]))
b = variables.Variable(random_ops.random_normal([16, 9]))
math_ops.matmul(a, b, transpose_a=True)
g = ops.get_default_graph()
for op in g.get_operations():
flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
if op.name == "MatMul":
self.assertEqual(7200, flops)
try:
# @ operator supported since python 3.5.
infix_matmul = operator.matmul
except AttributeError:
# For earlier versions of python, emulate regular behavior.
# Useful to build and test for 3.5+ on earlier versions.
def infix_matmul(x, y): # pylint: disable=invalid-name
try:
r = type(x).__matmul__(x, y)
except AttributeError:
r = NotImplemented
if r is NotImplemented and type(x) is not type(y):
try:
r = type(y).__rmatmul__(y, x)
except AttributeError:
r = NotImplemented
if r is NotImplemented:
raise TypeError("unsupported operand type(s) for @: '{}' and '{}'"
.format(type(x).__name__, type(y).__name__))
return r
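# Usage note: infix_matmul mirrors the @ operator's dispatch protocol
# (type(x).__matmul__ is tried first, then type(y).__rmatmul__), so e.g.
# infix_matmul(np.eye(2), np.ones((2, 2))) equals np.eye(2) @ np.ones((2, 2)).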
class MatMulInfixOperatorTest(test_lib.TestCase):
def testMismatchedShape(self):
with self.assertRaisesRegexp(
Exception, "(Shape must be rank 2 but is rank 1|is not a matrix)"):
infix_matmul(
ops.convert_to_tensor([10.0, 20.0, 30.0]),
ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
def testMismatchedDimensions(self):
with self.assertRaisesRegexp(
Exception, "(Dimensions must be equal|Matrix size-incompatible)"):
infix_matmul(
ops.convert_to_tensor([[10.0, 20.0, 30.0]]),
ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
@test_util.run_v1_only("Tensor.op is generally not applicable in TF 2")
def testInfixMatmulIsTfMatmul(self):
a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
c = infix_matmul(a, b)
self.assertEqual(c.op.type, "MatMul")
def testInfixMatmulDoesDotProduct(self):
a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
c = infix_matmul(a, b)
d = math_ops.matmul(a, b)
self.assertAllEqual(c, d)
if __name__ == "__main__":
sizes = [1, 3, 5]
trans_options = [[False, False], [True, False], [False, True]]
dtypes_to_test = [np.int32, np.int64, np.float16, np.float32, np.float64]
if not test_lib.is_built_with_rocm():
# ROCm does not support BLAS operations for complex types
dtypes_to_test += [np.complex64, np.complex128]
# TF2 does not support placeholders under eager so we skip it
for use_static_shape in set([True, tf2.enabled()]):
for dtype in dtypes_to_test:
if not use_static_shape and (dtype == np.int32 or dtype == np.int64):
# TODO(rmlarsen): Re-enable this test when we have fixed the underlying
# bug in Windows (b/35935459).
continue
for m in sizes:
for n in sizes:
for k in sizes:
# Construct compatible random matrices a_np of size [m, k] and b_np
# of size [k, n].
a_np = np.random.normal(-5, 5, m * k).astype(dtype).reshape([m, k])
if dtype in (np.complex64, np.complex128):
a_np.imag = np.random.normal(-5, 5,
m * k).astype(dtype).reshape([m, k])
b_np = np.random.normal(-5, 5, k * n).astype(dtype).reshape([k, n])
if dtype in (np.complex64, np.complex128):
b_np.imag = np.random.normal(-5, 5,
k * n).astype(dtype).reshape([k, n])
for adjoint_a, transpose_a in trans_options:
for adjoint_b, transpose_b in trans_options:
name = "%s_%s_%s_%s_%s_%s_%s_%s_%s" % (
use_static_shape, dtype.__name__, m, n, k, adjoint_a,
transpose_a, adjoint_b, transpose_b)
_AddTest(MatMulTest, "MatMulTest", name,
_GetMatMulTest(
a_np,
b_np,
use_static_shape,
adjoint_a=adjoint_a,
transpose_a=transpose_a,
adjoint_b=adjoint_b,
transpose_b=transpose_b))
_AddTest(MatMulGradientTest, "MatMulGradientTest", name,
_GetMatMulGradientTest(
a_np,
b_np,
use_static_shape,
adjoint_a=adjoint_a,
transpose_a=transpose_a,
adjoint_b=adjoint_b,
transpose_b=transpose_b))
test_lib.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/matmul_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.session_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import session_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class SessionOpsTest(test.TestCase):
@test_util.run_deprecated_v1
def testHandleBasic(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = self.evaluate(h)
# Feed a tensor handle.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
y = math_ops.multiply(x, 10)
self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))
@test_util.run_deprecated_v1
def testHandleEval(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = self.evaluate(h)
# Get the tensor from its handle.
self.assertEqual(50, h.eval())
@test_util.run_deprecated_v1
def testHandleAndValue(self):
with self.cached_session() as sess:
# Return a handle and a value.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
v = math_ops.multiply(a, c)
h, v = self.evaluate([h, v])
self.assertEqual(50, h.eval())
self.assertEqual(500, v)
@test_util.run_deprecated_v1
def testHandleCond(self):
with self.cached_session() as sess:
# Return a handle and a value
a = constant_op.constant(10)
b = constant_op.constant(5)
p = math_ops.less(a, b)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
p, h = self.evaluate([p, h])
# Run by feeding a tensor handle.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
if p:
y = math_ops.multiply(x, 10)
else:
y = math_ops.multiply(x, 100)
result = sess.run(y, feed_dict={f: h.handle})
self.assertEqual(5000, result)
@test_util.run_deprecated_v1
def testHandleForLoop(self):
with self.cached_session() as sess:
# Initialize a handle.
a = constant_op.constant(0)
h = session_ops.get_session_handle(a)
h = self.evaluate(h)
# Do some computation.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
# Must define the loop body outside the loop.
h_x = session_ops.get_session_handle(math_ops.add(x, 1))
for _ in range(100):
# This exercises garbage collection.
h = sess.run(h_x, feed_dict={f: h.handle})
self.assertEqual(100, h.eval())
@test_util.run_deprecated_v1
def testHandleWhileLoop(self):
with self.cached_session() as sess:
# Initialize a handle.
a = constant_op.constant(0)
h = session_ops.get_session_handle(a)
h = self.evaluate(h)
# Do some computation.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
b = constant_op.constant(100)
p = math_ops.less(x, b)
# Must define the loop body outside the loop.
h_x = session_ops.get_session_handle(math_ops.add(x, 1))
while True:
rp, h = sess.run([p, h_x], feed_dict={f: h.handle})
if not rp:
break
self.assertEqual(101, h.eval())
@test_util.run_deprecated_v1
def testHandleMover(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = self.evaluate(h)
# Feed a tensor handle.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
y = math_ops.multiply(x, 10)
self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))
# Feed another tensor handle.
with ops.device(test.gpu_device_name()):
a = constant_op.constant(10)
h = session_ops.get_session_handle(a)
h = self.evaluate(h)
self.assertEqual(100, sess.run(y, feed_dict={f: h.handle}))
@test_util.run_deprecated_v1
def testHandleDelete(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
self.evaluate(h).delete()
@test_util.run_deprecated_v1
def testHandleDeleteRaw(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = self.evaluate(h)
# Delete using a raw tensor handle.
raw_h = h.get_raw_handle()
f, x = session_ops.delete_session_tensor(raw_h)
sess.run(x, feed_dict={f: raw_h})
@test_util.run_deprecated_v1
def testMultiDevices(self):
with self.cached_session() as sess:
with ops.device(test.gpu_device_name()):
a = constant_op.constant(1.0)
a_handle = self.evaluate(session_ops.get_session_handle(a))
with ops.device("/cpu:0"):
b = constant_op.constant(2.0)
b_handle = self.evaluate(session_ops.get_session_handle(b))
a_p, a_t = session_ops.get_session_tensor(a_handle.handle, dtypes.float32)
b_p, b_t = session_ops.get_session_tensor(b_handle.handle, dtypes.float32)
c = math_ops.add(a_t, b_t)
c_handle = sess.run(
session_ops.get_session_handle(c),
feed_dict={a_p: a_handle.handle,
b_p: b_handle.handle})
self.assertEqual(3.0, c_handle.eval())
@test_util.run_deprecated_v1
def testHandleGC(self):
with self.cached_session() as sess:
# initial values live on CPU
with ops.device("/cpu:0"):
one = constant_op.constant(1, dtype=dtypes.float32)
one_handle = self.evaluate(session_ops.get_session_handle(one))
x_handle = self.evaluate(session_ops.get_session_handle(one))
# addition lives on GPU
with ops.device(test.gpu_device_name()):
add_h1, add_t1 = session_ops.get_session_tensor(one_handle.handle,
dtypes.float32)
add_h2, add_t2 = session_ops.get_session_tensor(x_handle.handle,
dtypes.float32)
add_op = math_ops.add(add_t1, add_t2)
add_output = session_ops.get_session_handle(add_op)
# add 1 to tensor 20 times
for _ in range(20):
x_handle = sess.run(
add_output,
feed_dict={add_h1: one_handle.handle,
add_h2: x_handle.handle})
@test_util.run_deprecated_v1
def testHandlePlacement(self):
with self.cached_session() as sess:
a = constant_op.constant(1.0)
a_handle_op = session_ops.get_session_handle(a)
b = constant_op.constant(2.0)
b_handle_op = session_ops.get_session_handle(b)
a_handle = self.evaluate(a_handle_op)
b_handle = self.evaluate(b_handle_op)
a_p, a_t = session_ops.get_session_tensor(a_handle.handle, dtypes.float32)
b_p, b_t = session_ops.get_session_tensor(b_handle.handle, dtypes.float32)
c = math_ops.add(a_t, b_t)
c_handle = sess.run(
session_ops.get_session_handle(c),
feed_dict={a_p: a_handle.handle,
b_p: b_handle.handle})
self.assertEqual(3.0, c_handle.eval())
@test_util.run_deprecated_v1
def testFeedOneHandleDirectly(self):
with self.cached_session() as sess:
a = constant_op.constant(10.0)
b = constant_op.constant(5.0)
c = math_ops.multiply(a, b)
d = math_ops.multiply(c, c)
h_c = self.evaluate(session_ops.get_session_handle(c))
self.assertAllClose(2500.0, sess.run(d, feed_dict={c: h_c}))
@test_util.run_deprecated_v1
def testDirectHandleFeedOverlappingWithFetches(self):
with self.cached_session() as sess:
a = constant_op.constant(10.0)
b = constant_op.constant(5.0)
c = math_ops.multiply(a, b)
h_c = self.evaluate(session_ops.get_session_handle(c))
d = array_ops.identity(c)
c_val = sess.run(c, feed_dict={c: h_c})
self.assertAllClose(50.0, c_val)
d_val = sess.run(d, feed_dict={c: h_c})
self.assertAllClose(50.0, d_val)
c_val, d_val = sess.run([c, d], feed_dict={c: h_c, d: 60.0})
self.assertAllClose(50.0, c_val)
self.assertAllClose(60.0, d_val)
c_val, d_val = sess.run([c, d], feed_dict={c: 60.0, d: h_c})
self.assertAllClose(60.0, c_val)
self.assertAllClose(50.0, d_val)
c_val, d_val = sess.run([c, d], feed_dict={c: h_c, d: h_c})
self.assertAllClose(50.0, c_val)
self.assertAllClose(50.0, d_val)
@test_util.run_deprecated_v1
def testFeedTwoHandlesDirectly(self):
with self.cached_session() as sess:
a = constant_op.constant(10.0)
b = constant_op.constant(5.0)
c = math_ops.multiply(a, b)
d = math_ops.div(a, b)
e = math_ops.subtract(c, d)
h_c = self.evaluate(session_ops.get_session_handle(c))
h_d = self.evaluate(session_ops.get_session_handle(d))
self.assertAllClose(48.0, sess.run(e, feed_dict={c: h_c, d: h_d}))
self.assertAllClose(-48.0, sess.run(e, feed_dict={c: h_d, d: h_c}))
@test_util.run_deprecated_v1
def testFeedHandleToVariableDirectly(self):
with self.cached_session() as sess:
a = variables.Variable(12.0)
inc_a = state_ops.assign_add(a, 2.0)
b = math_ops.add(a, 5.0)
self.evaluate(a.initializer)
h_a_read = sess.run(session_ops.get_session_handle(a.read_value()))
self.assertAllClose(12.0, self.evaluate(a))
self.assertAllClose(17.0, sess.run(b, feed_dict={a: h_a_read}))
self.evaluate(inc_a)
self.assertAllClose(19.0, sess.run(b, feed_dict={a: h_a_read}))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/session_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for DepthToSpace op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class DepthToSpaceTest(test.TestCase):
def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):
input_nhwc = math_ops.cast(inputs, dtype)
with self.cached_session(use_gpu=False):
# test NHWC (default) on CPU
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
self.assertAllEqual(x_tf.eval(), outputs)
if test.is_gpu_available():
with self.cached_session(use_gpu=True):
# test NHWC (default) on GPU
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
self.assertAllEqual(x_tf.eval(), outputs)
# test NCHW on GPU
input_nchw = test_util.NHWCToNCHW(input_nhwc)
output_nchw = array_ops.depth_to_space(
input_nchw, block_size, data_format="NCHW")
output_nhwc = test_util.NCHWToNHWC(output_nchw)
self.assertAllEqual(output_nhwc.eval(), outputs)
@test_util.run_deprecated_v1
def testBasic(self):
x_np = [[[[1, 2, 3, 4]]]]
block_size = 2
x_out = [[[[1], [2]], [[3], [4]]]]
self._testOne(x_np, block_size, x_out)
@test_util.run_deprecated_v1
def testBasicFloat16(self):
x_np = [[[[1, 2, 3, 4]]]]
block_size = 2
x_out = [[[[1], [2]], [[3], [4]]]]
self._testOne(x_np, block_size, x_out, dtype=dtypes.float16)
  # Tests for larger input dimensions, to make sure elements are
  # correctly ordered spatially.
@test_util.run_deprecated_v1
def testBlockSize2(self):
x_np = [[[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]]]
block_size = 2
x_out = [[[[1], [2], [5], [6]],
[[3], [4], [7], [8]],
[[9], [10], [13], [14]],
[[11], [12], [15], [16]]]]
self._testOne(x_np, block_size, x_out)
@test_util.run_deprecated_v1
def testBlockSize2Batch10(self):
block_size = 2
def batch_input_elt(i):
return [[[1 * i, 2 * i, 3 * i, 4 * i],
[5 * i, 6 * i, 7 * i, 8 * i]],
[[9 * i, 10 * i, 11 * i, 12 * i],
[13 * i, 14 * i, 15 * i, 16 * i]]]
def batch_output_elt(i):
return [[[1 * i], [2 * i], [5 * i], [6 * i]],
[[3 * i], [4 * i], [7 * i], [8 * i]],
[[9 * i], [10 * i], [13 * i], [14 * i]],
[[11 * i], [12 * i], [15 * i], [16 * i]]]
batch_size = 10
x_np = [batch_input_elt(i) for i in range(batch_size)]
x_out = [batch_output_elt(i) for i in range(batch_size)]
self._testOne(x_np, block_size, x_out)
def testBatchSize0(self):
block_size = 2
batch_size = 0
input_nhwc = array_ops.ones([batch_size, 2, 3, 12])
x_out = array_ops.ones([batch_size, 4, 6, 3])
with self.cached_session(use_gpu=False):
# test NHWC (default) on CPU
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
self.assertAllEqual(x_tf.shape, x_out.shape)
self.evaluate(x_tf)
if test.is_gpu_available():
with self.cached_session(use_gpu=True):
# test NHWC (default) on GPU
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
self.assertAllEqual(x_tf.shape, x_out.shape)
self.evaluate(x_tf)
# Tests for different width and height.
@test_util.run_deprecated_v1
def testNonSquare(self):
x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40]],
[[5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120]]]]
block_size = 2
x_out = [[[[1, 10], [2, 20]],
[[3, 30], [4, 40]],
[[5, 50], [6, 60]],
[[7, 70], [8, 80]],
[[9, 90], [10, 100]],
[[11, 110], [12, 120]]]]
self._testOne(x_np, block_size, x_out)
  # Tests for larger input dimensions, to make sure elements are
  # correctly ordered spatially.
@test_util.run_deprecated_v1
def testBlockSize4FlatInput(self):
x_np = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
block_size = 4
x_out = [[[[1], [2], [5], [6]],
[[3], [4], [7], [8]],
[[9], [10], [13], [14]],
[[11], [12], [15], [16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths.
# To make sure elements are properly interleaved in depth.
@test_util.run_deprecated_v1
def testDepthInterleaved(self):
x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
block_size = 2
x_out = [[[[1, 10], [2, 20]],
[[3, 30], [4, 40]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths. Here an odd depth.
# To make sure elements are properly interleaved in depth.
@test_util.run_deprecated_v1
def testDepthInterleavedDepth3(self):
x_np = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
block_size = 2
x_out = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths.
# To make sure elements are properly interleaved in depth.
@test_util.run_deprecated_v1
def testDepthInterleavedLarger(self):
x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40],
[5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120],
[13, 130, 14, 140, 15, 150, 16, 160]]]]
block_size = 2
x_out = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
[[3, 30], [4, 40], [7, 70], [8, 80]],
[[9, 90], [10, 100], [13, 130], [14, 140]],
[[11, 110], [12, 120], [15, 150], [16, 160]]]]
self._testOne(x_np, block_size, x_out)
# Error handling:
  # Tests for a block size too large for the depth. Such a case should
  # raise an exception.
@test_util.run_deprecated_v1
def testBlockSizeTooLarge(self):
x_np = [[[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]]]
block_size = 4
    # Raises an exception, since the depth is only 4 and needs to be
    # divisible by 16 (the square of the block size).
with self.assertRaises(ValueError):
out_tf = array_ops.depth_to_space(x_np, block_size)
self.evaluate(out_tf)
# Test when the block size is 0.
@test_util.run_deprecated_v1
def testBlockSize0(self):
x_np = [[[[1], [2]],
[[3], [4]]]]
block_size = 0
with self.assertRaises(ValueError):
out_tf = array_ops.depth_to_space(x_np, block_size)
self.evaluate(out_tf)
# Test when the block size is 1. The block size should be > 1.
@test_util.run_deprecated_v1
def testBlockSizeOne(self):
x_np = [[[[1, 1, 1, 1],
[2, 2, 2, 2]],
[[3, 3, 3, 3],
[4, 4, 4, 4]]]]
block_size = 1
with self.assertRaises(ValueError):
out_tf = array_ops.depth_to_space(x_np, block_size)
self.evaluate(out_tf)
@test_util.run_deprecated_v1
def testBlockSizeLargerThanInput(self):
# The block size is too large for this input.
x_np = [[[[1], [2]],
[[3], [4]]]]
block_size = 10
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
self.evaluate(out_tf)
@test_util.run_deprecated_v1
def testBlockSizeNotDivisibleDepth(self):
# The depth is not divisible by the square of the block size.
x_np = [[[[1, 1, 1, 1],
[2, 2, 2, 2]],
[[3, 3, 3, 3],
[4, 4, 4, 4]]]]
block_size = 3
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
@test_util.run_deprecated_v1
def testUnknownShape(self):
t = array_ops.depth_to_space(
array_ops.placeholder(dtypes.float32), block_size=4)
self.assertEqual(4, t.get_shape().ndims)
def depthToSpaceUsingTranspose(self, tensor, block_size, data_format):
block_size_sq = block_size * block_size
if data_format == "NHWC":
b, ih, iw, ic = tensor.shape.as_list()
assert ic % block_size_sq == 0, (ic, block_size_sq)
ow, oh, oc = iw * block_size, ih * block_size, ic // block_size_sq
tensor = array_ops.reshape(tensor,
[b, ih, iw, block_size, block_size, oc])
tensor = array_ops.transpose(tensor, [0, 1, 3, 2, 4, 5])
tensor = array_ops.reshape(tensor, [b, oh, ow, oc])
elif data_format == "NCHW":
b, ic, ih, iw = tensor.shape.as_list()
assert ic % block_size_sq == 0, (ic, block_size_sq)
ow, oh, oc = iw * block_size, ih * block_size, ic // block_size_sq
tensor = array_ops.reshape(tensor,
[b, block_size, block_size, oc, ih, iw])
tensor = array_ops.transpose(tensor, [0, 3, 4, 1, 5, 2])
tensor = array_ops.reshape(tensor, [b, oc, oh, ow])
return tensor
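  # Worked example (NHWC, block_size=2): an input of shape [1, 1, 1, 4] with
  # values [1, 2, 3, 4] is reshaped to [1, 1, 1, 2, 2, 1], transposed, and
  # reshaped to [1, 2, 2, 1], yielding [[1], [2]] over [[3], [4]], the same
  # layout testBasic expects from array_ops.depth_to_space.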
def compareToTranspose(self, batch_size, in_height, in_width, out_channels,
block_size, data_format, use_gpu):
in_channels = out_channels * block_size * block_size
nhwc_input_shape = [batch_size, in_height, in_width, in_channels]
nchw_input_shape = [batch_size, in_channels, in_height, in_width]
total_size = np.prod(nhwc_input_shape)
if data_format == "NCHW_VECT_C":
# Initialize the input tensor with qint8 values that circle -127..127.
x = [((f + 128) % 255) - 127 for f in range(total_size)]
t = constant_op.constant(x, shape=nhwc_input_shape, dtype=dtypes.float32)
expected = self.depthToSpaceUsingTranspose(t, block_size, "NHWC")
t = test_util.NHWCToNCHW_VECT_C(t)
t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
t = array_ops.depth_to_space(t, block_size, data_format="NCHW_VECT_C")
t = gen_array_ops.dequantize(t, -128, 127)
actual = test_util.NCHW_VECT_CToNHWC(t)
else:
# Initialize the input tensor with ascending whole numbers as floats.
x = [f * 1.0 for f in range(total_size)]
shape = nchw_input_shape if data_format == "NCHW" else nhwc_input_shape
t = constant_op.constant(x, shape=shape, dtype=dtypes.float32)
expected = self.depthToSpaceUsingTranspose(t, block_size, data_format)
actual = array_ops.depth_to_space(t, block_size, data_format=data_format)
with self.session(use_gpu=use_gpu) as sess:
actual_vals, expected_vals = self.evaluate([actual, expected])
self.assertTrue(np.array_equal(actual_vals, expected_vals))
def testAgainstTranspose(self):
self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", False)
self.compareToTranspose(3, 2, 3, 2, 2, "NHWC", False)
self.compareToTranspose(1, 2, 3, 2, 3, "NHWC", False)
if not test.is_gpu_available():
tf_logging.info("skipping gpu tests since gpu not available")
return
self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", True)
self.compareToTranspose(3, 2, 3, 2, 2, "NHWC", True)
self.compareToTranspose(3, 2, 3, 1, 2, "NCHW", True)
self.compareToTranspose(3, 2, 3, 2, 2, "NCHW", True)
self.compareToTranspose(3, 2, 3, 1, 3, "NCHW", True)
self.compareToTranspose(3, 2, 3, 2, 3, "NCHW", True)
self.compareToTranspose(5, 7, 11, 3, 2, "NCHW", True)
self.compareToTranspose(3, 200, 300, 32, 2, "NCHW", True)
self.compareToTranspose(3, 2, 3, 8, 2, "NCHW_VECT_C", True)
self.compareToTranspose(3, 2, 3, 4, 3, "NCHW_VECT_C", True)
self.compareToTranspose(3, 2, 3, 8, 3, "NCHW_VECT_C", True)
self.compareToTranspose(5, 7, 11, 12, 2, "NCHW_VECT_C", True)
self.compareToTranspose(3, 200, 300, 32, 2, "NCHW_VECT_C", True)
class DepthToSpaceGradientTest(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_size, data_format):
# NCHW is implemented for only GPU.
if data_format == "NCHW" and not test.is_gpu_available():
return
assert 4 == x.ndim
with self.cached_session(use_gpu=True):
tf_x = ops.convert_to_tensor(x)
tf_y = array_ops.depth_to_space(tf_x, block_size, data_format=data_format)
epsilon = 1e-2
((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
# Tests a gradient for depth_to_space of x which is a four dimensional
# tensor of shape [b, h, w, d * block_size * block_size].
def _compare(self, b, h, w, d, block_size, data_format):
block_size_sq = block_size * block_size
data = np.random.normal(0, 1, b * h * w * d * block_size_sq).astype(
np.float32)
if data_format == "NHWC":
x = data.reshape([b, h, w, d * block_size_sq])
else:
x = data.reshape([b, d * block_size_sq, h, w])
self._checkGrad(x, block_size, data_format)
  # Don't use very large numbers as dimensions here, as the result is a
  # tensor whose size is the cartesian product of the dimensions.
@test_util.run_deprecated_v1
def testSmall(self):
block_size = 2
self._compare(3, 2, 5, 3, block_size, "NHWC")
self._compare(3, 2, 5, 3, block_size, "NCHW")
@test_util.run_deprecated_v1
def testSmall2(self):
block_size = 3
self._compare(1, 2, 3, 2, block_size, "NHWC")
self._compare(1, 2, 3, 2, block_size, "NCHW")
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/depthtospace_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.BatchMatMul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
def GetRandomNormalInput(shape, dtype):
# float16 has limited range so we reduce the variance of the scalars.
scale = 10.0 if dtype != np.float16 else 0.1
loc = -10.0 if dtype != np.float16 else 0.1
vals = np.array(np.random.normal(loc, scale, np.prod(shape)), dtype=dtype)
if dtype in (np.complex64, np.complex128):
imag = np.array(np.random.normal(loc, scale, np.prod(shape)), dtype=dtype)
vals += 1j * imag
return vals.reshape(shape)
class BatchMatmulOpTest(test.TestCase):
# Uses numpy to compute batch_matmul(x, y, adjoint_a, adjoint_b).
def _npBatchMatmul(self, x, y, adjoint_a, adjoint_b):
# output's shape depends on adj[0] and adj[1]
if adjoint_a:
x = np.conjugate(np.swapaxes(x, -1, -2))
if adjoint_b:
y = np.conjugate(np.swapaxes(y, -1, -2))
return np.matmul(x, y)
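  # Example of the adjoint semantics above: with adjoint_a=True, an x of
  # shape [b, k, m] is conjugate-transposed on its last two axes to
  # [b, m, k] before np.matmul, matching the adjoint_a flag of
  # math_ops.matmul.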
# Compares TensorFlow BatchMatmul with NumPy's matmul.
def _compare(self, x_in, y_in, adjoint_a, adjoint_b, static_shape):
x_t_shape = x_in.shape[:-2] + (x_in.shape[-1], x_in.shape[-2])
y_t_shape = y_in.shape[:-2] + (y_in.shape[-1], y_in.shape[-2])
x = x_in if not adjoint_a else x_in.reshape(x_t_shape)
y = y_in if not adjoint_b else y_in.reshape(y_t_shape)
is_floating = x.dtype != np.int32
tol = 100 * np.finfo(x.dtype).eps if is_floating else 0
with self.cached_session(use_gpu=is_floating) as sess:
if static_shape:
with ops.device("/cpu:0"):
z0 = math_ops.matmul(x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
z0_val = self.evaluate(z0)
else:
x_ph = array_ops.placeholder(x.dtype)
y_ph = array_ops.placeholder(y.dtype)
with ops.device("/cpu:0"):
z0 = math_ops.matmul(
x_ph, y_ph, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
z0_val = sess.run(z0, feed_dict={x_ph: x, y_ph: y})
z1 = self._npBatchMatmul(x, y, adjoint_a, adjoint_b)
self.assertAllClose(z0_val, z1, rtol=tol, atol=tol)
def _testNonEmpty(self, dtype, adjoint_a, adjoint_b, use_static_shape):
def CompareNonEmpty(self, a_shape, b_shape):
self._compare(
GetRandomNormalInput(a_shape, dtype),
GetRandomNormalInput(b_shape, dtype),
adjoint_a,
adjoint_b,
static_shape=use_static_shape)
CompareNonEmpty(self, [1, 2, 3], [1, 3, 5])
CompareNonEmpty(self, [1, 2, 3], [1, 3, 1])
CompareNonEmpty(self, [1, 1, 3], [1, 3, 5])
CompareNonEmpty(self, [1, 2, 3], [1, 3, 5])
CompareNonEmpty(self, [7, 1, 3], [7, 3, 5])
CompareNonEmpty(self, [7, 2, 3], [7, 3, 1])
CompareNonEmpty(self, [7, 2, 3], [7, 3, 5])
CompareNonEmpty(self, [10, 64, 75], [10, 75, 30])
CompareNonEmpty(self, [5, 7, 2, 3], [5, 7, 3, 5])
def _testBroadcasting(self, dtype, adjoint_a, adjoint_b, use_static_shape):
def CompareNonEmpty(self, a_shape, b_shape):
self._compare(
GetRandomNormalInput(a_shape, dtype),
GetRandomNormalInput(b_shape, dtype),
adjoint_a,
adjoint_b,
static_shape=use_static_shape)
CompareNonEmpty(self, [2, 3], [1, 3, 5])
CompareNonEmpty(self, [1, 2, 3], [3, 5])
CompareNonEmpty(self, [5, 1, 2, 3], [1, 7, 3, 5])
CompareNonEmpty(self, [5, 2, 2, 3], [3, 5])
CompareNonEmpty(self, [2, 3], [5, 2, 3, 5])
CompareNonEmpty(self, [4, 5, 1, 2, 3], [1, 1, 3, 5])
CompareNonEmpty(self, [1, 2, 1, 4, 2, 1, 3, 4], [3, 2, 1, 1, 1, 2, 4, 2])
def _testEmpty(self, dtype, adjoint_a, adjoint_b, use_static_shape):
def CompareEmpty(self, a_shape, b_shape):
self._compare(
np.zeros(a_shape).astype(dtype),
np.zeros(b_shape).astype(dtype),
adjoint_a,
adjoint_b,
static_shape=use_static_shape)
CompareEmpty(self, [0, 3, 2], [0, 2, 4])
CompareEmpty(self, [3, 0, 2], [3, 2, 5])
CompareEmpty(self, [3, 3, 2], [3, 2, 0])
def _GetBatchMatmulOpTest(dtype, adjoint_a, adjoint_b, use_static_shape):
def Test(self):
np.random.seed(42)
self._testNonEmpty(dtype, adjoint_a, adjoint_b, use_static_shape)
self._testEmpty(dtype, adjoint_a, adjoint_b, use_static_shape)
return Test
def _GetBatchMatmulOpBroadcastingTest(dtype, adjoint_a, adjoint_b,
use_static_shape):
def Test(self):
with compat.forward_compatibility_horizon(2019, 4, 26):
np.random.seed(42)
self._testBroadcasting(dtype, adjoint_a, adjoint_b, use_static_shape)
return Test
class BatchMatmulGradientTest(test.TestCase):
# loss = sum(batch_matmul(x, y)). Verify dl/dx and dl/dy via the
# gradient checker.
def _checkGrad(self, x_in, y_in, adjoint_a, adjoint_b):
x_t_shape = x_in.shape[:-2] + (x_in.shape[-1], x_in.shape[-2])
y_t_shape = y_in.shape[:-2] + (y_in.shape[-1], y_in.shape[-2])
x = x_in if not adjoint_a else x_in.reshape(x_t_shape)
y = y_in if not adjoint_b else y_in.reshape(y_t_shape)
epsilon = np.finfo(x.dtype).eps
# Since our gradient is linear, a larger delta decreases the error.
delta = 10 * epsilon**(1.0 / 3.0)
def Loss(x, y):
return math_ops.reduce_sum(math_ops.matmul(x, y, adjoint_a, adjoint_b))
with self.cached_session(use_gpu=True):
((x_jacob_t, y_jacob_t),
(x_jacob_n, y_jacob_n)) = gradient_checker_v2.compute_gradient(
Loss, [x, y], delta=delta)
tol = 10 * delta
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=tol, atol=tol)
self.assertAllClose(y_jacob_t, y_jacob_n, rtol=tol, atol=tol)
# Tests gradients of a batched matmul of x, and y
def _compare(self, a_shape, b_shape, dtype, adjoint_a, adjoint_b):
np.random.seed(42)
x = GetRandomNormalInput(a_shape, dtype)
y = GetRandomNormalInput(b_shape, dtype)
self._checkGrad(x, y, adjoint_a, adjoint_b)
def _GetBatchMatmulGradientTest(dtype, adjoint_a, adjoint_b):
def Test(self):
def CheckGradients(self, a_shape, b_shape):
self._compare(a_shape, b_shape, dtype, adjoint_a, adjoint_b)
CheckGradients(self, [1, 2, 3], [1, 3, 5])
CheckGradients(self, [3, 4, 7], [3, 7, 10])
return Test
def _GetBatchMatmulGradientWithBroadcastingTest(dtype, adjoint_a, adjoint_b):
def Test(self):
def CheckGradients(self, a_shape, b_shape):
self._compare(a_shape, b_shape, dtype, adjoint_a, adjoint_b)
with compat.forward_compatibility_horizon(2019, 4, 26):
CheckGradients(self, [1, 5, 2, 3], [7, 1, 3, 2])
CheckGradients(self, [2, 3], [1, 3, 5])
CheckGradients(self, [2, 3], [5, 3, 5])
CheckGradients(self, [5, 2, 5], [5, 3])
CheckGradients(self, [5, 2, 2, 3], [3, 5])
CheckGradients(self, [4, 5, 1, 2, 3], [1, 1, 3, 5])
CheckGradients(self, [1, 2, 1, 4, 2, 1, 3, 4], [3, 2, 1, 1, 1, 2, 4, 2])
return Test
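# For reference: the finite-difference step used by _checkGrad above,
# evaluated for the common float types. The generic rule 10 * eps**(1/3)
# balances truncation error against floating-point rounding error; because
# the matmul gradient is linear, truncation vanishes and a larger delta only
# helps, as the comment in _checkGrad notes. Hedged illustration, not used
# by the tests.
def _grad_check_delta_sketch():
  deltas = {}
  for np_dtype in (np.float32, np.float64):
    eps = np.finfo(np_dtype).eps  # machine epsilon for this dtype
    deltas[np.dtype(np_dtype).name] = 10 * eps**(1.0 / 3.0)
  return deltas  # approx {'float32': 4.9e-02, 'float64': 6.1e-05}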
class BatchMatMulBenchmark(test.Benchmark):
  # The broadcast batch dimensions in each pair multiply to 256.
shape_pairs = [
# Typical fully connected layer.
((4, 8, 4, 2, 1, 1024), (1024, 1024)),
((4, 1, 4, 1, 1, 1024), (1, 8, 1, 2, 1024, 1024)),
# Square matmul.
((4, 8, 4, 2, 512, 512), (512, 512)),
((4, 1, 4, 1, 512, 512), (1, 8, 1, 2, 512, 512)),
# Matrix-vector multiplies.
((4, 8, 4, 2, 10000, 200), (200, 1)),
((4, 1, 4, 1, 10000, 200), (1, 8, 1, 2, 200, 1)),
# Vector-matrix multiplies.
((4, 8, 4, 2, 1, 200), (200, 10000)),
((4, 1, 4, 1, 1, 200), (1, 8, 1, 2, 200, 10000)),
]
def benchmarkBatchMatMulBroadcast(self):
for (a_shape, b_shape) in self.shape_pairs:
with compat.forward_compatibility_horizon(2019, 4, 26):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix_a = variables.Variable(
GetRandomNormalInput(a_shape, np.float32))
matrix_b = variables.Variable(
GetRandomNormalInput(b_shape, np.float32))
variables.global_variables_initializer().run()
# Use batch matmul op's internal broadcasting.
self.run_op_benchmark(
sess,
math_ops.matmul(matrix_a, matrix_b),
min_iters=50,
name="batch_matmul_cpu_{}_{}".format(a_shape, b_shape))
# Manually broadcast the input matrices using the broadcast_to op.
broadcasted_batch_shape = array_ops.broadcast_static_shape(
matrix_a.shape[:-2], matrix_b.shape[:-2])
broadcasted_a_shape = broadcasted_batch_shape.concatenate(
matrix_a.shape[-2:])
broadcasted_b_shape = broadcasted_batch_shape.concatenate(
matrix_b.shape[-2:])
self.run_op_benchmark(
sess,
math_ops.matmul(
array_ops.broadcast_to(matrix_a, broadcasted_a_shape),
array_ops.broadcast_to(matrix_b, broadcasted_b_shape)),
min_iters=50,
name="batch_matmul_manual_broadcast_cpu_{}_{}".format(
a_shape, b_shape))
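# For reference, the broadcasting rule both benchmark variants exercise:
# leading (batch) dimensions are right-aligned and each pair must match or
# contain a 1. A hedged NumPy-level sketch of what
# array_ops.broadcast_static_shape computes for the batch dims; not used by
# the benchmark itself.
def _np_broadcast_batch_shape_sketch(a_shape, b_shape):
  a_batch, b_batch = list(a_shape[:-2]), list(b_shape[:-2])
  n = max(len(a_batch), len(b_batch))
  a_batch = [1] * (n - len(a_batch)) + a_batch  # left-pad with ones
  b_batch = [1] * (n - len(b_batch)) + b_batch
  out = []
  for x, y in zip(a_batch, b_batch):
    if x != y and 1 not in (x, y):
      raise ValueError("Incompatible batch dims: %d vs %d" % (x, y))
    out.append(max(x, y))
  return out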
if __name__ == "__main__":
dtypes_to_test = [np.float16, np.float32, np.float64, np.int32]
if not test.is_built_with_rocm():
# ROCm does not support BLAS operations for complex types
dtypes_to_test += [np.complex64, np.complex128]
for dtype_ in dtypes_to_test:
for adjoint_a_ in False, True:
for adjoint_b_ in False, True:
name = "%s_%s_%s" % (dtype_.__name__, adjoint_a_, adjoint_b_)
        # TF2 does not support placeholders under eager, so we skip that case.
for use_static_shape_ in set([True, tf2.enabled()]):
setattr(
BatchMatmulOpTest,
"testBatchMatmulOp_" + name + "_{}".format(use_static_shape_),
_GetBatchMatmulOpTest(dtype_, adjoint_a_, adjoint_b_,
use_static_shape_))
# Broadcasting is supported only in v2.
setattr(
BatchMatmulOpTest, "testBatchMatmulBroadcasting_" + name +
("_%s" % use_static_shape_),
_GetBatchMatmulOpBroadcastingTest(dtype_, adjoint_a_, adjoint_b_,
use_static_shape_))
if dtype_ == np.int32:
continue
setattr(BatchMatmulGradientTest, "testBatchMatmulGradient_" + name,
_GetBatchMatmulGradientTest(dtype_, adjoint_a_, adjoint_b_))
# Broadcasting is supported only in v2.
setattr(
BatchMatmulGradientTest,
"testBatchMatmulGradientWithBroadcasting_" + name,
_GetBatchMatmulGradientWithBroadcastingTest(dtype_, adjoint_a_,
adjoint_b_))
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/batch_matmul_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for segment reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class SegmentReductionHelper(test.TestCase):
def _input(self, input_shape, dtype=dtypes_lib.int32):
num_elem = 1
for x in input_shape:
num_elem *= x
values = np.arange(1, num_elem + 1)
np_values = values.reshape(input_shape).astype(dtype.as_numpy_dtype)
# Add a non-zero imaginary component to complex types.
if dtype.is_complex:
np_values -= 1j * np_values
return constant_op.constant(
np_values, shape=input_shape, dtype=dtype), np_values
def _segmentReduce(self, indices, x, op1, op2=None, num_segments=None,
initial_value=0):
if not x.size:
return np.array([])
indices = np.asarray(indices)
if num_segments is None:
num_segments = indices[-1] + 1
output = [None] * num_segments
slice_shape = x.shape[indices.ndim:]
x_flat = x.reshape((indices.size,) + slice_shape)
for i, index in enumerate(indices.ravel()):
if (output[index] is not None) and op1 == np.max:
for j in range(0, output[index].shape[0]):
output[index][j] = op1([output[index][j], x_flat[i][j]])
elif output[index] is not None:
output[index] = op1(output[index], x_flat[i])
else:
output[index] = x_flat[i]
    # Initialize any values that are still uncalculated with initial_value.
initial_value_slice = np.ones(slice_shape) * initial_value
output = [o if o is not None else initial_value_slice for o in output]
if op2 is not None:
output = [op2(o) for o in output]
output = [o.reshape(slice_shape) for o in output]
return np.array(output)
def _mean_cum_op(self, x, y):
return (x[0] + y, x[1] + 1) if isinstance(x, tuple) else (x + y, 2)
def _mean_reduce_op(self, x):
return x[0] / x[1] if isinstance(x, tuple) else x
def _sqrt_n_reduce_op(self, x):
return x[0] / np.sqrt(x[1]) if isinstance(x, tuple) else x
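# A minimal NumPy sketch of the sorted-segment reduction that _segmentReduce
# above emulates: rows sharing a segment id are combined with the reduction
# op. Hedged illustration only; the tests use _segmentReduce as their
# reference implementation.
def _np_segment_sum_sketch():
  data = np.array([[1., 2.], [3., 4.], [5., 6.], [7., 8.]])
  segment_ids = np.array([0, 0, 1, 1])  # sorted, as segment_sum requires
  out = np.zeros((segment_ids.max() + 1,) + data.shape[1:])
  np.add.at(out, segment_ids, data)  # accumulate each row into its segment
  return out  # [[4., 6.], [12., 14.]]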
class SegmentReductionOpTest(SegmentReductionHelper):
def testValues(self):
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
dtypes_lib.int32, dtypes_lib.complex64, dtypes_lib.complex128
]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, math_ops.segment_sum),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.segment_mean),
(np.ndarray.__mul__, None, math_ops.segment_prod),
(np.minimum, None, math_ops.segment_min),
(np.maximum, None, math_ops.segment_max)]
# A subset of ops has been enabled for complex numbers
complex_ops_list = [(np.add, None, math_ops.segment_sum),
(np.ndarray.__mul__, None, math_ops.segment_prod),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.segment_mean)]
n = 10
shape = [n, 2]
indices = [i // 3 for i in range(n)]
for dtype in dtypes:
if dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
curr_ops_list = complex_ops_list
else:
curr_ops_list = ops_list
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtype)
for np_op1, np_op2, tf_op in curr_ops_list:
np_ans = self._segmentReduce(indices, np_x, np_op1, np_op2)
s = tf_op(data=tf_x, segment_ids=indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
            # NOTE(mrry): The static shape inference that computes
            # `tf_ans.shape` can only infer the sizes from dimension 1
            # onwards, because the size of dimension 0 is data-dependent
            # and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
@test_util.run_deprecated_v1
def testSegmentIdsShape(self):
shape = [4, 4]
tf_x, _ = self._input(shape)
indices = constant_op.constant([0, 1, 2, 2], shape=[2, 2])
with self.assertRaises(ValueError):
math_ops.segment_sum(data=tf_x, segment_ids=indices)
@test_util.run_deprecated_v1
def testSegmentIdsSize(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape)
indices = [0, 1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment_ids should be the same size"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentIdsValid(self):
# This is a baseline for the following SegmentIdsInvalid* tests.
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, 1]
result = math_ops.segment_sum(data=tf_x, segment_ids=indices).eval()
self.assertAllEqual([[15, 18, 21, 24], [13, 14, 15, 16]], result)
def testSegmentIdsGreaterThanZero(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float32)
indices = [1, 1, 2, 2]
np_ans = self._segmentReduce(indices, np_x, np.add)
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
def testSegmentIdsHole(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 3, 3]
np_ans = self._segmentReduce(indices, np_x, np.add)
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
@test_util.run_deprecated_v1
def testSegmentIdsInvalid1(self):
shape = [4, 4]
with self.cached_session():
tf_x, _ = self._input(shape)
indices = [-1, -1, 0, 0]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id -1 out of range \[0, 1\), possibly because "
"'segment_ids' input is not sorted."):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentIdsInvalid2(self):
shape = [4, 4]
with self.cached_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 0, 1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids are not increasing"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentIdsInvalid3(self):
shape = [4, 4]
with self.cached_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 2, 0]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), possibly "
"because 'segment_ids' input is not sorted."):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentIdsInvalid4(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, -1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentIdsInvalid5(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, -2]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradient(self):
shape = [4, 4]
indices = [0, 1, 2, 2]
for tf_op in [
math_ops.segment_sum, math_ops.segment_mean, math_ops.segment_min,
math_ops.segment_max
]:
with self.cached_session():
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)
s = tf_op(data=tf_x, segment_ids=indices)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n)
def testInvalidIds(self):
# Test case for GitHub issue 46888.
for op in [
math_ops.segment_max,
math_ops.segment_min,
math_ops.segment_mean,
math_ops.segment_sum,
math_ops.segment_prod,
]:
with self.cached_session():
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
s = op(data=np.ones((1, 10, 1)), segment_ids=[1676240524292489355])
self.evaluate(s)
class UnsortedSegmentTest(SegmentReductionHelper):
def __init__(self, methodName='runTest'):
# Each item is np_op1, np_op2, tf_op, initial_value functor
self.ops_list = [(np.add, None,
math_ops.unsorted_segment_sum, lambda t: 0),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.unsorted_segment_mean, lambda t: 0),
(self._mean_cum_op, self._sqrt_n_reduce_op,
math_ops.unsorted_segment_sqrt_n, lambda t: 0),
(np.ndarray.__mul__, None,
math_ops.unsorted_segment_prod, lambda t: 1),
(np.minimum, None,
math_ops.unsorted_segment_min, lambda t: t.max),
(np.maximum, None,
math_ops.unsorted_segment_max, lambda t: t.min)]
# A subset of ops has been enabled for complex numbers
self.complex_ops_list = [(np.add, None,
math_ops.unsorted_segment_sum, lambda t: 0),
(np.ndarray.__mul__, None,
math_ops.unsorted_segment_prod, lambda t: 1)]
self.differentiable_dtypes = [dtypes_lib.float16, dtypes_lib.float32,
dtypes_lib.float64]
self.all_dtypes = (self.differentiable_dtypes +
[dtypes_lib.bfloat16,
dtypes_lib.int64, dtypes_lib.int32,
dtypes_lib.complex64, dtypes_lib.complex128])
super(UnsortedSegmentTest, self).__init__(methodName=methodName)
def testValues(self):
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in self.all_dtypes:
ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
tf_x, np_x = self._input(shape, dtype=dtype)
        for use_gpu in [True, False]:
          with self.cached_session(use_gpu=use_gpu):
for np_op1, np_op2, tf_op, init_op in ops_list:
# sqrt_n doesn't support integers
if (np_op2 == self._sqrt_n_reduce_op and dtype.is_integer):
continue
              # TODO(philjd): enable this test once real_div supports bfloat16.
if (np_op2 in [self._sqrt_n_reduce_op, self._mean_reduce_op] and
dtype == dtypes_lib.bfloat16):
continue
np_ans = self._segmentReduce(
indices, np_x, np_op1, np_op2, num_segments=num_segments,
initial_value=init_op(dtype))
s = tf_op(tf_x, segment_ids=indices, num_segments=num_segments)
tf_ans = self.evaluate(s)
if dtype is dtypes_lib.bfloat16:
tf_ans = tf_ans.astype(np.float32)
self.assertAllCloseAccordingToType(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
def testNumSegmentsTypes(self):
dtypes = [dtypes_lib.int32, dtypes_lib.int64]
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in dtypes:
with self.cached_session(use_gpu=True):
tf_x, np_x = self._input(shape)
num_segments_constant = constant_op.constant(
num_segments, dtype=dtype)
np_ans = self._segmentReduce(
indices, np_x, np.add, op2=None, num_segments=num_segments)
s = math_ops.unsorted_segment_sum(
data=tf_x,
segment_ids=indices,
num_segments=num_segments_constant)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
@test_util.run_deprecated_v1
def testGradients(self):
num_cols = 2
indices_flat = np.array([0, 4, 0, -1, 3, -1, 4, 7, 7, 3])
num_segments = max(indices_flat) + 3
for dtype in self.differentiable_dtypes:
ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (num_cols,)
# test CPU and GPU as tf.gather behaves differently on each device
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
for _, _, tf_op, _ in ops_list:
tf_x, np_x = self._input(shape, dtype=dtype)
s = tf_op(tf_x, indices, num_segments)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [num_segments, num_cols],
x_init_value=np_x,
delta=1)
self.assertAllClose(jacob_t, jacob_n)
@test_util.run_deprecated_v1
def testProdGrad(self):
# additional test for the prod gradient to ensure correct handling of zeros
values = np.array([0, 0, 1, 0, 2, 2, 3, 3, 3], dtype=np.float32)
indices = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int32)
indices_neg = np.array([-1, 0, 0, -1, 1, 1, -1, 2, 2], dtype=np.int32)
values_tf = constant_op.constant(values)
# ground truth partial derivatives
gradients_indices = np.zeros((9, 3), dtype=np.float32)
gradients_indices_neg = np.zeros((9, 3), dtype=np.float32)
    # The derivative w.r.t. the other segments is zero, so here we only
    # explicitly set the grad values for the corresponding segment.
gradients_indices[range(9), indices] = [0, 0, 0, 4, 0, 0, 9, 9, 9]
gradients_indices_neg[range(9), indices_neg] = [0, 1, 0, 0, 2, 2, 0, 3, 3]
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
for ind, grad_gt in [(indices, gradients_indices),
(indices_neg, gradients_indices_neg)]:
s = math_ops.unsorted_segment_prod(values_tf,
constant_op.constant(ind), 3)
jacob_t, jacob_n = gradient_checker.compute_gradient(
values_tf, (9,), s, (3,), x_init_value=values, delta=1)
self.assertAllClose(jacob_t, jacob_n)
self.assertAllClose(jacob_t, grad_gt)
@test_util.run_deprecated_v1
def testGradientMatchesSegmentSum(self):
# Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
# and compare the outputs, which should be identical.
    # NB: for this test to work, the indices must be valid for SegmentSum:
    # they must be sorted and contiguous, and num_segments must be
    # max(indices) + 1.
indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
n = len(indices)
num_cols = 2
shape = [n, num_cols]
num_segments = max(indices) + 1
for dtype in self.differentiable_dtypes:
with self.cached_session(use_gpu=True):
tf_x, np_x = self._input(shape, dtype=dtype)
# Results from UnsortedSegmentSum
unsorted_s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
unsorted_jacob_t, unsorted_jacob_n = (
gradient_checker.compute_gradient(tf_x, shape, unsorted_s,
[num_segments, num_cols],
x_init_value=np_x, delta=1))
# Results from SegmentSum
sorted_s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
sorted_jacob_t, sorted_jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
sorted_s, [num_segments, num_cols],
x_init_value=np_x,
delta=1)
self.assertAllClose(unsorted_jacob_t, sorted_jacob_t)
self.assertAllClose(unsorted_jacob_n, sorted_jacob_n)
@test_util.run_deprecated_v1
def testBadIndices(self):
# Note: GPU kernel does not return the out-of-range error needed for this
# test, so this test is marked as cpu-only.
# Note: With PR #13055 a negative index will be ignored silently.
with self.session(use_gpu=False):
for bad in [[2]], [[7]]:
unsorted = math_ops.unsorted_segment_sum([[17]], bad, num_segments=2)
with self.assertRaisesOpError(
r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]):
self.evaluate(unsorted)
@test_util.run_deprecated_v1
def testEmptySecondDimension(self):
dtypes = [np.float16, np.float32, np.float64, np.int64, np.int32,
np.complex64, np.complex128]
with self.session(use_gpu=True):
for dtype in dtypes:
for itype in (np.int32, np.int64):
data = np.zeros((2, 0), dtype=dtype)
segment_ids = np.array([0, 1], dtype=itype)
unsorted = math_ops.unsorted_segment_sum(data, segment_ids, 2)
self.assertAllEqual(unsorted.eval(), np.zeros((2, 0), dtype=dtype))
def testDropNegatives(self):
    # Note: this test works by replacing segment ids equal to 8 with -1 in
    # the indices, and zeroing out the corresponding values in the numpy
    # reference.
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in self.all_dtypes:
with self.session(use_gpu=True):
tf_x, np_x = self._input(shape, dtype=dtype)
np_ans = self._segmentReduce(
indices, np_x, np.add, op2=None, num_segments=num_segments)
          # Zero out the numpy reference for segment 8 and the trailing
          # empty segments.
np_ans[8:] = 0
# Replace 8 with -1 in indices
np.place(indices, indices == 8, [-1])
s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
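# A minimal NumPy sketch of the unsorted variant exercised above: segment
# ids may appear in any order, and (as testDropNegatives checks) rows with
# negative ids are silently dropped. Hedged illustration only, assuming
# 1-D segment ids.
def _np_unsorted_segment_sum_sketch(data, segment_ids, num_segments):
  out = np.zeros((num_segments,) + data.shape[1:], dtype=data.dtype)
  for row, seg in zip(data, segment_ids):
    if seg >= 0:  # negative ids are ignored rather than raising an error
      out[seg] += row
  return out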
class SparseSegmentReductionHelper(SegmentReductionHelper):
def _sparse_input(self, input_shape, num_indices, dtype=dtypes_lib.int32):
a, b = super(SparseSegmentReductionHelper, self)._input(input_shape, dtype)
    indices = np.random.randint(
        0, input_shape[0], num_indices).astype(np.int32)
return (constant_op.constant(
indices, dtype=dtypes_lib.int32), indices, a, b)
def _sparseSegmentReduce(self,
x,
indices,
segment_indices,
op1,
op2=None,
num_segments=None):
return self._segmentReduce(
segment_indices, x[indices], op1, op2, num_segments=num_segments)
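# A minimal sketch of the sparse variant: gather the participating rows by
# `indices`, then reduce them by `segment_ids` -- the same composition
# (x[indices] followed by _segmentReduce) used by _sparseSegmentReduce
# above. Hedged illustration only, assuming 1-D indices and segment ids.
def _np_sparse_segment_sum_sketch(data, indices, segment_ids):
  gathered = data[np.asarray(indices)]  # select the rows that participate
  out = np.zeros((max(segment_ids) + 1,) + gathered.shape[1:])
  np.add.at(out, np.asarray(segment_ids), gathered)
  return out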
class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
def testValues(self):
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
dtypes_lib.int32
]
mean_dtypes = [dtypes_lib.float32, dtypes_lib.float64]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, math_ops.sparse_segment_sum),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.sparse_segment_mean)]
n = 400
shape = [n, 2]
segment_indices = []
for i in range(20):
for _ in range(i + 1):
segment_indices.append(i)
num_indices = len(segment_indices)
for dtype in dtypes:
with self.cached_session(use_gpu=False):
tf_indices, np_indices, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtype)
for np_op1, np_op2, tf_op in ops_list:
if tf_op == math_ops.sparse_segment_mean and dtype not in mean_dtypes:
continue
np_ans = self._sparseSegmentReduce(np_x, np_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
          # NOTE(mrry): The static shape inference that computes
          # `tf_ans.shape` can only infer the sizes from dimension 1
          # onwards, because the size of dimension 0 is data-dependent
          # and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
def testSegmentIdsHole(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum), (
self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean)]
segment_indices = [0, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
def testWithNumSegments(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum_with_num_segments),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.sparse_segment_mean_with_num_segments)]
segment_indices = [0, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
num_segments = 5
with self.session(use_gpu=False):
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(
np_x,
tf_indices,
segment_indices,
np_op1,
np_op2,
num_segments=num_segments)
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
def testWithEmptySegments(self):
tf_x = constant_op.constant([], shape=[0, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments
]
segment_indices = []
tf_indices = []
num_segments = 5
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
tf_ans = self.evaluate(s)
self.assertAllClose(np.zeros([5, 4]), tf_ans)
def testSegmentIdsGreaterThanZero(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum), (
self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean)]
segment_indices = [1, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
def testValid(self):
# Baseline for the test*Invalid* methods below.
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
self.evaluate(s)
@test_util.run_deprecated_v1
def testIndicesInvalid1(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, -1, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"indices\[1\] == -1 out of range \[0, 10\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testIndicesInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"indices\[3\] == 10 out of range \[0, 10\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 0, 1]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids are not increasing"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid3(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), possibly because "
"'segment_ids' input is not sorted"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid4(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"Segment id -1 out of range \[0, 2\), possibly because "
"'segment_ids' input is not sorted"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid6(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 0, 0, -1]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid7(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 0, 0, -2]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid8(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [2**62 - 1]
tf_indices = [2**62 - 1]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
#with self.assertRaisesOpError(
# "Encountered overflow when multiplying"):
self.evaluate(s)
def testSegmentWithNumSegmentsValid(self):
# Baseline for the test*WithNumSegmentsInvalid* methods below.
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]
num_segments = 5
segment_indices = [0, 1, 3, 3]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentWithNumSegmentsInvalid1(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]
num_segments = 5
segment_indices = [0, 1, 3, 5]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
with self.assertRaisesOpError("segment ids must be < num_segments"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentWithNumSegmentsInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]
num_segments = -2
segment_indices = [0, 1, 3, 3]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
with self.assertRaisesRegexp(
ValueError, "Cannot specify a negative value for num_segments"):
tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
@test_util.run_deprecated_v1
def testGradient(self):
shape = [10, 4]
segment_indices = [0, 1, 2, 2]
num_indices = len(segment_indices)
for tf_op in [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]:
with self.cached_session():
tf_indices, _, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtypes_lib.float64)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n)
@test_util.run_deprecated_v1
def testGradientWithEmptySegmentsAtEnd(self):
shape = [10, 4]
num_segments = 5
segment_indices = [0, 1, 2, 2]
num_indices = len(segment_indices)
for tf_op in [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]:
with self.cached_session():
tf_indices, _, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtypes_lib.float64)
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [5, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n)
def testGradientValid(self):
# Baseline for the testGradient*Invalid* methods below.
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientIndicesInvalid1(self):
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Index 10 out of range \[0, 10\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientIndicesInvalid2(self):
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, -1, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Index -1 out of range \[0, 10\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientSegmentsInvalid1(self):
tf_x, _ = self._input(
[3, 4], dtype=dtypes_lib.float32) # expecting 3 segments
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 1, 4] # 5 segments
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError("Invalid number of segments"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientSegmentsInvalid2(self):
tf_x, _ = self._input([1, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id 1 out of range \[0, 1\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientSegmentsInvalid3(self):
tf_x, _ = self._input([2, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id -1 out of range \[0, 2\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientSegmentsInvalid4(self):
tf_x, _ = self._input([0, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, -1]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id 0 out of range \[0, 0\)"):
self.evaluate(s)
class SegmentReductionOpBenchmark(test.Benchmark):
outer_dim_options = [2**x for x in range(9, 14, 2)]
ratio_options = [2**x for x in range(1, 6, 2)]
inner_dim_options = [2**x for x in range(9, 14, 2)]
  # Randomly generated sizes with less favorable memory alignment.
inner_dim_options += [
1120, 1215, 1856, 1302, 1329, 1531, 1313, 1672, 1851, 1584
]
dtype_options = [np.float32, np.float64]
options = (outer_dim_options, ratio_options, inner_dim_options, dtype_options)
# pylint: disable=g-long-lambda
op_functors = [lambda vc, vs, seg_ids:
("sorted", math_ops.segment_sum(vc, vs)),
lambda vc, vs, seg_ids:
("unsorted",
math_ops.unsorted_segment_sum(vc, vs, seg_ids[-1]+1))]
# pylint: enable=g-long-lambda
repeat = 10
def _npTypeToStr(self, t):
if t == np.float32:
return "fp32"
if t == np.float64:
return "fp64"
def _runGraph(self, op_functor, outer_dim, ratio, inner_dim, dtype):
output_outer_dim = int(outer_dim / ratio)
const = np.random.randint(5, size=(outer_dim, inner_dim))
seg_ids = np.sort(np.random.randint(output_outer_dim, size=outer_dim))
vs = variables.Variable(seg_ids.astype(np.int32))
with ops.device("/gpu:0"):
vc = variables.Variable(const.astype(dtype))
name, op = op_functor(vc, vs, seg_ids)
with session.Session() as sess:
variables.global_variables_initializer().run()
r = self.run_op_benchmark(
sess,
op,
min_iters=self.repeat,
name="_".join(
map(str,
[name, outer_dim, ratio, inner_dim,
self._npTypeToStr(dtype)])))
return name, r["wall_time"]
def benchmarkSegmentSumGPU(self):
if not test.is_gpu_available(cuda_only=True):
return
for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):
op_functor = self.op_functors[0]
with ops.Graph().as_default():
self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)
def benchmarkUnsortedSegmentSumGPU(self):
if not test.is_gpu_available(cuda_only=True):
return
for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):
op_functor = self.op_functors[1]
with ops.Graph().as_default():
self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/segment_reduction_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for broadcast rules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.platform import test
def _test_values(shape):
return np.reshape(np.cumsum(np.ones(shape), dtype=np.int32), newshape=shape)
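# A minimal sketch of the rule these tests exercise: weights must be a
# scalar, or have the same rank as values with each dimension equal to the
# corresponding values dimension or to 1. NumPy-style suffix broadcasting
# (e.g. shape (2, 4) against (3, 2, 4)) is deliberately rejected. Hedged
# illustration written against plain shape tuples; not used by the tests.
def _weights_broadcastable_sketch(weights_shape, values_shape):
  if weights_shape == ():  # scalars always broadcast
    return True
  if len(weights_shape) != len(values_shape):
    return False
  return all(w in (1, v) for w, v in zip(weights_shape, values_shape))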
class AssertBroadcastableTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def _test_valid(self, weights, values):
static_op = weights_broadcast_ops.assert_broadcastable(
weights=weights, values=values)
weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
values_placeholder = array_ops.placeholder(dtypes_lib.float32)
dynamic_op = weights_broadcast_ops.assert_broadcastable(
weights=weights_placeholder, values=values_placeholder)
with self.cached_session():
static_op.run()
dynamic_op.run(feed_dict={
weights_placeholder: weights,
values_placeholder: values,
})
@test_util.run_deprecated_v1
def testScalar(self):
self._test_valid(weights=5, values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def test1x1x1(self):
self._test_valid(
weights=np.asarray((5,)).reshape((1, 1, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def test1x1xN(self):
self._test_valid(
weights=np.asarray((5, 7, 11, 3)).reshape((1, 1, 4)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def test1xNx1(self):
self._test_valid(
weights=np.asarray((5, 11)).reshape((1, 2, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def test1xNxN(self):
self._test_valid(
weights=np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testNx1x1(self):
self._test_valid(
weights=np.asarray((5, 7, 11)).reshape((3, 1, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testNx1xN(self):
self._test_valid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3)).reshape((3, 1, 4)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testNxNxN(self):
self._test_valid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4)),
values=_test_values((3, 2, 4)))
def _test_invalid(self, weights, values):
error_msg = 'weights can not be broadcast to values'
with self.assertRaisesRegexp(ValueError, error_msg):
weights_broadcast_ops.assert_broadcastable(weights=weights, values=values)
weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
values_placeholder = array_ops.placeholder(dtypes_lib.float32)
dynamic_op = weights_broadcast_ops.assert_broadcastable(
weights=weights_placeholder, values=values_placeholder)
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.OpError, error_msg):
dynamic_op.run(feed_dict={
weights_placeholder: weights,
values_placeholder: values,
})
@test_util.run_deprecated_v1
def testInvalid1(self):
self._test_invalid(weights=np.asarray((5,)), values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalid1x1(self):
self._test_invalid(
weights=np.asarray((5,)).reshape((1, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidPrefixMatch(self):
self._test_invalid(
weights=np.asarray((5, 7, 11, 3, 2, 12)).reshape((3, 2)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidSuffixMatch(self):
self._test_invalid(
weights=np.asarray((5, 7, 11, 3, 2, 12, 7, 5)).reshape((2, 4)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidOnesExtraDim(self):
self._test_invalid(
weights=np.asarray((5,)).reshape((1, 1, 1, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidPrefixMatchExtraDim(self):
self._test_invalid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidSuffixMatchExtraDim(self):
self._test_invalid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((1, 3, 2, 4)),
values=_test_values((3, 2, 4)))
class BroadcastWeightsTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def _test_valid(self, weights, values, expected):
static_op = weights_broadcast_ops.broadcast_weights(
weights=weights, values=values)
weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
values_placeholder = array_ops.placeholder(dtypes_lib.float32)
dynamic_op = weights_broadcast_ops.broadcast_weights(
weights=weights_placeholder, values=values_placeholder)
with self.cached_session():
self.assertAllEqual(expected, self.evaluate(static_op))
self.assertAllEqual(expected, dynamic_op.eval(feed_dict={
weights_placeholder: weights,
values_placeholder: values,
}))
@test_util.run_deprecated_v1
def testScalar(self):
self._test_valid(
weights=5,
values=_test_values((3, 2, 4)),
expected=5 * np.ones((3, 2, 4)))
@test_util.run_deprecated_v1
def test1x1x1(self):
self._test_valid(
weights=np.asarray((5,)).reshape((1, 1, 1)),
values=_test_values((3, 2, 4)),
expected=5 * np.ones((3, 2, 4)))
@test_util.run_deprecated_v1
def test1x1xN(self):
weights = np.asarray((5, 7, 11, 3)).reshape((1, 1, 4))
self._test_valid(
weights=weights,
values=_test_values((3, 2, 4)),
expected=np.tile(weights, reps=(3, 2, 1)))
@test_util.run_deprecated_v1
def test1xNx1(self):
weights = np.asarray((5, 11)).reshape((1, 2, 1))
self._test_valid(
weights=weights,
values=_test_values((3, 2, 4)),
expected=np.tile(weights, reps=(3, 1, 4)))
@test_util.run_deprecated_v1
def test1xNxN(self):
weights = np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4))
self._test_valid(
weights=weights,
values=_test_values((3, 2, 4)),
expected=np.tile(weights, reps=(3, 1, 1)))
@test_util.run_deprecated_v1
def testNx1x1(self):
weights = np.asarray((5, 7, 11)).reshape((3, 1, 1))
self._test_valid(
weights=weights,
values=_test_values((3, 2, 4)),
expected=np.tile(weights, reps=(1, 2, 4)))
@test_util.run_deprecated_v1
def testNx1xN(self):
weights = np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3)).reshape((3, 1, 4))
self._test_valid(
weights=weights,
values=_test_values((3, 2, 4)),
expected=np.tile(weights, reps=(1, 2, 1)))
@test_util.run_deprecated_v1
def testNxNxN(self):
weights = np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4))
self._test_valid(
weights=weights, values=_test_values((3, 2, 4)), expected=weights)
def _test_invalid(self, weights, values):
error_msg = 'weights can not be broadcast to values'
with self.assertRaisesRegexp(ValueError, error_msg):
weights_broadcast_ops.broadcast_weights(weights=weights, values=values)
weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
values_placeholder = array_ops.placeholder(dtypes_lib.float32)
dynamic_op = weights_broadcast_ops.broadcast_weights(
weights=weights_placeholder, values=values_placeholder)
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.OpError, error_msg):
dynamic_op.eval(feed_dict={
weights_placeholder: weights,
values_placeholder: values,
})
@test_util.run_deprecated_v1
def testInvalid1(self):
self._test_invalid(weights=np.asarray((5,)), values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalid1x1(self):
self._test_invalid(
weights=np.asarray((5,)).reshape((1, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidPrefixMatch(self):
self._test_invalid(
weights=np.asarray((5, 7, 11, 3, 2, 12)).reshape((3, 2)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidSuffixMatch(self):
self._test_invalid(
weights=np.asarray((5, 7, 11, 3, 2, 12, 7, 5)).reshape((2, 4)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidOnesExtraDim(self):
self._test_invalid(
weights=np.asarray((5,)).reshape((1, 1, 1, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidPrefixMatchExtraDim(self):
self._test_invalid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4, 1)),
values=_test_values((3, 2, 4)))
@test_util.run_deprecated_v1
def testInvalidSuffixMatchExtraDim(self):
self._test_invalid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((1, 3, 2, 4)),
values=_test_values((3, 2, 4)))
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/weights_broadcast_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for BatchToSpace op.
Additional tests are included in spacetobatch_op_test.py, where the BatchToSpace
op is tested in tandem with its reverse SpaceToBatch op.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class PythonOpImpl(object):
@staticmethod
def batch_to_space(*args, **kwargs):
return array_ops.batch_to_space(*args, **kwargs)
class CppOpImpl(object):
@staticmethod
def batch_to_space(*args, **kwargs):
return gen_array_ops.batch_to_space(*args, **kwargs)
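# A minimal NumPy sketch of what batch_to_space computes for NHWC input:
# the batch dimension is interpreted as [block_size, block_size, batch] and
# folded back into the spatial grid, after which `crops` trims the spatial
# borders. Hedged illustration only; the real op is exercised through the
# classes below.
def _np_batch_to_space_sketch(x, crops, block_size):
  b, h, w, d = x.shape
  out_b = b // (block_size * block_size)
  y = x.reshape([block_size, block_size, out_b, h, w, d])
  y = y.transpose([2, 3, 0, 4, 1, 5])  # interleave blocks back into space
  y = y.reshape([out_b, h * block_size, w * block_size, d])
  (top, bottom), (left, right) = crops
  return y[:, top:y.shape[1] - bottom, left:y.shape[2] - right, :]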
class BatchToSpaceDepthToSpace(test.TestCase, PythonOpImpl):
# Verifies that: batch_to_space(x) = transpose(depth_to_space(transpose(x)))
@test_util.run_deprecated_v1
def testDepthToSpaceTranspose(self):
x = np.arange(20 * 5 * 8 * 7, dtype=np.float32).reshape([20, 5, 8, 7])
block_size = 2
for crops_dtype in [dtypes.int64, dtypes.int32]:
crops = array_ops.zeros((2, 2), dtype=crops_dtype)
y1 = self.batch_to_space(x, crops, block_size=block_size)
y2 = array_ops.transpose(
array_ops.depth_to_space(
array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size),
[3, 1, 2, 0])
with self.cached_session():
self.assertAllEqual(y1.eval(), y2.eval())
class BatchToSpaceDepthToSpaceCpp(BatchToSpaceDepthToSpace, CppOpImpl):
pass
class BatchToSpaceErrorHandlingTest(test.TestCase, PythonOpImpl):
@test_util.run_deprecated_v1
def testInputWrongDimMissingBatch(self):
# The input is missing the first dimension ("batch")
x_np = [[[1], [2]], [[3], [4]]]
crops = np.zeros((2, 2), dtype=np.int32)
block_size = 2
with self.assertRaises(ValueError):
_ = self.batch_to_space(x_np, crops, block_size)
@test_util.run_deprecated_v1
def testBlockSize0(self):
# The block size is 0.
x_np = [[[[1], [2]], [[3], [4]]]]
crops = np.zeros((2, 2), dtype=np.int32)
block_size = 0
with self.assertRaises(ValueError):
out_tf = self.batch_to_space(x_np, crops, block_size)
out_tf.eval()
@test_util.run_deprecated_v1
def testBlockSizeOne(self):
# The block size is 1. The block size needs to be > 1.
x_np = [[[[1], [2]], [[3], [4]]]]
crops = np.zeros((2, 2), dtype=np.int32)
block_size = 1
with self.assertRaises(ValueError):
out_tf = self.batch_to_space(x_np, crops, block_size)
out_tf.eval()
@test_util.run_deprecated_v1
def testBlockSizeLarger(self):
# The block size is too large for this input.
x_np = [[[[1], [2]], [[3], [4]]]]
crops = np.zeros((2, 2), dtype=np.int32)
block_size = 10
with self.assertRaises(ValueError):
out_tf = self.batch_to_space(x_np, crops, block_size)
out_tf.eval()
@test_util.run_deprecated_v1
def testBlockSizeSquaredNotDivisibleBatch(self):
# The block size squared does not divide the batch.
x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
crops = np.zeros((2, 2), dtype=np.int32)
block_size = 3
with self.assertRaises(ValueError):
_ = self.batch_to_space(x_np, crops, block_size)
@test_util.run_deprecated_v1
def testUnknownShape(self):
t = self.batch_to_space(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32),
block_size=4)
self.assertEqual(4, t.get_shape().ndims)
class BatchToSpaceErrorHandlingCppTest(BatchToSpaceErrorHandlingTest,
CppOpImpl):
pass
class BatchToSpaceNDErrorHandlingTest(test.TestCase):
def _testStaticShape(self, input_shape, block_shape, paddings, error):
block_shape = np.array(block_shape)
paddings = np.array(paddings)
# Try with sizes known at graph construction time.
with self.assertRaises(error):
_ = array_ops.batch_to_space_nd(
np.zeros(input_shape, np.float32), block_shape, paddings)
def _testDynamicShape(self, input_shape, block_shape, paddings):
block_shape = np.array(block_shape)
paddings = np.array(paddings)
# Try with sizes unknown at graph construction time.
input_placeholder = array_ops.placeholder(dtypes.float32)
block_shape_placeholder = array_ops.placeholder(
dtypes.int32, shape=block_shape.shape)
paddings_placeholder = array_ops.placeholder(dtypes.int32)
t = array_ops.batch_to_space_nd(input_placeholder, block_shape_placeholder,
paddings_placeholder)
with self.assertRaises(ValueError):
_ = t.eval({
input_placeholder: np.zeros(input_shape, np.float32),
block_shape_placeholder: block_shape,
paddings_placeholder: paddings
})
def _testShape(self, input_shape, block_shape, paddings, error):
self._testStaticShape(input_shape, block_shape, paddings, error)
self._testDynamicShape(input_shape, block_shape, paddings)
@test_util.run_deprecated_v1
def testInputWrongDimMissingBatch(self):
self._testShape([2, 2], [2, 2], [[0, 0], [0, 0]], ValueError)
self._testShape([2, 2, 3], [2, 2, 3], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testBlockSize0(self):
# The block size is 0.
self._testShape([1, 2, 2, 1], [0, 1], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testBlockSizeNegative(self):
self._testShape([1, 2, 2, 1], [-1, 1], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testNegativePadding(self):
self._testShape([1, 2, 2], [1, 1], [[0, -1], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testCropTooLarge(self):
# The amount to crop exceeds the padded size.
self._testShape([1 * 2 * 2, 2, 3, 1], [2, 2], [[3, 2], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testBlockSizeSquaredNotDivisibleBatch(self):
# The batch dimension is not divisible by the product of the block_shape.
self._testShape([3, 1, 1, 1], [2, 3], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testUnknownShape(self):
# Verify that input shape and paddings shape can be unknown.
_ = array_ops.batch_to_space_nd(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
# Only number of input dimensions is known.
t = array_ops.batch_to_space_nd(
array_ops.placeholder(
dtypes.float32, shape=(None, None, None, None)),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
self.assertEqual(4, t.get_shape().ndims)
# Dimensions are partially known.
t = array_ops.batch_to_space_nd(
array_ops.placeholder(
dtypes.float32, shape=(None, None, None, 2)),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
t = array_ops.batch_to_space_nd(
array_ops.placeholder(
dtypes.float32, shape=(3 * 2 * 3, None, None, 2)), [2, 3],
array_ops.placeholder(dtypes.int32))
self.assertEqual([3, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
t = array_ops.batch_to_space_nd(
array_ops.placeholder(
dtypes.float32, shape=(3 * 2 * 3, None, 2, 2)), [2, 3],
[[1, 1], [0, 1]])
self.assertEqual([3, None, 5, 2], t.get_shape().as_list())
# Dimensions are fully known.
t = array_ops.batch_to_space_nd(
array_ops.placeholder(
dtypes.float32, shape=(3 * 2 * 3, 2, 1, 2)), [2, 3],
[[1, 1], [0, 0]])
self.assertEqual([3, 2, 3, 2], t.get_shape().as_list())
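# For reference, the static shape arithmetic the partial-shape cases above
# verify: batch is divided by the block product, and each spatial dimension
# is multiplied by its block size and then reduced by the total crop amount.
# Hedged sketch of the inference rule for fully known dimensions only; the
# op itself also handles unknown (None) dimensions.
def _batch_to_space_nd_out_shape_sketch(input_shape, block_shape, crops):
  out = [input_shape[0] // int(np.prod(block_shape))]
  for size, block, (crop_beg, crop_end) in zip(
      input_shape[1:], block_shape, crops):
    out.append(size * block - crop_beg - crop_end)
  out.extend(input_shape[1 + len(block_shape):])
  return out  # e.g. ([18, 2, 1, 2], [2, 3], [[1, 1], [0, 0]]) -> [3, 2, 3, 2]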
class BatchToSpaceGradientTest(test.TestCase, PythonOpImpl):
# Check the gradients.
def _checkGrad(self, x, crops, block_size):
assert 4 == x.ndim
with self.cached_session():
tf_x = ops.convert_to_tensor(x)
tf_y = self.batch_to_space(tf_x, crops, block_size)
epsilon = 1e-5
      (x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
  # Tests the gradient of batch_to_space for x, a four-dimensional
  # tensor of shape [b * block_size * block_size, h, w, d].
def _compare(self, b, h, w, d, block_size, crop_beg, crop_end):
block_size_sq = block_size * block_size
x = np.random.normal(0, 1, b * h * w * d *
block_size_sq).astype(np.float32).reshape(
[b * block_size * block_size, h, w, d])
crops = np.array(
[[crop_beg, crop_end], [crop_beg, crop_end]], dtype=np.int32)
self._checkGrad(x, crops, block_size)
  # Don't use very large numbers as dimensions here, as the result is a
  # tensor whose size is the Cartesian product of the dimensions.
@test_util.run_deprecated_v1
def testSmall(self):
block_size = 2
crop_beg = 0
crop_end = 0
self._compare(1, 2, 3, 5, block_size, crop_beg, crop_end)
@test_util.run_deprecated_v1
def testSmall2(self):
block_size = 2
crop_beg = 0
crop_end = 0
self._compare(2, 4, 3, 2, block_size, crop_beg, crop_end)
@test_util.run_deprecated_v1
def testSmallCrop1x1(self):
block_size = 2
crop_beg = 1
crop_end = 1
self._compare(1, 2, 3, 5, block_size, crop_beg, crop_end)
class BatchToSpaceGradientCppTest(BatchToSpaceGradientTest, CppOpImpl):
pass
class BatchToSpaceNDGradientTest(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_shape, crops, crops_dtype):
block_shape = np.array(block_shape)
crops = constant_op.constant(
np.array(crops).reshape((len(block_shape), 2)), crops_dtype)
with self.cached_session():
tf_x = ops.convert_to_tensor(x)
tf_y = array_ops.batch_to_space_nd(tf_x, block_shape, crops)
epsilon = 1e-5
      (x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
def _compare(self, input_shape, block_shape, crops, crops_dtype):
input_shape = list(input_shape)
input_shape[0] *= np.prod(block_shape)
x = np.random.normal(
0, 1, np.prod(input_shape)).astype(np.float32).reshape(input_shape)
self._checkGrad(x, block_shape, crops, crops_dtype)
  # Don't use very large numbers as dimensions here, as the result is a
  # tensor whose size is the Cartesian product of the dimensions.
@test_util.run_deprecated_v1
def testSmall(self):
for dtype in [dtypes.int64, dtypes.int32]:
self._compare([1, 2, 3, 5], [2, 2], [[0, 0], [0, 0]], dtype)
@test_util.run_deprecated_v1
def testSmall2(self):
for dtype in [dtypes.int64, dtypes.int32]:
self._compare([2, 4, 3, 2], [2, 2], [[0, 0], [0, 0]], dtype)
@test_util.run_deprecated_v1
def testSmallCrop1x1(self):
for dtype in [dtypes.int64, dtypes.int32]:
self._compare([1, 2, 3, 5], [2, 2], [[1, 1], [1, 1]], dtype)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/batchtospace_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Softplus and SoftplusGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SoftplusTest(test.TestCase):
def _npSoftplus(self, np_features):
np_features = np.asarray(np_features)
zero = np.asarray(0).astype(np_features.dtype)
return np.logaddexp(zero, np_features)
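  # np.logaddexp(0, x) computes log(exp(0) + exp(x)) = log(1 + exp(x)), i.e.
  # softplus, in a numerically stable way: for large positive x it returns
  # roughly x instead of overflowing in exp, and for very negative x it
  # returns roughly exp(x) instead of underflowing to log(1).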
def _testSoftplus(self, np_features, use_gpu=False):
np_softplus = self._npSoftplus(np_features)
with self.cached_session(use_gpu=use_gpu):
softplus = nn_ops.softplus(np_features)
tf_softplus = self.evaluate(softplus)
self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
self.assertTrue(np.all(tf_softplus > 0))
self.assertShapeEqual(np_softplus, softplus)
def testNumbers(self):
for t in [np.float16, np.float32, np.float64]:
self._testSoftplus(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=False)
self._testSoftplus(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
log_eps = np.log(np.finfo(t).eps)
one = t(1)
ten = t(10)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=False)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
              log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=True)
@test_util.run_deprecated_v1
def testGradient(self):
with self.cached_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("softplus (float) gradient err = ", err)
self.assertLess(err, 1e-4)
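  # Analytically, d/dx softplus(x) = sigmoid(x), so the finite-difference
  # checks above and below effectively validate the sigmoid-based analytic
  # gradient (and its higher-order gradients).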
@test_util.run_deprecated_v1
def testGradGrad(self):
with self.cached_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
(grad,) = gradients_impl.gradients(y, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], grad, [2, 5], x_init_value=x_init)
print("softplus (float) gradient of gradient err = ", err)
self.assertLess(err, 5e-5)
@test_util.run_deprecated_v1
def testGradGradGrad(self):
with self.cached_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
(grad,) = gradients_impl.gradients(y, x)
(grad_grad,) = gradients_impl.gradients(grad, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], grad_grad, [2, 5], x_init_value=x_init)
print("softplus (float) third-order gradient err = ", err)
self.assertLess(err, 5e-5)
@test_util.run_deprecated_v1
def testNoInts(self):
with self.cached_session():
with self.assertRaisesRegexp(
TypeError,
"'features' has DataType int32 not in list of allowed values"):
nn_ops.softplus(constant_op.constant(42)).eval()
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/softplus_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numbers
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# The maximum input rank to test.
_MAX_RANK = 5
def _powerset(iterable):
"""Helper for generating all possible reduction_axes arguments.
Example:
powerset([0,1,2]): () (0,) (1,) (2,) (0,1) (0,2) (1,2) (0,1,2)
Args:
iterable: An iterable of items to generate the powerset of.
Returns:
The powerset of all items in iterable.
"""
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1))
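# For reference (not used directly by the tests): list(_powerset([0, 1]))
# evaluates to [(), (0,), (1,), (0, 1)], so _compareAllAxes below exercises
# every axis combination, including the empty reduction.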
class ReducedShapeTest(test.TestCase):
def _check(self, shape, axes, result):
output = math_ops.reduced_shape(shape, axes=axes)
self.assertAllEqual(output.eval(), result)
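  # reduced_shape returns the shape the output would have with keepdims=True:
  # the rank is preserved and every reduced axis collapses to 1, e.g.
  # reduced_shape([5, 3], [0]) -> [1, 3].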
@test_util.run_deprecated_v1
def testSimple(self):
with self.cached_session():
self._check([3], [], [3])
self._check([3], [0], [1])
self._check([5, 3], [], [5, 3])
self._check([5, 3], [0], [1, 3])
self._check([5, 3], [1], [5, 1])
self._check([5, 3], [0, 1], [1, 1])
@test_util.run_deprecated_v1
def testZeros(self):
"""Check that reduced_shape does the right thing with zero dimensions."""
with self.cached_session():
self._check([0], [], [0])
self._check([0], [0], [1])
self._check([0, 3], [], [0, 3])
self._check([0, 3], [0], [1, 3])
self._check([0, 3], [1], [0, 1])
self._check([0, 3], [0, 1], [1, 1])
self._check([3, 0], [], [3, 0])
self._check([3, 0], [0], [1, 0])
self._check([3, 0], [1], [3, 1])
self._check([3, 0], [0, 1], [1, 1])
@test_util.run_deprecated_v1
def testNegAxes(self):
with self.cached_session():
self._check([10, 10, 10], [-1], [10, 10, 1])
self._check([10, 10, 10], [-1, 2], [10, 10, 1])
self._check([10, 10, 10], [-1, -1], [10, 10, 1])
self._check([10, 10, 10], [-1, 0], [1, 10, 1])
self._check([10, 10, 10], [-3], [1, 10, 10])
class ReductionUnknownShape(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
with self.cached_session():
for dtype, reductions in [(dtypes.float32,
(math_ops.reduce_sum, math_ops.reduce_mean,
math_ops.reduce_prod, math_ops.reduce_max,
math_ops.reduce_min,
math_ops.reduce_euclidean_norm)),
(dtypes.bool, (math_ops.reduce_all,
math_ops.reduce_any))]:
for reduction in reductions:
x = array_ops.placeholder(
dtype=dtype, shape=None) # Some tensor w/ unknown shape.
y = reduction(x)
self.assertEqual(y.shape, ())
class BaseReductionTest(test.TestCase):
def _tf_reduce(self, x, reduction_axes, keepdims):
raise NotImplementedError()
def _np_reduce(self, x, reduction_axes, keepdims):
raise NotImplementedError()
def _makeIncremental(self, shape, dtype):
data = np.arange(np.prod(shape)).reshape(shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data -= 2j * data
return data
def _makeRandom(self, shape, dtype):
data = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data -= 2j * data
return data
def _compare(self, x, reduction_axes, keepdims, feed_dict=None):
np_ans = self._np_reduce(x, reduction_axes, keepdims)
with self.cached_session(use_gpu=True) as sess:
tf_ans = self._tf_reduce(x, reduction_axes, keepdims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes, feed_dict=None):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, keepdims=False, feed_dict=feed_dict)
self._compare(x, reduction_axes, keepdims=True, feed_dict=feed_dict)
def _compareAllAxes(self, x, feed_dict=None):
self._compareAll(x, None)
for axes in _powerset(range(x.ndim)):
self._compareAll(x, axes, feed_dict)
def _compareGradient(self, x, reduction_axes, rtol=1e-8, atol=1e-8):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareGradient(x, reduction_axes[0], rtol=rtol, atol=atol)
with self.cached_session(use_gpu=True):
t = ops.convert_to_tensor(x)
su = self._tf_reduce(t, reduction_axes, False)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, x.shape, su, su.get_shape().as_list(), x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=rtol, atol=atol)
def _compareGradientAxes(self, x, rtol=1e-8, atol=1e-8):
self._compareGradient(x, None, rtol=rtol, atol=atol)
self._compareGradient(x, [], rtol=rtol, atol=atol)
self._compareGradient(x, 0, rtol=rtol, atol=atol)
self._compareGradient(x, [1], rtol=rtol, atol=atol)
self._compareGradient(x, [2], rtol=rtol, atol=atol)
self._compareGradient(x, [1, 2], rtol=rtol, atol=atol)
self._compareGradient(x, [0, 1, 2, 3], rtol=rtol, atol=atol)
class SumReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_sum(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
return np.sum(x, axis=reduction_axes, keepdims=keepdims)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_sum([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat16(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float16)
self._compareAllAxes(np_arr)
    # Test that the mean doesn't overflow. Run only on GPU, since the GPU
    # kernel has the more accurate implementation.
if not test.is_gpu_available():
return
arr = np.ones([68000], dtype=np.float16)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_arr = variables.Variable(arr)
variables.global_variables_initializer().run()
tf_mean = math_ops.reduce_mean(tf_arr, 0, False)
tf_out_mean = self.evaluate(tf_mean)
self.assertAllClose(tf_out_mean, 1.)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
for _ in range(10):
size_x = int(2**np.random.uniform(0, 15))
size_y = int(2**np.random.uniform(0, 15))
if size_x * size_y > 1e7:
size_y = int(1e7 / size_x)
arr = np.ones([size_x, size_y], dtype=np.float32)
col_sum = np.sum(arr, axis=0)
row_sum = np.sum(arr, axis=1)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_row_sum = self._tf_reduce(arr, 1, False)
tf_col_sum = self._tf_reduce(arr, 0, False)
tf_out_row, tf_out_col = self.evaluate([tf_row_sum, tf_col_sum])
self.assertAllClose(col_sum, tf_out_col)
self.assertAllClose(row_sum, tf_out_row)
for size_x in [1, 3, 16, 33]:
for size_y in [1, 3, 16, 33]:
for size_z in [1, 3, 16, 33]:
arr = np.ones([size_x, size_y, size_z], dtype=np.float32)
sum_y = np.sum(arr, axis=1)
sum_xz = np.sum(arr, axis=(0, 2))
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_sum_xz = self._tf_reduce(arr, [0, 2], False)
tf_sum_y = self._tf_reduce(arr, 1, False)
tf_out_sum_xz, tf_out_sum_y = self.evaluate([tf_sum_xz, tf_sum_y])
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testInvalidIndex(self):
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = ops.convert_to_tensor(np_arr)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [-3])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [2])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [0, 2])
@test_util.run_deprecated_v1
def testPartialShapes(self):
np.random.seed(1618)
# Input shape is unknown.
reduction_axes = [1, 2]
c_unknown = array_ops.placeholder(dtypes.float32)
s_unknown = math_ops.reduce_sum(c_unknown, reduction_axes)
self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_unknown: np_input})
# Input shape only has known rank.
c_known_rank = array_ops.placeholder(dtypes.float32)
c_known_rank.set_shape(tensor_shape.unknown_shape(rank=3))
s_known_rank = math_ops.reduce_sum(
c_known_rank, reduction_axes, keepdims=True)
self.assertEqual(3, s_known_rank.get_shape().rank)
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_known_rank: np_input})
# Reduction indices are unknown.
unknown_indices = array_ops.placeholder(dtypes.int32)
c_unknown_indices = constant_op.constant([[10.0], [20.0]])
s_unknown_indices = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keepdims=False)
self.assertEqual(tensor_shape.unknown_shape(),
s_unknown_indices.get_shape())
s_unknown_indices_keep = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keepdims=True)
self.assertEqual(2, s_unknown_indices_keep.get_shape().rank)
@test_util.run_deprecated_v1
def testWrongShapeForReductionIndices(self):
reduction_axes = [[1], [2]]
c_unknown = array_ops.placeholder(dtypes.float32)
with self.assertRaisesWithPredicateMatch(ValueError,
".*must be at most rank 1.*"):
math_ops.reduce_sum(c_unknown, reduction_axes)
  # TODO: should int64 input values be covered here as well?
@test_util.run_deprecated_v1
def testGradient(self):
for dtype in [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]:
x = self._makeIncremental([2, 3, 4, 2], dtype)
self._compareGradientAxes(x)
@test_util.run_deprecated_v1
def testHighRank(self):
# Do a bunch of random high dimensional reductions
np.random.seed(42)
for _ in range(20):
rank = np.random.randint(4, 10 + 1)
axes, = np.nonzero(np.random.randint(2, size=rank))
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
self._compareAll(data, axes)
# Check some particular axis patterns
for rank in 4, 7, 10:
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
for axes in ([], np.arange(rank), np.arange(0, rank, 2),
np.arange(1, rank, 2)):
self._compareAll(data, axes)
@test_util.run_deprecated_v1
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session(use_gpu=True):
x = array_ops.zeros([0, 3])
y = math_ops.reduce_sum(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_sum(x, [0])
self.assertAllEqual(y.eval(), np.zeros(9938))
class MeanReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_mean(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
elif isinstance(reduction_axes, numbers.Integral):
reduction_axes = (reduction_axes,)
if reduction_axes is None:
count = np.prod(x.shape)
else:
count = np.prod([x.shape[ax] for ax in reduction_axes])
# np.mean automatically converts integer inputs to float, while TensorFlow's
# reduce_mean does not. For integer inputs, we emulate TensorFlow's behavior
# using np.sum and truncating division.
np_sum = np.sum(x, axis=reduction_axes, keepdims=keepdims)
if np.issubdtype(x.dtype, np.integer):
return np_sum // count
return np_sum / count
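  # Worked example of the integer behavior emulated above: for x = [1, 2, 4]
  # reduced over axis 0, np.mean gives 2.333..., whereas the truncating
  # division np.sum(x) // 3 gives 2, matching TensorFlow's integer
  # reduce_mean as described in the comment above.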
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testGradient(self):
s = [2, 3, 4, 2]
for dtype in [dtypes.float32, dtypes.float64]:
x = self._makeIncremental(s, dtype)
self._compareGradientAxes(x, rtol=1e-3, atol=1e-3)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session(use_gpu=True):
x = array_ops.zeros([0, 3])
y = math_ops.reduce_mean(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_mean(x, [0]).eval()
self.assertEqual(y.shape, (9938,))
self.assertTrue(np.all(np.isnan(y)))
class EuclideanNormReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_euclidean_norm(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
if reduction_axes is None or reduction_axes != tuple():
np_fro = np.sqrt(
np.sum(x * np.conj(x), axis=reduction_axes, keepdims=keepdims))
else:
np_fro = x
if np.issubdtype(x.dtype, np.integer):
np_fro = np.floor(np_fro)
return np_fro
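  # The reference above computes sqrt(sum(x * conj(x))), the Euclidean
  # (Frobenius) norm over the reduced axes; np.floor mimics the truncation
  # that occurs when the op's output dtype is integral.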
@test_util.run_deprecated_v1
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True):
        v = math_ops.reduce_euclidean_norm(
            [0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
  @test_util.run_deprecated_v1
  def testDegenerate(self):
    with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_euclidean_norm(x, [0]).eval()
self.assertEqual(y.shape, (9938,))
self.assertAllEqual(y, np.zeros(9938))
class ProdReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_prod(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
return np.prod(x, axis=reduction_axes, keepdims=keepdims)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_prod([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
    # NumPy automatically promotes the accumulator of np.prod from int32 to
    # int64, so NumPy does not overflow an int32 np.prod where TensorFlow
    # does. To avoid overflow, divide the incremental int32 array by 2.
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32) / 2
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testGradientWithZeros(self):
s = [2, 3, 4, 2]
x = self._makeIncremental(s, dtypes.float32) / 20.
# No zeros in input
self._compareGradientAxes(x, rtol=1e-3, atol=1e-3)
# Zero at beginning
x1 = x.copy()
x1[:, :, 0, :] = 0
self._compareGradientAxes(x1, rtol=1e-3, atol=1e-3)
# Zero at end
x2 = x.copy()
x2[:, :, -1, :] = 0
self._compareGradientAxes(x2, rtol=1e-3, atol=1e-3)
# Zero in middle
x3 = x.copy()
x3[:, :, 2, :] = 0
self._compareGradientAxes(x3, rtol=1e-3, atol=1e-3)
# All zeros
x4 = x.copy()
x4[:, :, :, :] = 0
self._compareGradientAxes(x4, rtol=1e-3, atol=1e-3)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session(use_gpu=True):
x = array_ops.zeros([0, 3])
y = math_ops.reduce_prod(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_prod(x, [0])
self.assertAllEqual(y.eval(), np.ones(9938))
class MinReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amin(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amin(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_min(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_min([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(1, 31).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(1, 31).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
@test_util.run_deprecated_v1
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [1])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.cached_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_min(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class MaxReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amax(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amax(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_max(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_max([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testInt64Reduce3D(self):
# Create a 3D array of int64s and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.int64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
@test_util.run_deprecated_v1
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [1])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.cached_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_max(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class AllReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.all(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.all(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_all(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.session(use_gpu=True) as sess:
v = math_ops.reduce_all([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, True)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.1).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
class AnyReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.any(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.any(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_any(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.session(use_gpu=True) as sess:
v = math_ops.reduce_any([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, True)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.9).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
class CountNonzeroReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False, zero=0,
feed_dict=None):
np_ans = (x != zero).astype(np.int32)
if reduction_axes is None:
np_ans = np.sum(np_ans, keepdims=keepdims)
else:
reduction_axes = np.array(reduction_axes).astype(np.int32)
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu) as sess:
tf_ans = math_ops.count_nonzero(x, reduction_axes, keepdims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes, feed_dict=None):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, False, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, False, use_gpu=False, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=False, feed_dict=feed_dict)
@test_util.run_deprecated_v1
def testBoolReduce1D(self):
# Create a 1D array of floats
np_arr = np.asarray([False, False, True, False, False, True])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
@test_util.run_deprecated_v1
def testFloatReduce1D(self):
# Create a 1D array of floats
np_arr = np.asarray([0.0, 1.0, -1.0, 0.0, 0.0, 3.0]).astype(np.float32)
self._compareAll(np_arr, [0])
@test_util.run_deprecated_v1
def testFloatReduce4D(self):
# Create a 4D array of floats and reduce across some
# dimensions
np_arr = np.floor(np.arange(0.0, 210.0) / 100.0).reshape([2, 3, 5,
7]).astype(
np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
# Need specialization for reduce(4D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
@test_util.run_deprecated_v1
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
@test_util.run_deprecated_v1
def testDegenerate(self):
for use_gpu in False, True:
with self.cached_session(use_gpu=use_gpu):
for dtype in (dtypes.bool,):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.count_nonzero(x, [0])
self.assertAllEqual(y.eval(), np.zeros(9938))
def testStringReduce(self):
# Test case for GitHub issue 18712
with self.cached_session() as sess:
v = math_ops.count_nonzero(constant_op.constant(["test"]))
self.assertAllClose(self.evaluate(v), 1)
@test_util.run_deprecated_v1
def testStringReduce1D(self):
# Create a 1D array of strings
x = np.asarray(["", "", "a", "", "", "b"])
self._compare(x, None, keepdims=False, zero=np.str(""))
self._compare(x, [], keepdims=False, zero=np.str(""))
self._compare(x, [0], keepdims=False, zero=np.str(""))
self._compare(x, None, keepdims=True, zero=np.str(""))
self._compare(x, [], keepdims=True, zero=np.str(""))
self._compare(x, [0], keepdims=True, zero=np.str(""))
@test_util.run_deprecated_v1
def testStringReduce2D(self):
# Create a 2D array of strings
x = np.asarray([["", "", "a", "", "", "b"],
["", "c", "", "d", "", ""],
["e", "", "f", "", "", ""]])
self._compare(x, None, keepdims=False, zero=np.str(""))
self._compare(x, [], keepdims=False, zero=np.str(""))
self._compare(x, [0], keepdims=False, zero=np.str(""))
self._compare(x, [1], keepdims=False, zero=np.str(""))
self._compare(x, [0, 1], keepdims=False, zero=np.str(""))
self._compare(x, None, keepdims=True, zero=np.str(""))
self._compare(x, [], keepdims=True, zero=np.str(""))
self._compare(x, [0], keepdims=True, zero=np.str(""))
self._compare(x, [0, 1], keepdims=True, zero=np.str(""))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/reduction_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.gather_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class GatherNdTest(test.TestCase):
def _testSimpleDtype(self, dtype):
with self.cached_session(use_gpu=True):
params = constant_op.constant(np.array([8, 1, 2, 3, 7, 5], dtype=dtype))
indices = constant_op.constant([[4], [4], [0]])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertAllEqual(np.array([7, 7, 8], dtype=dtype), gather_nd_val)
self.assertEqual([3], gather_nd_t.get_shape())
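  # In NumPy terms (an informal equivalence, not part of the API): with
  # indices of shape [3, 1] as above, gather_nd(params, [[4], [4], [0]])
  # selects params[4], params[4], params[0], i.e. params[[4, 4, 0]].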
def testSimpleDtype(self):
self._testSimpleDtype(np.float32)
self._testSimpleDtype(np.float64)
self._testSimpleDtype(np.int32)
self._testSimpleDtype(np.int64)
self._testSimpleDtype(np.complex64)
self._testSimpleDtype(np.complex128)
self._testSimpleDtype("|S") # byte strings in python2 + 3
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123337890") # Error messages differ
def testEmptyIndicesAndParamsOKButJustEmptyParamsFails(self):
with self.session(use_gpu=True):
params = np.ones((3, 3), dtype=np.float32)
indices_empty = np.empty((0, 2), dtype=np.int32)
gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)
gather_nd_ok_val = self.evaluate(gather_nd_ok_t)
self.assertEqual([0], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
indices_empty = np.empty((0, 1), dtype=np.int32)
gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)
gather_nd_ok_val = self.evaluate(gather_nd_ok_t)
self.assertEqual([0, 3], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0, 3), dtype=np.float32), gather_nd_ok_val)
params_empty = np.empty((0, 3), dtype=np.float32)
indices_empty = np.empty((0, 2), dtype=np.int32)
gather_nd_ok_t = array_ops.gather_nd(params_empty, indices_empty)
gather_nd_ok_val = self.evaluate(gather_nd_ok_t)
self.assertEqual([0], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
params_empty = np.empty((0, 3), dtype=np.float32)
indices_nonempty = np.zeros((1, 2), dtype=np.int32)
gather_nd_break_t = array_ops.gather_nd(params_empty, indices_nonempty)
with self.assertRaisesOpError(
r"Requested more than 0 entries, but params is empty."):
self.evaluate(gather_nd_break_t)
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
def testIndexScalar(self):
with self.session(use_gpu=True):
params = np.array(
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([4, 1])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([], gather_nd_t.get_shape())
self.assertAllEqual(np.array(7), gather_nd_val)
def testParamsRankLargerThanIndexIndexScalarSlices(self):
with self.session(use_gpu=True):
params = np.array(
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([4])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([2], gather_nd_t.get_shape())
self.assertAllEqual(np.array([-7, 7]), gather_nd_val)
def testParamsRankLargerThanIndexSlices(self):
with self.session(use_gpu=True):
params = np.array(
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([[4], [4], [0]])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([3, 2], gather_nd_t.get_shape())
self.assertAllEqual(np.array([[-7, 7], [-7, 7], [-8, 8]]), gather_nd_val)
def testHigherRankParamsLargerThanIndexSlices(self):
with self.session(use_gpu=True):
params = np.array(
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
dtype=np.float32).T
params_t = constant_op.constant(params)
indices = constant_op.constant([[4], [4], [0]])
gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([3, 2, 2], gather_nd_t.get_shape())
self.assertAllEqual(params[[4, 4, 0]], gather_nd_val)
def testEmptyIndicesLastRankMeansCopyEntireTensor(self):
with self.session(use_gpu=True):
params = np.array(
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
dtype=np.float32).T
params_t = constant_op.constant(params)
indices = constant_op.constant(
[[], []], dtype=dtypes.int32) # Size (2, 0)
gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([2, 6, 2, 2], gather_nd_t.get_shape())
self.assertAllEqual(
np.vstack((params[np.newaxis, :], params[np.newaxis, :])),
gather_nd_val)
def testHigherRankParamsAndIndicesLargerThanIndexSlices(self):
with self.session(use_gpu=True):
params = np.array(
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
dtype=np.float32).T
params_t = constant_op.constant(params)
indices = constant_op.constant([[[3], [2], [1]], [[4], [4], [0]]])
gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([2, 3, 2, 2], gather_nd_t.get_shape())
self.assertAllEqual(params[[3, 2, 1, 4, 4, 0]].reshape(2, 3, 2, 2),
gather_nd_val)
def testHigherRankParams(self):
with self.session(use_gpu=True):
shape = (10, 20, 5, 1, 17)
params = np.random.rand(*shape)
indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = self.evaluate(gather_nd_t)
expected = params[tuple(indices.T)]
self.assertAllEqual(expected, gather_nd_val)
self.assertEqual([2000], gather_nd_t.get_shape())
def testHigherRankParamsAndIndices(self):
with self.session(use_gpu=True):
shape = (10, 20, 5, 1, 17)
params = np.random.rand(*shape)
indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
indices_reshaped = indices.reshape([10, 10, 20, 5])
gather_nd_t = array_ops.gather_nd(params, indices_reshaped)
gather_nd_val = self.evaluate(gather_nd_t)
expected = params[tuple(indices.T)]
self.assertAllEqual(expected.reshape([10, 10, 20]), gather_nd_val)
self.assertEqual([10, 10, 20], gather_nd_t.get_shape())
def assertIndexedSlices(self, t):
self.assertIsInstance(t, ops.IndexedSlices)
@test_util.run_deprecated_v1
def testUnknownIndices(self):
params = constant_op.constant([[0, 1, 2]])
indices = array_ops.placeholder(dtypes.int32)
gather_nd_t = array_ops.gather_nd(params, indices)
shape = gather_nd_t.get_shape()
self.assertEqual(None, shape.ndims)
self.assertEqual(None, tensor_shape.dimension_value(shape[0]))
@test_util.run_deprecated_v1
def testBadIndicesCPU(self):
with self.session(use_gpu=False):
params = [0, 1, 2]
indices = [[[0], [7]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,1\] = \[7\] does not index into param shape \[3\]"):
self.evaluate(gather_nd)
def _disabledTestBadIndicesGPU(self):
    # TODO: disabled due to different behavior on GPU and CPU.
    # On GPU, bad indices do not raise an error but instead fetch 0 values.
if not test.is_gpu_available():
return
with self.session(use_gpu=True):
params = [0, 1, 2]
indices = [[[0], [7]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,1\] = \[7\] does not index into param shape \[3\]"):
self.evaluate(gather_nd)
@test_util.run_deprecated_v1
def testBadIndicesWithSlicesCPU(self):
with self.session(use_gpu=False):
params = [[0, 1, 2]]
indices = [[[0], [0], [1]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,2\] = \[1\] does not index into param shape \[1,3\]"):
self.evaluate(gather_nd)
def _disabledTestBadIndicesWithSlicesGPU(self):
    # TODO: disabled due to different behavior on GPU and CPU.
    # On GPU, bad indices do not raise an error but instead fetch 0 values.
if not test.is_gpu_available():
return
with self.session(use_gpu=True):
params = [[0, 1, 2]]
indices = [[[0], [0], [1]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,2\] = \[1\] does not index into param shape \[1,3\]"):
self.evaluate(gather_nd)
@test_util.run_deprecated_v1
def testGradientsRank2Elements(self):
indices = constant_op.constant([[0, 0], [1, 1]], dtype=dtypes.int32)
inputs = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant([1, 2], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array([[1, 0], [0, 2]], dtype=np.float64)
with self.session(use_gpu=True):
assert np.array_equal(expected_grads, self.evaluate(grads))
@test_util.run_deprecated_v1
def testGradientsRank2Slices(self):
indices = constant_op.constant([[1], [0]], dtype=dtypes.int32)
inputs = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array([[3, 4], [1, 2]], dtype=np.float64)
with self.session(use_gpu=True):
self.assertIndexedSlices(grads)
self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads).eval())
@test_util.run_deprecated_v1
def testGradientsRank3Elements(self):
indices = constant_op.constant(
[[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=dtypes.int32)
inputs = constant_op.constant(
[[[1, 3], [5, 7]], [[2, 4], [6, 8]]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
with self.session(use_gpu=True):
self.assertAllEqual(expected_grads, self.evaluate(grads))
@test_util.run_deprecated_v1
def testGradientsRank7Elements(self):
# Shape [1,1,2,1,1,2,2]
indices = constant_op.constant(
[[[
[[[[0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0]]]],
[[[[0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1]]]]
]]],
dtype=dtypes.int32)
inputs = constant_op.constant(
[[[
[[[[1, 3], [5, 7]]]],
[[[[2, 4], [6, 8]]]]
]]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[[
[[[[1, 2], [3, 4]]]],
[[[[5, 6], [7, 8]]]]
]]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[[
[[[[5, 6], [1, 2]]]],
[[[[3, 4], [7, 8]]]]
]]], dtype=np.float64)
with self.session(use_gpu=True):
self.assertAllEqual(expected_grads, self.evaluate(grads))
@test_util.run_deprecated_v1
def testGradientsInt64Indices(self):
indices = constant_op.constant(
[[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=dtypes.int64)
inputs = constant_op.constant(
[[[1, 3], [5, 7]], [[2, 4], [6, 8]]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
with self.session(use_gpu=True):
self.assertAllEqual(expected_grads, self.evaluate(grads))
@test_util.run_deprecated_v1
def testGradientsRank2SlicesWithEmptySpace(self):
indices = constant_op.constant([[2], [0], [5]], dtype=dtypes.int32)
inputs = constant_op.constant(
[[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9]],
dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[1, 1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2, 2, 2, 2],
[3, 3, 3, 3, 3, 3, 3, 3, 3]],
dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[2, 2, 2, 2, 2, 2, 2, 2, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0], [3, 3, 3, 3, 3, 3, 3, 3, 3]],
dtype=np.float64)
with self.session(use_gpu=True):
self.assertIndexedSlices(grads)
self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads).eval())
@test_util.run_v1_only("RefVariable is not supported in v2")
def testGatherNdRefVariable(self):
with self.cached_session():
v = variables.RefVariable(constant_op.constant([[1, 2], [3, 4], [5, 6]]))
self.evaluate(variables.global_variables_initializer())
gather = array_ops.gather_nd(v, [[0, 1], [2, 0]])
if not context.executing_eagerly(): # .op doesn't make sense in Eager
self.assertEqual("GatherNd", gather.op.name)
self.assertAllEqual([2, 5], gather)
@test_util.run_in_graph_and_eager_modes
def testGatherNdResourceVariable(self):
with compat.forward_compatibility_horizon(2019, 4, 30):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(
constant_op.constant([[1, 2], [3, 4], [5, 6]]))
self.evaluate(variables.global_variables_initializer())
gather = array_ops.gather_nd(v, [[0, 1], [2, 0]])
if not context.executing_eagerly(): # .op doesn't make sense in Eager
self.assertEqual("ResourceGatherNd", gather.op.inputs[0].op.type)
self.assertAllEqual([2, 5], gather)
class GatherNdOpBenchmark(test.Benchmark):
def benchmark_gather_nd_op(self):
shape = (100, 47, 18, 170, 13)
np.random.seed(127)
params = np.random.rand(*shape)
indices = np.vstack([np.random.randint(0, s, size=10000) for s in shape]).T
with session.Session():
t_params = variables.Variable(params)
t_indices = variables.Variable(indices)
gather_op = array_ops.gather_nd(t_params, t_indices)
variables.global_variables_initializer().run()
for _ in range(10):
self.evaluate(gather_op)
t1 = time.time()
for _ in range(1000):
self.evaluate(gather_op)
t2 = time.time()
self.report_benchmark(iters=1000, wall_time=(t2 - t1) / 1000.0)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/gather_nd_op_test.py
|
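A minimal NumPy sketch (not part of the repository file above) of the gradient rule these gather_nd tests verify: the gradient with respect to params is a scatter-add of the incoming gradient at the gathered indices, so duplicate indices accumulate. The helper name gather_nd_grad is hypothetical.
import numpy as np

def gather_nd_grad(params_shape, indices, grad_vals):
  """Scatter-adds grad_vals into a zero tensor of params_shape at indices."""
  grad = np.zeros(params_shape, dtype=grad_vals.dtype)
  index_depth = indices.shape[-1]
  flat_indices = indices.reshape(-1, index_depth)
  flat_grads = grad_vals.reshape(
      (flat_indices.shape[0],) + tuple(params_shape[index_depth:]))
  for idx, g in zip(flat_indices, flat_grads):
    grad[tuple(idx)] += g  # duplicate indices accumulate, as in the tests
  return grad

# Row indices [[1], [0]] swap the rows of the incoming gradient, matching the
# expected_grads pattern of the rank-2 element test above.
indices = np.array([[1], [0]])
grad_vals = np.array([[1., 2.], [3., 4.]])
print(gather_nd_grad((2, 2), indices, grad_vals))  # [[3. 4.] [1. 2.]]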
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bucketize_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class BucketizationOpTest(test.TestCase):
def testInt(self):
op = math_ops._bucketize(
constant_op.constant([-5, 0, 2, 3, 5, 8, 10, 11, 12]),
boundaries=[0, 3, 8, 11])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
with self.session(use_gpu=True) as sess:
self.assertAllEqual(expected_out, self.evaluate(op))
def testFloat(self):
op = math_ops._bucketize(
constant_op.constant([-5., 0., 2., 3., 5., 8., 10., 11., 12.]),
boundaries=[0., 3., 8., 11.])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
with self.session(use_gpu=True) as sess:
self.assertAllEqual(expected_out, self.evaluate(op))
def test2DInput(self):
op = math_ops._bucketize(
constant_op.constant([[-5, 0, 2, 3, 5], [8, 10, 11, 12, 0]]),
boundaries=[0, 3, 8, 11])
expected_out = [[0, 1, 1, 2, 2], [3, 3, 4, 4, 1]]
with self.session(use_gpu=True) as sess:
self.assertAllEqual(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def testInvalidBoundariesOrder(self):
op = math_ops._bucketize(
constant_op.constant([-5, 0]), boundaries=[0, 8, 3, 11])
with self.session(use_gpu=True) as sess:
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError, "Expected sorted boundaries"):
self.evaluate(op)
def testBoundariesNotList(self):
with self.assertRaisesRegexp(
TypeError, "Expected list.*"):
math_ops._bucketize(constant_op.constant([-5, 0]), boundaries=0)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/bucketize_op_test.py
|
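As a quick cross-check (an illustrative sketch, not repository code), the right-open bucketing contract exercised above -- boundaries[i-1] <= x < boundaries[i] lands in bucket i -- matches NumPy's digitize:
import numpy as np

values = np.array([-5, 0, 2, 3, 5, 8, 10, 11, 12])
boundaries = [0, 3, 8, 11]
# right=False gives right-open intervals, the same convention _bucketize uses.
buckets = np.digitize(values, boundaries, right=False)
print(buckets.tolist())  # [0, 1, 1, 2, 2, 3, 3, 4, 4] == expected_out above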
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for inplace_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import inplace_ops
from tensorflow.python.platform import test as test_lib
class InplaceOpsTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasicUpdate(self):
for dtype in [dtypes.float32, dtypes.int32, dtypes.int64]:
with self.session(use_gpu=True):
x = array_ops.ones([7, 3], dtype)
y = np.ones([7, 3], dtype.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, [3], array_ops.ones([1, 3], dtype))
y[3, :] = 1
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, [-1],
array_ops.ones([1, 3], dtype) * 2)
y[-1, :] = 2
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, 5, array_ops.ones([3], dtype) * 7)
y[5, :] = 7
self.assertAllClose(x.eval(), y)
@test_util.run_deprecated_v1
def testBasicUpdateBool(self):
with self.session(use_gpu=True):
x = array_ops.ones([7, 3], dtypes.bool)
y = np.ones([7, 3], dtypes.bool.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, [3], array_ops.ones([1, 3],
dtypes.bool))
y[3, :] = True
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, [-1],
array_ops.zeros([1, 3], dtypes.bool))
y[-1, :] = False
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, 5, array_ops.zeros([3], dtypes.bool))
y[5, :] = False
self.assertAllClose(x.eval(), y)
@test_util.run_deprecated_v1
def testBasicAdd(self):
for dtype in [dtypes.float32, dtypes.int32, dtypes.int64]:
with self.cached_session(use_gpu=True):
x = array_ops.ones([7, 3], dtype)
y = np.ones([7, 3], dtype.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
        x = inplace_ops.inplace_add(x, [3], array_ops.ones([1, 3], dtype))
y[3, :] += 1
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_add(x, [-1], array_ops.ones([1, 3], dtype) * 2)
y[-1, :] += 2
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_add(x, 5, array_ops.ones([3], dtype) * 7)
y[5, :] += 7
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_add(x, None, array_ops.ones([7, 3], dtype) * 99)
y[:, :] += 99
self.assertAllClose(x.eval(), y)
@test_util.run_deprecated_v1
def testBasicSub(self):
for dtype in [dtypes.float32, dtypes.int32, dtypes.int64]:
with self.cached_session(use_gpu=True):
x = array_ops.ones([7, 3], dtype)
y = np.ones([7, 3], dtype.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_sub(x, [3], array_ops.ones([1, 3], dtype))
y[3, :] -= 1
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_sub(x, [-1], array_ops.ones([1, 3], dtype) * 2)
y[-1, :] -= 2
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_sub(x, 5, array_ops.ones([3], dtype) * 7)
y[5, :] -= 7
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_sub(x, None, array_ops.ones([7, 3], dtype) * 99)
y[:, :] -= 99
self.assertAllClose(x.eval(), y)
@test_util.run_deprecated_v1
def testRandom(self):
with self.session(use_gpu=True):
d0, d1, d2 = 100, 3, 5
x = array_ops.zeros([d0, d1, d2])
y = np.zeros([d0, d1, d2])
for _ in xrange(20):
idx = np.random.choice(d0, d0 // 10, replace=False)
val = np.random.randint(10, size=(d0 // 10, d1, d2))
op = np.random.randint(3)
if op == 0:
x = inplace_ops.inplace_update(x, idx, val)
y[idx, :] = val
elif op == 1:
x = inplace_ops.inplace_add(x, idx, val)
y[idx, :] += val
elif op == 2:
x = inplace_ops.inplace_sub(x, idx, val)
y[idx, :] -= val
self.assertAllClose(x.eval(), y)
@test_util.run_deprecated_v1
def testRandom1D(self):
with self.session(use_gpu=True):
d0 = 100
x = array_ops.zeros([d0])
y = np.zeros([d0])
for _ in xrange(20):
idx = np.random.choice(d0, d0 // 10, replace=False)
val = np.random.randint(10, size=(d0 // 10))
op = np.random.randint(3)
if op == 0:
x = inplace_ops.inplace_update(x, idx, val)
y[idx] = val
elif op == 1:
x = inplace_ops.inplace_add(x, idx, val)
y[idx] += val
elif op == 2:
x = inplace_ops.inplace_sub(x, idx, val)
y[idx] -= val
self.assertAllClose(x.eval(), y)
def testAlias(self):
with self.session(use_gpu=True) as sess:
x = array_ops.ones([2, 3])
y = inplace_ops.alias_inplace_add(x, [0], [[1, 2, 3]])
with ops.control_dependencies([y]):
z = array_ops.identity(x)
_, vy, vz = self.evaluate([x, y, z])
self.assertAllClose(vy, vz)
def testError(self):
with self.cached_session():
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"must be a vector"):
_ = inplace_ops.inplace_update([[1.]], [[0]], [[10]]).eval()
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"x and v shape doesn't match"):
_ = inplace_ops.inplace_update([[1.]], [0], [10]).eval()
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"i and x shape doesn't match"):
_ = inplace_ops.inplace_update([[1.]], [0, 1], [[10]]).eval()
@test_util.run_deprecated_v1
def testEmpty(self):
for dtype in [
dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64, dtypes.bool,
dtypes.uint8
]:
with self.cached_session(use_gpu=True):
test_shapes = [(), (1,), (2, 3), (0, 2), (2, 3, 5), (2, 0, 5)]
for shape in test_shapes:
val = inplace_ops.empty(shape, dtype).eval()
self.assertEqual(val.shape, shape)
self.assertEqual(val.dtype, dtype.as_numpy_dtype)
val = inplace_ops.empty(shape, dtype, init=True).eval()
self.assertEqual(val.shape, shape)
self.assertEqual(val.dtype, dtype.as_numpy_dtype)
self.assertAllEqual(val, np.zeros(shape, dtype.as_numpy_dtype))
val = inplace_ops.empty_like(array_ops.zeros(shape, dtype)).eval()
self.assertEqual(val.shape, shape)
self.assertEqual(val.dtype, dtype.as_numpy_dtype)
val = inplace_ops.empty_like(
array_ops.zeros(shape, dtype), init=True).eval()
self.assertEqual(val.shape, shape)
self.assertEqual(val.dtype, dtype.as_numpy_dtype)
self.assertAllEqual(val, np.zeros(shape, dtype.as_numpy_dtype))
with self.cached_session(use_gpu=True):
val = inplace_ops.empty((1, 2), dtypes.string, init=True).eval()
self.assertEqual(val.tolist(), [[b"", b""]])
val = inplace_ops.empty((1, 2), dtypes.string, init=False).eval()
self.assertEqual(val.tolist(), [[b"", b""]])
if __name__ == "__main__":
test_lib.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/inplace_ops_test.py
|
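A NumPy mirror (an illustrative sketch, assuming only values matter and not aliasing) of the row-wise semantics the inplace_ops tests above check: i selects rows of x (None means every row, a scalar means a single row), and update/add/sub apply along those rows.
import numpy as np

def np_inplace(x, i, v, op):
  x = x.copy()  # the tests compare values only, so a copy is fine here
  rows = slice(None) if i is None else i
  if op == "update":
    x[rows, ...] = v
  elif op == "add":
    x[rows, ...] += v
  elif op == "sub":
    x[rows, ...] -= v
  return x

x = np.ones([7, 3])
print(np_inplace(x, [3], np.full([1, 3], 2.0), "add")[3])  # [3. 3. 3.]
print(np_inplace(x, None, np.ones([7, 3]), "sub")[0])      # [0. 0. 0.]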
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for custom user ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.framework import errors
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class InvalidOpTest(test.TestCase):
def testBasic(self):
library_filename = os.path.join(resource_loader.get_data_files_path(),
'invalid_op.so')
with self.assertRaises(errors.InvalidArgumentError):
load_library.load_op_library(library_filename)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/invalid_op_test.py
|
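The general pattern the test above exercises, as a hedged sketch: load_op_library raises InvalidArgumentError when the shared object is rejected (for instance, a malformed op registration) and NotFoundError when the file cannot be found. The path 'my_custom_op.so' is hypothetical.
from tensorflow.python.framework import errors
from tensorflow.python.framework import load_library

try:
  lib = load_library.load_op_library('my_custom_op.so')  # hypothetical path
except errors.InvalidArgumentError as e:
  print('op library rejected:', e)  # the case InvalidOpTest.testBasic checks
except errors.NotFoundError as e:
  print('op library missing:', e)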
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.variable_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_NP_TO_TF = {
np.float32: dtypes.float32,
np.float64: dtypes.float64,
np.int32: dtypes.int32,
np.int64: dtypes.int64,
}
class VariableOpTest(test.TestCase):
def _initFetch(self, x, tftype, use_gpu=None):
with self.test_session(use_gpu=use_gpu):
p = state_ops.variable_op(x.shape, tftype)
op = state_ops.assign(p, x)
op.op.run()
return self.evaluate(p)
def _testTypes(self, vals):
for dtype in [np.float32, np.float64, np.int32, np.int64]:
self.setUp()
x = vals.astype(dtype)
tftype = _NP_TO_TF[dtype]
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=False))
      # NOTE(touts): the GPU test should pass for all types, whether or not
      # the Variable op has a GPU implementation for that type, as we expect
      # Variable and Assign to have GPU implementations for matching types.
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=True))
@test_util.run_deprecated_v1
def testBasic(self):
self._testTypes(np.arange(0, 20).reshape([4, 5]))
@test_util.run_deprecated_v1
def testset_shape(self):
p = state_ops.variable_op([1, 2], dtypes.float32)
self.assertEqual([1, 2], p.get_shape())
p = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), p.get_shape())
@test_util.run_deprecated_v1
def testAssign(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32)
self.assertShapeEqual(value, var)
assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
@test_util.run_deprecated_v1
def testAssignNoValidateShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32)
self.assertShapeEqual(value, var)
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
@test_util.run_deprecated_v1
def testAssignNoVarShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
@test_util.run_deprecated_v1
def testAssignNoVarShapeNoValidateShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
def _NewShapelessTensor(self):
tensor = array_ops.placeholder(dtypes.float32)
self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())
return tensor
@test_util.run_deprecated_v1
def testAssignNoValueShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
var = state_ops.variable_op(shape, dtypes.float32)
assigned = state_ops.assign(var, value)
self.assertEqual(shape, var.get_shape())
self.assertEqual(shape, assigned.get_shape())
@test_util.run_deprecated_v1
def testAssignNoValueShapeNoValidateShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
var = state_ops.variable_op(shape, dtypes.float32)
self.assertEqual(shape, var.get_shape())
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), assigned.get_shape())
@test_util.run_deprecated_v1
def testAssignNoShape(self):
with self.cached_session():
value = self._NewShapelessTensor()
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
self.assertEqual(tensor_shape.unknown_shape(),
state_ops.assign(var, value).get_shape())
@test_util.run_deprecated_v1
def testAssignNoShapeNoValidateShape(self):
with self.cached_session():
value = self._NewShapelessTensor()
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
self.assertEqual(
tensor_shape.unknown_shape(),
state_ops.assign(
var, value, validate_shape=False).get_shape())
@test_util.run_deprecated_v1
def testAssignUpdate(self):
var = state_ops.variable_op([1, 2], dtypes.float32)
added = state_ops.assign_add(var, [[2.0, 3.0]])
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
self.assertEqual([1, 2], subbed.get_shape())
@test_util.run_deprecated_v1
def testAssignUpdateNoVarShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
added = state_ops.assign_add(var, [[2.0, 3.0]])
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
self.assertEqual([1, 2], subbed.get_shape())
@test_util.run_deprecated_v1
def testAssignUpdateNoValueShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32)
added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual([1, 2], subbed.get_shape())
@test_util.run_deprecated_v1
def testAssignUpdateNoShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), added.get_shape())
subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())
@test_util.run_deprecated_v1
def testTemporaryVariable(self):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="foo")
var = state_ops.assign(var, [[4.0, 5.0]])
var = state_ops.assign_add(var, [[6.0, 7.0]])
final = gen_state_ops.destroy_temporary_variable(var, var_name="foo")
self.assertAllClose([[10.0, 12.0]], self.evaluate(final))
@test_util.run_deprecated_v1
def testDestroyNonexistentTemporaryVariable(self):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable([1, 2], dtypes.float32)
final = gen_state_ops.destroy_temporary_variable(var, var_name="bad")
with self.assertRaises(errors.NotFoundError):
self.evaluate(final)
@test_util.run_deprecated_v1
def testDuplicateTemporaryVariable(self):
with test_util.use_gpu():
var1 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="dup")
var1 = state_ops.assign(var1, [[1.0, 2.0]])
var2 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="dup")
var2 = state_ops.assign(var2, [[3.0, 4.0]])
final = var1 + var2
with self.assertRaises(errors.AlreadyExistsError):
self.evaluate(final)
@test_util.run_deprecated_v1
def testDestroyTemporaryVariableTwice(self):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable([1, 2], dtypes.float32)
val1 = gen_state_ops.destroy_temporary_variable(var, var_name="dup")
val2 = gen_state_ops.destroy_temporary_variable(var, var_name="dup")
final = val1 + val2
with self.assertRaises(errors.NotFoundError):
self.evaluate(final)
@test_util.run_deprecated_v1
def testTemporaryVariableNoLeak(self):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="bar")
final = array_ops.identity(var)
self.evaluate(final)
@test_util.run_deprecated_v1
def testTwoTemporaryVariablesNoLeaks(self):
with test_util.use_gpu():
var1 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="var1")
var2 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="var2")
final = var1 + var2
self.evaluate(final)
@test_util.run_deprecated_v1
def testAssignDependencyAcrossDevices(self):
with test_util.use_gpu():
# The variable and an op to increment it are on the GPU.
var = state_ops.variable_op([1], dtypes.float32)
self.evaluate(state_ops.assign(var, [1.0]))
increment = state_ops.assign_add(var, [1.0])
with ops.control_dependencies([increment]):
with test_util.force_cpu():
# This mul op is pinned to the CPU, but reads the variable from the
# GPU. The test ensures that the dependency on 'increment' is still
# honored, i.e., the Send and Recv from GPU to CPU should take place
# only after the increment.
result = math_ops.multiply(var, var)
self.assertAllClose([4.0], self.evaluate(result))
@test_util.run_deprecated_v1
def testIsVariableInitialized(self):
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
v0 = state_ops.variable_op([1, 2], dtypes.float32)
self.assertEqual(False, variables.is_variable_initialized(v0).eval())
state_ops.assign(v0, [[2.0, 3.0]]).eval()
self.assertEqual(True, variables.is_variable_initialized(v0).eval())
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/variable_ops_test.py
|
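A short graph-mode sketch (assuming the TF 1.x APIs used above) of the shape contract the assign tests encode: with validation (the default) the assign output keeps the variable's static shape, while validate_shape=False makes the output follow the value's shape instead.
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops

var = state_ops.variable_op([1, 2], dtypes.float32)  # static shape [1, 2]
value = array_ops.placeholder(dtypes.float32)        # unknown shape

# Default validation: the output keeps the variable's shape.
assert state_ops.assign(var, value).get_shape().as_list() == [1, 2]

# validate_shape=False: the output follows the value, here unknown.
loose = state_ops.assign(var, value, validate_shape=False)
assert loose.get_shape() == tensor_shape.unknown_shape()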
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for various tensorflow.ops.tf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
class ShapeOpsTest(test.TestCase):
def _compareShape(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.shape(x)
tf_ans_64 = array_ops.shape(x, out_type=dtypes.int64)
result = self.evaluate(tf_ans)
result_64 = self.evaluate(tf_ans_64)
self.assertAllEqual(np_ans, result)
self.assertAllEqual(np_ans, result_64)
self.assertShapeEqual(np_ans, tf_ans)
def _compareShapeSparse(self, x_np, use_gpu=False):
np_ans = np.array(np.shape(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.shape(x_tf)
result = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareShapeN(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.cached_session(use_gpu=use_gpu) as sess:
tf_ans = array_ops.shape_n([x, x, x])
tf_ans_64 = array_ops.shape_n([x, x, x], out_type=dtypes.int64)
result = self.evaluate(tf_ans)
result_64 = self.evaluate(tf_ans_64)
for i in range(3):
self.assertAllEqual(np_ans, result[i])
self.assertAllEqual(np_ans, result_64[i])
self.assertShapeEqual(np_ans, tf_ans[i])
def _compareRank(self, x, use_gpu=False):
np_ans = np.asarray(np.ndim(x))
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.rank(x)
result = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareRankSparse(self, x_np, use_gpu=False):
np_ans = np.asarray(np.ndim(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.rank(x_tf)
result = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareSize(self, x, use_gpu=False):
np_ans = np.asarray(np.size(x))
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.size(x)
result = self.evaluate(tf_ans)
tf_ans_64 = array_ops.size(x, out_type=dtypes.int64)
result_64 = self.evaluate(tf_ans_64)
self.assertAllEqual(np_ans, result)
self.assertAllEqual(np_ans, result_64)
self.assertShapeEqual(np_ans, tf_ans)
def _compareSizeSparse(self, x_np, use_gpu=False):
np_ans = np.asarray(np.size(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.size(x_tf)
result = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _testCpu(self, x):
self._compareShape(x, use_gpu=False)
self._compareShapeN(x, use_gpu=False)
self._compareRank(x, use_gpu=False)
self._compareSize(x, use_gpu=False)
self._compareShapeSparse(x, use_gpu=False)
self._compareRankSparse(x, use_gpu=False)
self._compareSizeSparse(x, use_gpu=False)
def _testGpu(self, x):
self._compareShape(x, use_gpu=True)
self._compareShapeN(x, use_gpu=True)
self._compareRank(x, use_gpu=True)
self._compareSize(x, use_gpu=True)
self._compareShapeSparse(x, use_gpu=True)
self._compareRankSparse(x, use_gpu=True)
self._compareSizeSparse(x, use_gpu=True)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testBasic(self):
self._testAll(np.random.randn(2))
self._testAll(np.random.randn(2, 3))
self._testAll(np.random.randn(2, 3, 5))
self._testAll(np.random.randn(2, 3, 5, 7))
self._testAll(np.random.randn(2, 3, 5, 7, 11))
self._testAll(np.random.randn(2, 3, 5, 7, 11, 13))
def testBool(self):
self._testAll(np.random.choice((False, True), size=(2,)))
self._testAll(np.random.choice((False, True), size=(2, 3)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7, 11)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7, 11, 13)))
# Disabled because it takes too long to run, but manually verified
# as passing at time of writing.
def _test64BitOutput(self):
with self.cached_session():
inp = array_ops.zeros([2**31])
num_elements = array_ops.size_internal(
inp, optimize=False, out_type=dtypes.int64)
self.assertEqual(2**31, self.evaluate(num_elements))
# Too large for tf.int32 output.
with self.assertRaises(errors_impl.InvalidArgumentError):
with self.cached_session():
inp = array_ops.zeros([2**31])
num_elements = array_ops.size_internal(
inp, optimize=False, out_type=dtypes.int32)
self.assertEqual(2**31, self.evaluate(num_elements))
def _compareExpandDims(self, x, dim, use_gpu):
np_ans = np.expand_dims(x, axis=dim)
with self.cached_session(use_gpu=use_gpu):
tensor = array_ops.expand_dims(x, dim)
tf_ans = self.evaluate(tensor)
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareExpandDimsAll(self, x, dim):
self._compareExpandDims(x, dim, False)
self._compareExpandDims(x, dim, True)
def testExpandDims(self):
self._compareExpandDimsAll(np.zeros([2]), 0)
self._compareExpandDimsAll(np.zeros([2]), 1)
self._compareExpandDimsAll(np.zeros([2]), -1)
self._compareExpandDimsAll(np.zeros([2, 3]), 0)
self._compareExpandDimsAll(np.zeros([2, 3]), 1)
self._compareExpandDimsAll(np.zeros([2, 3]), 2)
self._compareExpandDimsAll(np.zeros([2, 3]), -1)
self._compareExpandDimsAll(np.zeros([2, 3]), -2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 0)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 1)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 3)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -1)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -3)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -4)
def testExpandDimsBool(self):
choice = lambda s: np.random.choice((False, True), size=s)
self._compareExpandDimsAll(choice([2]), 0)
self._compareExpandDimsAll(choice([2]), 1)
self._compareExpandDimsAll(choice([2]), -1)
self._compareExpandDimsAll(choice([2, 3]), 0)
self._compareExpandDimsAll(choice([2, 3]), 1)
self._compareExpandDimsAll(choice([2, 3]), 2)
self._compareExpandDimsAll(choice([2, 3]), -1)
self._compareExpandDimsAll(choice([2, 3]), -2)
self._compareExpandDimsAll(choice([2, 3, 5]), 0)
self._compareExpandDimsAll(choice([2, 3, 5]), 1)
self._compareExpandDimsAll(choice([2, 3, 5]), 2)
self._compareExpandDimsAll(choice([2, 3, 5]), 3)
self._compareExpandDimsAll(choice([2, 3, 5]), -1)
self._compareExpandDimsAll(choice([2, 3, 5]), -2)
self._compareExpandDimsAll(choice([2, 3, 5]), -3)
self._compareExpandDimsAll(choice([2, 3, 5]), -4)
@test_util.run_deprecated_v1
def testExpandDimsErrors(self):
with self.cached_session():
self.assertRaises(ValueError, array_ops.expand_dims,
np.zeros([2, 3, 5]), -5)
self.assertRaises(ValueError, array_ops.expand_dims,
[False, True, True], -5)
self.assertRaises(ValueError, array_ops.expand_dims,
np.zeros([2, 3, 5]), 4)
self.assertRaises(ValueError, array_ops.expand_dims,
[False, True, True], 4)
@test_util.run_deprecated_v1
def testExpandDimsGradient(self):
with self.cached_session():
inp = constant_op.constant(
np.random.rand(4, 2).astype("f"), dtype=dtypes.float32)
squeezed = array_ops.expand_dims(inp, 1)
err = gradient_checker.compute_gradient_error(inp, [4, 2], squeezed,
[4, 1, 2])
self.assertLess(err, 1e-3)
@test_util.run_deprecated_v1
def testExpandDimsScalar(self):
with self.cached_session():
inp = constant_op.constant(7)
self.assertAllEqual([7], array_ops.expand_dims(inp, 0).eval())
self.assertAllEqual([7], array_ops.expand_dims(inp, -1).eval())
inp = constant_op.constant(True)
self.assertAllEqual([True], array_ops.expand_dims(inp, 0).eval())
self.assertAllEqual([True], array_ops.expand_dims(inp, -1).eval())
def testExpandDimsDimType(self):
for dtype in [dtypes.int32, dtypes.int64]:
x = np.zeros([2])
np_ans = np.expand_dims(x, axis=0)
with self.cached_session(use_gpu=True):
tensor = array_ops.expand_dims(x, constant_op.constant(0, dtype))
tf_ans = self.evaluate(tensor)
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareSqueeze(self, x, squeeze_dims, use_gpu):
with self.cached_session(use_gpu=use_gpu):
if squeeze_dims:
np_ans = np.squeeze(x, axis=tuple(squeeze_dims))
tensor = array_ops.squeeze(x, squeeze_dims)
tf_ans = self.evaluate(tensor)
else:
np_ans = np.squeeze(x)
tensor = array_ops.squeeze(x)
tf_ans = self.evaluate(tensor)
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareSqueezeAll(self, x, squeeze_dims=None):
if squeeze_dims is None:
squeeze_dims = []
self._compareSqueeze(x, squeeze_dims, False)
self._compareSqueeze(x, squeeze_dims, True)
def testSqueeze(self):
# Nothing to squeeze.
self._compareSqueezeAll(np.zeros([2]))
self._compareSqueezeAll(np.zeros([2, 3]))
# Squeeze the middle element away.
self._compareSqueezeAll(np.zeros([2, 1, 2]))
# Squeeze on both ends.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]))
def testSqueezeBool(self):
choice = lambda s: np.random.choice((False, True), size=s)
# Nothing to squeeze.
self._compareSqueezeAll(choice([2]))
self._compareSqueezeAll(choice([2, 3]))
# Squeeze the middle element away.
self._compareSqueezeAll(choice([2, 1, 2]))
# Squeeze on both ends.
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]))
def testSqueezeSpecificDimension(self):
# Positive squeeze dim index.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [2, 4])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0, 4, 2])
# Negative squeeze dim index.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-1])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5, -1])
def testSqueezeSpecificDimensionBool(self):
choice = lambda s: np.random.choice((False, True), size=s)
# Positive squeeze dim index.
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [0])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [2, 4])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [0, 4, 2])
# Negative squeeze dim index.
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-1])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-3, -5])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-3, -5, -1])
def testSqueezeAllOnes(self):
# Numpy squeezes a 1 element tensor into a zero dimensional tensor.
# Verify that we do the same.
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
tensor = array_ops.squeeze(np.zeros([1, 1, 1]), [])
self.assertEqual(np.shape(1), tensor.get_shape())
tf_ans = self.evaluate(tensor)
self.assertEqual(np.shape(1), tf_ans.shape)
def testSqueezeAllOnesBool(self):
# Numpy squeezes a 1 element tensor into a zero dimensional tensor.
# Verify that we do the same.
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
tensor = array_ops.squeeze([[[False]]], [])
self.assertEqual(np.shape(1), tensor.get_shape())
tf_ans = self.evaluate(tensor)
self.assertEqual(np.shape(1), tf_ans.shape)
@test_util.run_deprecated_v1
def testSqueezeOnlyOnes(self):
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
input_1x1x3 = np.zeros([1, 1, 3])
self._compareSqueezeAll(input_1x1x3)
self._compareSqueezeAll(input_1x1x3, [0])
self._compareSqueezeAll(input_1x1x3, [1])
self.assertRaises(ValueError, array_ops.squeeze, input_1x1x3, [2])
@test_util.run_deprecated_v1
def testSqueezeErrors(self):
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [-4])
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [0, -4])
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [3])
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [2, 3])
@test_util.run_deprecated_v1
def testSqueezeGradient(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = array_ops.reshape(inp, [4, 1, 2])
squeezed = array_ops.squeeze(a, [])
err = gradient_checker.compute_gradient_error(a, [4, 1, 2], squeezed,
[4, 2])
self.assertLess(err, 1e-3)
@test_util.run_deprecated_v1
def testSqueezeGradientWithSqueezeDims(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = array_ops.reshape(inp, [4, 1, 2, 1])
squeezed = array_ops.squeeze(a, [1])
err = gradient_checker.compute_gradient_error(a, [4, 1, 2, 1], squeezed,
[4, 2, 1])
self.assertLess(err, 1e-3)
@test_util.run_deprecated_v1
def testSqueezeWithUnknownShape(self):
with self.cached_session():
a = array_ops.placeholder(dtypes.float32, shape=[2, None])
squeezed = array_ops.squeeze(a, [1])
self.assertEqual([2], squeezed.get_shape().as_list())
squeezed = array_ops.squeeze(a)
self.assertEqual(None, squeezed.get_shape())
self.assertRaises(ValueError, array_ops.squeeze, a, [0])
self.assertRaises(ValueError, array_ops.squeeze, a, [100])
class TileTest(test.TestCase, parameterized.TestCase):
def testScalar(self):
for use_gpu in False, True:
with self.cached_session(use_gpu=use_gpu):
a = constant_op.constant(7, shape=[], dtype=dtypes.float32)
tiled = array_ops.tile(a, [])
result = self.evaluate(tiled)
self.assertEqual(result.shape, ())
self.assertEqual([], tiled.get_shape())
self.assertEqual(7, result)
def testSimple(self):
# multiples could be int32 or int64
for dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(use_gpu=True):
inp = np.random.rand(4, 1).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, constant_op.constant([1, 4], dtype=dtype))
result = self.evaluate(tiled)
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
self.assertTrue((result == np.tile(inp, (1, 4))).all())
def testIdentityTileAndGrad(self):
with self.cached_session():
inp = np.random.rand(4, 1).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, [1, 1])
result = self.evaluate(tiled)
self.assertEqual(result.shape, (4, 1))
self.assertEqual([4, 1], tiled.get_shape())
self.assertTrue((result == np.tile(inp, (1, 1))).all())
def testEmpty(self):
with self.cached_session():
inp = np.random.rand(2, 3).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, [5, 0])
result = self.evaluate(tiled)
self.assertEqual(result.shape, (10, 0))
self.assertEqual([10, 0], tiled.get_shape())
@test_util.run_deprecated_v1
def testUnknownInputShape(self):
"""Importing can call _TileShape without shape of <multiples> known."""
with self.cached_session():
inp = array_ops.placeholder(dtypes.float32) # unknown shape
multiples = constant_op.constant([1, 2, 3, 4], dtype=np.int32)
tiled = array_ops.tile(inp, multiples)
gdef = tiled.graph.as_graph_def()
# Move the tile op to the start of the graph so that shapes of its inputs
# are not available when the shape function runs on import.
swapped = False
for i, n in enumerate(gdef.node):
if n.op == "Tile":
# Swap tile op to be first in gdef.node
assert i != 0
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(gdef.node[i])
gdef.node[i].CopyFrom(gdef.node[0])
gdef.node[0].CopyFrom(new_node)
swapped = True
assert swapped
tiled_imported, = importer.import_graph_def(
gdef, return_elements=[tiled.name])
self.assertEqual(4, tiled_imported.get_shape().ndims)
def testTypes(self):
types_to_test = {
"bool": (dtypes.bool, bool),
"float32": (dtypes.float32, float),
"float64": (dtypes.float64, float),
"complex64": (dtypes.complex64, complex),
"complex128": (dtypes.complex128, complex),
"uint8": (dtypes.uint8, int),
"int8": (dtypes.int8, int),
"int16": (dtypes.int16, int),
"int32": (dtypes.int32, int),
"int64": (dtypes.int64, int),
bytes: (dtypes.string, bytes)
}
for dtype_np, (dtype_tf, cast) in types_to_test.items():
with self.cached_session(use_gpu=True):
inp = np.random.rand(4, 1).astype(dtype_np)
a = constant_op.constant(
[cast(x) for x in inp.ravel(order="C")],
shape=[4, 1],
dtype=dtype_tf)
tiled = array_ops.tile(a, [1, 4])
result = self.evaluate(tiled)
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
self.assertAllEqual(result, np.tile(inp, (1, 4)))
@test_util.run_deprecated_v1
def testInvalidDim(self):
with self.cached_session():
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=[4, 1],
dtype=dtypes.float32)
# Wrong length of multiples.
with self.assertRaises(ValueError):
array_ops.tile(a, [1, 4, 2])
# Wrong rank for multiples.
with self.assertRaises(ValueError):
array_ops.tile(a, [[2, 3], [3, 4]]).eval()
def _RunAndVerifyResult(self, rank, use_gpu):
with self.cached_session(use_gpu=use_gpu):
# Random dims of given rank
input_shape = np.random.randint(1, 4, size=rank)
inp = np.random.rand(*input_shape).astype("f")
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=input_shape,
dtype=dtypes.float32)
multiples = np.random.randint(1, 4, size=rank).astype(np.int32)
tiled = array_ops.tile(a, multiples)
result = self.evaluate(tiled)
self.assertTrue((np.array(multiples) * np.array(inp.shape) == np.array(
result.shape)).all())
self.assertAllEqual(result, np.tile(inp, tuple(multiples)))
self.assertShapeEqual(result, tiled)
def testRandom(self):
# test low rank, like 5
for _ in range(5):
self._RunAndVerifyResult(5, use_gpu=False)
for _ in range(5):
self._RunAndVerifyResult(5, use_gpu=True)
# test high rank, like 10
for _ in range(5):
self._RunAndVerifyResult(10, use_gpu=False)
for _ in range(5):
self._RunAndVerifyResult(10, use_gpu=True)
@parameterized.parameters(dtypes.int32, dtypes.int64)
@test_util.run_deprecated_v1
def testGradientSimpleReduction(self, multiples_dtype):
with self.cached_session():
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
multiples = constant_op.constant([1, 4], dtype=multiples_dtype)
tiled = array_ops.tile(a, multiples)
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = self.evaluate(grad)
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
@test_util.run_deprecated_v1
def testGradientStridedReduction(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = self.evaluate(grad)
expected_shape = [4, 2]
expected = np.zeros(expected_shape)
expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
self.assertTrue((np.abs(expected - result) < 1e-3).all())
@test_util.run_deprecated_v1
def testGradientSimpleReductionOnGPU(self):
with self.session(use_gpu=True):
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 4])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
result = self.evaluate(grad)
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
@test_util.run_deprecated_v1
def testGradientStridedReductionOnGPU(self):
with self.session(use_gpu=True):
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
result = self.evaluate(grad)
expected_shape = [4, 2]
expected = np.zeros(expected_shape)
expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
self.assertAllClose(expected, result, 1e-3)
def _RunAndVerifyGradientResult(self, input_shape, multiples):
for use_gpu in False, True:
with self.cached_session(use_gpu=use_gpu):
# Random values
inp = np.asarray(np.random.rand(*input_shape))
a = constant_op.constant(inp, dtype=dtypes.float64)
tiled = array_ops.tile(a, multiples)
grad_shape = list(np.array(multiples) * np.array(inp.shape))
err = gradient_checker.compute_gradient_error(
a, list(input_shape), tiled, grad_shape, x_init_value=inp)
print("tile(float) error = ", err)
self.assertLess(err, 1e-3)
@test_util.run_deprecated_v1
def testGradientRandomScalar(self):
self._RunAndVerifyGradientResult([], [])
@test_util.run_deprecated_v1
def testGradientRandom(self):
self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 1, 1, 1, 1])
self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 2, 1, 3, 1])
self._RunAndVerifyGradientResult([2, 3, 1, 1, 3], [3, 1, 1, 2, 2])
self._RunAndVerifyGradientResult([2, 1, 3, 3, 2], [1, 3, 3, 1, 2])
@test_util.run_deprecated_v1
def testGradientStridedReductionGC(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
err = gradient_checker.compute_gradient_error(a, [4, 2], tiled, [4, 4])
self.assertLess(err, 1e-3)
@parameterized.parameters(dtypes.int32, dtypes.int64)
@test_util.run_deprecated_v1
def testGradientWithSparseGradWithRank1(self, multiples_dtype):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0],
dtype=dtypes.float32)
    multiples = constant_op.constant([3], dtype=multiples_dtype)
outputs = array_ops.gather(array_ops.tile(inputs, multiples),
[1, 5, 9, 3, 7, 2, 2, 2])
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testGradientWithSparseGradWithRank3(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0],
dtype=dtypes.float32)
inputs = array_ops.reshape(inputs, [-1, 1, 1])
outputs = array_ops.gather(array_ops.tile(inputs, [3, 4, 2]),
[1, 5, 9, 3, 7, 2, 2, 2])
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# Unknown multiples shape.
inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input shape.
inp = array_ops.placeholder(dtypes.float32)
tiled = array_ops.tile(inp, [2, 2, 2, 2])
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input and multiples shape.
inp = array_ops.placeholder(dtypes.float32)
tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
self.assertIs(None, tiled.get_shape().ndims)
# Known input and partially known multiples.
inp = constant_op.constant(0.0, shape=[1, 1])
tiled = array_ops.tile(inp, [array_ops.placeholder(dtypes.int32), 7])
self.assertEqual([None, 7], tiled.get_shape().as_list())
# Mismatched input rank and multiples length.
inp = array_ops.placeholder(dtypes.float32, shape=[None, None])
with self.assertRaises(ValueError):
tiled = array_ops.tile(
inp, array_ops.placeholder(
dtypes.int32, shape=[3]))
def testLargeTensor(self):
    # Test case for GitHub issue 46911.
with self.assertRaises(errors_impl.InvalidArgumentError):
with self.cached_session():
tiled = array_ops.tile(
np.ones((1, 1, 1)), [100000000, 100000000, 100000000])
result = self.evaluate(tiled)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/shape_ops_test.py
|
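A NumPy sketch (illustrative, not repository code) of the tile gradient rule that testGradientStridedReduction checks: the gradient with respect to the input sums the incoming gradient over every tiled copy.
import numpy as np

grad_inp = np.arange(16, dtype=np.float32).reshape(4, 4)
# For tiled = tile(a, [1, 2]) with a of shape [4, 2], the two copies live in
# columns [0:2] and [2:4]; the input gradient adds them together.
grad_a = grad_inp[:, 0:2] + grad_inp[:, 2:4]
# The same reduction, written generically as a reshape plus a sum over the
# tile axis.
grad_a2 = grad_inp.reshape(4, 2, 2).sum(axis=1)
assert np.allclose(grad_a, grad_a2)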
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for division with division imported from __future__.
This file should be exactly the same as division_past_test.py except
for the __future__ division line.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class DivisionTestCase(test.TestCase):
def testDivision(self):
"""Test all the different ways to divide."""
values = [1, 2, 7, 11]
functions = (lambda x: x), constant_op.constant
# TODO(irving): Test int8, int16 once we support casts for those.
dtypes = np.int32, np.int64, np.float32, np.float64
tensors = []
checks = []
def check(x, y):
x = ops.convert_to_tensor(x)
y = ops.convert_to_tensor(y)
tensors.append((x, y))
def f(x, y):
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x, y)
checks.append(f)
with self.cached_session() as sess:
for dtype in dtypes:
for x in map(dtype, values):
for y in map(dtype, values):
for fx in functions:
for fy in functions:
tf_x = fx(x)
tf_y = fy(y)
div = x / y
tf_div = tf_x / tf_y
check(div, tf_div)
floordiv = x // y
tf_floordiv = tf_x // tf_y
check(floordiv, tf_floordiv)
# Do only one sess.run for speed
for f, (x, y) in zip(checks, self.evaluate(tensors)):
f(x, y)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/division_future_test.py
|
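The contract those checks encode, in a standalone sketch (illustrative): with __future__ division in effect, / is true division even for integer dtypes while // remains floor division, and TF follows NumPy's dtype behavior here.
import numpy as np

x, y = np.int32(7), np.int32(2)
print(x / y, (x / y).dtype)    # 3.5 float64 -- true division promotes ints
print(x // y, (x // y).dtype)  # 3 int32 -- floor division keeps the dtype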
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sparse_cross_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseCrossOpTest(test.TestCase):
@test_util.run_deprecated_v1
def test_simple(self):
"""Tests a simple scenario."""
op = sparse_ops.sparse_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['batch1-FC1-F1_X_batch1-FC2-F1'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_dense(self):
"""Tests only dense inputs."""
op = sparse_ops.sparse_cross([
constant_op.constant([['batch1-FC1-F1', 'batch1-FC1-F2'],
['batch2-FC1-F1', 'batch2-FC1-F2']],
dtypes.string),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2',
'batch1-FC1-F2_X_batch1-FC2-F1', 'batch1-FC1-F2_X_batch1-FC2-F2'
], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_integer_mixed_string_sparse(self):
"""Tests mixed type."""
op = sparse_ops.sparse_cross([
self._sparse_tensor([[11], [333, 55555]]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['11_X_batch1-FC2-F1'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2', '55555_X_batch2-FC2-F1',
'55555_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_integer_mixed_string_dense(self):
"""Tests mixed dense inputs."""
op = sparse_ops.sparse_cross([
constant_op.constant([[11, 333], [55555, 999999]], dtypes.int64),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2', '333_X_batch1-FC2-F1',
'333_X_batch1-FC2-F2'
], [
'55555_X_batch2-FC2-F1', '55555_X_batch2-FC2-F2',
'999999_X_batch2-FC2-F1', '999999_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_sparse_cross_dense(self):
"""Tests sparse and dense inputs."""
op = sparse_ops.sparse_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_integer_sparse_input(self):
"""Tests mixed type sparse and dense inputs."""
op = sparse_ops.sparse_cross([
self._sparse_tensor([[11], [333, 5555]]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2',
'5555_X_batch2-FC2-F1', '5555_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_permutation_3x3x3(self):
"""Tests 3x3x3 permutation."""
op = sparse_ops.sparse_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor(
[['batch1-FC2-F1', 'batch1-FC2-F2', 'batch1-FC2-F3']]),
self._sparse_tensor(
[['batch1-FC3-F1', 'batch1-FC3-F2', 'batch1-FC3-F3']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F3'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_permutation_3x1x2(self):
"""Tests 3x1x2 permutation."""
op = sparse_ops.sparse_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_large_batch(self):
"""Tests with large batch size to force multithreading."""
batch_size = 5000
col1 = []
col2 = []
col3 = []
for b in range(batch_size):
col1.append(
['batch%d-FC1-F1' % b, 'batch%d-FC1-F2' % b, 'batch%d-FC1-F3' % b])
col2.append(['batch%d-FC2-F1' % b])
col3.append(['batch%d-FC3-F1' % b, 'batch%d-FC3-F2' % b])
op = sparse_ops.sparse_cross([
self._sparse_tensor(col1),
self._sparse_tensor(col2),
self._sparse_tensor(col3)
])
col_out = []
for b in range(batch_size):
col_out.append([
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b)
])
expected_out = self._sparse_tensor(col_out)
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_one_column_empty(self):
"""Tests when one column is empty.
The crossed tensor should be empty.
"""
op = sparse_ops.sparse_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']]),
self._sparse_tensor([], 1),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
with self.cached_session() as sess:
self._assert_sparse_tensor_empty(self.evaluate(op))
@test_util.run_deprecated_v1
def test_some_columns_empty(self):
"""Tests when more than one columns are empty.
Cross for the corresponding batch should be empty.
"""
op = sparse_ops.sparse_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']], 2),
self._sparse_tensor([['batch1-FC2-F1'], ['batch2-FC2-F1']], 2),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']], 2)
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2'
]], 2)
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_all_columns_empty(self):
"""Tests when all columns are empty.
The crossed tensor should be empty.
"""
op = sparse_ops.sparse_cross([
self._sparse_tensor([]),
self._sparse_tensor([]),
self._sparse_tensor([])
])
with self.cached_session() as sess:
self._assert_sparse_tensor_empty(self.evaluate(op))
@test_util.run_deprecated_v1
def test_hashed_zero_bucket_no_hash_key(self):
op = sparse_ops.sparse_cross_hashed([
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
])
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[1971693436396284976]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_hashed_zero_bucket(self):
op = sparse_ops.sparse_cross_hashed(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hash_key=sparse_ops._DEFAULT_HASH_KEY + 1)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[4847552627144134031]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
# TODO(sibyl-Aix6ihai): Add benchmark to compare Hashed vs Non-hashed.
@test_util.run_deprecated_v1
def test_hashed_no_hash_key(self):
op = sparse_ops.sparse_cross_hashed(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
num_buckets=100)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[83]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_hashed_output(self):
op = sparse_ops.sparse_cross_hashed(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
num_buckets=100,
hash_key=sparse_ops._DEFAULT_HASH_KEY + 1)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[31]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def test_hashed__has_no_collision(self):
"""Tests that fingerprint concatenation has no collisions."""
    # Although the last 10 bits of 359 and 359 + 1024 are identical, the
    # crosses should not collide: values are fingerprinted before bucketing,
    # not bucketed by their low-order bits.
t1 = constant_op.constant([[359], [359 + 1024]])
t2 = constant_op.constant([list(range(10)), list(range(10))])
cross = sparse_ops.sparse_cross_hashed(
[t2, t1], num_buckets=1024, hash_key=sparse_ops._DEFAULT_HASH_KEY + 1)
cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
with session.Session():
values = self.evaluate(cross_dense)
self.assertTrue(numpy.not_equal(values[0], values[1]).all())
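  # A minimal sketch (not part of the original test) of the collision that
  # plain modulo bucketing would produce here: 359 and 359 + 1024 share their
  # low 10 bits, so `value % 1024` maps both rows to the same bucket, whereas
  # sparse_cross_hashed fingerprints the full values before bucketing.
  def _demo_plain_modulo_collision(self):
    # Leading underscore keeps this illustration out of the test runner.
    assert 359 % 1024 == (359 + 1024) % 1024  # plain modulo collides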
def test_hashed_3x1x2(self):
"""Tests 3x1x2 permutation with hashed output."""
op = sparse_ops.sparse_cross_hashed(
[
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
],
num_buckets=1000)
with self.cached_session() as sess:
out = self.evaluate(op)
self.assertEqual(6, len(out.values))
self.assertAllEqual([[0, i] for i in range(6)], out.indices)
self.assertTrue(all(x < 1000 and x >= 0 for x in out.values))
all_values_are_different = len(out.values) == len(set(out.values))
self.assertTrue(all_values_are_different)
def _assert_sparse_tensor_empty(self, sp):
    self.assertEqual(0, sp.indices.size)
    self.assertEqual(0, sp.values.size)
    # TODO(zakaria): check if we can ignore the first dim of the shape.
    self.assertEqual(0, sp.dense_shape[1])
def _assert_sparse_tensor_equals(self, sp1, sp2):
self.assertAllEqual(sp1.indices.eval(), sp2.indices)
self.assertAllEqual(sp1.values.eval(), sp2.values)
self.assertAllEqual(sp1.dense_shape.eval(), sp2.dense_shape)
def _sparse_tensor(self, data, batch_size=-1):
"""Generates a SparseTensor.
Args:
      data: A list of lists of strings or int64 values. Each item of the
        outer list represents a batch. Each item of a batch is a feature of a
        specific feature column.
      batch_size: Optional batch size; useful when `data` has no entries for
        some batches.
Returns:
A SparseTensor.
"""
indices = []
values = []
max_col_count = 0
for batch, batch_ix in zip(data, range(len(data))):
for column, column_ix in zip(batch, range(len(batch))):
indices.append([batch_ix, column_ix])
values.append(column)
max_col_count = max(max_col_count, column_ix + 1)
shape = [batch_size if batch_size != -1 else len(data), max_col_count]
value_type = (dtypes.string if not values or isinstance(values[0], str) else
dtypes.int64)
return sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64, [len(indices), 2]),
constant_op.constant(values, value_type, [len(indices)]),
constant_op.constant(shape, dtypes.int64))
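# A minimal sketch (not part of the original file) of the coordinate layout
# _sparse_tensor builds: the row index is the batch number, the column index
# is the position within that batch, and dense_shape is
# [num_batches, widest_batch].
def _demo_sparse_layout(data=(('a',), ('b', 'c'))):
  indices = [[b, c] for b, row in enumerate(data) for c, _ in enumerate(row)]
  values = [v for row in data for v in row]
  shape = [len(data), max((len(row) for row in data), default=0)]
  # -> ([[0, 0], [1, 0], [1, 1]], ['a', 'b', 'c'], [2, 2])
  return indices, values, shape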
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/sparse_cross_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_strip_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringStripOpTest(test.TestCase):
""" Test cases for tf.strings.strip."""
def test_string_strip(self):
strings = ["pigs on the wing", "animals"]
with self.cached_session() as sess:
output = string_ops.string_strip(strings)
output = self.evaluate(output)
self.assertAllEqual(output, [b"pigs on the wing", b"animals"])
def test_string_strip_2d(self):
strings = [["pigs on the wing", "animals"],
[" hello ", "\n\tworld \r \n"]]
with self.cached_session() as sess:
output = string_ops.string_strip(strings)
output = self.evaluate(output)
self.assertAllEqual(output, [[b"pigs on the wing", b"animals"],
[b"hello", b"world"]])
def test_string_strip_with_empty_strings(self):
strings = [" hello ", "", "world ", " \t \r \n "]
with self.cached_session() as sess:
output = string_ops.string_strip(strings)
output = self.evaluate(output)
self.assertAllEqual(output, [b"hello", b"", b"world", b""])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/string_strip_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for math_ops.bincount."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class BincountTest(test_util.TensorFlowTestCase):
def test_empty(self):
with self.session(use_gpu=True):
self.assertAllEqual(self.evaluate(math_ops.bincount([], minlength=5)),
[0, 0, 0, 0, 0])
self.assertAllEqual(self.evaluate(math_ops.bincount([], minlength=1)),
[0])
self.assertAllEqual(self.evaluate(math_ops.bincount([], minlength=0)),
[])
self.assertEqual(self.evaluate(math_ops.bincount([], minlength=0,
dtype=np.float32)).dtype,
np.float32)
self.assertEqual(self.evaluate(math_ops.bincount([], minlength=3,
dtype=np.float64)).dtype,
np.float64)
def test_values(self):
with self.session(use_gpu=True):
self.assertAllEqual(self.evaluate(math_ops.bincount([1, 1, 1, 2, 2, 3])),
[0, 3, 2, 1])
arr = [1, 1, 2, 1, 2, 3, 1, 2, 3, 4, 1, 2, 3, 4, 5]
self.assertAllEqual(self.evaluate(math_ops.bincount(arr)),
[0, 5, 4, 3, 2, 1])
arr += [0, 0, 0, 0, 0, 0]
self.assertAllEqual(self.evaluate(math_ops.bincount(arr)),
[6, 5, 4, 3, 2, 1])
self.assertAllEqual(self.evaluate(math_ops.bincount([])), [])
self.assertAllEqual(self.evaluate(math_ops.bincount([0, 0, 0])), [3])
self.assertAllEqual(self.evaluate(math_ops.bincount([5])),
[0, 0, 0, 0, 0, 1])
self.assertAllEqual(self.evaluate(math_ops.bincount(np.arange(10000))),
np.ones(10000))
def test_maxlength(self):
with self.session(use_gpu=True):
self.assertAllEqual(self.evaluate(math_ops.bincount([5], maxlength=3)),
[0, 0, 0])
self.assertAllEqual(self.evaluate(math_ops.bincount([1], maxlength=3)),
[0, 1])
self.assertAllEqual(self.evaluate(math_ops.bincount([], maxlength=3)),
[])
def test_random_with_weights(self):
num_samples = 10000
with self.session(use_gpu=True):
np.random.seed(42)
for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
arr = np.random.randint(0, 1000, num_samples)
if dtype == dtypes.int32 or dtype == dtypes.int64:
weights = np.random.randint(-100, 100, num_samples)
else:
weights = np.random.random(num_samples)
self.assertAllClose(
self.evaluate(math_ops.bincount(arr, weights)),
np.bincount(arr, weights))
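  # A minimal sketch (not part of the original test) of the weighted-bincount
  # semantics checked above: bin i accumulates the weights of every entry
  # equal to i, which is exactly what np.bincount(arr, weights) computes.
  def _demo_weighted_bincount(self):
    arr, weights = [0, 1, 1, 2], [0.5, 1.0, 2.0, 4.0]
    out = [0.0, 0.0, 0.0]
    for v, w in zip(arr, weights):
      out[v] += w
    return out  # [0.5, 3.0, 4.0]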
def test_random_without_weights(self):
num_samples = 10000
with self.session(use_gpu=True):
np.random.seed(42)
for dtype in [np.int32, np.float32]:
arr = np.random.randint(0, 1000, num_samples)
weights = np.ones(num_samples).astype(dtype)
self.assertAllClose(
self.evaluate(math_ops.bincount(arr, None)),
np.bincount(arr, weights))
def test_zero_weights(self):
with self.session(use_gpu=True):
self.assertAllEqual(
self.evaluate(math_ops.bincount(np.arange(1000), np.zeros(1000))),
np.zeros(1000))
def test_negative(self):
# unsorted_segment_sum will only report InvalidArgumentError on CPU
with self.cached_session():
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(math_ops.bincount([1, 2, 3, -1, 6, 8]))
@test_util.run_deprecated_v1
def test_shape_function(self):
# size must be scalar.
with self.assertRaisesRegexp(
ValueError, "Shape must be rank 0 but is rank 1 for 'Bincount'"):
gen_math_ops.bincount([1, 2, 3, -1, 6, 8], [1], [])
    # size must be non-negative.
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
gen_math_ops.bincount([1, 2, 3, -1, 6, 8], -5, [])
# if size is a constant then the shape is known.
v1 = gen_math_ops.bincount([1, 2, 3, -1, 6, 8], 5, [])
self.assertAllEqual(v1.get_shape().as_list(), [5])
# if size is a placeholder then the shape is unknown.
s = array_ops.placeholder(dtype=dtypes.int32)
v2 = gen_math_ops.bincount([1, 2, 3, -1, 6, 8], s, [])
self.assertAllEqual(v2.get_shape().as_list(), [None])
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/bincount_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class ReverseSequenceTest(test.TestCase):
def _testReverseSequence(self,
x,
batch_axis,
seq_axis,
seq_lengths,
truth,
use_gpu=False,
expected_err_re=None):
with self.cached_session(use_gpu=use_gpu):
ans = array_ops.reverse_sequence(
x, batch_axis=batch_axis, seq_axis=seq_axis, seq_lengths=seq_lengths)
if expected_err_re is None:
tf_ans = self.evaluate(ans)
self.assertAllClose(tf_ans, truth, atol=1e-10)
self.assertShapeEqual(truth, ans)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def _testBothReverseSequence(self,
x,
batch_axis,
seq_axis,
seq_lengths,
truth,
expected_err_re=None):
self._testReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth, True,
expected_err_re)
self._testReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth,
False, expected_err_re)
def _testBasic(self, dtype, len_dtype=np.int64):
x = np.asarray(
[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]],
[[17, 18, 19, 20], [21, 22, 23, 24]]],
dtype=dtype)
x = x.reshape(3, 2, 4, 1, 1)
x = x.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
# reverse dim 2 up to (0:3, none, 0:4) along dim=0
seq_lengths = np.asarray([3, 0, 4], dtype=len_dtype)
truth_orig = np.asarray(
[
[[3, 2, 1, 4], [7, 6, 5, 8]], # reverse 0:3
[[9, 10, 11, 12], [13, 14, 15, 16]], # reverse none
[[20, 19, 18, 17], [24, 23, 22, 21]]
], # reverse 0:4 (all)
dtype=dtype)
truth_orig = truth_orig.reshape(3, 2, 4, 1, 1)
truth = truth_orig.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
seq_axis = 0 # permute seq_axis and batch_axis (originally 2 and 0, resp.)
batch_axis = 2
self._testBothReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth)
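  # A minimal sketch (not part of the original test) of reverse_sequence
  # semantics with batch_axis=0 and seq_axis=1: row b has its first
  # seq_lengths[b] elements reversed and the remainder left untouched.
  def _demo_reverse_sequence_rows(self):
    x = [[1, 2, 3, 4], [5, 6, 7, 8]]
    seq_lengths = [3, 0]
    out = [row[:n][::-1] + row[n:] for row, n in zip(x, seq_lengths)]
    return out  # [[3, 2, 1, 4], [5, 6, 7, 8]]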
def testSeqLengthInt32(self):
self._testBasic(np.float32, np.int32)
def testFloatBasic(self):
self._testBasic(np.float32)
def testDoubleBasic(self):
self._testBasic(np.float64)
def testInt32Basic(self):
self._testBasic(np.int32)
def testInt64Basic(self):
self._testBasic(np.int64)
def testComplex64Basic(self):
self._testBasic(np.complex64)
def testComplex128Basic(self):
self._testBasic(np.complex128)
@test_util.run_deprecated_v1
def testFloatReverseSequenceGrad(self):
x = np.asarray(
[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]],
[[17, 18, 19, 20], [21, 22, 23, 24]]],
        dtype=np.float64)
x = x.reshape(3, 2, 4, 1, 1)
x = x.transpose([2, 1, 0, 3, 4]) # transpose axes 0 <=> 2
# reverse dim 0 up to (0:3, none, 0:4) along dim=2
seq_axis = 0
batch_axis = 2
seq_lengths = np.asarray([3, 0, 4], dtype=np.int64)
with self.cached_session():
input_t = constant_op.constant(x, shape=x.shape)
seq_lengths_t = constant_op.constant(seq_lengths, shape=seq_lengths.shape)
reverse_sequence_out = array_ops.reverse_sequence(
input_t,
batch_axis=batch_axis,
seq_axis=seq_axis,
seq_lengths=seq_lengths_t)
err = gradient_checker.compute_gradient_error(
input_t, x.shape, reverse_sequence_out, x.shape, x_init_value=x)
print("ReverseSequence gradient error = %g" % err)
self.assertLess(err, 1e-8)
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
t = array_ops.reverse_sequence(
array_ops.placeholder(
dtypes.float32, shape=None),
seq_lengths=array_ops.placeholder(
dtypes.int64, shape=(32,)),
batch_axis=0,
seq_axis=1)
self.assertIs(t.get_shape().ndims, None)
# Batch size mismatched between input and seq_lengths.
with self.assertRaises(ValueError):
array_ops.reverse_sequence(
array_ops.placeholder(
dtypes.float32, shape=(32, 2, 3)),
seq_lengths=array_ops.placeholder(
dtypes.int64, shape=(33,)),
seq_axis=3)
# seq_axis out of bounds.
with self.assertRaisesRegexp(ValueError, "seq_dim must be < input rank"):
array_ops.reverse_sequence(
array_ops.placeholder(
dtypes.float32, shape=(32, 2, 3)),
seq_lengths=array_ops.placeholder(
dtypes.int64, shape=(32,)),
seq_axis=3)
# batch_axis out of bounds.
with self.assertRaisesRegexp(ValueError, "batch_dim must be < input rank"):
array_ops.reverse_sequence(
array_ops.placeholder(
dtypes.float32, shape=(32, 2, 3)),
seq_lengths=array_ops.placeholder(
dtypes.int64, shape=(32,)),
seq_axis=0,
batch_axis=3)
with self.cached_session():
inputs = array_ops.placeholder(dtypes.float32, shape=(32, 2, 3))
seq_lengths = array_ops.placeholder(dtypes.int64, shape=(32,))
output = array_ops.reverse_sequence(
inputs, seq_lengths=seq_lengths,
seq_axis=0) # batch_axis default is 0
with self.assertRaisesOpError("batch_dim == seq_dim"):
output.eval(feed_dict={
inputs: np.random.rand(32, 2, 3),
seq_lengths: xrange(32)
})
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/reverse_sequence_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for atrous convolution functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def upsample_filters(filters, rate):
"""Upsamples the filters by a factor of rate along the spatial dimensions.
Args:
filters: spatial_shape + [in_channels, out_channels]
Original filters.
rate: A list of len(spatial_shape) positive ints, specifying the
upsampling rate.
Returns:
filters_up: output_spatial_shape + [in_channels, out_channels].
Upsampled filters with
output_spatial_shape[i] = (spatial_shape[i] - 1) * rate[i] + 1
containing (rate[i] - 1) zeros between consecutive filter values along
spatial dimension i.
"""
num_spatial_dims = len(rate)
spatial_shape = np.array(filters.shape[:num_spatial_dims])
output_spatial_shape = (spatial_shape - 1) * rate + 1
output = np.zeros(
tuple(output_spatial_shape) + tuple(filters.shape[-2:]), filters.dtype)
output[tuple(np.s_[::rate[i]] for i in range(num_spatial_dims))] = filters
return output
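# A minimal sketch (not part of the original file) of the zero insertion
# upsample_filters performs along one spatial dimension: a rate of 3 turns
# the taps [a, b] into [a, 0, 0, b], i.e. (rate - 1) zeros between taps.
def _demo_upsample_1d(taps=(1.0, 2.0), rate=3):
  out = [0.0] * ((len(taps) - 1) * rate + 1)
  out[::rate] = list(taps)
  return out  # [1.0, 0.0, 0.0, 2.0]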
class AtrousConvolutionTest(test.TestCase):
@contextlib.contextmanager
def _delay_checks(self):
"""Context manager for combining checks depending on tensor evaluations.
Each call to Session.run has some overhead, and this overhead can easily
account for the majority of the time spent in tests that call Session.run
(or Tensor.eval) many times.
This context manager provides a mechanism for registering callback functions
and associated tensors. When the context is exited, all of the tensors
associated with all of the registrations are evaluated with a single call to
Session.run, and then each registered callback function is called with the
values of its associated tensors.
Yields:
A function `add_check(check, *args, **kwargs)` where `check` is the
callback function to be invoked, and `*args` and `**kwargs` specify the
      associated Tensors. When executing eagerly, `check` is invoked
      immediately inside `add_check`; otherwise it is deferred until the
      context exits.
"""
checks = []
def add_check(check, *args, **kwargs):
if context.executing_eagerly():
args_val, kwargs_val = self.evaluate([args, kwargs])
check(*args_val, **kwargs_val)
else:
checks.append((check, args, kwargs))
yield add_check
if not context.executing_eagerly():
all_values = self.evaluate([[args, kwargs] for _, args, kwargs in checks])
for (check, _, _), (args, kwargs) in zip(checks, all_values):
check(*args, **kwargs)
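  # A minimal sketch (not part of the original test) of the batching pattern
  # _delay_checks implements: register (callback, args) pairs, fetch every
  # args tuple in one batched evaluation, then run each callback on its
  # fetched values.
  def _demo_delayed_checks(self):
    checks = [(min, (3, 1)), (max, (3, 1))]  # stand-ins for (check, tensors)
    fetched = [args for _, args in checks]   # one batched "Session.run"
    return [check(*args)
            for (check, _), args in zip(checks, fetched)]  # [1, 3]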
def _test_atrous_convolution(self, add_check, input_shape, filter_shape,
dilation_rate, **kwargs):
filters = np.arange(
np.prod(filter_shape), dtype=np.float32).reshape(filter_shape)
filters_upsampled = upsample_filters(filters, dilation_rate)
x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape)
y1 = nn_ops.convolution(
input=x, filter=filters, dilation_rate=dilation_rate, **kwargs)
y2 = nn_ops.convolution(input=x, filter=filters_upsampled, **kwargs)
def check(y1_eval, y2_eval):
self.assertAllClose(y1_eval, y2_eval, rtol=1e-2, atol=1e-2)
add_check(check, y1, y2)
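  # A minimal sketch (not part of the original test) of the identity being
  # verified: convolving with dilation rate r equals convolving with the same
  # filter after inserting (r - 1) zeros between its taps.
  def _demo_dilated_conv_1d(self):
    x, taps, rate = [1, 2, 3, 4, 5], [1, 10], 2
    dilated = [x[i] * taps[0] + x[i + rate] * taps[1]
               for i in range(len(x) - rate)]
    upsampled = [taps[0], 0, taps[1]]  # (rate - 1) zeros between taps
    direct = [sum(x[i + j] * upsampled[j] for j in range(len(upsampled)))
              for i in range(len(x) - len(upsampled) + 1)]
    return dilated == direct  # True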
@test_util.run_v1_only("b/120545219")
def test_unknown_spatial_dims_for_channel_last_format(self):
x = array_ops.placeholder(dtypes.float32, [1, None, None, 10])
w = array_ops.zeros([3, 3, 10, 20])
y = nn_ops.convolution(
x, w, "VALID", dilation_rate=[2, 2], data_format="NHWC")
self.assertEqual(y.shape.as_list(), [1, None, None, 20])
@test_util.run_v1_only("b/120545219")
def test_unknown_spatial_dims_for_channel_first_format(self):
x = array_ops.placeholder(dtypes.float32, [1, 10, None, None])
w = array_ops.zeros([3, 3, 10, 20])
y = nn_ops.convolution(
x, w, "VALID", dilation_rate=[2, 2], data_format="NCHW")
self.assertEqual(y.shape.as_list(), [1, 20, None, None])
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolution2D(self):
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
for height, width in [[9, 9], [9, 10]]:
for kernel_height, kernel_width in [[1, 1], [2, 2], [2, 3]]:
for dilation_rate in [[1, 1], [3, 2], [2, 1]]:
self._test_atrous_convolution(
add_check=add_check,
input_shape=[2, height, width, 2],
filter_shape=[kernel_height, kernel_width, 2, 2],
padding=padding,
dilation_rate=dilation_rate,
)
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolution3D(self):
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
for depth, height, width in [[9, 9, 10], [9, 10, 9]]:
for kernel_depth, kernel_height, kernel_width in [[3, 3,
3], [3, 2, 2],
[2, 1, 3]]:
for dilation_rate in [[1, 1, 1], [3, 3, 3], [3, 2, 3], [3, 1, 2]]:
self._test_atrous_convolution(
add_check=add_check,
input_shape=[2, depth, height, width, 2],
filter_shape=[
kernel_depth, kernel_height, kernel_width, 2, 2
],
padding=padding,
dilation_rate=dilation_rate,
)
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolution1D(self):
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
for width in [9, 10]:
for kernel_width in range(1, 4):
for rate in range(1, 4):
self._test_atrous_convolution(
add_check=add_check,
input_shape=[2, width, 2],
filter_shape=[kernel_width, 2, 2],
padding=padding,
dilation_rate=[rate],
)
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolutionNC(self):
if test.is_gpu_available(cuda_only=True):
# "NCW" and "NCHW" formats are currently supported only on CUDA.
with test_util.device(use_gpu=True):
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
self._test_atrous_convolution(
add_check=add_check,
input_shape=[2, 2, 9],
padding=padding,
filter_shape=[3, 2, 2],
dilation_rate=[2],
data_format="NCW",
)
self._test_atrous_convolution(
add_check=add_check,
input_shape=[2, 2, 9, 5],
padding=padding,
filter_shape=[3, 3, 2, 2],
dilation_rate=[2, 1],
data_format="NCHW",
)
@test_util.run_in_graph_and_eager_modes
def testAtrousSequence(self):
"""Tests optimization of sequence of atrous convolutions.
See the documentation of with_space_to_batch.
"""
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
for height in range(15, 17):
for width in range(15, 17):
x_shape = [3, height, width, 2]
x = np.random.random_sample(x_shape).astype(np.float32)
kernel_sizes = [1, 3] if padding == "SAME" else range(1, 3)
for kernel in kernel_sizes:
f_shape = [kernel, kernel, 2, 2]
f1 = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)
f2 = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)
def combined_op(converted_input, num_spatial_dims, padding_arg): # pylint: disable=unused-argument
# pylint: disable=cell-var-from-loop
result = nn_ops.convolution(
input=converted_input, filter=f1, padding=padding)
result = nn_ops.convolution(
input=result, filter=f2, padding=padding)
# pylint: enable=cell-var-from-loop
return result
for rate_height in range(2, 4):
for rate_width in range(2, 4):
dilation_rate = [rate_height, rate_width]
y1 = nn_ops.convolution(
input=x,
filter=f1,
padding=padding,
dilation_rate=dilation_rate)
y1 = nn_ops.convolution(
input=y1,
filter=f2,
padding=padding,
dilation_rate=dilation_rate)
y2 = nn_ops.with_space_to_batch(
input=x,
dilation_rate=dilation_rate,
op=combined_op,
padding="VALID")
def check(y1_eval, y2_eval):
self.assertAllClose(y1_eval, y2_eval, rtol=1e-2, atol=1e-2)
add_check(check, y1, y2)
def _test_gradient(self, x_shape, f_shape, dilation_rate, padding):
x_val = np.random.random_sample(x_shape).astype(np.float32)
f_val = np.random.random_sample(f_shape).astype(np.float32)
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.convolution(
input=x, filter=f, dilation_rate=dilation_rate, padding=padding)
y_shape = output.get_shape().as_list()
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)
@test_util.run_v1_only("b/120545219")
def testGradient(self):
with self.cached_session():
for padding in ["SAME", "VALID"]:
for rate_width in range(1, 3):
for rate_height in range(1, 3):
self._test_gradient(
x_shape=[2, 5, 6, 2],
f_shape=[3, 3, 2, 2],
dilation_rate=[rate_height, rate_width],
padding=padding)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/atrous_convolution_test.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for unicode_decode and unicode_decode_with_splits."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.platform import test
def _nested_encode(x, encoding):
"""Encode each string in a nested list with `encoding`."""
if isinstance(x, list):
return [_nested_encode(v, encoding) for v in x]
else:
return x.encode(encoding)
def _nested_codepoints(x):
"""Replace each string in a nested list with a list of its codepoints."""
# Works for Python 2 and 3, and for both UCS2 and UCS4 builds
if isinstance(x, list):
return [_nested_codepoints(v) for v in x]
else:
b = list(x.encode("utf-32-be"))
if any(isinstance(c, str) for c in b):
b = [ord(c) for c in b]
return [(b0 << 24) + (b1 << 16) + (b2 << 8) + b3
for b0, b1, b2, b3 in zip(b[::4], b[1::4], b[2::4], b[3::4])]
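# A minimal sketch (not part of the original file, and assuming Python 3
# bytes indexing) of the arithmetic in _nested_codepoints: four big-endian
# UTF-32 bytes combine into a single codepoint via shifts.
def _demo_utf32_be_codepoint():
  b = u"\U0001f60a".encode("utf-32-be")  # b'\x00\x01\xf6\x0a'
  return (b[0] << 24) + (b[1] << 16) + (b[2] << 8) + b[3]  # 128522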
def _nested_offsets(x, encoding):
"""Replace each string in a nested list with a list of start offsets."""
if isinstance(x, list):
return [_nested_offsets(v, encoding) for v in x]
else:
if not x:
return []
encoded_x = x.encode("utf-32-be")
encoded_chars = [encoded_x[i:i + 4] for i in range(0, len(encoded_x), 4)]
char_lens = [
len(c.decode("utf-32-be").encode(encoding)) for c in encoded_chars
]
return [0] + np.cumsum(char_lens).tolist()[:-1]
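# A minimal sketch (not part of the original file) of the offset computation
# above: each character's start offset is the cumulative encoded byte length
# of the characters before it, so u"仅今" in UTF-8 yields [0, 3].
def _demo_utf8_offsets(text=u"仅今"):
  offsets, pos = [], 0
  for ch in text:
    offsets.append(pos)
    pos += len(ch.encode("utf-8"))
  return offsets  # [0, 3]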
def _nested_splitchars(x, encoding):
"""Replace each string in a nested list with a list of char substrings."""
if isinstance(x, list):
return [_nested_splitchars(v, encoding) for v in x]
else:
b = x.encode("utf-32-be")
chars = zip(b[::4], b[1::4], b[2::4], b[3::4])
if str is bytes:
return [b"".join(c).decode("utf-32-be").encode(encoding) for c in chars]
else:
return [bytes(c).decode("utf-32-be").encode(encoding) for c in chars]
def _make_sparse_tensor(indices, values, dense_shape, dtype=np.int32):
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64), np.array(values, dtype),
np.array(dense_shape, np.int64))
@test_util.run_all_in_graph_and_eager_modes
class UnicodeDecodeTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testScalarDecode(self):
text = constant_op.constant(u"仅今年前".encode("utf-8"))
chars = ragged_string_ops.unicode_decode(text, "utf-8")
self.assertAllEqual(chars, [ord(c) for c in u"仅今年前"])
def testScalarDecodeWithOffset(self):
text = constant_op.constant(u"仅今年前".encode("utf-8"))
chars, starts = ragged_string_ops.unicode_decode_with_offsets(text, "utf-8")
self.assertAllEqual(chars, [ord(c) for c in u"仅今年前"])
self.assertAllEqual(starts, [0, 3, 6, 9])
def testVectorDecode(self):
text = constant_op.constant([u"仅今年前".encode("utf-8"), b"hello"])
chars = ragged_string_ops.unicode_decode(text, "utf-8")
expected_chars = [[ord(c) for c in u"仅今年前"],
[ord(c) for c in u"hello"]]
self.assertAllEqual(chars, expected_chars)
def testVectorDecodeWithOffset(self):
text = constant_op.constant([u"仅今年前".encode("utf-8"), b"hello"])
chars, starts = ragged_string_ops.unicode_decode_with_offsets(text, "utf-8")
expected_chars = [[ord(c) for c in u"仅今年前"],
[ord(c) for c in u"hello"]]
self.assertAllEqual(chars, expected_chars)
self.assertAllEqual(starts, [[0, 3, 6, 9], [0, 1, 2, 3, 4]])
@parameterized.parameters([
{"texts": u"仅今年前"},
{"texts": [u"G\xf6\xf6dnight", u"\U0001f60a"]},
{"texts": ["Hello", "world", "", u"👍"]},
{"texts": [["Hi", "there"], ["", u"\U0001f60a"]], "ragged_rank": 0},
{"texts": [["Hi", "there", ""], [u"😊"]], "ragged_rank": 1},
{"texts": [[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]], "ragged_rank": 2},
{"texts": []}
]) # pyformat: disable
def testBasicDecode(self, texts, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_decode(input_tensor, "UTF-8")
expected = _nested_codepoints(texts)
self.assertAllEqual(expected, result)
@parameterized.parameters([
{"texts": u"仅今年前"},
{"texts": [u"G\xf6\xf6dnight", u"\U0001f60a"]},
{"texts": ["Hello", "world", "", u"👍"]},
{"texts": [["Hi", "there"], ["", u"\U0001f60a"]], "ragged_rank": 0},
{"texts": [["Hi", "there", ""], [u"😊"]], "ragged_rank": 1},
{"texts": [[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]], "ragged_rank": 2},
{"texts": []}
]) # pyformat: disable
def testBasicDecodeWithOffsets(self, texts, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_decode_with_offsets(
input_tensor, "UTF-8")
expected_codepoints = _nested_codepoints(texts)
expected_offsets = _nested_offsets(texts, "UTF-8")
self.assertAllEqual(expected_codepoints, result[0])
self.assertAllEqual(expected_offsets, result[1])
def testDocstringExamples(self):
texts = [s.encode("utf8") for s in [u"G\xf6\xf6dnight", u"\U0001f60a"]]
codepoints1 = ragged_string_ops.unicode_decode(texts, "UTF-8")
codepoints2, offsets = ragged_string_ops.unicode_decode_with_offsets(
texts, "UTF-8")
self.assertAllEqual(
codepoints1, [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]])
self.assertAllEqual(
codepoints2, [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]])
self.assertAllEqual(offsets, [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]])
@parameterized.parameters([
dict(
texts=["Hello", "world", "", u"👍"],
expected=_make_sparse_tensor(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [1, 0], [1, 1],
[1, 2], [1, 3], [1, 4], [3, 0]],
values=[72, 101, 108, 108, 111, 119, 111, 114, 108, 100, 128077],
dense_shape=[4, 5])),
dict(
texts=[["Hi", "there"], ["", u"\U0001f60a"]],
expected=_make_sparse_tensor(
indices=[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2],
[0, 1, 3], [0, 1, 4], [1, 1, 0]],
values=[72, 105, 116, 104, 101, 114, 101, 128522],
dense_shape=[2, 2, 5])),
dict(
texts=[],
expected=_make_sparse_tensor(np.zeros([0, 2], np.int64), [], [0, 0])),
])
def testDecodeWithSparseOutput(self, texts, expected):
input_tensor = np.array(_nested_encode(texts, "UTF-8"), dtype=bytes)
result = ragged_string_ops.unicode_decode(input_tensor, "UTF-8").to_sparse()
self.assertIsInstance(result, sparse_tensor.SparseTensor)
self.assertAllEqual(expected.indices, result.indices)
self.assertAllEqual(expected.values, result.values)
self.assertAllEqual(expected.dense_shape, result.dense_shape)
@parameterized.parameters([
dict(
texts=["Hello", "world", "", u"👍"],
expected=[[72, 101, 108, 108, 111], [119, 111, 114, 108, 100],
[-1, -1, -1, -1, -1], [128077, -1, -1, -1, -1]]),
dict(
texts=[["Hi", "there"], ["", u"\U0001f60a"]],
expected=[[[72, 105, -1, -1, -1], [116, 104, 101, 114, 101]],
[[-1, -1, -1, -1, -1], [128522, -1, -1, -1, -1]]],
ragged_rank=0),
dict(
texts=[["Hi", "there", ""], [u"😊"]],
expected=[[[72, 105, -1, -1, -1],
[116, 104, 101, 114, 101],
[-1, -1, -1, -1, -1]],
[[128522, -1, -1, -1, -1],
[-1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1]]]),
dict(
texts=[[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]],
expected=[
[[[128522, -1, -1], [129312, 129488, -1]],
[[-1, -1, -1], [-1, -1, -1]]],
[[[129299, 128123, 129302], [-1, -1, -1]],
[[-1, -1, -1], [-1, -1, -1]]]]),
dict(texts=[], expected=np.zeros([0, 0], np.int64)),
]) # pyformat: disable
def testDecodeWithPaddedOutput(self, texts, expected, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_decode(
input_tensor, "UTF-8").to_tensor(default_value=-1)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
expected=[[65533], [104, 101, 108, 108, 111],
[61, 61, 65533, 61, 61], [119, 111, 114, 108, 100]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
replacement_char=0,
expected=[[0], [104, 101, 108, 108, 111],
[61, 61, 0, 61, 61], [119, 111, 114, 108, 100]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="ignore",
expected=[[], [104, 101, 108, 108, 111],
[61, 61, 61, 61], [119, 111, 114, 108, 100]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world"],
input_encoding="UTF-8",
replace_control_characters=True,
expected=[[65533], [104, 101, 108, 108, 111],
[61, 61, 65533, 61, 61], [119, 111, 114, 108, 100]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world"],
input_encoding="UTF-8",
replace_control_characters=True,
replacement_char=0,
expected=[[0], [104, 101, 108, 108, 111],
[61, 61, 0, 61, 61], [119, 111, 114, 108, 100]]),
]) # pyformat: disable
def testErrorModes(self, expected=None, **args):
result = ragged_string_ops.unicode_decode(**args)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
expected=[[65533], [104, 101, 108, 108, 111],
[61, 61, 65533, 61, 61], [119, 111, 114, 108, 100]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
replacement_char=0,
expected=[[0], [104, 101, 108, 108, 111],
[61, 61, 0, 61, 61], [119, 111, 114, 108, 100]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="ignore",
expected=[[], [104, 101, 108, 108, 111],
[61, 61, 61, 61], [119, 111, 114, 108, 100]],
expected_offsets=[[], [0, 1, 2, 3, 4],
[0, 1, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world"],
input_encoding="UTF-8",
replace_control_characters=True,
expected=[[65533], [104, 101, 108, 108, 111],
[61, 61, 65533, 61, 61], [119, 111, 114, 108, 100]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world"],
input_encoding="UTF-8",
replace_control_characters=True,
replacement_char=0,
expected=[[0], [104, 101, 108, 108, 111],
[61, 61, 0, 61, 61], [119, 111, 114, 108, 100]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
]) # pyformat: disable
def testErrorModesWithOffsets(self,
expected=None,
expected_offsets=None,
**args):
result = ragged_string_ops.unicode_decode_with_offsets(**args)
self.assertAllEqual(result[0], expected)
self.assertAllEqual(result[1], expected_offsets)
@parameterized.parameters(
("UTF-8", [u"こんにちは", u"你好", u"Hello"]),
("UTF-16-BE", [u"こんにちは", u"你好", u"Hello"]),
("UTF-32-BE", [u"こんにちは", u"你好", u"Hello"]),
("US-ASCII", [u"Hello", "world"]),
("ISO-8859-1", [u"ÀÈÓ", "AEO"]),
("SHIFT-JIS", [u"Hello", u"こんにちは"]),
)
def testDecodeWithDifferentEncodings(self, encoding, texts):
expected = _nested_codepoints(texts)
input_tensor = constant_op.constant(_nested_encode(texts, encoding))
result = ragged_string_ops.unicode_decode(input_tensor, encoding)
self.assertAllEqual(expected, result)
@parameterized.parameters(
("UTF-8", [u"こんにちは", u"你好", u"Hello"]),
("UTF-16-BE", [u"こんにちは", u"你好", u"Hello"]),
("UTF-32-BE", [u"こんにちは", u"你好", u"Hello"]),
("US-ASCII", [u"Hello", "world"]),
("ISO-8859-1", [u"ÀÈÓ", "AEO"]),
("SHIFT-JIS", [u"Hello", u"こんにちは"]),
)
def testDecodeWithOffsetsWithDifferentEncodings(self, encoding, texts):
expected_codepoints = _nested_codepoints(texts)
expected_offsets = _nested_offsets(texts, encoding)
input_tensor = constant_op.constant(_nested_encode(texts, encoding))
result = ragged_string_ops.unicode_decode_with_offsets(
input_tensor, encoding)
self.assertAllEqual(expected_codepoints, result[0])
self.assertAllEqual(expected_offsets, result[1])
@parameterized.parameters([
dict(input=[b"\xFEED"],
errors="strict",
input_encoding="UTF-8",
exception=errors.InvalidArgumentError,
message="Invalid formatting on input string"),
dict(input="x",
input_encoding="UTF-8",
replacement_char=11141111,
exception=errors.InvalidArgumentError,
message="replacement_char out of unicode codepoint range"),
dict(input="x",
input_encoding="UTF-8",
errors="oranguatan",
exception=(ValueError, errors.InvalidArgumentError)),
]) # pyformat: disable
def testExceptions(self, exception=None, message=None, **args):
with self.assertRaisesRegexp(exception, message):
self.evaluate(ragged_string_ops.unicode_decode(**args))
def testUnknownRankError(self):
if context.executing_eagerly():
return
s = array_ops.placeholder(dtypes.string)
message = "Rank of `input` must be statically known."
with self.assertRaisesRegexp(ValueError, message):
self.evaluate(ragged_string_ops.unicode_decode(s, input_encoding="UTF-8"))
@parameterized.parameters([
dict(
doc="Single string",
input=_nested_encode([u"仅今年前"], "utf-8"),
input_encoding="UTF-8",
expected_char_values=_nested_codepoints(u"仅今年前"),
expected_row_splits=[0, 4],
expected_char_to_byte_starts=[0, 3, 6, 9]),
dict(
doc="Multiple strings",
input=_nested_encode([u"仅今年前", u"你好"], "utf-8"),
input_encoding="UTF-8",
expected_char_values=_nested_codepoints(u"仅今年前你好"),
expected_row_splits=[0, 4, 6],
expected_char_to_byte_starts=[0, 3, 6, 9, 0, 3]),
dict(
doc="errors=replace",
input=b"=\xFE=",
input_encoding="UTF-8",
errors="replace",
expected_char_values=[61, 65533, 61],
expected_row_splits=[0, 3],
expected_char_to_byte_starts=[0, 1, 2]),
dict(
doc="errors=ignore",
input=b"=\xFE=",
input_encoding="UTF-8",
errors="ignore",
expected_char_values=[61, 61],
expected_row_splits=[0, 2],
expected_char_to_byte_starts=[0, 2]),
])
def testDecodeGenOp(self,
doc,
expected_row_splits=None,
expected_char_values=None,
expected_char_to_byte_starts=None,
**args):
"""Test for the c++ interface (gen_string_ops.unicode_decode)."""
result = gen_string_ops.unicode_decode_with_offsets(**args)
self.assertAllEqual(expected_row_splits, result.row_splits)
self.assertAllEqual(expected_char_values, result.char_values)
self.assertAllEqual(expected_char_to_byte_starts,
result.char_to_byte_starts)
@test_util.run_all_in_graph_and_eager_modes
class UnicodeSplitTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testScalarSplit(self):
text = constant_op.constant(u"仅今年前".encode("UTF-8"))
chars = ragged_string_ops.unicode_split(text, "UTF-8")
self.assertAllEqual(chars, [c.encode("UTF-8") for c in u"仅今年前"])
def testScalarSplitWithOffset(self):
text = constant_op.constant(u"仅今年前".encode("UTF-8"))
chars, starts = ragged_string_ops.unicode_split_with_offsets(text, "UTF-8")
self.assertAllEqual(chars, [c.encode("UTF-8") for c in u"仅今年前"])
self.assertAllEqual(starts, [0, 3, 6, 9])
def testVectorSplit(self):
text = constant_op.constant([u"仅今年前".encode("UTF-8"), b"hello"])
chars = ragged_string_ops.unicode_split(text, "UTF-8")
expected_chars = [[c.encode("UTF-8") for c in u"仅今年前"],
[c.encode("UTF-8") for c in u"hello"]]
self.assertAllEqual(chars, expected_chars)
def testVectorSplitWithOffset(self):
text = constant_op.constant([u"仅今年前".encode("UTF-8"), b"hello"])
chars, starts = ragged_string_ops.unicode_split_with_offsets(text, "UTF-8")
expected_chars = [[c.encode("UTF-8") for c in u"仅今年前"],
[c.encode("UTF-8") for c in u"hello"]]
self.assertAllEqual(chars, expected_chars)
self.assertAllEqual(starts, [[0, 3, 6, 9], [0, 1, 2, 3, 4]])
@parameterized.parameters([
{"texts": u"仅今年前"},
{"texts": [u"G\xf6\xf6dnight", u"\U0001f60a"]},
{"texts": ["Hello", "world", "", u"👍"]},
{"texts": [["Hi", "there"], ["", u"\U0001f60a"]], "ragged_rank": 0},
{"texts": [["Hi", "there", ""], [u"😊"]], "ragged_rank": 1},
{"texts": [[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]], "ragged_rank": 2},
{"texts": []}
]) # pyformat: disable
def testBasicSplit(self, texts, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_split(input_tensor, "UTF-8")
expected = _nested_splitchars(texts, "UTF-8")
self.assertAllEqual(expected, result)
@parameterized.parameters([
{"texts": u"仅今年前"},
{"texts": [u"G\xf6\xf6dnight", u"\U0001f60a"]},
{"texts": ["Hello", "world", "", u"👍"]},
{"texts": [["Hi", "there"], ["", u"\U0001f60a"]], "ragged_rank": 0},
{"texts": [["Hi", "there", ""], [u"😊"]], "ragged_rank": 1},
{"texts": [[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]], "ragged_rank": 2},
{"texts": []}
]) # pyformat: disable
def testBasicSplitWithOffsets(self, texts, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_split_with_offsets(input_tensor, "UTF-8")
expected_codepoints = _nested_splitchars(texts, "UTF-8")
expected_offsets = _nested_offsets(texts, "UTF-8")
self.assertAllEqual(expected_codepoints, result[0])
self.assertAllEqual(expected_offsets, result[1])
def testDocstringExamples(self):
texts = [s.encode("utf8") for s in [u"G\xf6\xf6dnight", u"\U0001f60a"]]
codepoints1 = ragged_string_ops.unicode_split(texts, "UTF-8")
codepoints2, offsets = ragged_string_ops.unicode_split_with_offsets(
texts, "UTF-8")
self.assertAllEqual(
codepoints1,
[[b"G", b"\xc3\xb6", b"\xc3\xb6", b"d", b"n", b"i", b"g", b"h", b"t"],
[b"\xf0\x9f\x98\x8a"]])
self.assertAllEqual(
codepoints2,
[[b"G", b"\xc3\xb6", b"\xc3\xb6", b"d", b"n", b"i", b"g", b"h", b"t"],
[b"\xf0\x9f\x98\x8a"]])
self.assertAllEqual(offsets, [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]])
@parameterized.parameters([
dict(
texts=["Hello", "world", "", u"👍"],
expected=_make_sparse_tensor(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [1, 0], [1, 1],
[1, 2], [1, 3], [1, 4], [3, 0]],
values=[b"H", b"e", b"l", b"l", b"o",
b"w", b"o", b"r", b"l", b"d", b"\xf0\x9f\x91\x8d"],
dense_shape=[4, 5],
dtype=bytes)),
dict(
texts=[["Hi", "there"], ["", u"\U0001f60a"]],
expected=_make_sparse_tensor(
indices=[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2],
[0, 1, 3], [0, 1, 4], [1, 1, 0]],
values=[b"H", b"i", b"t", b"h", b"e", b"r", b"e",
b"\xf0\x9f\x98\x8a"],
dense_shape=[2, 2, 5],
dtype=bytes)),
dict(
texts=[],
expected=_make_sparse_tensor(
np.zeros([0, 2], np.int64), [], [0, 0], dtype=bytes)),
]) # pyformat: disable
def testSplitWithSparseOutput(self, texts, expected):
input_tensor = np.array(_nested_encode(texts, "UTF-8"), dtype=bytes)
result = ragged_string_ops.unicode_split(input_tensor, "UTF-8").to_sparse()
self.assertIsInstance(result, sparse_tensor.SparseTensor)
self.assertAllEqual(expected.indices, result.indices)
self.assertAllEqual(expected.values, result.values)
self.assertAllEqual(expected.dense_shape, result.dense_shape)
@parameterized.parameters([
dict(
texts=["Hello", "world", "", u"👍"],
expected=[[b"H", b"e", b"l", b"l", b"o"],
[b"w", b"o", b"r", b"l", b"d"],
["", "", "", "", ""],
[b"\xf0\x9f\x91\x8d", "", "", "", ""]]),
dict(
texts=[["Hi", "there"], ["", u"\U0001f60a"]],
expected=[[[b"H", b"i", "", "", ""],
[b"t", b"h", b"e", b"r", b"e"]],
[["", "", "", "", ""],
[b"\xf0\x9f\x98\x8a", "", "", "", ""]]],
ragged_rank=0),
dict(
texts=[["Hi", "there", ""], [u"😊"]],
expected=[[[b"H", b"i", "", "", ""],
[b"t", b"h", b"e", b"r", b"e"],
["", "", "", "", ""]],
[[b"\xf0\x9f\x98\x8a", "", "", "", ""],
["", "", "", "", ""],
["", "", "", "", ""]]]),
dict(
texts=[[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]],
expected=[[[[b"\xf0\x9f\x98\x8a", "", ""],
[b"\xf0\x9f\xa4\xa0", b"\xf0\x9f\xa7\x90", ""]],
[["", "", ""],
["", "", ""]]],
[[[b"\xf0\x9f\xa4\x93", b"\xf0\x9f\x91\xbb",
b"\xf0\x9f\xa4\x96"],
["", "", ""]],
[["", "", ""],
["", "", ""]]]]),
dict(texts=[], expected=np.zeros([0, 0], np.int64)),
]) # pyformat: disable
def testSplitWithPaddedOutput(self, texts, expected, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_split(
input_tensor, "UTF-8").to_tensor(default_value="")
self.assertAllEqual(np.array(expected, dtype=bytes), result)
@parameterized.parameters([
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
expected=[[b"\xef\xbf\xbd"],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"\xef\xbf\xbd", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
replacement_char=0,
expected=[[b"\x00"],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"\x00", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="ignore",
expected=[[],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]]),
]) # pyformat: disable
def testErrorModes(self, expected=None, **args):
result = ragged_string_ops.unicode_split(**args)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
expected=[[b"\xef\xbf\xbd"],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"\xef\xbf\xbd", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
replacement_char=0,
expected=[[b"\x00"],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"\x00", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="ignore",
expected=[[],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]],
expected_offsets=[[], [0, 1, 2, 3, 4],
[0, 1, 3, 4], [0, 1, 2, 3, 4]]),
]) # pyformat: disable
def testErrorModesWithOffsets(self,
expected=None,
expected_offsets=None,
**args):
result = ragged_string_ops.unicode_split_with_offsets(**args)
self.assertAllEqual(expected, result[0])
self.assertAllEqual(expected_offsets, result[1])
@parameterized.parameters(
("UTF-8", [u"こんにちは", u"你好", u"Hello"]),
("UTF-16-BE", [u"こんにちは", u"你好", u"Hello"]),
("UTF-32-BE", [u"こんにちは", u"你好", u"Hello"]),
)
def testSplitWithDifferentEncodings(self, encoding, texts):
expected = _nested_splitchars(texts, encoding)
input_tensor = constant_op.constant(_nested_encode(texts, encoding))
result = ragged_string_ops.unicode_split(input_tensor, encoding)
self.assertAllEqual(expected, result)
@parameterized.parameters(
("UTF-8", [u"こんにちは", u"你好", u"Hello"]),
("UTF-16-BE", [u"こんにちは", u"你好", u"Hello"]),
("UTF-32-BE", [u"こんにちは", u"你好", u"Hello"]),
)
def testSplitWithOffsetsWithDifferentEncodings(self, encoding, texts):
expected_codepoints = _nested_splitchars(texts, encoding)
expected_offsets = _nested_offsets(texts, encoding)
input_tensor = constant_op.constant(_nested_encode(texts, encoding))
result = ragged_string_ops.unicode_split_with_offsets(
input_tensor, encoding)
self.assertAllEqual(expected_codepoints, result[0])
self.assertAllEqual(expected_offsets, result[1])
@parameterized.parameters([
dict(input=[b"\xFEED"],
errors="strict",
input_encoding="UTF-8",
exception=errors.InvalidArgumentError,
message="Invalid formatting on input string"),
dict(input="x",
input_encoding="UTF-8",
replacement_char=11141111,
exception=errors.InvalidArgumentError,
message="replacement_char out of unicode codepoint range"),
dict(input="x",
input_encoding="UTF-8",
errors="oranguatan",
exception=(ValueError, errors.InvalidArgumentError)),
]) # pyformat: disable
def testExceptions(self, exception=None, message=None, **args):
with self.assertRaisesRegexp(exception, message):
self.evaluate(ragged_string_ops.unicode_split(**args))
def testUnknownRankError(self):
if context.executing_eagerly():
return
s = array_ops.placeholder(dtypes.string)
message = "Rank of `input` must be statically known."
with self.assertRaisesRegexp(ValueError, message):
      self.evaluate(ragged_string_ops.unicode_split(s, input_encoding="UTF-8"))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/unicode_decode_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConstantOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class ConstantTest(test.TestCase):
def _testCpu(self, x):
np_ans = np.array(x)
with self.cached_session(use_gpu=False):
tf_ans = ops.convert_to_tensor(x).eval()
dtype = dtypes_lib.as_dtype(np_ans.dtype)
if dtype.is_floating or dtype.is_complex:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testGpu(self, x):
np_ans = np.array(x)
with self.cached_session(use_gpu=True):
tf_ans = ops.convert_to_tensor(x).eval()
dtype = dtypes_lib.as_dtype(np_ans.dtype)
if dtype.is_floating or dtype.is_complex:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testInvalidDType(self):
# Test case for GitHub issue 18474
with self.assertRaises(TypeError):
constant_op.constant(dtypes_lib.string, "[,]")
@test_util.run_deprecated_v1
def testBFloat16(self):
bfloat16 = dtypes_lib.bfloat16.as_numpy_dtype
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(bfloat16))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(bfloat16))
self._testAll(np.empty((2, 0, 5)).astype(bfloat16))
@test_util.run_deprecated_v1
def testHalf(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float16))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float16))
self._testAll(np.empty((2, 0, 5)).astype(np.float16))
@test_util.run_deprecated_v1
def testFloat(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
self._testAll(np.empty((2, 0, 5)).astype(np.float32))
@test_util.run_deprecated_v1
def testDouble(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
self._testAll(np.empty((2, 0, 5)).astype(np.float64))
@test_util.run_deprecated_v1
def testInt32(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
np.int32))
self._testAll(np.empty((2, 0, 5)).astype(np.int32))
@test_util.run_deprecated_v1
def testInt64(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
np.int64))
self._testAll(np.empty((2, 0, 5)).astype(np.int64))
@test_util.run_deprecated_v1
def testComplex64(self):
self._testAll(
complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(
complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
@test_util.run_deprecated_v1
def testComplex128(self):
self._testAll(
complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex128))
self._testAll(
complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex128))
self._testAll(np.empty((2, 0, 5)).astype(np.complex128))
@test_util.run_deprecated_v1
def testString(self):
self._testCpu(
np.array([compat.as_bytes(str(x)) for x in np.arange(-15, 15)]).reshape(
[2, 3, 5]))
self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
@test_util.run_deprecated_v1
def testVariant(self):
# TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant
# copying between CPU and GPU is supported.
with self.session(use_gpu=False):
variant_tensor = tensor_pb2.TensorProto(
dtype=dtypes_lib.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(1, dtype=np.int32).tobytes())
])
const = constant_op.constant(variant_tensor)
const_value = const.op.get_attr("value")
# Ensure we stored the tensor proto properly.
self.assertProtoEquals(variant_tensor, const_value)
# Smoke test -- ensure this executes without trouble.
# Right now, non-numpy-compatible objects cannot be returned from a
# session.run call; similarly, objects that can't be converted to
# native numpy types cannot be passed to ops.convert_to_tensor.
# TODO(ebrevdo): Add registration mechanism for
# ops.convert_to_tensor and for session.run output.
logging_const_op = logging_ops.Print(
const, [const],
message="Variant storing an int, decoded const value:").op
logging_const_op.run()
@test_util.run_deprecated_v1
def testStringWithNulls(self):
with self.cached_session():
val = ops.convert_to_tensor(b"\0\0\0\0").eval()
self.assertEqual(len(val), 4)
self.assertEqual(val, b"\0\0\0\0")
with self.cached_session():
val = ops.convert_to_tensor(b"xx\0xx").eval()
self.assertEqual(len(val), 5)
self.assertAllEqual(val, b"xx\0xx")
nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
with self.cached_session():
val = ops.convert_to_tensor(nested).eval()
# NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
# numpy array, which loses the null terminators.
self.assertEqual(val.tolist(), nested)
def testExplicitShapeNumPy(self):
with ops.Graph().as_default():
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[2, 3, 5])
self.assertEqual(c.get_shape(), [2, 3, 5])
@test_util.assert_no_new_pyobjects_executing_eagerly
def testEagerMemory(self):
"""Tests PyObject refs are managed correctly when executing eagerly."""
constant_op.constant([[1.]])
def testImplicitShapeNumPy(self):
with ops.Graph().as_default():
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self.assertEqual(c.get_shape(), [2, 3, 5])
def testExplicitShapeList(self):
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
self.assertEqual(c.get_shape(), [7])
def testImplicitShapeList(self):
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeNumber(self):
with ops.Graph().as_default():
c = constant_op.constant(1, shape=[1])
self.assertEqual(c.get_shape(), [1])
def testImplicitShapeNumber(self):
with ops.Graph().as_default():
c = constant_op.constant(1)
self.assertEqual(c.get_shape(), [])
def testShapeInconsistent(self):
with ops.Graph().as_default():
c = constant_op.constant_v1([1, 2, 3, 4, 5, 6, 7], shape=[10])
self.assertEqual(c.get_shape(), [10])
with ops.Graph().as_default():
with self.assertRaisesRegexp(
TypeError, "Expected Tensor's shape"):
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
def testPromotionShapes(self):
with ops.Graph().as_default():
c = constant_op.constant([7], shape=[10])
self.assertEqual(c.get_shape(), [10])
with ops.Graph().as_default():
c = constant_op.constant(3, shape=[10])
self.assertEqual(c.get_shape(), [10])
# pylint: disable=g-long-lambda
def testShapeWrong(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "Too many elements provided."):
constant_op.constant_v1([1, 2, 3, 4, 5, 6, 7], shape=[5])
with self.assertRaisesRegexp(TypeError, "Expected Tensor's shape"):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
# pylint: enable=g-long-lambda
# TODO(b/35396543): Temporarily disable: suspicion that
# this is causing test timeouts.
def _testTooLargeConstant(self):
with ops.Graph().as_default():
large_array = np.zeros((512, 1024, 1024), dtype=np.float32)
with self.assertRaisesRegexp(
ValueError,
"Cannot create a tensor proto whose content is larger than 2GB."):
c = constant_op.constant(large_array)
# TODO(b/35396543): Temporarily disable: suspicion that
# this is causing test timeouts.
def _testTooLargeGraph(self):
with ops.Graph().as_default() as g:
large_array = np.zeros((256, 1024, 1024), dtype=np.float32)
c = constant_op.constant(large_array)
d = constant_op.constant(large_array)
with self.assertRaisesRegexp(ValueError,
"GraphDef cannot be larger than 2GB."):
g.as_graph_def()
@test_util.run_deprecated_v1
def testSparseValuesRaiseErrors(self):
with self.assertRaisesRegexp(ValueError,
"setting an array element with a sequence"):
c = constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)
with self.assertRaisesRegexp(ValueError, "must be a dense"):
c = constant_op.constant([[1, 2], [3]])
with self.assertRaisesRegexp(ValueError, "must be a dense"):
c = constant_op.constant([[1, 2], [3], [4, 5]])
class AsTensorTest(test.TestCase):
def testAsTensorForTensorInput(self):
with ops.Graph().as_default():
t = constant_op.constant(10.0)
x = ops.convert_to_tensor(t)
self.assertIs(t, x)
def testAsTensorForNonTensorInput(self):
with ops.Graph().as_default():
x = ops.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, ops.Tensor))
def testAsTensorForShapeInput(self):
with self.cached_session():
x = ops.convert_to_tensor(tensor_shape.TensorShape([]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([1, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31-1, 2, 3]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([2**31 - 1, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31-1, 2, 3]),
dtype=dtypes_lib.int32)
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([2**31 - 1, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]))
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([2**31, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]),
dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([2**31, 2, 3], self.evaluate(x))
with self.assertRaisesRegexp(
ValueError, "a dimension is too large .2147483648."):
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]),
dtype=dtypes_lib.int32)
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([1, 2, 3], self.evaluate(x))
x = array_ops.reshape(
array_ops.zeros([6]), tensor_shape.TensorShape([2, 3]))
self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], self.evaluate(x))
with self.assertRaisesRegexp(ValueError, "partially known"):
ops.convert_to_tensor(tensor_shape.TensorShape(None))
with self.assertRaisesRegexp(ValueError, "partially known"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64]))
with self.assertRaises(TypeError):
ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.float32)
@test_util.run_deprecated_v1
def testAsTensorForDimensionInput(self):
with self.cached_session():
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3])[1])
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual(2, self.evaluate(x))
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3])[1], dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual(2, self.evaluate(x))
shape = tensor_shape.TensorShape(None)
if shape._v2_behavior:
with self.assertRaisesRegexp(ValueError, "None values not supported"):
ops.convert_to_tensor(shape[1])
with self.assertRaisesRegexp(ValueError, "None values not supported"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])[1])
else:
with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
ops.convert_to_tensor(shape[1])
with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])[1])
class IdentityOpTest(test.TestCase):
def testIdTensor(self):
with ops.Graph().as_default():
x = constant_op.constant(2.0, shape=[6], name="input")
id_op = array_ops.identity(x, name="id")
self.assertTrue(isinstance(id_op.op.inputs[0], ops.Tensor))
self.assertProtoEquals("name: 'id' op: 'Identity' input: 'input' "
"attr { key: 'T' value { type: DT_FLOAT } }",
id_op.op.node_def)
class ZerosTest(test.TestCase):
def _Zeros(self, shape):
with self.cached_session():
ret = array_ops.zeros(shape)
self.assertEqual(shape, ret.get_shape())
return self.evaluate(ret)
def testConst(self):
self.assertTrue(
np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] * 2)))
def testScalar(self):
self.assertEqual(0, self._Zeros([]))
self.assertEqual(0, self._Zeros(()))
with self.cached_session():
scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(0, self.evaluate(scalar))
def testDynamicSizes(self):
np_ans = np.array([[0] * 3] * 2)
with self.cached_session():
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of zeros of the same dimensions as "d".
z = array_ops.zeros(array_ops.shape(d))
out = self.evaluate(z)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
@test_util.run_deprecated_v1
def testDtype(self):
with self.cached_session():
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.zeros([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
z = array_ops.zeros(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
# Test explicit type control
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool, dtypes_lib.string
]:
z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = self.evaluate(z)
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = self.evaluate(z)
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
class ZerosLikeTest(test.TestCase):
def _compareZeros(self, dtype, fully_defined_shape, use_gpu):
with self.cached_session(use_gpu=use_gpu):
# Creates a tensor of non-zero values with shape 2 x 3.
# NOTE(kearnes): The default numpy dtype associated with tf.string is
# np.object (and can't be changed without breaking a lot of things), which
# causes a TypeError in constant_op.constant below. Here we catch the
# special case of tf.string and set the numpy dtype appropriately.
if dtype == dtypes_lib.string:
numpy_dtype = np.string_
else:
numpy_dtype = dtype.as_numpy_dtype
if fully_defined_shape:
d = constant_op.constant(
np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
else:
d = array_ops.placeholder(dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.zeros_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
# Test that the shape is correct
if fully_defined_shape:
self.assertEqual([2, 3], z_var.get_shape())
# Test that the value is correct
feed_dict = {}
if not fully_defined_shape:
feed_dict[d] = np.ones((2, 3), dtype=numpy_dtype)
z_value = z_var.eval(feed_dict=feed_dict)
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
@test_util.run_deprecated_v1
def testZerosLikeCPU(self):
for dtype in [
dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.int8, dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.uint16,
dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.bool,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.string
]:
self._compareZeros(dtype, fully_defined_shape=False, use_gpu=False)
self._compareZeros(dtype, fully_defined_shape=True, use_gpu=False)
@test_util.run_deprecated_v1
def testZerosLikeGPU(self):
for dtype in [
dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.complex64,
dtypes_lib.complex128, dtypes_lib.bool
]:
self._compareZeros(dtype, fully_defined_shape=False, use_gpu=True)
self._compareZeros(dtype, fully_defined_shape=True, use_gpu=True)
@test_util.run_deprecated_v1
def testZerosLikePartialShape(self):
d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None])
z = array_ops.zeros_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())
@test_util.run_deprecated_v1
def testZerosLikeDtype(self):
# Make sure zeros_like works even for dtypes that cannot be cast between
with self.cached_session():
shape = (3, 5)
dtypes = np.float32, np.complex64
for in_type in dtypes:
x = np.arange(15).astype(in_type).reshape(*shape)
for out_type in dtypes:
y = array_ops.zeros_like(x, dtype=out_type).eval()
self.assertEqual(y.dtype, out_type)
self.assertEqual(y.shape, shape)
self.assertAllEqual(y, np.zeros(shape, dtype=out_type))
@test_util.run_deprecated_v1
def testZerosLikeVariant(self):
# TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant
# copying between CPU and GPU is supported AND we register a
# ZerosLike callback for GPU for Variant storing primitive types
# in variant_op_registry.cc.
with self.session(use_gpu=False):
variant_tensor = tensor_pb2.TensorProto(
dtype=dtypes_lib.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(1, dtype=np.int32).tobytes())
])
const_variant = constant_op.constant(variant_tensor)
zeros_like = array_ops.zeros_like(const_variant)
zeros_like_op = logging_ops.Print(
zeros_like, [const_variant, zeros_like],
message="Variant storing an int, input and output of zeros_like:").op
# Smoke test -- ensure this executes without trouble.
# Right now, non-numpy-compatible objects cannot be returned from a
# session.run call; similarly, objects that can't be converted to
# native numpy types cannot be passed to ops.convert_to_tensor.
# TODO(ebrevdo): Add registration mechanism for
# ops.convert_to_tensor and for session.run output.
zeros_like_op.run()
class OnesTest(test.TestCase):
def _Ones(self, shape):
with self.cached_session():
ret = array_ops.ones(shape)
self.assertEqual(shape, ret.get_shape())
return self.evaluate(ret)
def testConst(self):
self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))
def testScalar(self):
self.assertEqual(1, self._Ones([]))
self.assertEqual(1, self._Ones(()))
with self.cached_session():
scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(1, self.evaluate(scalar))
def testDynamicSizes(self):
np_ans = np.array([[1] * 3] * 2)
with self.cached_session():
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of ones of the same dimensions as "d".
z = array_ops.ones(array_ops.shape(d))
out = self.evaluate(z)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
@test_util.run_deprecated_v1
def testAutoPack(self):
with self.cached_session():
h = array_ops.placeholder(dtypes_lib.int32, shape=[])
w = array_ops.placeholder(dtypes_lib.int32, shape=[])
z = array_ops.ones([h, w])
out = z.eval(feed_dict={h: 4, w: 16})
self.assertAllEqual(out, np.array([[1] * 16] * 4))
@test_util.run_deprecated_v1
def testDtype(self):
with self.cached_session():
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.ones([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
# Test explicit type control
for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128,
dtypes_lib.int64, dtypes_lib.bool):
z = array_ops.ones([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
class OnesLikeTest(test.TestCase):
def testOnesLike(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int8,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.uint16, dtypes_lib.int32,
dtypes_lib.int64, dtypes_lib.bool, dtypes_lib.complex64,
dtypes_lib.complex128
]:
numpy_dtype = dtype.as_numpy_dtype
with self.cached_session():
# Creates a tensor of non-zero values with shape 2 x 3.
d = constant_op.constant(
np.ones(
(2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.ones_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = self.evaluate(z_var)
# Test that the value is correct
self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
self.assertEqual([2, 3], z_var.get_shape())
@test_util.run_deprecated_v1
def testOnesLikePartialShape(self):
d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None])
z = array_ops.ones_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())
class FillTest(test.TestCase):
def _compare(self, dims, val, np_ans, use_gpu):
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.fill(dims, val, name="fill")
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out)
# Fill does not set the shape.
# self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, dims, val, np_ans):
self._compare(dims, val, np_ans, False)
self._compare(dims, val, np_ans, True)
def testFillFloat(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillDouble(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt32(self):
np_ans = np.array([[42] * 3] * 2).astype(np.int32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt64(self):
np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex64(self):
np_ans = np.array([[0.15 + 0.3j] * 3] * 2).astype(np.complex64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex128(self):
np_ans = np.array([[0.15 + 0.3j] * 3] * 2).astype(np.complex128)
self._compareAll([2, 3], np_ans[0][0], np_ans)
@test_util.run_deprecated_v1
def testFillString(self):
np_ans = np.array([[b"yolo"] * 3] * 2)
with self.session(use_gpu=False):
tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").eval()
self.assertAllEqual(np_ans, tf_ans)
@test_util.run_deprecated_v1
def testFillNegative(self):
with self.cached_session():
for shape in (-1,), (2, -1), (-1, 2), (-2,), (-3,):
with self.assertRaises(ValueError):
array_ops.fill(shape, 7)
# Using a placeholder so this won't be caught in static analysis.
dims = array_ops.placeholder(dtypes_lib.int32)
fill_t = array_ops.fill(dims, 3.0)
for shape in (-1,), (2, -1), (-1, 2), (-2,), (-3,):
with self.assertRaises(errors_impl.InvalidArgumentError):
fill_t.eval({dims: shape})
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# Non-vector dimensions.
with self.assertRaises(ValueError):
array_ops.fill([[0, 1], [2, 3]], 1.0)
# Non-scalar value.
with self.assertRaises(ValueError):
array_ops.fill([3, 2], [1.0, 2.0])
# Partial dimension information.
f = array_ops.fill(array_ops.placeholder(dtypes_lib.int32, shape=(4,)), 3.0)
self.assertEqual([None, None, None, None], f.get_shape().as_list())
f = array_ops.fill(
[array_ops.placeholder(
dtypes_lib.int32, shape=()), 17], 1.0)
self.assertEqual([None, 17], f.get_shape().as_list())
@test_util.run_deprecated_v1
def testGradient(self):
with self.cached_session():
in_v = constant_op.constant(5.0)
out_shape = [3, 2]
out_filled = array_ops.fill(out_shape, in_v)
err = gradient_checker.compute_gradient_error(in_v, [], out_filled,
out_shape)
self.assertLess(err, 1e-3)
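# The gradient of fill with respect to its scalar value is the sum of the
# incoming gradient, since every output element depends identically on the
# value. A hedged numpy restatement of what the gradient check above
# verifies; the helper is hypothetical.
def _reference_fill_value_grad(upstream_grad):
  return np.sum(upstream_grad)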
class PlaceholderTest(test.TestCase):
@test_util.run_deprecated_v1
def testDtype(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=(10, 10), name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
self.evaluate(p_identity)
@test_util.run_deprecated_v1
def testShape(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=(10, 10), name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float and "
r"shape \[10,10\]"):
self.evaluate(p_identity)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
p_identity.eval(feed_dict={p: feed_array[:5, :5]})
@test_util.run_deprecated_v1
def testUnknownShape(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=None, name="p")
p_identity = array_ops.identity(p)
# can feed anything
feed_array = np.random.rand(10, 3)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
feed_array = np.random.rand(4, 2, 5)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
@test_util.run_deprecated_v1
def testScalarShape(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=[], name="p")
p_identity = array_ops.identity(p)
self.assertAllClose(p_identity.eval(feed_dict={p: 5}), 5)
@test_util.run_deprecated_v1
def testPartialShape(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=[None, 3], name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 3)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
p_identity.eval(feed_dict={p: feed_array[:5, :2]})
@test_util.run_deprecated_v1
def testPartialShapeWhenNotFed(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=[None, 3], name="p")
p_identity = array_ops.identity(p)
# Should trigger an operator error, not a shape error.
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
self.evaluate(p_identity)
@test_util.run_deprecated_v1
def testControlDependency(self):
with self.cached_session():
p = array_ops.placeholder(dtypes_lib.int32, shape=[], name="p")
with ops.control_dependencies([p]):
c = constant_op.constant(5, dtypes_lib.int32)
d = math_ops.multiply(p, c)
val = np.array(2).astype(np.int32)
self.assertEqual(10, d.eval(feed_dict={p: val}))
@test_util.run_deprecated_v1
def testBadShape(self):
with self.assertRaises(ValueError):
array_ops.placeholder(dtypes_lib.float32, shape=(-1, 10))
@test_util.run_deprecated_v1
def testTensorStr(self):
a = array_ops.placeholder(dtypes_lib.float32, shape=None, name="a")
self.assertEqual("<tf.Tensor 'a:0' shape=<unknown> dtype=float32>", repr(a))
b = array_ops.placeholder(dtypes_lib.int32, shape=(32, 40), name="b")
self.assertEqual("<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>", repr(b))
c = array_ops.placeholder(dtypes_lib.qint32, shape=(32, None, 2), name="c")
if c.shape._v2_behavior:
self.assertEqual(
"<tf.Tensor 'c:0' shape=(32, None, 2) dtype=qint32>", repr(c))
else:
self.assertEqual(
"<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>", repr(c))
@test_util.run_deprecated_v1
def testOldGraph(self):
# Load graph generated from earlier version of TF where
# placeholder shape was not set.
#
# a = tf.compat.v1.placeholder(tf.float32)
# b = a + 1.0
#
# Older graph's default shape is 'shape {}', not 'shape {
# unknown_rank: true }'
graph = """
node {
name: "Placeholder"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
}
}
}
}
node {
name: "add/y"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
}
float_val: 1.0
}
}
}
}
node {
name: "add"
op: "Add"
input: "Placeholder"
input: "add/y"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
versions {
producer: 21
}
"""
gdef = graph_pb2.GraphDef()
text_format.Merge(graph, gdef)
with self.cached_session():
p, ret = importer.import_graph_def(
gdef, return_elements=["Placeholder:0", "add:0"])
# Feed in a vector of two elements. Since the producer version
# is 21, a shape of {} is interpreted as "any shape". If
# producer version were 22, then we'd get a shape mismatch
# error.
self.assertAllEqual([2.0, 3.0], ret.eval(feed_dict={p: [1.0, 2.0]}))
class PlaceholderWithDefaultTest(test.TestCase):
@test_util.run_deprecated_v1
def testFullShape(self):
with self.session(force_gpu=test_util.is_gpu_available()):
p = array_ops.placeholder_with_default([[2, 2], [2, 2]], shape=[2, 2])
a = array_ops.identity(p)
self.assertAllEqual([[2, 2], [2, 2]], self.evaluate(a))
self.assertAllEqual(
[[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
with self.assertRaises(ValueError):
a.eval(feed_dict={p: [[6, 6, 6], [6, 6, 6]]})
@test_util.run_deprecated_v1
def testPartialShape(self):
with self.session(force_gpu=test_util.is_gpu_available()):
p = array_ops.placeholder_with_default([1, 2, 3], shape=[None])
a = array_ops.identity(p)
self.assertAllEqual([1, 2, 3], self.evaluate(a))
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
with self.assertRaises(ValueError):
a.eval(feed_dict={p: [[2, 2], [2, 2]]})
@test_util.run_deprecated_v1
def testNoShape(self):
with self.session(force_gpu=test_util.is_gpu_available()):
p = array_ops.placeholder_with_default([17], shape=None)
a = array_ops.identity(p)
self.assertAllEqual([17], self.evaluate(a))
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
self.assertAllEqual(
[[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
@test_util.run_deprecated_v1
def testGradient(self):
with self.session(force_gpu=test_util.is_gpu_available()):
x = array_ops.placeholder(dtypes_lib.float32, [5, 7])
y = array_ops.placeholder_with_default(x, None)
err = gradient_checker.compute_gradient_error(x, [5, 7], y, [5, 7])
self.assertLess(err, 1e-3)
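# A hedged numpy sketch of the single-element shape promotion exercised in
# ConstantTest.testPromotionShapes above: a scalar or length-1 value is
# broadcast to fill the requested shape. Illustrative only; constant_op also
# applies promotion rules not modeled here.
def _reference_promote(value, shape):
  return np.broadcast_to(np.asarray(value), shape)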
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/constant_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for morphological filtering operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class DilationTest(test.TestCase):
def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu):
"""Verifies the output values of the dilation function.
Args:
image: Input tensor with shape: [batch, in_height, in_width, channels].
kernel: Filter tensor with shape: [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
padding: Padding type.
out: Expected output.
use_gpu: Whether we are running on GPU.
"""
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.cached_session(use_gpu=use_gpu):
out_tensor = nn_ops.dilation2d(
constant_op.constant(image),
constant_op.constant(kernel),
strides=strides,
rates=rates,
padding=padding,
name="dilation2d")
self.assertAllClose(out, self.evaluate(out_tensor))
def _testDilationValidPadding(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 1, 1, 1]
out = [[[[.5]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def _testDilationSamePadding(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 2, 2, 1]
out = [[[[.5], [.6]], [[.7], [.8]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testDilationSamePaddingDepth(self, use_gpu):
# [1, 2, 2, 3]
image = [[[[.1, .2, .0], [.2, .3, .1]], [[.3, .4, .2], [.4, .5, .3]]]]
# [2, 2, 3]
kernel = [[[.4, .5, .3], [.3, .4, .2]], [[.1, .2, .0], [.0, .1, -.1]]]
# [1, 2, 2, 3]
out = [[[[.5, .7, .3], [.6, .8, .4]], [[.7, .9, .5], [.8, 1., .6]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testDilationSamePaddingBatch(self, use_gpu):
# [2, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]], [[[.2], [.3]], [[.4], [.5]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [2, 2, 2, 1]
out = [[[[.5], [.6]], [[.7], [.8]]], [[[.6], [.7]], [[.8], [.9]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testDilationValidPaddingNonSquareWindow(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [1, 2, 1]
kernel = [[[.4], [.3]]]
# [1, 2, 1, 1]
out = [[[[.5]], [[.7]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def _testDilationSamePaddingRate(self, use_gpu):
# [1, 3, 3, 1]
image = [[[[.1], [.2], [.3]], [[.4], [.5], [.6]], [[.7], [.8], [.9]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.2]]]
# Because rate = 2, the effective kernel is [3, 3, 1]:
# kernel_eff = [[[.4], [.0], [.3]],
# [[.0], [.0], [.0]],
# [[.1], [.0], [.2]]]
# [1, 3, 3, 1]
out = [[[[.7], [.8], [.6]], [[1.0], [1.1], [.9]], [[.8], [.9], [.9]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[2, 2],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testDilationValidPaddingUnevenStride(self, use_gpu):
# [1, 3, 4, 1]
image = [[[[.1], [.2], [.3], [.4]], [[.5], [.6], [.7], [.8]],
[[.9], [1.0], [1.1], [1.2]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.2]]]
# [1, 2, 2, 1]
out = [[[[.8], [1.0]], [[1.2], [1.4]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 2],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def testDilation(self):
for use_gpu in True, False:
self._testDilationValidPadding(use_gpu)
self._testDilationSamePadding(use_gpu)
self._testDilationSamePaddingDepth(use_gpu)
self._testDilationSamePaddingBatch(use_gpu)
self._testDilationValidPaddingNonSquareWindow(use_gpu)
self._testDilationSamePaddingRate(use_gpu)
self._testDilationValidPaddingUnevenStride(use_gpu)
def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides, rates,
padding, use_gpu):
"""Verifies the gradients of the dilation function.
Args:
image_shape: Input shape, [batch, in_height, in_width, channels].
kernel_shape: Filter shape, [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
padding: Padding type.
use_gpu: Whether we are running on GPU.
"""
assert image_shape[3] == kernel_shape[2]
np.random.seed(1) # Make it reproducible.
image = np.random.random_sample(image_shape).astype(np.float32)
kernel = np.random.random_sample(kernel_shape).astype(np.float32)
image_init = np.random.random_sample(image_shape).astype(np.float32)
kernel_init = np.random.random_sample(kernel_shape).astype(np.float32)
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.cached_session(use_gpu=use_gpu):
image_tensor = constant_op.constant(
image, shape=image_shape, name="input")
kernel_tensor = constant_op.constant(
kernel, shape=kernel_shape, name="filter")
out_tensor = nn_ops.dilation2d(
image_tensor,
kernel_tensor,
strides=strides,
rates=rates,
padding=padding,
name="dilation2d")
out_shape = self.evaluate(out_tensor).shape
# Small delta is necessary for argmax to remain the same.
err = gradient_checker.compute_gradient_error(
[image_tensor, kernel_tensor], [image_shape, kernel_shape],
out_tensor,
out_shape, [image_init, kernel_init],
delta=1e-3)
print("Dilation gradient error = %f" % err)
self.assertLess(err, 1e-4)
def _testDilationGradValidPadding_1x1x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[1, 1, 1],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
use_gpu=use_gpu)
def _testDilationGradSamePadding_1x1x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[1, 1, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testDilationGradSamePadding_1x1x2(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 2],
kernel_shape=[1, 1, 2],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testDilationGradValidPadding_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
use_gpu=use_gpu)
def _testDilationGradSamePadding_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testDilationGradSamePaddingBatch_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[4, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testDilationGradSamePadding_2x2x4(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 4],
kernel_shape=[2, 2, 4],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testDilationGrad(self):
for use_gpu in True, False:
self._testDilationGradValidPadding_1x1x1(use_gpu)
self._testDilationGradSamePadding_1x1x1(use_gpu)
self._testDilationGradSamePadding_1x1x2(use_gpu)
self._testDilationGradValidPadding_2x2x1(use_gpu)
self._testDilationGradSamePadding_2x2x1(use_gpu)
self._testDilationGradSamePaddingBatch_2x2x1(use_gpu)
self._testDilationGradSamePadding_2x2x4(use_gpu)
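# A minimal numpy reference for the VALID-padding, stride-1, rate-1 case of
# dilation2d on a single 2-D channel, useful for re-deriving the hand-written
# expectations above. A sketch under those assumptions, not the production
# kernel; the helper name is hypothetical.
def _reference_dilation2d_valid(image, kernel):
  # image: [height, width], kernel: [k_height, k_width]. Each output element
  # is the maximum of image + kernel over the corresponding window.
  out_h = image.shape[0] - kernel.shape[0] + 1
  out_w = image.shape[1] - kernel.shape[1] + 1
  out = np.empty((out_h, out_w), image.dtype)
  for i in range(out_h):
    for j in range(out_w):
      window = image[i:i + kernel.shape[0], j:j + kernel.shape[1]]
      out[i, j] = np.max(window + kernel)
  return out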
class ErosionTest(test.TestCase):
def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu):
"""Verifies the output values of the erosion function.
Args:
image: Input tensor with shape: [batch, in_height, in_width, channels].
kernel: Filter tensor with shape: [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
padding: Padding type.
out: Expected output.
use_gpu: Whether we are running on GPU.
"""
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.cached_session(use_gpu=use_gpu):
out_tensor = nn_ops.erosion2d(
constant_op.constant(image),
constant_op.constant(kernel),
strides=strides,
rates=rates,
padding=padding,
name="erosion2d")
self.assertAllClose(out, self.evaluate(out_tensor))
def _testErosionValidPadding(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 1, 1, 1]
out = [[[[.0]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def _testErosionSamePadding(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 2, 2, 1]
out = [[[[.0], [.1]], [[.3], [.4]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionSamePaddingDepth(self, use_gpu):
# [1, 2, 2, 3]
image = [[[[.1, .2, .0], [.2, .3, .1]], [[.3, .4, .2], [.4, .5, .3]]]]
# [2, 2, 3]
kernel = [[[.4, .5, .3], [.3, .4, .2]], [[.1, .2, .0], [.0, .1, -.1]]]
# [1, 2, 2, 3]
out = [[[[.0, .0, .0], [.1, .1, .1]], [[.3, .3, .3], [.4, .4, .4]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionSamePaddingBatch(self, use_gpu):
# [2, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]], [[[.2], [.3]], [[.4], [.5]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [2, 2, 2, 1]
out = [[[[.0], [.1]], [[.3], [.4]]], [[[.1], [.2]], [[.4], [.5]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionValidPaddingNonSquareWindow(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [1, 2, 1]
kernel = [[[.4], [.3]]]
# [1, 2, 1, 1]
out = [[[[-.2]], [[.0]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def _testErosionSamePaddingRate(self, use_gpu):
# [1, 3, 3, 1]
image = [[[[.1], [.2], [.3]], [[.4], [.5], [.6]], [[.7], [.8], [.9]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.2]]]
# Because rate = 2, the effective kernel is [3, 3, 1]:
# kernel_eff = [[[.4], [.0], [.3]],
# [[.0], [.0], [.0]],
# [[.1], [.0], [.2]]]
# [1, 3, 3, 1]
out = [[[[.1], [.1], [.2]], [[.1], [-.1], [.0]], [[.4], [.2], [.3]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[2, 2],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionValidPaddingUnevenStride(self, use_gpu):
# [1, 3, 4, 1]
image = [[[[.1], [.2], [.3], [.4]], [[.5], [.6], [.7], [.8]],
[[.9], [1.0], [1.1], [1.2]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.2]]]
# [1, 2, 2, 1]
out = [[[[-.1], [.1]], [[.3], [.5]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 2],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def testErosion(self):
for use_gpu in True, False:
self._testErosionValidPadding(use_gpu)
self._testErosionSamePadding(use_gpu)
self._testErosionSamePaddingDepth(use_gpu)
self._testErosionSamePaddingBatch(use_gpu)
self._testErosionValidPaddingNonSquareWindow(use_gpu)
self._testErosionSamePaddingRate(use_gpu)
self._testErosionValidPaddingUnevenStride(use_gpu)
def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides, rates,
padding, use_gpu):
"""Verifies the gradients of the erosion function.
Args:
image_shape: Input shape, [batch, in_height, in_width, channels].
kernel_shape: Filter shape, [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
padding: Padding type.
use_gpu: Whether we are running on GPU.
"""
assert image_shape[3] == kernel_shape[2]
np.random.seed(1) # Make it reproducible.
image = np.random.random_sample(image_shape).astype(np.float32)
kernel = np.random.random_sample(kernel_shape).astype(np.float32)
image_init = np.random.random_sample(image_shape).astype(np.float32)
kernel_init = np.random.random_sample(kernel_shape).astype(np.float32)
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.cached_session(use_gpu=use_gpu):
image_tensor = constant_op.constant(
image, shape=image_shape, name="input")
kernel_tensor = constant_op.constant(
kernel, shape=kernel_shape, name="filter")
out_tensor = nn_ops.erosion2d(
image_tensor,
kernel_tensor,
strides=strides,
rates=rates,
padding=padding,
name="erosion2d")
out_shape = self.evaluate(out_tensor).shape
# Small delta is necessary for argmax to remain the same.
err = gradient_checker.compute_gradient_error(
[image_tensor, kernel_tensor], [image_shape, kernel_shape],
out_tensor,
out_shape, [image_init, kernel_init],
delta=1e-3)
print("Erosion gradient error = %f" % err)
self.assertLess(err, 1e-4)
def _testErosionGradValidPadding_1x1x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[1, 1, 1],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
use_gpu=use_gpu)
def _testErosionGradSamePadding_1x1x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[1, 1, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradSamePadding_1x1x2(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 2],
kernel_shape=[1, 1, 2],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradValidPadding_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
use_gpu=use_gpu)
def _testErosionGradSamePadding_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradSamePaddingBatch_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[4, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradSamePadding_2x2x4(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 4],
kernel_shape=[2, 2, 4],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testErosionGrad(self):
for use_gpu in True, False:
self._testErosionGradValidPadding_1x1x1(use_gpu)
self._testErosionGradSamePadding_1x1x1(use_gpu)
self._testErosionGradSamePadding_1x1x2(use_gpu)
self._testErosionGradValidPadding_2x2x1(use_gpu)
self._testErosionGradSamePadding_2x2x1(use_gpu)
self._testErosionGradSamePaddingBatch_2x2x1(use_gpu)
self._testErosionGradSamePadding_2x2x4(use_gpu)
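# Erosion is the morphological dual of dilation: eroding an image by a kernel
# equals negating the dilation of the negated image by the kernel reflected
# in both spatial axes. A hedged sketch of that identity for the same VALID,
# stride-1, rate-1, single-channel case as _reference_dilation2d_valid above;
# both helpers are illustrative only.
def _reference_erosion2d_valid(image, kernel):
  return -_reference_dilation2d_valid(-image, kernel[::-1, ::-1])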
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/morphological_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReorder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SparseReorderTest(test.TestCase):
def _SparseTensorPlaceholder(self):
return sparse_tensor.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.float64),
array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.float64)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor.SparseTensorValue(ind, val, shape)
def testStaticShapeInfoPreserved(self):
sp_input = sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_5x6(np.arange(6)))
self.assertAllEqual((5, 6), sp_input.get_shape())
sp_output = sparse_ops.sparse_reorder(sp_input)
self.assertAllEqual((5, 6), sp_output.get_shape())
def testAlreadyInOrder(self):
with self.session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6(np.arange(6))
sp_output = sparse_ops.sparse_reorder(input_val)
output_val = self.evaluate(sp_output)
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
@test_util.run_deprecated_v1
def testFeedAlreadyInOrder(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6(np.arange(6))
sp_output = sparse_ops.sparse_reorder(sp_input)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
def testOutOfOrder(self):
expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
with self.session(use_gpu=False) as sess:
for _ in range(5): # To test various random permutations
input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
sp_output = sparse_ops.sparse_reorder(input_val)
output_val = self.evaluate(sp_output)
self.assertAllEqual(output_val.indices, expected_output_val.indices)
self.assertAllEqual(output_val.values, expected_output_val.values)
self.assertAllEqual(output_val.dense_shape,
expected_output_val.dense_shape)
@test_util.run_deprecated_v1
def testFeedOutOfOrder(self):
expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
with self.session(use_gpu=False) as sess:
for _ in range(5): # To test various random permutations
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
sp_output = sparse_ops.sparse_reorder(sp_input)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, expected_output_val.indices)
self.assertAllEqual(output_val.values, expected_output_val.values)
self.assertAllEqual(output_val.dense_shape,
expected_output_val.dense_shape)
@test_util.run_deprecated_v1
def testGradients(self):
with self.session(use_gpu=False):
for _ in range(5): # To test various random permutations
input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
sp_input = sparse_tensor.SparseTensor(input_val.indices,
input_val.values,
input_val.dense_shape)
sp_output = sparse_ops.sparse_reorder(sp_input)
err = gradient_checker.compute_gradient_error(
sp_input.values,
input_val.values.shape,
sp_output.values,
input_val.values.shape,
x_init_value=input_val.values)
self.assertLess(err, 1e-11)
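# What sparse_reorder computes, as a hedged numpy sketch: sort the entries
# into canonical row-major (lexicographic) index order and carry the values
# along. np.lexsort treats its last key as primary, hence the reversal of
# the index columns. The helper is hypothetical.
def _reference_reorder(indices, values):
  order = np.lexsort(indices.T[::-1])
  return indices[order], values[order]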
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/sparse_reorder_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.bitcast."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class BitcastTest(test.TestCase):
def _testBitcast(self, x, datatype, shape):
with test_util.use_gpu():
tf_ans = array_ops.bitcast(x, datatype)
out = self.evaluate(tf_ans)
buff_after = memoryview(out).tobytes()
buff_before = memoryview(x).tobytes()
self.assertEqual(buff_before, buff_after)
self.assertEqual(tf_ans.get_shape(), shape)
self.assertEqual(tf_ans.dtype, datatype)
def testSmaller(self):
x = np.random.rand(3, 2)
datatype = dtypes.int8
shape = [3, 2, 8]
self._testBitcast(x, datatype, shape)
def testLarger(self):
x = np.arange(16, dtype=np.int8).reshape([4, 4])
datatype = dtypes.int32
shape = [4]
self._testBitcast(x, datatype, shape)
def testSameDtype(self):
x = np.random.rand(3, 4)
shape = [3, 4]
self._testBitcast(x, x.dtype, shape)
def testSameSize(self):
x = np.random.rand(3, 4)
shape = [3, 4]
self._testBitcast(x, dtypes.int64, shape)
@test_util.run_deprecated_v1
def testErrors(self):
x = np.zeros([1, 1], np.int8)
datatype = dtypes.int32
with self.assertRaisesRegexp(ValueError, "Cannot bitcast due to shape"):
array_ops.bitcast(x, datatype, None)
def testEmpty(self):
x = np.ones([], np.int32)
datatype = dtypes.int8
shape = [4]
self._testBitcast(x, datatype, shape)
@test_util.run_deprecated_v1
def testUnknown(self):
x = array_ops.placeholder(dtypes.float32)
datatype = dtypes.int8
array_ops.bitcast(x, datatype, None)
def testQuantizedType(self):
shape = [3, 4]
x = np.zeros(shape, np.uint16)
datatype = dtypes.quint16
self._testBitcast(x, datatype, shape)
def testUnsignedType(self):
shape = [3, 4]
x = np.zeros(shape, np.int64)
datatype = dtypes.uint64
self._testBitcast(x, datatype, shape)
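# A hedged numpy analogue of tf.bitcast for C-contiguous, non-scalar arrays:
# ndarray.view reinterprets the same bytes as another dtype, and the reshape
# mirrors bitcast's shape rule (a smaller target dtype appends a trailing
# axis; a larger one consumes the last axis entirely). Illustrative only,
# and assumes host endianness matches the kernel's.
def _reference_bitcast(x, out_dtype):
  out_size = np.dtype(out_dtype).itemsize
  if out_size < x.dtype.itemsize:
    return x.view(out_dtype).reshape(x.shape + (x.dtype.itemsize // out_size,))
  if out_size > x.dtype.itemsize:
    return x.view(out_dtype).reshape(x.shape[:-1])
  return x.view(out_dtype)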
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/bitcast_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.numerics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.platform import test
class VerifyTensorAllFiniteTest(test.TestCase):
def testVerifyTensorAllFiniteSucceeds(self):
x_shape = [5, 4]
x = np.random.random_sample(x_shape).astype(np.float32)
with test_util.use_gpu():
t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
t_verified = numerics.verify_tensor_all_finite(t,
"Input is not a number.")
self.assertAllClose(x, self.evaluate(t_verified))
def testVerifyTensorAllFiniteFails(self):
x_shape = [5, 4]
x = np.random.random_sample(x_shape).astype(np.float32)
my_msg = "Input is not a number."
# Test NaN.
x[0] = np.nan
with test_util.use_gpu():
with self.assertRaisesOpError(my_msg):
t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
t_verified = numerics.verify_tensor_all_finite(t, my_msg)
self.evaluate(t_verified)
# Test Inf.
x[0] = np.inf
with test_util.use_gpu():
with self.assertRaisesOpError(my_msg):
t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
t_verified = numerics.verify_tensor_all_finite(t, my_msg)
self.evaluate(t_verified)
@test_util.run_v1_only("b/120545219")
class NumericsTest(test.TestCase):
def testInf(self):
with self.session(graph=ops.Graph()):
t1 = constant_op.constant(1.0)
t2 = constant_op.constant(0.0)
a = math_ops.div(t1, t2)
check = numerics.add_check_numerics_ops()
a = control_flow_ops.with_dependencies([check], a)
with self.assertRaisesOpError("Inf"):
self.evaluate(a)
def testNaN(self):
with self.session(graph=ops.Graph()):
t1 = constant_op.constant(0.0)
t2 = constant_op.constant(0.0)
a = math_ops.div(t1, t2)
check = numerics.add_check_numerics_ops()
a = control_flow_ops.with_dependencies([check], a)
with self.assertRaisesOpError("NaN"):
self.evaluate(a)
def testBoth(self):
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([1.0, 0.0])
t2 = constant_op.constant([0.0, 0.0])
a = math_ops.div(t1, t2)
check = numerics.add_check_numerics_ops()
a = control_flow_ops.with_dependencies([check], a)
with self.assertRaisesOpError("Inf and NaN"):
self.evaluate(a)
def testPassThrough(self):
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
checked = array_ops.check_numerics(t1, message="pass through test")
value = self.evaluate(checked)
self.assertAllEqual(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), value)
self.assertEqual([2, 3], checked.get_shape())
def testControlFlowCond(self):
predicate = array_ops.placeholder(dtypes.bool, shape=[])
_ = control_flow_ops.cond(predicate,
lambda: constant_op.constant([37.]),
lambda: constant_op.constant([42.]))
with self.assertRaisesRegexp(
ValueError,
r"`tf\.add_check_numerics_ops\(\) is not compatible with "
r"TensorFlow control flow operations such as `tf\.cond\(\)` "
r"or `tf.while_loop\(\)`\."):
numerics.add_check_numerics_ops()
def testControlFlowWhile(self):
predicate = array_ops.placeholder(dtypes.bool, shape=[])
_ = control_flow_ops.while_loop(lambda _: predicate,
lambda _: constant_op.constant([37.]),
[constant_op.constant([42.])])
with self.assertRaisesRegexp(
ValueError,
r"`tf\.add_check_numerics_ops\(\) is not compatible with "
r"TensorFlow control flow operations such as `tf\.cond\(\)` "
r"or `tf.while_loop\(\)`\."):
numerics.add_check_numerics_ops()
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/numerics_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.unique_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.platform import test
class UniqueTest(test.TestCase):
def testInt32(self):
x = np.random.randint(2, high=10, size=7000)
with self.cached_session() as sess:
y, idx = array_ops.unique(x)
tf_y, tf_idx = self.evaluate([y, idx])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
def testInt32OutIdxInt64(self):
x = np.random.randint(2, high=10, size=7000)
with self.cached_session() as sess:
y, idx = array_ops.unique(x, out_idx=dtypes.int64)
tf_y, tf_idx = self.evaluate([y, idx])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
def testString(self):
indx = np.random.randint(65, high=122, size=7000)
x = [chr(i) for i in indx]
with self.cached_session() as sess:
y, idx = array_ops.unique(x)
tf_y, tf_idx = self.evaluate([y, idx])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]].decode('ascii'))
def testInt32Axis(self):
for dtype in [np.int32, np.int64]:
x = np.array([[1, 0, 0], [1, 0, 0], [2, 0, 0]])
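# Rows 0 and 1 are duplicates, so unique along axis 0 keeps two rows;
# columns 1 and 2 are duplicates, so unique along axis 1 keeps two columns.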
with self.cached_session() as sess:
y0, idx0 = gen_array_ops.unique_v2(x, axis=np.array([0], dtype))
tf_y0, tf_idx0 = self.evaluate([y0, idx0])
y1, idx1 = gen_array_ops.unique_v2(x, axis=np.array([1], dtype))
tf_y1, tf_idx1 = self.evaluate([y1, idx1])
self.assertAllEqual(tf_y0, np.array([[1, 0, 0], [2, 0, 0]]))
self.assertAllEqual(tf_idx0, np.array([0, 0, 1]))
self.assertAllEqual(tf_y1, np.array([[1, 0], [1, 0], [2, 0]]))
self.assertAllEqual(tf_idx1, np.array([0, 1, 1]))
def testInt32V2(self):
# This test is only temporary; once V2 is used
# by default, the axis will be wrapped to allow `axis=None`.
x = np.random.randint(2, high=10, size=7000)
with self.cached_session() as sess:
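# An empty `axis` tensor makes unique_v2 deduplicate individual elements
# (like tf.unique) rather than slices along an axis.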
y, idx = gen_array_ops.unique_v2(x, axis=np.array([], np.int32))
tf_y, tf_idx = self.evaluate([y, idx])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
def testBool(self):
x = np.random.choice([True, False], size=7000)
with self.cached_session() as sess:
y, idx = array_ops.unique(x)
tf_y, tf_idx = self.evaluate([y, idx])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
def testBoolV2(self):
x = np.random.choice([True, False], size=7000)
with self.cached_session() as sess:
y, idx = gen_array_ops.unique_v2(x, axis=np.array([], np.int32))
tf_y, tf_idx = self.evaluate([y, idx])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
class UniqueWithCountsTest(test.TestCase):
def testInt32(self):
x = np.random.randint(2, high=10, size=7000)
with self.cached_session() as sess:
y, idx, count = array_ops.unique_with_counts(x)
tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
for value, count in zip(tf_y, tf_count):
self.assertEqual(count, np.sum(x == value))
def testInt32OutIdxInt64(self):
x = np.random.randint(2, high=10, size=7000)
with self.cached_session() as sess:
y, idx, count = array_ops.unique_with_counts(x, out_idx=dtypes.int64)
tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
for value, count in zip(tf_y, tf_count):
self.assertEqual(count, np.sum(x == value))
def testString(self):
indx = np.random.randint(65, high=122, size=7000)
x = [chr(i) for i in indx]
with self.cached_session() as sess:
y, idx, count = array_ops.unique_with_counts(x)
tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]].decode('ascii'))
for value, count in zip(tf_y, tf_count):
v = [1 if x[i] == value.decode('ascii') else 0 for i in range(7000)]
self.assertEqual(count, sum(v))
def testInt32Axis(self):
for dtype in [np.int32, np.int64]:
x = np.array([[1, 0, 0], [1, 0, 0], [2, 0, 0]])
with self.cached_session() as sess:
y0, idx0, count0 = gen_array_ops.unique_with_counts_v2(
x, axis=np.array([0], dtype))
tf_y0, tf_idx0, tf_count0 = self.evaluate([y0, idx0, count0])
y1, idx1, count1 = gen_array_ops.unique_with_counts_v2(
x, axis=np.array([1], dtype))
tf_y1, tf_idx1, tf_count1 = self.evaluate([y1, idx1, count1])
self.assertAllEqual(tf_y0, np.array([[1, 0, 0], [2, 0, 0]]))
self.assertAllEqual(tf_idx0, np.array([0, 0, 1]))
self.assertAllEqual(tf_count0, np.array([2, 1]))
self.assertAllEqual(tf_y1, np.array([[1, 0], [1, 0], [2, 0]]))
self.assertAllEqual(tf_idx1, np.array([0, 1, 1]))
self.assertAllEqual(tf_count1, np.array([1, 2]))
def testInt32V2(self):
# This test is only temporary, once V2 is used
# by default, the axis will be wrapped to allow `axis=None`.
x = np.random.randint(2, high=10, size=7000)
with self.cached_session() as sess:
y, idx, count = gen_array_ops.unique_with_counts_v2(
x, axis=np.array([], np.int32))
tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
for value, count in zip(tf_y, tf_count):
self.assertEqual(count, np.sum(x == value))
def testBool(self):
x = np.random.choice([True, False], size=7000)
with self.cached_session() as sess:
y, idx, count = array_ops.unique_with_counts(x)
tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
for value, count in zip(tf_y, tf_count):
self.assertEqual(count, np.sum(x == value))
def testBoolV2(self):
x = np.random.choice([True, False], size=7000)
with self.cached_session() as sess:
y, idx, count = gen_array_ops.unique_with_counts_v2(
x, axis=np.array([], np.int32))
tf_y, tf_idx, tf_count = self.evaluate([y, idx, count])
self.assertEqual(len(x), len(tf_idx))
self.assertEqual(len(tf_y), len(np.unique(x)))
for i in range(len(x)):
self.assertEqual(x[i], tf_y[tf_idx[i]])
for value, count in zip(tf_y, tf_count):
self.assertEqual(count, np.sum(x == value))
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/unique_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image.extract_glimpse()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test
class ExtractGlimpseTest(test.TestCase):
def _VerifyValues(self, tensor_in_sizes, glimpse_sizes, offsets,
expected_rows, expected_cols):
"""Verifies the output values of the glimpse extraction kernel.
Args:
tensor_in_sizes: Input tensor dimensions in [input_rows, input_cols].
glimpse_sizes: Dimensions of the glimpse in [glimpse_rows, glimpse_cols].
offsets: Relative location of the center of the glimpse in the input
image expressed as [row_offset, col_offset].
expected_rows: A list containing the expected row numbers (None for
out of bound entries that are expected to be replaced by uniform
random entries in [0,1) ).
expected_cols: Same as expected_rows, but for column numbers.
"""
rows = tensor_in_sizes[0]
cols = tensor_in_sizes[1]
# Row Tensor with entries by row.
# [[ 1 1 1 ... ]
# [ 2 2 2 ... ]
# [ 3 3 3 ... ]
# [ ...
# ]
t_rows = array_ops.tile(
[[1.0 * r] for r in range(1, rows + 1)], [1, cols], name='tile_rows')
# Shuffle to switch to a convention of (batch_size, height, width, depth).
t_rows_4d = array_ops.transpose(
array_ops.expand_dims(array_ops.expand_dims(t_rows, 0), 3),
[0, 2, 1, 3])
# Column Tensor with entries by column.
# [[ 1 2 3 4 ... ]
# [ 1 2 3 4 ... ]
# [ 1 2 3 4 ... ]
# [ ... ]
# ]
t_cols = array_ops.tile(
[[1.0 * r for r in range(1, cols + 1)]], [rows, 1], name='tile_cols')
# Shuffle to switch to a convention of (batch_size, height, width, depth).
t_cols_4d = array_ops.transpose(
array_ops.expand_dims(array_ops.expand_dims(t_cols, 0), 3),
[0, 2, 1, 3])
# extract_glimpses from Row and Column Tensor, respectively.
# Swap glimpse_sizes and offsets from this test's (row, col) convention
# to TensorFlow's (height, width) convention.
t1 = constant_op.constant([glimpse_sizes[1], glimpse_sizes[0]], shape=[2])
t2 = constant_op.constant([offsets[1], offsets[0]], shape=[1, 2])
glimpse_rows = (array_ops.transpose(
image_ops.extract_glimpse(t_rows_4d, t1, t2), [0, 2, 1, 3]))
glimpse_cols = (array_ops.transpose(
image_ops.extract_glimpse(t_cols_4d, t1, t2), [0, 2, 1, 3]))
# Evaluate the TensorFlow Graph.
with self.cached_session() as sess:
value_rows, value_cols = self.evaluate([glimpse_rows, glimpse_cols])
# Check dimensions of returned glimpse.
self.assertEqual(value_rows.shape[1], glimpse_sizes[0])
self.assertEqual(value_rows.shape[2], glimpse_sizes[1])
self.assertEqual(value_cols.shape[1], glimpse_sizes[0])
self.assertEqual(value_cols.shape[2], glimpse_sizes[1])
# Check entries.
min_random_val = 0
max_random_val = max(rows, cols)
for i in range(glimpse_sizes[0]):
for j in range(glimpse_sizes[1]):
if expected_rows[i] is None or expected_cols[j] is None:
self.assertGreaterEqual(value_rows[0][i][j][0], min_random_val)
self.assertLessEqual(value_rows[0][i][j][0], max_random_val)
self.assertGreaterEqual(value_cols[0][i][j][0], min_random_val)
self.assertLessEqual(value_cols[0][i][j][0], max_random_val)
else:
self.assertEqual(value_rows[0][i][j][0], expected_rows[i])
self.assertEqual(value_cols[0][i][j][0], expected_cols[j])
def testCenterGlimpse(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[3, 5],
offsets=[0.0, 0.0],
expected_rows=[20, 21, 22],
expected_cols=[29, 30, 31, 32, 33])
def testEmptyTensor(self):
empty_image = np.zeros((0, 4, 3, 0))
offsets = np.zeros((0, 2))
with self.cached_session():
result = image_ops.extract_glimpse(empty_image, [1, 1], offsets)
self.assertAllEqual(
np.zeros((0, 1, 1, 0), dtype=np.float32), self.evaluate(result))
def testLargeCenterGlimpse(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[41, 61],
offsets=[0.0, 0.0],
expected_rows=list(range(1, 42)),
expected_cols=list(range(1, 62)))
def testTooLargeCenterGlimpse(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[43, 63],
offsets=[0.0, 0.0],
expected_rows=[None] + list(range(1, 42)) + [None],
expected_cols=[None] + list(range(1, 62)) + [None])
def testGlimpseFullOverlap(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[3, 5],
offsets=[0.1, 0.3],
expected_rows=[22, 23, 24],
expected_cols=[38, 39, 40, 41, 42])
def testGlimpseFullOverlap2(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 3],
offsets=[-0.7, -0.7],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[8, 9, 10])
def testGlimpseBeforeLeftMargin(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 5],
offsets=[-0.7, -0.9],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[1, 2, 3, 4, 5])
def testGlimpseLowerRightCorner(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[7, 5],
offsets=[1.0, 1.0],
expected_rows=[38, 39, 40, 41, None, None, None],
expected_cols=[59, 60, 61, None, None])
def testGlimpseNoOverlap(self):
self._VerifyValues(
tensor_in_sizes=[20, 30],
glimpse_sizes=[3, 3],
offsets=[-2.0, 2.0],
expected_rows=[None, None, None],
expected_cols=[None, None, None])
def testGlimpseOnLeftMargin(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 7],
offsets=[-0.7, -1.0],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[None, None, None, 1, 2, 3, 4])
def testGlimpseUpperMargin(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[7, 5],
offsets=[-1, 0.9],
expected_rows=[None, None, None, 1, 2, 3, 4],
expected_cols=[56, 57, 58, 59, 60])
def testGlimpseNoiseZero(self):
# Image:
# [ 0. 1. 2. 3. 4.]
# [ 5. 6. 7. 8. 9.]
# [ 10. 11. 12. 13. 14.]
# [ 15. 16. 17. 18. 19.]
# [ 20. 21. 22. 23. 24.]
img = constant_op.constant(
np.arange(25).reshape((1, 5, 5, 1)), dtype=dtypes.float32)
with self.test_session():
# Result 1:
# [ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]
result1 = image_ops.extract_glimpse_v2(
img, [3, 3], [[-2, 2]],
centered=False,
normalized=False,
noise='zero')
self.assertAllEqual(
np.asarray([[0, 0, 0], [0, 0, 0], [0, 0, 0]]),
self.evaluate(result1)[0, :, :, 0])
# Result 2:
# [ 0. 0. 0. 0. 0. 0. 0.]
# [ 0. 0. 1. 2. 3. 4. 0.]
# [ 0. 5. 6. 7. 8. 9. 0.]
# [ 0. 10. 11. 12. 13. 14. 0.]
# [ 0. 15. 16. 17. 18. 19. 0.]
# [ 0. 20. 21. 22. 23. 24. 0.]
# [ 0. 0. 0. 0. 0. 0. 0.]
result2 = image_ops.extract_glimpse_v2(
img, [7, 7], [[0, 0]], normalized=False, noise='zero')
self.assertAllEqual(
np.asarray([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 2, 3, 4, 0],
[0, 5, 6, 7, 8, 9, 0], [0, 10, 11, 12, 13, 14, 0],
[0, 15, 16, 17, 18, 19, 0], [0, 20, 21, 22, 23, 24, 0],
[0, 0, 0, 0, 0, 0, 0]]),
self.evaluate(result2)[0, :, :, 0])
def testGlimpseNegativeInput(self):
img = np.arange(9).reshape([1, 3, 3, 1])
with self.test_session():
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
result = image_ops.extract_glimpse_v2(
img, size=[1023, -63], offsets=[1023, 63],
centered=False, normalized=False)
self.evaluate(result)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/attention_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeBmpOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test
class DecodeBmpOpTest(test.TestCase):
def testex1(self):
img_bytes = [[[0, 0, 255], [0, 255, 0]], [[255, 0, 0], [255, 255, 255]]]
# Encoded BMP bytes from Wikipedia
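# Layout: 14-byte file header (magic, file size 0x46 = 70, pixel data
# offset 0x36 = 54), 40-byte BITMAPINFOHEADER (2 x 2, 1 plane, 24 bpp),
# then bottom-up rows of BGR pixels, each row padded to a 4-byte boundary.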
encoded_bytes = [
0x42, 0x40,
0x46, 0, 0, 0,
0, 0,
0, 0,
0x36, 0, 0, 0,
0x28, 0, 0, 0,
0x2, 0, 0, 0,
0x2, 0, 0, 0,
0x1, 0,
0x18, 0,
0, 0, 0, 0,
0x10, 0, 0, 0,
0x13, 0xb, 0, 0,
0x13, 0xb, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0xff,
0xff, 0xff, 0xff,
0, 0,
0xff, 0, 0,
0, 0xff, 0,
0, 0,
]
byte_string = bytes(bytearray(encoded_bytes))
img_in = constant_op.constant(byte_string, dtype=dtypes.string)
decode = array_ops.squeeze(image_ops.decode_bmp(img_in))
with self.cached_session():
decoded = self.evaluate(decode)
self.assertAllEqual(decoded, img_bytes)
def testGrayscale(self):
img_bytes = [[[255], [0]], [[255], [0]]]
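# 8-bit BMP rows are padded to 4-byte boundaries, so each 2-pixel row in
# the encoding below ends with two padding bytes.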
encoded_bytes = [
0x42,
0x40,
0x3d,
0,
0,
0,
0,
0,
0,
0,
0x36,
0,
0,
0,
0x28,
0,
0,
0,
0x2,
0,
0,
0,
0x2,
0,
0,
0,
0x1,
0,
0x8,
0,
0,
0,
0,
0,
0x10,
0,
0,
0,
0x13,
0xb,
0,
0,
0x13,
0xb,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0xff,
0,
0,
0,
0xff,
0,
0,
0,
]
byte_string = bytes(bytearray(encoded_bytes))
img_in = constant_op.constant(byte_string, dtype=dtypes.string)
decode = image_ops.decode_bmp(img_in)
with self.cached_session():
decoded = self.evaluate(decode)
self.assertAllEqual(decoded, img_bytes)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/decode_bmp_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BAvSIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for summary V1 tensor op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.summary import summary as summary_lib
class SummaryV1TensorOpTest(test.TestCase):
def _SummarySingleValue(self, s):
summ = summary_pb2.Summary()
summ.ParseFromString(s)
self.assertEqual(len(summ.value), 1)
return summ.value[0]
def _AssertNumpyEq(self, actual, expected):
self.assertTrue(np.array_equal(actual, expected))
def testTags(self):
with self.cached_session() as sess:
c = constant_op.constant(1)
s1 = summary_lib.tensor_summary("s1", c)
with ops.name_scope("foo"):
s2 = summary_lib.tensor_summary("s2", c)
with ops.name_scope("zod"):
s3 = summary_lib.tensor_summary("s3", c)
s4 = summary_lib.tensor_summary("TensorSummary", c)
summ1, summ2, summ3, summ4 = self.evaluate([s1, s2, s3, s4])
v1 = self._SummarySingleValue(summ1)
self.assertEqual(v1.tag, "s1")
v2 = self._SummarySingleValue(summ2)
self.assertEqual(v2.tag, "foo/s2")
v3 = self._SummarySingleValue(summ3)
self.assertEqual(v3.tag, "foo/zod/s3")
v4 = self._SummarySingleValue(summ4)
self.assertEqual(v4.tag, "foo/zod/TensorSummary")
def testScalarSummary(self):
with self.cached_session() as sess:
const = constant_op.constant(10.0)
summ = summary_lib.tensor_summary("foo", const)
result = self.evaluate(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, 10)
def testStringSummary(self):
s = six.b("foobar")
with self.cached_session() as sess:
const = constant_op.constant(s)
summ = summary_lib.tensor_summary("foo", const)
result = self.evaluate(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, s)
def testManyScalarSummary(self):
with self.cached_session() as sess:
const = array_ops.ones([5, 5, 5])
summ = summary_lib.tensor_summary("foo", const)
result = self.evaluate(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, np.ones([5, 5, 5]))
def testManyStringSummary(self):
strings = [[six.b("foo bar"), six.b("baz")], [six.b("zoink"), six.b("zod")]]
with self.cached_session() as sess:
const = constant_op.constant(strings)
summ = summary_lib.tensor_summary("foo", const)
result = self.evaluate(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, strings)
def testManyBools(self):
bools = [True, True, True, False, False, False]
with self.cached_session() as sess:
const = constant_op.constant(bools)
summ = summary_lib.tensor_summary("foo", const)
result = self.evaluate(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, bools)
def testSummaryDescriptionAndDisplayName(self):
with self.cached_session() as sess:
def get_description(summary_op):
summ_str = self.evaluate(summary_op)
summ = summary_pb2.Summary()
summ.ParseFromString(summ_str)
return summ.value[0].metadata
const = constant_op.constant(1)
# Default case; no description or display name
simple_summary = summary_lib.tensor_summary("simple", const)
descr = get_description(simple_summary)
self.assertEqual(descr.display_name, "")
self.assertEqual(descr.summary_description, "")
# Values are provided via function args
with_values = summary_lib.tensor_summary(
"simple",
const,
display_name="my name",
summary_description="my description")
descr = get_description(with_values)
self.assertEqual(descr.display_name, "my name")
self.assertEqual(descr.summary_description, "my description")
# Values are provided via the SummaryMetadata arg
metadata = summary_pb2.SummaryMetadata()
metadata.display_name = "my name"
metadata.summary_description = "my description"
with_metadata = summary_lib.tensor_summary(
"simple", const, summary_metadata=metadata)
descr = get_description(with_metadata)
self.assertEqual(descr.display_name, "my name")
self.assertEqual(descr.summary_description, "my description")
# If both SummaryMetadata and explicit args are provided, the args win
overwrite = summary_lib.tensor_summary(
"simple",
const,
summary_metadata=metadata,
display_name="overwritten",
summary_description="overwritten")
descr = get_description(overwrite)
self.assertEqual(descr.display_name, "overwritten")
self.assertEqual(descr.summary_description, "overwritten")
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/summary_v1_tensor_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.linalg_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.platform import test
def _AddTest(test_class, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test_class, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test_class, test_name, fn)
def _RandomPDMatrix(n, rng, dtype=np.float64):
"""Random positive definite matrix."""
temp = rng.randn(n, n).astype(dtype)
if dtype in [np.complex64, np.complex128]:
temp.imag = rng.randn(n, n)
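# np.conj(temp).dot(temp.T) equals B^H @ B with B = temp.T, so the result
# is Hermitian positive semi-definite (and PD almost surely for random T).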
return np.conj(temp).dot(temp.T)
class CholeskySolveTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
@test_util.run_deprecated_v1
def test_works_with_five_different_random_pos_def_matrices(self):
for n in range(1, 6):
for np_type, atol in [(np.float32, 0.05), (np.float64, 1e-5)]:
with self.session(use_gpu=True):
# Create 2 x n x n matrix
array = np.array(
[_RandomPDMatrix(n, self.rng),
_RandomPDMatrix(n, self.rng)]).astype(np_type)
chol = linalg_ops.cholesky(array)
for k in range(1, 3):
rhs = self.rng.randn(2, n, k).astype(np_type)
x = linalg_ops.cholesky_solve(chol, rhs)
self.assertAllClose(
rhs, math_ops.matmul(array, x).eval(), atol=atol)
class LogdetTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(42)
@test_util.run_deprecated_v1
def test_works_with_five_different_random_pos_def_matrices(self):
for n in range(1, 6):
for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
(np.complex64, 0.05), (np.complex128, 1e-5)]:
matrix = _RandomPDMatrix(n, self.rng, np_dtype)
_, logdet_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
logdet_tf = linalg.logdet(matrix)
self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol)
def test_works_with_underflow_case(self):
for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
(np.complex64, 0.05), (np.complex128, 1e-5)]:
matrix = (np.eye(20) * 1e-6).astype(np_dtype)
_, logdet_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
logdet_tf = linalg.logdet(matrix)
self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol)
class SlogdetTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(42)
@test_util.run_deprecated_v1
def test_works_with_five_different_random_pos_def_matrices(self):
for n in range(1, 6):
for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
(np.complex64, 0.05), (np.complex128, 1e-5)]:
matrix = _RandomPDMatrix(n, self.rng, np_dtype)
sign_np, log_abs_det_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
sign_tf, log_abs_det_tf = linalg.slogdet(matrix)
self.assertAllClose(
log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol)
self.assertAllClose(sign_np, self.evaluate(sign_tf), atol=atol)
def test_works_with_underflow_case(self):
for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
(np.complex64, 0.05), (np.complex128, 1e-5)]:
matrix = (np.eye(20) * 1e-6).astype(np_dtype)
sign_np, log_abs_det_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
sign_tf, log_abs_det_tf = linalg.slogdet(matrix)
self.assertAllClose(
log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol)
self.assertAllClose(sign_np, self.evaluate(sign_tf), atol=atol)
class AdjointTest(test.TestCase):
def test_compare_to_numpy(self):
for dtype in np.float32, np.float64, np.complex64, np.complex128:
matrix_np = np.array([[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j,
6 + 6j]]).astype(dtype)
expected_transposed = np.conj(matrix_np.T)
with self.session():
matrix = ops.convert_to_tensor(matrix_np)
transposed = linalg.adjoint(matrix)
self.assertEqual((3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, self.evaluate(transposed))
class EyeTest(parameterized.TestCase, test.TestCase):
def testShapeInferenceNoBatch(self):
self.assertEqual((2, 2), linalg_ops.eye(num_rows=2).shape)
self.assertEqual((2, 3), linalg_ops.eye(num_rows=2, num_columns=3).shape)
def testShapeInferenceStaticBatch(self):
batch_shape = (2, 3)
self.assertEqual(
(2, 3, 2, 2),
linalg_ops.eye(num_rows=2, batch_shape=batch_shape).shape)
self.assertEqual(
(2, 3, 2, 3),
linalg_ops.eye(
num_rows=2, num_columns=3, batch_shape=batch_shape).shape)
@parameterized.named_parameters(
("DynamicRow",
lambda: array_ops.placeholder_with_default(2, shape=None),
lambda: None),
("DynamicRowStaticColumn",
lambda: array_ops.placeholder_with_default(2, shape=None),
lambda: 3),
("StaticRowDynamicColumn",
lambda: 2,
lambda: array_ops.placeholder_with_default(3, shape=None)),
("DynamicRowDynamicColumn",
lambda: array_ops.placeholder_with_default(2, shape=None),
lambda: array_ops.placeholder_with_default(3, shape=None)))
def testShapeInferenceStaticBatchWith(self, num_rows_fn, num_columns_fn):
num_rows = num_rows_fn()
num_columns = num_columns_fn()
batch_shape = (2, 3)
identity_matrix = linalg_ops.eye(
num_rows=num_rows,
num_columns=num_columns,
batch_shape=batch_shape)
self.assertEqual(4, identity_matrix.shape.ndims)
self.assertEqual((2, 3), identity_matrix.shape[:2])
if num_rows is not None and not isinstance(num_rows, ops.Tensor):
self.assertEqual(2, identity_matrix.shape[-2])
if num_columns is not None and not isinstance(num_columns, ops.Tensor):
self.assertEqual(3, identity_matrix.shape[-1])
@parameterized.parameters(
itertools.product(
# num_rows
[0, 1, 2, 5],
# num_columns
[None, 0, 1, 2, 5],
# batch_shape
[None, [], [2], [2, 3]],
# dtype
[
dtypes.int32,
dtypes.int64,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.complex128
])
)
def test_eye_no_placeholder(self, num_rows, num_columns, batch_shape, dtype):
eye_np = np.eye(num_rows, M=num_columns, dtype=dtype.as_numpy_dtype)
if batch_shape is not None:
eye_np = np.tile(eye_np, batch_shape + [1, 1])
eye_tf = self.evaluate(linalg_ops.eye(
num_rows,
num_columns=num_columns,
batch_shape=batch_shape,
dtype=dtype))
self.assertAllEqual(eye_np, eye_tf)
@parameterized.parameters(
itertools.product(
# num_rows
[0, 1, 2, 5],
# num_columns
[0, 1, 2, 5],
# batch_shape
[[], [2], [2, 3]],
# dtype
[
dtypes.int32,
dtypes.int64,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.complex128
])
)
@test_util.run_deprecated_v1
def test_eye_with_placeholder(
self, num_rows, num_columns, batch_shape, dtype):
eye_np = np.eye(num_rows, M=num_columns, dtype=dtype.as_numpy_dtype)
eye_np = np.tile(eye_np, batch_shape + [1, 1])
num_rows_placeholder = array_ops.placeholder(
dtypes.int32, name="num_rows")
num_columns_placeholder = array_ops.placeholder(
dtypes.int32, name="num_columns")
batch_shape_placeholder = array_ops.placeholder(
dtypes.int32, name="batch_shape")
eye = linalg_ops.eye(
num_rows_placeholder,
num_columns=num_columns_placeholder,
batch_shape=batch_shape_placeholder,
dtype=dtype)
with self.session(use_gpu=True) as sess:
eye_tf = sess.run(
eye,
feed_dict={
num_rows_placeholder: num_rows,
num_columns_placeholder: num_columns,
batch_shape_placeholder: batch_shape
})
self.assertAllEqual(eye_np, eye_tf)
class _MatrixRankTest(object):
def test_batch_default_tolerance(self):
x_ = np.array(
[
[
[2, 3, -2], # = row2+row3
[-1, 1, -2],
[3, 2, 0]
],
[
[0, 2, 0], # = 2*row2
[0, 1, 0],
[0, 3, 0]
], # = 3*row2
[[1, 0, 0], [0, 1, 0], [0, 0, 1]]
],
self.dtype)
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
self.assertAllEqual([2, 1, 3], self.evaluate(linalg.matrix_rank(x)))
def test_custom_tolerance_broadcasts(self):
q = linalg.qr(random_ops.random_uniform([3, 3], dtype=self.dtype))[0]
e = constant_op.constant([0.1, 0.2, 0.3], dtype=self.dtype)
a = linalg.solve(q, linalg.transpose(a=e * q), adjoint=True)
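# a = q @ diag(e) @ q^T has singular values 0.1, 0.2 and 0.3; each
# broadcast tolerance row keeps one fewer of them, giving ranks 3, 2, 1, 0.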
self.assertAllEqual([3, 2, 1, 0],
self.evaluate(
linalg.matrix_rank(
a, tol=[[0.09], [0.19], [0.29], [0.31]])))
def test_nonsquare(self):
x_ = np.array(
[
[
[2, 3, -2, 2], # = row2+row3
[-1, 1, -2, 4],
[3, 2, 0, -2]
],
[
[0, 2, 0, 6], # = 2*row2
[0, 1, 0, 3],
[0, 3, 0, 9]
]
], # = 3*row2
self.dtype)
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
self.assertAllEqual([2, 1], self.evaluate(linalg.matrix_rank(x)))
@test_util.run_all_in_graph_and_eager_modes
class MatrixRankStatic32Test(test.TestCase, _MatrixRankTest):
dtype = np.float32
use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class MatrixRankDynamic64Test(test.TestCase, _MatrixRankTest):
dtype = np.float64
use_static_shape = False
class _PinvTest(object):
def expected_pinv(self, a, rcond):
"""Calls `np.linalg.pinv` but corrects its broken batch semantics."""
if a.ndim < 3:
return np.linalg.pinv(a, rcond)
if rcond is None:
rcond = 10. * max(a.shape[-2], a.shape[-1]) * np.finfo(a.dtype).eps
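# Matches the default cutoff used by tf.linalg.pinv:
# 10 * max(num_rows, num_cols) * eps.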
s = np.concatenate([a.shape[:-2], [a.shape[-1], a.shape[-2]]])
a_pinv = np.zeros(s, dtype=a.dtype)
for i in np.ndindex(a.shape[:(a.ndim - 2)]):
a_pinv[i] = np.linalg.pinv(
a[i], rcond=rcond if isinstance(rcond, float) else rcond[i])
return a_pinv
def test_symmetric(self):
a_ = self.dtype([[1., .4, .5], [.4, .2, .25], [.5, .25, .35]])
a_ = np.stack([a_ + 1., a_], axis=0) # Batch of matrices.
a = array_ops.placeholder_with_default(
a_, shape=a_.shape if self.use_static_shape else None)
if self.use_default_rcond:
rcond = None
else:
rcond = self.dtype([0., 0.01]) # Smallest 1 component is forced to zero.
expected_a_pinv_ = self.expected_pinv(a_, rcond)
a_pinv = linalg.pinv(a, rcond, validate_args=True)
a_pinv_ = self.evaluate(a_pinv)
self.assertAllClose(expected_a_pinv_, a_pinv_, atol=2e-5, rtol=2e-5)
if not self.use_static_shape:
return
self.assertAllEqual(expected_a_pinv_.shape, a_pinv.shape)
def test_nonsquare(self):
a_ = self.dtype([[1., .4, .5, 1.], [.4, .2, .25, 2.], [.5, .25, .35, 3.]])
a_ = np.stack([a_ + 0.5, a_], axis=0) # Batch of matrices.
a = array_ops.placeholder_with_default(
a_, shape=a_.shape if self.use_static_shape else None)
if self.use_default_rcond:
rcond = None
else:
# Smallest 2 components are forced to zero.
rcond = self.dtype([0., 0.25])
expected_a_pinv_ = self.expected_pinv(a_, rcond)
a_pinv = linalg.pinv(a, rcond, validate_args=True)
a_pinv_ = self.evaluate(a_pinv)
self.assertAllClose(expected_a_pinv_, a_pinv_, atol=1e-5, rtol=1e-4)
if not self.use_static_shape:
return
self.assertAllEqual(expected_a_pinv_.shape, a_pinv.shape)
@test_util.run_all_in_graph_and_eager_modes
class PinvTestDynamic32DefaultRcond(test.TestCase, _PinvTest):
dtype = np.float32
use_static_shape = False
use_default_rcond = True
@test_util.run_all_in_graph_and_eager_modes
class PinvTestStatic64DefaultRcond(test.TestCase, _PinvTest):
dtype = np.float64
use_static_shape = True
use_default_rcond = True
@test_util.run_all_in_graph_and_eager_modes
class PinvTestDynamic32CustomRcond(test.TestCase, _PinvTest):
dtype = np.float32
use_static_shape = False
use_default_rcond = False
@test_util.run_all_in_graph_and_eager_modes
class PinvTestStatic64CustomRcond(test.TestCase, _PinvTest):
dtype = np.float64
use_static_shape = True
use_default_rcond = False
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/linalg_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.argmax_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ArgMaxTest(test.TestCase):
def _testArg(self,
method,
x,
axis,
expected_values,
use_gpu=False,
expected_err_re=None):
with self.session(use_gpu=use_gpu):
ans = method(x, axis=axis)
if expected_err_re is None:
tf_ans = self.evaluate(ans)
# Defaults to int64 output.
self.assertEqual(np.int64, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
self.assertShapeEqual(expected_values, ans)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def _testBothArg(self,
method,
x,
axis,
expected_values,
expected_err_re=None):
self._testArg(method, x, axis, expected_values, True, expected_err_re)
self._testArg(method, x, axis, expected_values, False, expected_err_re)
def _testBasic(self, dtype):
x = np.asarray(100 * np.random.randn(200), dtype=dtype)
# Check that argmin and argmax match numpy along the primary axis
self._testBothArg(math_ops.argmax, x, 0, x.argmax())
self._testBothArg(math_ops.argmin, x, 0, x.argmin())
def _testDim(self, dtype):
x = np.asarray(100 * np.random.randn(3, 2, 4, 5, 6), dtype=dtype)
# Check that argmin and argmax match numpy along all axes
for axis in range(-5, 5):
self._testBothArg(math_ops.argmax, x, axis, x.argmax(axis))
self._testBothArg(math_ops.argmin, x, axis, x.argmin(axis))
def testFloat(self):
self._testBasic(np.float32)
self._testDim(np.float32)
def testFloatInt32Output(self):
x = np.asarray(100 * np.random.randn(200), dtype=np.float32)
expected_values = x.argmax()
with self.session(use_gpu=True):
ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)
# The values are equal when comparing int32 to int64 because
# the values don't have a range that exceeds 32-bit integers.
self.assertAllEqual(tf_ans, expected_values)
expected_values = x.argmin()
with self.session(use_gpu=True):
ans = math_ops.argmin(x, axis=0, output_type=dtypes.int32)
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
def testDouble(self):
self._testBasic(np.float64)
self._testDim(np.float64)
def testInt32(self):
self._testBasic(np.int32)
self._testDim(np.int32)
def testInt64(self):
self._testBasic(np.int64)
self._testDim(np.int64)
def testEmpty(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
with self.assertRaisesOpError(
r"Reduction axis 0 is empty in shape \[0\]"):
op([], 0).eval()
@test_util.run_deprecated_v1
def testDefaultAxis(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
ans = op([1]).eval()
self.assertAllEqual(ans, 0)
@test_util.run_deprecated_v1
def testOutputEmpty(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
ret = op(array_ops.zeros(shape=[1, 0, 2]), axis=-1).eval()
self.assertEqual(ret.shape, (1, 0))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/argmax_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeRaw op from parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import zlib
from six import BytesIO
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
class DecodeCompressedOpTest(test.TestCase):
def _compress(self, bytes_in, compression_type):
if not compression_type:
return bytes_in
elif compression_type == "ZLIB":
return zlib.compress(bytes_in)
else:
out = BytesIO()
with gzip.GzipFile(fileobj=out, mode="wb") as f:
f.write(bytes_in)
return out.getvalue()
@test_util.run_deprecated_v1
def testDecompress(self):
for compression_type in ["ZLIB", "GZIP", ""]:
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[2])
decompressed = parsing_ops.decode_compressed(
in_bytes, compression_type=compression_type)
self.assertEqual([2], decompressed.get_shape().as_list())
result = decompressed.eval(
feed_dict={in_bytes: [self._compress(b"AaAA", compression_type),
self._compress(b"bBbb", compression_type)]})
self.assertAllEqual([b"AaAA", b"bBbb"], result)
@test_util.run_deprecated_v1
def testDecompressWithRaw(self):
for compression_type in ["ZLIB", "GZIP", ""]:
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decompressed = parsing_ops.decode_compressed(
in_bytes, compression_type=compression_type)
decode = parsing_ops.decode_raw(decompressed, out_type=dtypes.int16)
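# decode_raw defaults to little-endian, so the byte pair "Aa" decodes to
# the int16 value ord("A") + ord("a") * 256.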
result = decode.eval(
feed_dict={in_bytes: [self._compress(b"AaBC", compression_type)]})
self.assertAllEqual(
[[ord("A") + ord("a") * 256, ord("B") + ord("C") * 256]], result)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/decode_compressed_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
TIMEOUT = 5
class StageTest(test.TestCase):
@test_util.run_deprecated_v1
def testSimple(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea([dtypes.float32])
stage = stager.put([v])
y = stager.get()
y = math_ops.reduce_max(math_ops.matmul(y, y))
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1})
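# Prime the staging area; each get() returns the value staged one step
# earlier, and every entry of matmul(y, y) sums 128 products of (2x)(2x),
# hence yval == 4 * (i - 1)^2 * 128.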
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i})
self.assertAllClose(4 * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
@test_util.run_deprecated_v1
def testMultiple(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea([dtypes.float32, dtypes.float32])
stage = stager.put([x, v])
z, y = stager.get()
y = math_ops.reduce_max(z * math_ops.matmul(y, y))
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i})
self.assertAllClose(
4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
@test_util.run_deprecated_v1
def testDictionary(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea(
[dtypes.float32, dtypes.float32],
shapes=[[], [128, 128]],
names=['x', 'v'])
stage = stager.put({'x': x, 'v': v})
ret = stager.get()
z = ret['x']
y = ret['v']
y = math_ops.reduce_max(z * math_ops.matmul(y, y))
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i})
self.assertAllClose(
4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
def testColocation(self):
gpu_dev = test.gpu_device_name()
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(gpu_dev):
stager = data_flow_ops.StagingArea([dtypes.float32])
y = stager.put([v])
expected_name = gpu_dev if 'gpu' not in gpu_dev else '/device:GPU:0'
self.assertEqual(y.device, expected_name)
with ops.device('/cpu:0'):
x = stager.get()[0]
self.assertEqual(x.device, '/device:CPU:0')
G.finalize()
@test_util.run_deprecated_v1
def testPeek(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
p = array_ops.placeholder(dtypes.int32, name='p')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea(
[
dtypes.int32,
], shapes=[[]])
stage = stager.put([x])
peek = stager.peek(p)
ret = stager.get()
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
for i in range(10):
sess.run(stage, feed_dict={x: i})
for i in range(10):
self.assertTrue(sess.run(peek, feed_dict={p: i}) == [i])
@test_util.run_deprecated_v1
def testSizeAndClear(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32, name='x')
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea(
[dtypes.float32, dtypes.float32],
shapes=[[], [128, 128]],
names=['x', 'v'])
stage = stager.put({'x': x, 'v': v})
ret = stager.get()
size = stager.size()
clear = stager.clear()
G.finalize()
with self.session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1})
self.assertEqual(sess.run(size), 1)
sess.run(stage, feed_dict={x: -1})
self.assertEqual(sess.run(size), 2)
sess.run(clear)
self.assertEqual(sess.run(size), 0)
@test_util.run_deprecated_v1
def testCapacity(self):
self.skipTest('b/123423516 this test is flaky on gpu.')
capacity = 3
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea(
[
dtypes.int32,
], capacity=capacity, shapes=[[]])
stage = stager.put([x])
ret = stager.get()
size = stager.size()
G.finalize()
from six.moves import queue as Queue
import threading
queue = Queue.Queue()
n = 8
with self.session(use_gpu=True, graph=G) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
def thread_run():
for i in range(n):
sess.run(stage, feed_dict={x: i})
queue.put(0)
t = threading.Thread(target=thread_run)
t.daemon = True
t.start()
# Get tokens from the queue until a timeout occurs
try:
for i in range(n):
queue.get(timeout=TIMEOUT)
except Queue.Empty:
pass
# Should've timed out on the iteration 'capacity'
if i != capacity:
self.fail("Expected to timeout on iteration '{}' "
"but instead timed out on iteration '{}' "
"Staging Area size is '{}' and configured "
"capacity is '{}'.".format(capacity, i, sess.run(size),
capacity))
# Should have capacity elements in the staging area
self.assertTrue(sess.run(size) == capacity)
# Clear the staging area completely
for i in range(n):
self.assertTrue(sess.run(ret) == [i])
# It should now be empty
self.assertTrue(sess.run(size) == 0)
@test_util.run_deprecated_v1
def testMemoryLimit(self):
memory_limit = 512 * 1024 # 512K
chunk = 200 * 1024 # 200K
capacity = memory_limit // chunk
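# 512K // 200K == 2: two 200K puts fit under the limit, so the third put
# blocks and the main thread times out at iteration i == capacity.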
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.uint8, name='x')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea(
[
dtypes.uint8,
], memory_limit=memory_limit, shapes=[[]])
stage = stager.put([x])
ret = stager.get()
size = stager.size()
G.finalize()
from six.moves import queue as Queue
import threading
import numpy as np
queue = Queue.Queue()
n = 8
with self.session(use_gpu=True, graph=G) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
def thread_run():
for i in range(n):
sess.run(stage, feed_dict={x: np.full(chunk, i, dtype=np.uint8)})
queue.put(0)
t = threading.Thread(target=thread_run)
t.daemon = True
t.start()
# Get tokens from the queue until a timeout occurs
try:
for i in range(n):
queue.get(timeout=TIMEOUT)
except Queue.Empty:
pass
# Should've timed out on the iteration 'capacity'
if i != capacity:
self.fail("Expected to timeout on iteration '{}' "
"but instead timed out on iteration '{}' "
"Staging Area size is '{}' and configured "
"capacity is '{}'.".format(capacity, i, sess.run(size),
capacity))
# Should have capacity elements in the staging area
self.assertTrue(sess.run(size) == capacity)
# Clear the staging area completely
for i in range(n):
self.assertTrue(np.all(sess.run(ret)[0] == i))
self.assertTrue(sess.run(size) == 0)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/stage_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StringToNumber op from parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
_ERROR_MESSAGE = "StringToNumberOp could not correctly convert string: "
class StringToNumberOpTest(test.TestCase):
def _test(self, tf_type, good_pairs, bad_pairs):
with self.cached_session():
# Build a small testing graph.
input_string = array_ops.placeholder(dtypes.string)
output = parsing_ops.string_to_number(
input_string, out_type=tf_type)
# Check all the good input/output pairs.
for instr, outnum in good_pairs:
result, = output.eval(feed_dict={input_string: [instr]})
self.assertAllClose([outnum], [result])
# Check that the bad inputs produce the right errors.
for instr, outstr in bad_pairs:
with self.assertRaisesOpError(outstr):
output.eval(feed_dict={input_string: [instr]})
@test_util.run_deprecated_v1
def testToFloat(self):
self._test(dtypes.float32,
[("0", 0), ("3", 3), ("-1", -1),
("1.12", 1.12), ("0xF", 15), (" -10.5", -10.5),
("3.40282e+38", 3.40282e+38),
# Greater than max value of float.
("3.40283e+38", float("INF")),
("-3.40283e+38", float("-INF")),
# Less than min value of float.
("NAN", float("NAN")),
("INF", float("INF"))],
[("10foobar", _ERROR_MESSAGE + "10foobar")])
@test_util.run_deprecated_v1
def testToDouble(self):
self._test(dtypes.float64,
[("0", 0), ("3", 3), ("-1", -1),
("1.12", 1.12), ("0xF", 15), (" -10.5", -10.5),
("3.40282e+38", 3.40282e+38),
# Greater than max value of float.
("3.40283e+38", 3.40283e+38),
# Less than min value of float.
("-3.40283e+38", -3.40283e+38),
("NAN", float("NAN")),
("INF", float("INF"))],
[("10foobar", _ERROR_MESSAGE + "10foobar")])
@test_util.run_deprecated_v1
def testToInt32(self):
self._test(dtypes.int32,
[("0", 0), ("3", 3), ("-1", -1),
(" -10", -10),
("-2147483648", -2147483648),
("2147483647", 2147483647)],
[ # Less than min value of int32.
("-2147483649", _ERROR_MESSAGE + "-2147483649"),
# Greater than max value of int32.
("2147483648", _ERROR_MESSAGE + "2147483648"),
("2.9", _ERROR_MESSAGE + "2.9"),
("10foobar", _ERROR_MESSAGE + "10foobar")])
@test_util.run_deprecated_v1
def testToInt64(self):
self._test(dtypes.int64,
[("0", 0), ("3", 3), ("-1", -1),
(" -10", -10),
("-2147483648", -2147483648),
("2147483647", 2147483647),
("-2147483649", -2147483649), # Less than min value of int32.
("2147483648", 2147483648)], # Greater than max value of int32.
[("2.9", _ERROR_MESSAGE + "2.9"),
("10foobar", _ERROR_MESSAGE + "10foobar")])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/string_to_number_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_join_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringJoinOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testStringJoin(self):
input0 = ["a", "b"]
input1 = "a"
input2 = [["b"], ["c"]]
with self.cached_session():
output = string_ops.string_join([input0, input1])
self.assertAllEqual(output.eval(), [b"aa", b"ba"])
output = string_ops.string_join([input0, input1], separator="--")
self.assertAllEqual(output.eval(), [b"a--a", b"b--a"])
output = string_ops.string_join([input0, input1, input0], separator="--")
self.assertAllEqual(output.eval(), [b"a--a--a", b"b--a--b"])
output = string_ops.string_join([input1] * 4, separator="!")
self.assertEqual(output.eval(), b"a!a!a!a")
output = string_ops.string_join([input2] * 2, separator="")
self.assertAllEqual(output.eval(), [[b"bb"], [b"cc"]])
with self.assertRaises(ValueError): # Inconsistent shapes
string_ops.string_join([input0, input2]).eval()
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/string_join_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _NumpyUpdate(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
indx = i[:-1] + (indx,)
ref[indx] = updates[i]
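# For example (a sketch, not exercised by the tests): with indices = [[1, 0]]
# and updates = [[10., 20.]], the loop above writes ref[0, 1] = 10. and
# ref[0, 0] = 20., i.e. each index addresses the last axis within its batch.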
_TF_OPS_TO_NUMPY = {
state_ops.batch_scatter_update: _NumpyUpdate,
}
class ScatterTest(test.TestCase):
def _VariableRankTest(self,
tf_scatter,
vtype,
itype,
repeat_indices=False,
updates_are_scalar=False,
method=False):
np.random.seed(8)
with self.cached_session(use_gpu=False):
for indices_shape in (2,), (3, 7), (3, 4, 7):
for extra_shape in (), (5,), (5, 9):
          # Generate random indices for comparison against the numpy reference.
sparse_dim = len(indices_shape) - 1
indices = np.random.randint(
indices_shape[sparse_dim], size=indices_shape, dtype=itype)
updates = _AsType(
np.random.randn(*(indices_shape + extra_shape)), vtype)
old = _AsType(np.random.randn(*(indices_shape + extra_shape)), vtype)
# Scatter via numpy
new = old.copy()
np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref = variables.Variable(old)
ref.initializer.run()
if method:
            # IndexedSlices takes (values, indices).
            ref.batch_scatter_update(ops.IndexedSlices(updates, indices))
else:
tf_scatter(ref, indices, updates).eval()
self.assertAllClose(ref.eval(), new)
@test_util.run_deprecated_v1
def testVariableRankUpdate(self):
vtypes = [np.float32, np.float64]
for vtype in vtypes:
for itype in (np.int32, np.int64):
self._VariableRankTest(
state_ops.batch_scatter_update, vtype, itype)
@test_util.run_deprecated_v1
def testBooleanScatterUpdate(self):
with self.session(use_gpu=False) as session:
var = variables.Variable([True, False])
update0 = state_ops.batch_scatter_update(var, [1], [True])
update1 = state_ops.batch_scatter_update(
var, constant_op.constant(
[0], dtype=dtypes.int64), [False])
var.initializer.run()
session.run([update0, update1])
self.assertAllEqual([False, True], self.evaluate(var))
@test_util.run_deprecated_v1
def testScatterOutOfRange(self):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
with self.session(use_gpu=False):
ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
state_ops.batch_scatter_update(ref, indices, updates).eval()
# Test some out of range errors.
indices = np.array([-1, 0, 5])
with self.assertRaisesOpError(
r'indices\[0\] = \[-1\] does not index into shape \[6\]'):
state_ops.batch_scatter_update(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
with self.assertRaisesOpError(r'indices\[2\] = \[6\] does not index into '
r'shape \[6\]'):
state_ops.batch_scatter_update(ref, indices, updates).eval()
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/batch_scatter_ops_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for deterministic cuDNN functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
# Notes:
#
# Deterministic cuDNN operation is selected by setting either of the two
# environment variables TF_CUDNN_DETERMINISTIC or TF_DETERMINISTIC_OPS to 'true'
# or '1' while also not setting the environment variable TF_CUDNN_USE_AUTOTUNE
# to 'false' or '0'.
#
# Where both deterministic and non-deterministic cuDNN algorithms are available,
# selecting deterministic operation will lead to only the deterministic
# algorithms being chosen. Additionally, selecting deterministic operation will
# result in a deterministic, or reproducible, selection of algorithms (for any
# given layer configuration) for each of the forward and the two backward paths.
#
# These tests intend to confirm that deterministic algorithms are chosen (for
# the back-prop paths) when deterministic operation is selected. The tested
# configurations were first confirmed to produce non-deterministic results when
# the above-mentioned environment variables are not set.
#
# Even though selecting deterministic operation should ensure that the same
# algorithms, for a given layer configuration, are always used (i.e. that
# algorithm selection is deterministic / reproducible), this is not tested.
# TODO(duncanriach): Add test for deterministic cuDNN max-pooling
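# A hypothetical helper (an illustrative sketch, not used by these tests) for
# requesting the deterministic behavior described in the notes above; it would
# need to run before TensorFlow initializes in order to take effect:
def _enable_deterministic_cudnn():
  import os  # local import keeps this sketch self-contained
  # Either TF_DETERMINISTIC_OPS or TF_CUDNN_DETERMINISTIC works per the notes;
  # TF_CUDNN_USE_AUTOTUNE must not be set to 'false' or '0'.
  os.environ['TF_DETERMINISTIC_OPS'] = '1'
  os.environ.pop('TF_CUDNN_USE_AUTOTUNE', None)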
LayerShapeNHWC = collections.namedtuple('LayerShapeNHWC',
'batch, height, width, channels')
FilterShape2D = collections.namedtuple(
'FilterShape2D', 'height, width, in_channels, out_channels')
LayerShapeNCDHW = collections.namedtuple('LayerShapeNCDHW',
'batch, channels, depth, height, width')
FilterShape3D = collections.namedtuple(
'FilterShape3D', 'depth, height, width, in_channels, out_channels')
class ConvolutionTest(test.TestCase):
def _random_data_op(self, shape):
# np.random.random_sample can properly interpret either tf.TensorShape or
# namedtuple as a list.
return constant_op.constant(
2 * np.random.random_sample(shape) - 1, dtype=dtypes.float32)
def _random_out_op(self, in_shape, filter_shape, strides, padding):
    # Choosing not to use array_ops.zeros() to prevent possible removal by
# optimization
in_op = self._random_data_op(in_shape)
filter_op = self._random_data_op(filter_shape)
# Use the forward op's shape-inference
conv_op = nn_ops.conv2d(
in_op, filter_op, strides=strides, padding=padding)
out_shape = conv_op.get_shape()
out_op = self._random_data_op(out_shape)
return out_op
def _assert_reproducible(self, operation):
with self.cached_session(force_gpu=True):
result_1 = self.evaluate(operation)
result_2 = self.evaluate(operation)
self.assertAllEqual(result_1, result_2)
# The default forward algorithm choice, when using cuDNN 7, does not support
# the following layer configuration. This test case intends to confirm that
# an alternative algorithm is selected. Note that, in cuDNN 7, all forward
  # algorithms are deterministic.
@test_util.run_cuda_only
def testForward(self):
np.random.seed(3)
in_shape = LayerShapeNCDHW(batch=2, channels=3, depth=5, height=7, width=6)
filter_shape = FilterShape3D(depth=3, height=3, width=3, in_channels=3,
out_channels=2)
in_op = self._random_data_op(in_shape)
filter_op = self._random_data_op(filter_shape)
strides = [1, 1, 1, 1, 1]
padding = 'VALID'
dilations = [1, 1, 2, 2, 2]
out_op = nn_ops.conv3d(in_op, filter_op, strides=strides, padding=padding,
data_format='NCDHW', dilations=dilations)
self._assert_reproducible(out_op)
@test_util.run_cuda_only
def testBackwardFilterGradient(self):
np.random.seed(1)
in_shape = LayerShapeNHWC(batch=8, height=128, width=128, channels=8)
filter_shape = FilterShape2D(height=3, width=3, in_channels=8,
out_channels=8)
in_op = self._random_data_op(in_shape)
strides = [1, 1, 1, 1]
padding = 'SAME'
out_op = self._random_out_op(in_shape, filter_shape, strides, padding)
filter_gradient_op = nn_ops.conv2d_backprop_filter(
in_op, filter_shape, out_op, strides=strides, padding=padding)
self._assert_reproducible(filter_gradient_op)
@test_util.run_cuda_only
def testBackwardInputGradient(self):
np.random.seed(2)
in_shape = LayerShapeNHWC(batch=8, height=32, width=32, channels=8)
filter_shape = FilterShape2D(height=7, width=7, in_channels=8,
out_channels=128)
filter_op = self._random_data_op(filter_shape)
strides = [1, 1, 1, 1]
padding = 'SAME'
out_op = self._random_out_op(in_shape, filter_shape, strides, padding)
input_gradient_op = nn_ops.conv2d_backprop_input(
in_shape, filter_op, out_op, strides=strides, padding=padding)
self._assert_reproducible(input_gradient_op)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/cudnn_deterministic_base.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class Conv1DTest(test.TestCase):
def testBasic(self):
"""Test that argument passing to conv1d is handled properly."""
# double datatype is currently not supported for convolution ops
# on the ROCm platform
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
for dtype in [dtypes.float16, dtypes.float32] + optional_float64:
x = constant_op.constant([1, 2, 3, 4], dtype=dtype)
x = array_ops.expand_dims(x, 0) # Add batch dimension
x = array_ops.expand_dims(x, 2) # And depth dimension
filters = constant_op.constant([2, 1], dtype=dtype)
filters = array_ops.expand_dims(filters, 1) # in_channels
filters = array_ops.expand_dims(filters, 2) # out_channels
      # The filter is now 2x1x1.
for stride in [1, 2]:
with self.cached_session(use_gpu=test.is_gpu_available()):
c = nn_ops.conv1d(x, filters, stride, padding="VALID")
reduced = array_ops.squeeze(c)
output = self.evaluate(reduced)
if stride == 1:
self.assertEqual(len(output), 3)
self.assertAllClose(output,
[2 * 1 + 1 * 2, 2 * 2 + 1 * 3, 2 * 3 + 1 * 4])
else:
self.assertEqual(len(output), 2)
self.assertAllClose(output, [2 * 1 + 1 * 2, 2 * 3 + 1 * 4])
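  # A worked sketch of the expectations above (an illustration): with filter
  # [2, 1], output[i] = 2 * x[i] + 1 * x[i + 1] over x = [1, 2, 3, 4], giving
  # [4, 7, 10] for stride 1; stride 2 keeps every other window, giving [4, 10].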
def testConv1DTranspose(self):
with self.cached_session():
stride = 2
# Input, output: [batch, width, depth]
x_shape = [2, 4, 3]
y_shape = [2, 9, 2]
# Filter: [kernel_width, output_depth, input_depth]
f_shape = [3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv1d_transpose(
x, f, y_shape, strides=stride, padding="VALID")
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[1]):
for w in xrange(pad, y_shape[1] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
w_in = w % stride == 0 and w > pad and w < y_shape[1] - 1 - pad
if w_in:
target += 3.0
cache_values[n, w, k] = target
# copy values in the border
cache_values[n, 0, k] = cache_values[n, 1, k]
cache_values[n, -1, k] = cache_values[n, -2, k]
self.assertAllClose(cache_values, value)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/conv1d_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Lu."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class LuOpTest(test.TestCase):
@property
def float_types(self):
return set((np.float64, np.float32, np.complex64, np.complex128))
def _verifyLuBase(self, x, lower, upper, perm, verification,
output_idx_type):
lower_np, upper_np, perm_np, verification_np = self.evaluate(
[lower, upper, perm, verification])
self.assertAllClose(x, verification_np)
self.assertShapeEqual(x, lower)
self.assertShapeEqual(x, upper)
self.assertAllEqual(x.shape[:-1], perm.shape.as_list())
# Check dtypes are as expected.
self.assertEqual(x.dtype, lower_np.dtype)
self.assertEqual(x.dtype, upper_np.dtype)
self.assertEqual(output_idx_type.as_numpy_dtype, perm_np.dtype)
# Check that the permutation is valid.
if perm_np.shape[-1] > 0:
perm_reshaped = np.reshape(perm_np, (-1, perm_np.shape[-1]))
for perm_vector in perm_reshaped:
self.assertAllClose(np.arange(len(perm_vector)), np.sort(perm_vector))
def _verifyLu(self, x, output_idx_type=dtypes.int64):
# Verify that Px = LU.
lu, perm = linalg_ops.lu(x, output_idx_type=output_idx_type)
# Prepare the lower factor of shape num_rows x num_rows
lu_shape = np.array(lu.shape.as_list())
batch_shape = lu_shape[:-2]
num_rows = lu_shape[-2]
num_cols = lu_shape[-1]
lower = array_ops.matrix_band_part(lu, -1, 0)
if num_rows > num_cols:
eye = linalg_ops.eye(
num_rows, batch_shape=batch_shape, dtype=lower.dtype)
lower = array_ops.concat([lower, eye[..., num_cols:]], axis=-1)
elif num_rows < num_cols:
lower = lower[..., :num_rows]
# Fill the diagonal with ones.
ones_diag = array_ops.ones(
np.append(batch_shape, num_rows), dtype=lower.dtype)
lower = array_ops.matrix_set_diag(lower, ones_diag)
# Prepare the upper factor.
upper = array_ops.matrix_band_part(lu, 0, -1)
with ops.device("/cpu:0"):
verification = math_ops.matmul(lower, upper)
    # Permute the rows of the product of the triangular (LU) factors.
if num_rows > 0:
# Reshape the product of the triangular factors and permutation indices
# to a single batch dimension. This makes it easy to apply
# invert_permutation and gather_nd ops.
perm_reshaped = array_ops.reshape(perm, [-1, num_rows])
verification_reshaped = array_ops.reshape(verification,
[-1, num_rows, num_cols])
# Invert the permutation in each batch.
inv_perm_reshaped = map_fn.map_fn(array_ops.invert_permutation,
perm_reshaped)
batch_size = perm_reshaped.shape.as_list()[0]
# Prepare the batch indices with the same shape as the permutation.
# The corresponding batch index is paired with each of the `num_rows`
# permutation indices.
batch_indices = math_ops.cast(
array_ops.broadcast_to(
math_ops.range(batch_size)[:, None], perm_reshaped.shape),
dtype=output_idx_type)
permuted_verification_reshaped = array_ops.gather_nd(
verification_reshaped,
array_ops.stack([batch_indices, inv_perm_reshaped], axis=-1))
# Reshape the verification matrix back to the original shape.
verification = array_ops.reshape(permuted_verification_reshaped,
lu_shape)
self._verifyLuBase(x, lower, upper, perm, verification,
output_idx_type)
def testBasic(self):
data = np.array([[4., -1., 2.], [-1., 6., 0], [10., 0., 5.]])
for dtype in (np.float32, np.float64):
for output_idx_type in (dtypes.int32, dtypes.int64):
self._verifyLu(data.astype(dtype), output_idx_type=output_idx_type)
if not test.is_built_with_rocm():
# ROCm does not support BLAS operations for complex types
for dtype in (np.complex64, np.complex128):
for output_idx_type in (dtypes.int32, dtypes.int64):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyLu(complex_data, output_idx_type=output_idx_type)
def testPivoting(self):
# This matrix triggers partial pivoting because the first diagonal entry
# is small.
data = np.array([[1e-9, 1., 0.], [1., 0., 0], [0., 1., 5]])
self._verifyLu(data.astype(np.float32))
for dtype in (np.float32, np.float64):
self._verifyLu(data.astype(dtype))
_, p = linalg_ops.lu(data)
p_val = self.evaluate([p])
# Make sure p_val is not the identity permutation.
self.assertNotAllClose(np.arange(3), p_val)
if not test.is_built_with_rocm():
# ROCm does not support BLAS operations for complex types
for dtype in (np.complex64, np.complex128):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyLu(complex_data)
_, p = linalg_ops.lu(data)
p_val = self.evaluate([p])
# Make sure p_val is not the identity permutation.
self.assertNotAllClose(np.arange(3), p_val)
def testInvalidMatrix(self):
# LU factorization gives an error when the input is singular.
# Note: A singular matrix may return without error but it won't be a valid
# factorization.
for dtype in self.float_types:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
linalg_ops.lu(
np.array([[1., 2., 3.], [2., 4., 6.], [2., 3., 4.]],
dtype=dtype)))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
linalg_ops.lu(
np.array([[[1., 2., 3.], [2., 4., 6.], [1., 2., 3.]],
[[1., 2., 3.], [3., 4., 5.], [5., 6., 7.]]],
dtype=dtype)))
def testBatch(self):
simple_array = np.array([[[1., -1.], [2., 5.]]]) # shape (1, 2, 2)
self._verifyLu(simple_array)
self._verifyLu(np.vstack((simple_array, simple_array)))
odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
self._verifyLu(np.vstack((odd_sized_array, odd_sized_array)))
batch_size = 200
# Generate random matrices.
np.random.seed(42)
matrices = np.random.rand(batch_size, 5, 5)
self._verifyLu(matrices)
if not test.is_built_with_rocm():
# ROCm does not support BLAS operations for complex types
# Generate random complex valued matrices.
np.random.seed(52)
matrices = np.random.rand(batch_size, 5,
5) + 1j * np.random.rand(batch_size, 5, 5)
self._verifyLu(matrices)
def testLargeMatrix(self):
# Generate random matrices.
n = 500
np.random.seed(64)
data = np.random.rand(n, n)
self._verifyLu(data)
if not test.is_built_with_rocm():
# ROCm does not support BLAS operations for complex types
# Generate random complex valued matrices.
np.random.seed(129)
data = np.random.rand(n, n) + 1j * np.random.rand(n, n)
self._verifyLu(data)
@test_util.run_v1_only("b/120545219")
def testEmpty(self):
self._verifyLu(np.empty([0, 2, 2]))
self._verifyLu(np.empty([2, 0, 0]))
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
lu1, p1 = linalg_ops.lu(matrix1)
lu2, p2 = linalg_ops.lu(matrix2)
lu1_val, p1_val, lu2_val, p2_val = self.evaluate([lu1, p1, lu2, p2])
self.assertAllEqual(lu1_val, lu2_val)
self.assertAllEqual(p1_val, p2_val)
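# A numpy-only sketch (an assumption, not part of the original tests) of the
# same verification idea as _verifyLu, for a single square matrix:
def _numpy_verify_lu(x, lu, perm):
  n = lu.shape[-1]
  lower = np.tril(lu, -1) + np.eye(n, dtype=lu.dtype)  # unit lower triangle
  upper = np.triu(lu)
  reconstruction = np.matmul(lower, upper)
  # Row j of L @ U holds row perm[j] of x, so index with the inverse
  # permutation to recover the original row order.
  inv_perm = np.argsort(perm)
  return np.allclose(x, reconstruction[inv_perm])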
class LuBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(4096, 4096),
(513, 2, 2),
(513, 8, 8),
(513, 256, 256),
(4, 513, 2, 2),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (2.0 * n) + np.diag(
np.ones(n).astype(np.float32))
return np.tile(matrix, batch_shape + (1, 1))
def benchmarkLuOp(self):
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
lu, p = linalg_ops.lu(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(lu, p),
min_iters=25,
name="lu_cpu_{shape}".format(shape=shape))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/device:GPU:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
lu, p = linalg_ops.lu(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(lu, p),
min_iters=25,
name="lu_gpu_{shape}".format(shape=shape))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/lu_op_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.Einsum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class EinsumOpTest(test.TestCase):
def _check(self, s, *input_shapes, **kwargs):
dtype = kwargs.pop('dtype', np.float32)
r = np.random.RandomState(0)
inputs = []
for shape in input_shapes:
arr = np.array(r.randn(*shape)).astype(dtype)
if dtype == np.complex64 or dtype == np.complex128:
arr += 1j * np.array(r.randn(*shape)).astype(dtype)
inputs.append(arr)
input_tensors = [constant_op.constant(x, shape=x.shape) for x in inputs]
a = np.einsum(s, *inputs)
with ops.device("/cpu:0"):
b = self.evaluate(gen_linalg_ops.einsum(input_tensors, s))
self.assertAllClose(a, b, atol=1e-4, rtol=1e-4)
def testUnary(self):
self._check('->', ())
self._check('aa->', (3, 3))
self._check('aa->a', (3, 3))
self._check('aaa->', (3, 3, 3))
self._check('aaa->a', (3, 3, 3))
self._check('aab->a', (3, 3, 4))
self._check('ab->', (3, 3))
self._check('ab->ab', (3, 3))
self._check('abc->b', (3, 4, 5))
self._check('abc->ca', (3, 4, 5))
self._check('abc->cab', (3, 4, 5))
self._check('aabcc->a', (3, 3, 5, 4, 4))
self._check('aabcc->ac', (3, 3, 5, 4, 4))
self._check('aabcd->ad', (3, 3, 5, 4, 4))
def testUnaryEllipsis(self):
self._check('...->...', ())
self._check('...->', ())
self._check('->...', ())
# Tests from dask
self._check('a...a->a...', (2, 2))
self._check('a...a->', (2, 2))
self._check('a...a->...', (2, 5, 1, 2))
self._check('a...a->a...', (2, 1, 2))
self._check('a...a->a...', (2, 3, 4, 5, 2))
self._check('...ijk->...ki', (3, 4, 5))
self._check('...ijk->...ki', (1, 3, 4, 5))
self._check('...ijk->...ki', (2, 2, 3, 4, 5))
# Repeated indices.
self._check('i...ii->...i', (3, 2, 3, 3))
def testBinary(self):
self._check(',->', (), ())
self._check('a,a->', (3,), (3,))
self._check('a,a->a', (3,), (3,))
self._check('ba,b->', (3, 2), (3,))
self._check('ab,b->a', (3, 4), (4,))
self._check('ab,ab->', (3, 4), (3, 4))
self._check('nij,jk->nik', (5, 2, 3), (3, 4))
self._check('abc,bad->abcd', (1, 2, 3), (2, 1, 4))
# Repeated indices.
self._check('ijj,k->ik', (2, 3, 3), (4,))
self._check('aba,a->b', (3, 4, 3), (3,))
# From https://github.com/dask/dask/pull/3412#discussion_r182413444
self._check('aab,bc->ac', (2, 2, 3), (3, 4))
self._check('aab,bcc->ac', (2, 2, 3), (3, 4, 4))
# Based on https://github.com/google/jax/issues/37#issuecomment-448572187
self._check('sa,shb->shab', (2, 1), (2, 3, 4))
def testBroadcasting(self):
# Batch matmul without broadcasting.
self._check('...ij,...jk->...ik', (5, 1, 2, 3), (5, 1, 3, 4))
# Batch matmul with broadcasting.
self._check('...ij,...jk->...ik', (1, 2, 3), (3, 5))
self._check('...ij,...jk->...ik', (2, 3), (1, 3, 5))
self._check('...ij,...jk->...ik', (5, 2, 3), (3, 5))
self._check('...ij,...jk->...ik', (2, 3), (5, 3, 5))
self._check('...ij,...jk->...ik', (3, 1, 2, 3), (1, 1, 7, 3, 5))
self._check('i...j,j...k->...ik', (2, 1, 3, 1, 3), (3, 1, 7, 5))
# Broadcasting with repeated indices.
self._check('ij,jk...k->i...', (3, 2), (2, 4, 1, 4))
self._check('ij,jk...k->...i', (3, 2), (2, 4, 5, 4))
self._check('ijj,jk...k->i...', (3, 2, 2), (2, 4, 1, 4))
self._check('i...jj,jk...k->i...', (3, 3, 1, 2, 2), (2, 4, 1, 5, 4))
    # The following two cases are from https://stackoverflow.com/a/19203475/1611416
self._check('...abc,...abcd->...d', (1, 1, 2, 3, 4), (5, 2, 3, 4, 6))
self._check('ab...,b->ab...', (2, 3, 1, 1, 5), (3,))
def testDtypes(self):
for dtype in [np.float64, np.float32, np.complex64, np.complex128]:
self._check('ij,jk->ik', (2, 2), (2, 2), dtype=dtype)
self._check('ji,jk->ik', (2, 2), (2, 2), dtype=dtype)
self._check('ji,kj->ik', (2, 2), (2, 2), dtype=dtype)
self._check('ij,jk->ki', (2, 2), (2, 2), dtype=dtype)
self._check('ji,kj->ki', (2, 2), (2, 2), dtype=dtype)
@test_util.run_in_graph_and_eager_modes
def testInvalid(self):
r = np.random.RandomState(0)
cases = [
# incorrect rank.
('ij,jk->ik', r.randn(1, 2, 3), r.randn(3, 4)),
('...ij,jk->ik', r.randn(3), r.randn(3, 4)),
# inconsistent dimensions.
('ij,jk->ik', r.randn(2, 3), r.randn(4, 4)),
# broadcasting is invalid
('...ij,...jk->...ik', r.randn(5, 2, 3), r.randn(7, 3, 4)),
# output should have ellipsis when broadcasting shape is
# non-empty.
('...ij,...jk->ik', r.randn(2, 2, 3), r.randn(3, 4)),
]
for args in cases:
with self.assertRaises((ValueError, errors.InvalidArgumentError)):
_ = self.evaluate(gen_linalg_ops.einsum(args[1:], args[0]))
placeholders = [
array_ops.placeholder_with_default(x, shape=None) for x in args[1:]
]
with self.assertRaises((ValueError, errors.InvalidArgumentError)):
_ = self.evaluate(gen_linalg_ops.einsum(placeholders, args[0]))
@test_util.run_in_graph_and_eager_modes
def testPlaceholder(self):
def check(equation, *input_and_placeholder_shapes):
r = np.random.RandomState(0)
inputs = []
input_placeholders = []
for actual_shape, placeholder_shape in input_and_placeholder_shapes:
input_np = np.array(r.randn(*actual_shape))
inputs.append(input_np)
input_placeholders.append(
array_ops.placeholder_with_default(input_np, placeholder_shape))
a = np.einsum(equation, *inputs)
b = self.evaluate(gen_linalg_ops.einsum(input_placeholders, equation))
self.assertAllClose(a, b, atol=1e-4, rtol=1e-4)
check('bijl,bjkm->bik', ((9, 2, 3, 5), (None, None, None, 5)),
((9, 3, 4, 7), (None, None, 4, None)))
check('bijl,bjkm->bik', ((9, 2, 3, 5), None), ((9, 3, 4, 7), None))
check('...ij,...->...i', ((4, 3, 1, 2), (None, 3, None, 2)),
((4, 3), (None, 3)))
check('...ij,...jk->...ik', ((3, 1, 2, 3), None), ((1, 7, 3, 4), None))
def testOutputRepeatedLabels(self):
    # This is the reverse of the repeated-input-labels operation, to be used
    # for computing symbolic gradients of einsum.
r = np.random.RandomState(0)
a = r.randn(2, 2)
s = 'a->aa'
diag_a = np.diag(np.diag(a))
b = self.evaluate(gen_linalg_ops.einsum([np.diag(a)], s))
self.assertAllClose(diag_a, b, atol=1e-4, rtol=1e-4)
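# A minimal standalone sketch (an assumption, mirroring _check above) of
# validating the TF einsum kernel against numpy for a single equation:
def _check_matmul_equation():
  r = np.random.RandomState(0)
  a = r.randn(2, 3).astype(np.float32)
  b = r.randn(3, 4).astype(np.float32)
  expected = np.einsum('ij,jk->ik', a, b)
  result = gen_linalg_ops.einsum(
      [constant_op.constant(a), constant_op.constant(b)], 'ij,jk->ik')
  # Callers would evaluate `result` and compare with atol=1e-4, rtol=1e-4.
  return expected, result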
class EinsumBenchmark(test.Benchmark):
cases = [
# Unary cases.
['ijk->i', 100],
['ijk->kji', 100],
# Regular matmul or batch matmul.
['ij,jk->ik', 1000],
['ji,kj->ik', 1000],
['ab,ab->', 100],
['ab,ba->', 100],
['abc,abc->', 100],
['abc,bac->', 100],
['abc,cba->', 100],
['bij,bjk->bik', 100],
['bji,bjk->bki', 100],
['ikl,kji->kl', 100],
['klj,lki->ij', 100],
['ijk,ilj->kli', 100],
['kij,mkb->ijmb', 100],
['abcd,ad->bc', 40],
# Larger binary contractions.
['ijk,jklm->il', 40],
['efabc,eabcd->efd', 30],
['fabec,abcde->fde', 30],
['efabc,edabc->efd', 30],
['eadbf,dfebc->ecfad', 30],
['abcdef,bcdfg->abcdeg', 30],
]
def benchmarkEinsum(self):
for equation, dim in self.cases:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device('/cpu:0'):
r = np.random.RandomState(0)
input_subscripts = equation.split('->')[0].split(',')
input_vars = []
for subscript in input_subscripts:
input_shape = (dim,) * len(subscript)
input_vars.append(
variables.Variable(np.array(r.randn(*input_shape), np.float32)))
variables.global_variables_initializer().run()
# Call einsum_v1.
self.run_op_benchmark(
sess,
special_math_ops.einsum(equation, *input_vars),
min_iters=50,
name='einsum_v1_cpu_({})_{}'.format(equation, dim))
# Call gen_linalg_ops.einsum.
self.run_op_benchmark(
sess,
gen_linalg_ops.einsum(input_vars, equation),
min_iters=50,
name='einsum_v2_cpu_({})_{}'.format(equation, dim))
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/einsum_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _NumpyAdd(ref, indices, updates):
  # Since numpy advanced (fancy-index) assignment does not accumulate over
  # repeated indices, we run a simple loop to perform scatter_add.
for i, indx in np.ndenumerate(indices):
ref[indx] += updates[i]
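# A sketch (an assumption, not used by the tests): np.add.at performs
# unbuffered in-place addition, so it matches the loop above even when
# `indices` contains repeats.
def _NumpyAddViaUfuncAt(ref, indices, updates):
  np.add.at(ref, indices, updates)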
def _NumpyAddScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] += update
def _NumpySub(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] -= updates[i]
def _NumpySubScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] -= update
def _NumpyMul(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] *= updates[i]
def _NumpyMulScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] *= update
def _NumpyDiv(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] /= updates[i]
def _NumpyDivScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] /= update
def _NumpyMin(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], updates[i])
def _NumpyMinScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], update)
def _NumpyMax(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], updates[i])
def _NumpyMaxScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], update)
def _NumpyUpdate(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = updates[i]
def _NumpyUpdateScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = update
_TF_OPS_TO_NUMPY = {
state_ops.scatter_update: _NumpyUpdate,
state_ops.scatter_add: _NumpyAdd,
state_ops.scatter_sub: _NumpySub,
state_ops.scatter_mul: _NumpyMul,
state_ops.scatter_div: _NumpyDiv,
state_ops.scatter_min: _NumpyMin,
state_ops.scatter_max: _NumpyMax,
}
_TF_OPS_TO_NUMPY_SCALAR = {
state_ops.scatter_update: _NumpyUpdateScalar,
state_ops.scatter_add: _NumpyAddScalar,
state_ops.scatter_sub: _NumpySubScalar,
state_ops.scatter_mul: _NumpyMulScalar,
state_ops.scatter_div: _NumpyDivScalar,
state_ops.scatter_min: _NumpyMinScalar,
state_ops.scatter_max: _NumpyMaxScalar,
}
class ScatterTest(test.TestCase):
def _VariableRankTest(self,
tf_scatter,
vtype,
itype,
repeat_indices=False,
updates_are_scalar=False):
np.random.seed(8)
with self.cached_session(use_gpu=True):
for indices_shape in (), (2,), (3, 7), (3, 4, 7):
for extra_shape in (), (5,), (5, 9):
# Generate random indices with no duplicates for easy numpy comparison
size = np.prod(indices_shape, dtype=itype)
first_dim = 3 * size
indices = np.arange(first_dim)
np.random.shuffle(indices)
indices = indices[:size]
if size > 1 and repeat_indices:
# Add some random repeats.
indices = indices[:size // 2]
for _ in range(size - size // 2):
# Randomly append some repeats.
indices = np.append(indices,
indices[np.random.randint(size // 2)])
np.random.shuffle(indices)
indices = indices.reshape(indices_shape)
if updates_are_scalar:
updates = _AsType(np.random.randn(), vtype)
else:
updates = _AsType(
np.random.randn(*(indices_shape + extra_shape)), vtype)
# Clips small values to avoid division by zero.
def clip_small_values(x):
threshold = 1e-4
sign = np.sign(x)
if isinstance(x, np.int32):
threshold = 1
sign = np.random.choice([-1, 1])
return threshold * sign if np.abs(x) < threshold else x
updates = np.vectorize(clip_small_values)(updates)
old = _AsType(np.random.randn(*((first_dim,) + extra_shape)), vtype)
# Scatter via numpy
new = old.copy()
if updates_are_scalar:
np_scatter = _TF_OPS_TO_NUMPY_SCALAR[tf_scatter]
else:
np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref = variables.Variable(old)
self.evaluate(ref.initializer)
self.evaluate(tf_scatter(ref, indices, updates))
self.assertAllClose(self.evaluate(ref), new)
def _VariableRankTests(self,
tf_scatter,
repeat_indices=False,
updates_are_scalar=False):
vtypes = [np.float32, np.float64]
if tf_scatter != state_ops.scatter_div:
vtypes.append(np.int32)
for vtype in vtypes:
for itype in (np.int32, np.int64):
self._VariableRankTest(tf_scatter, vtype, itype, repeat_indices,
updates_are_scalar)
def testVariableRankUpdate(self):
self._VariableRankTests(state_ops.scatter_update, False)
def testVariableRankAdd(self):
self._VariableRankTests(state_ops.scatter_add, False)
def testVariableRankSub(self):
self._VariableRankTests(state_ops.scatter_sub, False)
def testVariableRankMul(self):
self._VariableRankTests(state_ops.scatter_mul, False)
def testVariableRankDiv(self):
self._VariableRankTests(state_ops.scatter_div, False)
def testVariableRankMin(self):
self._VariableRankTests(state_ops.scatter_min, False)
def testVariableRankMax(self):
self._VariableRankTests(state_ops.scatter_max, False)
def testRepeatIndicesAdd(self):
self._VariableRankTests(state_ops.scatter_add, True)
def testRepeatIndicesSub(self):
self._VariableRankTests(state_ops.scatter_sub, True)
def testRepeatIndicesMul(self):
self._VariableRankTests(state_ops.scatter_mul, True)
def testRepeatIndicesDiv(self):
self._VariableRankTests(state_ops.scatter_div, True)
def testRepeatIndicesMin(self):
self._VariableRankTests(state_ops.scatter_min, True)
def testRepeatIndicesMax(self):
self._VariableRankTests(state_ops.scatter_max, True)
def testVariableRankUpdateScalar(self):
self._VariableRankTests(state_ops.scatter_update, False, True)
def testVariableRankAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, False, True)
def testVariableRankSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, False, True)
def testVariableRankMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, False, True)
def testVariableRankDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, False, True)
def testVariableRankMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, False, True)
def testVariableRankMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, False, True)
def testRepeatIndicesAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, True, True)
def testRepeatIndicesSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, True, True)
def testRepeatIndicesMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, True, True)
def testRepeatIndicesDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, True, True)
def testRepeatIndicesMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, True, True)
def testRepeatIndicesMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, True, True)
def testBooleanScatterUpdate(self):
if not test.is_gpu_available():
with self.session(use_gpu=False):
var = variables.Variable([True, False])
update0 = state_ops.scatter_update(var, 1, True)
update1 = state_ops.scatter_update(
var, constant_op.constant(
0, dtype=dtypes.int64), False)
self.evaluate(var.initializer)
self.evaluate([update0, update1])
self.assertAllEqual([False, True], self.evaluate(var))
def testScatterOutOfRangeCpu(self):
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
if not test.is_gpu_available():
with self.session(use_gpu=False):
ref = variables.Variable(params)
self.evaluate(ref.initializer)
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
self.evaluate(op(ref, indices, updates))
# Test some out of range errors.
indices = np.array([-1, 0, 5])
with self.assertRaisesOpError(
r'indices\[0\] = -1 is not in \[0, 6\)'):
self.evaluate(op(ref, indices, updates))
indices = np.array([2, 0, 6])
with self.assertRaisesOpError(r'indices\[2\] = 6 is not in \[0, 6\)'):
self.evaluate(op(ref, indices, updates))
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
if test.is_gpu_available():
return
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
      # We don't test the implementation; we just test that there are no failures.
with test_util.force_gpu():
ref = variables.Variable(params)
self.evaluate(ref.initializer)
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
self.evaluate(op(ref, indices, updates))
        # Indices out of range should not fail.
indices = np.array([-1, 0, 5])
self.evaluate(op(ref, indices, updates))
indices = np.array([2, 0, 6])
self.evaluate(op(ref, indices, updates))
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/scatter_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for new version of accumulate_n op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.control_flow_ops import while_loop as while_loop_v1
from tensorflow.python.platform import googletest
class AccumulateNV2Test(test_util.TensorFlowTestCase):
"""Tests of the new, differentiable version of accumulate_n."""
@test_util.run_deprecated_v1
def testFloat(self):
np.random.seed(12345)
x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
tf_x = ops.convert_n_to_tensor(x)
with self.session(use_gpu=True):
self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllClose(x[0] * 5,
math_ops.accumulate_n([tf_x[0]] * 5).eval())
@test_util.run_deprecated_v1
def testInt(self):
np.random.seed(54321)
x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
tf_x = ops.convert_n_to_tensor(x)
with self.session(use_gpu=True):
self.assertAllEqual(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllEqual(x[0] * 6,
math_ops.accumulate_n([tf_x[0]] * 6).eval())
@test_util.run_deprecated_v1
def testUnknownShape(self):
with self.session(use_gpu=True):
x0 = array_ops.placeholder(dtype=dtypes_lib.int32, shape=[None])
acc = math_ops.accumulate_n([x0, x0], shape=[None])
self.assertAllEqual([2, 4], acc.eval(feed_dict={x0: [1, 2]}))
@test_util.run_deprecated_v1
def testGrad(self):
np.random.seed(42)
for num_inputs in range(1, 10):
with self.cached_session(use_gpu=True) as sess:
input_vars = [
variables.Variable(10.0 * np.random.random())
for _ in range(0, num_inputs)
]
accum_n = math_ops.accumulate_n(input_vars)
self.evaluate(variables.global_variables_initializer())
accum_n_grad = gradients.gradients(accum_n, input_vars)
self.assertAllEqual(
np.repeat(1.0, num_inputs), # d/dx (x + y + ...) = 1
[g.eval() for g in accum_n_grad])
# The tests below used to be in a separate class under cwise_ops_test.py,
# which did not run in the default test target.
# Putting them here so that everything that exercises AccumulateNV2 is in
# one place and the default build runs all unit tests.
def testSimple(self):
with self.cached_session():
random_arrays = [
np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
]
random_tensors = [
ops.convert_to_tensor(x, dtype=dtypes_lib.float32)
for x in random_arrays
]
tf_val = math_ops.accumulate_n(random_tensors)
np_val = random_arrays[0]
for random_array in random_arrays[1:]:
np_val += random_array
self.assertAllClose(np_val, self.evaluate(tf_val))
  # Test that the AccumulateNV2 rewrite correctly adds the edges necessary to
  # propagate the while loop execution frame to all nodes.
def testAccumulateInsideWhileLoop(self):
with self.cached_session():
random_arrays = [
np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
]
random_tensors = [
ops.convert_to_tensor(x, dtype=dtypes_lib.float32)
for x in random_arrays
]
def cond_fn(i, acc, tensors):
del acc, tensors # unused
return i < 1 # do just one iteration
def body_fn(i, acc, tensors):
return i + 1, acc + math_ops.accumulate_n(tensors), tensors
zeros = np.zeros((16, 16, 16, 16)).astype(np.float32)
_, tf_val, _ = while_loop_v1(cond_fn, body_fn, (0, zeros, random_tensors))
np_val = random_arrays[0]
for random_array in random_arrays[1:]:
np_val += random_array
self.assertAllClose(np_val, self.evaluate(tf_val))
def testZeroArgs(self):
with self.cached_session():
with self.assertRaises(ValueError):
tf_val = math_ops.accumulate_n([])
self.evaluate(tf_val)
def testWrongShape(self):
with self.cached_session():
with self.assertRaises(ValueError):
a = variables.Variable(0.2)
b = variables.Variable(0.1)
math_ops.accumulate_n([a, b], shape=[2, 2]) # Should be shape=[]
def testIncompatibleShapes(self):
with self.cached_session():
with self.assertRaises(ValueError):
a = variables.Variable(np.array([0.1, 0.2]))
b = variables.Variable(np.array([[0.3], [0.4]]))
math_ops.accumulate_n([a, b])
def testWrongType(self):
with self.cached_session():
with self.assertRaises(TypeError):
a = variables.Variable(0.2, dtype=np.float32)
b = variables.Variable(0.1, dtype=np.float32)
math_ops.accumulate_n([a, b], tensor_dtype=np.int32)
def testWrongTypeOneInput(self):
# Scenario that used to trigger a bug, even when testWrongType() worked
with self.cached_session():
with self.assertRaises(TypeError):
a = variables.Variable(0.2, dtype=np.float32)
math_ops.accumulate_n([a], tensor_dtype=np.int32)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/accumulate_n_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for coefficient-wise operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x**y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
_LT = lambda x, y: x < y
_LE = lambda x, y: x <= y
_GT = lambda x, y: x > y
_GE = lambda x, y: x >= y
_AND = lambda x, y: x & y
_OR = lambda x, y: x | y
_XOR = lambda x, y: x ^ y
_INV = lambda x: ~x
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), x_values
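# Example usage (a sketch, not exercised here): zero out entries of a dense
# random array below `thresh` and get the equivalent SparseTensor plus the
# surviving values:
#
#   sp_t, vals = _sparsify(np.random.rand(3, 4), thresh=0.5)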
def _default_tolerance(dtype):
"""Returns a sensible default tolerance for comparing results of a given type.
Args:
dtype: A datatype.
"""
if dtype == np.float16:
return 5e-3
elif dtype in (np.float32, np.complex64):
return 1e-3
elif dtype in (np.float64, np.complex128):
return 1e-5
else:
return None # Fail fast for unexpected types
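# For instance (a sketch): a float32 comparison would use
#   self.assertAllClose(np_ans, tf_ans, rtol=_default_tolerance(np.float32))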
class ComparisonOpTest(test.TestCase):
def _compareScalar(self, func, x, y, dtype):
with test_util.use_gpu():
out = func(
ops.convert_to_tensor(np.array([x]).astype(dtype)),
ops.convert_to_tensor(np.array([y]).astype(dtype)))
ret = self.evaluate(out)
return ret[0]
def testScalarCompareScalar(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
data = [-1, 0, 1]
for t in dtypes:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.less, x, y, t), x < y)
self.assertEqual(
self._compareScalar(math_ops.less_equal, x, y, t), x <= y)
self.assertEqual(
self._compareScalar(math_ops.greater, x, y, t), x > y)
self.assertEqual(
self._compareScalar(math_ops.greater_equal, x, y, t), x >= y)
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
for t in [np.complex64, np.complex128]:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
def _compare(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with test_util.use_gpu():
out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
def testTensorCompareTensor(self):
x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
y = np.linspace(20, -10, 6).reshape(1, 3, 2)
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(xt, yt, np.less, math_ops.less)
self._compare(xt, yt, np.less_equal, math_ops.less_equal)
self._compare(xt, yt, np.greater, math_ops.greater)
self._compare(xt, yt, np.greater_equal, math_ops.greater_equal)
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
# Complex types do not support ordering but do support equality tests.
for t in [np.complex64, np.complex128]:
xt = x.astype(t)
xt -= 1j * xt
yt = y.astype(t)
yt -= 1j * yt
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
if dtype in (np.complex64, np.complex128):
x -= 1j * x
y -= 1j * y
self._compare(x, y, np_func, tf_func)
self._compare(y, x, np_func, tf_func)
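  # For example (a sketch): xs = [1, 3, 2] against ys = [2] broadcasts along
  # the trailing axis, so both np_func and tf_func return a boolean array of
  # shape [1, 3, 2].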
def _testBCastByFunc(self, np_func, tf_func, include_complex=False):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
]
if include_complex:
dtypes.extend([np.complex64, np.complex128])
for (xs, ys) in shapes:
for dtype in dtypes:
self._compareBCast(xs, ys, dtype, np_func, tf_func)
def testBCastLess(self):
self._testBCastByFunc(np.less, math_ops.less)
def testBCastLessEqual(self):
self._testBCastByFunc(np.less_equal, math_ops.less_equal)
def testBCastGreater(self):
self._testBCastByFunc(np.greater, math_ops.greater)
def testBCastGreaterEqual(self):
self._testBCastByFunc(np.greater_equal, math_ops.greater_equal)
def testBCastEqual(self):
self._testBCastByFunc(np.equal, math_ops.equal, include_complex=True)
def testBCastNotEqual(self):
self._testBCastByFunc(
np.not_equal, math_ops.not_equal, include_complex=True)
def testShapeMismatch(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
funcs = [
math_ops.less, math_ops.less_equal, math_ops.greater,
math_ops.greater_equal, math_ops.equal, math_ops.not_equal
]
x = np.arange(0, 10).reshape([2, 5])
y = np.arange(0, 10).reshape([5, 2])
for t in dtypes:
for f in funcs:
with self.assertRaisesRegexp(
(ValueError, errors.InvalidArgumentError),
"Incompatible shapes|Dimensions must be equal"):
f(x.astype(t), y.astype(t))
class LogicalOpTest(test.TestCase):
def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):
np_ans = np_func(x, y)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_val = self.evaluate(out)
self.assertEqual(out.dtype, dtypes_lib.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def _not(self, x, use_gpu=False):
np_ans = np.logical_not(x)
with test_util.device(use_gpu=use_gpu):
out = math_ops.logical_not(ops.convert_to_tensor(x))
tf_val = self.evaluate(out)
self.assertEqual(out.dtype, dtypes_lib.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def testScalar(self):
data = [np.array([True]), np.array([False])]
for use_gpu in [True, False]:
for x in data:
self._not(x, use_gpu)
for x in data:
for y in data:
self._compareBinary(x, y, np.logical_and, math_ops.logical_and,
use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor,
use_gpu)
def testTensor(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
for use_gpu in [True, False]:
self._not(x, use_gpu)
self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)
def testBCast(self):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
for (xs, ys) in shapes:
x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)
y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)
for use_gpu in [True, False]:
self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)
for f in [math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
f(x, y)
@test_util.run_deprecated_v1
def testUsingAsPythonValueFails(self):
# Ensure that we raise an error when the user attempts to treat a
# `Tensor` as a Python `bool`.
b = constant_op.constant(False)
with self.assertRaises(TypeError):
if b:
pass
x = constant_op.constant(3)
y = constant_op.constant(4)
with self.assertRaises(TypeError):
if x > y:
pass
z = constant_op.constant(7)
# The chained comparison should fail because Python computes `x <
# y` and short-circuits the comparison with `z` if it is `False`.
with self.assertRaises(TypeError):
_ = x < y < z
class SelectOpTest(test.TestCase):
def _compare(self, fn, c, x, y, use_gpu):
np_ans = np.where(c, x, y)
with test_util.device(use_gpu=use_gpu):
out = fn(c, x, y)
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self,
fn,
c,
x,
y,
numeric_gradient_type=None,
x_init_value=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = fn(c, inx, iny)
s = list(np.shape(c))
if x_init_value is None:
x_init_value = x
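      # For broadcastable operands of different shapes, use y broadcast to
      # x's shape as the init value so compute_gradient sees consistent
      # shapes.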
if x.shape != y.shape:
x_init_value = np.broadcast_to(y, x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, out, s, x_init_value=x_init_value)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = fn(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inxf, s, outf, s, x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, fn, c, x, y, numeric_gradient_type=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = fn(c, inx, iny)
s = list(np.shape(c))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, s, out, s, x_init_value=x, delta=1.0)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = fn(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inyf, s, outf, s, x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testScalar(self, fn):
c = True
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testScalar(self):
self._testScalar(array_ops.where)
self._testScalar(array_ops.where_v2)
def _testScalarBroadcast(self, fn, c, x, y):
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testScalarBroadcast(self):
c = True
# where_v2 only
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 2) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 2) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(3, 2) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
def _testBasic(self, fn):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testBasic(self):
self._testBasic(array_ops.where)
self._testBasic(array_ops.where_v2)
def _testBasicBroadcast(self, fn, c, x, y):
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testBasicBroadcast(self):
c0 = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
c1 = np.random.randint(0, 2, 2).astype(np.bool).reshape(1, 1, 2)
c2 = np.random.randint(0, 2, 3).astype(np.bool).reshape(1, 3, 1)
c3 = np.random.randint(0, 2, 1).astype(np.bool).reshape(1, 1, 1)
for c in [c0, c1, c2, c3]:
# where_v2 only
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 2) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 2) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(3, 2) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
def _testGradients(self, fn):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [np.float16, np.float32, np.float64]:
xt = x.astype(t)
yt = y.astype(t)
if t == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(fn, c, xt, yt, np.float)
self._compareGradientY(fn, c, xt, yt, np.float)
else:
self._compareGradientX(fn, c, xt, yt)
self._compareGradientY(fn, c, xt, yt)
@test_util.run_deprecated_v1
def testGradients(self):
self._testGradients(array_ops.where)
self._testGradients(array_ops.where_v2)
@test_util.run_deprecated_v1
def testGradientsBroadcast(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
for t in [np.float32, np.float64]:
# where_v2 only
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 2) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 2) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(3, 2) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
def _testShapeMismatch(self, fn):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(2, 5, 3) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
fn(c, xt, yt)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
self._testShapeMismatch(array_ops.where)
self._testShapeMismatch(array_ops.where_v2)
def _testEmptyTensor(self, fn):
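    # Zero-sized inputs should flow through select and produce an empty
    # result of the same shape.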
c = np.random.randint(0, 3, 0).astype(np.bool).reshape(1, 3, 0)
x = np.random.rand(1, 3, 0) * 100
y = np.random.rand(1, 3, 0) * 100
z_expected = np.zeros((1, 3, 0), dtype=np.float32)
with self.cached_session():
xt = x.astype(np.float32)
yt = y.astype(np.float32)
z = fn(c, xt, yt).eval()
self.assertAllEqual(z_expected, z)
@test_util.run_deprecated_v1
def testEmptyTensor(self):
self._testEmptyTensor(array_ops.where)
self._testEmptyTensor(array_ops.where_v2)
def _testNan(self, fn):
with self.cached_session():
for c in False, True:
for a in 7.0, np.nan:
for b in 5.0, np.nan:
x = fn(c, a, b).eval()
y = a if c else b
self.assertEqual(np.isnan(x), np.isnan(y))
@test_util.run_deprecated_v1
def testNan(self):
"""Verify that nans don't propagate where they shouldn't."""
self._testNan(array_ops.where)
self._testNan(array_ops.where_v2)
class BatchSelectOpTest(test.TestCase):
"""Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+."""
def _compare(self, c, x, y, use_gpu):
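    # NumPy reference: pick the whole x_i or y_i slice for each batch entry
    # according to c_i, then transpose back to the batch-major layout that
    # array_ops.where produces for a vector condition.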
np_ans = np.dstack(
[x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(
[2, 0, 1])
with test_util.device(use_gpu=use_gpu):
out = array_ops.where(c, x, y)
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = array_ops.where(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, out, s, x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = array_ops.where(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inxf, s, outf, s, x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = array_ops.where(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, s, out, s, x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = array_ops.where(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inyf, s, outf, s, x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def testBasic(self):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(c, xt, yt, use_gpu=True)
@test_util.run_deprecated_v1
def testGradients(self):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [np.float16, np.float32, np.float64]:
xt = x.astype(t)
yt = y.astype(t)
if t == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(c, xt, yt, np.float)
self._compareGradientY(c, xt, yt, np.float)
else:
self._compareGradientX(c, xt, yt)
self._compareGradientY(c, xt, yt)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
c = np.random.randint(0, 2, 8).astype(np.bool)
x = np.random.rand(16, 3, 2) * 100
y = np.random.rand(16, 3, 2) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
array_ops.where(c, xt, yt)
class MinMaxOpTest(test.TestCase):
def _compare(self, x, y, use_gpu):
np_min, np_max = np.minimum(x, y), np.maximum(x, y)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
omin, omax = math_ops.minimum(inx, iny), math_ops.maximum(inx, iny)
tf_min, tf_max = self.evaluate([omin, omax])
self.assertAllEqual(np_min, tf_min)
self.assertAllEqual(np_max, tf_max)
def testBasic(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(1, 3, 2) * 100.
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
def testDifferentShapes(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(2) * 100. # should broadcast
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
def testScalar(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(1).item() * 100. # should broadcast
    # np.float64 and np.int64 are dropped because TF automatically converts
    # the scalar argument to a 32-bit type.
for t in [np.float32, np.int32]:
self._compare(x.astype(t), t(y), use_gpu=False)
self._compare(x.astype(t), t(y), use_gpu=True)
def _compareGradientX(self, func, x, y):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, out, s, x_init_value=x)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, func, x, y):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, s, out, s, x_init_value=y)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testGradients(self):
x = np.random.rand(1, 3, 2) * 100.
# ensure x != y
y = x + (np.random.randint(2, size=x.shape) - .5) * 2 # -1 or +1
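    # min/max have no well-defined gradient where x == y, so the +/-1 offset
    # guarantees the comparison is strict at every element.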
self._compareGradientX(math_ops.maximum, x, y)
self._compareGradientY(math_ops.maximum, x, y)
self._compareGradientX(math_ops.minimum, x, y)
self._compareGradientY(math_ops.minimum, x, y)
class MathOpsOverloadTest(test.TestCase):
def _computeTensorAndLiteral(self, x, y, dtype, func):
with test_util.force_cpu():
inx = ops.convert_to_tensor(x, dtype=dtype)
z = func(inx, y) # Should use __add__, __sub__, etc.
return self.evaluate(z)
def _computeLiteralAndTensor(self, x, y, dtype, func):
with test_util.force_cpu():
iny = ops.convert_to_tensor(y, dtype=dtype)
z = func(x, iny) # Should use __radd__, __rsub__, etc.
return self.evaluate(z)
def _compareBinary(self, x, y, dtype, np_func, tf_func):
np_ans = np_func(x, y).astype(dtype.as_numpy_dtype)
self.assertAllClose(np_ans,
self._computeTensorAndLiteral(x, y, dtype, tf_func))
self.assertAllClose(np_ans,
self._computeLiteralAndTensor(x, y, dtype, tf_func))
def _compareUnary(self, x, dtype, np_func, tf_func):
np_ans = np_func(x).astype(dtype.as_numpy_dtype)
with test_util.force_cpu():
self.assertAllClose(
np_ans, self.evaluate(tf_func(ops.convert_to_tensor(x, dtype=dtype))))
def testOverload(self):
dtypes = [
dtypes_lib.float16,
dtypes_lib.float32,
dtypes_lib.float64,
dtypes_lib.int32,
dtypes_lib.int64,
dtypes_lib.complex64,
dtypes_lib.complex128,
]
funcs = [
(np.add, _ADD),
(np.subtract, _SUB),
(np.multiply, _MUL),
(np.power, _POW),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
]
for dtype in dtypes:
for np_func, tf_func in funcs:
if dtype in (dtypes_lib.complex64,
dtypes_lib.complex128) and tf_func == _FLOORDIV:
continue # floordiv makes no sense for complex
self._compareBinary(10, 5, dtype, np_func, tf_func)
# Mod only works for int32 and int64.
for dtype in [dtypes_lib.int32, dtypes_lib.int64]:
self._compareBinary(10, 3, dtype, np.mod, _MOD)
def testOverloadComparisons(self):
dtypes = [
dtypes_lib.float16,
dtypes_lib.float32,
dtypes_lib.float64,
dtypes_lib.int32,
dtypes_lib.int64,
]
funcs = [
(np.less, _LT),
(np.less_equal, _LE),
(np.greater, _GT),
(np.greater_equal, _GE),
]
for dtype in dtypes:
for np_func, tf_func in funcs:
self._compareBinary(10, 5, dtype, np_func, tf_func)
logical_funcs = [(np.logical_and, _AND), (np.logical_or, _OR),
(np.logical_xor, _XOR), (np.equal, math_ops.equal),
(np.not_equal, math_ops.not_equal)]
for np_func, tf_func in logical_funcs:
self._compareBinary(True, False, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(True, True, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(False, False, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(False, True, dtypes_lib.bool, np_func, tf_func)
self._compareBinary([True, True, False, False],
[True, False, True, False], dtypes_lib.bool, np_func,
tf_func)
self._compareUnary(True, dtypes_lib.bool, np.logical_not, _INV)
self._compareUnary(False, dtypes_lib.bool, np.logical_not, _INV)
self._compareUnary([True, False], dtypes_lib.bool, np.logical_not, _INV)
class IsFiniteInfNanTest(test.TestCase):
def _compare(self, x, use_gpu):
np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
ofinite, oinf, onan = math_ops.is_finite(inx), math_ops.is_inf(
inx), math_ops.is_nan(inx)
tf_finite, tf_inf, tf_nan = self.evaluate([ofinite, oinf, onan])
self.assertAllEqual(np_inf, tf_inf)
self.assertAllEqual(np_nan, tf_nan)
self.assertAllEqual(np_finite, tf_finite)
self.assertShapeEqual(np_inf, oinf)
self.assertShapeEqual(np_nan, onan)
self.assertShapeEqual(np_finite, ofinite)
def _testDtype(self, dtype):
fi = np.finfo(dtype)
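    # Cover zero, +/-1, the smallest resolvable steps, the finite extremes,
    # both infinities, and NaN.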
data = np.array([
0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max, -np.inf,
np.inf, np.nan
]).astype(dtype)
self._compare(data, use_gpu=False)
self._compare(data, use_gpu=True)
def testHalf(self):
self._testDtype(np.float16)
def testFloat(self):
self._testDtype(np.float32)
def testDouble(self):
self._testDtype(np.float64)
def testSqrt(self):
for dtype in [np.float16, np.float32, np.float64]:
fi = np.finfo(dtype)
for size in [1, 3, 4, 7, 8, 63, 64, 65]:
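        # These sizes straddle common SIMD packet widths, presumably to
        # exercise both partial and full vector packets.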
# For float32 Eigen uses Carmack's fast vectorized sqrt algorithm.
# It is not accurate for very large arguments, so we test for
# fi.max/100 instead of fi.max here.
for value in [fi.min, -2, -1, 0, fi.tiny, 1, 2, 1000, fi.max / 100]:
x = np.full((size,), value, dtype=dtype)
np_y = np.sqrt(x)
np_nan = np.isnan(np_y)
with test_util.use_gpu():
tf_y = math_ops.sqrt(x)
tf_nan = math_ops.is_nan(tf_y)
if value < 0:
self.assertAllEqual(np_nan, self.evaluate(tf_nan))
else:
self.assertAllCloseAccordingToType(np_y, self.evaluate(tf_y))
class RoundingTest(test.TestCase):
def _compare_values(self, x, y=None):
y = np.rint(x) if y is None else np.asarray(y)
tf_rint = math_ops.rint(x)
np_rint = self.evaluate(tf_rint)
self.assertAllEqual(y, np_rint)
self.assertShapeEqual(y, tf_rint)
def _compare(self, x):
np_floor, np_ceil = np.floor(x), np.ceil(x)
inx = ops.convert_to_tensor(x)
ofloor, oceil = math_ops.floor(inx), math_ops.ceil(inx)
tf_floor, tf_ceil = self.evaluate([ofloor, oceil])
self.assertAllEqual(np_floor, tf_floor)
self.assertAllEqual(np_ceil, tf_ceil)
self.assertShapeEqual(np_floor, ofloor)
self.assertShapeEqual(np_ceil, oceil)
def _testDtype(self, dtype):
data = (np.arange(-3, 3) / 4.).reshape(1, 3, 2).astype(dtype)
self._compare(data)
# TODO: rint op is not supported for float16
if dtype is np.float16:
return
self._compare_values(data)
x = [0.5, 0.5000001]
y = [0.0, 1.0]
self._compare_values(x, y=y)
# numpy example
x = [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]
y = [-2., -2., -0., 0., 2., 2., 2.]
self._compare_values(x, y=y)
def testTypes(self):
self.skipTest("b/131162241")
for dtype in [np.float16, np.float32, np.float64]:
self._testDtype(dtype)
class ComplexMakeRealImagTest(test.TestCase):
def _compareMake(self, real, imag, use_gpu):
np_ans = real + (1j) * imag
with test_util.device(use_gpu=use_gpu):
real = ops.convert_to_tensor(real)
imag = ops.convert_to_tensor(imag)
tf_ans = math_ops.complex(real, imag)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def testMake(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
for use_gpu in [False, True]:
self._compareMake(real, imag, use_gpu)
self._compareMake(real, 12.0, use_gpu)
self._compareMake(23.0, imag, use_gpu)
def testRealImagNumericType(self):
for use_gpu in [True, False]:
for value in [1., 1j, 1. + 1j]:
np_real, np_imag = np.real(value), np.imag(value)
with test_util.device(use_gpu=use_gpu):
tf_real = math_ops.real(value)
tf_imag = math_ops.imag(value)
self.assertAllEqual(np_real, self.evaluate(tf_real))
self.assertAllEqual(np_imag, self.evaluate(tf_imag))
def _compareRealImag(self, cplx, use_gpu):
np_real, np_imag = np.real(cplx), np.imag(cplx)
np_zeros = np_real * 0
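    # real() of a real tensor is the identity and imag() of one is all
    # zeros; both properties are checked via tf_real_real and tf_imag_real.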
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(cplx)
tf_real = math_ops.real(inx)
tf_imag = math_ops.imag(inx)
tf_real_real = math_ops.real(tf_real)
tf_imag_real = math_ops.imag(tf_real)
self.assertAllEqual(np_real, self.evaluate(tf_real))
self.assertAllEqual(np_imag, self.evaluate(tf_imag))
self.assertAllEqual(np_real, self.evaluate(tf_real_real))
self.assertAllEqual(np_zeros, self.evaluate(tf_imag_real))
def testRealImag64(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + 1j * imag
self._compareRealImag(cplx, use_gpu=False)
self._compareRealImag(cplx, use_gpu=True)
def testRealImag128(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
cplx = real + 1j * imag
self._compareRealImag(cplx, use_gpu=False)
self._compareRealImag(cplx, use_gpu=True)
def _compareAngle(self, cplx, use_gpu):
np_angle = np.angle(cplx)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(cplx)
tf_angle = math_ops.angle(inx)
tf_angle_val = self.evaluate(tf_angle)
self.assertAllClose(np_angle, tf_angle_val)
self.assertShapeEqual(np_angle, tf_angle)
def testAngle64(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + 1j * imag
self._compareAngle(cplx, use_gpu=False)
self._compareAngle(cplx, use_gpu=True)
def testAngle(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
cplx = real + 1j * imag
self._compareAngle(cplx, use_gpu=False)
self._compareAngle(cplx, use_gpu=True)
@test_util.run_deprecated_v1
def testRealReal(self):
for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float32,
dtypes_lib.float64):
x = array_ops.placeholder(dtype)
y = math_ops.real(x)
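      # For real dtypes, real() should return the input tensor unchanged.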
self.assertEqual(x, y)
def _compareConj(self, cplx, use_gpu):
np_ans = np.conj(cplx)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(cplx)
tf_conj = math_ops.conj(inx)
tf_ans = self.evaluate(tf_conj)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, tf_conj)
def testConj64(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + 1j * imag
self._compareConj(cplx, use_gpu=False)
self._compareConj(cplx, use_gpu=True)
def testConj128(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
cplx = real + 1j * imag
self._compareConj(cplx, use_gpu=False)
self._compareConj(cplx, use_gpu=True)
@test_util.run_deprecated_v1
def testConjReal(self):
for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float16,
dtypes_lib.float32, dtypes_lib.float64):
x = array_ops.placeholder(dtype)
y = math_ops.conj(x)
self.assertEqual(x, y)
@test_util.run_deprecated_v1
def testConjString(self):
x = array_ops.placeholder(dtypes_lib.string)
with self.assertRaisesRegexp(TypeError,
r"Expected numeric or variant tensor"):
math_ops.conj(x)
def _compareGradient(self, x):
    # x[:, 0] is real, x[:, 1] is imag. We combine real and imag into
    # complex numbers, then extract the real and imag parts and compute
    # the squared sum. This is obviously the same as sum(real * real) +
    # sum(imag * imag); we just want to make sure the gradient function
    # is checked.
with self.cached_session():
inx = ops.convert_to_tensor(x)
real, imag = array_ops.split(value=inx, num_or_size_splits=2, axis=1)
real, imag = array_ops.reshape(real, [-1]), array_ops.reshape(imag, [-1])
cplx = math_ops.complex(real, imag)
cplx = math_ops.conj(cplx)
loss = math_ops.reduce_sum(math_ops.square(
math_ops.real(cplx))) + math_ops.reduce_sum(
math_ops.square(math_ops.imag(cplx)))
epsilon = 1e-3
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, list(x.shape), loss, [1], x_init_value=x, delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
def _compareBroadcastGradient(self, x):
x_ = ops.convert_to_tensor(x)
epsilon = 1e-3
with self.cached_session():
for args in [(x_, 0.), (0., x_)]:
z = math_ops.reduce_sum(math_ops.abs(math_ops.complex(*args)))
jacob_t, jacob_n = gradient_checker.compute_gradient(
x_, list(x.shape), z, [1], x_init_value=x, delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
@test_util.run_deprecated_v1
def testGradient(self):
# complex64
data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float32)
self._compareGradient(data)
self._compareBroadcastGradient(data)
# complex128
data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float64)
self._compareGradient(data)
def _compareMulGradient(self, data):
# data is a float matrix of shape [n, 4]. data[:, 0], data[:, 1],
# data[:, 2], data[:, 3] are real parts of x, imaginary parts of
# x, real parts of y and imaginary parts of y.
with self.cached_session():
inp = ops.convert_to_tensor(data)
xr, xi, yr, yi = array_ops.split(value=inp, num_or_size_splits=4, axis=1)
def vec(x): # Reshape to a vector
return array_ops.reshape(x, [-1])
xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)
def cplx(r, i): # Combine to a complex vector
return math_ops.complex(r, i)
x, y = cplx(xr, xi), cplx(yr, yi)
# z is x times y in complex plane.
z = x * y
# Defines the loss function as the sum of all coefficients of z.
loss = math_ops.reduce_sum(math_ops.real(z) + math_ops.imag(z))
epsilon = 0.005
jacob_t, jacob_n = gradient_checker.compute_gradient(
inp, list(data.shape), loss, [1], x_init_value=data, delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
@test_util.run_deprecated_v1
def testMulGradient(self):
data = np.arange(1, 2, 0.125).reshape([2, 4]).astype(np.float32)
self._compareMulGradient(data)
class AccumulateTest(test.TestCase):
def testSimple(self):
with self.cached_session():
random_arrays = [
np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
]
random_tensors = [
ops.convert_to_tensor(x, dtype=dtypes_lib.float32)
for x in random_arrays
]
tf_val = math_ops.accumulate_n(random_tensors)
np_val = random_arrays[0]
for random_array in random_arrays[1:]:
np_val += random_array
self.assertAllClose(np_val, self.evaluate(tf_val))
def testZeroArgs(self):
with self.cached_session():
with self.assertRaises(ValueError):
tf_val = math_ops.accumulate_n([])
self.evaluate(tf_val)
def testWrongShape(self):
with self.cached_session():
with self.assertRaises(ValueError):
a = variables.Variable(0.2)
b = variables.Variable(0.1)
math_ops.accumulate_n([a, b], shape=[2, 2]) # Should be shape=[]
def testWrongType(self):
with self.cached_session():
with self.assertRaises(TypeError):
a = variables.Variable(0.2, dtype=np.float32)
b = variables.Variable(0.1, dtype=np.float32)
math_ops.accumulate_n([a, b], tensor_dtype=np.int32)
def testWrongTypeOneInput(self):
# Scenario that used to trigger a bug, even when testWrongType() worked
with self.cached_session():
with self.assertRaises(TypeError):
a = variables.Variable(0.2, dtype=np.float32)
math_ops.accumulate_n([a], tensor_dtype=np.int32)
class PolyvalTest(test.TestCase):
def _runtest(self, dtype, degree):
x = np.random.rand(2, 2).astype(dtype)
coeffs = [np.random.rand(2, 2).astype(dtype) for _ in range(degree + 1)]
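    # np.polyval treats coeffs[0] as the highest-degree coefficient;
    # math_ops.polyval uses the same ordering (Horner's method).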
np_val = np.polyval(coeffs, x)
with self.cached_session():
tf_val = math_ops.polyval(coeffs, x)
self.assertAllClose(np_val, self.evaluate(tf_val))
def testSimple(self):
for dtype in [
np.int32, np.float32, np.float64, np.complex64, np.complex128
]:
for degree in range(5):
self._runtest(dtype, degree)
def testBroadcast(self):
dtype = np.float32
degree = 3
shapes = [(1,), (2, 1), (1, 2), (2, 2)]
for x_shape in shapes:
for coeff_shape in shapes:
x = np.random.rand(*x_shape).astype(dtype)
coeffs = [
np.random.rand(*coeff_shape).astype(dtype)
for _ in range(degree + 1)
]
np_val = np.polyval(coeffs, x)
with self.cached_session():
tf_val = math_ops.polyval(coeffs, x)
self.assertAllClose(np_val, self.evaluate(tf_val))
def testEmpty(self):
x = np.random.rand(2, 2).astype(np.float32)
coeffs = []
np_val = np.polyval(coeffs, x)
with self.cached_session():
tf_val = math_ops.polyval(coeffs, x)
self.assertAllClose(np_val, self.evaluate(tf_val))
class SingularGradientOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testGradientAtSingularity(self):
if not compat.forward_compatible(2019, 9, 14):
self.skipTest("Skipping test for future functionality.")
ops_and_singularity = [
(gen_math_ops.reciprocal, (0.,)),
(gen_math_ops.rsqrt, (0.,)),
(gen_math_ops.sqrt, (0.,)),
(gen_math_ops.sqrt_grad, (
0.,
0.,
)),
(gen_math_ops.reciprocal_grad, (
1.,
0.,
)),
(gen_math_ops.tan, (np.pi / 2,)),
(gen_math_ops.log, (0.,)),
(gen_math_ops.log1p, (-1.,)),
(gen_math_ops.acosh, (0.,)),
(gen_math_ops.asin, (1.,)),
(gen_math_ops.acos, (1.,)),
(gen_math_ops.atan2, (0., 0.)),
(gen_math_ops.div, (1., 0.)),
(gen_math_ops.div_no_nan, (1., 0.)),
(gen_math_ops.real_div, (1., 0.)),
(math_ops.pow, (0., -1.)),
]
for op, singularity in ops_and_singularity:
for dtype in (dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.complex64, dtypes_lib.complex128):
if dtype.is_complex and op in [
gen_math_ops.asin, gen_math_ops.acos, gen_math_ops.atan2
]:
continue
if dtype == dtypes_lib.half and op in [
gen_math_ops.acosh, gen_math_ops.asin, gen_math_ops.acos,
gen_math_ops.atan2
]:
continue
with self.cached_session():
print("op = ", op, ", singularity = ", singularity, ", type = ",
dtype)
args = [constant_op.constant(s, dtype=dtype) for s in singularity]
grad_y = constant_op.constant(0, dtype=dtype)
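          # With a zero upstream gradient, the gradient at the singularity
          # should come back as exact zeros rather than NaN or Inf.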
y = op(*args)
g = gradients_impl.gradients(y, args, grad_ys=grad_y)
g_val = self.evaluate(g)
self.assertAllEqual(g_val, np.zeros(len(singularity)))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/cwise_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_v1_only("FIFOQueue removed from v2")
class FIFOQueueTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueHalf(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=(3, 2))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(
10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
@test_util.run_in_graph_and_eager_modes
def testMultipleDequeues(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue_many([[1, 2, 3]]))
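    # The three dequeue ops may execute in any order within a single run,
    # so compare the results as a set.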
a, b, c = self.evaluate([q.dequeue(), q.dequeue(), q.dequeue()])
self.assertAllEqual(set([1, 2, 3]), set([a, b, c]))
@test_util.run_in_graph_and_eager_modes
def testQueuesDontShare(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue(1))
q2 = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q2.enqueue(2))
self.assertAllEqual(self.evaluate(q2.dequeue()), 2)
self.assertAllEqual(self.evaluate(q.dequeue()), 1)
def testEnqueueDictWithoutNames(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue({"a": 12.0})
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue_many({"a": [12.0, 13.0]})
def testParallelEnqueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testDequeueHalf(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(3, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
self.evaluate(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(self.evaluate(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = self.evaluate(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, self.evaluate(size))
dequeued_t.op.run()
self.assertEqual(0, self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], self.evaluate(size_t))
enqueue_op.run()
self.assertEqual([0], self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpTo(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError("specified shapes"):
q.dequeue_many(0).eval()
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testDequeueUpToNoBlocking(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueUpToNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, (4, 4, 4, 4))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), ((),
(2)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
dtypes_lib.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(
([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many(
(array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
enq = q.enqueue_many(([], []))
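    # Empty Python lists carry no dtype, so enqueue_many must convert them
    # to the queue's declared component dtypes (inputs[0] is the queue
    # handle).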
self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
with self.assertRaises(ValueError):
q.enqueue((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
def testEnqueueWrongShapeAtRuntime(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (3, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Expected \[3,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongShape(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (3, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,3,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
self.evaluate(dequeued_t)
def testParallelEnqueueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
self.evaluate(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(101)
enqueue_op.run()
close_op.run()
# Dequeue up to 101 items in parallel on 10 threads, from closed queue.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(50, dtypes_lib.float32, shapes=())
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
self.evaluate(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(self.evaluate(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shapes=())
enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = array_ops.placeholder(
dtypes_lib.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
        # With equal probability, run enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(
elements_enqueued, elements_enqueued + count, dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shapes=())
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
self.evaluate(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
        # With equal probability, run dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, self.evaluate(dequeued_t))
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(
elements_dequeued, elements_dequeued + count, dtype=np.int32)
self.assertAllEqual(expected_range,
dequeuemany_t.eval({
count_placeholder: count
}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.FIFOQueue(100, dtypes_lib.int32, ())
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.FIFOQueue(total_count, dtypes_lib.int32, ())
enqueue_elems_op = q.enqueue_many((elems,))
      # Define a subgraph that first dequeues a count, then uses dequeue_many
      # to dequeue that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
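    # Unlike dequeue_many, dequeue_up_to on a closed queue returns the
    # remaining elements (here a partial batch of one) instead of raising
    # OutOfRangeError.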
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
self.assertAllEqual(elems[3:], self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with ops.Graph().as_default(), self.session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
sess.run(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(dequeued_t)
self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
def close():
sess.run(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
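    # A blocking dequeue_many(4) on a closed queue holding only 3 elements
    # must fail, and the elements it had already pulled must be restored to
    # the queue in their original order.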
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, (dtypes_lib.float32, dtypes_lib.float32), (
(), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = self.evaluate([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
self.assertEqual([50.0], self.evaluate(dequeued_t))
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
time.sleep(0.01)
self.assertEqual([50.0], self.evaluate(dequeued_t))
self.assertEqual([60.0], self.evaluate(dequeued_t))
# Make sure the thread finishes before exiting.
thread.join()
def testBlockingEnqueueBeforeClose(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
self.evaluate(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.2)
def close():
self.evaluate(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
def testDoesNotLoseValue(self):
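    # Repeatedly evaluating size() must not consume the enqueued element.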
with self.cached_session():
q = data_flow_ops.FIFOQueue(1, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
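    # Two FIFOQueue objects created with the same shared_name refer to one
    # underlying queue, so operations through either handle affect both.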
with self.cached_session():
q1 = data_flow_ops.FIFOQueue(
1, dtypes_lib.float32, shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = data_flow_ops.FIFOQueue(
1, dtypes_lib.float32, shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
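    # Re-opening a shared queue with a different capacity, component types,
    # or component shapes must fail when the second queue op runs.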
with self.cached_session():
q_a_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_f")
q_f_2 = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = []
for _ in xrange(num_queues):
qlist.append(data_flow_ops.FIFOQueue(10, dtypes_lib.float32))
      # Enqueue/Dequeue into a dynamically selected queue.
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.FIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
q2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32)
enq_q = data_flow_ops.FIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.session() as sess:
q_empty = data_flow_ops.FIFOQueue(5, dtypes_lib.float32, ())
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = data_flow_ops.FIFOQueue(5, dtypes_lib.float32)
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
    # Create a new session that hasn't been closed, so that cached_session
    # is not left holding a closed session.
with self.session() as sess:
pass
def testBigEnqueueMany(self):
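    # An enqueue_many of 10 elements into a capacity-5 queue must block
    # partway through and complete only once enough dequeues free up space.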
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(5, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
self.evaluate(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
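    # A dequeue_many(4) on a capacity-2 queue must block until all four
    # single-element enqueues have run.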
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(2, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(self.evaluate(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
self.evaluate(enq)
      # Enough elements have been enqueued to unblock the dequeue.
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.cached_session() as sess:
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
dtypes_lib.uint16, dtypes_lib.bool, dtypes_lib.complex64,
dtypes_lib.complex128
]
shape = (32, 4, 128)
q = data_flow_ops.FIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes_lib.bool:
np_array = np_array > 0
elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = self.evaluate(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testDequeueEnqueueFail(self):
with self.cached_session() as session:
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
a = q.dequeue()
b = control_flow_ops.Assert(False, ["Before enqueue"])
with ops.control_dependencies([b]):
c = q.enqueue(33)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Before enqueue" in str(e)):
session.run([a, c])
@test_util.run_v1_only("FIFOQueue removed from v2")
class FIFOQueueDictTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
names=("i", "j"),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
self.assertEqual(["i", "j"], q.names)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
names=("i", "f"),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
self.assertEqual(["i", "f"], q.names)
def testEnqueueDequeueOneComponent(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=((),), names="f")
      # Verify that, when the queue has named components, enqueue() requires
      # a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue(10.0)
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0,))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 12})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"f": 10.0, "s": "aa"})
enqueue_op = q.enqueue({"f": 10.0})
enqueue_op2 = q.enqueue({"f": 20.0})
enqueue_op3 = q.enqueue({"f": 30.0})
      # Verify that, when the queue has named components, enqueue_many()
      # requires a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many([40.0, 50.0])
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": 12})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "s": ["aa", "bb"]})
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0]})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
self.evaluate(enqueue_op)
self.evaluate(enqueue_op2)
self.evaluate(enqueue_op3)
self.evaluate(enqueue_op4)
f = sess.run(dequeue["f"])
self.assertEqual(10.0, f)
f = sess.run(dequeue_2["f"])
self.assertEqual([20.0, 30.0], list(f))
f = sess.run(dequeue_2["f"])
self.assertEqual([40.0, 50.0], list(f))
def testEnqueueDequeueMultipleComponent(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32, dtypes_lib.string),
shapes=((), (), ()),
names=("f", "i", "s"))
      # Verify that, when the queue has named components, enqueue() requires
      # a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0, 123, "aa"))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 10.0})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 12, "s": "aa"})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0, "x": 10.0})
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0})
enqueue_op2 = q.enqueue({"i": 124, "s": "bb", "f": 20.0})
enqueue_op3 = q.enqueue({"i": 125, "s": "cc", "f": 30.0})
      # Verify that, when the queue has named components, enqueue_many()
      # requires a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many(([40.0, 50.0], [126, 127], ["dd", "ee"]))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": [10.0, 20.0]})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"i": [12, 12], "s": ["aa", "bb"]})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({
"f": [40.0, 50.0],
"i": [126, 127],
"s": ["dd", "ee"],
"x": [1, 2]
})
enqueue_op4 = q.enqueue_many({
"f": [40.0, 50.0],
"i": [126, 127],
"s": ["dd", "ee"]
})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
self.evaluate(enqueue_op)
self.evaluate(enqueue_op2)
self.evaluate(enqueue_op3)
self.evaluate(enqueue_op4)
i, f, s = sess.run([dequeue["i"], dequeue["f"], dequeue["s"]])
self.assertEqual(123, i)
self.assertEqual(10.0, f)
self.assertEqual(compat.as_bytes("aa"), s)
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([124, 125], list(i))
      self.assertEqual([20.0, 30.0], list(f))
      self.assertEqual([compat.as_bytes("bb"), compat.as_bytes("cc")], list(s))
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([126, 127], list(i))
      self.assertEqual([40.0, 50.0], list(f))
      self.assertEqual([compat.as_bytes("dd"), compat.as_bytes("ee")], list(s))
@test_util.run_v1_only("FIFOQueue removed from v2")
class FIFOQueueWithTimeoutTest(test.TestCase):
def testDequeueWithTimeout(self):
with self.session(
config=config_pb2.ConfigProto(operation_timeout_in_ms=20)) as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual(
compat.as_bytes(""), q.queue_ref.op.get_attr("container"))
dequeued_t = q.dequeue()
      # Intentionally do not run any enqueue_ops so that the dequeue blocks
      # until the operation_timeout_in_ms deadline expires.
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
self.evaluate(dequeued_t)
def testReusableAfterTimeout(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
dequeued_t = q.dequeue()
enqueue_op = q.enqueue(37)
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
self.evaluate(enqueue_op)
self.assertEqual(37, self.evaluate(dequeued_t))
@test_util.run_v1_only("FIFOQueue removed from v2")
class QueueContainerTest(test.TestCase):
def testContainer(self):
with ops.Graph().as_default():
with ops.container("test"):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual(
compat.as_bytes("test"), q.queue_ref.op.get_attr("container"))
@test_util.run_v1_only("FIFOQueue removed from v2")
class FIFOQueueBenchmark(test.Benchmark):
"""Benchmark FIFOQueue operations."""
def _build_graph(self):
"""Builds a graph that enqueues and dequeues a single float.
Returns:
A tuple with the graph init tensor and graph output tensor.
"""
q = data_flow_ops.FIFOQueue(1, "float")
init = q.enqueue(1.0)
x = q.dequeue()
q_inc = q.enqueue(x + 1)
return init, q_inc
# TODO(suharshs): Add benchmarks for:
# - different capacities of the queue
# - various sizes of tensors
# - enqueue_many, dequeue_many
def _run(self, num_iters):
"""Benchmarks enqueueing and dequeueing from a FIFOQueue.
Args:
num_iters: The number of iterations to run.
Returns:
The duration of the run in seconds.
"""
graph = ops.Graph()
with graph.as_default():
init, output = self._build_graph()
with session_lib.Session(graph=graph) as session:
init.run()
_ = session.run(output) # warm up.
start_time = time.time()
for _ in range(num_iters):
_ = session.run(output)
duration = time.time() - start_time
print("%f secs per enqueue-dequeue" % (duration / num_iters))
self.report_benchmark(
name="fifo_queue", iters=num_iters, wall_time=duration / num_iters)
return duration
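
  # A minimal, hypothetical entry point (a sketch, not part of the original
  # file): test.Benchmark discovers methods whose names start with
  # "benchmark", so a driver like this would run the measurement loop above.
  def benchmark_enqueue_dequeue(self):
    self._run(10000)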
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/fifo_queue_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.resource_variable_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import gc
import os
import pickle
import re
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
@test_util.with_control_flow_v2
class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEmpty(gc.garbage)
super(ResourceVariableOpsTest, self).tearDown()
@test_util.run_deprecated_v1
def testHandleDtypeShapeMatch(self):
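    # Assigning a value whose dtype or shape does not match the handle's
    # declared dtype and shape must raise ValueError; a matching scalar
    # int32 assignment succeeds.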
with self.cached_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant(0.0, dtype=dtypes.float32)).run()
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[0],
dtype=dtypes.int32)).run()
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
0,
dtype=dtypes.int32)).run()
@test_util.run_gpu_only
def testGPUInt64(self):
with context.eager_mode(), context.device("gpu:0"):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int64)
self.assertAllEqual(1, v.numpy())
def testEagerNameNotIdentity(self):
with context.eager_mode():
v0 = resource_variable_ops.ResourceVariable(1.0, name="a")
v1 = resource_variable_ops.ResourceVariable(2.0, name="a")
self.assertAllEqual(v0.numpy(), 1.0)
self.assertAllEqual(v1.numpy(), 2.0)
def testEagerNameNotNeeded(self):
with context.eager_mode():
v0 = resource_variable_ops.ResourceVariable(1.0)
self.assertAllEqual(v0.numpy(), 1.0)
def testReadVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
resource_variable_ops.assign_variable_op(handle, 1)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Trying to read variable with wrong dtype. "
"Expected float got int32."):
_ = resource_variable_ops.read_variable_op(handle, dtype=dtypes.float32)
def testEagerInitializedValue(self):
with context.eager_mode():
variable = resource_variable_ops.ResourceVariable(1.0, name="eager-init")
self.assertAllEqual(variable.numpy(), 1.0)
self.assertAllEqual(variable.initialized_value().numpy(), 1.0)
def testInitializeVariableUsingInitializedValue(self):
var1 = resource_variable_ops.ResourceVariable(1.0, name="var1")
var2 = resource_variable_ops.ResourceVariable(var1.initialized_value(),
name="var2")
self.assertAllEqual(var2.initialized_value(), 1.0)
def testEagerBool(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(False, name="bool_test")
self.assertAllEqual(bool(v), False)
def testEagerDeepCopy(self):
with context.eager_mode():
init_value = np.ones((4, 4, 4))
variable = resource_variable_ops.ResourceVariable(init_value,
name="init")
copied_variable = copy.deepcopy(variable)
copied_variable.assign(4 * np.ones((4, 4, 4)))
# Copying the variable should create a new underlying tensor with distinct
# values.
self.assertFalse(np.allclose(variable.numpy(), copied_variable.numpy()))
@test_util.run_deprecated_v1
def testGraphDeepCopy(self):
with self.cached_session():
init_value = np.ones((4, 4, 4))
variable = resource_variable_ops.ResourceVariable(init_value,
name="init")
with self.assertRaises(NotImplementedError):
copy.deepcopy(variable)
@test_util.run_in_graph_and_eager_modes
def testStridedSliceAssign(self):
v = resource_variable_ops.ResourceVariable([1.0, 2.0])
self.evaluate(variables.global_variables_initializer())
self.evaluate(v[0].assign(2.0))
self.assertAllEqual(self.evaluate(v), [2.0, 2.0])
@test_util.run_in_graph_and_eager_modes
def testVariableShape(self):
v = resource_variable_ops.ResourceVariable([1., 1.])
self.assertAllEqual(
tensor_util.constant_value(
resource_variable_ops.variable_shape(v.handle)),
[2])
@test_util.run_deprecated_v1
def testDifferentAssignGraph(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
ops.reset_default_graph()
      v.assign(2.0)  # Note: this fails if convert_to_tensor runs on a graph
      # other than the variable's graph.
@test_util.run_deprecated_v1
def testFetchHandle(self):
with self.cached_session():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
self.assertNotEmpty(handle.eval())
@test_util.run_deprecated_v1
def testCachedValueReadBeforeWrite(self):
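    # With a caching device set, a read fetched in the same session.run as
    # an assign_add should observe the value from before the write.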
with self.cached_session() as sess:
v = resource_variable_ops.ResourceVariable(0.0, caching_device="cpu:0")
self.evaluate(v.initializer)
value, _ = sess.run([v, v.assign_add(1.0)])
self.assertAllEqual(value, 0.0)
def testAssignVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1]))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Trying to assign variable with wrong "
"dtype. Expected int32 got float."):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1.], dtype=dtypes.float32))
def testUnprintableHandle(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
self.assertIn("<unprintable>", str(handle))
self.assertIn("<unprintable>", repr(handle))
@test_util.run_in_graph_and_eager_modes
def testDtypeSurvivesIdentity(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
id_handle = array_ops.identity(handle)
self.evaluate(resource_variable_ops.assign_variable_op(
id_handle, constant_op.constant(0, dtype=dtypes.int32)))
def testUnreadOpName(self):
v = resource_variable_ops.ResourceVariable(1.0)
self.assertNotEqual(v.name, v.assign_add(1.0).name)
@test_util.run_in_graph_and_eager_modes
def testCreateRead(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
value = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertAllEqual(1, value)
@test_util.run_in_graph_and_eager_modes
def testManyAssigns(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
create = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32))
with ops.control_dependencies([create]):
first_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
with ops.control_dependencies([first_read]):
write = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(2, dtype=dtypes.int32))
with ops.control_dependencies([write]):
second_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
f, s = self.evaluate([first_read, second_read])
self.assertEqual(f, 1)
self.assertEqual(s, 2)
@test_util.run_in_graph_and_eager_modes
def testAssignAdd(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
self.evaluate(resource_variable_ops.assign_add_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
read = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertEqual(read, 2)
@test_util.run_in_graph_and_eager_modes
def testScatterAdd(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testGradientGatherNd(self):
v = resource_variable_ops.ResourceVariable(
np.random.uniform(size=[2, 2]), dtype=dtypes.float32)
with backprop.GradientTape() as tape:
l = array_ops.gather_nd(v, [[1, 1]])
l = math_ops.reduce_sum(l)
grads = tape.gradient(l, v)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(grads), [[0., 0.], [0., 1.]])
@test_util.run_deprecated_v1
def testDefaultGradientDtype(self):
v = resource_variable_ops.ResourceVariable(
np.random.uniform(size=[2, 2]), dtype=dtypes.float64)
c = constant_op.constant(1.)
identity = array_ops.identity_n([c, v.handle])
# TODO(b/137403775): Remove this.
custom_gradient.copy_handle_data(v.handle, identity[1])
g = gradients_impl.gradients(identity[0], [c, v.handle])
self.assertEqual(g[1].dtype, dtypes.float64)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(g[1], [[0., 0.], [0., 0.]])
@test_util.run_deprecated_v1
def testUnconnectedGradientZeros(self):
b = resource_variable_ops.ResourceVariable(initial_value=[[3., 4.]])
c = constant_op.constant(0.)
g = gradients_impl.gradients(c, [b], unconnected_gradients="zero")[0]
self.assertAllEqual(g.shape.as_list(), [1, 2])
@test_util.run_in_graph_and_eager_modes
def testGradientGatherNdIndexedSlices(self):
v = resource_variable_ops.ResourceVariable(
np.random.uniform(size=[2, 2]), dtype=dtypes.float32)
with backprop.GradientTape() as tape:
l = array_ops.gather_nd(v, [[1], [1]])
l = math_ops.reduce_sum(l)
grads = tape.gradient(l, v)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(grads.values), [[1., 1.], [1., 1.]])
@test_util.run_in_graph_and_eager_modes
def testScatterSub(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_sub(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes
def testScatterMul(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant([[5]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
def testEagerPickle(self):
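    # Pickling an eager ResourceVariable and unpickling it must preserve its
    # name, shape, dtype, trainable flag, and value.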
with context.eager_mode():
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, "var.pickle")
with open(fname, "wb") as f:
v = resource_variable_ops.ResourceVariable(
10.0,
dtype=dtypes.float16,
name="v")
pickle.dump(v, f)
with open(fname, "rb") as f:
new_v = pickle.load(f)
self.assertEqual(new_v.name, v.name)
self.assertEqual(new_v.shape, v.shape)
self.assertEqual(new_v.dtype, v.dtype)
self.assertEqual(new_v.trainable, v.trainable)
self.assertAllEqual(new_v.numpy(), v.numpy())
@test_util.run_in_graph_and_eager_modes
def testScatterDiv(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
def testUseResource(self):
v = variables.VariableV1(1.0, use_resource=True)
self.assertIsInstance(v, resource_variable_ops.ResourceVariable)
def testEagerNoUseResource(self):
with context.eager_mode():
v = variables.Variable(1.0)
self.assertIsInstance(v, resource_variable_ops.ResourceVariable)
@test_util.run_in_graph_and_eager_modes
def testScatterMin(self):
with ops.device("cpu:0"):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[[6]],
dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_min(handle, [0],
constant_op.constant(
[[3]],
dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
def testMetagraph(self):
with ops.Graph().as_default():
with variable_scope.variable_scope("foo", use_resource=True):
a = variable_scope.get_variable("a", initializer=10.0)
momentum.MomentumOptimizer(
learning_rate=0.001, momentum=0.1).minimize(
a,
colocate_gradients_with_ops=True,
global_step=training_util.get_or_create_global_step())
graph = ops.get_default_graph()
meta_graph_def = saver.export_meta_graph(graph=graph)
with ops.Graph().as_default():
saver.import_meta_graph(meta_graph_def, import_scope="")
meta_graph_two = saver.export_meta_graph(graph=graph)
self.assertEqual(meta_graph_def, meta_graph_two)
@test_util.run_in_graph_and_eager_modes
def testScatterMax(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
@test_util.run_in_graph_and_eager_modes
def testScatterAddScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterSubScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_sub(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes
def testScatterMulScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant(5, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
@test_util.run_in_graph_and_eager_modes
def testScatterDivScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
@test_util.run_in_graph_and_eager_modes
def testScatterMinScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_min(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterMaxScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
@test_util.run_in_graph_and_eager_modes
def testScatterAddVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 1.5], name="add")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_add(ops.IndexedSlices(indices=[1], values=[2.5])))
self.assertAllEqual([0.0, 4.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterSubVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 2.5], name="sub")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_sub(ops.IndexedSlices(indices=[1], values=[1.5])))
self.assertAllEqual([0.0, 1.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterMaxVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 4.0], name="max1")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_max(ops.IndexedSlices(indices=[1], values=[5.0])))
self.assertAllEqual([0.0, 5.0], self.evaluate(v))
v = resource_variable_ops.ResourceVariable([0.0, 3.5], name="max2")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_max(ops.IndexedSlices(indices=[1], values=[2.0])))
self.assertAllEqual([0.0, 3.5], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterMinVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 4.0], name="min1")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_min(ops.IndexedSlices(indices=[1], values=[5.0])))
self.assertAllEqual([0.0, 4.0], self.evaluate(v))
v = resource_variable_ops.ResourceVariable([0.0, 3.5], name="min2")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_min(ops.IndexedSlices(indices=[1], values=[2.0])))
self.assertAllEqual([0.0, 2.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterMulVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 4.0], name="mul")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_mul(ops.IndexedSlices(indices=[1], values=[3.0])))
self.assertAllEqual([0.0, 12.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterDivVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 6.0], name="div")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_div(ops.IndexedSlices(indices=[1], values=[2.0])))
self.assertAllEqual([0.0, 3.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterUpdateVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 6.0], name="update")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_update(ops.IndexedSlices(indices=[1], values=[3.0])))
self.assertAllEqual([0.0, 3.0], self.evaluate(v))
@test_util.run_deprecated_v1
def testScatterUpdateString(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.string, shape=[1, 1])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant([["a"]], dtype=dtypes.string)))
self.evaluate(resource_variable_ops.resource_scatter_update(
handle, [0], constant_op.constant([["b"]], dtype=dtypes.string)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.string)
self.assertEqual(compat.as_bytes(self.evaluate(read)[0][0]),
compat.as_bytes("b"))
@test_util.run_deprecated_v1
def testScatterUpdateStringScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.string, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[["a"]],
dtype=dtypes.string)))
self.evaluate(
resource_variable_ops.resource_scatter_update(handle, [0],
constant_op.constant(
"b",
dtype=dtypes.string)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.string)
self.assertEqual(
compat.as_bytes(self.evaluate(read)[0][0]), compat.as_bytes("b"))
# TODO(alive): get this to work in Eager mode.
def testGPU(self):
with test_util.use_gpu():
abc = variable_scope.get_variable(
"abc",
shape=[1],
initializer=init_ops.ones_initializer(),
use_resource=True)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(
self.evaluate(
resource_variable_ops.var_is_initialized_op(abc.handle)),
True)
def testScatterBool(self):
with context.eager_mode():
ref = resource_variable_ops.ResourceVariable(
[False, True, False], trainable=False)
indices = math_ops.range(3)
updates = constant_op.constant([True, True, True])
state_ops.scatter_update(ref, indices, updates)
self.assertAllEqual(ref.read_value(), [True, True, True])
@test_util.run_in_graph_and_eager_modes
def testConstraintArg(self):
constraint = lambda x: x
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var0")
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var1")
# TODO(alive): how should this work in Eager mode?
@test_util.run_deprecated_v1
def testInitFn(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32)
self.assertEqual(v.handle.op.colocation_groups(),
v.initializer.inputs[1].op.colocation_groups())
def testHandleNumpy(self):
with context.eager_mode():
with self.assertRaises(ValueError):
resource_variable_ops.ResourceVariable(
1.0, name="handle-numpy").handle.numpy()
def testCountUpTo(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
self.assertAllEqual(v.count_up_to(1), 0)
with self.assertRaises(errors.OutOfRangeError):
v.count_up_to(1)
def testCountUpToFunction(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
self.assertAllEqual(state_ops.count_up_to(v, 1), 0)
with self.assertRaises(errors.OutOfRangeError):
state_ops.count_up_to(v, 1)
@test_util.run_in_graph_and_eager_modes
def testInitFnDtype(self):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32, name="var0")
self.assertEqual(dtypes.float32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes
def testInitFnNoDtype(self):
v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="var2")
self.assertEqual(dtypes.int32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes
def testInitializeAllVariables(self):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.float32,
name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testOperatorOverload(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(2.0, self.evaluate(v + v))
@test_util.run_in_graph_and_eager_modes
def testAssignMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign(2.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign(3.0, read_value=True)
self.assertEqual(3.0, self.evaluate(assign_with_read))
assign_without_read = v.assign(4.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
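# With read_value=False the trailing read is skipped entirely: eager mode
# has nothing to return (None), while graph mode returns the bare assign
# Operation, which must then be run explicitly, as verified above.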
@test_util.run_in_graph_and_eager_modes
def testLoad(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
v.load(2.0)
self.assertEqual(2.0, self.evaluate(v.value()))
def testShapePassedToGradient(self):
with ops.Graph().as_default():
@custom_gradient.custom_gradient
def differentiable_scatter_update(handle, indices, values):
with ops.control_dependencies([
resource_variable_ops.resource_scatter_update(
handle, indices, values)]):
new_handle = array_ops.identity(handle)
def grad(dresult):
self.assertIsNotNone(
tensor_util.constant_value(dresult.dense_shape))
return [dresult, None, None]
return new_handle, grad
var = variable_scope.get_variable(
"foo", shape=[20], initializer=init_ops.zeros_initializer,
dtype=dtypes.float64, use_resource=True)
indices = math_ops.range(10)
updates = math_ops.range(9, -1, -1, dtype=dtypes.float64)
new_handle = differentiable_scatter_update(var.handle, indices, updates)
gathered = resource_variable_ops.resource_gather(
new_handle, indices, dtype=var.dtype)
gradients_impl.gradients([gathered], [updates])
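# The custom gradient above asserts that the IndexedSlices gradient
# reaching the scatter update carries a statically known dense_shape,
# i.e. the variable's shape ([20]) is forwarded to the gradient function.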
def testToFromProtoCachedValue(self):
with ops.Graph().as_default():
v_def = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(3.0)).to_proto()
v_prime = resource_variable_ops.ResourceVariable(variable_def=v_def)
self.assertIsNone(getattr(v_prime, "_cached_value", None))
other_v_def = resource_variable_ops.ResourceVariable(
caching_device="cpu:0",
initial_value=constant_op.constant(3.0)).to_proto()
other_v_prime = resource_variable_ops.ResourceVariable(
variable_def=other_v_def)
self.assertIsNotNone(other_v_prime._cached_value)
def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session():
v_def = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(3.0)).to_proto()
with ops.Graph().as_default(), self.cached_session():
# v describes a VariableDef-based variable without an initial value.
v = resource_variable_ops.ResourceVariable(variable_def=v_def)
self.assertEqual(3.0, self.evaluate(v.initialized_value()))
# initialized_value should not rerun the initializer_op if the variable
# has already been initialized elsewhere.
self.evaluate(v.assign(1.0))
self.assertEqual(1.0, v.initialized_value().eval())
v_def.ClearField("initial_value_name")
with ops.Graph().as_default(), self.cached_session():
# Restoring a legacy VariableDef proto that does not have
# initial_value_name set should still work.
v = resource_variable_ops.ResourceVariable(variable_def=v_def)
# We should also be able to re-export the variable to a new meta graph.
self.assertProtoEquals(v_def, v.to_proto())
# But attempts to use initialized_value will result in errors.
with self.assertRaises(ValueError):
self.evaluate(v.initialized_value())
def testTrainableInProto(self):
with ops.Graph().as_default():
non_trainable_variable = resource_variable_ops.ResourceVariable(
trainable=False,
initial_value=constant_op.constant(10.0))
self.assertEqual(
False,
resource_variable_ops.ResourceVariable(
variable_def=non_trainable_variable.to_proto())
.trainable)
trainable_variable = resource_variable_ops.ResourceVariable(
trainable=True,
initial_value=constant_op.constant(10.0))
self.assertEqual(
True,
resource_variable_ops.ResourceVariable(
variable_def=trainable_variable.to_proto())
.trainable)
@test_util.run_in_graph_and_eager_modes
def testSparseRead(self):
init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
v = resource_variable_ops.ResourceVariable(
constant_op.constant(init_value, dtype=dtypes.int32), name="var3")
self.evaluate(variables.global_variables_initializer())
value = self.evaluate(v.sparse_read([0, 3, 1, 2]))
self.assertAllEqual(init_value[[0, 3, 1, 2], ...], value)
@test_util.run_in_graph_and_eager_modes
def testGatherNd(self):
init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
v = resource_variable_ops.ResourceVariable(
constant_op.constant(init_value, dtype=dtypes.int32), name="var3")
self.evaluate(variables.global_variables_initializer())
value_op = v.gather_nd([[0, 0], [1, 2], [3, 3]])
self.assertAllEqual([3, 4], value_op.shape)
value = self.evaluate(value_op)
self.assertAllEqual([[0, 1, 2, 3], [24, 25, 26, 27], [60, 61, 62, 63]],
value)
value_op = v.gather_nd([[0, 0, 0], [1, 2, 3], [3, 3, 3]])
self.assertAllEqual([3], value_op.shape)
value = self.evaluate(value_op)
self.assertAllEqual([0, 27, 63], value)
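# gather_nd treats each innermost row of the index tensor as a (partial)
# coordinate: [1, 2] selects the whole subarray v[1, 2, :] (hence the
# [3, 4] result shape), while [1, 2, 3] selects the scalar v[1, 2, 3].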
@test_util.run_deprecated_v1
def testToFromProto(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertEqual(2, math_ops.add(w, 1).eval())
self.assertEqual(v._handle, w._handle)
self.assertEqual(v._graph_element, w._graph_element)
@test_util.run_in_graph_and_eager_modes
def testAssignAddMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_add(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign_add(1.0, read_value=True)
self.assertEqual(3.0, self.evaluate(assign_with_read))
assign_without_read = v.assign_add(1.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testAssignSubMethod(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_sub(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign_sub(1.0, read_value=True)
self.assertEqual(1.0, self.evaluate(assign_with_read))
assign_without_read = v.assign_sub(1.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(0.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testDestroyResource(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3.0, self.evaluate(v.value()))
self.evaluate(resource_variable_ops.destroy_resource_op(v.handle))
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(v.value())
# Handle to a resource not actually created.
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
# Should raise no exception
self.evaluate(resource_variable_ops.destroy_resource_op(
handle, ignore_lookup_error=True))
@test_util.run_deprecated_v1
def testAssignDifferentShapes(self):
with self.cached_session() as sess, variable_scope.variable_scope(
"foo", use_resource=True):
var = variable_scope.get_variable("x", shape=[1, 1], dtype=dtypes.float32)
placeholder = array_ops.placeholder(dtypes.float32)
assign = var.assign(placeholder)
sess.run(
[assign],
feed_dict={placeholder: np.zeros(shape=[2, 2], dtype=np.float32)})
def testAssignDifferentShapesEagerNotAllowed(self):
with context.eager_mode():
with variable_scope.variable_scope("foo"):
var = variable_scope.get_variable("x", shape=[1, 1],
dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError,
"Shapes.*and.*are incompatible"):
assign = var.assign(np.zeros(shape=[2, 2]))
self.evaluate(assign)
@test_util.disable_xla("XLA doesn't allow changing shape at assignment, as "
"dictated by tf2xla/xla_resource.cc:SetTypeAndShape")
@test_util.run_in_graph_and_eager_modes
def testAssignDifferentShapesAllowed(self):
var = resource_variable_ops.ResourceVariable(
initial_value=np.zeros(shape=[1, 1]),
shape=tensor_shape.TensorShape(None))
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(np.zeros(shape=[1, 1]), var.read_value())
self.evaluate(var.assign(np.zeros(shape=[2, 2])))
self.assertAllEqual(np.zeros(shape=[2, 2]), var.read_value())
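# Declaring the variable with shape=TensorShape(None) leaves its static
# shape unspecified, which is what permits the [1, 1] -> [2, 2]
# reassignment above; with a fully defined shape the same assign fails,
# as testAssignDifferentShapesEagerNotAllowed demonstrates.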
@test_util.run_in_graph_and_eager_modes
def testInitValueWrongShape(self):
with self.assertRaisesWithPredicateMatch(
ValueError, r"not compatible with"):
var = resource_variable_ops.ResourceVariable(
initial_value=np.zeros(shape=[3]),
shape=[4])
self.evaluate(variables.global_variables_initializer())
self.evaluate(var.read_value())
@test_util.run_deprecated_v1
def testDtypeAfterFromProto(self):
v = resource_variable_ops.ResourceVariable(2.0)
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertIsInstance(w.dtype, dtypes.DType)
self.assertEqual(v.dtype, w.dtype)
# TODO(alive): get caching to work in eager mode.
@test_util.run_deprecated_v1
def testCachingDevice(self):
with ops.device("/job:server/task:1"):
v = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", v.value().device)
with self.assertRaises(ValueError):
_ = v.value().op.get_attr("_class")
with ops.colocate_with(v.op):
w = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", w.value().device)
with self.assertRaises(ValueError):
_ = w.value().op.get_attr("_class")
@test_util.run_deprecated_v1
def testSharedName(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(300.0, name="var4")
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var4",
# Needed in Eager since we get a unique container name by default.
container=ops.get_default_graph()._container)
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
x = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var5",
container=ops.get_default_graph()._container)
with self.assertRaisesOpError("Resource .*/var5/.* does not exist"):
resource_variable_ops.read_variable_op(x, v.dtype.base_dtype).eval()
@test_util.run_deprecated_v1
def testSharedNameWithNamescope(self):
with self.cached_session():
with ops.name_scope("foo"):
v = resource_variable_ops.ResourceVariable(300.0, name="var6")
self.assertEqual("foo/var6", v._shared_name) # pylint: disable=protected-access
self.assertEqual("foo/var6:0", v.name)
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="foo/var6",
# Needed in Eager since we get a unique container name by default.
container=ops.get_default_graph()._container)
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
@test_util.run_in_graph_and_eager_modes
def testShape(self):
v = resource_variable_ops.ResourceVariable(
name="var4", initial_value=array_ops.ones(shape=[10, 20, 35]))
self.assertEqual("(10, 20, 35)", str(v.shape))
self.assertEqual("(10, 20, 35)", str(v.get_shape()))
self.assertEqual("(10, 20, 35)", str(v.value().shape))
self.assertEqual("(3, 20, 35)", str(v.sparse_read([0, 1, 2]).shape))
if not context.executing_eagerly():
self.assertEqual(
"<unknown>",
str(v.sparse_read(array_ops.placeholder(dtypes.int32)).shape))
@test_util.run_deprecated_v1
def testSetInitialValue(self):
with self.cached_session():
# Initialize variable with a value different from the initial value passed
# in the constructor.
v = resource_variable_ops.ResourceVariable(2.0)
v.initializer.run(feed_dict={v.initial_value: 3.0})
self.assertEqual(3.0, v.value().eval())
@test_util.run_v1_only("b/120545219")
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = resource_variable_ops.ResourceVariable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegexp(ValueError, "initializer"):
control_flow_ops.while_loop(cond, body, [0, 0])
def testVariableEager(self):
with context.eager_mode():
init = array_ops.ones(shape=[10, 20, 35], dtype=dtypes.int32)
constraint = lambda x: x
with ops.name_scope("foo"):
v = resource_variable_ops.ResourceVariable(
name="var7",
initial_value=init,
caching_device="cpu:0",
constraint=constraint)
# Test properties
self.assertEqual(dtypes.int32, v.dtype)
self.assertEqual("foo/var7:0", v.name)
self.assertAllEqual([10, 20, 35], v.shape.as_list())
self.assertIsInstance(v.handle, ops.EagerTensor)
self.assertEqual(constraint, v.constraint)
self.assertAllEqual(init.numpy(), v.read_value().numpy())
self.assertAllEqual(init.numpy(), v.value().numpy())
# Callable init.
callable_init = lambda: init * 2
v2 = resource_variable_ops.ResourceVariable(
initial_value=callable_init, name="var7")
self.assertEqual("var7:0", v2.name)
self.assertAllEqual(2 * init.numpy(), v2.read_value().numpy())
# Test assign_add.
new_v2_val = v2.assign_add(v.read_value())
self.assertAllEqual(v.read_value().numpy() * 3, new_v2_val.numpy())
# Test assign_sub.
new_v2_val = v2.assign_sub(v.read_value())
self.assertAllEqual(v.read_value().numpy() * 2, new_v2_val.numpy())
# Test assign.
v2.assign(v.read_value())
self.assertAllEqual(v.read_value().numpy(), v2.read_value().numpy())
# Test load
v2.load(2 * v.read_value())
self.assertAllEqual(2 * v.read_value().numpy(), v2.read_value().numpy())
# Test convert_to_tensor
t = ops.convert_to_tensor(v)
self.assertAllEqual(t.numpy(), v.read_value().numpy())
# Test operations
self.assertAllEqual((v * 2).numpy(), (v + v).numpy())
def testContainerEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="same")
with ops.container("different"):
v2 = resource_variable_ops.ResourceVariable(initial_value=lambda: 0,
name="same")
v2.assign(2)
self.assertEqual(1, v1.read_value().numpy())
self.assertEqual(2, v2.read_value().numpy())
def testDestruction(self):
with context.eager_mode():
var = resource_variable_ops.ResourceVariable(initial_value=1.0,
name="var8")
var_handle = var._handle
del var
with self.assertRaisesRegexp(errors.NotFoundError,
r"Resource .* does not exist."):
resource_variable_ops.destroy_resource_op(var_handle,
ignore_lookup_error=False)
def testScatterUpdate(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="update")
state_ops.scatter_update(v, [1], [3.0])
self.assertAllEqual([1.0, 3.0], v.numpy())
def testScatterAddStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="add")
state_ops.scatter_add(v, [1], [3])
self.assertAllEqual([1.0, 5.0], v.numpy())
def testScatterSubStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="sub")
state_ops.scatter_sub(v, [1], [3])
self.assertAllEqual([1.0, -1.0], v.numpy())
def testScatterUpdateVariant(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([
list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[])
])
v.scatter_update(
ops.IndexedSlices(
list_ops.tensor_list_from_tensor([1., 2.], element_shape=[]), 0))
self.assertAllEqual(
list_ops.tensor_list_get_item(v[0], 0, element_dtype=dtypes.float32),
1.)
def testGroupDoesntForceRead(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
assign = v.assign_add(1.0)
g = control_flow_ops.group([assign])
self.assertEqual(g.control_inputs[0].type, "AssignAddVariableOp")
def testScatterNdAddStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(
[1, 2, 3, 4, 5, 6, 7, 8], dtype=dtypes.float32, name="add")
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
expected = np.array([1, 13, 3, 14, 14, 6, 7, 20])
state_ops.scatter_nd_add(v, indices, updates)
self.assertAllClose(expected, v.numpy())
@test_util.run_in_graph_and_eager_modes
def testUnreadVariableInsideFunction(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def assign():
v.assign(1.0)
graph = assign.get_concrete_function().graph
self.assertTrue(all(x.type != "ReadVariableOp"
for x in graph.get_operations()))
def testScatterNdSubStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(
[1, 2, 3, 4, 5, 6, 7, 8], dtype=dtypes.float32, name="sub")
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
expected = np.array([1, -9, 3, -6, -4, 6, 7, -4])
state_ops.scatter_nd_sub(v, indices, updates)
self.assertAllClose(expected, v.numpy())
def testScatterUpdateCast(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="update")
state_ops.scatter_update(v, [1], [3])
self.assertAllEqual([1.0, 3.0], v.numpy())
@test_util.run_in_graph_and_eager_modes
def testScatterUpdateInvalidArgs(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3], name="update")
# The exact error and message differ between graph construction (where the
# error is realized during shape inference at graph construction time) and
# eager execution (where the error is realized during kernel execution).
with self.assertRaisesRegexp(Exception, r"shape.*2.*3"):
state_ops.scatter_update(v, [0, 1], [0, 1, 2])
@test_util.run_in_graph_and_eager_modes
def testAssignIncompatibleShape(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
self.evaluate(v.initializer)
pattern = re.compile("shapes must be equal", re.IGNORECASE)
with self.assertRaisesRegexp(Exception, pattern):
self.evaluate(v.assign_add(1))
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testCopyToGraphUninitialized(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
copy_to_graph = ops.Graph()
with copy_to_graph.as_default(): # Intentionally testing v1 behavior
copied = resource_variable_ops.copy_to_graph_uninitialized(v)
self.assertEqual(v.name, copied.name)
self.assertIsNone(copied.initializer)
def create_variant_shape_and_type_data(self):
variant_shape_and_type_data = (
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData())
variant_shape_and_type_data.is_set = True
stored_shape = tensor_shape.TensorShape([None, 4]).as_proto()
stored_dtype = dtypes.float32.as_datatype_enum
# NOTE(ebrevdo): shape_and_type lacks append() in some versions of protobuf.
variant_shape_and_type_data.shape_and_type.extend([
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(
shape=stored_shape, dtype=stored_dtype)])
return variant_shape_and_type_data
@def_function.function
def create_constant_variant(self, value):
value = constant_op.constant(
tensor_pb2.TensorProto(
dtype=dtypes.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(value, dtype=np.int32).tobytes())
]))
return value
# TODO(ebrevdo): Add run_in_graph_and_eager_modes once we can create
# EagerTensor constants with TensorProto inputs.
@test_util.run_in_graph_and_eager_modes()
def testVariantInitializer(self):
variant_shape_and_type_data = self.create_variant_shape_and_type_data()
value = self.create_constant_variant(3)
initializer = array_ops.fill([3], value)
resource_variable_ops._set_handle_shapes_and_types( # pylint: disable=protected-access
initializer, variant_shape_and_type_data,
graph_mode=not context.executing_eagerly())
v = resource_variable_ops.ResourceVariable(initializer)
read = array_ops.identity(v)
read_variant_shape_and_type = (
resource_variable_ops.get_eager_safe_handle_data(read))
self.assertEqual(
read_variant_shape_and_type, variant_shape_and_type_data)
gather = v.sparse_read([0])
gather_variant_shape_and_type = (
resource_variable_ops.get_eager_safe_handle_data(gather))
self.assertEqual(
gather_variant_shape_and_type, variant_shape_and_type_data)
# Make sure initializer runs.
if not context.executing_eagerly():
self.evaluate(v.initializer)
self.evaluate(read.op)
self.evaluate(gather.op)
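# _set_handle_shapes_and_types attaches the variant's inner shape/dtype
# ([None, 4] float32 here) to the resource handle; the two assertEqual
# checks above verify that plain reads and sparse_read both propagate
# that handle data unchanged.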
@parameterized.parameters([
# batch_dims=0 (equivalent to tf.gather)
dict( # 2D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[2, 1], [0, 3]],
expected=[[8, 7], [6, 9]]),
dict( # 3D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[9, 7], [8, 6]], [[6, 9], [8, 8]]]),
dict( # 4D indices
batch_dims=0,
params=[8, 9],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[8, 9], [9, 8]], [[8, 8], [9, 9]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
# batch_dims=indices.shape.ndims - 1 (equivalent to
# tf.compat.v1.batch_gather)
dict( # 2D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[2, 1], [0, 3]],
expected=[[12, 11], [20, 23]]),
dict( # 3D indices (2 batch dims)
batch_dims=2,
params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
dict( # 2D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[2, 1], [0, 3]],
expected=[[12, 11], [20, 23]]),
dict( # 3D indices (2 batch dims)
batch_dims=2,
params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
# 0 < batch_dims < indices.shape.ndims - 1
dict( # 3D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[13, 11], [12, 10]], [[20, 23], [22, 22]]]),
dict( # 4D indices (1 batch dim)
batch_dims=1,
params=[[6, 7], [8, 9]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[6, 7], [7, 6]], [[6, 6], [7, 7]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
dict( # 4D indices (2 batch dims)
batch_dims=2,
params=[[[2, 3], [4, 5]], [[6, 7], [8, 9]]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[2, 3], [3, 2]], [[4, 4], [5, 5]]],
[[[7, 7], [6, 6]], [[8, 9], [9, 8]]]]),
])
@test_util.run_in_graph_and_eager_modes
def testGatherWithBatchDims(self, params, indices, batch_dims, expected):
var = resource_variable_ops.ResourceVariable(params, name="var0")
with ops.control_dependencies([var.initializer]):
result = resource_variable_ops.resource_gather(
var.handle, indices, dtype=var.dtype, batch_dims=batch_dims)
self.assertAllEqual(expected, result)
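# With batch_dims=b, resource_gather treats the leading b axes of params
# and indices as shared batch axes and gathers within each batch, so the
# result shape is params.shape[:b] + indices.shape[b:] + params.shape[b+1:].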
@parameterized.parameters([
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=0,
output_shape=[2, 3, 8, 9, 10, 3, 4, 5, 6, 7]
# = indices.shape + params.shape[1:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=1,
output_shape=[2, 3, 8, 9, 10, 4, 5, 6, 7]
# = params.shape[:1] + indices.shape[1:] + params.shape[2:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
output_shape=[2, 3, 8, 9, 10, 5, 6, 7]
# = params.shape[:2] + indices.shape[2:] + params.shape[3:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 4, 9, 10],
batch_dims=3,
output_shape=[2, 3, 4, 9, 10, 6, 7]
# = params.shape[:3] + indices.shape[3:] + params.shape[4:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 4, 5, 10],
batch_dims=4,
output_shape=[2, 3, 4, 5, 10, 7]
# = params.shape[:4] + indices.shape[4:] + params.shape[5:]
),
])
@test_util.run_in_graph_and_eager_modes
def testGatherWithBatchDimsMatchesTensor(self, params_shape, indices_shape,
batch_dims, output_shape):
"""Checks that gather with batch_dims returns the correct shape."""
# Generate a `params` tensor with the indicated shape.
params_size = np.prod(params_shape)
params = np.reshape(np.arange(params_size, dtype=np.int32), params_shape)
# Generate an `indices` tensor with the indicated shape, where each index
# is within the appropriate range.
indices_size = np.prod(indices_shape)
indices = np.reshape(np.arange(indices_size, dtype=np.int32), indices_shape)
indices = indices % params_shape[batch_dims]
var = resource_variable_ops.ResourceVariable(params, name="var0")
with ops.control_dependencies([var.initializer]):
expected = array_ops.gather(
var.read_value(), indices, batch_dims=batch_dims)
result = resource_variable_ops.resource_gather(
var.handle, indices, dtype=var.dtype, batch_dims=batch_dims)
self.assertAllEqual(output_shape, result.shape.as_list())
self.assertAllEqual(expected, result)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/resource_variable_ops_test.py
|
# -*- coding: utf-8 -*-
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for py_func op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import re
import numpy as np
from six.moves import queue
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
def np_func(x, y):
return np.sinh(x) + np.cosh(y)
def matmul(x, y):
return math_ops.matmul(x, y)
class PyFuncTest(test.TestCase):
"""Encapsulates tests for py_func and eager_py_func."""
# ----- Tests for py_func -----
def testRealDataTypes(self):
def sum_func(x, y):
return x + y
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.uint8, dtypes.int8, dtypes.uint16, dtypes.int16,
dtypes.int32, dtypes.int64]:
with self.cached_session():
x = constant_op.constant(1, dtype=dtype)
y = constant_op.constant(2, dtype=dtype)
z = self.evaluate(script_ops.py_func(sum_func, [x, y], dtype))
self.assertEqual(z, 3)
def testComplexDataTypes(self):
def sub_func(x, y):
return x - y
for dtype in [dtypes.complex64, dtypes.complex128]:
with self.cached_session():
x = constant_op.constant(1 + 1j, dtype=dtype)
y = constant_op.constant(2 - 2j, dtype=dtype)
z = self.evaluate(script_ops.py_func(sub_func, [x, y], dtype))
self.assertEqual(z, -1 + 3j)
def testBoolDataTypes(self):
def and_func(x, y):
return x and y
dtype = dtypes.bool
with self.cached_session():
x = constant_op.constant(True, dtype=dtype)
y = constant_op.constant(False, dtype=dtype)
z = self.evaluate(script_ops.py_func(and_func, [x, y], dtype))
self.assertEqual(z, False)
def testSingleType(self):
with self.cached_session():
x = constant_op.constant(1.0, dtypes.float32)
y = constant_op.constant(2.0, dtypes.float32)
z = self.evaluate(script_ops.py_func(np_func, [x, y], dtypes.float32))
self.assertEqual(z, np_func(1.0, 2.0).astype(np.float32))
def testScalar(self):
with self.cached_session():
x = constant_op.constant(1.0, dtypes.float32)
y = constant_op.constant(2.0, dtypes.float32)
z = self.evaluate(
script_ops.eager_py_func(np_func, [x, y], [dtypes.float32]))
self.assertEqual(z[0], np_func(1.0, 2.0).astype(np.float32))
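# Note: passing a single dtype as Tout (as in testSingleType) yields a
# Tensor, while passing a list of dtypes yields a list of Tensors, which
# is why the result above is indexed with z[0].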
@test_util.run_v1_only("b/120545219")
def testArray(self):
with self.cached_session():
x = constant_op.constant([1.0, 2.0], dtypes.float64)
y = constant_op.constant([2.0, 3.0], dtypes.float64)
z = self.evaluate(script_ops.py_func(np_func, [x, y], [dtypes.float64]))
self.assertAllEqual(z[0],
np_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))
def testComplexType(self):
with self.cached_session():
x = constant_op.constant(1 + 2j, dtypes.complex64)
y = constant_op.constant(3 + 4j, dtypes.complex64)
z = self.evaluate(script_ops.py_func(np_func, [x, y], dtypes.complex64))
self.assertAllClose(z, np_func(1 + 2j, 3 + 4j))
def testRFFT(self):
with self.cached_session():
x = constant_op.constant([1., 2., 3., 4.], dtypes.float32)
def rfft(x):
return np.fft.rfft(x).astype(np.complex64)
y = self.evaluate(script_ops.py_func(rfft, [x], dtypes.complex64))
self.assertAllClose(y, np.fft.rfft([1., 2., 3., 4.]))
def testPythonLiteral(self):
with self.cached_session():
def literal(x):
return 1.0 if float(x) == 0.0 else 0.0
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(script_ops.py_func(literal, [x], dtypes.float64))
self.assertAllClose(y, 1.0)
def testList(self):
with self.cached_session():
def list_func(x):
return [x, x + 1]
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(
script_ops.py_func(list_func, [x], [dtypes.float64] * 2))
self.assertAllClose(y, [0.0, 1.0])
def testTuple(self):
# returns a tuple
with self.cached_session():
def tuple_func(x):
return x, x + 1
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(
script_ops.py_func(tuple_func, [x], [dtypes.float64] * 2))
self.assertAllClose(y, [0.0, 1.0])
# returns a tuple; both Tout and inp are tuples
with self.cached_session():
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(
script_ops.py_func(tuple_func, (x,),
(dtypes.float64, dtypes.float64)))
self.assertAllClose(y, [0.0, 1.0])
@test_util.run_v1_only("b/120545219")
def testStrings(self):
def read_fixed_length_numpy_strings():
return np.array([b" there"])
def read_and_return_strings(x, y):
return x + y
with self.cached_session():
x = constant_op.constant([b"hello", b"hi"], dtypes.string)
y = self.evaluate(
script_ops.py_func(read_fixed_length_numpy_strings, [],
dtypes.string))
z = self.evaluate(
script_ops.py_func(read_and_return_strings, [x, y], dtypes.string))
self.assertAllEqual(z, [b"hello there", b"hi there"])
@test_util.run_v1_only("b/120545219")
def testStringsAreConvertedToBytes(self):
def read_fixed_length_numpy_strings():
return np.array([" there"])
def read_and_return_strings(x, y):
return x + y
with self.cached_session():
x = constant_op.constant(["hello", "hi"], dtypes.string)
y = self.evaluate(
script_ops.py_func(read_fixed_length_numpy_strings, [],
dtypes.string))
z = self.evaluate(
script_ops.py_func(read_and_return_strings, [x, y], dtypes.string))
self.assertAllEqual(z, [b"hello there", b"hi there"])
@test_util.run_v1_only("b/120545219")
def testObjectArraysAreConvertedToBytes(self):
def read_object_array():
return np.array([b" there", u" ya"], dtype=np.object)
def read_and_return_strings(x, y):
return x + y
with self.cached_session():
x = constant_op.constant(["hello", "hi"], dtypes.string)
y, = script_ops.py_func(read_object_array, [],
[dtypes.string])
z, = script_ops.py_func(read_and_return_strings, [x, y], [dtypes.string])
self.assertListEqual(list(z.eval()), [b"hello there", b"hi ya"])
@test_util.run_v1_only("b/120545219")
def testStringPadding(self):
correct = [b"this", b"is", b"a", b"test"]
with self.cached_session():
s, = script_ops.py_func(lambda: [correct], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct)
@test_util.run_v1_only("b/120545219")
def testStringPaddingAreConvertedToBytes(self):
inp = ["this", "is", "a", "test"]
correct = [b"this", b"is", b"a", b"test"]
with self.cached_session():
s, = script_ops.py_func(lambda: [inp], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct)
@test_util.run_v1_only("b/120545219")
def testNulTerminatedStrings(self):
inp = np.array(["this\0", "is\0\0", "a\0", "test\0\0"], dtype=np.str_)
correct = [b"this", b"is", b"a", b"test"]
with self.cached_session():
s, = script_ops.py_func(lambda: [inp], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct)
@test_util.run_v1_only("b/120545219")
def testLarge(self):
with self.cached_session() as sess:
x = array_ops.zeros([1000000], dtype=np.float32)
y = script_ops.py_func(lambda x: x + 1, [x], [dtypes.float32])
z = script_ops.py_func(lambda x: x * 2, [x], [dtypes.float32])
for _ in xrange(100):
sess.run([y[0].op, z[0].op])
def testNoInput(self):
with self.cached_session():
x = self.evaluate(script_ops.py_func(lambda: 42.0, [], dtypes.float64))
self.assertAllClose(x, 42.0)
@test_util.run_v1_only("b/120545219")
def testAlias(self):
with self.cached_session():
np_array = np.array([1.0, 2.0], dtype=np.float32)
tf_array = script_ops.py_func(lambda: np_array, [], [dtypes.float32])
value = tf_array + constant_op.constant([2.0, 3.0], dtype=dtypes.float32)
value.op.run()
self.assertAllEqual(np_array, [1.0, 2.0])
@test_util.run_v1_only("b/120545219")
def testReturnUnicodeString(self):
with self.cached_session():
correct = u"你好 世界"
def unicode_string():
return correct
z, = script_ops.py_func(unicode_string, [], [dtypes.string])
self.assertEqual(z.eval(), correct.encode("utf8"))
@test_util.run_v1_only("b/120545219")
def testBadNumpyReturnType(self):
with self.cached_session():
def bad():
# Structured numpy arrays aren't supported.
return np.array([], dtype=[("foo", np.float32)])
y, = script_ops.py_func(bad, [], [dtypes.float32])
with self.assertRaisesRegexp(errors.InternalError,
"Unsupported numpy data type"):
self.evaluate(y)
@test_util.run_v1_only("b/120545219")
def testBadReturnType(self):
with self.cached_session():
def bad():
# Non-string python objects aren't supported.
return {"foo": dtypes.float32}
z, = script_ops.py_func(bad, [], [dtypes.int64])
with self.assertRaisesRegexp(errors.InternalError,
"Unsupported object type"):
self.evaluate(z)
@test_util.run_v1_only("b/120545219")
def testReturnInput(self):
with self.cached_session():
def ident(x):
return x[0]
p = array_ops.placeholder(dtypes.float32)
# Create a numpy array aliasing a tensor and a tensor aliasing this array
z, = script_ops.py_func(ident, [p], [dtypes.float32])
z += 0.0 # Makes sure we release the tensor aliasing the numpy array x[0]
# above instead of using its memory as the return value of
# session.run
self.assertEqual(0.0, z.eval(feed_dict={p: [0.0]}))
def testStateful(self):
# Not using self.cached_session(), which disables optimization.
with session_lib.Session() as sess:
producer = iter(range(3))
x, = script_ops.py_func(lambda: next(producer), [], [dtypes.int64])
self.assertEqual(self.evaluate(x), 0)
self.assertEqual(self.evaluate(x), 1)
self.assertEqual(self.evaluate(x), 2)
@test_util.enable_tf_xla_constant_folding("b/134376434")
def testStateless(self):
# Not using self.cached_session(), which disables optimization.
with session_lib.Session() as sess:
producer = iter(range(3))
x, = script_ops.py_func(
lambda: next(producer), [], [dtypes.int64], stateful=False)
self.assertEqual(self.evaluate(x), 0)
self.assertEqual(self.evaluate(x), 0)
self.assertEqual(self.evaluate(x), 0)
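# With stateful=False the py_func is declared side-effect free, so the
# graph optimizer is allowed to fold or deduplicate it; the repeated 0s
# above come from reusing the first call's result rather than advancing
# the iterator.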
@test_util.run_v1_only("b/120545219")
def testGradientFunction(self):
# Input to tf.compat.v1.py_func is necessary;
# otherwise get_gradient_function() returns None by default.
a = constant_op.constant(0)
x, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64])
y, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64], stateful=False)
self.assertEqual(None, ops.get_gradient_function(x.op))
self.assertEqual(None, ops.get_gradient_function(y.op))
@test_util.run_v1_only("b/120545219")
def testCOrder(self):
with self.cached_session():
val = [[1, 2], [3, 4]]
x, = script_ops.py_func(lambda: np.array(val, order="F"), [],
[dtypes.int64])
self.assertAllEqual(val, self.evaluate(x))
@test_util.run_v1_only("b/120545219")
def testParallel(self):
# Tests that tf.compat.v1.py_funcs can run in parallel if they release
# the GIL.
with self.cached_session() as session:
q = queue.Queue(1)
def blocking_put():
q.put(42)
q.join() # Wait for task_done().
return 42
def blocking_get():
v = q.get(block=True) # Wait for put().
q.task_done()
return v
x, = script_ops.py_func(blocking_put, [], [dtypes.int64])
y, = script_ops.py_func(blocking_get, [], [dtypes.int64])
# This will result in a deadlock if the py_funcs don't run in parallel.
session.run([x, y])
def testNoReturnValueStateful(self):
class State(object):
def __init__(self):
self._value = np.array([1], np.int64)
def _increment(self, diff):
self._value += diff
def increment(self, diff):
return script_ops.py_func(self._increment, [diff], [], stateful=True)
@property
def value(self):
return self._value
with self.cached_session():
s = State()
op = s.increment(constant_op.constant(2, dtypes.int64))
ret = self.evaluate(op)
self.assertIsNone(ret)
self.assertAllEqual([3], s.value)
@test_util.run_v1_only("b/120545219")
def testNoReturnValueStateless(self):
def do_nothing(unused_x):
pass
f = script_ops.py_func(
do_nothing, [constant_op.constant(3, dtypes.int64)], [], stateful=False)
with self.cached_session() as sess:
self.assertEqual(self.evaluate(f), [])
def _testExceptionHandling(self, py_exp, tf_exp, eager=False):
def inner_exception():
raise py_exp("blah") # pylint: disable=not-callable
def raise_exception():
inner_exception()
expected_regexp = r": blah.*" # Error at the top
expected_regexp += r"in raise_exception.*" # Stacktrace outer
expected_regexp += r"in inner_exception.*" # Stacktrace inner
expected_regexp += r": blah" # Stacktrace of raise
def expected_error_check(exception):
return re.search(expected_regexp, str(exception), re.DOTALL)
if eager:
if context.executing_eagerly():
with self.assertRaisesWithPredicateMatch(tf_exp, expected_error_check):
f = script_ops.eager_py_func(raise_exception, [], [])
return
else:
f = script_ops.eager_py_func(raise_exception, [], [])
else:
f = script_ops.py_func(raise_exception, [], [])
with self.assertRaisesWithPredicateMatch(tf_exp, expected_error_check):
self.evaluate(f)
@test_util.run_v1_only("b/120545219")
def testExceptionHandling(self):
with self.cached_session():
self._testExceptionHandling(ValueError, errors.InvalidArgumentError)
self._testExceptionHandling(TypeError, errors.InvalidArgumentError)
self._testExceptionHandling(StopIteration, errors.OutOfRangeError)
self._testExceptionHandling(MemoryError, errors.ResourceExhaustedError)
self._testExceptionHandling(NotImplementedError,
errors.UnimplementedError)
class WeirdError(Exception):
pass
self._testExceptionHandling(WeirdError, errors.UnknownError)
# ----- Tests shared by py_func and eager_py_func -----
def testCleanup(self):
# Delete everything created by previous tests to avoid side effects.
ops.reset_default_graph()
gc.collect()
initial_size = script_ops._py_funcs.size()
# Encapsulate the graph generation, so locals can be deleted.
def make_graphs():
for _ in xrange(1000):
g = ops.Graph()
with g.as_default():
c = constant_op.constant([1.], dtypes.float32)
_ = script_ops.py_func(lambda x: x + 1, [c], [dtypes.float32])
_ = script_ops.eager_py_func(lambda x: x + 1, [c], [dtypes.float32])
# These ops have a reference to 'c' which has a reference to the graph.
# Check that the functions are deleted even though the graph is
# referenced from them (see #18292).
_ = script_ops.py_func(lambda x: x + c.shape[0], [c], [dtypes.float32])
_ = script_ops.eager_py_func(lambda x: x + c.shape[0], [c], [dtypes.float32])
# Call garbage collector to enforce deletion.
make_graphs()
ops.reset_default_graph()
gc.collect()
self.assertEqual(initial_size, script_ops._py_funcs.size())
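# script_ops._py_funcs is the process-global registry mapping py_func
# tokens to Python callables; the size comparison above verifies that
# deleting the graphs releases the registered functions (see #18292).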
# ----- Tests for eager_py_func -----
@test_util.run_in_graph_and_eager_modes
def testEagerSingleOutputInt32(self):
a = array_ops.ones((3, 3), dtype=dtypes.int32)
x = array_ops.ones((3, 1), dtype=dtypes.int32)
output = script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.int32)
ret = self.evaluate(output)
self.assertAllEqual(ret, [[3], [3], [3]])
@test_util.run_in_graph_and_eager_modes
def testRenamedDeviceInTestClusterCorrectlyIdentifiedAsLocalhost(self):
if context.executing_eagerly():
self.skipTest("b/126565353: We don't test eager's remote execution.")
workers, _ = test_util.create_local_cluster(num_workers=1, num_ps=0)
worker = workers[0]
session = session_lib.Session(worker.target)
with ops.device("/job:worker/task:0/cpu:0"):
a = array_ops.ones((3, 3), dtype=dtypes.float32)
x = array_ops.ones((3, 1), dtype=dtypes.float32)
output = script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.float32)
ret = session.run(output)
self.assertAllClose(ret, [[3.0], [3.0], [3.0]])
@test_util.run_in_graph_and_eager_modes
def testEagerSingleOutputFloat32(self):
with test_util.device(use_gpu=True):
a = array_ops.ones((3, 3), dtype=dtypes.float32)
x = array_ops.ones((3, 1), dtype=dtypes.float32)
output = script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.float32)
ret = self.evaluate(output)
self.assertAllClose(ret, [[3.0], [3.0], [3.0]])
@test_util.run_in_graph_and_eager_modes
def testEagerArrayOutput(self):
with test_util.device(use_gpu=True):
a = array_ops.ones((3, 3), dtype=dtypes.float32)
x = array_ops.ones((3, 1), dtype=dtypes.float32)
output = script_ops.eager_py_func(
lambda a, x: [matmul(a, x)], inp=[a, x], Tout=[dtypes.float32])
ret = self.evaluate(output)
self.assertAllEqual(ret, [[[3.0], [3.0], [3.0]]])
@test_util.run_in_graph_and_eager_modes
def testEagerReturnNone(self):
with test_util.device(use_gpu=True):
def no_return_value():
return
output = script_ops.eager_py_func(no_return_value, inp=[], Tout=[])
ret = self.evaluate(output)
if context.executing_eagerly():
self.assertEqual(len(ret), 0)
else:
self.assertIsNone(ret)
@test_util.run_in_graph_and_eager_modes
def testEagerPyFuncInDefun(self):
with test_util.device(use_gpu=True):
def wrapper():
a = array_ops.ones((3, 3), dtype=dtypes.float32)
x = array_ops.ones((3, 1), dtype=dtypes.float32)
return script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.float32)
wrapped = function.defun(wrapper)
ret = self.evaluate(wrapped())
self.assertAllEqual(ret, [[3.0], [3.0], [3.0]])
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testEagerExceptionHandling(self):
with test_util.device(use_gpu=True):
self._testExceptionHandling(
ValueError, errors.InvalidArgumentError, eager=True)
self._testExceptionHandling(
TypeError, errors.InvalidArgumentError, eager=True)
self._testExceptionHandling(
StopIteration, errors.OutOfRangeError, eager=True)
self._testExceptionHandling(
MemoryError, errors.ResourceExhaustedError, eager=True)
self._testExceptionHandling(
NotImplementedError, errors.UnimplementedError, eager=True)
class WeirdError(Exception):
pass
self._testExceptionHandling(WeirdError, errors.UnknownError, eager=True)
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testEagerReturningVariableRaisesError(self):
def return_variable():
return resource_variable_ops.ResourceVariable(0.0)
with self.assertRaisesRegexp(errors.UnknownError,
"Attempting to return a variable"):
output = script_ops.eager_py_func(
return_variable, inp=[], Tout=dtypes.float32)
self.evaluate(output)
@test_util.run_in_graph_and_eager_modes
def testEagerGradientTape(self):
def f(x):
return x**2
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
tape.watch(x)
y = script_ops.eager_py_func(f, inp=[x], Tout=dtypes.float32)
dy_dx = tape.gradient(y, x)
self.assertEqual(self.evaluate(dy_dx), 6.0)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraph(self):
def f(x):
return x**2
x = constant_op.constant(3.0)
y = script_ops.eager_py_func(f, inp=[x], Tout=dtypes.float32)
dy_dx = gradients_impl.gradients(y, x)[0]
self.assertEqual(self.evaluate(dy_dx), 6.0)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraphTwoOutputs(self):
def f(x, y):
return x * y, x / y
x = constant_op.constant(3.0)
y = constant_op.constant(2.0)
fa, fb = script_ops.eager_py_func(f, inp=[x, y],
Tout=[dtypes.float32, dtypes.float32])
dy_dx = gradients_impl.gradients(fa + fb, x)[0]
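# Expected gradient: d(x*y)/dx = y = 2.0 and d(x/y)/dx = 1/y = 0.5,
# so d(fa + fb)/dx = 2.0 + 0.5 = 2.5.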
self.assertEqual(self.evaluate(dy_dx), 2.5)
@test_util.run_in_graph_and_eager_modes
def testEagerGradientTapeMultipleArgs(self):
def f(x, y):
return x**2 + y**2
x = constant_op.constant(3.0)
y = constant_op.constant(4.0)
with backprop.GradientTape() as tape:
tape.watch(x)
tape.watch(y)
z = script_ops.eager_py_func(f, inp=[x, y], Tout=dtypes.float32)
dz_dx, dz_dy = tape.gradient(z, [x, y])
self.assertEqual(self.evaluate(dz_dx), 6.0)
self.assertEqual(self.evaluate(dz_dy), 8.0)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraphMultipleArgs(self):
def f(x, y):
return x**2 + y**2
x = constant_op.constant(3.0)
y = constant_op.constant(4.0)
z = script_ops.eager_py_func(f, inp=[x, y], Tout=dtypes.float32)
dz_dx, dz_dy = gradients_impl.gradients(z, [x, y])
self.assertEqual(self.evaluate(dz_dx), 6.0)
self.assertEqual(self.evaluate(dz_dy), 8.0)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraphLogHuber(self):
def log_huber(x, m):
if math_ops.abs(x) <= m:
return x**2
else:
return m**2 * (1 - 2 * math_ops.log(m) + math_ops.log(x**2))
x = array_ops.placeholder(dtypes.float32)
m = array_ops.placeholder(dtypes.float32)
y = script_ops.eager_py_func(
func=log_huber, inp=[x, m], Tout=dtypes.float32)
dy_dx = gradients_impl.gradients(y, x)[0]
with self.cached_session() as sess:
# Takes the first branch of log_huber.
y, dy_dx = sess.run([y, dy_dx], feed_dict={x: 1.0, m: 2.0})
self.assertEqual(y, 1.0)
self.assertEqual(dy_dx, 2.0)
@test_util.run_v1_only("b/120545219")
def testEagerRespectsDevicePlacementOfOp(self):
def f(x):
return math_ops.square(x)
def g(x):
return math_ops.add(x, x)
with ops.device("/CPU:0"):
# Explicitly ask for the py_funcs to execute on CPU, even if
# a GPU is available.
x = array_ops.placeholder(dtypes.float32)
y = script_ops.eager_py_func(func=f, inp=[x], Tout=dtypes.float32)
z = script_ops.eager_py_func(func=g, inp=[y], Tout=dtypes.float32)
with self.session(use_gpu=True) as sess:
output = sess.run(z, feed_dict={x: 3.0})
self.assertEqual(output, 18.0)
@test_util.run_in_graph_and_eager_modes
def testEagerPyFuncOnGPUWithStrings(self):
def fn(a):
return str(a.dtype)
x = constant_op.constant("x", dtype=dtypes.string)
output = script_ops.eager_py_func(fn, inp=[x], Tout=dtypes.string)
self.assertEqual(self.evaluate(output), "<dtype: 'string'>".encode("utf8"))
@test_util.run_in_graph_and_eager_modes
def testEagerPyFuncNotACallable(self):
x = constant_op.constant("x", dtype=dtypes.string)
with self.assertRaisesRegexp(ValueError, "callable"):
_ = script_ops.eager_py_func(x, inp=[x], Tout=dtypes.string)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/py_func_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Conv3DTransposeTest(test.TestCase):
def testConv3DTransposeSingleStride(self):
with self.cached_session():
strides = [1, 1, 1, 1, 1]
# Input, output: [batch, depth, height, width, channel]
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 5, 6, 4, 2]
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
# We count the number of kernel cells contributing at each output location.
# At the center, #cells = kernel_depth * kernel_height * kernel_width
# At the corners, #cells = ceil(kernel_depth/2) * ceil(kernel_height/2)
# * ceil(kernel_width/2)
# At the edges, #cells =
# kernel_depth * ceil(kernel_height/2) * ceil(kernel_width/2) or
# ceil(kernel_depth/2) * kernel_height * ceil(kernel_width/2) or
# ceil(kernel_depth/2) * ceil(kernel_height/2) * kernel_width
# At the borders, #cells =
# ceil(kernel_depth/2) * kernel_height * kernel_width or
# kernel_depth * ceil(kernel_height/2) * kernel_width or
# kernel_depth * kernel_height * ceil(kernel_width/2)
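# With x and f all ones and in_depth = 3, each output value is simply
# (#contributing kernel cells) * 3.0, e.g. 27 * 3.0 at the center.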
for n in xrange(x_shape[0]):
for k in xrange(f_shape[3]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
for d in xrange(y_shape[1]):
d_in = d > 0 and d < y_shape[1] - 1
h_in = h > 0 and h < y_shape[2] - 1
w_in = w > 0 and w < y_shape[3] - 1
if d_in + h_in + w_in == 3:
target = 27 * 3.0
elif d_in + h_in + w_in == 2:
target = 18 * 3.0
elif d_in or h_in or w_in:
target = 12 * 3.0
else:
target = 8 * 3.0
self.assertAllClose(target, value[n, d, h, w, k])
def testConv3DTransposeSame(self):
with self.cached_session():
strides = [1, 2, 2, 2, 1]
# Input, output: [batch, depth, height, width, channel]
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 10, 12, 8, 2]
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[3]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
for d in xrange(y_shape[1]):
# We add a case for locations divisible by the stride.
d_in = d % strides[1] == 0 and 0 < d < y_shape[1] - 1
h_in = h % strides[2] == 0 and 0 < h < y_shape[2] - 1
w_in = w % strides[3] == 0 and 0 < w < y_shape[3] - 1
if d_in + h_in + w_in == 3:
target = 8 * 3.0
elif d_in + h_in + w_in == 2:
target = 4 * 3.0
elif d_in or h_in or w_in:
target = 2 * 3.0
else:
target = 3.0
self.assertAllClose(target, value[n, d, h, w, k])
@test_util.run_deprecated_v1
def testConv3DTransposeShapeMismatch(self):
# Test case for GitHub issue 18460
x_shape = [2, 2, 3, 4, 3]
f_shape = [3, 3, 3, 2, 2]
y_shape = [2, 2, 6, 8, 6]
strides = [1, 1, 2, 2, 2]
np.random.seed(1)
x_value = np.random.random_sample(x_shape).astype(np.float64)
f_value = np.random.random_sample(f_shape).astype(np.float64)
nn_ops.conv3d_transpose(
x_value, f_value, y_shape, strides, data_format="NCDHW")
def testConv3DTransposeOutputShapeType(self):
# Test case for GitHub issue 18887
for dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session():
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 5, 6, 4, 2]
f_shape = [3, 3, 3, 2, 3]
strides = [1, 1, 1, 1, 1]
x_value = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f_value = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x_value, f_value, constant_op.constant(y_shape, dtype=dtype),
strides=strides, padding="SAME")
self.evaluate(output)
def testConv3DTransposeValid(self):
with self.cached_session():
strides = [1, 2, 2, 2, 1]
# Input, output: [batch, depth, height, width, channel]
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 11, 13, 9, 2]
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
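# For VALID padding, each output dimension is (input - 1) * stride +
# kernel_size, e.g. depth: (5 - 1) * 2 + 3 = 11, matching y_shape above.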
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="VALID")
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[3]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
for d in xrange(y_shape[1]):
# We add a case for locations divisible by the stride.
d_in = d % strides[1] == 0 and pad < d < y_shape[1] - 1 - pad
h_in = h % strides[2] == 0 and pad < h < y_shape[2] - 1 - pad
w_in = w % strides[3] == 0 and pad < w < y_shape[3] - 1 - pad
if d_in + h_in + w_in == 3:
target = 8 * 3.0
elif d_in + h_in + w_in == 2:
target = 4 * 3.0
elif d_in or h_in or w_in:
target = 2 * 3.0
else:
target = 3.0
cache_values[n, d, h, w, k] = target
# copy values in the border
cache_values[n, :, :, 0, k] = cache_values[n, :, :, 1, k]
cache_values[n, :, :, -1, k] = cache_values[n, :, :, -2, k]
cache_values[n, :, 0, :, k] = cache_values[n, :, 1, :, k]
cache_values[n, :, -1, :, k] = cache_values[n, :, -2, :, k]
cache_values[n, 0, :, :, k] = cache_values[n, 1, :, :, k]
cache_values[n, -1, :, :, k] = cache_values[n, -2, :, :, k]
self.assertAllClose(cache_values, value)
@test_util.run_deprecated_v1
def testGradient(self):
x_shape = [2, 3, 4, 3, 2]
f_shape = [3, 3, 3, 2, 2]
y_shape = [2, 6, 8, 6, 2]
strides = [1, 2, 2, 2, 1]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
f_val = np.random.random_sample(f_shape).astype(np.float64)
with self.cached_session():
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
print("conv3d_transpose gradient err = %g " % err)
err_tolerance = 0.0005
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/conv3d_transpose_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for UnicodeEncode op from ragged_string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl as errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import test
class UnicodeEncodeOpTest(test.TestCase, parameterized.TestCase):
def assertAllEqual(self, rt, expected):
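    # Note: this override replaces TestCase.assertAllEqual for this suite.
    # It evaluates `rt`, converts ndarray / RaggedTensorValue results to
    # plain Python lists, and compares them with assertEqual.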
with self.cached_session() as sess:
value = sess.run(rt)
if isinstance(value, np.ndarray):
value = value.tolist()
elif isinstance(value, ragged_tensor_value.RaggedTensorValue):
value = value.to_list()
self.assertEqual(value, expected)
def testScalar(self):
with self.cached_session():
with self.assertRaises(ValueError):
ragged_string_ops.unicode_encode(72, "UTF-8")
with self.cached_session():
with self.assertRaises(ValueError):
ragged_string_ops.unicode_encode(constant_op.constant(72), "UTF-8")
def testRequireParams(self):
with self.cached_session():
with self.assertRaises(TypeError):
ragged_string_ops.unicode_encode()
with self.cached_session():
with self.assertRaises(TypeError):
ragged_string_ops.unicode_encode(72)
with self.cached_session():
with self.assertRaises(TypeError):
ragged_string_ops.unicode_encode(encoding="UTF-8")
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
def testStrictErrors(self, encoding):
test_value = np.array([72, 101, 2147483647, -1, 111], np.int32)
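    # 2147483647 is far above the Unicode maximum (U+10FFFF) and -1 is
    # negative, so both are invalid code points and must trigger an error
    # under "strict" handling.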
with self.cached_session() as session:
with self.assertRaises(errors.InvalidArgumentError):
session.run(
ragged_string_ops.unicode_encode(test_value, encoding, "strict"))
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testIgnoreErrors(self, encoding):
test_value = np.array([72, 101, 2147483647, -1, 111], np.int32)
expected_value = u"Heo".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"ignore")
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testReplaceErrors(self, encoding):
test_value = np.array([72, 101, 2147483647, -1, 111], np.int32)
expected_value = u"He\U0000fffd\U0000fffdo".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"replace")
self.assertAllEqual(unicode_encode_op, expected_value)
# Test custom replacement character
test_value = np.array([72, 101, 2147483647, -1, 111], np.int32)
expected_value = u"Heooo".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"replace", 111)
self.assertAllEqual(unicode_encode_op, expected_value)
# Verify "replace" is default
test_value = np.array([72, 101, 2147483647, -1, 111], np.int32)
expected_value = u"He\U0000fffd\U0000fffdo".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
    # replacement_char must be a valid code point: 1114112 (0x110000) is
    # one past the Unicode maximum U+10FFFF.
test_value = np.array([72, 101, 2147483647, -1, 111], np.int32)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"replace", 1114112)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(unicode_encode_op)
# -- regular Tensor tests -- #
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testVector(self, encoding):
test_value = np.array([72, 101, 108, 108, 111], np.int32)
expected_value = u"Hello".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
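    # Code points are taken at face value: 195 is U+00C3 and 128516 is
    # U+1F604; they are not interpreted as UTF-8 byte sequences.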
test_value = np.array([72, 101, 195, 195, 128516], np.int32)
expected_value = u"He\xc3\xc3\U0001f604".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
# Single character string
test_value = np.array([72], np.int32)
expected_value = u"H".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
test_value = np.array([128516], np.int32)
expected_value = u"\U0001f604".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testMatrix(self, encoding):
test_value = np.array(
[[72, 128516, 108, 108, 111], [87, 128516, 114, 108, 100]], np.int32)
expected_value = [
u"H\U0001f604llo".encode(encoding), u"W\U0001f604rld".encode(encoding)
]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test3DimMatrix(self, encoding):
test_value = constant_op.constant(
[[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100]],
[[102, 105, 120, 101, 100], [119, 111, 114, 100, 115]],
[[72, 121, 112, 101, 114], [99, 117, 98, 101, 46]]], np.int32)
expected_value = [[u"Hello".encode(encoding), u"World".encode(encoding)],
[u"fixed".encode(encoding), u"words".encode(encoding)],
[u"Hyper".encode(encoding), u"cube.".encode(encoding)]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test4DimMatrix(self, encoding):
test_value = constant_op.constant(
[[[[72, 101, 108, 108, 111]], [[87, 111, 114, 108, 100]]],
[[[102, 105, 120, 101, 100]], [[119, 111, 114, 100, 115]]],
[[[72, 121, 112, 101, 114]], [[99, 117, 98, 101, 46]]]], np.int32)
expected_value = [[[u"Hello".encode(encoding)],
[u"World".encode(encoding)]],
[[u"fixed".encode(encoding)],
[u"words".encode(encoding)]],
[[u"Hyper".encode(encoding)],
[u"cube.".encode(encoding)]]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
# -- Ragged Tensor tests -- #
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testRaggedMatrix(self, encoding):
test_value = ragged_factory_ops.constant(
[[72, 195, 108, 108, 111], [87, 128516, 114, 108, 100, 46]], np.int32)
expected_value = [
u"H\xc3llo".encode(encoding), u"W\U0001f604rld.".encode(encoding)
]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test3DimMatrixWithRagged2ndDim(self, encoding):
test_value = ragged_factory_ops.constant(
[[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100]],
[[102, 105, 120, 101, 100]],
[[72, 121, 112, 101, 114], [119, 111, 114, 100, 115],
[99, 117, 98, 101, 46]]], np.int32)
expected_value = [[u"Hello".encode(encoding), u"World".encode(encoding)],
[u"fixed".encode(encoding)],
[
u"Hyper".encode(encoding), u"words".encode(encoding),
u"cube.".encode(encoding)
]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test3DimMatrixWithRagged3rdDim(self, encoding):
test_value = ragged_factory_ops.constant(
[[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100, 46]],
[[68, 111, 110, 39, 116], [119, 195, 114, 114, 121, 44, 32, 98, 101]],
[[128516], []]], np.int32)
expected_value = [[u"Hello".encode(encoding), u"World.".encode(encoding)],
[
u"Don't".encode(encoding),
u"w\xc3rry, be".encode(encoding)
], [u"\U0001f604".encode(encoding), u"".encode(encoding)]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test3DimMatrixWithRagged2ndAnd3rdDim(self, encoding):
test_value = ragged_factory_ops.constant(
[[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100, 46]], [],
[[128516]]], np.int32)
expected_value = [[u"Hello".encode(encoding), u"World.".encode(encoding)],
[], [u"\U0001f604".encode(encoding)]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test4DimRaggedMatrix(self, encoding):
test_value = ragged_factory_ops.constant(
[[[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100]]],
[[[]], [[72, 121, 112, 101]]]], np.int32)
expected_value = [[[u"Hello".encode(encoding), u"World".encode(encoding)]],
[[u"".encode(encoding)], [u"Hype".encode(encoding)]]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testRaggedMatrixWithMultiDimensionInnerValues(self, encoding):
test_flat_values = constant_op.constant([[[72, 101, 108, 108, 111],
[87, 111, 114, 108, 100]],
[[102, 105, 120, 101, 100],
[119, 111, 114, 100, 115]],
[[72, 121, 112, 101, 114],
[99, 117, 98, 101, 46]]])
test_row_splits = [
constant_op.constant([0, 2, 3], dtype=np.int64),
constant_op.constant([0, 1, 1, 3], dtype=np.int64)
]
test_value = ragged_tensor.RaggedTensor.from_nested_row_splits(
test_flat_values, test_row_splits)
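    # The nested row splits group the three 2x5 code-point matrices as
    # [M0], [], [M1, M2] (inner splits [0, 1, 1, 3]); the outer splits
    # [0, 2, 3] then place the first two groups in row 0 and the last
    # group in row 1, which yields the ragged structure asserted below.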
expected_value = [[[[u"Hello".encode(encoding), u"World".encode(encoding)]],
[]],
[[[u"fixed".encode(encoding), u"words".encode(encoding)],
[u"Hyper".encode(encoding),
u"cube.".encode(encoding)]]]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/unicode_encode_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for decode_image."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
prefix_path = "tensorflow/core/lib"
class DecodeImageOpTest(test.TestCase):
def testBmp(self):
# Read a real bmp and verify shape
path = os.path.join(prefix_path, "bmp", "testdata", "lena.bmp")
with self.session(use_gpu=True) as sess:
bmp0 = io_ops.read_file(path)
image0 = image_ops.decode_image(bmp0)
image1 = image_ops.decode_bmp(bmp0)
bmp0, image0, image1 = self.evaluate([bmp0, image0, image1])
self.assertEqual(len(bmp0), 4194)
self.assertAllEqual(image0, image1)
@test_util.run_deprecated_v1
def testGif(self):
# Read some real GIFs
path = os.path.join(prefix_path, "gif", "testdata", "scan.gif")
width = 20
height = 40
stride = 5
shape = (12, height, width, 3)
with self.session(use_gpu=True) as sess:
gif0 = io_ops.read_file(path)
image0 = image_ops.decode_image(gif0)
image1 = image_ops.decode_gif(gif0)
gif0, image0, image1 = self.evaluate([gif0, image0, image1])
self.assertEqual(image0.shape, shape)
self.assertAllEqual(image0, image1)
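      # The expected frames, reconstructed below, contain a bar of 255s
      # `stride` pixels wide that advances one stride per frame: vertical
      # while it fits within the width, then horizontal after wrapping.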
for frame_idx, frame in enumerate(image0):
gt = np.zeros(shape[1:], dtype=np.uint8)
start = frame_idx * stride
end = (frame_idx + 1) * stride
if end <= width:
gt[:, start:end, :] = 255
else:
start -= width
end -= width
gt[start:end, :, :] = 255
self.assertAllClose(frame, gt)
bad_channels = image_ops.decode_image(gif0, channels=1)
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(bad_channels)
@test_util.run_deprecated_v1
def testJpeg(self):
# Read a real jpeg and verify shape
path = os.path.join(prefix_path, "jpeg", "testdata", "jpeg_merge_test1.jpg")
with self.session(use_gpu=True) as sess:
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_image(jpeg0)
image1 = image_ops.decode_jpeg(jpeg0)
jpeg0, image0, image1 = self.evaluate([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertAllEqual(image0, image1)
bad_channels = image_ops.decode_image(jpeg0, channels=4)
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(bad_channels)
def testPng(self):
# Read some real PNGs, converting to different channel numbers
inputs = [(1, "lena_gray.png")]
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
with self.cached_session(use_gpu=True) as sess:
path = os.path.join(prefix_path, "png", "testdata", filename)
png0 = io_ops.read_file(path)
image0 = image_ops.decode_image(png0, channels=channels)
image1 = image_ops.decode_png(png0, channels=channels)
png0, image0, image1 = self.evaluate([png0, image0, image1])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
self.assertAllEqual(image0, image1)
@test_util.run_deprecated_v1
def testInvalidBytes(self):
image_bytes = b"ThisIsNotAnImage!"
decode = image_ops.decode_image(image_bytes)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(decode)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/decode_image_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.PaddingFIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
@test_util.run_v1_only("PaddingFIFOQueue removed from v2")
class PaddingFIFOQueueTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((None,),), name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list { shape { dim { size: -1 } } } } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32), ((), ()),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list { shape { } shape { } } } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((3, 2),))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
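      # enqueue_many splits its inputs along dimension 0, pairing four
      # scalars with four length-2 vectors into four queue elements.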
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
def testParallelEnqueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(3, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
self.evaluate(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(self.evaluate(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32),
((), ()))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = self.evaluate(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, self.evaluate(size))
dequeued_t.op.run()
self.assertEqual(0, self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, (
(None, None),))
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], self.evaluate(size_t))
enqueue_op.run()
self.assertEqual([0], self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, shapes=((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithDynamicShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpToWithDynamicShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testConstructPaddingFIFOQueueWithNoShape(self):
with self.cached_session():
with self.assertRaisesRegexp(
ValueError,
r"When providing partial shapes, a list of shapes must be provided."):
data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32,
None).queue_ref.eval()
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.float32, dtypes_lib.int32),
((), (2,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testMultiEnqueueManyWithPartiallyKnownShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testDequeueUpToNoBlocking(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueManyWithPartiallyKnownShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertTrue(
tensor_shape.TensorShape(float_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertTrue(
tensor_shape.TensorShape(float_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueManyWithPartiallyKnownShapesAndVariableSizeInput(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.string, dtypes_lib.int32),
shapes=((None,), (1, None)))
str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_many(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
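      # dequeue_many pads each component to the longest element in the
      # batch: strings are padded with b"" and ints with 0, as asserted
      # below.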
string_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
[b"abc", b"", b""], [b"abc", b"d", b""],
[b"abc", b"d", b"e"]], string_val)
self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
[[1, 2, 3]]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
string_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueUpToPartiallyKnownShapesAndVariableInputNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.string, dtypes_lib.int32),
shapes=((None,), (1, None)))
str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_up_to(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
[b"abc", b"", b""], [b"abc", b"d", b""],
[b"abc", b"d", b"e"]], string_val)
self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
[[1, 2, 3]]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
string_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, ((4, 4, 4, 4),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testPartiallyKnownHighDimension(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, (
(4, None, 4, None),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32),
((), (2,)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
dtypes_lib.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(
([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many(
(array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
enq = q.enqueue_many(([], []))
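    # Even empty inputs are converted to the queue's declared dtypes.
    # inputs[0] is the queue handle, so the components are inputs[1:].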
self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
with self.assertRaises(ValueError):
q.enqueue((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
def testEnqueueWrongPartiallyKnownShapeAtRuntime(self):
with self.cached_session() as sess:
      # The first dimension of the second component is unknown;
      # the second dimension must be 3.
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (None, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Expected \[\?,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongPartiallyKnownShape(self):
with self.cached_session() as sess:
      # The first dimension of the second component is unknown;
      # the second dimension must be 3.
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (None, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,\?,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
self.evaluate(dequeued_t)
def testParallelEnqueueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
self.evaluate(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(101)
enqueue_op.run()
close_op.run()
      # Dequeue up to 101 items in parallel on 10 threads from the closed
      # queue.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(50, dtypes_lib.float32, shapes=((),))
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
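      # The queue starts with 49 of its 50 slots filled, so the enqueue
      # and dequeue threads below repeatedly block on one another.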
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
self.evaluate(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(self.evaluate(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = array_ops.placeholder(
dtypes_lib.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
        # With equal probability, run enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(
elements_enqueued, elements_enqueued + count, dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
self.evaluate(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
        # With equal probability, run dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, self.evaluate(dequeued_t))
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(
elements_dequeued, elements_dequeued + count, dtype=np.int32)
self.assertAllEqual(expected_range,
dequeuemany_t.eval({
count_placeholder: count
}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.PaddingFIFOQueue(100, dtypes_lib.int32, ((),))
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.PaddingFIFOQueue(total_count, dtypes_lib.int32, ((),))
enqueue_elems_op = q.enqueue_many((elems,))
      # Define a subgraph that first dequeues a count, then uses
      # dequeue_many to dequeue that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
self.assertAllEqual(elems[3:], self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
self.evaluate(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], self.evaluate(dequeued_t))
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeued_t)
self.assertEqual(elems[3], self.evaluate(cleanup_dequeue_t))
def close():
self.evaluate(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, (dtypes_lib.float32,
dtypes_lib.float32), ((), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
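      # Only three elements are ever enqueued, so dequeue_many(4) can
      # never complete and must fail once the queue is closed.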
def dequeue():
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = self.evaluate([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
self.assertEqual([50.0], self.evaluate(dequeued_t))
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
time.sleep(0.01)
self.assertEqual([50.0], self.evaluate(dequeued_t))
self.assertEqual([60.0], self.evaluate(dequeued_t))
# Make sure the thread finishes before exiting.
thread.join()
def testBlockingEnqueueBeforeClose(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
self.evaluate(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
self.evaluate(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
self.evaluate(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
def testDoesNotLoseValue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(1, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
with self.cached_session():
q1 = data_flow_ops.PaddingFIFOQueue(
1, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = data_flow_ops.PaddingFIFOQueue(
1, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
with self.cached_session():
q_a_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_a")
q_a_2 = data_flow_ops.PaddingFIFOQueue(
15, dtypes_lib.float32, ((),), shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_b")
q_b_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.int32, ((),), shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_c")
q_c_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_f")
q_f_2 = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), ((), ()),
shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = []
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),)))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.PaddingFIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
q2 = data_flow_ops.PaddingFIFOQueue(15, dtypes_lib.float32, ((),))
enq_q = data_flow_ops.PaddingFIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_many_op)
@test_util.run_deprecated_v1
def testResetOfBlockingOperation(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q_empty = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testBigEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
self.evaluate(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(2, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(self.evaluate(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
self.evaluate(enq)
      # Enough elements have now been enqueued to unblock the dequeue.
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.cached_session() as sess:
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
dtypes_lib.bool, dtypes_lib.complex64, dtypes_lib.complex128
]
shape = (32, 4, 128)
q = data_flow_ops.PaddingFIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes_lib.bool:
np_array = np_array > 0
elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = self.evaluate(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testUnknownRank(self):
with self.assertRaisesRegexp(ValueError, "must have a defined rank"):
data_flow_ops.PaddingFIFOQueue(32, [dtypes_lib.float32],
[tensor_shape.TensorShape(None)])
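# A minimal sketch (illustration only, not part of the original suite),
# assuming a TF1 graph/session context: PaddingFIFOQueue zero-pads each
# dequeued batch up to the largest element shape in that batch, which is what
# makes variable-shaped components batchable.
def _padding_behavior_example(sess):
  q = data_flow_ops.PaddingFIFOQueue(
      10, dtypes_lib.float32, shapes=[tensor_shape.TensorShape([None])])
  sess.run(q.enqueue(([1.0],)))
  sess.run(q.enqueue(([2.0, 3.0],)))
  # The shorter row is zero-padded to length 2: [[1., 0.], [2., 3.]].
  return sess.run(q.dequeue_many(2))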
class QueueFromListTest(test.TestCase):
def testQueueFromListShapes(self):
which = constant_op.constant(1)
def _cmp(expected, *shapes):
qs = [
data_flow_ops.PaddingFIFOQueue(10, [dtypes_lib.float32],
[tensor_shape.TensorShape(s)])
for s in shapes
]
s_expected = tensor_shape.TensorShape(expected)
s = data_flow_ops.QueueBase.from_list(which, qs).shapes[0]
if s_expected.ndims is None:
self.assertEqual(s_expected.ndims, s.ndims)
else:
self.assertEqual(s_expected.as_list(), s.as_list())
_cmp(None, [1, None], [None])
_cmp([None], [1], [2])
_cmp([1, None], [1, 1], [1, 2])
_cmp([1, None], [1, 1], [1, None])
_cmp([None, None], [None, 1], [1, None])
_cmp([1], [1], [1], [1])
_cmp([None], [1], [None], [1])
_cmp(None, [1, None], [1], [1])
def testQueueFromListShapesMultipleComponents(self):
q_u_u = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([None]), tensor_shape.TensorShape([None])])
q_u_f = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([None]), tensor_shape.TensorShape([1, 2])])
q_f_f = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([3, 4]), tensor_shape.TensorShape([1, 2])])
which = constant_op.constant(1)
s_cmp_1 = data_flow_ops.QueueBase.from_list(which,
[q_u_u, q_u_u, q_u_u]).shapes
self.assertEqual([1, 1], [x.ndims for x in s_cmp_1])
self.assertEqual([None, None], [x.as_list()[0] for x in s_cmp_1])
s_cmp_2 = data_flow_ops.QueueBase.from_list(which,
[q_u_u, q_u_u, q_u_f]).shapes
self.assertEqual([1, None], [x.ndims for x in s_cmp_2])
self.assertEqual([None], s_cmp_2[0].as_list())
s_cmp_3 = data_flow_ops.QueueBase.from_list(which, [q_f_f, q_f_f]).shapes
self.assertEqual([2, 2], [x.ndims for x in s_cmp_3])
self.assertEqual([[3, 4], [1, 2]], [x.as_list() for x in s_cmp_3])
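# A minimal sketch (illustration only) of the merging rule exercised above:
# QueueBase.from_list keeps the most specific shape compatible with every
# queue, turning mismatched dims into None and mismatched ranks into an
# unknown rank.
def _merged_shape_example():
  qs = [
      data_flow_ops.PaddingFIFOQueue(10, [dtypes_lib.float32],
                                     [tensor_shape.TensorShape(s)])
      for s in ([1, 1], [1, 2])
  ]
  which = constant_op.constant(0)
  # Shapes [1, 1] and [1, 2] merge to [1, None].
  return data_flow_ops.QueueBase.from_list(which, qs).shapes[0]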
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/padding_fifo_queue_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for SpaceToBatch and BatchToSpace ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def space_to_batch_direct(input_array, block_shape, paddings):
"""Direct Python implementation of space-to-batch conversion.
This is used for tests only.
Args:
input_array: N-D array
block_shape: 1-D array of shape [num_block_dims].
paddings: 2-D array of shape [num_block_dims, 2].
Returns:
Converted tensor.
"""
input_array = np.array(input_array)
block_shape = np.array(block_shape)
num_block_dims = len(block_shape)
paddings = np.array(paddings).reshape((len(block_shape), 2))
padded = np.pad(input_array,
pad_width=([[0, 0]] + list(paddings) + [[0, 0]] *
(input_array.ndim - 1 - num_block_dims)),
mode="constant")
reshaped_padded_shape = [input_array.shape[0]]
output_shape = [input_array.shape[0] * np.prod(block_shape)]
for block_dim, block_shape_value in enumerate(block_shape):
reduced_size = padded.shape[block_dim + 1] // block_shape_value
reshaped_padded_shape.append(reduced_size)
output_shape.append(reduced_size)
reshaped_padded_shape.append(block_shape_value)
reshaped_padded_shape.extend(input_array.shape[num_block_dims + 1:])
output_shape.extend(input_array.shape[num_block_dims + 1:])
reshaped_padded = padded.reshape(reshaped_padded_shape)
permuted_reshaped_padded = np.transpose(reshaped_padded, (
list(np.arange(num_block_dims) * 2 + 2) + [0] +
list(np.arange(num_block_dims) * 2 + 1) + list(
np.arange(input_array.ndim - num_block_dims - 1) + 1 + num_block_dims
* 2)))
return permuted_reshaped_padded.reshape(output_shape)
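# A minimal sketch (illustration only) of the conversion implemented above:
# with block_shape=[2, 2] and zero paddings, each 2x2 spatial block of a
# [1, 2, 2, 1] input moves into the batch dimension, yielding [4, 1, 1, 1].
def _space_to_batch_direct_example():
  x = np.arange(4).reshape([1, 2, 2, 1])
  out = space_to_batch_direct(x, [2, 2], [[0, 0], [0, 0]])
  assert out.shape == (4, 1, 1, 1)
  return out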
class PythonOpImpl(object):
@staticmethod
def space_to_batch(*args, **kwargs):
return array_ops.space_to_batch(*args, **kwargs)
@staticmethod
def batch_to_space(*args, **kwargs):
return array_ops.batch_to_space(*args, **kwargs)
class CppOpImpl(object):
@staticmethod
def space_to_batch(*args, **kwargs):
return gen_array_ops.space_to_batch(*args, **kwargs)
@staticmethod
def batch_to_space(*args, **kwargs):
return gen_array_ops.batch_to_space(*args, **kwargs)
class SpaceToBatchTest(test.TestCase, PythonOpImpl):
"""Tests input-output pairs for the SpaceToBatch and BatchToSpace ops.
This uses the Python compatibility wrapper that forwards to space_to_batch_nd.
"""
def _testPad(self, inputs, paddings, block_size, outputs):
with self.cached_session(use_gpu=True):
# outputs = space_to_batch(inputs)
x_tf = self.space_to_batch(
math_ops.cast(inputs, dtypes.float32),
paddings,
block_size=block_size)
self.assertAllEqual(x_tf.eval(), outputs)
# inputs = batch_to_space(outputs)
x_tf = self.batch_to_space(
math_ops.cast(outputs, dtypes.float32),
paddings,
block_size=block_size)
self.assertAllEqual(x_tf.eval(), inputs)
def _testOne(self, inputs, block_size, outputs):
paddings = np.zeros((2, 2), dtype=np.int32)
self._testPad(inputs, paddings, block_size, outputs)
# [1, 2, 2, 1] <-> [4, 1, 1, 1]
@test_util.run_deprecated_v1
def testSmallInput2x2(self):
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 2
x_out = [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
self._testOne(x_np, block_size, x_out)
# [1, 2, 2, 1] <-> [1, 3, 3, 1] (padding) <-> [9, 1, 1, 1]
@test_util.run_deprecated_v1
def testSmallInput2x2Pad1x0(self):
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.array([[1, 0], [1, 0]], dtype=np.int32)
block_size = 3
x_out = [[[[0]]], [[[0]]], [[[0]]], [[[0]]], [[[1]]], [[[2]]], [[[0]]],
[[[3]]], [[[4]]]]
self._testPad(x_np, paddings, block_size, x_out)
# Test with depth larger than 1.
# [1, 2, 2, 3] <-> [4, 1, 1, 3]
@test_util.run_deprecated_v1
def testDepthInput2x2(self):
x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
block_size = 2
x_out = [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
self._testOne(x_np, block_size, x_out)
# Test for larger input dimensions.
# [1, 4, 4, 1] <-> [4, 2, 2, 1]
@test_util.run_deprecated_v1
def testLargerInput2x2(self):
x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]],
[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
block_size = 2
x_out = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]],
[[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]]
self._testOne(x_np, block_size, x_out)
# Test with batch larger than 1.
# [2, 2, 4, 1] <-> [8, 1, 2, 1]
@test_util.run_deprecated_v1
def testBatchInput2x2(self):
x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]],
[[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
block_size = 2
x_out = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
[[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input spatial dimensions AND batch larger than 1, to ensure
# that elements are correctly laid out spatially and properly interleaved
# along the batch dimension.
# [2, 4, 4, 1] <-> [8, 2, 2, 1]
@test_util.run_deprecated_v1
def testLargerInputBatch2x2(self):
x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]],
[[9], [10], [11], [12]], [[13], [14], [15], [16]]],
[[[17], [18], [19], [20]], [[21], [22], [23], [24]],
[[25], [26], [27], [28]], [[29], [30], [31], [32]]]]
x_out = [[[[1], [3]], [[9], [11]]], [[[17], [19]], [[25], [27]]],
[[[2], [4]], [[10], [12]]], [[[18], [20]], [[26], [28]]],
[[[5], [7]], [[13], [15]]], [[[21], [23]], [[29], [31]]],
[[[6], [8]], [[14], [16]]], [[[22], [24]], [[30], [32]]]]
block_size = 2
self._testOne(x_np, block_size, x_out)
class SpaceToBatchCppTest(SpaceToBatchTest, CppOpImpl):
"""Tests input-output pairs for the SpaceToBatch and BatchToSpace ops.
This uses the C++ ops.
"""
pass
class SpaceToBatchNDTest(test.TestCase):
"""Tests input-output pairs for the SpaceToBatchND and BatchToSpaceND ops."""
def _testPad(self, inputs, block_shape, paddings, outputs):
block_shape = np.array(block_shape)
paddings = np.array(paddings).reshape((len(block_shape), 2))
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
# outputs = space_to_batch(inputs)
x_tf = array_ops.space_to_batch_nd(
math_ops.cast(inputs, dtypes.float32), block_shape, paddings)
self.assertAllEqual(x_tf.eval(), outputs)
# inputs = batch_to_space(outputs)
x_tf = array_ops.batch_to_space_nd(
math_ops.cast(outputs, dtypes.float32), block_shape, paddings)
self.assertAllEqual(x_tf.eval(), inputs)
def _testDirect(self, input_shape, block_shape, paddings):
inputs = np.arange(np.prod(input_shape), dtype=np.float32)
inputs = inputs.reshape(input_shape)
self._testPad(inputs, block_shape, paddings,
space_to_batch_direct(inputs, block_shape, paddings))
@test_util.run_deprecated_v1
def testZeroBlockDimsZeroRemainingDims(self):
self._testPad(
inputs=[1, 2],
block_shape=[],
paddings=[],
outputs=[1, 2],)
@test_util.run_deprecated_v1
def testZeroBlockDimsOneRemainingDim(self):
self._testPad(
inputs=[[1, 2], [3, 4]],
block_shape=[],
paddings=[],
outputs=[[1, 2], [3, 4]])
# Same thing, but with a no-op block dim.
self._testPad(
inputs=[[1, 2], [3, 4]],
block_shape=[1],
paddings=[[0, 0]],
outputs=[[1, 2], [3, 4]])
@test_util.run_deprecated_v1
def testZeroBlockDimsTwoRemainingDims(self):
self._testPad(
inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
block_shape=[],
paddings=[],
outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
# Same thing, but with a no-op block dim.
self._testPad(
inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
block_shape=[1],
paddings=[[0, 0]],
outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
# Same thing, but with two no-op block dims.
self._testPad(
inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
block_shape=[1, 1],
paddings=[[0, 0], [0, 0]],
outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
@test_util.run_deprecated_v1
def testOneBlockDimZeroRemainingDims(self):
self._testPad(
inputs=[[1, 2, 3], [4, 5, 6]],
block_shape=[2],
paddings=[1, 0],
outputs=[[0, 2], [0, 5], [1, 3], [4, 6]])
@test_util.run_deprecated_v1
def testOneBlockDimOneRemainingDim(self):
self._testPad(
inputs=[[[1, 11], [2, 21], [3, 31]], [[4, 41], [5, 51], [6, 61]]],
block_shape=[2],
paddings=[1, 0],
outputs=[[[0, 0], [2, 21]], [[0, 0], [5, 51]], [[1, 11], [3, 31]],
[[4, 41], [6, 61]]])
@test_util.run_deprecated_v1
def testDirect(self):
# Test with zero-size remaining dimension.
self._testDirect(
input_shape=[3, 1, 2, 0], block_shape=[3], paddings=[[0, 2]])
# Test with zero-size blocked dimension.
self._testDirect(
input_shape=[3, 0, 2, 5], block_shape=[3], paddings=[[0, 0]])
# Test with padding up from zero size.
self._testDirect(
input_shape=[3, 0, 2, 5], block_shape=[3], paddings=[[1, 2]])
self._testDirect(
input_shape=[3, 3, 4, 5, 2],
block_shape=[3, 4, 2],
paddings=[[1, 2], [0, 0], [3, 0]])
self._testDirect(
input_shape=[3, 3, 4, 5, 2],
block_shape=[3, 4, 2, 2],
paddings=[[1, 2], [0, 0], [3, 0], [0, 0]])
self._testDirect(
input_shape=[3, 2, 2, 3, 4, 5, 2, 5],
block_shape=[1, 1, 3, 4, 2, 2],
paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0]])
self._testDirect(
input_shape=[3, 2, 2, 3, 4, 5, 2, 5],
block_shape=[1, 1, 3, 4, 2, 2, 1],
paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0], [0, 0]])
class SpaceToBatchSpaceToDepth(test.TestCase, PythonOpImpl):
# Verifies that: space_to_batch(x) = transpose(space_to_depth(transpose(x)))
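  # Intuition: space_to_depth packs each spatial block into the depth axis,
  # while space_to_batch packs it into the batch axis; transposing batch and
  # depth before and after lets one op emulate the other.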
@test_util.run_deprecated_v1
def testSpaceToDepthTranspose(self):
x = np.arange(5 * 10 * 16 * 7, dtype=np.float32).reshape([5, 10, 16, 7])
block_size = 2
paddings = np.zeros((2, 2), dtype=np.int32)
y1 = self.space_to_batch(x, paddings, block_size=block_size)
y2 = array_ops.transpose(
array_ops.space_to_depth(
array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size),
[3, 1, 2, 0])
with self.session(use_gpu=True):
self.assertAllEqual(y1.eval(), y2.eval())
class SpaceToBatchSpaceToDepthCpp(SpaceToBatchSpaceToDepth, CppOpImpl):
pass
class SpaceToBatchErrorHandlingTest(test.TestCase, PythonOpImpl):
@test_util.run_deprecated_v1
def testInputWrongDimMissingBatch(self):
# The input is missing the first dimension ("batch")
x_np = [[[1], [2]], [[3], [4]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 2
with self.assertRaises(ValueError):
_ = self.space_to_batch(x_np, paddings, block_size)
@test_util.run_deprecated_v1
def testBlockSize0(self):
# The block size is 0.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 0
with self.assertRaises(ValueError):
out_tf = self.space_to_batch(x_np, paddings, block_size)
out_tf.eval()
@test_util.run_deprecated_v1
def testBlockSizeOne(self):
    # The block size is 1, but it must be greater than 1.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 1
with self.assertRaises(ValueError):
out_tf = self.space_to_batch(x_np, paddings, block_size)
out_tf.eval()
@test_util.run_deprecated_v1
def testBlockSizeLarger(self):
# The block size is too large for this input.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 10
with self.assertRaises(ValueError):
out_tf = self.space_to_batch(x_np, paddings, block_size)
out_tf.eval()
@test_util.run_deprecated_v1
def testBlockSizeNotDivisibleWidth(self):
# The block size divides width but not height.
x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 3
with self.assertRaises(ValueError):
_ = self.space_to_batch(x_np, paddings, block_size)
@test_util.run_deprecated_v1
def testBlockSizeNotDivisibleHeight(self):
# The block size divides height but not width.
x_np = [[[[1], [2]], [[3], [4]], [[5], [6]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 3
with self.assertRaises(ValueError):
_ = self.space_to_batch(x_np, paddings, block_size)
@test_util.run_deprecated_v1
def testBlockSizeNotDivisibleBoth(self):
    # The block size divides neither width nor height.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 3
with self.assertRaises(ValueError):
_ = self.space_to_batch(x_np, paddings, block_size)
@test_util.run_deprecated_v1
def testUnknownShape(self):
t = self.space_to_batch(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32),
block_size=4)
self.assertEqual(4, t.get_shape().ndims)
class SpaceToBatchErrorHandlingCppTest(SpaceToBatchErrorHandlingTest,
CppOpImpl):
pass
class SpaceToBatchNDErrorHandlingTest(test.TestCase):
def _testStaticShape(self, input_shape, block_shape, paddings, error):
block_shape = np.array(block_shape)
paddings = np.array(paddings)
# Try with sizes known at graph construction time.
with self.assertRaises(error):
_ = array_ops.space_to_batch_nd(
np.zeros(input_shape, np.float32), block_shape, paddings)
def _testDynamicShape(self, input_shape, block_shape, paddings):
block_shape = np.array(block_shape)
paddings = np.array(paddings)
# Try with sizes unknown at graph construction time.
input_placeholder = array_ops.placeholder(dtypes.float32)
block_shape_placeholder = array_ops.placeholder(
dtypes.int32, shape=block_shape.shape)
paddings_placeholder = array_ops.placeholder(dtypes.int32)
t = array_ops.space_to_batch_nd(input_placeholder, block_shape_placeholder,
paddings_placeholder)
with self.assertRaises(ValueError):
_ = t.eval({
input_placeholder: np.zeros(input_shape, np.float32),
block_shape_placeholder: block_shape,
paddings_placeholder: paddings
})
def _testShape(self, input_shape, block_shape, paddings, error):
self._testStaticShape(input_shape, block_shape, paddings, error)
self._testDynamicShape(input_shape, block_shape, paddings)
@test_util.run_deprecated_v1
def testBlockSize0(self):
# The block size is 0.
self._testShape([1, 2, 2], [0, 2], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testBlockSizeNegative(self):
self._testShape([1, 2, 2], [-1, 2], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testNegativePadding(self):
# The padding is negative.
self._testShape([1, 2, 2], [1, 1], [[0, -1], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testBlockSizeNotDivisible(self):
# The padded size is not divisible by the block size.
self._testShape([1, 2, 3, 1], [3, 3], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testBlockDimsMismatch(self):
# Shape of block_shape does not match shape of paddings.
self._testStaticShape([1, 3, 3, 1], [3, 3], [[0, 0]], ValueError)
@test_util.run_deprecated_v1
def testUnknown(self):
# Verify that input shape and paddings shape can be unknown.
_ = array_ops.space_to_batch_nd(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
# Only number of input dimensions is known.
t = array_ops.space_to_batch_nd(
array_ops.placeholder(
dtypes.float32, shape=(None, None, None, None)),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
self.assertEqual(4, t.get_shape().ndims)
# Dimensions are partially known.
t = array_ops.space_to_batch_nd(
array_ops.placeholder(
dtypes.float32, shape=(None, None, None, 2)),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
t = array_ops.space_to_batch_nd(
array_ops.placeholder(
dtypes.float32, shape=(3, None, None, 2)), [2, 3],
array_ops.placeholder(dtypes.int32))
self.assertEqual([3 * 2 * 3, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
t = array_ops.space_to_batch_nd(
array_ops.placeholder(
dtypes.float32, shape=(3, None, 2, 2)), [2, 3], [[1, 1], [0, 1]])
self.assertEqual([3 * 2 * 3, None, 1, 2], t.get_shape().as_list())
# Dimensions are fully known.
t = array_ops.space_to_batch_nd(
array_ops.placeholder(
dtypes.float32, shape=(3, 2, 3, 2)), [2, 3], [[1, 1], [0, 0]])
self.assertEqual([3 * 2 * 3, 2, 1, 2], t.get_shape().as_list())
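# A minimal sketch (illustration only) of the static shape rule asserted
# above: the batch dim multiplies by prod(block_shape), each blocked spatial
# dim becomes (size + pad_start + pad_end) // block, and trailing dims pass
# through unchanged.
def _space_to_batch_output_shape(input_shape, block_shape, paddings):
  batch = input_shape[0] * int(np.prod(block_shape))
  spatial = [(s + p[0] + p[1]) // b
             for s, b, p in zip(input_shape[1:], block_shape, paddings)]
  remaining = list(input_shape[1 + len(block_shape):])
  # E.g. ([3, 2, 3, 2], [2, 3], [[1, 1], [0, 0]]) -> [18, 2, 1, 2].
  return [batch] + spatial + remaining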
class SpaceToBatchGradientTest(test.TestCase, PythonOpImpl):
# Check the gradients.
def _checkGrad(self, x, paddings, block_size):
    assert x.ndim == 4
with self.cached_session(use_gpu=True):
tf_x = ops.convert_to_tensor(x)
tf_y = self.space_to_batch(tf_x, paddings, block_size)
epsilon = 1e-5
      (x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
# Tests a gradient for space_to_batch of x which is a four dimensional
# tensor of shape [b, h * block_size, w * block_size, d].
def _compare(self, b, h, w, d, block_size, pad_beg, pad_end):
block_size_sq = block_size * block_size
x = np.random.normal(0, 1, b * h * w * d *
block_size_sq).astype(np.float32).reshape(
[b, h * block_size, w * block_size, d])
paddings = np.array(
[[pad_beg, pad_end], [pad_beg, pad_end]], dtype=np.int32)
self._checkGrad(x, paddings, block_size)
  # Don't use very large numbers as dimensions here, as the result is a tensor
  # whose size is the Cartesian product of the dimensions.
@test_util.run_deprecated_v1
def testSmall(self):
block_size = 2
pad_beg = 0
pad_end = 0
self._compare(1, 2, 3, 5, block_size, pad_beg, pad_end)
@test_util.run_deprecated_v1
def testSmall2(self):
block_size = 2
pad_beg = 0
pad_end = 0
self._compare(2, 4, 3, 2, block_size, pad_beg, pad_end)
@test_util.run_deprecated_v1
def testSmallPad1x1(self):
block_size = 2
pad_beg = 1
pad_end = 1
self._compare(1, 2, 3, 5, block_size, pad_beg, pad_end)
class SpaceToBatchGradientCppTest(SpaceToBatchGradientTest, CppOpImpl):
pass
class SpaceToBatchNDGradientTest(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_shape, paddings):
block_shape = np.array(block_shape)
paddings = np.array(paddings).reshape((len(block_shape), 2))
with self.cached_session():
tf_x = ops.convert_to_tensor(x)
tf_y = array_ops.space_to_batch_nd(tf_x, block_shape, paddings)
epsilon = 1e-5
      (x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
def _compare(self, input_shape, block_shape, paddings):
x = np.random.normal(
0, 1, np.prod(input_shape)).astype(np.float32).reshape(input_shape)
self._checkGrad(x, block_shape, paddings)
  # Don't use very large numbers as dimensions here, as the result is a tensor
  # whose size is the Cartesian product of the dimensions.
@test_util.run_deprecated_v1
def testSmall(self):
self._compare([1, 4, 6, 5], [2, 2], [[0, 0], [0, 0]])
@test_util.run_deprecated_v1
def testSmall2(self):
self._compare([2, 8, 6, 2], [2, 2], [[0, 0], [0, 0]])
@test_util.run_deprecated_v1
def testSmallPad1(self):
self._compare([2, 4, 6, 2], [2, 2], [[1, 1], [1, 1]])
@test_util.run_deprecated_v1
def testSmallPadThreeBlockDims(self):
self._compare([2, 2, 4, 3, 2], [2, 2, 2], [[1, 1], [1, 1], [1, 0]])
class RequiredSpaceToBatchPaddingsTest(test.TestCase):
def _checkProperties(self, input_shape, block_shape, base_paddings, paddings,
crops):
"""Checks that `paddings` and `crops` satisfy invariants."""
num_block_dims = len(block_shape)
self.assertEqual(len(input_shape), num_block_dims)
if base_paddings is None:
base_paddings = np.zeros((num_block_dims, 2), np.int32)
self.assertEqual(base_paddings.shape, (num_block_dims, 2))
self.assertEqual(paddings.shape, (num_block_dims, 2))
self.assertEqual(crops.shape, (num_block_dims, 2))
for i in range(num_block_dims):
self.assertEqual(paddings[i, 0], base_paddings[i, 0])
self.assertLessEqual(0, paddings[i, 1] - base_paddings[i, 1])
self.assertLess(paddings[i, 1] - base_paddings[i, 1], block_shape[i])
self.assertEqual(
(input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i],
0)
self.assertEqual(crops[i, 0], 0)
self.assertEqual(crops[i, 1], paddings[i, 1] - base_paddings[i, 1])
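  # Worked example of the invariants above, assuming base_paddings=None: for
  # input_shape=[1] and block_shape=[2], the padded size must reach 2, so
  # paddings=[[0, 1]] and crops=[[0, 1]] (the extra padding is cropped back).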
def _test(self, input_shape, block_shape, base_paddings):
input_shape = np.array(input_shape)
block_shape = np.array(block_shape)
if base_paddings is not None:
base_paddings = np.array(base_paddings)
# Check with constants.
paddings, crops = array_ops.required_space_to_batch_paddings(input_shape,
block_shape,
base_paddings)
paddings_const = tensor_util.constant_value(paddings)
crops_const = tensor_util.constant_value(crops)
self.assertIsNotNone(paddings_const)
self.assertIsNotNone(crops_const)
self._checkProperties(input_shape, block_shape, base_paddings,
paddings_const, crops_const)
# Check with non-constants.
assignments = {}
input_shape_placeholder = array_ops.placeholder(dtypes.int32)
assignments[input_shape_placeholder] = input_shape
block_shape_placeholder = array_ops.placeholder(dtypes.int32,
[len(block_shape)])
assignments[block_shape_placeholder] = block_shape
if base_paddings is not None:
base_paddings_placeholder = array_ops.placeholder(dtypes.int32,
[len(block_shape), 2])
assignments[base_paddings_placeholder] = base_paddings
else:
base_paddings_placeholder = None
t_paddings, t_crops = array_ops.required_space_to_batch_paddings(
input_shape_placeholder, block_shape_placeholder,
base_paddings_placeholder)
with self.cached_session():
paddings_result = t_paddings.eval(assignments)
crops_result = t_crops.eval(assignments)
self.assertAllEqual(paddings_result, paddings_const)
self.assertAllEqual(crops_result, crops_const)
@test_util.run_deprecated_v1
def testSimple(self):
self._test(
input_shape=np.zeros((0,), np.int32),
block_shape=np.zeros((0,), np.int32),
base_paddings=None)
self._test(
input_shape=np.zeros((0,), np.int32),
block_shape=np.zeros((0,), np.int32),
base_paddings=np.zeros((0, 2), np.int32))
self._test(input_shape=[1], block_shape=[2], base_paddings=None)
self._test(input_shape=[1], block_shape=[2], base_paddings=[[1, 0]])
self._test(input_shape=[3], block_shape=[1], base_paddings=[[1, 2]])
self._test(input_shape=[1], block_shape=[2], base_paddings=[[2, 3]])
self._test(input_shape=[4, 5], block_shape=[3, 2], base_paddings=None)
self._test(
input_shape=[4, 5], block_shape=[3, 2], base_paddings=[[0, 0], [0, 1]])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/kernel_tests/spacetobatch_op_test.py
|